ia64/xen-unstable

annotate xen/arch/ia64/xen/xenmisc.c @ 9756:14a34d811e81

[IA64] introduce P2M conversion

Introduce P2M conversion functions necessary for the dom0vp model.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@ldap.hp.com
date Tue Apr 25 13:06:57 2006 -0600 (2006-04-25)
parents 96bc87dd7ca9
children ced37bea0647
rev   line source
djm@6458 1 /*
djm@6458 2 * Xen misc
djm@6458 3 *
djm@6458 4 * Functions/decls that are/may be needed to link with Xen because
djm@6458 5 * of x86 dependencies
djm@6458 6 *
djm@6458 7 * Copyright (C) 2004 Hewlett-Packard Co.
djm@6458 8 * Dan Magenheimer (dan.magenheimer@hp.com)
djm@6458 9 *
djm@6458 10 */
djm@6458 11
djm@6458 12 #include <linux/config.h>
djm@6458 13 #include <xen/sched.h>
djm@6458 14 #include <linux/efi.h>
djm@6458 15 #include <asm/processor.h>
djm@6458 16 #include <xen/serial.h>
djm@6458 17 #include <asm/io.h>
djm@6458 18 #include <xen/softirq.h>
djm@7279 19 #include <public/sched.h>
djm@7333 20 #include <asm/vhpt.h>
kaf24@8635 21 #include <asm/debugger.h>
awilliam@9005 22 #include <asm/vmx.h>
awilliam@9005 23 #include <asm/vmx_vcpu.h>
djm@6458 24
djm@6458 25 efi_memory_desc_t ia64_efi_io_md;
djm@6458 26 EXPORT_SYMBOL(ia64_efi_io_md);
djm@6458 27 unsigned long wait_init_idle;
djm@6458 28 int phys_proc_id[NR_CPUS];
djm@6458 29 unsigned long loops_per_jiffy = (1<<12); // from linux/init/main.c
djm@6458 30
awilliam@9005 31 /* FIXME: where should these declarations live? */
awilliam@9005 32 extern void show_registers(struct pt_regs *regs);
awilliam@9005 33
djm@6458 34 void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
djm@6458 35 void ia64_mca_cpu_init(void *x) { }
djm@6458 36 void ia64_patch_mckinley_e9(unsigned long a, unsigned long b) { }
djm@6458 37 void ia64_patch_vtop(unsigned long a, unsigned long b) { }
djm@6458 38 void hpsim_setup(char **x)
djm@6458 39 {
djm@6458 40 #ifdef CONFIG_SMP
djm@6458 41 init_smp_config();
djm@6458 42 #endif
djm@6458 43 }
djm@6458 44
djm@6458 45 // called from mem_init... don't think s/w I/O tlb is needed in Xen
djm@6458 46 //void swiotlb_init(void) { } ...looks like it IS needed
djm@6458 47
djm@6458 48 long
djm@6458 49 is_platform_hp_ski(void)
djm@6458 50 {
djm@6458 51 int i;
djm@6458 52 long cpuid[6];
djm@6458 53
djm@6458 54 for (i = 0; i < 5; ++i)
djm@6458 55 cpuid[i] = ia64_get_cpuid(i);
djm@6458 56 if ((cpuid[0] & 0xff) != 'H') return 0;
djm@6458 57 if ((cpuid[3] & 0xff) != 0x4) return 0;
djm@6458 58 if (((cpuid[3] >> 8) & 0xff) != 0x0) return 0;
djm@6458 59 if (((cpuid[3] >> 16) & 0xff) != 0x0) return 0;
djm@6458 60 if (((cpuid[3] >> 24) & 0x7) != 0x7) return 0;
djm@6458 61 return 1;
djm@6458 62 }
djm@6458 63
djm@6458 64 long
djm@6458 65 platform_is_hp_ski(void)
djm@6458 66 {
djm@6458 67 extern long running_on_sim;
djm@6458 68 return running_on_sim;
djm@6458 69 }
djm@6458 70
djm@6458 71 /* calls in xen/common code that are unused on ia64 */
djm@6458 72
djm@6458 73 void sync_lazy_execstate_cpu(unsigned int cpu) {}
djm@6458 74
djm@6878 75 #if 0
djm@6458 76 int grant_table_create(struct domain *d) { return 0; }
djm@6458 77 void grant_table_destroy(struct domain *d) { return; }
djm@6458 78 #endif
djm@6458 79
djm@7921 80 struct pt_regs *guest_cpu_user_regs(void) { return vcpu_regs(current); }
djm@6458 81
djm@6458 82 void raise_actimer_softirq(void)
djm@6458 83 {
kaf24@8586 84 raise_softirq(TIMER_SOFTIRQ);
djm@6458 85 }
djm@6458 86
djm@6458 87 unsigned long
kaf24@8726 88 gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
djm@6458 89 {
awilliam@9756 90 #ifndef CONFIG_XEN_IA64_DOM0_VP
djm@6458 91 if (d == dom0)
djm@6458 92 return(gpfn);
awilliam@9756 93 else
awilliam@9756 94 #endif
awilliam@9756 95 {
djm@6458 96 unsigned long pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT);
djm@6458 97 if (!pte) {
kaf24@8726 98 printk("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
djm@6458 99 while(1);
djm@6458 100 return 0;
djm@6458 101 }
djm@6458 102 return ((pte & _PFN_MASK) >> PAGE_SHIFT);
djm@6458 103 }
djm@6458 104 }
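A hedged illustration of how the P2M conversion above might be used by a
caller (this helper is not part of the file; its name and the offset
handling are assumptions, and it relies only on gmfn_to_mfn_foreign() and
PAGE_SHIFT from this file, plus the usual PAGE_MASK definition assumed to
be available elsewhere in the tree):

/* Hypothetical example: translate a guest pfn of domain d plus a byte
 * offset into a machine (host physical) address. */
static unsigned long
example_gpaddr_to_maddr(struct domain *d, unsigned long gpfn,
                        unsigned long offset)
{
    unsigned long mfn = gmfn_to_mfn_foreign(d, gpfn);

    /* Turn the machine frame number back into a byte address and add
     * the in-page offset. */
    return (mfn << PAGE_SHIFT) | (offset & ~PAGE_MASK);
}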
djm@6878 105 #if 0
djm@6458 106 u32
kaf24@8726 107 mfn_to_gmfn(struct domain *d, unsigned long frame)
djm@6458 108 {
djm@6458 109 // FIXME: is this right?
djm@6458 110 if ((frame << PAGE_SHIFT) & _PAGE_PPN_MASK) {
kaf24@8726 111 printk("mfn_to_gmfn: bad frame. spinning...\n");
djm@6458 112 while(1);
djm@6458 113 }
djm@6458 114 return frame;
djm@6458 115 }
djm@6458 116 #endif
djm@6458 117
djm@6458 118 ///////////////////////////////
djm@6458 119 // from arch/x86/flushtlb.c
djm@6458 120 ///////////////////////////////
djm@6458 121
djm@6458 122 u32 tlbflush_clock;
djm@6458 123 u32 tlbflush_time[NR_CPUS];
djm@6458 124
djm@6458 125 ///////////////////////////////
djm@6458 126 // from arch/x86/memory.c
djm@6458 127 ///////////////////////////////
djm@6458 128
djm@6458 129
awilliam@9162 130 void free_page_type(struct page_info *page, u32 type)
djm@6458 131 {
awilliam@9162 132 // dummy();
awilliam@9162 133 return;
djm@6458 134 }
awilliam@9162 135
awilliam@9162 136 int alloc_page_type(struct page_info *page, u32 type)
awilliam@9162 137 {
awilliam@9162 138 // dummy();
awilliam@9162 139 return 1;
awilliam@9162 140 }
djm@6458 141
djm@6458 142 ///////////////////////////////
djm@6458 143 //// misc memory stuff
djm@6458 144 ///////////////////////////////
djm@6458 145
djm@6458 146 unsigned long __get_free_pages(unsigned int mask, unsigned int order)
djm@6458 147 {
djm@6458 148 void *p = alloc_xenheap_pages(order);
djm@6458 149
djm@6458 150 memset(p,0,PAGE_SIZE<<order);
djm@6458 151 return (unsigned long)p;
djm@6458 152 }
djm@6458 153
awilliam@9271 154 void __free_pages(struct page_info *page, unsigned int order)
djm@6458 155 {
djm@6458 156 if (order) BUG();
djm@6458 157 free_xenheap_page(page);
djm@6458 158 }
djm@6458 159
djm@6458 160 void *pgtable_quicklist_alloc(void)
djm@6458 161 {
awilliam@8779 162 void *p;
awilliam@8779 163 p = alloc_xenheap_pages(0);
awilliam@8779 164 if (p)
awilliam@8779 165 clear_page(p);
awilliam@8779 166 return p;
djm@6458 167 }
djm@6458 168
djm@6458 169 void pgtable_quicklist_free(void *pgtable_entry)
djm@6458 170 {
djm@6458 171 free_xenheap_page(pgtable_entry);
djm@6458 172 }
djm@6458 173
djm@6458 174 ///////////////////////////////
djm@6458 175 // from arch/ia64/traps.c
djm@6458 176 ///////////////////////////////
djm@6458 177
djm@6458 178 int is_kernel_text(unsigned long addr)
djm@6458 179 {
djm@6458 180 extern char _stext[], _etext[];
djm@6458 181 if (addr >= (unsigned long) _stext &&
djm@6458 182 addr <= (unsigned long) _etext)
djm@6458 183 return 1;
djm@6458 184
djm@6458 185 return 0;
djm@6458 186 }
djm@6458 187
djm@6458 188 unsigned long kernel_text_end(void)
djm@6458 189 {
djm@6458 190 extern char _etext[];
djm@6458 191 return (unsigned long) _etext;
djm@6458 192 }
djm@6458 193
djm@6458 194 ///////////////////////////////
djm@6458 195 // from common/keyhandler.c
djm@6458 196 ///////////////////////////////
djm@6458 197 void dump_pageframe_info(struct domain *d)
djm@6458 198 {
djm@6458 199 printk("dump_pageframe_info not implemented\n");
djm@6458 200 }
djm@6458 201
djm@6458 202 ///////////////////////////////
djm@6458 203 // called from arch/ia64/head.S
djm@6458 204 ///////////////////////////////
djm@6458 205
djm@6458 206 void console_print(char *msg)
djm@6458 207 {
djm@6458 208 printk("console_print called, how did start_kernel return???\n");
djm@6458 209 }
djm@6458 210
djm@6458 211 void kernel_thread_helper(void)
djm@6458 212 {
djm@6458 213 printk("kernel_thread_helper not implemented\n");
djm@6458 214 dummy();
djm@6458 215 }
djm@6458 216
djm@6458 217 void sys_exit(void)
djm@6458 218 {
djm@6458 219 printk("sys_exit not implemented\n");
djm@6458 220 dummy();
djm@6458 221 }
djm@6458 222
djm@6458 223 ////////////////////////////////////
djm@6458 224 // called from unaligned.c
djm@6458 225 ////////////////////////////////////
djm@6458 226
djm@6458 227 void die_if_kernel(char *str, struct pt_regs *regs, long err) /* __attribute__ ((noreturn)) */
djm@6458 228 {
djm@8439 229 if (user_mode(regs))
djm@8439 230 return;
djm@8439 231
djm@8439 232 printk("%s: %s %ld\n", __func__, str, err);
djm@8439 233 debugtrace_dump();
djm@8439 234 show_registers(regs);
djm@8439 235 domain_crash_synchronous();
djm@6458 236 }
djm@6458 237
djm@6458 238 long
djm@6458 239 ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
djm@6458 240 unsigned long user_rbs_end, unsigned long addr, long *val)
djm@6458 241 {
djm@6458 242 printk("ia64_peek: called, not implemented\n");
awilliam@9005 243 return 1;
djm@6458 244 }
djm@6458 245
djm@6458 246 long
djm@6458 247 ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
djm@6458 248 unsigned long user_rbs_end, unsigned long addr, long val)
djm@6458 249 {
djm@6458 250 printk("ia64_poke: called, not implemented\n");
awilliam@9005 251 return 1;
djm@6458 252 }
djm@6458 253
djm@6458 254 void
djm@6458 255 ia64_sync_fph (struct task_struct *task)
djm@6458 256 {
djm@6458 257 printk("ia64_sync_fph: called, not implemented\n");
djm@6458 258 }
djm@6458 259
djm@6458 260 void
djm@6458 261 ia64_flush_fph (struct task_struct *task)
djm@6458 262 {
djm@6458 263 printk("ia64_flush_fph: called, not implemented\n");
djm@6458 264 }
djm@6458 265
djm@6458 266 ////////////////////////////////////
djm@6458 267 // called from irq_ia64.c:init_IRQ()
djm@6458 268 // (because CONFIG_IA64_HP_SIM is specified)
djm@6458 269 ////////////////////////////////////
djm@6458 270 void hpsim_irq_init(void) { }
djm@6458 271
djm@6458 272
djm@6458 273 // accommodate linux extable.c
djm@6458 274 //const struct exception_table_entry *
djm@6458 275 void *search_module_extables(unsigned long addr) { return NULL; }
djm@6458 276 void *__module_text_address(unsigned long addr) { return NULL; }
djm@6458 277 void *module_text_address(unsigned long addr) { return NULL; }
djm@6458 278
djm@6458 279 void cs10foo(void) {}
djm@6458 280 void cs01foo(void) {}
djm@6458 281
djm@6458 282 unsigned long context_switch_count = 0;
djm@6458 283
djm@7332 284 #include <asm/vcpu.h>
djm@7332 285
djm@6458 286 void context_switch(struct vcpu *prev, struct vcpu *next)
djm@6458 287 {
kaf24@8655 288 uint64_t spsr;
awilliam@8834 289 uint64_t pta;
kaf24@8655 290
kaf24@8655 291 local_irq_save(spsr);
awilliam@9157 292 // if(VMX_DOMAIN(prev)){
awilliam@9157 293 // vtm_domain_out(prev);
awilliam@9157 294 // }
djm@6458 295 context_switch_count++;
djm@6458 296 switch_to(prev,next,prev);
awilliam@8834 297 // if(VMX_DOMAIN(current)){
awilliam@8834 298 // vtm_domain_in(current);
awilliam@8834 299 // }
djm@6458 300
djm@6458 301 // leave this debug for now: it acts as a heartbeat when more than
djm@6458 302 // one domain is active
djm@6458 303 {
djm@6458 304 static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50};
djm@6458 305 static int i = 100;
djm@6458 306 int id = ((struct vcpu *)current)->domain->domain_id & 0xf;
djm@6458 307 if (!cnt[id]--) { printk("%x",id); cnt[id] = 500000; }
awilliam@9005 308 if (!i--) { printk("+"); i = 1000000; }
djm@6458 309 }
djm@6458 310
djm@7333 311 if (VMX_DOMAIN(current)){
awilliam@9157 312 // vtm_domain_in(current);
djm@6458 313 vmx_load_all_rr(current);
djm@6878 314 }else{
awilliam@8834 315 extern char ia64_ivt;
awilliam@8834 316 ia64_set_iva(&ia64_ivt);
kaf24@8507 317 if (!is_idle_domain(current->domain)) {
awilliam@8834 318 ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
awilliam@8834 319 VHPT_ENABLED);
djm@6878 320 load_region_regs(current);
djm@7504 321 vcpu_load_kernel_regs(current);
awilliam@8834 322 if (vcpu_timer_expired(current))
awilliam@8834 323 vcpu_pend_timer(current);
awilliam@8834 324 }else {
awilliam@8834 325 /* When switching to the idle domain, we only need to disable the
awilliam@8834 326 * vhpt walker. All accesses that happen within the idle context
awilliam@8834 327 * will then be handled by the TR mapping and the identity mapping.
awilliam@8834 328 */
awilliam@8834 329 pta = ia64_get_pta();
awilliam@8834 330 ia64_set_pta(pta & ~VHPT_ENABLED);
awilliam@8834 331 }
djm@6878 332 }
kaf24@8517 333
kaf24@8655 334 local_irq_restore(spsr);
kaf24@8517 335 context_saved(prev);
djm@6458 336 }
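For readers unfamiliar with the VHPT handling above, here is a hedged
sketch of how the ia64_set_pta() value used in context_switch() is put
together.  It assumes the conventional ia64 PTA layout (ve enable bit at
bit 0, size field at bits 2..7, vf long-format bit at bit 8, VHPT base in
the upper bits) and that VHPT_ENABLED is the ve bit; the helper name is
made up:

/* Illustrative only: assemble a PTA value like the one written above. */
static inline unsigned long example_make_pta(unsigned long vhpt_base,
                                             unsigned long size_log2)
{
    return vhpt_base             /* VHPT base address (suitably aligned) */
         | (1UL << 8)            /* vf: long-format VHPT entries         */
         | (size_log2 << 2)      /* size: log2 of the VHPT size in bytes */
         | 1UL;                  /* ve: enable the VHPT walker           */
}

Switching to the idle domain then only has to clear the ve bit, which is
what the pta & ~VHPT_ENABLED write above does.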
djm@6458 337
djm@6458 338 void continue_running(struct vcpu *same)
djm@6458 339 {
djm@6458 340 /* nothing to do */
djm@6458 341 }
djm@6458 342
kaf24@8990 343 void arch_dump_domain_info(struct domain *d)
kaf24@8990 344 {
kaf24@8990 345 }
kaf24@8990 346
djm@6458 347 void panic_domain(struct pt_regs *regs, const char *fmt, ...)
djm@6458 348 {
djm@6458 349 va_list args;
djm@6458 350 char buf[128];
djm@6458 351 struct vcpu *v = current;
djm@6458 352
awilliam@9005 353 printf("$$$$$ PANIC in domain %d (k6=0x%lx): ",
djm@6458 354 v->domain->domain_id,
djm@6458 355 __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT]);
djm@6458 356 va_start(args, fmt);
djm@6458 357 (void)vsnprintf(buf, sizeof(buf), fmt, args);
djm@6458 358 va_end(args);
djm@6458 359 printf(buf);
djm@6458 360 if (regs) show_registers(regs);
kaf24@8635 361 if (regs) {
kaf24@8635 362 debugger_trap_fatal(0 /* don't care */, regs);
kaf24@8635 363 } else {
kaf24@8635 364 debugger_trap_immediate();
kaf24@8635 365 }
awilliam@9681 366 domain_crash_synchronous ();
djm@6458 367 }
djm@8434 368
awilliam@9162 369 ///////////////////////////////
awilliam@9162 370 // from arch/x86/mm.c
awilliam@9162 371 ///////////////////////////////
awilliam@9162 372
awilliam@9162 373 #ifdef VERBOSE
awilliam@9162 374 #define MEM_LOG(_f, _a...) \
awilliam@9162 375 printk("DOM%u: (file=mm.c, line=%d) " _f "\n", \
awilliam@9162 376 current->domain->domain_id , __LINE__ , ## _a )
awilliam@9162 377 #else
awilliam@9162 378 #define MEM_LOG(_f, _a...) ((void)0)
awilliam@9162 379 #endif
awilliam@9162 380
awilliam@9162 381 void cleanup_writable_pagetable(struct domain *d)
awilliam@9162 382 {
awilliam@9162 383 return;
awilliam@9162 384 }
awilliam@9162 385
awilliam@9162 386 void put_page_type(struct page_info *page)
awilliam@9162 387 {
awilliam@9162 388 u32 nx, x, y = page->u.inuse.type_info;
awilliam@9162 389
awilliam@9162 390 again:
awilliam@9162 391 do {
awilliam@9162 392 x = y;
awilliam@9162 393 nx = x - 1;
awilliam@9162 394
awilliam@9162 395 ASSERT((x & PGT_count_mask) != 0);
awilliam@9162 396
awilliam@9162 397 /*
awilliam@9162 398 * The page should always be validated while a reference is held. The
awilliam@9162 399 * exception is during domain destruction, when we forcibly invalidate
awilliam@9162 400 * page-table pages if we detect a referential loop.
awilliam@9162 401 * See domain.c:relinquish_list().
awilliam@9162 402 */
awilliam@9162 403 ASSERT((x & PGT_validated) ||
awilliam@9162 404 test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags));
awilliam@9162 405
awilliam@9162 406 if ( unlikely((nx & PGT_count_mask) == 0) )
awilliam@9162 407 {
awilliam@9162 408 /* Record TLB information for flush later. Races are harmless. */
awilliam@9162 409 page->tlbflush_timestamp = tlbflush_current_time();
awilliam@9162 410
awilliam@9162 411 if ( unlikely((nx & PGT_type_mask) <= PGT_l4_page_table) &&
awilliam@9162 412 likely(nx & PGT_validated) )
awilliam@9162 413 {
awilliam@9162 414 /*
awilliam@9162 415 * Page-table pages must be unvalidated when count is zero. The
awilliam@9162 416 * 'free' is safe because the refcnt is non-zero and validated
awilliam@9162 417 * bit is clear => other ops will spin or fail.
awilliam@9162 418 */
awilliam@9162 419 if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x,
awilliam@9162 420 x & ~PGT_validated)) != x) )
awilliam@9162 421 goto again;
awilliam@9162 422 /* We cleared the 'valid bit' so we do the clean up. */
awilliam@9162 423 free_page_type(page, x);
awilliam@9162 424 /* Carry on, but with the 'valid bit' now clear. */
awilliam@9162 425 x &= ~PGT_validated;
awilliam@9162 426 nx &= ~PGT_validated;
awilliam@9162 427 }
awilliam@9162 428 }
awilliam@9162 429 else if ( unlikely(((nx & (PGT_pinned | PGT_count_mask)) ==
awilliam@9162 430 (PGT_pinned | 1)) &&
awilliam@9162 431 ((nx & PGT_type_mask) != PGT_writable_page)) )
awilliam@9162 432 {
awilliam@9162 433 /* Page is now only pinned. Make the back pointer mutable again. */
awilliam@9162 434 nx |= PGT_va_mutable;
awilliam@9162 435 }
awilliam@9162 436 }
awilliam@9162 437 while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
awilliam@9162 438 }
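Both put_page_type() above and get_page_type() below follow the same
lock-free update scheme: snapshot type_info, compute the new value, and
publish it with cmpxchg(), retrying whenever another CPU changed the word
in the meantime.  A minimal sketch of that pattern, with an illustrative
helper name and a plain counter instead of Xen's flag-laden type_info word:

/* Hypothetical example of the cmpxchg retry loop used by the type-count
 * functions in this file. */
static inline void example_atomic_inc(volatile u32 *word)
{
    u32 x, y = *word;

    do {
        x = y;                        /* value our update is based on    */
        y = cmpxchg(word, x, x + 1);  /* install x+1 only if still x     */
    } while ( unlikely(y != x) );     /* raced: retry against the new y  */
}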
awilliam@9162 439
awilliam@9162 440
awilliam@9162 441 int get_page_type(struct page_info *page, u32 type)
awilliam@9162 442 {
awilliam@9162 443 u32 nx, x, y = page->u.inuse.type_info;
awilliam@9162 444
awilliam@9162 445 again:
awilliam@9162 446 do {
awilliam@9162 447 x = y;
awilliam@9162 448 nx = x + 1;
awilliam@9162 449 if ( unlikely((nx & PGT_count_mask) == 0) )
awilliam@9162 450 {
awilliam@9162 451 MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
awilliam@9162 452 return 0;
awilliam@9162 453 }
awilliam@9162 454 else if ( unlikely((x & PGT_count_mask) == 0) )
awilliam@9162 455 {
awilliam@9162 456 if ( (x & (PGT_type_mask|PGT_va_mask)) != type )
awilliam@9162 457 {
awilliam@9162 458 if ( (x & PGT_type_mask) != (type & PGT_type_mask) )
awilliam@9162 459 {
awilliam@9162 460 /*
awilliam@9162 461 * On type change we check to flush stale TLB
awilliam@9162 462 * entries. This may be unnecessary (e.g., page
awilliam@9162 463 * was GDT/LDT) but those circumstances should be
awilliam@9162 464 * very rare.
awilliam@9162 465 */
awilliam@9162 466 cpumask_t mask =
awilliam@9162 467 page_get_owner(page)->domain_dirty_cpumask;
awilliam@9162 468 tlbflush_filter(mask, page->tlbflush_timestamp);
awilliam@9162 469
awilliam@9162 470 if ( unlikely(!cpus_empty(mask)) )
awilliam@9162 471 {
awilliam@9162 472 perfc_incrc(need_flush_tlb_flush);
awilliam@9162 473 flush_tlb_mask(mask);
awilliam@9162 474 }
awilliam@9162 475 }
awilliam@9162 476
awilliam@9162 477 /* We lose existing type, back pointer, and validity. */
awilliam@9162 478 nx &= ~(PGT_type_mask | PGT_va_mask | PGT_validated);
awilliam@9162 479 nx |= type;
awilliam@9162 480
awilliam@9162 481 /* No special validation needed for writable pages. */
awilliam@9162 482 /* Page tables and GDT/LDT need to be scanned for validity. */
awilliam@9162 483 if ( type == PGT_writable_page )
awilliam@9162 484 nx |= PGT_validated;
awilliam@9162 485 }
awilliam@9162 486 }
awilliam@9162 487 else
awilliam@9162 488 {
awilliam@9162 489 if ( unlikely((x & (PGT_type_mask|PGT_va_mask)) != type) )
awilliam@9162 490 {
awilliam@9162 491 if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask) ) )
awilliam@9162 492 {
awilliam@9162 493 if ( current->domain == page_get_owner(page) )
awilliam@9162 494 {
awilliam@9162 495 /*
awilliam@9162 496 * This ensures functions like set_gdt() see up-to-date
awilliam@9162 497 * type info without needing to clean up writable p.t.
awilliam@9162 498 * state on the fast path.
awilliam@9162 499 */
awilliam@9162 500 LOCK_BIGLOCK(current->domain);
awilliam@9162 501 cleanup_writable_pagetable(current->domain);
awilliam@9162 502 y = page->u.inuse.type_info;
awilliam@9162 503 UNLOCK_BIGLOCK(current->domain);
awilliam@9162 504 /* Can we make progress now? */
awilliam@9162 505 if ( ((y & PGT_type_mask) == (type & PGT_type_mask)) ||
awilliam@9162 506 ((y & PGT_count_mask) == 0) )
awilliam@9162 507 goto again;
awilliam@9162 508 }
awilliam@9162 509 if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
awilliam@9162 510 ((type & PGT_type_mask) != PGT_l1_page_table) )
awilliam@9398 511 MEM_LOG("Bad type (saw %08x != exp %08x) "
awilliam@9398 512 "for mfn %016lx (pfn %016lx)",
awilliam@9162 513 x, type, page_to_mfn(page),
awilliam@9162 514 get_gpfn_from_mfn(page_to_mfn(page)));
awilliam@9162 515 return 0;
awilliam@9162 516 }
awilliam@9162 517 else if ( (x & PGT_va_mask) == PGT_va_mutable )
awilliam@9162 518 {
awilliam@9162 519 /* The va backpointer is mutable, hence we update it. */
awilliam@9162 520 nx &= ~PGT_va_mask;
awilliam@9162 521 nx |= type; /* we know the actual type is correct */
awilliam@9162 522 }
awilliam@9162 523 else if ( ((type & PGT_va_mask) != PGT_va_mutable) &&
awilliam@9162 524 ((type & PGT_va_mask) != (x & PGT_va_mask)) )
awilliam@9162 525 {
awilliam@9162 526 #ifdef CONFIG_X86_PAE
awilliam@9162 527 /* We use backptr as extra typing. Cannot be unknown. */
awilliam@9162 528 if ( (type & PGT_type_mask) == PGT_l2_page_table )
awilliam@9162 529 return 0;
awilliam@9162 530 #endif
awilliam@9162 531 /* This table is possibly mapped at multiple locations. */
awilliam@9162 532 nx &= ~PGT_va_mask;
awilliam@9162 533 nx |= PGT_va_unknown;
awilliam@9162 534 }
awilliam@9162 535 }
awilliam@9162 536 if ( unlikely(!(x & PGT_validated)) )
awilliam@9162 537 {
awilliam@9162 538 /* Someone else is updating validation of this page. Wait... */
awilliam@9162 539 while ( (y = page->u.inuse.type_info) == x )
awilliam@9162 540 cpu_relax();
awilliam@9162 541 goto again;
awilliam@9162 542 }
awilliam@9162 543 }
awilliam@9162 544 }
awilliam@9162 545 while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
awilliam@9162 546
awilliam@9162 547 if ( unlikely(!(nx & PGT_validated)) )
awilliam@9162 548 {
awilliam@9162 549 /* Try to validate page type; drop the new reference on failure. */
awilliam@9162 550 if ( unlikely(!alloc_page_type(page, type)) )
awilliam@9162 551 {
awilliam@9398 552 MEM_LOG("Error while validating mfn %lx (pfn %lx) for type %08x"
awilliam@9398 553 ": caf=%08x taf=%" PRtype_info,
awilliam@9162 554 page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)),
awilliam@9162 555 type, page->count_info, page->u.inuse.type_info);
awilliam@9162 556 /* No one else can get a reference. We hold the only ref. */
awilliam@9162 557 page->u.inuse.type_info = 0;
awilliam@9162 558 return 0;
awilliam@9162 559 }
awilliam@9162 560
awilliam@9162 561 /* No one else is updating simultaneously. */
awilliam@9162 562 __set_bit(_PGT_validated, &page->u.inuse.type_info);
awilliam@9162 563 }
awilliam@9162 564
awilliam@9162 565 return 1;
awilliam@9162 566 }
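A hedged usage sketch of the pair above: a caller takes a type reference
before treating a page as a given type and drops it afterwards.  The
function name and the body in the middle are illustrative only;
PGT_writable_page is the type constant already referenced in this file:

/* Hypothetical caller: hold a writable type reference across an update. */
static int example_with_writable_page(struct page_info *page)
{
    if ( !get_page_type(page, PGT_writable_page) )
        return 0;              /* validation failed or type conflict     */

    /* ... the page may now be treated as an ordinary writable page ... */

    put_page_type(page);
    return 1;
}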