direct-io.hg
changeset 4527:4d0ec0013844
bitkeeper revision 1.1293.1.1 (425ed7d4ns1ECT4A2gBGlWC_vklpGQ)
Now booting VT-X domain with new shadow code.
Signed-off-by: michael.fetterman@cl.cam.ac.uk
author    maf46@burn.cl.cam.ac.uk
date      Thu Apr 14 20:51:32 2005 +0000 (2005-04-14)
parents   5e994310f44f
children  aa21bd6f8677
files     xen/arch/x86/audit.c xen/arch/x86/shadow.c xen/arch/x86/traps.c
          xen/arch/x86/vmx.c xen/include/asm-x86/regs.h
          xen/include/asm-x86/shadow.h xen/include/asm-x86/vmx_vmcs.h
line diff
--- a/xen/arch/x86/audit.c      Wed Apr 13 10:40:06 2005 +0000
+++ b/xen/arch/x86/audit.c      Thu Apr 14 20:51:32 2005 +0000
@@ -35,7 +35,6 @@ static int ttot=0, ctot=0, io_mappings=0
 static int l1, l2, oos_count, page_count;
 
 #define FILE_AND_LINE 0
-//#define MFN2_TO_WATCH 0x1d8
 
 #if FILE_AND_LINE
 #define adjust(_p, _a) _adjust((_p), (_a), __FILE__, __LINE__)
@@ -55,13 +54,6 @@ int audit_adjust_pgtables(struct domain
 
 void _adjust(struct pfn_info *page, int adjtype ADJUST_EXTRA_ARGS)
 {
-#ifdef MFN2_TO_WATCH
-    if (page_to_pfn(page) == MFN2_TO_WATCH)
-    {
-        APRINTK("adjust(mfn=%p, dir=%d, adjtype=%d)",
-                page_to_pfn(page), dir, adjtype);
-    }
-#endif
     if ( adjtype )
     {
         // adjust the type count
--- a/xen/arch/x86/shadow.c     Wed Apr 13 10:40:06 2005 +0000
+++ b/xen/arch/x86/shadow.c     Thu Apr 14 20:51:32 2005 +0000
@@ -68,9 +68,14 @@ shadow_promote(struct domain *d, unsigne
 
     if ( !shadow_remove_all_write_access(d, gpfn, gmfn) )
     {
-        FSH_LOG("%s: couldn't find/remove all write accesses, gpfn=%p gmfn=%p\n",
+        FSH_LOG("%s: couldn't find/remove all write accesses, gpfn=%p gmfn=%p",
                 __func__, gpfn, gmfn);
+#if 1 || defined(LIVE_DANGEROUSLY)
+        set_bit(_PGC_page_table, &page->count_info);
+        return 1;
+#endif
         return 0;
+
     }
 
     // To convert this page to use as a page table, the writable count
@@ -288,7 +293,7 @@ alloc_shadow_page(struct domain *d,
     return smfn;
 
  fail:
-    FSH_LOG("promotion of pfn=%p mfn=%p failed! external gnttab refs?\n",
+    FSH_LOG("promotion of pfn=%p mfn=%p failed! external gnttab refs?",
             gpfn, gmfn);
     free_domheap_page(page);
     return 0;
@@ -311,9 +316,6 @@ free_shadow_l1_table(struct domain *d, u
     }
 
     unmap_domain_mem(pl1e);
-
-    list_add(&spage->list, &d->arch.free_shadow_frames);
-    perfc_incr(free_l1_pages);
 }
 
 static void inline
@@ -322,6 +324,8 @@ free_shadow_hl2_table(struct domain *d,
     l1_pgentry_t *hl2 = map_domain_mem(smfn << PAGE_SHIFT);
     int i, limit;
 
+    SH_VVLOG("%s: smfn=%p freed\n", __func__, smfn);
+
 #ifdef __i386__
     if ( shadow_mode_external(d) )
         limit = L2_PAGETABLE_ENTRIES;
@@ -353,8 +357,7 @@ free_shadow_l2_table(struct domain *d, u
             put_shadow_ref(pl2e[i] >> PAGE_SHIFT);
 
     if ( (PGT_base_page_table == PGT_l2_page_table) &&
-         shadow_mode_translate(d) &&
-         !shadow_mode_external(d) )
+         shadow_mode_translate(d) && !external )
     {
         // free the ref to the hl2
         //
@@ -373,6 +376,8 @@ void free_shadow_page(unsigned long smfn
     unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
     unsigned long type = page->u.inuse.type_info & PGT_type_mask;
 
+    SH_VVLOG("%s: free'ing smfn=%p", __func__, smfn);
+
     ASSERT( ! IS_INVALID_M2P_ENTRY(gpfn) );
 
     delete_shadow_status(d, gpfn, gmfn, type);
@@ -414,7 +419,12 @@ void free_shadow_page(unsigned long smfn
     page->tlbflush_timestamp = 0;
     page->u.free.cpu_mask = 0;
 
-    if ( type != PGT_l1_shadow )
+    if ( type == PGT_l1_shadow )
+    {
+        list_add(&page->list, &d->arch.free_shadow_frames);
+        perfc_incr(free_l1_pages);
+    }
+    else
         free_domheap_page(page);
 }
 
@@ -510,9 +520,9 @@ static void free_out_of_sync_state(struc
 
 static void free_shadow_pages(struct domain *d)
 {
-    int i, free = 0;
-    struct shadow_status *x, *n;
-    struct exec_domain *e;
+    int i;
+    struct shadow_status *x;
+    struct exec_domain *ed;
 
     /*
      * WARNING! The shadow page table must not currently be in use!
@@ -529,58 +539,81 @@ static void free_shadow_pages(struct dom
 
     // second, remove any outstanding refs from ed->arch.shadow_table...
     //
-    for_each_exec_domain(d, e)
+    for_each_exec_domain(d, ed)
+    {
+        if ( pagetable_val(ed->arch.shadow_table) )
+        {
+            put_shadow_ref(pagetable_val(ed->arch.shadow_table) >> PAGE_SHIFT);
+            ed->arch.shadow_table = mk_pagetable(0);
+        }
+    }
+
+    // For external shadows, remove the monitor table's refs
+    //
+    if ( shadow_mode_external(d) )
     {
-        if ( pagetable_val(e->arch.shadow_table) )
+        for_each_exec_domain(d, ed)
         {
-            put_shadow_ref(pagetable_val(e->arch.shadow_table) >> PAGE_SHIFT);
-            e->arch.shadow_table = mk_pagetable(0);
+            l2_pgentry_t *mpl2e = ed->arch.monitor_vtable;
+            l2_pgentry_t hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)];
+            l2_pgentry_t smfn = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)];
+            if ( l2_pgentry_val(hl2e) & _PAGE_PRESENT )
+            {
+                put_shadow_ref(l2_pgentry_val(hl2e) >> PAGE_SHIFT);
+                mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = mk_l2_pgentry(0);
+            }
+            if ( l2_pgentry_val(smfn) & _PAGE_PRESENT )
+            {
+                put_shadow_ref(l2_pgentry_val(smfn) >> PAGE_SHIFT);
+                mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = mk_l2_pgentry(0);
+            }
         }
     }
 
     // Now, the only refs to shadow pages that are left are from the shadow
-    // pages themselves.  We can just free them.
+    // pages themselves.  We just unpin the pinned pages, and the rest
+    // should automatically disappear.
     //
+    // NB: Beware: each explicit or implicit call to free_shadow_page
+    // can/will result in the hash bucket getting rewritten out from
+    // under us...  First, collect the list of pinned pages, then
+    // free them.
+    //
+#define PINNED(_x) (frame_table[_x].u.inuse.type_info & PGT_pinned)
     for ( i = 0; i < shadow_ht_buckets; i++ )
     {
+        u32 count;
+        unsigned long *mfn_list;
+
         /* Skip empty buckets. */
         x = &d->arch.shadow_ht[i];
         if ( x->gpfn_and_flags == 0 )
             continue;
 
-        /* Free the head page. */
-        free_shadow_page(x->smfn);
-
-        /* Reinitialise the head node. */
-        x->gpfn_and_flags = 0;
-        x->smfn = 0;
-        n = x->next;
-        x->next = NULL;
-
-        free++;
-
-        /* Iterate over non-head nodes. */
-        for ( x = n; x != NULL; x = n )
-        {
-            /* Free the shadow page. */
-            free_shadow_page(x->smfn);
-
-            /* Re-initialise the chain node. */
-            x->gpfn_and_flags = 0;
-            x->smfn = 0;
-
-            /* Add to the free list. */
-            n = x->next;
-            x->next = d->arch.shadow_ht_free;
-            d->arch.shadow_ht_free = x;
-
-            free++;
+        count = 0;
+        for ( ; x != NULL; x = x->next )
+            if ( PINNED(x->smfn) )
+                count++;
+        if ( !count )
+            continue;
+
+        mfn_list = xmalloc_array(unsigned long, count);
+        count = 0;
+        for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next )
+            if ( PINNED(x->smfn) )
+                mfn_list[count++] = x->smfn;
+
+        while ( count )
+        {
+            shadow_unpin(mfn_list[--count]);
         }
-
-        shadow_audit(d, 0);
+        xfree(mfn_list);
     }
-
-    SH_LOG("Free shadow table.  Freed=%d.", free);
+#undef PINNED
+
+    shadow_audit(d, 0);
+
+    SH_LOG("Free shadow table.");
 }
 
 void shadow_mode_init(void)
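The rewritten free_shadow_pages() walk above deliberately frees nothing while it iterates: shadow_unpin() can drop the last reference, and free_shadow_page() then rewrites the very hash chain being walked. A minimal sketch of that collect-then-mutate pattern, using stand-in types (node, unpin_bucket) rather than Xen's shadow_status and frame_table:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for a shadow_status hash chain; 'pinned' models PGT_pinned. */
    struct node {
        unsigned long smfn;
        int pinned;
        struct node *next;
    };

    /* Unpinning may free nodes and relink the chain, so never hold a cursor
     * into the chain while unpinning: snapshot the pinned mfns first. */
    static void unpin_bucket(struct node *bucket)
    {
        unsigned count = 0;
        for ( struct node *x = bucket; x != NULL; x = x->next )
            if ( x->pinned )
                count++;
        if ( count == 0 )
            return;

        unsigned long *mfn_list = malloc(count * sizeof(*mfn_list));
        count = 0;
        for ( struct node *x = bucket; x != NULL; x = x->next )
            if ( x->pinned )
                mfn_list[count++] = x->smfn;

        /* The chain may now be rewritten freely; only the snapshot is used. */
        while ( count )
            printf("shadow_unpin(%#lx)\n", mfn_list[--count]);

        free(mfn_list);
    }

    int main(void)
    {
        struct node c = { 0x30, 1, NULL }, b = { 0x20, 0, &c }, a = { 0x10, 1, &b };
        unpin_bucket(&a);
        return 0;
    }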
Freed=%d.", free); 2.203 +#undef PINNED 2.204 + 2.205 + shadow_audit(d, 0); 2.206 + 2.207 + SH_LOG("Free shadow table."); 2.208 } 2.209 2.210 void shadow_mode_init(void) 2.211 @@ -622,6 +655,12 @@ static void alloc_monitor_pagetable(stru 2.212 mpl2e[l2_table_offset(RO_MPT_VIRT_START)] = 2.213 mk_l2_pgentry(pagetable_val(d->arch.phys_table) | __PAGE_HYPERVISOR); 2.214 2.215 + // Don't (yet) have mappings for these... 2.216 + // Don't want to accidentally see the idle_pg_table's linear mapping. 2.217 + // 2.218 + mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = mk_l2_pgentry(0); 2.219 + mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = mk_l2_pgentry(0); 2.220 + 2.221 ed->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT); 2.222 ed->arch.monitor_vtable = mpl2e; 2.223 } 2.224 @@ -631,7 +670,7 @@ static void alloc_monitor_pagetable(stru 2.225 */ 2.226 void free_monitor_pagetable(struct exec_domain *ed) 2.227 { 2.228 - l2_pgentry_t *mpl2e, hl2e; 2.229 + l2_pgentry_t *mpl2e, hl2e, sl2e; 2.230 unsigned long mfn; 2.231 2.232 ASSERT( pagetable_val(ed->arch.monitor_table) ); 2.233 @@ -643,11 +682,21 @@ void free_monitor_pagetable(struct exec_ 2.234 * First get the mfn for hl2_table by looking at monitor_table 2.235 */ 2.236 hl2e = mpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]; 2.237 - ASSERT(l2_pgentry_val(hl2e) & _PAGE_PRESENT); 2.238 - mfn = l2_pgentry_val(hl2e) >> PAGE_SHIFT; 2.239 - ASSERT(mfn); 2.240 - 2.241 - put_shadow_ref(mfn); 2.242 + if ( l2_pgentry_val(hl2e) & _PAGE_PRESENT ) 2.243 + { 2.244 + mfn = l2_pgentry_val(hl2e) >> PAGE_SHIFT; 2.245 + ASSERT(mfn); 2.246 + put_shadow_ref(mfn); 2.247 + } 2.248 + 2.249 + sl2e = mpl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]; 2.250 + if ( l2_pgentry_val(sl2e) & _PAGE_PRESENT ) 2.251 + { 2.252 + mfn = l2_pgentry_val(sl2e) >> PAGE_SHIFT; 2.253 + ASSERT(mfn); 2.254 + put_shadow_ref(mfn); 2.255 + } 2.256 + 2.257 unmap_domain_mem(mpl2e); 2.258 2.259 /* 2.260 @@ -1253,10 +1302,11 @@ int shadow_mode_control(struct domain *d 2.261 */ 2.262 void vmx_shadow_clear_state(struct domain *d) 2.263 { 2.264 - SH_VVLOG("vmx_clear_shadow_state:"); 2.265 + SH_VVLOG("%s:", __func__); 2.266 shadow_lock(d); 2.267 free_shadow_pages(d); 2.268 shadow_unlock(d); 2.269 + update_pagetables(d->exec_domain[0]); 2.270 } 2.271 2.272 unsigned long 2.273 @@ -1313,6 +1363,8 @@ shadow_hl2_table(struct domain *d, unsig 2.274 BUG(); /* XXX Deal gracefully with failure. */ 2.275 } 2.276 2.277 + SH_VVLOG("shadow_hl2_table(gpfn=%p, gmfn=%p, smfn=%p) => %p", 2.278 + gpfn, gmfn, smfn, hl2mfn); 2.279 perfc_incrc(shadow_hl2_table_count); 2.280 2.281 hl2 = map_domain_mem(hl2mfn << PAGE_SHIFT); 2.282 @@ -1951,9 +2003,13 @@ int shadow_remove_all_write_access( 2.283 2.284 // How many outstanding writable PTEs for this page are there? 2.285 // 2.286 - write_refs = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_count_mask); 2.287 - if ( write_refs && (frame_table[readonly_gmfn].u.inuse.type_info & PGT_pinned) ) 2.288 + write_refs = 2.289 + (frame_table[readonly_gmfn].u.inuse.type_info & PGT_count_mask); 2.290 + if ( write_refs && 2.291 + (frame_table[readonly_gmfn].u.inuse.type_info & PGT_pinned) ) 2.292 + { 2.293 write_refs--; 2.294 + } 2.295 2.296 if ( write_refs == 0 ) 2.297 { 2.298 @@ -1961,7 +2017,7 @@ int shadow_remove_all_write_access( 2.299 return 1; 2.300 } 2.301 2.302 - // Before searching all the L1 page tables, check the typical culprit first. 
+    // Before searching all the L1 page tables, check the typical culprit first
     //
     if ( (prediction = predict_writable_pte_page(d, readonly_gpfn)) )
     {
@@ -2001,7 +2057,7 @@ int shadow_remove_all_write_access(
         }
     }
 
-    FSH_LOG("%s: looking for %d refs, found %d refs\n",
+    FSH_LOG("%s: looking for %d refs, found %d refs",
             __func__, write_refs, found);
 
     return 0;
@@ -2215,6 +2271,14 @@ static int resync_all(struct domain *d,
         {
             perfc_incrc(unshadow_l2_count);
             shadow_unpin(smfn);
+            if ( unlikely(shadow_mode_external(d)) )
+            {
+                unsigned long hl2mfn;
+
+                if ( (hl2mfn = __shadow_status(d, entry->gpfn, PGT_hl2_shadow)) &&
+                     (frame_table[hl2mfn].u.inuse.type_info & PGT_pinned) )
+                    shadow_unpin(hl2mfn);
+            }
         }
     }
 
@@ -2398,14 +2462,14 @@ int shadow_fault(unsigned long va, struc
  * SHADOW MODE:      none        enable         translate        external
  *
  * 4KB things:
- * guest_vtable    lin_l2    mapped per gpdt  lin_l2 via hl2  mapped per gpdt
- * shadow_vtable    n/a         sh_lin_l2      sh_lin_l2      mapped per gpdt
- * hl2_vtable       n/a            n/a       lin_hl2 via hl2  mapped per gpdt
+ * guest_vtable    lin_l2    mapped per gl2   lin_l2 via hl2  mapped per gl2
+ * shadow_vtable    n/a         sh_lin_l2      sh_lin_l2      mapped per gl2
+ * hl2_vtable       n/a            n/a       lin_hl2 via hl2  mapped per gl2
 * monitor_vtable   n/a            n/a            n/a          mapped once
 *
 * 4MB things:
- * guest_linear  lin via gpdt  lin via gpdt    lin via hl2     lin via hl2
- * shadow_linear    n/a      sh_lin via spdt sh_lin via spdt sh_lin via spdt
+ * guest_linear  lin via gl2   lin via gl2     lin via hl2     lin via hl2
+ * shadow_linear    n/a      sh_lin via sl2  sh_lin via sl2  sh_lin via sl2
 * monitor_linear   n/a            n/a            n/a             ???
 * perdomain     perdomain      perdomain      perdomain       perdomain
 * R/O M2P        R/O M2P        R/O M2P          n/a             n/a
@@ -2477,9 +2541,6 @@ void __update_pagetables(struct exec_dom
     {
         if ( unlikely(!(hl2mfn = __shadow_status(d, gpfn, PGT_hl2_shadow))) )
             hl2mfn = shadow_hl2_table(d, gpfn, gmfn, smfn);
-        if ( !get_shadow_ref(hl2mfn) )
-            BUG();
-
         if ( ed->arch.hl2_vtable )
             unmap_domain_mem(ed->arch.hl2_vtable);
         ed->arch.hl2_vtable = map_domain_mem(hl2mfn << PAGE_SHIFT);
@@ -2491,16 +2552,24 @@ void __update_pagetables(struct exec_dom
     if ( max_mode == SHM_external )
     {
         l2_pgentry_t *mpl2e = ed->arch.monitor_vtable;
+        l2_pgentry_t old_hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)];
+        l2_pgentry_t old_sl2e = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)];
 
         ASSERT( shadow_mode_translate(d) );
 
-        BUG(); // ref counts for hl2mfn and smfn need to be maintained!
-
+        if ( !get_shadow_ref(hl2mfn) )
+            BUG();
         mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
             mk_l2_pgentry((hl2mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
-
+        if ( l2_pgentry_val(old_hl2e) & _PAGE_PRESENT )
+            put_shadow_ref(l2_pgentry_val(old_hl2e) >> PAGE_SHIFT);
+
+        if ( !get_shadow_ref(smfn) )
+            BUG();
         mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
             mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+        if ( l2_pgentry_val(old_sl2e) & _PAGE_PRESENT )
+            put_shadow_ref(l2_pgentry_val(old_sl2e) >> PAGE_SHIFT);
 
         // XXX - maybe this can be optimized somewhat??
         local_flush_tlb();
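The SHM_external path above replaces the old BUG() with real refcount maintenance, and the order matters: the new frame's reference is taken before the slot is overwritten, and the old frame's reference is dropped only afterwards, so the count never transiently reaches zero even if the old and new frames are the same. A minimal sketch of that ordering, with get_ref/put_ref and the entry encoding as stand-ins for get_shadow_ref/put_shadow_ref and l2 entries:

    #include <assert.h>

    #define PRESENT 0x1UL

    static unsigned refcount[64];   /* toy per-frame shadow reference counts */

    static int  get_ref(unsigned long mfn) { return ++refcount[mfn] != 0; }
    static void put_ref(unsigned long mfn) { assert(refcount[mfn] > 0); --refcount[mfn]; }

    /* Swap the frame mapped by *slot for new_mfn, keeping counts exact. */
    static void swap_slot(unsigned long *slot, unsigned long new_mfn)
    {
        unsigned long old_e = *slot;

        if ( !get_ref(new_mfn) )            /* 1: reference the new frame      */
            assert(0);
        *slot = (new_mfn << 1) | PRESENT;   /* 2: install the new entry        */
        if ( old_e & PRESENT )              /* 3: only now drop the old frame; */
            put_ref(old_e >> 1);            /*    safe even if old == new      */
    }

    int main(void)
    {
        unsigned long slot = 0;
        swap_slot(&slot, 5);    /* refcount[5] == 1 */
        swap_slot(&slot, 5);    /* still 1: get-before-put never hits zero */
        assert(refcount[5] == 1);
        return 0;
    }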
@@ -2769,7 +2838,6 @@ int _check_pagetable(struct exec_domain
     if ( !(smfn = __shadow_status(d, ptbase_pfn, PGT_base_page_table)) )
     {
         printk("%s-PT %p not shadowed\n", s, gptbase);
-        errors++;
         goto out;
     }
     if ( page_out_of_sync(pfn_to_page(ptbase_mfn)) )
--- a/xen/arch/x86/traps.c      Wed Apr 13 10:40:06 2005 +0000
+++ b/xen/arch/x86/traps.c      Thu Apr 14 20:51:32 2005 +0000
@@ -297,7 +297,8 @@ asmlinkage int do_page_fault(struct xen_
     }
 
     if ( unlikely(shadow_mode_enabled(d)) &&
-         ((addr < HYPERVISOR_VIRT_START) || shadow_mode_external(d)) &&
+         ((addr < HYPERVISOR_VIRT_START) ||
+          (shadow_mode_external(d) && GUEST_CONTEXT(ed, regs))) &&
          shadow_fault(addr, regs) )
     {
         return EXCRET_fault_fixed;
--- a/xen/arch/x86/vmx.c        Wed Apr 13 10:40:06 2005 +0000
+++ b/xen/arch/x86/vmx.c        Thu Apr 14 20:51:32 2005 +0000
@@ -521,9 +521,8 @@ static int mov_to_cr(int gp, int cr, str
             domain_crash_synchronous(); /* need to take a clean path */
         }
         mfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
-        vmx_shadow_clear_state(d->domain);
         d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
-        update_pagetables(d);
+        update_pagetables(d);
         /*
          * arch.shadow_table should now hold the next CR3 for shadow
          */
--- a/xen/include/asm-x86/regs.h        Wed Apr 13 10:40:06 2005 +0000
+++ b/xen/include/asm-x86/regs.h        Thu Apr 14 20:51:32 2005 +0000
@@ -33,4 +33,10 @@ enum EFLAGS {
 
 #define GUEST_MODE(_r) (likely(VM86_MODE(_r) || !RING_0(_r)))
 
+#ifdef CONFIG_VMX
+#define GUEST_CONTEXT(_ed, _r) ( (VMX_DOMAIN(_ed) && ((_r)->eflags == 0)) || GUEST_MODE(_r) )
+#else
+#define GUEST_CONTEXT(_ed, _r) GUEST_MODE(_r)
+#endif
+
 #endif /* __X86_REGS_H__ */
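GUEST_CONTEXT() extends GUEST_MODE() for VMX domains, where a fault forwarded after a VM exit arrives with a register frame that otherwise looks like ring 0. The diff uses eflags == 0 as the marker for such frames, which is unambiguous since bit 1 of EFLAGS is architecturally always 1 in a genuinely saved frame. A stand-alone illustration of the logic, with simplified stand-ins for VMX_DOMAIN and RING_0 (not the Xen definitions):

    #include <stdio.h>

    struct regs { unsigned long eflags, cs; };

    /* Simplified stand-ins: RING_0 tests the saved CS's privilege bits. */
    static int ring_0(const struct regs *r)     { return (r->cs & 3) == 0; }
    static int guest_mode(const struct regs *r) { return !ring_0(r); }

    /* eflags == 0 cannot occur in a real saved frame, so it safely
     * identifies a frame produced on a VMX exit. */
    static int guest_context(int vmx_domain, const struct regs *r)
    {
        return (vmx_domain && r->eflags == 0) || guest_mode(r);
    }

    int main(void)
    {
        struct regs vmexit = { .eflags = 0,     .cs = 0x08 };  /* ring-0 frame */
        struct regs ring3  = { .eflags = 0x202, .cs = 0x33 };  /* guest ring 3 */

        printf("VMX exit frame:  %d\n", guest_context(1, &vmexit)); /* 1 */
        printf("PV guest ring 3: %d\n", guest_context(0, &ring3));  /* 1 */
        printf("Xen ring 0:      %d\n", guest_context(0, &vmexit)); /* 0 */
        return 0;
    }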
--- a/xen/include/asm-x86/shadow.h      Wed Apr 13 10:40:06 2005 +0000
+++ b/xen/include/asm-x86/shadow.h      Thu Apr 14 20:51:32 2005 +0000
@@ -270,20 +270,21 @@ extern int shadow_status_noswap;
 static inline int
 shadow_get_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
 {
-    int res = get_page_from_l1e(l1e, d);
+    l1_pgentry_t nl1e = mk_l1_pgentry(l1_pgentry_val(l1e) & ~_PAGE_GLOBAL);
+    int res = get_page_from_l1e(nl1e, d);
     unsigned long mfn;
     struct domain *owner;
 
-    ASSERT( l1_pgentry_val(l1e) & _PAGE_PRESENT );
+    ASSERT( l1_pgentry_val(nl1e) & _PAGE_PRESENT );
 
     if ( unlikely(!res) && IS_PRIV(d) && !shadow_mode_translate(d) &&
-         !(l1_pgentry_val(l1e) & L1_DISALLOW_MASK) &&
-         (mfn = l1_pgentry_to_pfn(l1e)) &&
+         !(l1_pgentry_val(nl1e) & L1_DISALLOW_MASK) &&
+         (mfn = l1_pgentry_to_pfn(nl1e)) &&
          pfn_is_ram(mfn) &&
-         (owner = page_get_owner(pfn_to_page(l1_pgentry_to_pfn(l1e)))) &&
+         (owner = page_get_owner(pfn_to_page(l1_pgentry_to_pfn(nl1e)))) &&
          (d != owner) )
     {
-        res = get_page_from_l1e(l1e, owner);
+        res = get_page_from_l1e(nl1e, owner);
         printk("tried to map mfn %p from domain %d into shadow page tables "
                "of domain %d; %s\n",
                mfn, owner->id, d->id, res ? "success" : "failed");
@@ -292,7 +293,7 @@ shadow_get_page_from_l1
     if ( unlikely(!res) )
     {
         perfc_incrc(shadow_get_page_fail);
-        FSH_LOG("%s failed to get ref l1e=%p\n", __func__, l1_pgentry_val(l1e));
+        FSH_LOG("%s failed to get ref l1e=%p", __func__, l1_pgentry_val(l1e));
     }
 
     return res;
@@ -417,36 +418,11 @@ static inline void shadow_sync_and_drop_
 
 /************************************************************************/
 
-//#define MFN3_TO_WATCH 0x8575
-#ifdef MFN3_TO_WATCH
-#define get_shadow_ref(__s) (                                   \
-{                                                               \
-    unsigned long _s = (__s);                                   \
-    if ( _s == MFN3_TO_WATCH )                                  \
-        printk("get_shadow_ref(%x) oc=%d @ %s:%d in %s\n",      \
-               MFN3_TO_WATCH, frame_table[_s].count_info,       \
-               __FILE__, __LINE__, __func__);                   \
-    _get_shadow_ref(_s);                                        \
-})
-#define put_shadow_ref(__s) (                                   \
-{                                                               \
-    unsigned long _s = (__s);                                   \
-    if ( _s == MFN3_TO_WATCH )                                  \
-        printk("put_shadow_ref(%x) oc=%d @ %s:%d in %s\n",      \
-               MFN3_TO_WATCH, frame_table[_s].count_info,       \
-               __FILE__, __LINE__, __func__);                   \
-    _put_shadow_ref(_s);                                        \
-})
-#else
-#define _get_shadow_ref get_shadow_ref
-#define _put_shadow_ref put_shadow_ref
-#endif
-
 /*
  * Add another shadow reference to smfn.
  */
 static inline int
-_get_shadow_ref(unsigned long smfn)
+get_shadow_ref(unsigned long smfn)
 {
     u32 x, nx;
 
@@ -475,7 +451,7 @@ extern void free_shadow_page(unsigned lo
  * Drop a shadow reference to smfn.
  */
 static inline void
-_put_shadow_ref(unsigned long smfn)
+put_shadow_ref(unsigned long smfn)
 {
     u32 x, nx;
 
@@ -486,7 +462,8 @@ static inline void
 
     if ( unlikely(x == 0) )
     {
-        printk("put_shadow_ref underflow, oc=%p t=%p\n",
+        printk("put_shadow_ref underflow, smfn=%p oc=%p t=%p\n",
+               smfn,
                frame_table[smfn].count_info,
                frame_table[smfn].u.inuse.type_info);
         BUG();
@@ -508,13 +485,15 @@ shadow_pin(unsigned long smfn)
     ASSERT( !(frame_table[smfn].u.inuse.type_info & PGT_pinned) );
 
     frame_table[smfn].u.inuse.type_info |= PGT_pinned;
-    if ( !get_shadow_ref(smfn) )
+    if ( unlikely(!get_shadow_ref(smfn)) )
         BUG();
 }
 
 static inline void
 shadow_unpin(unsigned long smfn)
 {
+    ASSERT( (frame_table[smfn].u.inuse.type_info & PGT_pinned) );
+
     frame_table[smfn].u.inuse.type_info &= ~PGT_pinned;
     put_shadow_ref(smfn);
 }
@@ -767,7 +746,7 @@ validate_pte_change(
     perfc_incrc(validate_pte_calls);
 
 #if 0
-    FSH_LOG("validate_pte(old=%p new=%p)\n", old_pte, new_pte);
+    FSH_LOG("validate_pte(old=%p new=%p)", old_pte, new_pte);
 #endif
 
     old_spte = *shadow_pte_p;
@@ -1457,7 +1436,7 @@ static inline unsigned long gva_to_gpte(
     if ( unlikely(__get_user(gpte, (unsigned long *)
                              &linear_pg_table[gva >> PAGE_SHIFT])) )
     {
-        FSH_LOG("gva_to_gpte got a fault on gva=%p\n", gva);
+        FSH_LOG("gva_to_gpte got a fault on gva=%p", gva);
         return 0;
     }
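The new ASSERT in shadow_unpin() enforces that PGT_pinned and the reference it owns stay strictly paired: pinning takes exactly one reference, unpinning gives exactly one back, and an unpaired unpin would show up as the put_shadow_ref underflow printed above. A toy model of that invariant (frame, pin, unpin and the bit position are illustrative stand-ins, not the frame_table API):

    #include <assert.h>
    #include <stdint.h>

    #define PGT_pinned (1u << 31)   /* illustrative bit position */

    struct frame { uint32_t type_info; uint32_t count_info; };

    static void pin(struct frame *f)
    {
        assert( !(f->type_info & PGT_pinned) ); /* double-pin is a bug        */
        f->type_info |= PGT_pinned;
        f->count_info++;                        /* the pin owns one reference */
    }

    static void unpin(struct frame *f)
    {
        assert( f->type_info & PGT_pinned );    /* analogue of the new ASSERT */
        f->type_info &= ~PGT_pinned;
        assert( f->count_info > 0 );            /* else: refcount underflow   */
        f->count_info--;                        /* give the reference back    */
    }

    int main(void)
    {
        struct frame f = { 0, 0 };
        pin(&f);
        unpin(&f);
        assert( f.count_info == 0 && f.type_info == 0 );
        return 0;
    }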
--- a/xen/include/asm-x86/vmx_vmcs.h    Wed Apr 13 10:40:06 2005 +0000
+++ b/xen/include/asm-x86/vmx_vmcs.h    Thu Apr 14 20:51:32 2005 +0000
@@ -59,7 +59,7 @@ struct arch_vmx_struct {
 #define vmx_schedule_tail(next)         \
     (next)->thread.arch_vmx.arch_vmx_schedule_tail((next))
 
-#define VMX_DOMAIN(d)   d->arch.arch_vmx.flags
+#define VMX_DOMAIN(ed)  ((ed)->arch.arch_vmx.flags)
 
 #define ARCH_VMX_VMCS_LOADED    0       /* VMCS has been loaded and active */
 #define ARCH_VMX_VMCS_LAUNCH    1       /* Needs VMCS launch */
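The VMX_DOMAIN() change is a one-line macro-hygiene fix: the argument and the whole expansion are now parenthesized, so the macro survives being handed an arbitrary expression, as GUEST_CONTEXT() above now does with its _ed parameter. An illustrative before/after with a hypothetical struct (not the Xen types):

    struct ed { int flags; };

    #define VMX_DOMAIN_OLD(d)  d->flags          /* unparenthesized          */
    #define VMX_DOMAIN_NEW(ed) ((ed)->flags)     /* argument and body wrapped */

    int demo(struct ed *a, struct ed *b, int pick)
    {
        /* VMX_DOMAIN_OLD(pick ? a : b) would expand to
         *     pick ? a : b->flags
         * which does not even type-check; the parenthesized form expands to
         *     ((pick ? a : b)->flags)
         * as intended. */
        return VMX_DOMAIN_NEW(pick ? a : b);
    }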