direct-io.hg
changeset 4819:51c287330796
bitkeeper revision 1.1389.13.1 (428097bfFCQnxc6PG1CYe-6KhZD8kA)
Minor shadow code improvements.
author   | mafetter@fleming.research
date     | Tue May 10 11:15:11 2005 +0000 (2005-05-10)
parents  | f470118a979e
children | d2a202436c1c
files    | xen/arch/x86/shadow.c xen/common/perfc.c xen/include/asm-x86/shadow.h
line diff
--- a/xen/arch/x86/shadow.c	Mon May 09 17:50:17 2005 +0000
+++ b/xen/arch/x86/shadow.c	Tue May 10 11:15:11 2005 +0000
@@ -1898,37 +1898,34 @@ void shadow_mark_va_out_of_sync(
  * Returns 0 otherwise.
  */
 static int snapshot_entry_matches(
-    struct exec_domain *ed, unsigned long gmfn, unsigned index)
+    struct domain *d, l1_pgentry_t *guest_pt,
+    unsigned long gpfn, unsigned index)
 {
-    unsigned long gpfn = __mfn_to_gpfn(ed->domain, gmfn);
-    unsigned long smfn = __shadow_status(ed->domain, gpfn, PGT_snapshot);
-    unsigned long *guest, *snapshot;
-    int compare;
-
-    ASSERT( ! IS_INVALID_M2P_ENTRY(gpfn) );
+    unsigned long smfn = __shadow_status(d, gpfn, PGT_snapshot);
+    l1_pgentry_t *snapshot; // could be L1s or L2s or ...
+    int entries_match;
 
     perfc_incrc(snapshot_entry_matches_calls);
 
     if ( !smfn )
         return 0;
 
-    guest    = map_domain_mem(gmfn << PAGE_SHIFT);
     snapshot = map_domain_mem(smfn << PAGE_SHIFT);
 
     // This could probably be smarter, but this is sufficent for
     // our current needs.
     //
-    compare = (guest[index] == snapshot[index]);
-
-    unmap_domain_mem(guest);
+    entries_match = !l1e_has_changed(&guest_pt[index], &snapshot[index],
+                                     PAGE_FLAG_MASK);
+
     unmap_domain_mem(snapshot);
 
 #ifdef PERF_COUNTERS
-    if ( compare )
+    if ( entries_match )
         perfc_incrc(snapshot_entry_matches_true);
 #endif
 
-    return compare;
+    return entries_match;
 }
 
 /*
@@ -1939,37 +1936,35 @@ int __shadow_out_of_sync(struct exec_dom
 {
     struct domain *d = ed->domain;
     unsigned long l2mfn = pagetable_get_pfn(ed->arch.guest_table);
+    unsigned long l2pfn = __mfn_to_gpfn(d, l2mfn);
     l2_pgentry_t l2e;
-    unsigned long l1mfn;
+    unsigned long l1pfn, l1mfn;
 
     ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(VALID_M2P(l2pfn));
 
     perfc_incrc(shadow_out_of_sync_calls);
 
-    // PERF BUG: snapshot_entry_matches will call map_domain_mem() on the l2
-    // page, but it's already available at ed->arch.guest_vtable...
-    // Ditto for the sl2 page and ed->arch.shadow_vtable.
-    //
     if ( page_out_of_sync(&frame_table[l2mfn]) &&
-         !snapshot_entry_matches(ed, l2mfn, l2_table_offset(va)) )
+         !snapshot_entry_matches(d, (l1_pgentry_t *)ed->arch.guest_vtable,
+                                 l2pfn, l2_table_offset(va)) )
         return 1;
 
     __guest_get_l2e(ed, va, &l2e);
     if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
         return 0;
 
-    l1mfn = __gpfn_to_mfn(d, l2e_get_pfn(l2e));
+    l1pfn = l2e_get_pfn(l2e);
+    l1mfn = __gpfn_to_mfn(d, l1pfn);
 
     // If the l1 pfn is invalid, it can't be out of sync...
     if ( !VALID_MFN(l1mfn) )
         return 0;
 
-    // PERF BUG: snapshot_entry_matches will call map_domain_mem() on the l1
-    // page, but it's already available at linear_pg_table[l1_linear_offset()].
-    // Ditto for the sl1 page and shadow_linear_pg_table[]...
-    //
     if ( page_out_of_sync(&frame_table[l1mfn]) &&
-         !snapshot_entry_matches(ed, l1mfn, l1_table_offset(va)) )
+         !snapshot_entry_matches(
+             d, &linear_pg_table[l1_linear_offset(va) & ~(L1_PAGETABLE_ENTRIES-1)],
+             l1pfn, l1_table_offset(va)) )
         return 1;
 
     return 0;
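For orientation: the hunks above rework snapshot_entry_matches() so that it no longer takes a gmfn and maps the guest frame itself. Callers now pass a pointer into a guest page table they already have mapped (ed->arch.guest_vtable for the L2 check, linear_pg_table for the L1 check) together with the gpfn used for the snapshot lookup, removing the redundant map_domain_mem()/unmap_domain_mem() pair flagged by the deleted PERF BUG comments. The fragment below is only a toy sketch of that calling-convention change; the types, table contents and map_table() counter are illustrative stand-ins, not Xen code.

/* Toy sketch (not Xen code) of the snapshot_entry_matches() change:
 * the caller now supplies a pointer into a table it already has mapped,
 * instead of a frame number that the helper must map on every call.
 */
#include <stdio.h>
#include <stdint.h>

#define ENTRIES 4
typedef uint32_t pt_entry_t;                 /* stand-in for l1_pgentry_t */

static pt_entry_t fake_frame[ENTRIES] = { 0x1007, 0x2007, 0x3007, 0x4007 };
static pt_entry_t snapshot[ENTRIES]   = { 0x1007, 0x2007, 0x3005, 0x4007 };
static int map_calls;

/* Stand-in for map_domain_mem(): counts how often the table is "mapped". */
static pt_entry_t *map_table(void) { map_calls++; return fake_frame; }

/* Old shape: the helper maps the guest table itself on every call. */
static int old_entry_matches(unsigned index)
{
    pt_entry_t *guest = map_table();
    return guest[index] == snapshot[index];
}

/* New shape: the caller passes a pointer it already holds. */
static int new_entry_matches(pt_entry_t *guest_pt, unsigned index)
{
    return guest_pt[index] == snapshot[index];
}

int main(void)
{
    /* Old shape: each check re-maps the guest table. */
    map_calls = 0;
    for (unsigned i = 0; i < ENTRIES; i++)
        old_entry_matches(i);
    printf("old shape: %d map calls for %d checks\n", map_calls, ENTRIES);

    /* New shape: the caller maps (or already holds) the table once. */
    map_calls = 0;
    pt_entry_t *guest_pt = map_table();
    for (unsigned i = 0; i < ENTRIES; i++)
        new_entry_matches(guest_pt, i);
    printf("new shape: %d map call for %d checks\n", map_calls, ENTRIES);
    return 0;
}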
--- a/xen/common/perfc.c	Mon May 09 17:50:17 2005 +0000
+++ b/xen/common/perfc.c	Tue May 10 11:15:11 2005 +0000
@@ -4,6 +4,7 @@
 #include <xen/time.h>
 #include <xen/perfc.h>
 #include <xen/keyhandler.h>
+#include <xen/spinlock.h>
 #include <public/dom0_ops.h>
 #include <asm/uaccess.h>
 
--- a/xen/include/asm-x86/shadow.h	Mon May 09 17:50:17 2005 +0000
+++ b/xen/include/asm-x86/shadow.h	Tue May 10 11:15:11 2005 +0000
@@ -518,9 +518,9 @@ update_hl2e(struct exec_domain *ed, unsi
     else
         new_hl2e = l1e_empty();
 
-    // only do the ref counting if something important changed.
+    // only do the ref counting if something has changed.
     //
-    if ( (l1e_has_changed(&old_hl2e, &new_hl2e, _PAGE_PRESENT)) )
+    if ( (l1e_has_changed(&old_hl2e, &new_hl2e, PAGE_FLAG_MASK)) )
     {
         if ( (l1e_get_flags(new_hl2e) & _PAGE_PRESENT) &&
              !shadow_get_page(ed->domain, pfn_to_page(l1e_get_pfn(new_hl2e)),
@@ -531,14 +531,15 @@ update_hl2e(struct exec_domain *ed, unsi
             shadow_put_page(ed->domain, pfn_to_page(l1e_get_pfn(old_hl2e)));
             need_flush = 1;
         }
-    }
 
-    ed->arch.hl2_vtable[l2_table_offset(va)] = new_hl2e;
+        ed->arch.hl2_vtable[l2_table_offset(va)] = new_hl2e;
 
-    if ( need_flush )
-    {
-        perfc_incrc(update_hl2e_invlpg);
-        local_flush_tlb_one(&linear_pg_table[l1_linear_offset(va)]);
+        if ( need_flush )
+        {
+            perfc_incrc(update_hl2e_invlpg);
+            // SMP BUG???
+            local_flush_tlb_one(&linear_pg_table[l1_linear_offset(va)]);
+        }
     }
 }
 
@@ -1501,11 +1502,6 @@ shadow_set_l1e(unsigned long va, l1_pgen
     struct domain *d = ed->domain;
     l2_pgentry_t sl2e;
 
-#if 0
-    printk("shadow_set_l1e(va=%p, new_spte=%p, create=%d)\n",
-           va, l1e_get_value(new_spte), create_l1_shadow);
-#endif
-
     __shadow_get_l2e(ed, va, &sl2e);
     if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
     {
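The first shadow.h hunk widens the change test in update_hl2e() from _PAGE_PRESENT to PAGE_FLAG_MASK, in line with the reworded comment (ref counting when "something has changed" rather than only when "something important changed"), and the second hunk moves the hl2_vtable update and TLB flush inside that test. The toy program below only illustrates why the wider mask matters; its l1e_has_changed(), flag values and PAGE_FLAG_MASK are simplified assumptions, not the real Xen definitions.

/* Toy illustration (not Xen code) of the l1e_has_changed() mask in
 * update_hl2e(): with _PAGE_PRESENT alone, a flag-only change such as
 * RW being cleared goes unnoticed; with a full flag mask it is caught.
 * The entry layout, flag values and helper are simplified stand-ins.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct { uint32_t lo; } l1e_t;       /* stand-in for l1_pgentry_t */

#define _PAGE_PRESENT   0x001u
#define _PAGE_RW        0x002u
#define PAGE_FLAG_MASK  0xfffu               /* assumed: all low flag bits */

/* Stand-in: do the entries differ in the frame or in any masked flag? */
static int l1e_has_changed(l1e_t *a, l1e_t *b, uint32_t flags)
{
    uint32_t frame_bits = ~PAGE_FLAG_MASK;
    return (a->lo ^ b->lo) & (frame_bits | flags);
}

int main(void)
{
    l1e_t old_hl2e = { 0x12345000u | _PAGE_PRESENT | _PAGE_RW };
    l1e_t new_hl2e = { 0x12345000u | _PAGE_PRESENT };           /* RW dropped */

    printf("mask=_PAGE_PRESENT  -> changed? %d\n",
           !!l1e_has_changed(&old_hl2e, &new_hl2e, _PAGE_PRESENT));
    printf("mask=PAGE_FLAG_MASK -> changed? %d\n",
           !!l1e_has_changed(&old_hl2e, &new_hl2e, PAGE_FLAG_MASK));
    return 0;
}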