ia64/xen-unstable
changeset 4149:5f0125b2f3a9
bitkeeper revision 1.1236.33.2 (4236b517THiLxPjnIZVybs7stl7QFQ)
Make validate_(pte|pde)_changes a little smarter.
Avoid some unnecessary calls to __shadow_status.
Added an early out for __shadow_status.
Signed-off-by: michael.fetterman@cl.cam.ac.uk
author     mafetter@fleming.research
date       Tue Mar 15 10:12:39 2005 +0000 (2005-03-15)
parents    2d50ee7a068d
children   9a43f31bae0a
files      xen/arch/x86/audit.c xen/arch/x86/shadow.c xen/include/asm-x86/mm.h xen/include/asm-x86/shadow.h xen/include/xen/perfc_defn.h
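The description boils down to two small optimizations, both visible in the shadow.h hunks below: a cheap per-frame flag check that lets __shadow_status skip the hash-bucket walk for frames that were never page tables, and a present-bit guard so validate_(pte|pde)_change only touches reference counts when a present mapping is actually involved. The following standalone C sketch illustrates the shape of both ideas; it is not code from this changeset. The macro values (e.g. PGC_page_table), the toy count_info[] array standing in for frame_table[], and all function names here are hypothetical stand-ins chosen only for illustration -- the real code is in the diff that follows.

#include <stdio.h>

#define _PAGE_PRESENT  0x001UL
#define _PAGE_RW       0x002UL
#define PAGE_MASK      (~0xfffUL)
#define PGC_page_table 0x100UL           /* made-up bit value, sketch only */

static unsigned long count_info[16];     /* toy stand-in for frame_table[] */

/* Stand-in for the expensive hash-bucket walk (the real ___shadow_status). */
static unsigned long slow_hash_lookup(unsigned long gpfn)
{
    (void)gpfn;
    return 0;
}

/* Early out: a frame never marked as a page table cannot have a shadow,
 * so the hash lookup can be skipped entirely. */
static unsigned long shadow_status_sketch(unsigned long gmfn, unsigned long gpfn)
{
    if ( !(count_info[gmfn] & PGC_page_table) )
        return 0;                        /* fast path: provably no shadow */
    return slow_hash_lookup(gpfn);       /* slow path: walk the bucket    */
}

/* Ref counting is only worth doing when at least one of the two PTEs is
 * present and a bit that matters (frame, RW, present) actually changed. */
static int pte_needs_refcounting(unsigned long old_spte, unsigned long new_spte)
{
    return ((old_spte | new_spte) & _PAGE_PRESENT) &&
           ((old_spte ^ new_spte) & (PAGE_MASK | _PAGE_RW | _PAGE_PRESENT));
}

int main(void)
{
    printf("%lu\n", shadow_status_sketch(3, 3));             /* 0: fast path       */
    printf("%d\n",  pte_needs_refcounting(0x0UL, 0x1003UL)); /* 1: mapping added   */
    printf("%d\n",  pte_needs_refcounting(0x4UL, 0x8UL));    /* 0: nothing present */
    return 0;
}

The point of the first test is that PGC_page_table is a single flag read on the frame, far cheaper than scanning a shadow hash bucket; the second test avoids get/put reference-count work for transitions between two not-present entries.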
line diff
--- a/xen/arch/x86/audit.c	Tue Mar 15 08:15:00 2005 +0000
+++ b/xen/arch/x86/audit.c	Tue Mar 15 10:12:39 2005 +0000
@@ -25,25 +25,17 @@
 #include <xen/kernel.h>
 #include <xen/lib.h>
 #include <xen/mm.h>
-//#include <xen/sched.h>
-//#include <xen/errno.h>
 #include <xen/perfc.h>
-//#include <xen/irq.h>
-//#include <xen/softirq.h>
 #include <asm/shadow.h>
 #include <asm/page.h>
 #include <asm/flushtlb.h>
-//#include <asm/io.h>
-//#include <asm/uaccess.h>
-//#include <asm/domain_page.h>
-//#include <asm/ldt.h>
 
 // XXX SMP bug -- these should not be statics...
 //
 static int ttot=0, ctot=0, io_mappings=0, lowmem_mappings=0;
 static int l1, l2, oos_count, page_count;
 
-#define FILE_AND_LINE 1
+#define FILE_AND_LINE 0
 
 #if FILE_AND_LINE
 #define adjust(_p, _a) _adjust((_p), (_a), __FILE__, __LINE__)
@@ -73,7 +65,7 @@ int audit_adjust_pgtables(struct domain 
         if ( page_get_owner(page) == NULL )
         {
             APRINTK("adjust(mfn=%p, dir=%d, adjtype=%d) owner=NULL",
-                    page_to_pfn(page), dir, adjtype, file, line);
+                    page_to_pfn(page), dir, adjtype);
             errors++;
         }
 
--- a/xen/arch/x86/shadow.c	Tue Mar 15 08:15:00 2005 +0000
+++ b/xen/arch/x86/shadow.c	Tue Mar 15 10:12:39 2005 +0000
@@ -60,7 +60,7 @@ shadow_promote(struct domain *d, unsigne
         __shadow_sync_mfn(d, gmfn);
     }
 
-    if ( unlikely(mfn_is_page_table(gmfn)) )
+    if ( unlikely(page_is_page_table(page)) )
     {
         min_type = shadow_max_pgtable_type(d, gpfn) + PGT_l1_shadow;
         max_type = new_type;
@@ -99,7 +99,7 @@ shadow_promote(struct domain *d, unsigne
     if ( get_page_type(page, PGT_base_page_table) )
     {
         put_page_type(page);
-        set_bit(_PGC_page_table, &frame_table[gmfn].count_info);
+        set_bit(_PGC_page_table, &page->count_info);
     }
     else
     {
--- a/xen/include/asm-x86/mm.h	Tue Mar 15 08:15:00 2005 +0000
+++ b/xen/include/asm-x86/mm.h	Tue Mar 15 10:12:39 2005 +0000
@@ -129,8 +129,6 @@ static inline u32 pickle_domptr(struct d
 #define page_get_owner(_p)    (unpickle_domptr((_p)->u.inuse._domain))
 #define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))
 
-#define page_out_of_sync(_p)  ((_p)->count_info & PGC_out_of_sync)
-
 #define SHARE_PFN_WITH_DOMAIN(_pfn, _dom)                                   \
     do {                                                                    \
         page_set_owner((_pfn), (_dom));                                     \
@@ -235,22 +233,6 @@ static inline int get_page_and_type(stru
     return rc;
 }
 
-static inline int mfn_is_page_table(unsigned long mfn)
-{
-    if ( !pfn_is_ram(mfn) )
-        return 0;
-
-    return frame_table[mfn].count_info & PGC_page_table;
-}
-
-static inline int page_is_page_table(struct pfn_info *page)
-{
-    if ( !pfn_is_ram(page_to_pfn(page)) )
-        return 0;
-
-    return page->count_info & PGC_page_table;
-}
-
 #define ASSERT_PAGE_IS_TYPE(_p, _t)                            \
     ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
     ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
--- a/xen/include/asm-x86/shadow.h	Tue Mar 15 08:15:00 2005 +0000
+++ b/xen/include/asm-x86/shadow.h	Tue Mar 15 10:12:39 2005 +0000
@@ -68,6 +68,33 @@ static inline unsigned long __shadow_sta
 
 extern void vmx_shadow_clear_state(struct domain *);
 
+static inline int page_is_page_table(struct pfn_info *page)
+{
+    return page->count_info & PGC_page_table;
+}
+
+static inline int mfn_is_page_table(unsigned long mfn)
+{
+    if ( !pfn_is_ram(mfn) )
+        return 0;
+
+    return frame_table[mfn].count_info & PGC_page_table;
+}
+
+static inline int page_out_of_sync(struct pfn_info *page)
+{
+    return page->count_info & PGC_out_of_sync;
+}
+
+static inline int mfn_out_of_sync(unsigned long mfn)
+{
+    if ( !pfn_is_ram(mfn) )
+        return 0;
+
+    return frame_table[mfn].count_info & PGC_out_of_sync;
+}
+
+
 /************************************************************************/
 
 static void inline
@@ -565,9 +592,10 @@ static inline void l2pde_general(
 static inline void l2pde_propagate_from_guest(
     struct domain *d, unsigned long *gpde_p, unsigned long *spde_p)
 {
-    unsigned long gpde = *gpde_p, sl1mfn;
+    unsigned long gpde = *gpde_p, sl1mfn = 0;
 
-    sl1mfn = __shadow_status(d, gpde >> PAGE_SHIFT, PGT_l1_shadow);
+    if ( gpde & _PAGE_PRESENT )
+        sl1mfn = __shadow_status(d, gpde >> PAGE_SHIFT, PGT_l1_shadow);
     l2pde_general(d, gpde_p, spde_p, sl1mfn);
 }
 
@@ -583,7 +611,7 @@ validate_pte_change(
 {
     unsigned long old_spte, new_spte;
 
-    perfc_incrc(validate_pte_change);
+    perfc_incrc(validate_pte_calls);
 
 #if 0
     FSH_LOG("validate_pte(old=%p new=%p)\n", old_pte, new_pte);
@@ -595,8 +623,11 @@ validate_pte_change(
 
     // only do the ref counting if something important changed.
     //
-    if ( (old_spte ^ new_spte) & (PAGE_MASK | _PAGE_RW | _PAGE_PRESENT) )
+    if ( ((old_spte | new_spte) & _PAGE_PRESENT ) &&
+         ((old_spte ^ new_spte) & (PAGE_MASK | _PAGE_RW | _PAGE_PRESENT)) )
     {
+        perfc_incrc(validate_pte_changes);
+
         if ( new_spte & _PAGE_PRESENT )
             shadow_get_page_from_l1e(mk_l1_pgentry(new_spte), d);
         if ( old_spte & _PAGE_PRESENT )
@@ -618,15 +649,18 @@ validate_pde_change(
     unsigned long old_spde = *shadow_pde_p;
     unsigned long new_spde;
 
-    perfc_incrc(validate_pde_change);
+    perfc_incrc(validate_pde_calls);
 
     l2pde_propagate_from_guest(d, &new_pde, shadow_pde_p);
     new_spde = *shadow_pde_p;
 
    // only do the ref counting if something important changed.
    //
-    if ( (old_spde ^ new_spde) & (PAGE_MASK | _PAGE_PRESENT) )
+    if ( ((old_spde | new_spde) & _PAGE_PRESENT) &&
+         ((old_spde ^ new_spde) & (PAGE_MASK | _PAGE_PRESENT)) )
     {
+        perfc_incrc(validate_pde_changes);
+
         if ( new_spde & _PAGE_PRESENT )
             get_shadow_ref(new_spde >> PAGE_SHIFT);
         if ( old_spde & _PAGE_PRESENT )
@@ -720,16 +754,12 @@ static inline struct shadow_status *hash
  * It returns the shadow's mfn, or zero if it doesn't exist.
  */
 
-static inline unsigned long __shadow_status(
+static inline unsigned long ___shadow_status(
     struct domain *d, unsigned long gpfn, unsigned long stype)
 {
     struct shadow_status *p, *x, *head;
     unsigned long key = gpfn | stype;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
-    ASSERT(gpfn == (gpfn & PGT_mfn_mask));
-    ASSERT(stype && !(stype & ~PGT_type_mask));
-
     perfc_incrc(shadow_status_calls);
 
     x = head = hash_bucket(d, gpfn);
@@ -779,6 +809,27 @@ static inline unsigned long __shadow_sta
     return 0;
 }
 
+static inline unsigned long __shadow_status(
+    struct domain *d, unsigned long gpfn, unsigned long stype)
+{
+    unsigned long gmfn = __gpfn_to_mfn(d, gpfn);
+
+    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(gpfn == (gpfn & PGT_mfn_mask));
+    ASSERT(stype && !(stype & ~PGT_type_mask));
+
+    if ( gmfn && ((stype != PGT_snapshot)
+                  ? !mfn_is_page_table(gmfn)
+                  : !mfn_out_of_sync(gmfn)) )
+    {
+        perfc_incrc(shadow_status_shortcut);
+        ASSERT(___shadow_status(d, gpfn, stype) == 0);
+        return 0;
+    }
+
+    return ___shadow_status(d, gmfn, stype);
+}
+
 /*
  * Not clear if pull-to-front is worth while for this or not,
  * as it generally needs to scan the entire bucket anyway.
--- a/xen/include/xen/perfc_defn.h	Tue Mar 15 08:15:00 2005 +0000
+++ b/xen/include/xen/perfc_defn.h	Tue Mar 15 10:12:39 2005 +0000
@@ -38,7 +38,8 @@ PERFSTATUS( shadow_l1_pages, "current # 
 PERFSTATUS( hl2_table_pages, "current # hl2 pages" )
 PERFSTATUS( snapshot_pages, "current # fshadow snapshot pages" )
 
-PERFCOUNTER_CPU(shadow_status_calls, "calls to __shadow_status" )
+PERFCOUNTER_CPU(shadow_status_shortcut, "fastpath miss on shadow cache")
+PERFCOUNTER_CPU(shadow_status_calls, "calls to ___shadow_status" )
 PERFCOUNTER_CPU(shadow_status_miss, "missed shadow cache" )
 PERFCOUNTER_CPU(shadow_status_hit_head, "hits on head of bucket" )
 PERFCOUNTER_CPU(check_pagetable, "calls to check_pagetable" )
@@ -59,5 +60,7 @@ PERFCOUNTER_CPU(shadow_fault_bail_pde_no
 PERFCOUNTER_CPU(shadow_fault_bail_pte_not_present, "sf bailed due to pte not present")
 PERFCOUNTER_CPU(shadow_fault_bail_ro_mapping, "sf bailed due to a ro mapping")
 PERFCOUNTER_CPU(shadow_fault_fixed, "sf fixed the pgfault")
-PERFCOUNTER_CPU(validate_pte_change, "calls to validate_pte_change")
-PERFCOUNTER_CPU(validate_pde_change, "calls to validate_pde_change")
+PERFCOUNTER_CPU(validate_pte_calls, "calls to validate_pte_change")
+PERFCOUNTER_CPU(validate_pte_changes, "validate_pte makes changes")
+PERFCOUNTER_CPU(validate_pde_calls, "calls to validate_pde_change")
+PERFCOUNTER_CPU(validate_pde_changes, "validate_pde makes changes")