ia64/xen-unstable
changeset 12515:0bbc1e003ef2
[XEN] Remove write access to new PT before discarding old shadow.
This allows us to use the old pagetables' linear maps in our
remove-writeable-mappings heuristics, fixing the same crash that
cset 12339 did, but still letting us do fast revoke of writeable
mappings of toplevel pagetables.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
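
The crux of the fix is ordering: the fast heuristic in sh_guess_wrmap() finds
writeable mappings by walking the shadow linear map, which only works while a
shadow toplevel is still installed. Revoking write access after detaching the
old shadow (as the code did before this cset) left no linear map to walk. A
minimal sketch of the new ordering, using a hypothetical wrapper name; in the
patch itself this logic sits inline in sh_update_cr3() for each
GUEST_PAGING_LEVELS case (see the diff below):

    /* Illustration only: install_toplevel() is not a real Xen function.
     * PAE (3-level) guests do the same dance once per guest l2 page,
     * passing level 2 to shadow_remove_write_access(). */
    static void install_toplevel(struct vcpu *v, int slot, mfn_t gmfn,
                                 unsigned int root_type)
    {
        /* 1. Revoke writeable mappings of the new toplevel while the OLD
         *    shadow is still installed, so the heuristics can use its
         *    (still-present) linear maps instead of a brute-force search. */
        if ( shadow_remove_write_access(v, gmfn, GUEST_PAGING_LEVELS, 0) != 0 )
            flush_tlb_mask(v->domain->domain_dirty_cpumask);

        /* 2. Only now swap shadows: sh_set_toplevel_shadow() drops the
         *    ref on the slot's old contents and installs the new shadow. */
        sh_set_toplevel_shadow(v, slot, gmfn, root_type);
    }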
author      Tim Deegan <Tim.Deegan@xensource.com>
date        Mon Nov 20 12:03:51 2006 +0000 (2006-11-20)
parents     f0ba459065d3
children    8ff3287f4169
files       xen/arch/x86/mm/shadow/multi.c
line diff
--- a/xen/arch/x86/mm/shadow/multi.c    Thu Nov 16 18:47:28 2006 -0800
+++ b/xen/arch/x86/mm/shadow/multi.c    Mon Nov 20 12:03:51 2006 +0000
@@ -3262,9 +3262,25 @@ sh_set_toplevel_shadow(struct vcpu *v,
                        mfn_t gmfn,
                        unsigned int root_type)
 {
-    mfn_t smfn = get_shadow_status(v, gmfn, root_type);
+    mfn_t smfn;
     struct domain *d = v->domain;
-    ASSERT(pagetable_is_null(v->arch.shadow_table[slot]));
+
+    /* Decrement the refcount of the old contents of this slot */
+    smfn = pagetable_get_mfn(v->arch.shadow_table[slot]);
+    if ( mfn_x(smfn) )
+        sh_put_ref(v, smfn, 0);
+
+    /* Now figure out the new contents: is this a valid guest MFN? */
+    if ( !valid_mfn(gmfn) )
+    {
+        SHADOW_PRINTK("%u/%u [%u] invalid gmfn\n",
+                      GUEST_PAGING_LEVELS, SHADOW_PAGING_LEVELS, slot);
+        v->arch.shadow_table[slot] = pagetable_null();
+        return;
+    }
+
+    /* Guest mfn is valid: shadow it and install the shadow */
+    smfn = get_shadow_status(v, gmfn, root_type);
     if ( valid_mfn(smfn) )
     {
         /* Pull this root shadow to the front of the list of roots. */
@@ -3273,10 +3289,6 @@ sh_set_toplevel_shadow(struct vcpu *v,
     }
     else
     {
-        /* This guest MFN is a pagetable.  Must revoke write access
-         * (and can't use heuristics because we have no linear map here). */
-        if ( shadow_remove_write_access(v, gmfn, 0, 0) != 0 )
-            flush_tlb_mask(v->domain->domain_dirty_cpumask);
         /* Make sure there's enough free shadow memory. */
         shadow_prealloc(d, SHADOW_MAX_ORDER);
         /* Shadow the page. */
@@ -3291,7 +3303,8 @@ sh_set_toplevel_shadow(struct vcpu *v,
     mfn_to_page(gmfn)->shadow_flags &= ~SHF_unhooked_mappings;
 #endif
 
-    /* Take a ref to this page: it will be released in sh_detach_old_tables. */
+    /* Take a ref to this page: it will be released in sh_detach_old_tables()
+     * or in the next call to sh_set_toplevel_shadow(). */
     sh_get_ref(smfn, 0);
     sh_pin(smfn);
 
@@ -3363,8 +3376,6 @@ sh_update_cr3(struct vcpu *v)
 #endif
     gmfn = pagetable_get_mfn(v->arch.guest_table);
 
-    sh_detach_old_tables(v);
-
     if ( !is_hvm_domain(d) && !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
     {
         ASSERT(v->arch.cr3 == 0);
@@ -3376,10 +3387,16 @@ sh_update_cr3(struct vcpu *v)
     ////
 #if GUEST_PAGING_LEVELS == 4
     if ( shadow_mode_external(d) || shadow_mode_translate(d) )
+    {
+        if ( v->arch.guest_vtable )
+            sh_unmap_domain_page_global(v->arch.guest_vtable);
         v->arch.guest_vtable = sh_map_domain_page_global(gmfn);
+    }
     else
         v->arch.guest_vtable = __linear_l4_table;
 #elif GUEST_PAGING_LEVELS == 3
+    if ( v->arch.guest_vtable )
+        sh_unmap_domain_page_global(v->arch.guest_vtable);
     if ( shadow_mode_external(d) )
     {
         if ( shadow_vcpu_mode_translate(v) )
@@ -3401,7 +3418,11 @@ sh_update_cr3(struct vcpu *v)
         v->arch.guest_vtable = sh_map_domain_page_global(gmfn);
 #elif GUEST_PAGING_LEVELS == 2
     if ( shadow_mode_external(d) || shadow_mode_translate(d) )
+    {
+        if ( v->arch.guest_vtable )
+            sh_unmap_domain_page_global(v->arch.guest_vtable);
         v->arch.guest_vtable = sh_map_domain_page_global(gmfn);
+    }
     else
         v->arch.guest_vtable = __linear_l2_table;
 #else
@@ -3417,29 +3438,49 @@ sh_update_cr3(struct vcpu *v)
     //// vcpu->arch.shadow_table[]
     ////
 
+    /* We revoke write access to the new guest toplevel page(s) before we
+     * replace the old shadow pagetable(s), so that we can safely use the
+     * (old) shadow linear maps in the writeable mapping heuristics. */
 #if GUEST_PAGING_LEVELS == 2
+    if ( shadow_remove_write_access(v, gmfn, 2, 0) != 0 )
+        flush_tlb_mask(v->domain->domain_dirty_cpumask);
     sh_set_toplevel_shadow(v, 0, gmfn, PGC_SH_l2_shadow);
 #elif GUEST_PAGING_LEVELS == 3
     /* PAE guests have four shadow_table entries, based on the
      * current values of the guest's four l3es. */
     {
-        int i;
+        int i, flush = 0;
+        gfn_t gl2gfn;
+        mfn_t gl2mfn;
         guest_l3e_t *gl3e = (guest_l3e_t*)v->arch.guest_vtable;
-        for ( i = 0; i < 4; i++ )
+        /* First, make all four entries read-only. */
+        for ( i = 0; i < 4; i++ )
         {
-            ASSERT(pagetable_is_null(v->arch.shadow_table[i]));
             if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
             {
-                gfn_t gl2gfn = guest_l3e_get_gfn(gl3e[i]);
-                mfn_t gl2mfn = vcpu_gfn_to_mfn(v, gl2gfn);
-                if ( valid_mfn(gl2mfn) )
-                    sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3)
-                                           ? PGC_SH_l2h_shadow
-                                           : PGC_SH_l2_shadow);
+                gl2gfn = guest_l3e_get_gfn(gl3e[i]);
+                gl2mfn = vcpu_gfn_to_mfn(v, gl2gfn);
+                flush |= shadow_remove_write_access(v, gl2mfn, 2, 0);
+            }
+        }
+        if ( flush )
+            flush_tlb_mask(v->domain->domain_dirty_cpumask);
+        /* Now install the new shadows. */
+        for ( i = 0; i < 4; i++ )
+        {
+            if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
+            {
+                gl2gfn = guest_l3e_get_gfn(gl3e[i]);
+                gl2mfn = vcpu_gfn_to_mfn(v, gl2gfn);
+                sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3)
+                                       ? PGC_SH_l2h_shadow
+                                       : PGC_SH_l2_shadow);
             }
         }
     }
 #elif GUEST_PAGING_LEVELS == 4
+    if ( shadow_remove_write_access(v, gmfn, 4, 0) != 0 )
+        flush_tlb_mask(v->domain->domain_dirty_cpumask);
     sh_set_toplevel_shadow(v, 0, gmfn, PGC_SH_l4_shadow);
 #else
 #error This should never happen
@@ -3527,9 +3568,9 @@ static int sh_guess_wrmap(struct vcpu *v
 {
     shadow_l1e_t sl1e, *sl1p;
     shadow_l2e_t *sl2p;
-#if GUEST_PAGING_LEVELS >= 3
+#if SHADOW_PAGING_LEVELS >= 3
     shadow_l3e_t *sl3p;
-#if GUEST_PAGING_LEVELS >= 4
+#if SHADOW_PAGING_LEVELS >= 4
     shadow_l4e_t *sl4p;
 #endif
 #endif
@@ -3537,14 +3578,14 @@ static int sh_guess_wrmap(struct vcpu *v
 
 
     /* Carefully look in the shadow linear map for the l1e we expect */
-#if GUEST_PAGING_LEVELS >= 4
+#if SHADOW_PAGING_LEVELS >= 4
     sl4p = sh_linear_l4_table(v) + shadow_l4_linear_offset(vaddr);
     if ( !(shadow_l4e_get_flags(*sl4p) & _PAGE_PRESENT) )
         return 0;
     sl3p = sh_linear_l3_table(v) + shadow_l3_linear_offset(vaddr);
     if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) )
         return 0;
-#elif GUEST_PAGING_LEVELS == 3
+#elif SHADOW_PAGING_LEVELS == 3
     sl3p = ((shadow_l3e_t *) v->arch.shadow.l3table)
           + shadow_l3_linear_offset(vaddr);
     if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) )
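
Two notes on the hunks above. First, since sh_update_cr3() no longer calls
sh_detach_old_tables() at the top, each branch that maps the guest toplevel
now recycles its global mapping in place; without the unmap, every CR3 switch
would leak a global domain-page mapping. The pattern (copied from the
GUEST_PAGING_LEVELS == 4 hunk, shown here out of context):

    if ( v->arch.guest_vtable )
        sh_unmap_domain_page_global(v->arch.guest_vtable);  /* drop old map */
    v->arch.guest_vtable = sh_map_domain_page_global(gmfn); /* map the new */

Second, the sh_guess_wrmap() guards change from GUEST_PAGING_LEVELS to
SHADOW_PAGING_LEVELS because the linear map being walked has the depth of the
shadow, not of the guest: a 2-level guest on a PAE hypervisor runs on 3-level
shadows, so there is a shadow l3e to check for _PAGE_PRESENT even though the
guest itself has no l3.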