ia64/xen-unstable
changeset 10483:1507021dccdf
[HVM][VMX][PAE] Enable PAE VMX guest on PAE host.
The PAE VMX guest supports the NX bit and can complete a kernel build successfully.
Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Xiaohui Xin <xiaohui.xin@intel.com>
author    kaf24@firebug.cl.cam.ac.uk
date      Mon Jun 19 16:47:21 2006 +0100 (2006-06-19)
parents   0991ed8e4ae5
children  06afd218b3b7
files     xen/arch/x86/Makefile xen/arch/x86/audit.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/shadow.c xen/arch/x86/shadow32.c xen/arch/x86/shadow_guest32pae.c xen/arch/x86/shadow_public.c xen/include/asm-x86/mm.h xen/include/asm-x86/shadow.h xen/include/asm-x86/shadow_64.h xen/include/asm-x86/shadow_ops.h
line diff
--- a/xen/arch/x86/Makefile    Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/arch/x86/Makefile    Mon Jun 19 16:47:21 2006 +0100
@@ -41,7 +41,7 @@ obj-y += usercopy.o
 obj-y += x86_emulate.o
 
 ifneq ($(pae),n)
-obj-$(x86_32) += shadow.o shadow_public.o shadow_guest32.o
+obj-$(x86_32) += shadow.o shadow_public.o shadow_guest32.o shadow_guest32pae.o
 else
 obj-$(x86_32) += shadow32.o
 endif
--- a/xen/arch/x86/audit.c    Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/arch/x86/audit.c    Mon Jun 19 16:47:21 2006 +0100
@@ -923,8 +923,8 @@ void _audit_domain(struct domain *d, int
                        d->domain_id, page_to_mfn(page),
                        page->u.inuse.type_info,
                        page->count_info);
-                printk("a->gpfn_and_flags=%p\n",
-                       (void *)a->gpfn_and_flags);
+                printk("a->gpfn_and_flags=%"PRIx64"\n",
+                       (u64)a->gpfn_and_flags);
                 errors++;
             }
             break;
--- a/xen/arch/x86/hvm/vmx/vmx.c    Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c    Mon Jun 19 16:47:21 2006 +0100
@@ -1623,7 +1623,7 @@ static int mov_to_cr(int gp, int cr, str
         if ( vmx_pgbit_test(v) )
         {
             /* The guest is a 32-bit PAE guest. */
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
             unsigned long mfn, old_base_mfn;
 
             if( !shadow_set_guest_paging_levels(v->domain, PAGING_L3) )
@@ -1667,7 +1667,7 @@ static int mov_to_cr(int gp, int cr, str
         else
         {
             /* The guest is a 64 bit or 32-bit PAE guest. */
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
             if ( (v->domain->arch.ops != NULL) &&
                 v->domain->arch.ops->guest_paging_levels == PAGING_L2)
             {
@@ -1683,15 +1683,6 @@ static int mov_to_cr(int gp, int cr, str
                     domain_crash_synchronous();
                 }
             }
-            else
-            {
-                if ( !shadow_set_guest_paging_levels(v->domain,
-                                                     PAGING_L4) )
-                {
-                    printk("Unsupported guest paging levels\n");
-                    domain_crash_synchronous();
-                }
-            }
 #endif
         }
     }
--- a/xen/arch/x86/shadow.c    Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/arch/x86/shadow.c    Mon Jun 19 16:47:21 2006 +0100
@@ -222,6 +222,7 @@ alloc_shadow_page(struct domain *d,
     unsigned long smfn, real_gpfn;
     int pin = 0;
     void *l1, *lp;
+    u64 index = 0;
 
     // Currently, we only keep pre-zero'ed pages around for use as L1's...
     // This will change. Soon.
@@ -354,9 +355,19 @@ alloc_shadow_page(struct domain *d,
         if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
             pin = 1;
 #endif
+
+#if CONFIG_PAGING_LEVELS == 3 & defined ( GUEST_32PAE )
+        /*
+         * We use PGT_l4_shadow for 2-level paging guests on PAE
+         */
+        if ( d->arch.ops->guest_paging_levels == PAGING_L3 )
+            pin = 1;
+#endif
+        if ( d->arch.ops->guest_paging_levels == PAGING_L3 )
+            index = get_cr3_idxval(current);
         break;
 
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
     case PGT_fl1_shadow:
         perfc_incr(shadow_l1_pages);
         d->arch.shadow_page_count++;
@@ -393,7 +404,7 @@ alloc_shadow_page(struct domain *d,
     //
     ASSERT( (psh_type == PGT_snapshot) || !mfn_out_of_sync(gmfn) );
 
-    set_shadow_status(d, gpfn, gmfn, smfn, psh_type);
+    set_shadow_status(d, gpfn, gmfn, smfn, psh_type, index);
 
     if ( pin )
         shadow_pin(smfn);
@@ -1324,7 +1335,7 @@ increase_writable_pte_prediction(struct
     prediction = (prediction & PGT_mfn_mask) | score;
 
     //printk("increase gpfn=%lx pred=%lx create=%d\n", gpfn, prediction, create);
-    set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction, PGT_writable_pred);
+    set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction, PGT_writable_pred, 0);
 
     if ( create )
         perfc_incr(writable_pte_predictions);
@@ -1345,10 +1356,10 @@ decrease_writable_pte_prediction(struct
     //printk("decrease gpfn=%lx pred=%lx score=%lx\n", gpfn, prediction, score);
 
     if ( score )
-        set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction, PGT_writable_pred);
+        set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction, PGT_writable_pred, 0);
     else
     {
-        delete_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, PGT_writable_pred);
+        delete_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, PGT_writable_pred, 0);
         perfc_decr(writable_pte_predictions);
     }
 }
@@ -1385,7 +1396,7 @@ static u32 remove_all_write_access_in_pt
     int is_l1_shadow =
         ((mfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
          PGT_l1_shadow);
-#if CONFIG_PAGING_LEVELS == 4
+#if CONFIG_PAGING_LEVELS >= 3
     is_l1_shadow |=
         ((mfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
          PGT_fl1_shadow);
@@ -1494,7 +1505,7 @@ static int remove_all_write_access(
     while ( a && a->gpfn_and_flags )
     {
         if ( (a->gpfn_and_flags & PGT_type_mask) == PGT_l1_shadow
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
              || (a->gpfn_and_flags & PGT_type_mask) == PGT_fl1_shadow
 #endif
             )
@@ -1538,8 +1549,8 @@ static void resync_pae_guest_l3(struct d
             continue;
 
         idx = get_cr3_idxval(v);
-        smfn = __shadow_status(
-            d, ((unsigned long)(idx << PGT_pae_idx_shift) | entry->gpfn), PGT_l4_shadow);
+
+        smfn = __shadow_status(d, entry->gpfn, PGT_l4_shadow);
 
         if ( !smfn )
             continue;
@@ -1706,7 +1717,7 @@ static int resync_all(struct domain *d,
             {
                 int error;
 
-#if CONFIG_PAGING_LEVELS == 4
+#if CONFIG_PAGING_LEVELS >= 3
                 unsigned long gpfn;
 
                 gpfn = guest_l1e_get_paddr(guest1[i]) >> PAGE_SHIFT;
@@ -2420,17 +2431,6 @@ static void shadow_update_pagetables(str
         v->arch.guest_vtable = map_domain_page_global(gmfn);
     }
 
-#if CONFIG_PAGING_LEVELS >= 3
-    /*
-     * Handle 32-bit PAE enabled guest
-     */
-    if ( SH_GUEST_32PAE && d->arch.ops->guest_paging_levels == PAGING_L3 )
-    {
-        u32 index = get_cr3_idxval(v);
-        gpfn = ((unsigned long)index << PGT_pae_idx_shift) | gpfn;
-    }
-#endif
-
     /*
      * arch.shadow_table
      */
@@ -2445,6 +2445,23 @@ static void shadow_update_pagetables(str
     }
     else
 #endif
+
+#if CONFIG_PAGING_LEVELS == 3 & defined ( GUEST_32PAE )
+    /*
+     * We use PGT_l4_shadow for 2-level paging guests on PAE
+     */
+    if ( d->arch.ops->guest_paging_levels == PAGING_L3 )
+    {
+        if ( unlikely(!(smfn = __shadow_status(d, gpfn, PGT_l4_shadow))) )
+            smfn = shadow_l3_table(v, gpfn, gmfn);
+        else
+        {
+            update_top_level_shadow(v, smfn);
+            need_sync = 1;
+        }
+    }
+    else
+#endif
     if ( unlikely(!(smfn = __shadow_status(d, gpfn, PGT_base_page_table))) )
     {
 #if CONFIG_PAGING_LEVELS == 2
@@ -3093,6 +3110,36 @@ static inline unsigned long init_bl2(
 
     return smfn;
 }
+
+static inline unsigned long init_l3(
+    struct vcpu *v, unsigned long gpfn, unsigned long gmfn)
+{
+    unsigned long smfn;
+    l4_pgentry_t *spl4e;
+    unsigned long index;
+
+    if ( unlikely(!(smfn = alloc_shadow_page(v->domain, gpfn, gmfn, PGT_l4_shadow))) )
+    {
+        printk("Couldn't alloc an L4 shadow for pfn= %lx mfn= %lx\n", gpfn, gmfn);
+        BUG(); /* XXX Deal gracefully wiht failure. */
+    }
+
+    /* Map the self entry, L4&L3 share the same page */
+    spl4e = (l4_pgentry_t *)map_domain_page(smfn);
+
+    /*
+     * Shadow L4's pfn_info->tlbflush_timestamp
+     * should also save it's own index.
+     */
+
+    index = get_cr3_idxval(v);
+    frame_table[smfn].tlbflush_timestamp = index;
+
+    memset(spl4e, 0, L4_PAGETABLE_ENTRIES*sizeof(l4_pgentry_t));
+    spl4e[PAE_SHADOW_SELF_ENTRY] = l4e_from_pfn(smfn, __PAGE_HYPERVISOR);
+    unmap_domain_page(spl4e);
+    return smfn;
+}
 #endif
 
 #if CONFIG_PAGING_LEVELS == 3
@@ -3113,6 +3160,12 @@ static unsigned long shadow_l3_table(
         return init_bl2(d, gpfn, gmfn);
     }
 
+    if ( SH_GUEST_32PAE &&
+         d->arch.ops->guest_paging_levels == PAGING_L3 )
+    {
+        return init_l3(v, gpfn, gmfn);
+    }
+
     if ( unlikely(!(smfn = alloc_shadow_page(d, gpfn, gmfn, PGT_l3_shadow))) )
     {
         printk("Couldn't alloc an L3 shadow for pfn=%lx mfn=%lx\n", gpfn, gmfn);
@@ -3223,6 +3276,11 @@ static unsigned long shadow_l4_table(
         return init_bl2(d, gpfn, gmfn);
     }
 
+    if ( SH_GUEST_32PAE && d->arch.ops->guest_paging_levels == PAGING_L3 )
+    {
+        return init_l3(v, gpfn, gmfn);
+    }
+
     if ( unlikely(!(smfn = alloc_shadow_page(d, gpfn, gmfn, PGT_l4_shadow))) )
     {
         printk("Couldn't alloc an L4 shadow for pfn=%lx mfn=%lx\n", gpfn, gmfn);
@@ -3231,24 +3289,6 @@ static unsigned long shadow_l4_table(
 
     spl4e = (l4_pgentry_t *)map_domain_page(smfn);
 
-    /* For 32-bit PAE guest on 64-bit host */
-    if ( SH_GUEST_32PAE && d->arch.ops->guest_paging_levels == PAGING_L3 )
-    {
-        unsigned long index;
-        /*
-         * Shadow L4's pfn_info->tlbflush_timestamp
-         * should also save it's own index.
-         */
-        index = get_cr3_idxval(v);
-        frame_table[smfn].tlbflush_timestamp = index;
-
-        memset(spl4e, 0, L4_PAGETABLE_ENTRIES*sizeof(l4_pgentry_t));
-        /* Map the self entry */
-        spl4e[PAE_SHADOW_SELF_ENTRY] = l4e_from_pfn(smfn, __PAGE_HYPERVISOR);
-        unmap_domain_page(spl4e);
-        return smfn;
-    }
-
     /* Install hypervisor and 4x linear p.t. mapings. */
     if ( (PGT_base_page_table == PGT_l4_page_table) &&
          !shadow_mode_external(d) )
@@ -3378,7 +3418,7 @@ validate_bl2e_change(
  * This shadow_mark_va_out_of_sync() is for 2M page shadow
  */
 static void shadow_mark_va_out_of_sync_2mp(
-    struct vcpu *v, unsigned long gpfn, unsigned long mfn, unsigned long writable_pl1e)
+    struct vcpu *v, unsigned long gpfn, unsigned long mfn, paddr_t writable_pl1e)
 {
     struct out_of_sync_entry *entry =
         shadow_mark_mfn_out_of_sync(v, gpfn, mfn);
@@ -3647,6 +3687,7 @@ static inline int l2e_rw_fault(
     }
 
     unmap_domain_page(l1_p);
+    *gl2e_p = gl2e;
     return 1;
 
 }
@@ -3720,7 +3761,7 @@ static inline int guest_page_fault(
 
     ASSERT( d->arch.ops->guest_paging_levels >= PAGING_L3 );
 
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
     if ( (error_code & (ERROR_I | ERROR_P)) == (ERROR_I | ERROR_P) )
         return 1;
 #endif
@@ -4056,7 +4097,7 @@ struct shadow_ops MODE_32_2_HANDLER = {
 };
 #endif
 
-#if ( CONFIG_PAGING_LEVELS == 3 && !defined (GUEST_PGENTRY_32) ) || \
+#if ( CONFIG_PAGING_LEVELS == 3 && !defined (GUEST_PGENTRY_32) && !defined (GUEST_32PAE) ) || \
     ( CONFIG_PAGING_LEVELS == 4 && defined (GUEST_PGENTRY_32) )
 
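A note on the shadow.c hunks above: a 32-bit PAE guest's PDPT occupies only 32 bytes, so several guest top-level tables can share one gpfn. init_l3() therefore stashes the per-vcpu CR3 index in the shadow page's tlbflush_timestamp slot, and free_shadow_page() (see the shadow_public.c hunk further down) reads it back so the matching hash key can be rebuilt. The sketch below shows that store/recover round trip in isolation; struct fake_page, the toy frame_table and the helper names are hypothetical, only the idea of reusing a spare per-page field follows the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for Xen's struct page_info: a spare per-page word
     * is reused to remember which PDPT slot an L3/L4 shadow belongs to. */
    struct fake_page {
        uint32_t tlbflush_timestamp;
    };

    static struct fake_page frame_table[16];    /* toy frame table */

    /* Allocation path: record the guest's CR3 index against the shadow mfn. */
    static void record_cr3_index(unsigned long smfn, uint32_t index)
    {
        frame_table[smfn].tlbflush_timestamp = index;
    }

    /* Free path: recover the index so the same hash key can be regenerated. */
    static uint32_t recover_cr3_index(unsigned long smfn)
    {
        return frame_table[smfn].tlbflush_timestamp;
    }

    int main(void)
    {
        record_cr3_index(3, 1);                     /* shadow mfn 3 shadows PDPT slot 1 */
        printf("index=%u\n", recover_cr3_index(3)); /* prints 1 */
        return 0;
    }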
--- a/xen/arch/x86/shadow32.c    Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/arch/x86/shadow32.c    Mon Jun 19 16:47:21 2006 +0100
@@ -306,7 +306,7 @@ alloc_shadow_page(struct domain *d,
     //
     ASSERT( (psh_type == PGT_snapshot) || !mfn_out_of_sync(gmfn) );
 
-    set_shadow_status(d, gpfn, gmfn, smfn, psh_type);
+    set_shadow_status(d, gpfn, gmfn, smfn, psh_type, 0);
 
     if ( pin )
         shadow_pin(smfn);
@@ -395,7 +395,7 @@ void free_shadow_page(unsigned long smfn
 
     ASSERT( ! IS_INVALID_M2P_ENTRY(gpfn) );
 
-    delete_shadow_status(d, gpfn, gmfn, type);
+    delete_shadow_status(d, gpfn, gmfn, type, 0);
 
     switch ( type )
     {
@@ -2319,7 +2319,7 @@ increase_writable_pte_prediction(struct
     prediction = (prediction & PGT_mfn_mask) | score;
 
     //printk("increase gpfn=%lx pred=%lx create=%d\n", gpfn, prediction, create);
-    set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction, PGT_writable_pred);
+    set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction, PGT_writable_pred, 0);
 
     if ( create )
         perfc_incr(writable_pte_predictions);
@@ -2340,10 +2340,10 @@ decrease_writable_pte_prediction(struct
     //printk("decrease gpfn=%lx pred=%lx score=%lx\n", gpfn, prediction, score);
 
     if ( score )
-        set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction, PGT_writable_pred);
+        set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction, PGT_writable_pred, 0);
     else
     {
-        delete_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, PGT_writable_pred);
+        delete_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, PGT_writable_pred, 0);
         perfc_decr(writable_pte_predictions);
     }
 }
@@ -2381,7 +2381,7 @@ free_writable_pte_predictions(struct dom
      * keep an accurate count of writable_pte_predictions to keep it
      * happy.
      */
-    delete_shadow_status(d, gpfn_list[count], 0, PGT_writable_pred);
+    delete_shadow_status(d, gpfn_list[count], 0, PGT_writable_pred, 0);
     perfc_decr(writable_pte_predictions);
 }
--- a/xen/arch/x86/shadow_guest32pae.c    Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/arch/x86/shadow_guest32pae.c    Mon Jun 19 16:47:21 2006 +0100
@@ -1,5 +1,4 @@
 #define GUEST_32PAE
-#if defined (__x86_64__)
 
 #include "shadow.c"
 struct shadow_ops MODE_64_PAE_HANDLER = {
@@ -15,4 +14,3 @@ struct shadow_ops MODE_64_PAE_HANDLER =
     .gva_to_gpa = gva_to_gpa_64,
 };
 
-#endif
--- a/xen/arch/x86/shadow_public.c    Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/arch/x86/shadow_public.c    Mon Jun 19 16:47:21 2006 +0100
@@ -123,8 +123,19 @@ int shadow_set_guest_paging_levels(struc
 #endif
 #if CONFIG_PAGING_LEVELS == 3
     case 3:
-        if ( d->arch.ops != &MODE_64_3_HANDLER )
-            d->arch.ops = &MODE_64_3_HANDLER;
+        if ( d->arch.ops == NULL ||
+             shadow_mode_log_dirty(d) )
+        {
+            if ( d->arch.ops != &MODE_64_3_HANDLER )
+                d->arch.ops = &MODE_64_3_HANDLER;
+        }
+        else
+        {
+            if ( d->arch.ops == &MODE_64_2_HANDLER )
+                free_shadow_pages(d);
+            if ( d->arch.ops != &MODE_64_PAE_HANDLER )
+                d->arch.ops = &MODE_64_PAE_HANDLER;
+        }
         shadow_unlock(d);
         return 1;
 #endif
@@ -268,10 +279,8 @@ free_shadow_tables(struct domain *d, uns
             put_shadow_ref(entry_get_pfn(ple[i]));
         if (d->arch.ops->guest_paging_levels == PAGING_L3)
         {
-#if CONFIG_PAGING_LEVELS == 4
+#if CONFIG_PAGING_LEVELS >= 3
             if ( i == PAE_L3_PAGETABLE_ENTRIES && level == PAGING_L4 )
-#elif CONFIG_PAGING_LEVELS == 3
-            if ( i == PAE_L3_PAGETABLE_ENTRIES && level == PAGING_L3 )
 #endif
                 break;
         }
@@ -710,6 +719,7 @@ void free_shadow_page(unsigned long smfn
     struct domain *d = page_get_owner(mfn_to_page(gmfn));
     unsigned long gpfn = mfn_to_gmfn(d, gmfn);
     unsigned long type = page->u.inuse.type_info & PGT_type_mask;
+    u64 index = 0;
 
     SH_VVLOG("%s: free'ing smfn=%lx", __func__, smfn);
 
@@ -722,12 +732,16 @@ void free_shadow_page(unsigned long smfn
         if ( !mfn )
             gpfn |= (1UL << 63);
     }
+#endif
+#if CONFIG_PAGING_LEVELS >= 3
     if ( d->arch.ops->guest_paging_levels == PAGING_L3 )
-        if ( type == PGT_l4_shadow )
-            gpfn = ((unsigned long)page->tlbflush_timestamp << PGT_pae_idx_shift) | gpfn;
+    {
+        if ( type == PGT_l4_shadow )
+            index = page->tlbflush_timestamp;
+    }
 #endif
 
-    delete_shadow_status(d, gpfn, gmfn, type);
+    delete_shadow_status(d, gpfn, gmfn, type, index);
 
     switch ( type )
     {
@@ -835,7 +849,7 @@ free_writable_pte_predictions(struct dom
         while ( count )
         {
             count--;
-            delete_shadow_status(d, gpfn_list[count], 0, PGT_writable_pred);
+            delete_shadow_status(d, gpfn_list[count], 0, PGT_writable_pred, 0);
         }
 
         xfree(gpfn_list);
@@ -1050,8 +1064,8 @@ void __shadow_mode_disable(struct domain
     {
         if ( d->arch.shadow_ht[i].gpfn_and_flags != 0 )
         {
-            printk("%s: d->arch.shadow_ht[%x].gpfn_and_flags=%lx\n",
-                   __FILE__, i, d->arch.shadow_ht[i].gpfn_and_flags);
+            printk("%s: d->arch.shadow_ht[%x].gpfn_and_flags=%"PRIx64"\n",
+                   __FILE__, i, (u64)d->arch.shadow_ht[i].gpfn_and_flags);
             BUG();
         }
     }
--- a/xen/include/asm-x86/mm.h    Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/include/asm-x86/mm.h    Mon Jun 19 16:47:21 2006 +0100
@@ -103,13 +103,11 @@ struct page_info
 #define PGT_high_mfn_mask   (0xfffUL << PGT_high_mfn_shift)
 #define PGT_mfn_mask        (((1U<<23)-1) | PGT_high_mfn_mask)
 #define PGT_high_mfn_nx     (0x800UL << PGT_high_mfn_shift)
-#define PGT_pae_idx_shift   PGT_high_mfn_shift
 #else
 /* 23-bit mfn mask for shadow types: good for up to 32GB RAM. */
 #define PGT_mfn_mask        ((1U<<23)-1)
 /* NX for PAE xen is not supported yet */
 #define PGT_high_mfn_nx     (1ULL << 63)
-#define PGT_pae_idx_shift   23
 #endif
 
 #define PGT_score_shift     23
--- a/xen/include/asm-x86/shadow.h    Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/include/asm-x86/shadow.h    Mon Jun 19 16:47:21 2006 +0100
@@ -112,6 +112,30 @@ do {
 } while (0)
 #endif
 
+#if CONFIG_PAGING_LEVELS >= 3
+static inline u64 get_cr3_idxval(struct vcpu *v)
+{
+    u64 pae_cr3;
+
+    if ( v->domain->arch.ops->guest_paging_levels == PAGING_L3 &&
+         !shadow_mode_log_dirty(v->domain) )
+    {
+        pae_cr3 = hvm_get_guest_ctrl_reg(v, 3); /* get CR3 */
+        return (pae_cr3 >> PAE_CR3_ALIGN) & PAE_CR3_IDX_MASK;
+    }
+    else
+        return 0;
+}
+
+#define shadow_key_t u64
+#define index_to_key(x) ((x) << 32)
+#else
+#define get_cr3_idxval(v) (0)
+#define shadow_key_t unsigned long
+#define index_to_key(x)  (0)
+#endif
+
+
 #define SHADOW_ENCODE_MIN_MAX(_min, _max) ((((GUEST_L1_PAGETABLE_ENTRIES - 1) - (_max)) << 16) | (_min))
 #define SHADOW_MIN(_encoded) ((_encoded) & ((1u<<16) - 1))
 #define SHADOW_MAX(_encoded) ((GUEST_L1_PAGETABLE_ENTRIES - 1) - ((_encoded) >> 16))
@@ -309,7 +333,7 @@ extern unsigned long get_mfn_from_gpfn_f
 
 struct shadow_status {
     struct shadow_status *next;   /* Pull-to-front list per hash bucket. */
-    unsigned long gpfn_and_flags; /* Guest pfn plus flags. */
+    shadow_key_t  gpfn_and_flags; /* Guest pfn plus flags. */
     unsigned long smfn;           /* Shadow mfn. */
 };
 
@@ -1180,7 +1204,13 @@ static inline unsigned long __shadow_sta
     struct domain *d, unsigned long gpfn, unsigned long stype)
 {
     struct shadow_status *p, *x, *head;
-    unsigned long key = gpfn | stype;
+    shadow_key_t key;
+#if CONFIG_PAGING_LEVELS >= 3
+    if ( d->arch.ops->guest_paging_levels == PAGING_L3 && stype == PGT_l4_shadow )
+        key = gpfn | stype | index_to_key(get_cr3_idxval(current));
+    else
+#endif
+        key = gpfn | stype;
 
     ASSERT(shadow_lock_is_acquired(d));
     ASSERT(gpfn == (gpfn & PGT_mfn_mask));
@@ -1295,10 +1325,11 @@ shadow_max_pgtable_type(struct domain *d
 }
 
 static inline void delete_shadow_status(
-    struct domain *d, unsigned long gpfn, unsigned long gmfn, unsigned int stype)
+    struct domain *d, unsigned long gpfn, unsigned long gmfn, unsigned int stype, u64 index)
 {
     struct shadow_status *p, *x, *n, *head;
-    unsigned long key = gpfn | stype;
+
+    shadow_key_t key = gpfn | stype | index_to_key(index);
 
     ASSERT(shadow_lock_is_acquired(d));
     ASSERT(!(gpfn & ~PGT_mfn_mask));
@@ -1374,11 +1405,12 @@ static inline void delete_shadow_status(
 
 static inline void set_shadow_status(
     struct domain *d, unsigned long gpfn, unsigned long gmfn,
-    unsigned long smfn, unsigned long stype)
+    unsigned long smfn, unsigned long stype, u64 index)
 {
     struct shadow_status *x, *head, *extra;
     int i;
-    unsigned long key = gpfn | stype;
+
+    shadow_key_t key = gpfn | stype | index_to_key(index);
 
     SH_VVLOG("set gpfn=%lx gmfn=%lx smfn=%lx t=%lx", gpfn, gmfn, smfn, stype);
 
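For orientation, the shadow.h hunk above is the core of the change: the shadow hash key widens from unsigned long to a 64-bit shadow_key_t so that the PDPT index a 32-bit PAE guest encodes in CR3 bits above the 32-byte alignment can be folded into the upper 32 bits, while the guest pfn and shadow type stay in the lower bits. Below is a minimal stand-alone sketch of that key scheme; the helper names (cr3_idxval, compose_shadow_key) and the PGT_L4_SHADOW value are hypothetical, only the shift and mask arithmetic mirrors the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Constants mirrored from the patch; everything else here (helper names,
     * the PGT_L4_SHADOW placeholder value) is hypothetical. */
    #define PAE_CR3_ALIGN     5           /* PAE PDPT is 32-byte aligned           */
    #define PAE_CR3_IDX_MASK  0x7fULL     /* index bits of CR3 above the alignment */
    #define PGT_L4_SHADOW     (5ULL << 29) /* stand-in for the real PGT_l4_shadow   */

    typedef uint64_t shadow_key_t;

    /* Which 32-byte PDPT slot within the CR3 frame the guest is using. */
    static uint64_t cr3_idxval(uint64_t guest_cr3)
    {
        return (guest_cr3 >> PAE_CR3_ALIGN) & PAE_CR3_IDX_MASK;
    }

    /* gpfn and shadow type stay in the low 32 bits; the PDPT index goes high. */
    static shadow_key_t compose_shadow_key(uint64_t gpfn, uint64_t stype, uint64_t idx)
    {
        return (gpfn | stype) | (idx << 32);
    }

    int main(void)
    {
        uint64_t cr3 = 0x1234020;   /* example: PDPT at offset 0x20 of frame 0x1234 */
        shadow_key_t key = compose_shadow_key(0x1234, PGT_L4_SHADOW, cr3_idxval(cr3));

        /* Two vcpus whose PDPTs live in the same frame but at different offsets
         * now hash to different shadow entries. */
        printf("idx=%llu key=%#llx\n",
               (unsigned long long)cr3_idxval(cr3), (unsigned long long)key);
        return 0;
    }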
--- a/xen/include/asm-x86/shadow_64.h    Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/include/asm-x86/shadow_64.h    Mon Jun 19 16:47:21 2006 +0100
@@ -36,9 +36,9 @@
  */
 extern struct shadow_ops MODE_64_2_HANDLER;
 extern struct shadow_ops MODE_64_3_HANDLER;
+extern struct shadow_ops MODE_64_PAE_HANDLER;
 #if CONFIG_PAGING_LEVELS == 4
 extern struct shadow_ops MODE_64_4_HANDLER;
-extern struct shadow_ops MODE_64_PAE_HANDLER;
 #endif
 
 #if CONFIG_PAGING_LEVELS == 3
@@ -65,10 +65,6 @@ typedef struct { intpte_t l4; } l4_pgent
 #define ESH_LOG(_f, _a...) ((void)0)
 #endif
 
-#define PAGING_L4      4UL
-#define PAGING_L3      3UL
-#define PAGING_L2      2UL
-#define PAGING_L1      1UL
 #define L_MASK  0xff
 
 #define PAE_PAGING_LEVELS   3
@@ -108,18 +104,14 @@ typedef struct { intpte_t lo; } pgentry_
 #define entry_has_changed(x,y,flags) \
         ( !!(((x).lo ^ (y).lo) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
 
-#define PAE_SHADOW_SELF_ENTRY   259
-#define PAE_L3_PAGETABLE_ENTRIES   4
-
 /******************************************************************************/
 /*
- * The macro and inlines are for 32-bit PAE guest on 64-bit host
+ * The macro and inlines are for 32-bit PAE guest
  */
-#define PAE_CR3_ALIGN       5
-#define PAE_CR3_IDX_MASK    0x7f
-#define PAE_CR3_IDX_NO      128
+#define PAE_PDPT_RESERVED       0x1e6 /* [8:5], [2,1] */
 
-#define PAE_PDPT_RESERVED   0x1e6 /* [8:5], [2,1] */
+#define PAE_SHADOW_SELF_ENTRY   259
+#define PAE_L3_PAGETABLE_ENTRIES   4
 
 /******************************************************************************/
 static inline int table_offset_64(unsigned long va, int level)
@@ -186,19 +178,10 @@ static inline int guest_table_offset_64(
     }
 }
 
-static inline unsigned long get_cr3_idxval(struct vcpu *v)
-{
-    unsigned long pae_cr3 = hvm_get_guest_ctrl_reg(v, 3); /* get CR3 */
-
-    return (pae_cr3 >> PAE_CR3_ALIGN) & PAE_CR3_IDX_MASK;
-}
-
-
 #define SH_GUEST_32PAE 1
 #else
 #define guest_table_offset_64(va, level, index) \
         table_offset_64((va),(level))
-#define get_cr3_idxval(v) 0
 #define SH_GUEST_32PAE 0
 #endif
 
@@ -514,7 +497,10 @@ static inline void entry_general(
 
             l1_p =(pgentry_64_t *)map_domain_page(smfn);
             for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
-                entry_remove_flags(l1_p[i], _PAGE_RW);
+            {
+                if ( mfn_is_page_table(entry_get_pfn(l1_p[i])) )
+                    entry_remove_flags(l1_p[i], _PAGE_RW);
+            }
 
             unmap_domain_page(l1_p);
         }
--- a/xen/include/asm-x86/shadow_ops.h    Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/include/asm-x86/shadow_ops.h    Mon Jun 19 16:47:21 2006 +0100
@@ -22,6 +22,14 @@
 #ifndef _XEN_SHADOW_OPS_H
 #define _XEN_SHADOW_OPS_H
 
+#define PAGING_L4      4UL
+#define PAGING_L3      3UL
+#define PAGING_L2      2UL
+#define PAGING_L1      1UL
+
+#define PAE_CR3_ALIGN       5
+#define PAE_CR3_IDX_MASK    0x7f
+
 #if defined( GUEST_PGENTRY_32 )
 
 #define GUEST_L1_PAGETABLE_ENTRIES     L1_PAGETABLE_ENTRIES_32