ia64/xen-unstable
changeset 9015:71f2d19cd3a5
Clean up p2m functions.
1) set_p2m_entry/alloc_p2m_table now call domain_crash instead of
domain_crash_synchronous when map_p2m_entry returns failure; otherwise
dom0 will crash.
2) free_p2m_table now takes a domain instead of a vcpu as its parameter.
3) Add AP p2m initialization logic for VMX domains.
And some coding-style cleanups.
Signed-off-by: Xin Li <xin.b.li@intel.com>
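
For context on item 1: domain_crash() merely marks the domain as crashed and returns, so the caller can still unmap its pages and propagate an error code, whereas domain_crash_synchronous() never returns to its caller. Below is a minimal standalone sketch of the new pattern, with stub types and stub functions standing in for the real Xen primitives (the real calls take different arguments):

    #include <stdio.h>

    struct domain { int id; };          /* stub for Xen's struct domain */

    /* Stub: the real domain_crash() flags the domain and returns, so the
     * caller can still clean up and return an error code. */
    static void domain_crash(struct domain *d)
    {
        printf("domain %d marked as crashed\n", d->id);
    }

    /* Stub: pretend the mapping failed (0 == failure, as in the patch). */
    static int map_p2m_entry(struct domain *d, unsigned long gpfn,
                             unsigned long mfn)
    {
        (void)d; (void)gpfn; (void)mfn;
        return 0;
    }

    static int set_p2m_entry(struct domain *d, unsigned long gpfn,
                             unsigned long mfn)
    {
        int error;

        /* New pattern: on failure, flag the crash but keep executing, so
         * the cleanup below still runs and the error reaches the caller.
         * domain_crash_synchronous() would never return here, skipping
         * the cleanup. */
        if ( !(error = map_p2m_entry(d, gpfn, mfn)) )
            domain_crash(d);

        /* ... unmap_domain_page() cleanup would go here ... */

        return error;
    }

    int main(void)
    {
        struct domain d = { .id = 1 };
        return set_p2m_entry(&d, 0, 0) ? 0 : 1;
    }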
author    kaf24@firebug.cl.cam.ac.uk
date      Fri Feb 24 17:32:19 2006 +0100 (2006-02-24)
parents   1980e01346a7
children  cf1c1bb9f6d2
files     xen/arch/x86/shadow32.c xen/arch/x86/shadow_public.c xen/include/asm-x86/shadow_public.h
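
Throughout the diff below, the p2m table is a flat array of machine frame numbers mapped read-only at RO_MPT_VIRT_START and indexed by guest pfn — hence the allocation walk advances va by sizeof(mfn) per guest page, and the leaf write masks gpfn down to an index within one l0 page. A small sketch of that address arithmetic (the RO_MPT_VIRT_START value here is a placeholder, not the real constant):

    #include <stdio.h>

    #define PAGE_SIZE          4096UL
    #define RO_MPT_VIRT_START  0xFC000000UL  /* placeholder value */

    /* Virtual address of the p2m slot for a given guest pfn. */
    static unsigned long p2m_slot_va(unsigned long gpfn)
    {
        return RO_MPT_VIRT_START + gpfn * sizeof(unsigned long);
    }

    /* Index of that slot within its leaf (l0) page, mirroring the diff's
     * l0tab[gpfn & ((PAGE_SIZE / sizeof(mfn)) - 1)] = mfn; */
    static unsigned long p2m_slot_index(unsigned long gpfn)
    {
        return gpfn & ((PAGE_SIZE / sizeof(unsigned long)) - 1);
    }

    int main(void)
    {
        unsigned long gpfn = 0x12345;
        printf("va=%#lx index=%lu\n", p2m_slot_va(gpfn), p2m_slot_index(gpfn));
        return 0;
    }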
line diff
--- a/xen/arch/x86/shadow32.c  Fri Feb 24 17:25:41 2006 +0100
+++ b/xen/arch/x86/shadow32.c  Fri Feb 24 17:32:19 2006 +0100
@@ -43,7 +43,8 @@ static void free_writable_pte_prediction
 static void mark_shadows_as_reflecting_snapshot(struct domain *d, unsigned long gpfn);
 #endif
 
-static void free_p2m_table(struct vcpu *v);
+static int alloc_p2m_table(struct domain *d);
+static void free_p2m_table(struct domain *d);
 
 /********
 
@@ -739,7 +740,7 @@ static void alloc_monitor_pagetable(stru
     mpl2e = (l2_pgentry_t *)map_domain_page_global(mmfn);
     memset(mpl2e, 0, PAGE_SIZE);
 
-    memcpy(&mpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
+    memcpy(&mpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
            &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
            HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
 
@@ -760,6 +761,23 @@ static void alloc_monitor_pagetable(stru
 
     if ( v->vcpu_id == 0 )
         alloc_p2m_table(d);
+    else
+    {
+        unsigned long mfn;
+
+        mfn = pagetable_get_pfn(d->vcpu[0]->arch.monitor_table);
+        if ( mfn )
+        {
+            l2_pgentry_t *l2tab;
+
+            l2tab = map_domain_page(mfn);
+
+            mpl2e[l2_table_offset(RO_MPT_VIRT_START)] =
+                l2tab[l2_table_offset(RO_MPT_VIRT_START)];
+
+            unmap_domain_page(l2tab);
+        }
+    }
 }
 
 /*
@@ -771,7 +789,7 @@ void free_monitor_pagetable(struct vcpu
     unsigned long mfn;
 
     ASSERT( pagetable_get_paddr(v->arch.monitor_table) );
-    
+
     mpl2e = v->arch.monitor_vtable;
 
     /*
@@ -794,7 +812,7 @@ void free_monitor_pagetable(struct vcpu
     }
 
     if ( v->vcpu_id == 0 )
-        free_p2m_table(v);
+        free_p2m_table(v->domain);
 
     /*
      * Then free monitor_table.
@@ -808,8 +826,8 @@ void free_monitor_pagetable(struct vcpu
 }
 
 static int
-map_p2m_entry(
-    l1_pgentry_t *l1tab, unsigned long va, unsigned long gpa, unsigned long mfn)
+map_p2m_entry(l1_pgentry_t *l1tab, unsigned long va,
+              unsigned long gpa, unsigned long mfn)
 {
     unsigned long *l0tab = NULL;
     l1_pgentry_t l1e = { 0 };
@@ -820,27 +838,22 @@ map_p2m_entry(
     {
         page = alloc_domheap_page(NULL);
         if ( !page )
-            goto fail;
-
-        if ( l0tab )
-            unmap_domain_page(l0tab);
+            return 0;
+
         l0tab = map_domain_page(page_to_mfn(page));
-        memset(l0tab, 0, PAGE_SIZE );
+        memset(l0tab, 0, PAGE_SIZE);
+
        l1e = l1tab[l1_table_offset(va)] =
            l1e_from_page(page, __PAGE_HYPERVISOR);
     }
-    else if ( l0tab == NULL)
+    else
        l0tab = map_domain_page(l1e_get_pfn(l1e));
 
-    l0tab[gpa & ((PAGE_SIZE / sizeof (mfn)) - 1) ] = mfn;
-
-    if ( l0tab )
-        unmap_domain_page(l0tab);
+    l0tab[gpa & ((PAGE_SIZE / sizeof(mfn)) - 1)] = mfn;
+
+    unmap_domain_page(l0tab);
 
     return 1;
-
-fail:
-    return 0;
 }
 
 int
@@ -853,7 +866,6 @@ set_p2m_entry(struct domain *d, unsigned
     l1_pgentry_t *l1;
     struct page_info *l1page;
     unsigned long va = pfn << PAGE_SHIFT;
-    int error;
 
     if ( shadow_mode_external(d) )
     {
@@ -877,6 +889,7 @@ set_p2m_entry(struct domain *d, unsigned
 
     if ( shadow_mode_external(d) )
     {
+        int error;
         l1_pgentry_t *l1tab = NULL;
         l2_pgentry_t l2e;
 
@@ -885,14 +898,13 @@ set_p2m_entry(struct domain *d, unsigned
         ASSERT( l2e_get_flags(l2e) & _PAGE_PRESENT );
 
         l1tab = map_domain_page(l2e_get_pfn(l2e));
-        error = map_p2m_entry(l1tab, va, pfn, mfn);
-        if ( !error )
-            domain_crash_synchronous();
+        if ( !(error = map_p2m_entry(l1tab, va, pfn, mfn)) )
+            domain_crash(d);
 
         unmap_domain_page(l1tab);
         unmap_domain_page_with_cache(l2, l2cache);
 
-        return 1;
+        return error;
     }
 
     /*
@@ -926,7 +938,7 @@ set_p2m_entry(struct domain *d, unsigned
     return 1;
 }
 
-int
+static int
 alloc_p2m_table(struct domain *d)
 {
     struct list_head *list_ent;
@@ -937,7 +949,7 @@ alloc_p2m_table(struct domain *d)
     l2_pgentry_t l2e = { 0 };
     struct page_info *page;
     unsigned long gpfn, mfn;
-    int error;
+    int error = 0;
 
     if ( pagetable_get_pfn(d->vcpu[0]->arch.monitor_table) )
     {
@@ -955,6 +967,9 @@ alloc_p2m_table(struct domain *d)
         }
         else
             l1tab = map_domain_page(l2e_get_pfn(l2e));
+
+        if ( l2tab )
+            unmap_domain_page(l2tab);
     }
     else
     {
@@ -972,23 +987,23 @@ alloc_p2m_table(struct domain *d)
         page = list_entry(list_ent, struct page_info, list);
         mfn = page_to_mfn(page);
 
-        error = map_p2m_entry(l1tab, va, gpfn, mfn);
-        if ( !error )
-            domain_crash_synchronous();
+        if ( !(error = map_p2m_entry(l1tab, va, gpfn, mfn)) )
+        {
+            domain_crash(d);
+            break;
+        }
 
         list_ent = frame_table[mfn].list.next;
         va += sizeof(mfn);
     }
 
-    if (l2tab)
-        unmap_domain_page(l2tab);
     unmap_domain_page(l1tab);
 
-    return 1;
+    return error;
 }
 
-static void
-free_p2m_table(struct vcpu *v)
+static void
+free_p2m_table(struct domain *d)
 {
     unsigned long va;
     l2_pgentry_t *l2tab;
@@ -996,10 +1011,10 @@ free_p2m_table(struct vcpu *v)
     l2_pgentry_t l2e;
     l1_pgentry_t l1e;
 
-    ASSERT ( pagetable_get_pfn(v->arch.monitor_table) );
+    ASSERT( pagetable_get_pfn(d->vcpu[0]->arch.monitor_table) );
 
     l2tab = map_domain_page(
-        pagetable_get_pfn(v->arch.monitor_table));
+        pagetable_get_pfn(d->vcpu[0]->arch.monitor_table));
 
     for ( va = RO_MPT_VIRT_START; va < RO_MPT_VIRT_END; )
     {
@@ -1015,11 +1030,13 @@ free_p2m_table(struct vcpu *v)
 
                 if ( l1e_get_flags(l1e) & _PAGE_PRESENT )
                     free_domheap_page(mfn_to_page(l1e_get_pfn(l1e)));
-                va += PAGE_SIZE; 
+                va += PAGE_SIZE;
             }
             unmap_domain_page(l1tab);
             free_domheap_page(mfn_to_page(l2e_get_pfn(l2e)));
         }
+        else
+            va += PAGE_SIZE * L1_PAGETABLE_ENTRIES;
     }
     unmap_domain_page(l2tab);
 }
@@ -1246,7 +1263,7 @@ int __shadow_mode_enable(struct domain *
 
     if ( shadow_mode_refcounts(d) )
     {
-        struct list_head *list_ent; 
+        struct list_head *list_ent;
         struct page_info *page;
 
         /*
--- a/xen/arch/x86/shadow_public.c  Fri Feb 24 17:25:41 2006 +0100
+++ b/xen/arch/x86/shadow_public.c  Fri Feb 24 17:32:19 2006 +0100
@@ -31,7 +31,8 @@
 #include <xen/trace.h>
 #include <asm/shadow_64.h>
 
-static void free_p2m_table(struct vcpu *v);
+static int alloc_p2m_table(struct domain *d);
+static void free_p2m_table(struct domain *d);
 
 #define SHADOW_MAX_GUEST32(_encoded) ((L1_PAGETABLE_ENTRIES_32 - 1) - ((_encoded) >> 16))
 
@@ -328,6 +329,23 @@ static void alloc_monitor_pagetable(stru
 
     if ( v->vcpu_id == 0 )
         alloc_p2m_table(d);
+    else
+    {
+        unsigned long mfn;
+
+        mfn = pagetable_get_pfn(d->vcpu[0]->arch.monitor_table);
+        if ( mfn )
+        {
+            l4_pgentry_t *l4tab;
+
+            l4tab = map_domain_page(mfn);
+
+            mpl4e[l4_table_offset(RO_MPT_VIRT_START)] =
+                l4tab[l4_table_offset(RO_MPT_VIRT_START)];
+
+            unmap_domain_page(l4tab);
+        }
+    }
 }
 
 void free_monitor_pagetable(struct vcpu *v)
@@ -338,7 +356,7 @@ void free_monitor_pagetable(struct vcpu
      * free monitor_table.
      */
     if ( v->vcpu_id == 0 )
-        free_p2m_table(v);
+        free_p2m_table(v->domain);
 
     /*
      * Then free monitor_table.
@@ -397,13 +415,49 @@ static void alloc_monitor_pagetable(stru
         l2e_empty();
     mpl2e[l2_table_offset(RO_MPT_VIRT_START)] = l2e_empty();
 
-    unmap_domain_page(mpl2e);
-
     v->arch.monitor_table = mk_pagetable(m3mfn << PAGE_SHIFT); /* < 4GB */
     v->arch.monitor_vtable = (l2_pgentry_t *) mpl3e;
 
     if ( v->vcpu_id == 0 )
         alloc_p2m_table(d);
+    else
+    {
+        unsigned long mfn;
+
+        mfn = pagetable_get_pfn(d->vcpu[0]->arch.monitor_table);
+        if ( mfn )
+        {
+            l3_pgentry_t *l3tab, l3e;
+            l2_pgentry_t *l2tab;
+
+            l3tab = map_domain_page(mfn);
+            l3e = l3tab[l3_table_offset(RO_MPT_VIRT_START)];
+
+            /*
+             * NB: when CONFIG_PAGING_LEVELS == 3,
+             * (entry_get_flags(l3e) & _PAGE_PRESENT) is always true here.
+             * alloc_monitor_pagetable should guarantee this.
+             */
+            if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+                BUG();
+
+            l2tab = map_domain_page(l3e_get_pfn(l3e));
+
+            /*
+             * Just one l2 slot is used here, so at most 2M for p2m table:
+             *     ((4K * 512)/sizeof(unsigned long)) * 4K = 2G
+             * should be OK on PAE xen, since Qemu DM can only map 1.5G VMX
+             * guest memory.
+             */
+            mpl2e[l2_table_offset(RO_MPT_VIRT_START)] =
+                l2tab[l2_table_offset(RO_MPT_VIRT_START)];
+
+            unmap_domain_page(l2tab);
+            unmap_domain_page(l3tab);
+        }
+    }
+
+    unmap_domain_page(mpl2e);
 }
 
 void free_monitor_pagetable(struct vcpu *v)
@@ -413,7 +467,7 @@ void free_monitor_pagetable(struct vcpu
      * free monitor_table.
      */
     if ( v->vcpu_id == 0 )
-        free_p2m_table(v);
+        free_p2m_table(v->domain);
 
     m3mfn = pagetable_get_pfn(v->arch.monitor_table);
     m2mfn = l2e_get_pfn(v->arch.monitor_vtable[L3_PAGETABLE_ENTRIES - 1]);
@@ -1348,14 +1402,14 @@ int _shadow_mode_refcounts(struct domain
 }
 
 static int
-map_p2m_entry(
-    pgentry_64_t *top_tab, unsigned long va, unsigned long gpa, unsigned long mfn)
+map_p2m_entry(pgentry_64_t *top_tab, unsigned long va,
+              unsigned long gpfn, unsigned long mfn)
 {
 #if CONFIG_PAGING_LEVELS >= 4
     pgentry_64_t l4e = { 0 };
+    pgentry_64_t *l3tab = NULL;
 #endif
 #if CONFIG_PAGING_LEVELS >= 3
-    pgentry_64_t *l3tab = NULL;
     pgentry_64_t l3e = { 0 };
 #endif
     l2_pgentry_t *l2tab = NULL;
@@ -1367,7 +1421,7 @@ map_p2m_entry(
 
 #if CONFIG_PAGING_LEVELS >= 4
     l4e = top_tab[l4_table_offset(va)];
-    if ( !(entry_get_flags(l4e) & _PAGE_PRESENT) ) 
+    if ( !(entry_get_flags(l4e) & _PAGE_PRESENT) )
     {
         page = alloc_domheap_page(NULL);
         if ( !page )
@@ -1375,17 +1429,14 @@ map_p2m_entry(
 
         l3tab = map_domain_page(page_to_mfn(page));
         memset(l3tab, 0, PAGE_SIZE);
-        l4e = top_tab[l4_table_offset(va)] = 
+        l4e = top_tab[l4_table_offset(va)] =
             entry_from_page(page, __PAGE_HYPERVISOR);
-    } 
-    else if ( l3tab == NULL)
+    }
+    else
         l3tab = map_domain_page(entry_get_pfn(l4e));
 
     l3e = l3tab[l3_table_offset(va)];
-#else
-    l3e = top_tab[l3_table_offset(va)];
-#endif
-    if ( !(entry_get_flags(l3e) & _PAGE_PRESENT) ) 
+    if ( !(entry_get_flags(l3e) & _PAGE_PRESENT) )
     {
         page = alloc_domheap_page(NULL);
         if ( !page )
@@ -1393,14 +1444,29 @@ map_p2m_entry(
 
         l2tab = map_domain_page(page_to_mfn(page));
         memset(l2tab, 0, PAGE_SIZE);
-        l3e = l3tab[l3_table_offset(va)] = 
+        l3e = l3tab[l3_table_offset(va)] =
            entry_from_page(page, __PAGE_HYPERVISOR);
-    } 
-    else if ( l2tab == NULL) 
+    }
+    else
        l2tab = map_domain_page(entry_get_pfn(l3e));
 
+    unmap_domain_page(l3tab);
+#else
+    l3e = top_tab[l3_table_offset(va)];
+
+    /*
+     * NB: when CONFIG_PAGING_LEVELS == 3,
+     * (entry_get_flags(l3e) & _PAGE_PRESENT) is always true here.
+     * alloc_monitor_pagetable should guarantee this.
+     */
+    if ( !(entry_get_flags(l3e) & _PAGE_PRESENT) )
+        BUG();
+
+    l2tab = map_domain_page(entry_get_pfn(l3e));
+#endif
+
     l2e = l2tab[l2_table_offset(va)];
-    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ) 
+    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
     {
         page = alloc_domheap_page(NULL);
         if ( !page )
@@ -1408,14 +1474,16 @@ map_p2m_entry(
 
         l1tab = map_domain_page(page_to_mfn(page));
         memset(l1tab, 0, PAGE_SIZE);
-        l2e = l2tab[l2_table_offset(va)] = 
+        l2e = l2tab[l2_table_offset(va)] =
             l2e_from_page(page, __PAGE_HYPERVISOR);
-    } 
-    else if ( l1tab == NULL) 
+    }
+    else
         l1tab = map_domain_page(l2e_get_pfn(l2e));
 
+    unmap_domain_page(l2tab);
+
     l1e = l1tab[l1_table_offset(va)];
-    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) ) 
+    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
     {
         page = alloc_domheap_page(NULL);
         if ( !page )
@@ -1423,96 +1491,88 @@ map_p2m_entry(
 
         l0tab = map_domain_page(page_to_mfn(page));
         memset(l0tab, 0, PAGE_SIZE);
-        l1e = l1tab[l1_table_offset(va)] = 
+        l1e = l1tab[l1_table_offset(va)] =
             l1e_from_page(page, __PAGE_HYPERVISOR);
     }
-    else if ( l0tab == NULL)
+    else
         l0tab = map_domain_page(l1e_get_pfn(l1e));
 
-    l0tab[gpa & ((PAGE_SIZE / sizeof (mfn)) - 1) ] = mfn;
+    unmap_domain_page(l1tab);
 
-    if ( l2tab )
-    {
-        unmap_domain_page(l2tab);
-        l2tab = NULL;
-    }
-    if ( l1tab )
-    {
-        unmap_domain_page(l1tab);
-        l1tab = NULL;
-    }
-    if ( l0tab )
-    {
-        unmap_domain_page(l0tab);
-        l0tab = NULL;
-    }
+    l0tab[gpfn & ((PAGE_SIZE / sizeof (mfn)) - 1) ] = mfn;
+
+    unmap_domain_page(l0tab);
 
     return 1;
 
 nomem:
-
     return 0;
 }
 
 int
-set_p2m_entry(struct domain *d, unsigned long pfn, unsigned long mfn,
+set_p2m_entry(struct domain *d, unsigned long gpfn, unsigned long mfn,
               struct domain_mmap_cache *l2cache,
               struct domain_mmap_cache *l1cache)
 {
-    unsigned long tabpfn = pagetable_get_pfn(d->vcpu[0]->arch.monitor_table);
-    pgentry_64_t *top;
-    unsigned long va = RO_MPT_VIRT_START + (pfn * sizeof (unsigned long));
+    unsigned long tabmfn = pagetable_get_pfn(d->vcpu[0]->arch.monitor_table);
+    unsigned long va = RO_MPT_VIRT_START + (gpfn * sizeof(unsigned long));
+    pgentry_64_t *top_tab;
    int error;
 
-    ASSERT(tabpfn != 0);
+    ASSERT(tabmfn != 0);
     ASSERT(shadow_lock_is_acquired(d));
 
-    top = map_domain_page_with_cache(tabpfn, l2cache);
-    error = map_p2m_entry(top, va, pfn, mfn);
-    unmap_domain_page_with_cache(top, l2cache);
+    top_tab = map_domain_page_with_cache(tabmfn, l2cache);
 
-    if ( !error )
-        domain_crash_synchronous();
-
-    return 1;
+    if ( !(error = map_p2m_entry(top_tab, va, gpfn, mfn)) )
+        domain_crash(d);
+
+    unmap_domain_page_with_cache(top_tab, l2cache);
+
+    return error;
 }
 
-int
+static int
 alloc_p2m_table(struct domain *d)
 {
     struct list_head *list_ent;
     unsigned long va = RO_MPT_VIRT_START; /* phys_to_machine_mapping */
     pgentry_64_t *top_tab = NULL;
     unsigned long mfn;
-    int gpa;
+    int gpfn, error = 0;
 
-    ASSERT ( pagetable_get_pfn(d->vcpu[0]->arch.monitor_table) );
+    ASSERT( pagetable_get_pfn(d->vcpu[0]->arch.monitor_table) );
 
     top_tab = map_domain_page(
         pagetable_get_pfn(d->vcpu[0]->arch.monitor_table));
 
-
     list_ent = d->page_list.next;
 
-    for ( gpa = 0; list_ent != &d->page_list; gpa++ )
+    for ( gpfn = 0; list_ent != &d->page_list; gpfn++ )
     {
         struct page_info *page;
+
         page = list_entry(list_ent, struct page_info, list);
         mfn = page_to_mfn(page);
 
-        map_p2m_entry(top_tab, va, gpa, mfn);
+        if ( !(error = map_p2m_entry(top_tab, va, gpfn, mfn)) )
+        {
+            domain_crash(d);
+            break;
+        }
+
         list_ent = frame_table[mfn].list.next;
         va += sizeof(mfn);
     }
 
     unmap_domain_page(top_tab);
 
-    return 1;
+    return error;
 }
 
 #if CONFIG_PAGING_LEVELS >= 3
 static void
-free_p2m_table(struct vcpu *v)
+free_p2m_table(struct domain *d)
 {
     unsigned long va;
     l1_pgentry_t *l1tab;
@@ -1520,27 +1580,35 @@ free_p2m_table(struct vcpu *v)
     l2_pgentry_t *l2tab;
     l2_pgentry_t l2e;
 #if CONFIG_PAGING_LEVELS >= 3
-    l3_pgentry_t *l3tab; 
+    l3_pgentry_t *l3tab;
     l3_pgentry_t l3e;
 #endif
 #if CONFIG_PAGING_LEVELS == 4
     int i3;
-    l4_pgentry_t *l4tab; 
+    l4_pgentry_t *l4tab;
     l4_pgentry_t l4e;
 #endif
 
-    ASSERT ( pagetable_get_pfn(v->arch.monitor_table) );
+    ASSERT( pagetable_get_pfn(d->vcpu[0]->arch.monitor_table) );
 
 #if CONFIG_PAGING_LEVELS == 4
     l4tab = map_domain_page(
-        pagetable_get_pfn(v->arch.monitor_table));
+        pagetable_get_pfn(d->vcpu[0]->arch.monitor_table));
 #endif
 #if CONFIG_PAGING_LEVELS == 3
     l3tab = map_domain_page(
-        pagetable_get_pfn(v->arch.monitor_table));
+        pagetable_get_pfn(d->vcpu[0]->arch.monitor_table));
+
+    l3e = l3tab[l3_table_offset(RO_MPT_VIRT_START)];
 
-    va = RO_MPT_VIRT_START;
-    l3e = l3tab[l3_table_offset(va)];
+    /*
+     * NB: when CONFIG_PAGING_LEVELS == 3,
+     * (entry_get_flags(l3e) & _PAGE_PRESENT) is always true here.
+     * alloc_monitor_pagetable should guarantee this.
+     */
+    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+        BUG();
+
     l2tab = map_domain_page(l3e_get_pfn(l3e));
 #endif
 
@@ -1555,8 +1623,8 @@ free_p2m_table(struct vcpu *v)
 
     for ( i3 = 0; i3 < L3_PAGETABLE_ENTRIES; i3++ )
     {
+        l3e = l3tab[l3_table_offset(va)];
 
-        l3e = l3tab[l3_table_offset(va)];
         if ( l3e_get_flags(l3e) & _PAGE_PRESENT )
         {
             int i2;
@@ -1567,12 +1635,13 @@ free_p2m_table(struct vcpu *v)
             {
 #endif
                 l2e = l2tab[l2_table_offset(va)];
+
                 if ( l2e_get_flags(l2e) & _PAGE_PRESENT )
                 {
                     int i1;
 
                     l1tab = map_domain_page(l2e_get_pfn(l2e));
-                    
+
                     /*
                      * unsigned long phys_to_machine_mapping[]
                      */
@@ -1591,7 +1660,7 @@ free_p2m_table(struct vcpu *v)
                 else
                     va += PAGE_SIZE * L1_PAGETABLE_ENTRIES;
 
-#if CONFIG_PAGING_LEVELS == 4 
+#if CONFIG_PAGING_LEVELS == 4
             }
             unmap_domain_page(l2tab);
             free_domheap_page(mfn_to_page(l3e_get_pfn(l3e)));
@@ -1603,7 +1672,7 @@ free_p2m_table(struct vcpu *v)
             free_domheap_page(mfn_to_page(l4e_get_pfn(l4e)));
         }
         else
-            va += PAGE_SIZE * 
+            va += PAGE_SIZE *
                 L1_PAGETABLE_ENTRIES * L2_PAGETABLE_ENTRIES * L3_PAGETABLE_ENTRIES;
 #endif
     }
@@ -1622,7 +1691,7 @@ void shadow_l1_normal_pt_update(
     paddr_t pa, l1_pgentry_t gpte,
     struct domain_mmap_cache *cache)
 {
-    unsigned long sl1mfn; 
+    unsigned long sl1mfn;
     l1_pgentry_t *spl1e, spte;
 
     shadow_lock(d);
--- a/xen/include/asm-x86/shadow_public.h  Fri Feb 24 17:25:41 2006 +0100
+++ b/xen/include/asm-x86/shadow_public.h  Fri Feb 24 17:32:19 2006 +0100
@@ -22,8 +22,6 @@
 #ifndef _XEN_SHADOW_PUBLIC_H
 #define _XEN_SHADOW_PUBLIC_H
 
-extern int alloc_p2m_table(struct domain *d);
-
 #if CONFIG_PAGING_LEVELS >= 3
 #define MFN_PINNED(_x) (mfn_to_page(_x)->u.inuse.type_info & PGT_pinned)
 
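
Item 3's AP logic, visible in both alloc_monitor_pagetable variants above, never rebuilds the p2m for secondary vcpus: it aliases the single top-level entry covering RO_MPT_VIRT_START from vcpu0's monitor table, so every vcpu sees the same phys-to-machine tree. A simplified sketch of that aliasing (flat-array types and the slot index are placeholders; the real code maps the tables with map_domain_page and uses the l2/l4 accessors):

    #include <assert.h>

    #define ENTRIES_PER_TABLE 512       /* entries in one pagetable page */
    typedef unsigned long pgentry_t;    /* simplified pagetable entry */

    /* Hypothetical index of the top-level slot covering RO_MPT_VIRT_START. */
    #define RO_MPT_SLOT 300

    /*
     * Make an AP's monitor table see the p2m tree already built for vcpu0
     * by copying the single top-level entry covering the p2m region.
     * Lower-level tables are shared, not duplicated.
     */
    static void ap_inherit_p2m(pgentry_t ap_top[ENTRIES_PER_TABLE],
                               const pgentry_t bsp_top[ENTRIES_PER_TABLE])
    {
        ap_top[RO_MPT_SLOT] = bsp_top[RO_MPT_SLOT];
    }

    int main(void)
    {
        pgentry_t bsp[ENTRIES_PER_TABLE] = { 0 }, ap[ENTRIES_PER_TABLE] = { 0 };

        bsp[RO_MPT_SLOT] = 0xabcd000 | 1;   /* pretend vcpu0 built the p2m */
        ap_inherit_p2m(ap, bsp);
        assert(ap[RO_MPT_SLOT] == bsp[RO_MPT_SLOT]);
        return 0;
    }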