ia64/xen-unstable
changeset 5356:2d8e63df504a
bitkeeper revision 1.1689 (42a58901_lkUvZPbAZcV8H9a9NNmtg)
Clean up the domain_page.h interfaces: there is now one common header
file, <xen/domain_page.h>, and map_domain_mem() becomes
map_domain_page(), which takes a pfn rather than a paddr.
Signed-off-by: Keir Fraser <keir@xensource.com>
| author | kaf24@firebug.cl.cam.ac.uk |
|---|---|
| date | Tue Jun 07 11:46:09 2005 +0000 (2005-06-07) |
| parents | cc6c1889cdb0 |
| children | 9d9e48be101d |
| files | .rootkeys xen/arch/ia64/dom0_ops.c xen/arch/ia64/pdb-stub.c xen/arch/x86/audit.c xen/arch/x86/dom0_ops.c xen/arch/x86/mm.c xen/arch/x86/setup.c xen/arch/x86/shadow.c xen/arch/x86/traps.c xen/arch/x86/vmx.c xen/arch/x86/vmx_platform.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_32/domain_page.c xen/arch/x86/x86_32/mm.c xen/common/dom0_ops.c xen/common/dom_mem_ops.c xen/common/domain.c xen/common/page_alloc.c xen/include/asm-ia64/domain_page.h xen/include/asm-x86/config.h xen/include/asm-x86/domain_page.h xen/include/asm-x86/shadow.h xen/include/asm-x86/x86_32/domain_page.h xen/include/asm-x86/x86_64/domain_page.h xen/include/xen/domain_page.h xen/include/xen/perfc_defn.h |
line diff
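Before the per-file hunks, the calling-convention change in a nutshell. A condensed C fragment (`mfn` and `addr` are illustrative; `PAGE_SHIFT`/`PAGE_MASK` as in the Xen source):

```c
/* Old interface: physical address in, so callers folded the sub-page
 * offset into the argument. */
char *p = map_domain_mem((mfn << PAGE_SHIFT) | (addr & ~PAGE_MASK));
/* ... use p ... */
unmap_domain_mem(p);

/* New interface: page frame number in, page-aligned mapping out.
 * Any sub-page offset is applied to the returned pointer instead. */
char *q = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
/* ... use q ... */
unmap_domain_page(q);
```

Most of the hunks below are exactly this mechanical rewrite: `map_domain_mem(x << PAGE_SHIFT)` becomes `map_domain_page(x)`, and the handful of callers that relied on a non-page-aligned return value now add the offset themselves.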
1.1 --- a/.rootkeys Tue Jun 07 08:59:04 2005 +0000 1.2 +++ b/.rootkeys Tue Jun 07 11:46:09 2005 +0000 1.3 @@ -1285,7 +1285,6 @@ 421098b6Y3xqcv873Gvg1rQ5CChfFw xen/inclu 1.4 421098b6ZcIrn_gdqjUtdJyCE0YkZQ xen/include/asm-ia64/debugger.h 1.5 421098b6z0zSuW1rcSJK1gR8RUi-fw xen/include/asm-ia64/dom_fw.h 1.6 421098b6Nn0I7hGB8Mkd1Cis0KMkhA xen/include/asm-ia64/domain.h 1.7 -4241e879ry316Y_teC18DuK7mGKaQw xen/include/asm-ia64/domain_page.h 1.8 4241e880hAyo_dk0PPDYj3LsMIvf-Q xen/include/asm-ia64/flushtlb.h 1.9 421098b6X3Fs2yht42TE2ufgKqt2Fw xen/include/asm-ia64/ia64_int.h 1.10 421098b7psFAn8kbeR-vcRCdc860Vw xen/include/asm-ia64/init.h 1.11 @@ -1333,7 +1332,6 @@ 3ddb79c3r9-31dIsewPV3P3i8HALsQ xen/inclu 1.12 3ddb79c34BFiXjBJ_cCKB0aCsV1IDw xen/include/asm-x86/desc.h 1.13 40715b2dTokMLYGSuD58BnxOqyWVew xen/include/asm-x86/div64.h 1.14 4204e7acwzqgXyTAPKa1nM-L7Ec0Qw xen/include/asm-x86/domain.h 1.15 -41febc4bBKTKHhnAu_KPYwgNkHjFlg xen/include/asm-x86/domain_page.h 1.16 41d3eaaeIBzW621S1oa0c2yk7X43qQ xen/include/asm-x86/e820.h 1.17 3ddb79c3NU8Zy40OTrq3D-i30Y3t4A xen/include/asm-x86/fixmap.h 1.18 3e2d29944GI24gf7vOP_7x8EyuqxeA xen/include/asm-x86/flushtlb.h 1.19 @@ -1399,14 +1397,12 @@ 41c0c41243jC1mcArZx_t3YkBL4lTA xen/inclu 1.20 420951dcqyUCe_gXA_XJPu1ix_poKg xen/include/asm-x86/vmx_virpit.h 1.21 41c0c412lQ0NVVN9PsOSznQ-qhOiPA xen/include/asm-x86/vmx_vmcs.h 1.22 418fbcfe_WliJPToeVM-9VStvym-hw xen/include/asm-x86/x86_32/asm_defns.h 1.23 -3e20b82fl1jmQiKdLy7fxMcutfpjWA xen/include/asm-x86/x86_32/domain_page.h 1.24 429c852fi3pvfa9kIjryYK5AGBmXAg xen/include/asm-x86/x86_32/page-2level.h 1.25 429c852fskvSOgcD5EC25_m9um9t4g xen/include/asm-x86/x86_32/page-3level.h 1.26 4208e2a3ZNFroNXbX9OYaOB-xtUyDQ xen/include/asm-x86/x86_32/page.h 1.27 3ddb79c3mbqEM7QQr3zVq7NiBNhouA xen/include/asm-x86/x86_32/regs.h 1.28 3ddb79c3M2n1ROZH6xk3HbyN4CPDqg xen/include/asm-x86/x86_32/uaccess.h 1.29 41bf1717bML6GxpclTWJabiaO5W5vg xen/include/asm-x86/x86_64/asm_defns.h 1.30 -41febc4b1aCGLsm0Y0b_82h7lFtrEA xen/include/asm-x86/x86_64/domain_page.h 1.31 4208e2a3Fktw4ZttKdDxbhvTQ6brfQ xen/include/asm-x86/x86_64/page.h 1.32 404f1bb86rAXB3aLS1vYdcqpJiEcyg xen/include/asm-x86/x86_64/regs.h 1.33 404f1bc4tWkB9Qr8RkKtZGW5eMQzhw xen/include/asm-x86/x86_64/uaccess.h 1.34 @@ -1443,6 +1439,7 @@ 3ddb79c1V44RD26YqCUm-kqIupM37A xen/inclu 1.35 3ddb79c05DdHQ0UxX_jKsXdR4QlMCA xen/include/xen/delay.h 1.36 4294b5efxcDdUVp4XMEE__IFw7nPow xen/include/xen/dmi.h 1.37 40f2b4a2hC3HtChu-ArD8LyojxWMjg xen/include/xen/domain.h 1.38 +3e20b82fl1jmQiKdLy7fxMcutfpjWA xen/include/xen/domain_page.h 1.39 3ddb79c2O729EttZTYu1c8LcsUO_GQ xen/include/xen/elf.h 1.40 3ddb79c0HIghfBF8zFUdmXhOU8i6hA xen/include/xen/errno.h 1.41 3ddb79c1W0lQca8gRV7sN6j3iY4Luw xen/include/xen/event.h
2.1 --- a/xen/arch/ia64/dom0_ops.c Tue Jun 07 08:59:04 2005 +0000 2.2 +++ b/xen/arch/ia64/dom0_ops.c Tue Jun 07 11:46:09 2005 +0000 2.3 @@ -13,12 +13,9 @@ 2.4 #include <public/dom0_ops.h> 2.5 #include <xen/sched.h> 2.6 #include <xen/event.h> 2.7 -#include <asm/domain_page.h> 2.8 -//#include <asm/msr.h> 2.9 #include <asm/pdb.h> 2.10 #include <xen/trace.h> 2.11 #include <xen/console.h> 2.12 -//#include <xen/shadow.h> 2.13 #include <public/sched_ctl.h> 2.14 2.15 #define TRC_DOM0OP_ENTER_BASE 0x00020000
3.1 --- a/xen/arch/ia64/pdb-stub.c Tue Jun 07 08:59:04 2005 +0000 3.2 +++ b/xen/arch/ia64/pdb-stub.c Tue Jun 07 11:46:09 2005 +0000 3.3 @@ -14,8 +14,6 @@ 3.4 #include <xen/sched.h> 3.5 #include <asm/ptrace.h> 3.6 #include <xen/keyhandler.h> 3.7 -//#include <asm/apic.h> 3.8 -#include <asm/domain_page.h> /* [un]map_domain_mem */ 3.9 #include <asm/processor.h> 3.10 #include <asm/pdb.h> 3.11 #include <xen/list.h>
4.1 --- a/xen/arch/x86/audit.c Tue Jun 07 08:59:04 2005 +0000 4.2 +++ b/xen/arch/x86/audit.c Tue Jun 07 11:46:09 2005 +0000 4.3 @@ -122,7 +122,7 @@ int audit_adjust_pgtables(struct domain 4.4 4.5 void adjust_l2_page(unsigned long mfn, int shadow) 4.6 { 4.7 - unsigned long *pt = map_domain_mem(mfn << PAGE_SHIFT); 4.8 + unsigned long *pt = map_domain_page(mfn); 4.9 int i; 4.10 4.11 for ( i = 0; i < l2limit; i++ ) 4.12 @@ -205,12 +205,12 @@ int audit_adjust_pgtables(struct domain 4.13 adjust(hl2page, 0); 4.14 } 4.15 4.16 - unmap_domain_mem(pt); 4.17 + unmap_domain_page(pt); 4.18 } 4.19 4.20 void adjust_hl2_page(unsigned long hl2mfn) 4.21 { 4.22 - unsigned long *pt = map_domain_mem(hl2mfn << PAGE_SHIFT); 4.23 + unsigned long *pt = map_domain_page(hl2mfn); 4.24 int i; 4.25 4.26 for ( i = 0; i < l2limit; i++ ) 4.27 @@ -251,12 +251,12 @@ int audit_adjust_pgtables(struct domain 4.28 } 4.29 } 4.30 4.31 - unmap_domain_mem(pt); 4.32 + unmap_domain_page(pt); 4.33 } 4.34 4.35 void adjust_l1_page(unsigned long l1mfn) 4.36 { 4.37 - unsigned long *pt = map_domain_mem(l1mfn << PAGE_SHIFT); 4.38 + unsigned long *pt = map_domain_page(l1mfn); 4.39 int i; 4.40 4.41 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ ) 4.42 @@ -323,7 +323,7 @@ int audit_adjust_pgtables(struct domain 4.43 } 4.44 } 4.45 4.46 - unmap_domain_mem(pt); 4.47 + unmap_domain_page(pt); 4.48 } 4.49 4.50 void adjust_shadow_tables() 4.51 @@ -615,7 +615,7 @@ void _audit_domain(struct domain *d, int 4.52 unsigned long mfn) 4.53 { 4.54 struct pfn_info *page = &frame_table[mfn]; 4.55 - unsigned long *pt = map_domain_mem(mfn<<PAGE_SHIFT); 4.56 + unsigned long *pt = map_domain_page(mfn); 4.57 int i; 4.58 4.59 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ ) 4.60 @@ -627,7 +627,7 @@ void _audit_domain(struct domain *d, int 4.61 page->count_info, i, pt[i]); 4.62 } 4.63 4.64 - unmap_domain_mem(pt); 4.65 + unmap_domain_page(pt); 4.66 } 4.67 4.68 void scan_for_pfn_in_grant_table(struct domain *d, unsigned xmfn)
5.1 --- a/xen/arch/x86/dom0_ops.c Tue Jun 07 08:59:04 2005 +0000 5.2 +++ b/xen/arch/x86/dom0_ops.c Tue Jun 07 11:46:09 2005 +0000 5.3 @@ -13,7 +13,7 @@ 5.4 #include <public/dom0_ops.h> 5.5 #include <xen/sched.h> 5.6 #include <xen/event.h> 5.7 -#include <asm/domain_page.h> 5.8 +#include <xen/domain_page.h> 5.9 #include <asm/msr.h> 5.10 #include <xen/trace.h> 5.11 #include <xen/console.h>
6.1 --- a/xen/arch/x86/mm.c Tue Jun 07 08:59:04 2005 +0000 6.2 +++ b/xen/arch/x86/mm.c Tue Jun 07 11:46:09 2005 +0000 6.3 @@ -94,12 +94,12 @@ 6.4 #include <xen/perfc.h> 6.5 #include <xen/irq.h> 6.6 #include <xen/softirq.h> 6.7 +#include <xen/domain_page.h> 6.8 #include <asm/shadow.h> 6.9 #include <asm/page.h> 6.10 #include <asm/flushtlb.h> 6.11 #include <asm/io.h> 6.12 #include <asm/uaccess.h> 6.13 -#include <asm/domain_page.h> 6.14 #include <asm/ldt.h> 6.15 #include <asm/x86_emulate.h> 6.16 6.17 @@ -269,17 +269,17 @@ static int alloc_segdesc_page(struct pfn 6.18 struct desc_struct *descs; 6.19 int i; 6.20 6.21 - descs = map_domain_mem((page-frame_table) << PAGE_SHIFT); 6.22 + descs = map_domain_page(page_to_pfn(page)); 6.23 6.24 for ( i = 0; i < 512; i++ ) 6.25 if ( unlikely(!check_descriptor(&descs[i])) ) 6.26 goto fail; 6.27 6.28 - unmap_domain_mem(descs); 6.29 + unmap_domain_page(descs); 6.30 return 1; 6.31 6.32 fail: 6.33 - unmap_domain_mem(descs); 6.34 + unmap_domain_page(descs); 6.35 return 0; 6.36 } 6.37 6.38 @@ -665,14 +665,14 @@ static int alloc_l1_table(struct pfn_inf 6.39 6.40 ASSERT(!shadow_mode_refcounts(d)); 6.41 6.42 - pl1e = map_domain_mem(pfn << PAGE_SHIFT); 6.43 + pl1e = map_domain_page(pfn); 6.44 6.45 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ ) 6.46 if ( is_guest_l1_slot(i) && 6.47 unlikely(!get_page_from_l1e(pl1e[i], d)) ) 6.48 goto fail; 6.49 6.50 - unmap_domain_mem(pl1e); 6.51 + unmap_domain_page(pl1e); 6.52 return 1; 6.53 6.54 fail: 6.55 @@ -680,7 +680,7 @@ static int alloc_l1_table(struct pfn_inf 6.56 if ( is_guest_l1_slot(i) ) 6.57 put_page_from_l1e(pl1e[i], d); 6.58 6.59 - unmap_domain_mem(pl1e); 6.60 + unmap_domain_page(pl1e); 6.61 return 0; 6.62 } 6.63 6.64 @@ -699,7 +699,7 @@ static inline int fixup_pae_linear_mappi 6.65 return 0; 6.66 } 6.67 6.68 - pl2e = map_domain_mem(l3e_get_paddr(pl3e[3])); 6.69 + pl2e = map_domain_page(l3e_get_pfn(pl3e[3])); 6.70 for (i = 0; i < 4; i++) { 6.71 vaddr = LINEAR_PT_VIRT_START + (i << L2_PAGETABLE_SHIFT); 6.72 idx = (vaddr >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES-1); 6.73 @@ -709,7 +709,7 @@ static inline int fixup_pae_linear_mappi 6.74 } else 6.75 pl2e[idx] = l2e_empty(); 6.76 } 6.77 - unmap_domain_mem(pl2e); 6.78 + unmap_domain_page(pl2e); 6.79 6.80 return 1; 6.81 } 6.82 @@ -749,7 +749,7 @@ static int alloc_l2_table(struct pfn_inf 6.83 ASSERT( !shadow_mode_refcounts(d) ); 6.84 6.85 6.86 - pl2e = map_domain_mem(pfn << PAGE_SHIFT); 6.87 + pl2e = map_domain_page(pfn); 6.88 6.89 for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ ) { 6.90 vaddr = i << L2_PAGETABLE_SHIFT; 6.91 @@ -790,7 +790,7 @@ static int alloc_l2_table(struct pfn_inf 6.92 } 6.93 #endif 6.94 6.95 - unmap_domain_mem(pl2e); 6.96 + unmap_domain_page(pl2e); 6.97 return 1; 6.98 6.99 fail: 6.100 @@ -798,7 +798,7 @@ static int alloc_l2_table(struct pfn_inf 6.101 if ( is_guest_l2_slot(type, i) ) 6.102 put_page_from_l2e(pl2e[i], pfn); 6.103 6.104 - unmap_domain_mem(pl2e); 6.105 + unmap_domain_page(pl2e); 6.106 return 0; 6.107 } 6.108 6.109 @@ -815,7 +815,7 @@ static int alloc_l3_table(struct pfn_inf 6.110 6.111 ASSERT( !shadow_mode_refcounts(d) ); 6.112 6.113 - pl3e = map_domain_mem(pfn << PAGE_SHIFT); 6.114 + pl3e = map_domain_page(pfn); 6.115 for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ ) { 6.116 vaddr = i << L3_PAGETABLE_SHIFT; 6.117 if ( is_guest_l3_slot(i) && 6.118 @@ -825,7 +825,7 @@ static int alloc_l3_table(struct pfn_inf 6.119 6.120 if (!fixup_pae_linear_mappings(pl3e)) 6.121 goto fail; 6.122 - unmap_domain_mem(pl3e); 6.123 + unmap_domain_page(pl3e); 6.124 return 
1; 6.125 6.126 fail: 6.127 @@ -833,7 +833,7 @@ static int alloc_l3_table(struct pfn_inf 6.128 if ( is_guest_l3_slot(i) ) 6.129 put_page_from_l3e(pl3e[i], pfn); 6.130 6.131 - unmap_domain_mem(pl3e); 6.132 + unmap_domain_page(pl3e); 6.133 return 0; 6.134 } 6.135 6.136 @@ -891,13 +891,13 @@ static void free_l1_table(struct pfn_inf 6.137 l1_pgentry_t *pl1e; 6.138 int i; 6.139 6.140 - pl1e = map_domain_mem(pfn << PAGE_SHIFT); 6.141 + pl1e = map_domain_page(pfn); 6.142 6.143 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ ) 6.144 if ( is_guest_l1_slot(i) ) 6.145 put_page_from_l1e(pl1e[i], d); 6.146 6.147 - unmap_domain_mem(pl1e); 6.148 + unmap_domain_page(pl1e); 6.149 } 6.150 6.151 6.152 @@ -907,14 +907,14 @@ static void free_l2_table(struct pfn_inf 6.153 l2_pgentry_t *pl2e; 6.154 int i; 6.155 6.156 - pl2e = map_domain_mem(pfn << PAGE_SHIFT); 6.157 + pl2e = map_domain_page(pfn); 6.158 6.159 for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ ) { 6.160 if ( is_guest_l2_slot(page->u.inuse.type_info, i) ) 6.161 put_page_from_l2e(pl2e[i], pfn); 6.162 } 6.163 6.164 - unmap_domain_mem(pl2e); 6.165 + unmap_domain_page(pl2e); 6.166 } 6.167 6.168 6.169 @@ -926,13 +926,13 @@ static void free_l3_table(struct pfn_inf 6.170 l3_pgentry_t *pl3e; 6.171 int i; 6.172 6.173 - pl3e = map_domain_mem(pfn << PAGE_SHIFT); 6.174 + pl3e = map_domain_page(pfn); 6.175 6.176 for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ ) 6.177 if ( is_guest_l3_slot(i) ) 6.178 put_page_from_l3e(pl3e[i], pfn); 6.179 6.180 - unmap_domain_mem(pl3e); 6.181 + unmap_domain_page(pl3e); 6.182 } 6.183 6.184 #endif 6.185 @@ -2011,7 +2011,8 @@ int do_mmu_update( 6.186 break; 6.187 } 6.188 6.189 - va = map_domain_mem_with_cache(req.ptr, &mapcache); 6.190 + va = map_domain_page_with_cache(mfn, &mapcache); 6.191 + va = (void *)((unsigned long)va + (req.ptr & ~PAGE_MASK)); 6.192 page = &frame_table[mfn]; 6.193 6.194 switch ( (type_info = page->u.inuse.type_info) & PGT_type_mask ) 6.195 @@ -2105,7 +2106,7 @@ int do_mmu_update( 6.196 break; 6.197 } 6.198 6.199 - unmap_domain_mem_with_cache(va, &mapcache); 6.200 + unmap_domain_page_with_cache(va, &mapcache); 6.201 6.202 put_page(page); 6.203 break; 6.204 @@ -2452,6 +2453,7 @@ long do_update_descriptor(unsigned long 6.205 struct domain *dom = current->domain; 6.206 unsigned long gpfn = pa >> PAGE_SHIFT; 6.207 unsigned long mfn; 6.208 + unsigned int offset = (pa & ~PAGE_MASK) / sizeof(struct desc_struct); 6.209 struct desc_struct *gdt_pent, d; 6.210 struct pfn_info *page; 6.211 long ret = -EINVAL; 6.212 @@ -2460,18 +2462,18 @@ long do_update_descriptor(unsigned long 6.213 6.214 LOCK_BIGLOCK(dom); 6.215 6.216 - if ( !VALID_MFN(mfn = __gpfn_to_mfn(dom, gpfn)) ) { 6.217 - UNLOCK_BIGLOCK(dom); 6.218 - return -EINVAL; 6.219 - } 6.220 - 6.221 - if ( (pa & 7) || (mfn >= max_page) || !check_descriptor(&d) ) { 6.222 + if ( !VALID_MFN(mfn = __gpfn_to_mfn(dom, gpfn)) || 6.223 + ((pa % sizeof(struct desc_struct)) != 0) || 6.224 + (mfn >= max_page) || 6.225 + !check_descriptor(&d) ) 6.226 + { 6.227 UNLOCK_BIGLOCK(dom); 6.228 return -EINVAL; 6.229 } 6.230 6.231 page = &frame_table[mfn]; 6.232 - if ( unlikely(!get_page(page, dom)) ) { 6.233 + if ( unlikely(!get_page(page, dom)) ) 6.234 + { 6.235 UNLOCK_BIGLOCK(dom); 6.236 return -EINVAL; 6.237 } 6.238 @@ -2505,9 +2507,9 @@ long do_update_descriptor(unsigned long 6.239 } 6.240 6.241 /* All is good so make the update. 
*/ 6.242 - gdt_pent = map_domain_mem((mfn << PAGE_SHIFT) | (pa & ~PAGE_MASK)); 6.243 - memcpy(gdt_pent, &d, 8); 6.244 - unmap_domain_mem(gdt_pent); 6.245 + gdt_pent = map_domain_page(mfn); 6.246 + memcpy(&gdt_pent[offset], &d, 8); 6.247 + unmap_domain_page(gdt_pent); 6.248 6.249 if ( shadow_mode_enabled(dom) ) 6.250 shadow_unlock(dom); 6.251 @@ -2650,7 +2652,7 @@ void ptwr_flush(struct domain *d, const 6.252 6.253 pl1e = d->arch.ptwr[which].pl1e; 6.254 modified = revalidate_l1(d, pl1e, d->arch.ptwr[which].page); 6.255 - unmap_domain_mem(pl1e); 6.256 + unmap_domain_page(pl1e); 6.257 perfc_incr_histo(wpt_updates, modified, PT_UPDATES); 6.258 d->arch.ptwr[which].prev_nr_updates = modified; 6.259 6.260 @@ -2741,13 +2743,14 @@ static int ptwr_emulated_update( 6.261 return X86EMUL_UNHANDLEABLE; 6.262 6.263 /* Checked successfully: do the update (write or cmpxchg). */ 6.264 - pl1e = map_domain_mem(page_to_phys(page) + (addr & ~PAGE_MASK)); 6.265 + pl1e = map_domain_page(page_to_pfn(page)); 6.266 + pl1e = (l1_pgentry_t *)((unsigned long)pl1e + (addr & ~PAGE_MASK)); 6.267 if ( do_cmpxchg ) 6.268 { 6.269 ol1e = l1e_from_intpte(old); 6.270 if ( cmpxchg((unsigned long *)pl1e, old, val) != old ) 6.271 { 6.272 - unmap_domain_mem(pl1e); 6.273 + unmap_domain_page(pl1e); 6.274 put_page_from_l1e(nl1e, d); 6.275 return X86EMUL_CMPXCHG_FAILED; 6.276 } 6.277 @@ -2757,7 +2760,7 @@ static int ptwr_emulated_update( 6.278 ol1e = *pl1e; 6.279 *pl1e = nl1e; 6.280 } 6.281 - unmap_domain_mem(pl1e); 6.282 + unmap_domain_page(pl1e); 6.283 6.284 /* Finally, drop the old PTE. */ 6.285 put_page_from_l1e(ol1e, d); 6.286 @@ -2909,7 +2912,7 @@ int ptwr_do_page_fault(struct domain *d, 6.287 } 6.288 6.289 /* Temporarily map the L1 page, and make a copy of it. */ 6.290 - d->arch.ptwr[which].pl1e = map_domain_mem(pfn << PAGE_SHIFT); 6.291 + d->arch.ptwr[which].pl1e = map_domain_page(pfn); 6.292 memcpy(d->arch.ptwr[which].page, 6.293 d->arch.ptwr[which].pl1e, 6.294 L1_PAGETABLE_ENTRIES * sizeof(l1_pgentry_t)); 6.295 @@ -2922,7 +2925,7 @@ int ptwr_do_page_fault(struct domain *d, 6.296 MEM_LOG("ptwr: Could not update pte at %p", (unsigned long *) 6.297 &linear_pg_table[addr>>PAGE_SHIFT]); 6.298 /* Toss the writable pagetable state and crash. */ 6.299 - unmap_domain_mem(d->arch.ptwr[which].pl1e); 6.300 + unmap_domain_page(d->arch.ptwr[which].pl1e); 6.301 d->arch.ptwr[which].l1va = 0; 6.302 domain_crash(); 6.303 return 0;
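Two hunks above show the recurring pattern for sub-page accesses under the new interface: do_mmu_update() and ptwr_emulated_update() map the whole frame by MFN, then re-apply the offset taken from the guest pointer. Condensed from the do_mmu_update() hunk (`req_ptr` stands in for `req.ptr`):

```c
/* Map the frame, then restore the sub-page offset by hand. */
void *va = map_domain_page_with_cache(mfn, &mapcache);
va = (void *)((unsigned long)va + (req_ptr & ~PAGE_MASK));
/* ... validate and install the entry at va ... */
unmap_domain_page_with_cache(va, &mapcache);
```

Note that unmap accepts any VA inside the previously mapped page, so the offset pointer can be handed back directly; do_update_descriptor() takes the alternative route of indexing (`gdt_pent[offset]`) into the page-aligned mapping.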
7.1 --- a/xen/arch/x86/setup.c Tue Jun 07 08:59:04 2005 +0000 7.2 +++ b/xen/arch/x86/setup.c Tue Jun 07 11:46:09 2005 +0000 7.3 @@ -11,13 +11,13 @@ 7.4 #include <xen/serial.h> 7.5 #include <xen/trace.h> 7.6 #include <xen/multiboot.h> 7.7 +#include <xen/domain_page.h> 7.8 #include <asm/bitops.h> 7.9 #include <asm/smp.h> 7.10 #include <asm/processor.h> 7.11 #include <asm/mpspec.h> 7.12 #include <asm/apic.h> 7.13 #include <asm/desc.h> 7.14 -#include <asm/domain_page.h> 7.15 #include <asm/shadow.h> 7.16 #include <asm/e820.h> 7.17
8.1 --- a/xen/arch/x86/shadow.c Tue Jun 07 08:59:04 2005 +0000 8.2 +++ b/xen/arch/x86/shadow.c Tue Jun 07 11:46:09 2005 +0000 8.3 @@ -23,8 +23,8 @@ 8.4 #include <xen/config.h> 8.5 #include <xen/types.h> 8.6 #include <xen/mm.h> 8.7 +#include <xen/domain_page.h> 8.8 #include <asm/shadow.h> 8.9 -#include <asm/domain_page.h> 8.10 #include <asm/page.h> 8.11 #include <xen/event.h> 8.12 #include <xen/sched.h> 8.13 @@ -222,9 +222,9 @@ alloc_shadow_page(struct domain *d, 8.14 else 8.15 { 8.16 page = alloc_domheap_page(NULL); 8.17 - void *l1 = map_domain_mem(page_to_phys(page)); 8.18 + void *l1 = map_domain_page(page_to_pfn(page)); 8.19 memset(l1, 0, PAGE_SIZE); 8.20 - unmap_domain_mem(l1); 8.21 + unmap_domain_page(l1); 8.22 } 8.23 } 8.24 else 8.25 @@ -315,7 +315,7 @@ alloc_shadow_page(struct domain *d, 8.26 static void inline 8.27 free_shadow_l1_table(struct domain *d, unsigned long smfn) 8.28 { 8.29 - l1_pgentry_t *pl1e = map_domain_mem(smfn << PAGE_SHIFT); 8.30 + l1_pgentry_t *pl1e = map_domain_page(smfn); 8.31 int i; 8.32 struct pfn_info *spage = pfn_to_page(smfn); 8.33 u32 min_max = spage->tlbflush_timestamp; 8.34 @@ -328,13 +328,13 @@ free_shadow_l1_table(struct domain *d, u 8.35 pl1e[i] = l1e_empty(); 8.36 } 8.37 8.38 - unmap_domain_mem(pl1e); 8.39 + unmap_domain_page(pl1e); 8.40 } 8.41 8.42 static void inline 8.43 free_shadow_hl2_table(struct domain *d, unsigned long smfn) 8.44 { 8.45 - l1_pgentry_t *hl2 = map_domain_mem(smfn << PAGE_SHIFT); 8.46 + l1_pgentry_t *hl2 = map_domain_page(smfn); 8.47 int i, limit; 8.48 8.49 SH_VVLOG("%s: smfn=%lx freed", __func__, smfn); 8.50 @@ -354,13 +354,13 @@ free_shadow_hl2_table(struct domain *d, 8.51 put_page(pfn_to_page(l1e_get_pfn(hl2[i]))); 8.52 } 8.53 8.54 - unmap_domain_mem(hl2); 8.55 + unmap_domain_page(hl2); 8.56 } 8.57 8.58 static void inline 8.59 free_shadow_l2_table(struct domain *d, unsigned long smfn, unsigned int type) 8.60 { 8.61 - l2_pgentry_t *pl2e = map_domain_mem(smfn << PAGE_SHIFT); 8.62 + l2_pgentry_t *pl2e = map_domain_page(smfn); 8.63 int i, external = shadow_mode_external(d); 8.64 8.65 for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ ) 8.66 @@ -376,7 +376,7 @@ free_shadow_l2_table(struct domain *d, u 8.67 put_shadow_ref(l2e_get_pfn(pl2e[l2_table_offset(LINEAR_PT_VIRT_START)])); 8.68 } 8.69 8.70 - unmap_domain_mem(pl2e); 8.71 + unmap_domain_page(pl2e); 8.72 } 8.73 8.74 void free_shadow_page(unsigned long smfn) 8.75 @@ -689,8 +689,8 @@ static void alloc_monitor_pagetable(stru 8.76 mmfn_info = alloc_domheap_page(NULL); 8.77 ASSERT(mmfn_info != NULL); 8.78 8.79 - mmfn = (unsigned long) (mmfn_info - frame_table); 8.80 - mpl2e = (l2_pgentry_t *) map_domain_mem(mmfn << PAGE_SHIFT); 8.81 + mmfn = page_to_pfn(mmfn_info); 8.82 + mpl2e = (l2_pgentry_t *)map_domain_page(mmfn); 8.83 memset(mpl2e, 0, PAGE_SIZE); 8.84 8.85 #ifdef __i386__ /* XXX screws x86/64 build */ 8.86 @@ -749,7 +749,7 @@ void free_monitor_pagetable(struct vcpu 8.87 put_shadow_ref(mfn); 8.88 } 8.89 8.90 - unmap_domain_mem(mpl2e); 8.91 + unmap_domain_page(mpl2e); 8.92 8.93 /* 8.94 * Then free monitor_table. 
8.95 @@ -766,37 +766,37 @@ set_p2m_entry(struct domain *d, unsigned 8.96 struct domain_mmap_cache *l2cache, 8.97 struct domain_mmap_cache *l1cache) 8.98 { 8.99 - unsigned long phystab = pagetable_get_paddr(d->arch.phys_table); 8.100 + unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table); 8.101 l2_pgentry_t *l2, l2e; 8.102 l1_pgentry_t *l1; 8.103 struct pfn_info *l1page; 8.104 unsigned long va = pfn << PAGE_SHIFT; 8.105 8.106 - ASSERT( phystab ); 8.107 - 8.108 - l2 = map_domain_mem_with_cache(phystab, l2cache); 8.109 + ASSERT(tabpfn != 0); 8.110 + 8.111 + l2 = map_domain_page_with_cache(tabpfn, l2cache); 8.112 l2e = l2[l2_table_offset(va)]; 8.113 if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ) 8.114 { 8.115 l1page = alloc_domheap_page(NULL); 8.116 if ( !l1page ) 8.117 { 8.118 - unmap_domain_mem_with_cache(l2, l2cache); 8.119 + unmap_domain_page_with_cache(l2, l2cache); 8.120 return 0; 8.121 } 8.122 8.123 - l1 = map_domain_mem_with_cache(page_to_phys(l1page), l1cache); 8.124 + l1 = map_domain_page_with_cache(page_to_pfn(l1page), l1cache); 8.125 memset(l1, 0, PAGE_SIZE); 8.126 - unmap_domain_mem_with_cache(l1, l1cache); 8.127 + unmap_domain_page_with_cache(l1, l1cache); 8.128 8.129 l2e = l2e_from_page(l1page, __PAGE_HYPERVISOR); 8.130 l2[l2_table_offset(va)] = l2e; 8.131 } 8.132 - unmap_domain_mem_with_cache(l2, l2cache); 8.133 - 8.134 - l1 = map_domain_mem_with_cache(l2e_get_paddr(l2e), l1cache); 8.135 + unmap_domain_page_with_cache(l2, l2cache); 8.136 + 8.137 + l1 = map_domain_page_with_cache(l2e_get_pfn(l2e), l1cache); 8.138 l1[l1_table_offset(va)] = l1e_from_pfn(mfn, __PAGE_HYPERVISOR); 8.139 - unmap_domain_mem_with_cache(l1, l1cache); 8.140 + unmap_domain_page_with_cache(l1, l1cache); 8.141 8.142 return 1; 8.143 } 8.144 @@ -818,9 +818,9 @@ alloc_p2m_table(struct domain *d) 8.145 domain_mmap_cache_init(&l2cache); 8.146 8.147 d->arch.phys_table = mk_pagetable(page_to_phys(l2page)); 8.148 - l2 = map_domain_mem_with_cache(page_to_phys(l2page), &l2cache); 8.149 + l2 = map_domain_page_with_cache(page_to_pfn(l2page), &l2cache); 8.150 memset(l2, 0, PAGE_SIZE); 8.151 - unmap_domain_mem_with_cache(l2, &l2cache); 8.152 + unmap_domain_page_with_cache(l2, &l2cache); 8.153 8.154 list_ent = d->page_list.next; 8.155 while ( list_ent != &d->page_list ) 8.156 @@ -888,7 +888,7 @@ int __shadow_mode_enable(struct domain * 8.157 if ( v->arch.guest_vtable && 8.158 (v->arch.guest_vtable != __linear_l2_table) ) 8.159 { 8.160 - unmap_domain_mem(v->arch.guest_vtable); 8.161 + unmap_domain_page(v->arch.guest_vtable); 8.162 } 8.163 if ( (mode & (SHM_translate | SHM_external)) == SHM_translate ) 8.164 v->arch.guest_vtable = __linear_l2_table; 8.165 @@ -901,7 +901,7 @@ int __shadow_mode_enable(struct domain * 8.166 if ( v->arch.shadow_vtable && 8.167 (v->arch.shadow_vtable != __shadow_linear_l2_table) ) 8.168 { 8.169 - unmap_domain_mem(v->arch.shadow_vtable); 8.170 + unmap_domain_page(v->arch.shadow_vtable); 8.171 } 8.172 if ( !(mode & SHM_external) ) 8.173 v->arch.shadow_vtable = __shadow_linear_l2_table; 8.174 @@ -914,7 +914,7 @@ int __shadow_mode_enable(struct domain * 8.175 if ( v->arch.hl2_vtable && 8.176 (v->arch.hl2_vtable != __linear_hl2_table) ) 8.177 { 8.178 - unmap_domain_mem(v->arch.hl2_vtable); 8.179 + unmap_domain_page(v->arch.hl2_vtable); 8.180 } 8.181 if ( (mode & (SHM_translate | SHM_external)) == SHM_translate ) 8.182 v->arch.hl2_vtable = __linear_hl2_table; 8.183 @@ -1073,7 +1073,7 @@ translate_l1pgtable(struct domain *d, l1 8.184 int i; 8.185 l1_pgentry_t *l1; 8.186 8.187 - l1 = 
map_domain_mem(l1mfn << PAGE_SHIFT); 8.188 + l1 = map_domain_page(l1mfn); 8.189 for (i = 0; i < L1_PAGETABLE_ENTRIES; i++) 8.190 { 8.191 if ( is_guest_l1_slot(i) && 8.192 @@ -1085,7 +1085,7 @@ translate_l1pgtable(struct domain *d, l1 8.193 l1[i] = l1e_from_pfn(gpfn, l1e_get_flags(l1[i])); 8.194 } 8.195 } 8.196 - unmap_domain_mem(l1); 8.197 + unmap_domain_page(l1); 8.198 } 8.199 8.200 // This is not general enough to handle arbitrary pagetables 8.201 @@ -1101,7 +1101,7 @@ translate_l2pgtable(struct domain *d, l1 8.202 8.203 ASSERT(shadow_mode_translate(d) && !shadow_mode_external(d)); 8.204 8.205 - l2 = map_domain_mem(l2mfn << PAGE_SHIFT); 8.206 + l2 = map_domain_page(l2mfn); 8.207 for (i = 0; i < L2_PAGETABLE_ENTRIES; i++) 8.208 { 8.209 if ( is_guest_l2_slot(type, i) && 8.210 @@ -1114,7 +1114,7 @@ translate_l2pgtable(struct domain *d, l1 8.211 translate_l1pgtable(d, p2m, mfn); 8.212 } 8.213 } 8.214 - unmap_domain_mem(l2); 8.215 + unmap_domain_page(l2); 8.216 } 8.217 8.218 static void free_shadow_ht_entries(struct domain *d) 8.219 @@ -1404,24 +1404,23 @@ gpfn_to_mfn_foreign(struct domain *d, un 8.220 perfc_incrc(gpfn_to_mfn_foreign); 8.221 8.222 unsigned long va = gpfn << PAGE_SHIFT; 8.223 - unsigned long phystab = pagetable_get_paddr(d->arch.phys_table); 8.224 - l2_pgentry_t *l2 = map_domain_mem(phystab); 8.225 + unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table); 8.226 + l2_pgentry_t *l2 = map_domain_page(tabpfn); 8.227 l2_pgentry_t l2e = l2[l2_table_offset(va)]; 8.228 - unmap_domain_mem(l2); 8.229 + unmap_domain_page(l2); 8.230 if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ) 8.231 { 8.232 printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => 0 l2e=%" PRIpte "\n", 8.233 d->domain_id, gpfn, l2e_get_intpte(l2e)); 8.234 return INVALID_MFN; 8.235 } 8.236 - unsigned long l1tab = l2e_get_paddr(l2e); 8.237 - l1_pgentry_t *l1 = map_domain_mem(l1tab); 8.238 + l1_pgentry_t *l1 = map_domain_page(l2e_get_pfn(l2e)); 8.239 l1_pgentry_t l1e = l1[l1_table_offset(va)]; 8.240 - unmap_domain_mem(l1); 8.241 + unmap_domain_page(l1); 8.242 8.243 #if 0 8.244 - printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => %lx phystab=%lx l2e=%lx l1tab=%lx, l1e=%lx\n", 8.245 - d->domain_id, gpfn, l1_pgentry_val(l1e) >> PAGE_SHIFT, phystab, l2e, l1tab, l1e); 8.246 + printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => %lx tabpfn=%lx l2e=%lx l1tab=%lx, l1e=%lx\n", 8.247 + d->domain_id, gpfn, l1_pgentry_val(l1e) >> PAGE_SHIFT, tabpfn, l2e, l1tab, l1e); 8.248 #endif 8.249 8.250 if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) ) 8.251 @@ -1455,7 +1454,7 @@ shadow_hl2_table(struct domain *d, unsig 8.252 gpfn, gmfn, smfn, hl2mfn); 8.253 perfc_incrc(shadow_hl2_table_count); 8.254 8.255 - hl2 = map_domain_mem(hl2mfn << PAGE_SHIFT); 8.256 + hl2 = map_domain_page(hl2mfn); 8.257 8.258 #ifdef __i386__ 8.259 if ( shadow_mode_external(d) ) 8.260 @@ -1483,7 +1482,7 @@ shadow_hl2_table(struct domain *d, unsig 8.261 l1e_from_pfn(hl2mfn, __PAGE_HYPERVISOR); 8.262 } 8.263 8.264 - unmap_domain_mem(hl2); 8.265 + unmap_domain_page(hl2); 8.266 8.267 return hl2mfn; 8.268 } 8.269 @@ -1510,7 +1509,7 @@ static unsigned long shadow_l2_table( 8.270 BUG(); /* XXX Deal gracefully with failure. */ 8.271 } 8.272 8.273 - spl2e = (l2_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT); 8.274 + spl2e = (l2_pgentry_t *)map_domain_page(smfn); 8.275 8.276 /* Install hypervisor and 2x linear p.t. mapings. 
*/ 8.277 if ( (PGT_base_page_table == PGT_l2_page_table) && 8.278 @@ -1565,7 +1564,7 @@ static unsigned long shadow_l2_table( 8.279 memset(spl2e, 0, L2_PAGETABLE_ENTRIES*sizeof(l2_pgentry_t)); 8.280 } 8.281 8.282 - unmap_domain_mem(spl2e); 8.283 + unmap_domain_page(spl2e); 8.284 8.285 SH_VLOG("shadow_l2_table(%lx -> %lx)", gmfn, smfn); 8.286 return smfn; 8.287 @@ -1776,11 +1775,11 @@ shadow_make_snapshot( 8.288 min *= sizeof(l1_pgentry_t); 8.289 length *= sizeof(l1_pgentry_t); 8.290 8.291 - original = map_domain_mem(gmfn << PAGE_SHIFT); 8.292 - snapshot = map_domain_mem(smfn << PAGE_SHIFT); 8.293 + original = map_domain_page(gmfn); 8.294 + snapshot = map_domain_page(smfn); 8.295 memcpy(snapshot + min, original + min, length); 8.296 - unmap_domain_mem(original); 8.297 - unmap_domain_mem(snapshot); 8.298 + unmap_domain_page(original); 8.299 + unmap_domain_page(snapshot); 8.300 8.301 return smfn; 8.302 } 8.303 @@ -1800,9 +1799,9 @@ shadow_free_snapshot(struct domain *d, s 8.304 // XXX Need to think about how to protect the domain's 8.305 // information less expensively. 8.306 // 8.307 - snapshot = map_domain_mem(entry->snapshot_mfn << PAGE_SHIFT); 8.308 + snapshot = map_domain_page(entry->snapshot_mfn); 8.309 memset(snapshot, 0, PAGE_SIZE); 8.310 - unmap_domain_mem(snapshot); 8.311 + unmap_domain_page(snapshot); 8.312 8.313 put_shadow_ref(entry->snapshot_mfn); 8.314 } 8.315 @@ -1915,7 +1914,7 @@ static int snapshot_entry_matches( 8.316 if ( !smfn ) 8.317 return 0; 8.318 8.319 - snapshot = map_domain_mem(smfn << PAGE_SHIFT); 8.320 + snapshot = map_domain_page(smfn); 8.321 8.322 // This could probably be smarter, but this is sufficent for 8.323 // our current needs. 8.324 @@ -1923,7 +1922,7 @@ static int snapshot_entry_matches( 8.325 entries_match = !l1e_has_changed(guest_pt[index], snapshot[index], 8.326 PAGE_FLAG_MASK); 8.327 8.328 - unmap_domain_mem(snapshot); 8.329 + unmap_domain_page(snapshot); 8.330 8.331 #ifdef PERF_COUNTERS 8.332 if ( entries_match ) 8.333 @@ -2065,7 +2064,7 @@ static u32 remove_all_write_access_in_pt 8.334 unsigned long readonly_gpfn, unsigned long readonly_gmfn, 8.335 u32 max_refs_to_find, unsigned long prediction) 8.336 { 8.337 - l1_pgentry_t *pt = map_domain_mem(pt_mfn << PAGE_SHIFT); 8.338 + l1_pgentry_t *pt = map_domain_page(pt_mfn); 8.339 l1_pgentry_t match; 8.340 unsigned long flags = _PAGE_RW | _PAGE_PRESENT; 8.341 int i; 8.342 @@ -2105,7 +2104,7 @@ static u32 remove_all_write_access_in_pt 8.343 { 8.344 perfc_incrc(remove_write_fast_exit); 8.345 increase_writable_pte_prediction(d, readonly_gpfn, prediction); 8.346 - unmap_domain_mem(pt); 8.347 + unmap_domain_page(pt); 8.348 return found; 8.349 } 8.350 8.351 @@ -2115,7 +2114,7 @@ static u32 remove_all_write_access_in_pt 8.352 break; 8.353 } 8.354 8.355 - unmap_domain_mem(pt); 8.356 + unmap_domain_page(pt); 8.357 8.358 return found; 8.359 #undef MATCH_ENTRY 8.360 @@ -2207,7 +2206,7 @@ int shadow_remove_all_write_access( 8.361 static u32 remove_all_access_in_page( 8.362 struct domain *d, unsigned long l1mfn, unsigned long forbidden_gmfn) 8.363 { 8.364 - l1_pgentry_t *pl1e = map_domain_mem(l1mfn << PAGE_SHIFT); 8.365 + l1_pgentry_t *pl1e = map_domain_page(l1mfn); 8.366 l1_pgentry_t match; 8.367 unsigned long flags = _PAGE_PRESENT; 8.368 int i; 8.369 @@ -2233,7 +2232,7 @@ static u32 remove_all_access_in_page( 8.370 } 8.371 } 8.372 8.373 - unmap_domain_mem(pl1e); 8.374 + unmap_domain_page(pl1e); 8.375 8.376 return count; 8.377 } 8.378 @@ -2321,11 +2320,11 @@ static int resync_all(struct domain *d, 8.379 // Compare 
guest's new contents to its snapshot, validating 8.380 // and updating its shadow as appropriate. 8.381 // 8.382 - guest = map_domain_mem(entry->gmfn << PAGE_SHIFT); 8.383 - snapshot = map_domain_mem(entry->snapshot_mfn << PAGE_SHIFT); 8.384 + guest = map_domain_page(entry->gmfn); 8.385 + snapshot = map_domain_page(entry->snapshot_mfn); 8.386 8.387 if ( smfn ) 8.388 - shadow = map_domain_mem(smfn << PAGE_SHIFT); 8.389 + shadow = map_domain_page(smfn); 8.390 else 8.391 shadow = NULL; 8.392 8.393 @@ -2466,9 +2465,9 @@ static int resync_all(struct domain *d, 8.394 } 8.395 8.396 if ( smfn ) 8.397 - unmap_domain_mem(shadow); 8.398 - unmap_domain_mem(snapshot); 8.399 - unmap_domain_mem(guest); 8.400 + unmap_domain_page(shadow); 8.401 + unmap_domain_page(snapshot); 8.402 + unmap_domain_page(guest); 8.403 8.404 if ( unlikely(unshadow) ) 8.405 { 8.406 @@ -2507,7 +2506,9 @@ void __shadow_sync_all(struct domain *d) 8.407 if ( entry->writable_pl1e & (sizeof(l1_pgentry_t)-1) ) 8.408 continue; 8.409 8.410 - l1_pgentry_t *ppte = map_domain_mem(entry->writable_pl1e); 8.411 + l1_pgentry_t *ppte = (l1_pgentry_t *)( 8.412 + (char *)map_domain_page(entry->writable_pl1e >> PAGE_SHIFT) + 8.413 + (entry->writable_pl1e & ~PAGE_MASK)); 8.414 l1_pgentry_t opte = *ppte; 8.415 l1_pgentry_t npte = opte; 8.416 l1e_remove_flags(npte, _PAGE_RW); 8.417 @@ -2518,7 +2519,7 @@ void __shadow_sync_all(struct domain *d) 8.418 *ppte = npte; 8.419 shadow_put_page_from_l1e(opte, d); 8.420 8.421 - unmap_domain_mem(ppte); 8.422 + unmap_domain_page(ppte); 8.423 } 8.424 8.425 // XXX mafetter: SMP 8.426 @@ -2696,9 +2697,9 @@ void shadow_l1_normal_pt_update( 8.427 (void *)pa, l1e_get_intpte(gpte)); 8.428 l1pte_propagate_from_guest(current->domain, gpte, &spte); 8.429 8.430 - spl1e = map_domain_mem_with_cache(sl1mfn << PAGE_SHIFT, cache); 8.431 + spl1e = map_domain_page_with_cache(sl1mfn, cache); 8.432 spl1e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)] = spte; 8.433 - unmap_domain_mem_with_cache(spl1e, cache); 8.434 + unmap_domain_page_with_cache(spl1e, cache); 8.435 } 8.436 8.437 shadow_unlock(d); 8.438 @@ -2719,10 +2720,10 @@ void shadow_l2_normal_pt_update( 8.439 { 8.440 SH_VVLOG("shadow_l2_normal_pt_update pa=%p, gpde=%" PRIpte, 8.441 (void *)pa, l2e_get_intpte(gpde)); 8.442 - spl2e = map_domain_mem_with_cache(sl2mfn << PAGE_SHIFT, cache); 8.443 + spl2e = map_domain_page_with_cache(sl2mfn, cache); 8.444 validate_pde_change(d, gpde, 8.445 &spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)]); 8.446 - unmap_domain_mem_with_cache(spl2e, cache); 8.447 + unmap_domain_page_with_cache(spl2e, cache); 8.448 } 8.449 8.450 shadow_unlock(d); 8.451 @@ -2831,8 +2832,8 @@ void __update_pagetables(struct vcpu *v) 8.452 if ( max_mode & (SHM_enable | SHM_external) ) 8.453 { 8.454 if ( likely(v->arch.guest_vtable != NULL) ) 8.455 - unmap_domain_mem(v->arch.guest_vtable); 8.456 - v->arch.guest_vtable = map_domain_mem(gmfn << PAGE_SHIFT); 8.457 + unmap_domain_page(v->arch.guest_vtable); 8.458 + v->arch.guest_vtable = map_domain_page(gmfn); 8.459 } 8.460 8.461 /* 8.462 @@ -2855,8 +2856,8 @@ void __update_pagetables(struct vcpu *v) 8.463 if ( max_mode == SHM_external ) 8.464 { 8.465 if ( v->arch.shadow_vtable ) 8.466 - unmap_domain_mem(v->arch.shadow_vtable); 8.467 - v->arch.shadow_vtable = map_domain_mem(smfn << PAGE_SHIFT); 8.468 + unmap_domain_page(v->arch.shadow_vtable); 8.469 + v->arch.shadow_vtable = map_domain_page(smfn); 8.470 } 8.471 8.472 /* 8.473 @@ -2871,8 +2872,8 @@ void __update_pagetables(struct vcpu *v) 8.474 if ( unlikely(!(hl2mfn = 
__shadow_status(d, gpfn, PGT_hl2_shadow))) ) 8.475 hl2mfn = shadow_hl2_table(d, gpfn, gmfn, smfn); 8.476 if ( v->arch.hl2_vtable ) 8.477 - unmap_domain_mem(v->arch.hl2_vtable); 8.478 - v->arch.hl2_vtable = map_domain_mem(hl2mfn << PAGE_SHIFT); 8.479 + unmap_domain_page(v->arch.hl2_vtable); 8.480 + v->arch.hl2_vtable = map_domain_page(hl2mfn); 8.481 } 8.482 8.483 /* 8.484 @@ -2934,22 +2935,22 @@ mark_shadows_as_reflecting_snapshot(stru 8.485 8.486 if ( (smfn = __shadow_status(d, gpfn, PGT_l1_shadow)) ) 8.487 { 8.488 - l1e = map_domain_mem(smfn << PAGE_SHIFT); 8.489 + l1e = map_domain_page(smfn); 8.490 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ ) 8.491 if ( is_guest_l1_slot(i) && 8.492 (l1e_get_flags(l1e[i]) & _PAGE_PRESENT) ) 8.493 l1e_add_flags(l1e[i], SHADOW_REFLECTS_SNAPSHOT); 8.494 - unmap_domain_mem(l1e); 8.495 + unmap_domain_page(l1e); 8.496 } 8.497 8.498 if ( (smfn = __shadow_status(d, gpfn, PGT_l2_shadow)) ) 8.499 { 8.500 - l2e = map_domain_mem(smfn << PAGE_SHIFT); 8.501 + l2e = map_domain_page(smfn); 8.502 for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ ) 8.503 if ( is_guest_l2_slot(0, i) && 8.504 (l2e_get_flags(l2e[i]) & _PAGE_PRESENT) ) 8.505 l2e_add_flags(l2e[i], SHADOW_REFLECTS_SNAPSHOT); 8.506 - unmap_domain_mem(l2e); 8.507 + unmap_domain_page(l2e); 8.508 } 8.509 } 8.510 8.511 @@ -3117,21 +3118,21 @@ static int check_l1_table( 8.512 { 8.513 snapshot_mfn = __shadow_status(d, gpfn, PGT_snapshot); 8.514 ASSERT(snapshot_mfn); 8.515 - p_snapshot = map_domain_mem(snapshot_mfn << PAGE_SHIFT); 8.516 + p_snapshot = map_domain_page(snapshot_mfn); 8.517 } 8.518 8.519 - p_guest = map_domain_mem(gmfn << PAGE_SHIFT); 8.520 - p_shadow = map_domain_mem(smfn << PAGE_SHIFT); 8.521 + p_guest = map_domain_page(gmfn); 8.522 + p_shadow = map_domain_page(smfn); 8.523 8.524 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ ) 8.525 errors += check_pte(v, p_guest+i, p_shadow+i, 8.526 p_snapshot ? p_snapshot+i : NULL, 8.527 1, l2_idx, i); 8.528 8.529 - unmap_domain_mem(p_shadow); 8.530 - unmap_domain_mem(p_guest); 8.531 + unmap_domain_page(p_shadow); 8.532 + unmap_domain_page(p_guest); 8.533 if ( p_snapshot ) 8.534 - unmap_domain_mem(p_snapshot); 8.535 + unmap_domain_page(p_snapshot); 8.536 8.537 return errors; 8.538 } 8.539 @@ -3146,8 +3147,8 @@ int check_l2_table( 8.540 struct vcpu *v, unsigned long gmfn, unsigned long smfn, int oos_pdes) 8.541 { 8.542 struct domain *d = v->domain; 8.543 - l2_pgentry_t *gpl2e = (l2_pgentry_t *)map_domain_mem(gmfn << PAGE_SHIFT); 8.544 - l2_pgentry_t *spl2e = (l2_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT); 8.545 + l2_pgentry_t *gpl2e = (l2_pgentry_t *)map_domain_page(gmfn); 8.546 + l2_pgentry_t *spl2e = (l2_pgentry_t *)map_domain_page(smfn); 8.547 l2_pgentry_t match; 8.548 int i; 8.549 int errors = 0; 8.550 @@ -3219,8 +3220,8 @@ int check_l2_table( 8.551 NULL, 8.552 2, i, 0); 8.553 8.554 - unmap_domain_mem(spl2e); 8.555 - unmap_domain_mem(gpl2e); 8.556 + unmap_domain_page(spl2e); 8.557 + unmap_domain_page(gpl2e); 8.558 8.559 #if 1 8.560 if ( errors ) 8.561 @@ -3267,8 +3268,8 @@ int _check_pagetable(struct vcpu *v, cha 8.562 8.563 errors += check_l2_table(v, ptbase_mfn, smfn, oos_pdes); 8.564 8.565 - gpl2e = (l2_pgentry_t *) map_domain_mem( ptbase_mfn << PAGE_SHIFT ); 8.566 - spl2e = (l2_pgentry_t *) map_domain_mem( smfn << PAGE_SHIFT ); 8.567 + gpl2e = (l2_pgentry_t *) map_domain_page(ptbase_mfn); 8.568 + spl2e = (l2_pgentry_t *) map_domain_page(smfn); 8.569 8.570 /* Go back and recurse. 
*/ 8.571 #ifdef __i386__ 8.572 @@ -3292,8 +3293,8 @@ int _check_pagetable(struct vcpu *v, cha 8.573 } 8.574 } 8.575 8.576 - unmap_domain_mem(spl2e); 8.577 - unmap_domain_mem(gpl2e); 8.578 + unmap_domain_page(spl2e); 8.579 + unmap_domain_page(gpl2e); 8.580 8.581 #if 0 8.582 SH_VVLOG("PT verified : l2_present = %d, l1_present = %d",
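set_p2m_entry() above is the motivating case for the *_with_cache variants: a p2m walk touches the same L2 and L1 frames repeatedly, and each level gets its own cache so the mapping stays live between calls. A sketch of the surrounding usage, with the loop bound `nr_pfns` and the lookup helper `lookup_mfn()` purely illustrative:

```c
struct domain_mmap_cache l2cache, l1cache;
unsigned long pfn;

domain_mmap_cache_init(&l2cache);
domain_mmap_cache_init(&l1cache);

for ( pfn = 0; pfn < nr_pfns; pfn++ )
    /* Successive entries usually hit the same L2/L1 frames, so the
     * caches avoid a map/unmap pair per iteration. */
    set_p2m_entry(d, pfn, lookup_mfn(d, pfn), &l2cache, &l1cache);

/* Destroy unmaps whatever each cache still holds. */
domain_mmap_cache_destroy(&l2cache);
domain_mmap_cache_destroy(&l1cache);
```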
9.1 --- a/xen/arch/x86/traps.c Tue Jun 07 08:59:04 2005 +0000 9.2 +++ b/xen/arch/x86/traps.c Tue Jun 07 11:46:09 2005 +0000 9.3 @@ -39,8 +39,8 @@ 9.4 #include <xen/irq.h> 9.5 #include <xen/perfc.h> 9.6 #include <xen/softirq.h> 9.7 +#include <xen/domain_page.h> 9.8 #include <asm/shadow.h> 9.9 -#include <asm/domain_page.h> 9.10 #include <asm/system.h> 9.11 #include <asm/io.h> 9.12 #include <asm/atomic.h>
10.1 --- a/xen/arch/x86/vmx.c Tue Jun 07 08:59:04 2005 +0000 10.2 +++ b/xen/arch/x86/vmx.c Tue Jun 07 11:46:09 2005 +0000 10.3 @@ -24,6 +24,7 @@ 10.4 #include <xen/sched.h> 10.5 #include <xen/irq.h> 10.6 #include <xen/softirq.h> 10.7 +#include <xen/domain_page.h> 10.8 #include <asm/current.h> 10.9 #include <asm/io.h> 10.10 #include <asm/shadow.h> 10.11 @@ -102,7 +103,7 @@ void stop_vmx(void) 10.12 } 10.13 10.14 /* 10.15 - * Not all cases recevie valid value in the VM-exit instruction length field. 10.16 + * Not all cases receive valid value in the VM-exit instruction length field. 10.17 */ 10.18 #define __get_instruction_length(len) \ 10.19 __vmread(INSTRUCTION_LEN, &(len)); \ 10.20 @@ -118,8 +119,6 @@ static void inline __update_guest_eip(un 10.21 } 10.22 10.23 10.24 -#include <asm/domain_page.h> 10.25 - 10.26 static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 10.27 { 10.28 unsigned long eip; 10.29 @@ -468,23 +467,24 @@ enum { COPY_IN = 0, COPY_OUT }; 10.30 static inline int 10.31 vmx_copy(void *buf, unsigned long laddr, int size, int dir) 10.32 { 10.33 - unsigned char *addr; 10.34 + char *addr; 10.35 unsigned long mfn; 10.36 10.37 - if ((size + (laddr & (PAGE_SIZE - 1))) >= PAGE_SIZE) { 10.38 + if ( (size + (laddr & (PAGE_SIZE - 1))) >= PAGE_SIZE ) 10.39 + { 10.40 printf("vmx_copy exceeds page boundary\n"); 10.41 - return 0; 10.42 + return 0; 10.43 } 10.44 10.45 mfn = phys_to_machine_mapping(laddr >> PAGE_SHIFT); 10.46 - addr = map_domain_mem((mfn << PAGE_SHIFT) | (laddr & ~PAGE_MASK)); 10.47 + addr = (char *)map_domain_page(mfn) + (laddr & ~PAGE_MASK); 10.48 10.49 if (dir == COPY_IN) 10.50 memcpy(buf, addr, size); 10.51 else 10.52 memcpy(addr, buf, size); 10.53 10.54 - unmap_domain_mem(addr); 10.55 + unmap_domain_page(addr); 10.56 return 1; 10.57 } 10.58
11.1 --- a/xen/arch/x86/vmx_platform.c Tue Jun 07 08:59:04 2005 +0000 11.2 +++ b/xen/arch/x86/vmx_platform.c Tue Jun 07 11:46:09 2005 +0000 11.3 @@ -21,7 +21,7 @@ 11.4 #include <xen/types.h> 11.5 #include <xen/mm.h> 11.6 #include <asm/shadow.h> 11.7 -#include <asm/domain_page.h> 11.8 +#include <xen/domain_page.h> 11.9 #include <asm/page.h> 11.10 #include <xen/event.h> 11.11 #include <xen/trace.h> 11.12 @@ -411,43 +411,41 @@ int inst_copy_from_guest(unsigned char * 11.13 { 11.14 l1_pgentry_t gpte; 11.15 unsigned long mfn; 11.16 - unsigned long ma; 11.17 - unsigned char * inst_start; 11.18 + unsigned char *inst_start; 11.19 int remaining = 0; 11.20 11.21 - if (inst_len > MAX_INST_LEN || inst_len <= 0) { 11.22 + if ( (inst_len > MAX_INST_LEN) || (inst_len <= 0) ) 11.23 return 0; 11.24 - } 11.25 11.26 - if (vmx_paging_enabled(current)) { 11.27 + if ( vmx_paging_enabled(current) ) 11.28 + { 11.29 gpte = gva_to_gpte(guest_eip); 11.30 mfn = phys_to_machine_mapping(l1e_get_pfn(gpte)); 11.31 /* Does this cross a page boundary ? */ 11.32 - if ((guest_eip & PAGE_MASK) != ((guest_eip + inst_len) & PAGE_MASK)) { 11.33 + if ( (guest_eip & PAGE_MASK) != ((guest_eip + inst_len) & PAGE_MASK) ) 11.34 + { 11.35 remaining = (guest_eip + inst_len) & ~PAGE_MASK; 11.36 inst_len -= remaining; 11.37 } 11.38 - 11.39 - } else { 11.40 + } 11.41 + else 11.42 + { 11.43 mfn = phys_to_machine_mapping(guest_eip >> PAGE_SHIFT); 11.44 } 11.45 - ma = (mfn << PAGE_SHIFT) | (guest_eip & (PAGE_SIZE - 1)); 11.46 - inst_start = (unsigned char *)map_domain_mem(ma); 11.47 - 11.48 - memcpy((char *)buf, inst_start, inst_len); 11.49 - unmap_domain_mem(inst_start); 11.50 11.51 - if (remaining) { 11.52 + inst_start = map_domain_page(mfn); 11.53 + memcpy((char *)buf, inst_start + (guest_eip & ~PAGE_MASK), inst_len); 11.54 + unmap_domain_page(inst_start); 11.55 + 11.56 + if ( remaining ) 11.57 + { 11.58 gpte = gva_to_gpte(guest_eip+inst_len+remaining); 11.59 mfn = phys_to_machine_mapping(l1e_get_pfn(gpte)); 11.60 + inst_start = map_domain_page(mfn); 11.61 + memcpy((char *)buf+inst_len, inst_start, remaining); 11.62 + unmap_domain_page(inst_start); 11.63 + } 11.64 11.65 - ma = (mfn << PAGE_SHIFT); 11.66 - inst_start = (unsigned char *)map_domain_mem(ma); 11.67 - 11.68 - memcpy((char *)buf+inst_len, inst_start, remaining); 11.69 - unmap_domain_mem(inst_start); 11.70 - 11.71 - } 11.72 return inst_len+remaining; 11.73 } 11.74
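The rewritten inst_copy_from_guest() splits a page-crossing fetch into two page-granular mappings. The split arithmetic is the subtle part; a standalone demo of the same computation (the example values are mine, not from the changeset):

```c
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long guest_eip = 0x00100ffd; /* 3 bytes before a boundary */
    int inst_len = 5, remaining = 0;

    /* Same split computation as inst_copy_from_guest(): if the fetch
     * crosses a page, the second page's share goes into 'remaining'. */
    if ( (guest_eip & PAGE_MASK) != ((guest_eip + inst_len) & PAGE_MASK) )
    {
        remaining = (guest_eip + inst_len) & ~PAGE_MASK;
        inst_len -= remaining;
    }

    printf("first chunk: %d bytes, second chunk: %d bytes\n",
           inst_len, remaining); /* -> 3 and 2 */
    return 0;
}
```

Each chunk then gets its own map_domain_page()/memcpy()/unmap_domain_page() sequence, as the hunk shows.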
12.1 --- a/xen/arch/x86/vmx_vmcs.c Tue Jun 07 08:59:04 2005 +0000 12.2 +++ b/xen/arch/x86/vmx_vmcs.c Tue Jun 07 11:46:09 2005 +0000 12.3 @@ -22,7 +22,7 @@ 12.4 #include <xen/mm.h> 12.5 #include <xen/lib.h> 12.6 #include <xen/errno.h> 12.7 - 12.8 +#include <xen/domain_page.h> 12.9 #include <asm/current.h> 12.10 #include <asm/cpufeature.h> 12.11 #include <asm/processor.h> 12.12 @@ -31,7 +31,6 @@ 12.13 #include <xen/event.h> 12.14 #include <xen/kernel.h> 12.15 #include <public/io/ioreq.h> 12.16 -#include <asm/domain_page.h> 12.17 12.18 #ifdef CONFIG_VMX 12.19 12.20 @@ -122,8 +121,9 @@ int vmx_setup_platform(struct vcpu *d, s 12.21 addr = regs->edi; 12.22 offset = (addr & ~PAGE_MASK); 12.23 addr = round_pgdown(addr); 12.24 + 12.25 mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT); 12.26 - p = map_domain_mem(mpfn << PAGE_SHIFT); 12.27 + p = map_domain_page(mpfn); 12.28 12.29 e820p = (struct e820entry *) ((unsigned long) p + offset); 12.30 12.31 @@ -131,28 +131,28 @@ int vmx_setup_platform(struct vcpu *d, s 12.32 print_e820_memory_map(e820p, n); 12.33 #endif 12.34 12.35 - for (i = 0; i < n; i++) { 12.36 - if (e820p[i].type == E820_SHARED_PAGE) { 12.37 + for ( i = 0; i < n; i++ ) 12.38 + { 12.39 + if ( e820p[i].type == E820_SHARED_PAGE ) 12.40 + { 12.41 gpfn = (e820p[i].addr >> PAGE_SHIFT); 12.42 break; 12.43 } 12.44 } 12.45 12.46 - if (gpfn == 0) { 12.47 - printk("No shared Page ?\n"); 12.48 - unmap_domain_mem(p); 12.49 + if ( gpfn == 0 ) 12.50 + { 12.51 + unmap_domain_page(p); 12.52 return -1; 12.53 } 12.54 - unmap_domain_mem(p); 12.55 12.56 - mpfn = phys_to_machine_mapping(gpfn); 12.57 - p = map_domain_mem(mpfn << PAGE_SHIFT); 12.58 - ASSERT(p != NULL); 12.59 + unmap_domain_page(p); 12.60 12.61 /* Initialise shared page */ 12.62 + mpfn = phys_to_machine_mapping(gpfn); 12.63 + p = map_domain_page(mpfn); 12.64 memset(p, 0, PAGE_SIZE); 12.65 - 12.66 - d->arch.arch_vmx.vmx_platform.shared_page_va = (unsigned long) p; 12.67 + d->arch.arch_vmx.vmx_platform.shared_page_va = (unsigned long)p; 12.68 12.69 return 0; 12.70 }
13.1 --- a/xen/arch/x86/x86_32/domain_page.c Tue Jun 07 08:59:04 2005 +0000 13.2 +++ b/xen/arch/x86/x86_32/domain_page.c Tue Jun 07 11:46:09 2005 +0000 13.3 @@ -15,16 +15,19 @@ 13.4 #include <xen/sched.h> 13.5 #include <xen/mm.h> 13.6 #include <xen/perfc.h> 13.7 +#include <xen/domain_page.h> 13.8 #include <asm/current.h> 13.9 -#include <asm/domain_page.h> 13.10 #include <asm/flushtlb.h> 13.11 #include <asm/hardirq.h> 13.12 13.13 +#define MAPCACHE_ORDER 10 13.14 +#define MAPCACHE_ENTRIES (1 << MAPCACHE_ORDER) 13.15 + 13.16 l1_pgentry_t *mapcache; 13.17 static unsigned int map_idx, epoch, shadow_epoch[NR_CPUS]; 13.18 static spinlock_t map_lock = SPIN_LOCK_UNLOCKED; 13.19 13.20 -void *map_domain_mem(unsigned long pa) 13.21 +void *map_domain_page(unsigned long pfn) 13.22 { 13.23 unsigned long va; 13.24 unsigned int idx, cpu = smp_processor_id(); 13.25 @@ -34,7 +37,7 @@ void *map_domain_mem(unsigned long pa) 13.26 #endif 13.27 13.28 ASSERT(!in_irq()); 13.29 - perfc_incrc(map_domain_mem_count); 13.30 + perfc_incrc(map_domain_page_count); 13.31 13.32 spin_lock(&map_lock); 13.33 13.34 @@ -58,15 +61,15 @@ void *map_domain_mem(unsigned long pa) 13.35 } 13.36 while ( l1e_get_flags(cache[idx]) & _PAGE_PRESENT ); 13.37 13.38 - cache[idx] = l1e_from_paddr(pa & PAGE_MASK, __PAGE_HYPERVISOR); 13.39 + cache[idx] = l1e_from_pfn(pfn, __PAGE_HYPERVISOR); 13.40 13.41 spin_unlock(&map_lock); 13.42 13.43 - va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT) + (pa & ~PAGE_MASK); 13.44 + va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT); 13.45 return (void *)va; 13.46 } 13.47 13.48 -void unmap_domain_mem(void *va) 13.49 +void unmap_domain_page(void *va) 13.50 { 13.51 unsigned int idx; 13.52 ASSERT((void *)MAPCACHE_VIRT_START <= va);
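With the pfn interface, the x86_32 mapcache hands out page-aligned VAs: slot `idx` maps to `MAPCACHE_VIRT_START + (idx << PAGE_SHIFT)`. The unmap side is not shown in this hunk, so treat the inverse below as an assumption about the implementation rather than verbatim source:

```c
/* Sketch of the slot <-> VA arithmetic used by the mapcache. */
static inline void *slot_to_va(unsigned int idx)
{
    return (void *)(MAPCACHE_VIRT_START +
                    ((unsigned long)idx << PAGE_SHIFT));
}

static inline unsigned int va_to_slot(void *va)
{
    /* Any address inside the mapped page shifts down to the same slot,
     * which is why callers may pass an interior pointer to
     * unmap_domain_page(). */
    return (unsigned int)
        (((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT);
}
```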
14.1 --- a/xen/arch/x86/x86_32/mm.c Tue Jun 07 08:59:04 2005 +0000 14.2 +++ b/xen/arch/x86/x86_32/mm.c Tue Jun 07 11:46:09 2005 +0000 14.3 @@ -22,11 +22,13 @@ 14.4 #include <xen/lib.h> 14.5 #include <xen/init.h> 14.6 #include <xen/mm.h> 14.7 +#include <xen/sched.h> 14.8 #include <asm/current.h> 14.9 #include <asm/page.h> 14.10 #include <asm/flushtlb.h> 14.11 #include <asm/fixmap.h> 14.12 -#include <asm/domain_page.h> 14.13 + 14.14 +extern l1_pgentry_t *mapcache; 14.15 14.16 unsigned int PAGE_HYPERVISOR = __PAGE_HYPERVISOR; 14.17 unsigned int PAGE_HYPERVISOR_NOCACHE = __PAGE_HYPERVISOR_NOCACHE;
15.1 --- a/xen/common/dom0_ops.c Tue Jun 07 08:59:04 2005 +0000 15.2 +++ b/xen/common/dom0_ops.c Tue Jun 07 11:46:09 2005 +0000 15.3 @@ -10,15 +10,15 @@ 15.4 #include <xen/types.h> 15.5 #include <xen/lib.h> 15.6 #include <xen/mm.h> 15.7 -#include <public/dom0_ops.h> 15.8 #include <xen/sched.h> 15.9 #include <xen/domain.h> 15.10 #include <xen/event.h> 15.11 -#include <asm/domain_page.h> 15.12 +#include <xen/domain_page.h> 15.13 #include <xen/trace.h> 15.14 #include <xen/console.h> 15.15 +#include <asm/current.h> 15.16 +#include <public/dom0_ops.h> 15.17 #include <public/sched_ctl.h> 15.18 -#include <asm/current.h> 15.19 15.20 extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op); 15.21 extern void arch_getdomaininfo_ctxt(
16.1 --- a/xen/common/dom_mem_ops.c Tue Jun 07 08:59:04 2005 +0000 16.2 +++ b/xen/common/dom_mem_ops.c Tue Jun 07 11:46:09 2005 +0000 16.3 @@ -15,7 +15,6 @@ 16.4 #include <xen/event.h> 16.5 #include <xen/shadow.h> 16.6 #include <asm/current.h> 16.7 -#include <asm/domain_page.h> 16.8 #include <asm/hardirq.h> 16.9 16.10 /*
17.1 --- a/xen/common/domain.c Tue Jun 07 08:59:04 2005 +0000 17.2 +++ b/xen/common/domain.c Tue Jun 07 11:46:09 2005 +0000 17.3 @@ -15,9 +15,9 @@ 17.4 #include <xen/time.h> 17.5 #include <xen/console.h> 17.6 #include <xen/softirq.h> 17.7 +#include <xen/domain_page.h> 17.8 +#include <asm/debugger.h> 17.9 #include <public/dom0_ops.h> 17.10 -#include <asm/domain_page.h> 17.11 -#include <asm/debugger.h> 17.12 17.13 /* Both these structures are protected by the domlist_lock. */ 17.14 rwlock_t domlist_lock = RW_LOCK_UNLOCKED;
18.1 --- a/xen/common/page_alloc.c Tue Jun 07 08:59:04 2005 +0000 18.2 +++ b/xen/common/page_alloc.c Tue Jun 07 11:46:09 2005 +0000 18.3 @@ -31,7 +31,7 @@ 18.4 #include <xen/irq.h> 18.5 #include <xen/softirq.h> 18.6 #include <xen/shadow.h> 18.7 -#include <asm/domain_page.h> 18.8 +#include <xen/domain_page.h> 18.9 #include <asm/page.h> 18.10 18.11 /* 18.12 @@ -383,9 +383,9 @@ void scrub_heap_pages(void) 18.13 } 18.14 else 18.15 { 18.16 - p = map_domain_mem(pfn << PAGE_SHIFT); 18.17 + p = map_domain_page(pfn); 18.18 clear_page(p); 18.19 - unmap_domain_mem(p); 18.20 + unmap_domain_page(p); 18.21 } 18.22 } 18.23 18.24 @@ -674,9 +674,9 @@ static void page_scrub_softirq(void) 18.25 { 18.26 pg = list_entry(ent, struct pfn_info, list); 18.27 ent = ent->prev; 18.28 - p = map_domain_mem(page_to_phys(pg)); 18.29 + p = map_domain_page(page_to_pfn(pg)); 18.30 clear_page(p); 18.31 - unmap_domain_mem(p); 18.32 + unmap_domain_page(p); 18.33 free_heap_pages(MEMZONE_DOM, pg, 0); 18.34 } 18.35 } while ( (NOW() - start) < MILLISECS(1) );
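The two scrub sites show the two usual ways callers now produce the argument: a raw frame number, or page_to_pfn() on a `struct pfn_info *` (replacing the old page_to_phys()). Condensed from the hunks above:

```c
/* scrub_heap_pages(): already holds a frame number. */
p = map_domain_page(pfn);
clear_page(p);
unmap_domain_page(p);

/* page_scrub_softirq(): holds a struct pfn_info *, so convert it. */
p = map_domain_page(page_to_pfn(pg));
clear_page(p);
unmap_domain_page(p);
```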
19.1 --- a/xen/include/asm-ia64/domain_page.h Tue Jun 07 08:59:04 2005 +0000 19.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 19.3 @@ -1,14 +0,0 @@ 19.4 -/****************************************************************************** 19.5 - * domain_page.h 19.6 - * 19.7 - * This is a trivial no-op on ia64, where we can 1:1 map all RAM. 19.8 - */ 19.9 - 19.10 -#ifndef __ASM_DOMAIN_PAGE_H__ 19.11 -#define __ASM_DOMAIN_PAGE_H__ 19.12 - 19.13 -#define map_domain_mem(_pa) phys_to_virt(_pa) 19.14 -#define unmap_domain_mem(_va) ((void)(_va)) 19.15 - 19.16 -#endif /* __ASM_DOMAIN_PAGE_H__ */ 19.17 -
20.1 --- a/xen/include/asm-x86/config.h Tue Jun 07 08:59:04 2005 +0000 20.2 +++ b/xen/include/asm-x86/config.h Tue Jun 07 11:46:09 2005 +0000 20.3 @@ -189,7 +189,8 @@ extern unsigned long _end; /* standard E 20.4 20.5 #elif defined(__i386__) 20.6 20.7 -#define CONFIG_X86_32 1 20.8 +#define CONFIG_X86_32 1 20.9 +#define CONFIG_DOMAIN_PAGE 1 20.10 20.11 #define asmlinkage __attribute__((regparm(0))) 20.12 20.13 @@ -198,7 +199,7 @@ extern unsigned long _end; /* standard E 20.14 * ------ ------ 20.15 * I/O remapping area ( 4MB) 20.16 * Direct-map (1:1) area [Xen code/data/heap] (12MB) 20.17 - * map_domain_mem cache ( 4MB) 20.18 + * map_domain_page cache ( 4MB) 20.19 * Per-domain mappings ( 4MB) 20.20 * Shadow linear pagetable ( 4MB) ( 8MB) 20.21 * Guest linear pagetable ( 4MB) ( 8MB)
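The 4MB figure in the layout comment follows directly from the cache geometry now defined in x86_32/domain_page.c: 2^MAPCACHE_ORDER one-page slots. A quick standalone check, assuming the constants from this changeset:

```c
#include <stdio.h>

#define PAGE_SHIFT       12
#define MAPCACHE_ORDER   10
#define MAPCACHE_ENTRIES (1 << MAPCACHE_ORDER)

int main(void)
{
    /* 1024 slots * 4KB pages = 4MB of VA reserved for map_domain_page. */
    printf("mapcache VA footprint: %lu MB\n",
           ((unsigned long)MAPCACHE_ENTRIES << PAGE_SHIFT) >> 20);
    return 0;
}
```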
21.1 --- a/xen/include/asm-x86/domain_page.h Tue Jun 07 08:59:04 2005 +0000 21.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 21.3 @@ -1,5 +0,0 @@ 21.4 -#ifdef __x86_64__ 21.5 -#include <asm/x86_64/domain_page.h> 21.6 -#else 21.7 -#include <asm/x86_32/domain_page.h> 21.8 -#endif
22.1 --- a/xen/include/asm-x86/shadow.h Tue Jun 07 08:59:04 2005 +0000 22.2 +++ b/xen/include/asm-x86/shadow.h Tue Jun 07 11:46:09 2005 +0000 22.3 @@ -27,14 +27,12 @@ 22.4 #include <xen/perfc.h> 22.5 #include <xen/sched.h> 22.6 #include <xen/mm.h> 22.7 +#include <xen/domain_page.h> 22.8 #include <asm/current.h> 22.9 #include <asm/flushtlb.h> 22.10 #include <asm/processor.h> 22.11 -#include <asm/domain_page.h> 22.12 +#include <asm/vmx.h> 22.13 #include <public/dom0_ops.h> 22.14 -#ifdef CONFIG_VMX 22.15 -#include <asm/vmx.h> 22.16 -#endif 22.17 22.18 /* Shadow PT operation mode : shadow-mode variable in arch_domain. */ 22.19
23.1 --- a/xen/include/asm-x86/x86_32/domain_page.h Tue Jun 07 08:59:04 2005 +0000 23.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 23.3 @@ -1,85 +0,0 @@ 23.4 -/****************************************************************************** 23.5 - * domain_page.h 23.6 - * 23.7 - * Allow temporary mapping of domain page frames into Xen space. 23.8 - */ 23.9 - 23.10 -#ifndef __ASM_DOMAIN_PAGE_H__ 23.11 -#define __ASM_DOMAIN_PAGE_H__ 23.12 - 23.13 -#include <xen/config.h> 23.14 -#include <xen/sched.h> 23.15 - 23.16 -extern l1_pgentry_t *mapcache; 23.17 -#define MAPCACHE_ORDER 10 23.18 -#define MAPCACHE_ENTRIES (1 << MAPCACHE_ORDER) 23.19 - 23.20 -/* 23.21 - * Maps a given physical address, returning corresponding virtual address. 23.22 - * The entire page containing that VA is now accessible until a 23.23 - * corresponding call to unmap_domain_mem(). 23.24 - */ 23.25 -extern void *map_domain_mem(unsigned long pa); 23.26 - 23.27 -/* 23.28 - * Pass a VA within a page previously mapped with map_domain_mem(). 23.29 - * That page will then be removed from the mapping lists. 23.30 - */ 23.31 -extern void unmap_domain_mem(void *va); 23.32 - 23.33 -#define DMCACHE_ENTRY_VALID 1UL 23.34 -#define DMCACHE_ENTRY_HELD 2UL 23.35 - 23.36 -struct domain_mmap_cache { 23.37 - unsigned long pa; 23.38 - void *va; 23.39 -}; 23.40 - 23.41 -static inline void 23.42 -domain_mmap_cache_init(struct domain_mmap_cache *cache) 23.43 -{ 23.44 - ASSERT(cache != NULL); 23.45 - cache->pa = 0; 23.46 -} 23.47 - 23.48 -static inline void * 23.49 -map_domain_mem_with_cache(unsigned long pa, struct domain_mmap_cache *cache) 23.50 -{ 23.51 - ASSERT(cache != NULL); 23.52 - BUG_ON(cache->pa & DMCACHE_ENTRY_HELD); 23.53 - 23.54 - if ( likely(cache->pa) ) 23.55 - { 23.56 - cache->pa |= DMCACHE_ENTRY_HELD; 23.57 - if ( likely((pa & PAGE_MASK) == (cache->pa & PAGE_MASK)) ) 23.58 - goto done; 23.59 - unmap_domain_mem(cache->va); 23.60 - } 23.61 - 23.62 - cache->pa = (pa & PAGE_MASK) | DMCACHE_ENTRY_HELD | DMCACHE_ENTRY_VALID; 23.63 - cache->va = map_domain_mem(cache->pa); 23.64 - 23.65 - done: 23.66 - return (void *)(((unsigned long)cache->va & PAGE_MASK) | 23.67 - (pa & ~PAGE_MASK)); 23.68 -} 23.69 - 23.70 -static inline void 23.71 -unmap_domain_mem_with_cache(void *va, struct domain_mmap_cache *cache) 23.72 -{ 23.73 - ASSERT(cache != NULL); 23.74 - cache->pa &= ~DMCACHE_ENTRY_HELD; 23.75 -} 23.76 - 23.77 -static inline void 23.78 -domain_mmap_cache_destroy(struct domain_mmap_cache *cache) 23.79 -{ 23.80 - ASSERT(cache != NULL); 23.81 - if ( likely(cache->pa) ) 23.82 - { 23.83 - unmap_domain_mem(cache->va); 23.84 - cache->pa = 0; 23.85 - } 23.86 -} 23.87 - 23.88 -#endif /* __ASM_DOMAIN_PAGE_H__ */
24.1 --- a/xen/include/asm-x86/x86_64/domain_page.h Tue Jun 07 08:59:04 2005 +0000 24.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 24.3 @@ -1,21 +0,0 @@ 24.4 -/****************************************************************************** 24.5 - * domain_page.h 24.6 - * 24.7 - * This is a trivial no-op on x86/64, where we can 1:1 map all RAM. 24.8 - */ 24.9 - 24.10 -#ifndef __ASM_DOMAIN_PAGE_H__ 24.11 -#define __ASM_DOMAIN_PAGE_H__ 24.12 - 24.13 -#define map_domain_mem(_pa) phys_to_virt(_pa) 24.14 -#define unmap_domain_mem(_va) ((void)(_va)) 24.15 - 24.16 -struct domain_mmap_cache { 24.17 -}; 24.18 - 24.19 -#define domain_mmap_cache_init(_c) ((void)(_c)) 24.20 -#define map_domain_mem_with_cache(_p,_c) (map_domain_mem(_p)) 24.21 -#define unmap_domain_mem_with_cache(_v,_c) ((void)(_v)) 24.22 -#define domain_mmap_cache_destroy(_c) ((void)(_c)) 24.23 - 24.24 -#endif /* __ASM_DOMAIN_PAGE_H__ */
25.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 25.2 +++ b/xen/include/xen/domain_page.h Tue Jun 07 11:46:09 2005 +0000 25.3 @@ -0,0 +1,100 @@ 25.4 +/****************************************************************************** 25.5 + * domain_page.h 25.6 + * 25.7 + * Allow temporary mapping of domain page frames into Xen space. 25.8 + */ 25.9 + 25.10 +#ifndef __XEN_DOMAIN_PAGE_H__ 25.11 +#define __XEN_DOMAIN_PAGE_H__ 25.12 + 25.13 +#include <xen/config.h> 25.14 +#include <xen/mm.h> 25.15 + 25.16 +#ifdef CONFIG_DOMAIN_PAGE 25.17 + 25.18 +/* 25.19 + * Maps a given page frame, returning the mmap'ed virtual address. The page is 25.20 + * now accessible until a corresponding call to unmap_domain_page(). 25.21 + */ 25.22 +extern void *map_domain_page(unsigned long pfn); 25.23 + 25.24 +/* 25.25 + * Pass a VA within a page previously mapped with map_domain_page(). 25.26 + * That page will then be removed from the mapping lists. 25.27 + */ 25.28 +extern void unmap_domain_page(void *va); 25.29 + 25.30 +#define DMCACHE_ENTRY_VALID 1U 25.31 +#define DMCACHE_ENTRY_HELD 2U 25.32 + 25.33 +struct domain_mmap_cache { 25.34 + unsigned long pfn; 25.35 + void *va; 25.36 + unsigned int flags; 25.37 +}; 25.38 + 25.39 +static inline void 25.40 +domain_mmap_cache_init(struct domain_mmap_cache *cache) 25.41 +{ 25.42 + ASSERT(cache != NULL); 25.43 + cache->flags = 0; 25.44 +} 25.45 + 25.46 +static inline void * 25.47 +map_domain_page_with_cache(unsigned long pfn, struct domain_mmap_cache *cache) 25.48 +{ 25.49 + ASSERT(cache != NULL); 25.50 + BUG_ON(cache->flags & DMCACHE_ENTRY_HELD); 25.51 + 25.52 + if ( likely(cache->flags & DMCACHE_ENTRY_VALID) ) 25.53 + { 25.54 + cache->flags |= DMCACHE_ENTRY_HELD; 25.55 + if ( likely(pfn == cache->pfn) ) 25.56 + goto done; 25.57 + unmap_domain_page(cache->va); 25.58 + } 25.59 + 25.60 + cache->pfn = pfn; 25.61 + cache->va = map_domain_page(pfn); 25.62 + cache->flags = DMCACHE_ENTRY_HELD | DMCACHE_ENTRY_VALID; 25.63 + 25.64 + done: 25.65 + return cache->va; 25.66 +} 25.67 + 25.68 +static inline void 25.69 +unmap_domain_page_with_cache(void *va, struct domain_mmap_cache *cache) 25.70 +{ 25.71 + ASSERT(cache != NULL); 25.72 + cache->flags &= ~DMCACHE_ENTRY_HELD; 25.73 +} 25.74 + 25.75 +static inline void 25.76 +domain_mmap_cache_destroy(struct domain_mmap_cache *cache) 25.77 +{ 25.78 + ASSERT(cache != NULL); 25.79 + BUG_ON(cache->flags & DMCACHE_ENTRY_HELD); 25.80 + 25.81 + if ( likely(cache->flags & DMCACHE_ENTRY_VALID) ) 25.82 + { 25.83 + unmap_domain_page(cache->va); 25.84 + cache->flags = 0; 25.85 + } 25.86 +} 25.87 + 25.88 +#else /* !CONFIG_DOMAIN_PAGE */ 25.89 + 25.90 +#define map_domain_page(pfn) phys_to_virt((pfn)<<PAGE_SHIFT) 25.91 +#define unmap_domain_page(va) ((void)(va)) 25.92 + 25.93 +struct domain_mmap_cache { 25.94 +}; 25.95 + 25.96 +#define domain_mmap_cache_init(c) ((void)(c)) 25.97 +#define map_domain_page_with_cache(pfn,c) (map_domain_page(pfn)) 25.98 +#define unmap_domain_page_with_cache(va,c) ((void)(va)) 25.99 +#define domain_mmap_cache_destroy(c) ((void)(c)) 25.100 + 25.101 +#endif /* !CONFIG_DOMAIN_PAGE */ 25.102 + 25.103 +#endif /* __XEN_DOMAIN_PAGE_H__ */
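Putting the new header's cache API together (the API is as defined above; `start_pfn`, `end_pfn`, and the per-frame work are illustrative):

```c
struct domain_mmap_cache cache;
unsigned long pfn;
void *va;

domain_mmap_cache_init(&cache);

for ( pfn = start_pfn; pfn < end_pfn; pfn++ )
{
    /* A repeat of the cached pfn reuses the live mapping; a new pfn
     * unmaps the old one first (see map_domain_page_with_cache above). */
    va = map_domain_page_with_cache(pfn, &cache);
    /* ... read or modify the frame ... */
    unmap_domain_page_with_cache(va, &cache); /* clears HELD, keeps VALID */
}

domain_mmap_cache_destroy(&cache); /* unmaps any still-cached frame */
```

Compared with the deleted x86_32 header, which packed the VALID/HELD bits into the low bits of a cached physical address, the generic version keys the cache on the pfn and keeps the state bits in a separate flags word, so the hit test is a plain `pfn == cache->pfn` comparison.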
26.1 --- a/xen/include/xen/perfc_defn.h Tue Jun 07 08:59:04 2005 +0000
26.2 +++ b/xen/include/xen/perfc_defn.h Tue Jun 07 11:46:09 2005 +0000
26.3 @@ -1,112 +1,127 @@
26.4 +#ifndef __XEN_PERFC_DEFN_H__
26.5 +#define __XEN_PERFC_DEFN_H__
26.6 +
26.7 #define PERFC_MAX_PT_UPDATES 64
26.8 #define PERFC_PT_UPDATES_BUCKET_SIZE 3
26.9 -PERFCOUNTER_ARRAY( wpt_updates, "writable pt updates", PERFC_MAX_PT_UPDATES )
26.10 -PERFCOUNTER_ARRAY( bpt_updates, "batched pt updates", PERFC_MAX_PT_UPDATES )
26.11 -PERFCOUNTER_ARRAY( l1_entries_checked, "l1 entries checked", PERFC_MAX_PT_UPDATES )
26.12 -PERFCOUNTER_ARRAY( shm_l2_updates, "shadow mode L2 pt updates", PERFC_MAX_PT_UPDATES )
26.13 -PERFCOUNTER_ARRAY( shm_hl2_updates, "shadow mode HL2 pt updates", PERFC_MAX_PT_UPDATES )
26.14 -PERFCOUNTER_ARRAY( snapshot_copies, "entries copied per snapshot", PERFC_MAX_PT_UPDATES )
26.15 +PERFCOUNTER_ARRAY(wpt_updates, "writable pt updates",
26.16 +                  PERFC_MAX_PT_UPDATES)
26.17 +PERFCOUNTER_ARRAY(bpt_updates, "batched pt updates",
26.18 +                  PERFC_MAX_PT_UPDATES)
26.19 +PERFCOUNTER_ARRAY(l1_entries_checked, "l1 entries checked",
26.20 +                  PERFC_MAX_PT_UPDATES)
26.21 +PERFCOUNTER_ARRAY(shm_l2_updates, "shadow mode L2 pt updates",
26.22 +                  PERFC_MAX_PT_UPDATES)
26.23 +PERFCOUNTER_ARRAY(shm_hl2_updates, "shadow mode HL2 pt updates",
26.24 +                  PERFC_MAX_PT_UPDATES)
26.25 +PERFCOUNTER_ARRAY(snapshot_copies, "entries copied per snapshot",
26.26 +                  PERFC_MAX_PT_UPDATES)
26.27 
26.28 -PERFCOUNTER_ARRAY( hypercalls, "hypercalls", NR_hypercalls )
26.29 -PERFCOUNTER_ARRAY( exceptions, "exceptions", 32 )
26.30 +PERFCOUNTER_ARRAY(hypercalls, "hypercalls", NR_hypercalls)
26.31 +PERFCOUNTER_ARRAY(exceptions, "exceptions", 32)
26.32 
26.33 #define VMX_PERF_EXIT_REASON_SIZE 37
26.34 #define VMX_PERF_VECTOR_SIZE 0x20
26.35 -PERFCOUNTER_ARRAY( vmexits, "vmexits", VMX_PERF_EXIT_REASON_SIZE )
26.36 -PERFCOUNTER_ARRAY( cause_vector, "cause vector", VMX_PERF_VECTOR_SIZE )
26.37 +PERFCOUNTER_ARRAY(vmexits, "vmexits", VMX_PERF_EXIT_REASON_SIZE)
26.38 +PERFCOUNTER_ARRAY(cause_vector, "cause vector", VMX_PERF_VECTOR_SIZE)
26.39 
26.40 -PERFCOUNTER_CPU (seg_fixups, "segmentation fixups" )
26.41 +PERFCOUNTER_CPU(seg_fixups, "segmentation fixups")
26.42 
26.43 -PERFCOUNTER_CPU( irqs, "#interrupts" )
26.44 -PERFCOUNTER_CPU( ipis, "#IPIs" )
26.45 -PERFCOUNTER_CPU( irq_time, "cycles spent in irq handler" )
26.46 +PERFCOUNTER_CPU(irqs, "#interrupts")
26.47 +PERFCOUNTER_CPU(ipis, "#IPIs")
26.48 +PERFCOUNTER_CPU(irq_time, "cycles spent in irq handler")
26.49 
26.50 -PERFCOUNTER_CPU( apic_timer, "apic timer interrupts" )
26.51 -PERFCOUNTER_CPU( ac_timer_max, "ac_timer max error (ns)" )
26.52 -PERFCOUNTER_CPU( sched_irq, "sched: timer" )
26.53 -PERFCOUNTER_CPU( sched_run, "sched: runs through scheduler" )
26.54 -PERFCOUNTER_CPU( sched_ctx, "sched: context switches" )
26.55 +PERFCOUNTER_CPU(apic_timer, "apic timer interrupts")
26.56 +PERFCOUNTER_CPU(ac_timer_max, "ac_timer max error (ns)")
26.57 +PERFCOUNTER_CPU(sched_irq, "sched: timer")
26.58 +PERFCOUNTER_CPU(sched_run, "sched: runs through scheduler")
26.59 +PERFCOUNTER_CPU(sched_ctx, "sched: context switches")
26.60 
26.61 -PERFCOUNTER_CPU( domain_page_tlb_flush, "domain page tlb flushes" )
26.62 -PERFCOUNTER_CPU( need_flush_tlb_flush, "PG_need_flush tlb flushes" )
26.63 +PERFCOUNTER_CPU(domain_page_tlb_flush, "domain page tlb flushes")
26.64 +PERFCOUNTER_CPU(need_flush_tlb_flush, "PG_need_flush tlb flushes")
26.65 
26.66 -PERFCOUNTER_CPU( calls_to_mmu_update, "calls_to_mmu_update" )
26.67 -PERFCOUNTER_CPU( num_page_updates, "num_page_updates" )
26.68 -PERFCOUNTER_CPU( calls_to_update_va, "calls_to_update_va_map" )
26.69 -PERFCOUNTER_CPU( page_faults, "page faults" )
26.70 -PERFCOUNTER_CPU( copy_user_faults, "copy_user faults" )
26.71 +PERFCOUNTER_CPU(calls_to_mmu_update, "calls_to_mmu_update")
26.72 +PERFCOUNTER_CPU(num_page_updates, "num_page_updates")
26.73 +PERFCOUNTER_CPU(calls_to_update_va, "calls_to_update_va_map")
26.74 +PERFCOUNTER_CPU(page_faults, "page faults")
26.75 +PERFCOUNTER_CPU(copy_user_faults, "copy_user faults")
26.76 
26.77 -PERFCOUNTER_CPU(shadow_fault_calls, "calls to shadow_fault")
26.78 -PERFCOUNTER_CPU(shadow_fault_bail_pde_not_present, "sf bailed due to pde not present")
26.79 -PERFCOUNTER_CPU(shadow_fault_bail_pte_not_present, "sf bailed due to pte not present")
26.80 -PERFCOUNTER_CPU(shadow_fault_bail_ro_mapping, "sf bailed due to a ro mapping")
26.81 -PERFCOUNTER_CPU(shadow_fault_fixed, "sf fixed the pgfault")
26.82 -PERFCOUNTER_CPU(write_fault_bail, "sf bailed due to write_fault")
26.83 -PERFCOUNTER_CPU(read_fault_bail, "sf bailed due to read_fault")
26.84 +PERFCOUNTER_CPU(shadow_fault_calls, "calls to shadow_fault")
26.85 +PERFCOUNTER_CPU(shadow_fault_bail_pde_not_present,
26.86 +                "sf bailed due to pde not present")
26.87 +PERFCOUNTER_CPU(shadow_fault_bail_pte_not_present,
26.88 +                "sf bailed due to pte not present")
26.89 +PERFCOUNTER_CPU(shadow_fault_bail_ro_mapping,
26.90 +                "sf bailed due to a ro mapping")
26.91 +PERFCOUNTER_CPU(shadow_fault_fixed, "sf fixed the pgfault")
26.92 +PERFCOUNTER_CPU(write_fault_bail, "sf bailed due to write_fault")
26.93 +PERFCOUNTER_CPU(read_fault_bail, "sf bailed due to read_fault")
26.94 
26.95 -PERFCOUNTER_CPU( map_domain_mem_count, "map_domain_mem count" )
26.96 -PERFCOUNTER_CPU( ptwr_emulations, "writable pt emulations" )
26.97 +PERFCOUNTER_CPU(map_domain_page_count, "map_domain_page count")
26.98 +PERFCOUNTER_CPU(ptwr_emulations, "writable pt emulations")
26.99 
26.100 -PERFCOUNTER_CPU( shadow_l2_table_count, "shadow_l2_table count" )
26.101 -PERFCOUNTER_CPU( shadow_l1_table_count, "shadow_l1_table count" )
26.102 -PERFCOUNTER_CPU( unshadow_table_count, "unshadow_table count" )
26.103 -PERFCOUNTER_CPU( shadow_fixup_count, "shadow_fixup count" )
26.104 -PERFCOUNTER_CPU( shadow_update_va_fail1, "shadow_update_va_fail1" )
26.105 -PERFCOUNTER_CPU( shadow_update_va_fail2, "shadow_update_va_fail2" )
26.106 +PERFCOUNTER_CPU(shadow_l2_table_count, "shadow_l2_table count")
26.107 +PERFCOUNTER_CPU(shadow_l1_table_count, "shadow_l1_table count")
26.108 +PERFCOUNTER_CPU(unshadow_table_count, "unshadow_table count")
26.109 +PERFCOUNTER_CPU(shadow_fixup_count, "shadow_fixup count")
26.110 +PERFCOUNTER_CPU(shadow_update_va_fail1, "shadow_update_va_fail1")
26.111 +PERFCOUNTER_CPU(shadow_update_va_fail2, "shadow_update_va_fail2")
26.112 
26.113 /* STATUS counters do not reset when 'P' is hit */
26.114 -PERFSTATUS( shadow_l2_pages, "current # shadow L2 pages" )
26.115 -PERFSTATUS( shadow_l1_pages, "current # shadow L1 pages" )
26.116 -PERFSTATUS( hl2_table_pages, "current # hl2 pages" )
26.117 -PERFSTATUS( snapshot_pages, "current # fshadow snapshot pages" )
26.118 -PERFSTATUS( writable_pte_predictions, "# writable pte predictions")
26.119 -PERFSTATUS( free_l1_pages, "current # free shadow L1 pages" )
26.120 +PERFSTATUS(shadow_l2_pages, "current # shadow L2 pages")
26.121 +PERFSTATUS(shadow_l1_pages, "current # shadow L1 pages")
26.122 +PERFSTATUS(hl2_table_pages, "current # hl2 pages")
26.123 +PERFSTATUS(snapshot_pages, "current # fshadow snapshot pages")
26.124 +PERFSTATUS(writable_pte_predictions, "# writable pte predictions")
26.125 +PERFSTATUS(free_l1_pages, "current # free shadow L1 pages")
26.126 
26.127 -PERFCOUNTER_CPU( check_pagetable, "calls to check_pagetable" )
26.128 -PERFCOUNTER_CPU( check_all_pagetables, "calls to check_all_pagetables" )
26.129 +PERFCOUNTER_CPU(check_pagetable, "calls to check_pagetable")
26.130 +PERFCOUNTER_CPU(check_all_pagetables, "calls to check_all_pagetables")
26.131 
26.132 -PERFCOUNTER_CPU( shadow_hl2_table_count, "shadow_hl2_table count" )
26.133 -PERFCOUNTER_CPU( shadow_set_l1e_force_map, "shadow_set_l1e forced to map l1" )
26.134 -PERFCOUNTER_CPU( shadow_set_l1e_unlinked, "shadow_set_l1e found unlinked l1" )
26.135 -PERFCOUNTER_CPU( shadow_set_l1e_fail, "shadow_set_l1e failed (no sl1)" )
26.136 -PERFCOUNTER_CPU( shadow_invlpg_faults, "shadow_invlpg's get_user faulted")
26.137 -PERFCOUNTER_CPU( unshadow_l2_count, "unpinned L2 count")
26.138 +PERFCOUNTER_CPU(shadow_hl2_table_count, "shadow_hl2_table count")
26.139 +PERFCOUNTER_CPU(shadow_set_l1e_force_map, "shadow_set_l1e forced to map l1")
26.140 +PERFCOUNTER_CPU(shadow_set_l1e_unlinked, "shadow_set_l1e found unlinked l1")
26.141 +PERFCOUNTER_CPU(shadow_set_l1e_fail, "shadow_set_l1e failed (no sl1)")
26.142 +PERFCOUNTER_CPU(shadow_invlpg_faults, "shadow_invlpg's get_user faulted")
26.143 +PERFCOUNTER_CPU(unshadow_l2_count, "unpinned L2 count")
26.144 
26.145 PERFCOUNTER_CPU(shadow_status_shortcut, "fastpath miss on shadow cache")
26.146 -PERFCOUNTER_CPU(shadow_status_calls, "calls to ___shadow_status" )
26.147 -PERFCOUNTER_CPU(shadow_status_miss, "missed shadow cache" )
26.148 -PERFCOUNTER_CPU(shadow_status_hit_head, "hits on head of bucket" )
26.149 -PERFCOUNTER_CPU(shadow_max_type, "calls to shadow_max_type" )
26.150 +PERFCOUNTER_CPU(shadow_status_calls, "calls to ___shadow_status")
26.151 +PERFCOUNTER_CPU(shadow_status_miss, "missed shadow cache")
26.152 +PERFCOUNTER_CPU(shadow_status_hit_head, "hits on head of bucket")
26.153 +PERFCOUNTER_CPU(shadow_max_type, "calls to shadow_max_type")
26.154 
26.155 -PERFCOUNTER_CPU(shadow_sync_all, "calls to shadow_sync_all")
26.156 -PERFCOUNTER_CPU(shadow_sync_va, "calls to shadow_sync_va")
26.157 -PERFCOUNTER_CPU(resync_l1, "resync L1 page")
26.158 -PERFCOUNTER_CPU(resync_l2, "resync L2 page")
26.159 -PERFCOUNTER_CPU(resync_hl2, "resync HL2 page")
26.160 -PERFCOUNTER_CPU(shadow_make_snapshot, "snapshots created")
26.161 -PERFCOUNTER_CPU(shadow_mark_mfn_out_of_sync_calls, "calls to shadow_mk_out_of_sync")
26.162 -PERFCOUNTER_CPU(shadow_out_of_sync_calls, "calls to shadow_out_of_sync")
26.163 -PERFCOUNTER_CPU(snapshot_entry_matches_calls, "calls to ss_entry_matches")
26.164 -PERFCOUNTER_CPU(snapshot_entry_matches_true, "ss_entry_matches returns true")
26.165 +PERFCOUNTER_CPU(shadow_sync_all, "calls to shadow_sync_all")
26.166 +PERFCOUNTER_CPU(shadow_sync_va, "calls to shadow_sync_va")
26.167 +PERFCOUNTER_CPU(resync_l1, "resync L1 page")
26.168 +PERFCOUNTER_CPU(resync_l2, "resync L2 page")
26.169 +PERFCOUNTER_CPU(resync_hl2, "resync HL2 page")
26.170 +PERFCOUNTER_CPU(shadow_make_snapshot, "snapshots created")
26.171 +PERFCOUNTER_CPU(shadow_mark_mfn_out_of_sync_calls,
26.172 +                "calls to shadow_mk_out_of_sync")
26.173 +PERFCOUNTER_CPU(shadow_out_of_sync_calls, "calls to shadow_out_of_sync")
26.174 +PERFCOUNTER_CPU(snapshot_entry_matches_calls, "calls to ss_entry_matches")
26.175 +PERFCOUNTER_CPU(snapshot_entry_matches_true, "ss_entry_matches returns true")
26.176 
26.177 -PERFCOUNTER_CPU(validate_pte_calls, "calls to validate_pte_change")
26.178 -PERFCOUNTER_CPU(validate_pte_changes1, "validate_pte makes changes1")
26.179 -PERFCOUNTER_CPU(validate_pte_changes2, "validate_pte makes changes2")
26.180 -PERFCOUNTER_CPU(validate_pte_changes3, "validate_pte makes changes3")
26.181 -PERFCOUNTER_CPU(validate_pte_changes4, "validate_pte makes changes4")
26.182 -PERFCOUNTER_CPU(validate_pde_calls, "calls to validate_pde_change")
26.183 -PERFCOUNTER_CPU(validate_pde_changes, "validate_pde makes changes")
26.184 -PERFCOUNTER_CPU(shadow_get_page_fail, "shadow_get_page_from_l1e fails" )
26.185 -PERFCOUNTER_CPU(validate_hl2e_calls, "calls to validate_hl2e_change")
26.186 -PERFCOUNTER_CPU(validate_hl2e_changes, "validate_hl2e makes changes")
26.187 -PERFCOUNTER_CPU(exception_fixed, "pre-exception fixed")
26.188 -PERFCOUNTER_CPU(gpfn_to_mfn_foreign, "calls to gpfn_to_mfn_foreign")
26.189 -PERFCOUNTER_CPU(remove_all_access, "calls to remove_all_access")
26.190 -PERFCOUNTER_CPU(remove_write_access, "calls to remove_write_access")
26.191 -PERFCOUNTER_CPU(remove_write_access_easy, "easy outs of remove_write_access")
26.192 -PERFCOUNTER_CPU(remove_write_no_work, "no work in remove_write_access")
26.193 -PERFCOUNTER_CPU(remove_write_not_writable, "remove_write non-writable page")
26.194 -PERFCOUNTER_CPU(remove_write_fast_exit, "remove_write hit predicted entry")
26.195 -PERFCOUNTER_CPU(remove_write_predicted, "remove_write predict hit&exit")
26.196 -PERFCOUNTER_CPU(remove_write_bad_prediction, "remove_write bad prediction")
26.197 -PERFCOUNTER_CPU(update_hl2e_invlpg, "update_hl2e calls invlpg")
26.198 +PERFCOUNTER_CPU(validate_pte_calls, "calls to validate_pte_change")
26.199 +PERFCOUNTER_CPU(validate_pte_changes1, "validate_pte makes changes1")
26.200 +PERFCOUNTER_CPU(validate_pte_changes2, "validate_pte makes changes2")
26.201 +PERFCOUNTER_CPU(validate_pte_changes3, "validate_pte makes changes3")
26.202 +PERFCOUNTER_CPU(validate_pte_changes4, "validate_pte makes changes4")
26.203 +PERFCOUNTER_CPU(validate_pde_calls, "calls to validate_pde_change")
26.204 +PERFCOUNTER_CPU(validate_pde_changes, "validate_pde makes changes")
26.205 +PERFCOUNTER_CPU(shadow_get_page_fail, "shadow_get_page_from_l1e fails")
26.206 +PERFCOUNTER_CPU(validate_hl2e_calls, "calls to validate_hl2e_change")
26.207 +PERFCOUNTER_CPU(validate_hl2e_changes, "validate_hl2e makes changes")
26.208 +PERFCOUNTER_CPU(exception_fixed, "pre-exception fixed")
26.209 +PERFCOUNTER_CPU(gpfn_to_mfn_foreign, "calls to gpfn_to_mfn_foreign")
26.210 +PERFCOUNTER_CPU(remove_all_access, "calls to remove_all_access")
26.211 +PERFCOUNTER_CPU(remove_write_access, "calls to remove_write_access")
26.212 +PERFCOUNTER_CPU(remove_write_access_easy, "easy outs of remove_write_access")
26.213 +PERFCOUNTER_CPU(remove_write_no_work, "no work in remove_write_access")
26.214 +PERFCOUNTER_CPU(remove_write_not_writable, "remove_write non-writable page")
26.215 +PERFCOUNTER_CPU(remove_write_fast_exit, "remove_write hit predicted entry")
26.216 +PERFCOUNTER_CPU(remove_write_predicted, "remove_write predict hit&exit")
26.217 +PERFCOUNTER_CPU(remove_write_bad_prediction, "remove_write bad prediction")
26.218 +PERFCOUNTER_CPU(update_hl2e_invlpg, "update_hl2e calls invlpg")
26.219 +
26.220 +#endif /* __XEN_PERFC_DEFN_H__ */
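perfc_defn.h is a definition list in the X-macro style: it deliberately omits the bodies of PERFCOUNTER_ARRAY/PERFCOUNTER_CPU/PERFSTATUS so each consumer can expand the same list into a different artifact (an index enum, a counter struct, a table of name strings). A sketch of that consumption pattern, with invented macro bodies and names; Xen's real definitions live in its perfc machinery and may differ:

    /* Illustrative consumer: expand the list into an enum of indices.
     * Because of the new include guard, each translation unit expands the
     * list once; a different TU can expand it into, e.g., a name table.
     * Assumes NR_hypercalls etc. are in scope, as in Xen proper. */
    #define PERFCOUNTER_ARRAY(var, name, size) \
        var##_BASE, var##_LAST = var##_BASE + (size) - 1,
    #define PERFCOUNTER_CPU(var, name)  var,
    #define PERFSTATUS(var, name)       var,

    enum perf_counter_index {
    #include <xen/perfc_defn.h>
        NR_PERF_COUNTERS
    };

    #undef PERFCOUNTER_ARRAY
    #undef PERFCOUNTER_CPU
    #undef PERFSTATUS

This is also why the reformatted entries above keep one invocation per logical line: the file is parsed as macro calls, so layout changes (tightened spacing, wrapped long descriptions) are purely cosmetic and expand identically.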