ia64/xen-unstable
changeset 3707:ef5e5cd10778
bitkeeper revision 1.1159.212.120 (42080fdeqkhdPXOxk9B5egncOrellQ)
Make phys_to_machine_mapping a static inline function.
Signed-off-by: Arun Sharma <arun.sharma@intel.com>
Signed-off-by: ian@xensource.com
author | iap10@labyrinth.cl.cam.ac.uk |
---|---|
date | Tue Feb 08 01:03:26 2005 +0000 (2005-02-08) |
parents | 396d0cbdc29b |
children | 9e80fc0dcac5 |
files | xen/arch/x86/shadow.c xen/arch/x86/vmx.c xen/arch/x86/vmx_platform.c xen/arch/x86/vmx_vmcs.c xen/include/asm-x86/mm.h xen/include/asm-x86/shadow.h |
line diff
1.1 --- a/xen/arch/x86/shadow.c Tue Feb 08 00:35:02 2005 +0000 1.2 +++ b/xen/arch/x86/shadow.c Tue Feb 08 01:03:26 2005 +0000 1.3 @@ -576,7 +576,7 @@ void vmx_shadow_invlpg(struct domain *d, 1.4 return; 1.5 } 1.6 1.7 - host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT]; 1.8 + host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT); 1.9 spte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK); 1.10 1.11 if (__put_user(spte, (unsigned long *) 1.12 @@ -813,7 +813,7 @@ static int check_pte( 1.13 1.14 if (d->arch.shadow_mode == SHM_full_32) { 1.15 1.16 - guest_gpfn = phys_to_machine_mapping[gpfn]; 1.17 + guest_gpfn = phys_to_machine_mapping(gpfn); 1.18 1.19 if ( __shadow_status(d, guest_gpfn) != (PSH_shadowed | spfn) ) 1.20 FAIL("spfn problem g.sf=%08lx", 1.21 @@ -889,7 +889,7 @@ int check_pagetable(struct domain *d, pa 1.22 1.23 if (d->arch.shadow_mode == SHM_full_32) 1.24 { 1.25 - host_gpfn = phys_to_machine_mapping[gpfn]; 1.26 + host_gpfn = phys_to_machine_mapping(gpfn); 1.27 gpl2e = (l2_pgentry_t *) map_domain_mem( host_gpfn << PAGE_SHIFT ); 1.28 1.29 } else
2.1 --- a/xen/arch/x86/vmx.c Tue Feb 08 00:35:02 2005 +0000 2.2 +++ b/xen/arch/x86/vmx.c Tue Feb 08 01:03:26 2005 +0000 2.3 @@ -129,7 +129,7 @@ static int vmx_do_page_fault(unsigned lo 2.4 2.5 index = (va >> L2_PAGETABLE_SHIFT); 2.6 if (!l2_pgentry_val(ed->arch.guest_pl2e_cache[index])) { 2.7 - pfn = phys_to_machine_mapping[gpde >> PAGE_SHIFT]; 2.8 + pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT); 2.9 2.10 VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_do_page_fault: pagetable = %lx\n", 2.11 pagetable_val(ed->arch.pagetable)); 2.12 @@ -304,7 +304,7 @@ inline unsigned long gva_to_gpa(unsigned 2.13 __guest_get_pl2e(ed, gva, &gpde); 2.14 index = (gva >> L2_PAGETABLE_SHIFT); 2.15 2.16 - pfn = phys_to_machine_mapping[gpde >> PAGE_SHIFT]; 2.17 + pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT); 2.18 2.19 ed->arch.guest_pl2e_cache[index] = 2.20 mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR); 2.21 @@ -451,8 +451,8 @@ static void mov_to_cr(int gp, int cr, st 2.22 /* 2.23 * The guest CR3 must be pointing to the guest physical. 2.24 */ 2.25 - if (!(pfn = phys_to_machine_mapping[ 2.26 - d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT])) 2.27 + if (!(pfn = phys_to_machine_mapping( 2.28 + d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT))) 2.29 { 2.30 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value = %lx\n", 2.31 d->arch.arch_vmx.cpu_cr3); 2.32 @@ -504,7 +504,7 @@ static void mov_to_cr(int gp, int cr, st 2.33 * removed some translation or changed page attributes. 2.34 * We simply invalidate the shadow. 
2.35 */ 2.36 - pfn = phys_to_machine_mapping[value >> PAGE_SHIFT]; 2.37 + pfn = phys_to_machine_mapping(value >> PAGE_SHIFT); 2.38 if ((pfn << PAGE_SHIFT) != pagetable_val(d->arch.pagetable)) 2.39 __vmx_bug(regs); 2.40 vmx_shadow_clear_state(d->domain); 2.41 @@ -521,7 +521,7 @@ static void mov_to_cr(int gp, int cr, st 2.42 "Invalid CR3 value=%lx\n", value); 2.43 domain_crash(); /* need to take a clean path */ 2.44 } 2.45 - pfn = phys_to_machine_mapping[value >> PAGE_SHIFT]; 2.46 + pfn = phys_to_machine_mapping(value >> PAGE_SHIFT); 2.47 vmx_shadow_clear_state(d->domain); 2.48 d->arch.pagetable = mk_pagetable(pfn << PAGE_SHIFT); 2.49 shadow_mk_pagetable(d);
3.1 --- a/xen/arch/x86/vmx_platform.c Tue Feb 08 00:35:02 2005 +0000 3.2 +++ b/xen/arch/x86/vmx_platform.c Tue Feb 08 01:03:26 2005 +0000 3.3 @@ -369,7 +369,7 @@ static int inst_copy_from_guest(char *bu 3.4 printk("inst_copy_from_guest- EXIT: read gpte faulted" ); 3.5 return 0; 3.6 } 3.7 - mfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT]; 3.8 + mfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT); 3.9 ma = (mfn << PAGE_SHIFT) | (guest_eip & (PAGE_SIZE - 1)); 3.10 inst_start = (unsigned char *)map_domain_mem(ma); 3.11
4.1 --- a/xen/arch/x86/vmx_vmcs.c Tue Feb 08 00:35:02 2005 +0000 4.2 +++ b/xen/arch/x86/vmx_vmcs.c Tue Feb 08 01:03:26 2005 +0000 4.3 @@ -118,7 +118,7 @@ int vmx_setup_platform(struct exec_domai 4.4 addr = context->edi; 4.5 offset = (addr & ~PAGE_MASK); 4.6 addr = round_pgdown(addr); 4.7 - mpfn = phys_to_machine_mapping[addr >> PAGE_SHIFT]; 4.8 + mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT); 4.9 p = map_domain_mem(mpfn << PAGE_SHIFT); 4.10 4.11 e820p = (struct e820entry *) ((unsigned long) p + offset); 4.12 @@ -136,7 +136,7 @@ int vmx_setup_platform(struct exec_domai 4.13 } 4.14 unmap_domain_mem(p); 4.15 4.16 - mpfn = phys_to_machine_mapping[gpfn]; 4.17 + mpfn = phys_to_machine_mapping(gpfn); 4.18 p = map_domain_mem(mpfn << PAGE_SHIFT); 4.19 d->arch.arch_vmx.vmx_platform.shared_page_va = (unsigned long) p; 4.20 4.21 @@ -172,7 +172,7 @@ static int add_mapping_perdomain(struct 4.22 d->domain->arch.mm_perdomain_pt[gpfn >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)] = 4.23 mk_l1_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR); 4.24 } 4.25 - phys_to_machine_mapping[gpfn] = mpfn; 4.26 + __phys_to_machine_mapping[gpfn] = mpfn; 4.27 4.28 return 0; 4.29 }
5.1 --- a/xen/include/asm-x86/mm.h Tue Feb 08 00:35:02 2005 +0000 5.2 +++ b/xen/include/asm-x86/mm.h Tue Feb 08 01:03:26 2005 +0000 5.3 @@ -241,8 +241,12 @@ void synchronise_pagetables(unsigned lon 5.4 #undef phys_to_machine_mapping 5.5 5.6 #define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START) 5.7 -#define phys_to_machine_mapping ((unsigned long *)PERDOMAIN_VIRT_START) 5.8 - 5.9 +#define __phys_to_machine_mapping ((unsigned long *)PERDOMAIN_VIRT_START) 5.10 +/* Returns the machine physical */ 5.11 +static inline unsigned long phys_to_machine_mapping(unsigned long pfn) 5.12 +{ 5.13 + return __phys_to_machine_mapping[pfn]; 5.14 +} 5.15 #define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn) 5.16 5.17 #define DEFAULT_GDT_ENTRIES (LAST_RESERVED_GDT_ENTRY+1)
6.1 --- a/xen/include/asm-x86/shadow.h Tue Feb 08 00:35:02 2005 +0000 6.2 +++ b/xen/include/asm-x86/shadow.h Tue Feb 08 01:03:26 2005 +0000 6.3 @@ -51,7 +51,7 @@ extern void vmx_shadow_invlpg(struct dom 6.4 6.5 #define __get_phys_to_machine(_d, host_gpfn, gpfn) \ 6.6 if ((_d)->arch.shadow_mode == SHM_full_32) \ 6.7 - (host_gpfn) = phys_to_machine_mapping[(gpfn)]; \ 6.8 + (host_gpfn) = phys_to_machine_mapping(gpfn); \ 6.9 else \ 6.10 (host_gpfn) = (gpfn); 6.11 6.12 @@ -139,7 +139,7 @@ static inline void __guest_set_pl2e( 6.13 { 6.14 unsigned long pfn; 6.15 6.16 - pfn = phys_to_machine_mapping[value >> PAGE_SHIFT]; 6.17 + pfn = phys_to_machine_mapping(value >> PAGE_SHIFT); 6.18 ed->arch.guest_pl2e_cache[l2_table_offset(va)] = 6.19 mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR); 6.20 6.21 @@ -231,7 +231,7 @@ static inline void l1pte_write_fault( 6.22 { 6.23 unsigned long host_pfn, host_gpte; 6.24 6.25 - host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT]; 6.26 + host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT); 6.27 host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK); 6.28 spte = host_gpte | _PAGE_RW; 6.29 } 6.30 @@ -265,7 +265,7 @@ static inline void l1pte_read_fault( 6.31 { 6.32 unsigned long host_pfn, host_gpte; 6.33 6.34 - host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT]; 6.35 + host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT); 6.36 host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK); 6.37 spte = (host_gpte & _PAGE_DIRTY) ? host_gpte : (host_gpte & ~_PAGE_RW); 6.38 } 6.39 @@ -309,7 +309,7 @@ static inline void l1pte_propagate_from_ 6.40 return; 6.41 } 6.42 6.43 - host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT]; 6.44 + host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT); 6.45 host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK); 6.46 6.47 if ( (host_gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==