From: Andrew Cooper
Date: Thu, 24 Dec 2015 21:09:47 +0000 (+0000)
Subject: Setup for 32bit PV userspace execution
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=e86ce90b84b9a9d47da22e6d0250822db44ee60d;p=people%2Froyger%2Fxen-test-framework.git

Setup for 32bit PV userspace execution

 * Implement hypercall_update_va_mapping().
 * Walk the live pagetables setting _PAGE_USER.

For now, 32bit PV XTF guests need to be run on a hypervisor booted with
"smep=0 smap=0" to prevent Xen's %cr4 settings from interfering.
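As an illustration of the intended use (a sketch only, not code in the diff
below; "l1e" stands in for a PTE read out during the pagetable walk, and the
literal 2 is Xen's UVMF_INVLPG flush flag, i.e. flush just the updated
mapping):

    /*
     * Grant cpl3 access to the page at 'va'.  On 32bit, the wrapper below
     * splits the 64bit PTE across two argument registers, so the same C
     * call becomes a 4-argument hypercall; callers are unaffected.
     */
    int rc = hypercall_update_va_mapping(_p(va), l1e | _PAGE_USER, 2);

    if ( rc )
        panic("update_va_mapping(%p) failed: %d\n", _p(va), rc);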
Signed-off-by: Andrew Cooper
---

diff --git a/arch/x86/pv/traps.c b/arch/x86/pv/traps.c
index 54e9a9b..208adcf 100644
--- a/arch/x86/pv/traps.c
+++ b/arch/x86/pv/traps.c
@@ -4,6 +4,7 @@
 
 #include
 #include
+#include <xtf/hypercall.h>
 
 /* Real entry points */
 void entry_DE(void);
@@ -85,6 +86,66 @@ void arch_init_traps(void)
     if ( rc )
         panic("Failed to set user %%cr3: %d\n", rc);
 
+#elif defined(__i386__)
+    /*
+     * Walk the live pagetables and set _PAGE_USER.
+     *
+     * XTF uses a shared user/kernel address space, and _PAGE_USER must be set
+     * to permit cpl3 access to the virtual addresses without taking a
+     * pagefault.
+     *
+     * !!! WARNING !!!
+     *
+     * Because PV guests and Xen share CR4, Xen's setting of CR4.{SMEP,SMAP}
+     * interferes with 32bit PV guests.  For now, 32bit PV XTF tests require
+     * the hypervisor to be booted with "smep=0 smap=0".
+     *
+     * TODO - figure out how to work around this restriction while maintaining
+     * a shared user/kernel address space for the framework.
+     *
+     * !!! WARNING !!!
+     */
+    uint64_t *l3 = _p(start_info->pt_base);
+    unsigned long va = 0;
+
+    while ( va < __HYPERVISOR_VIRT_START_PAE )
+    {
+        unsigned int i3 = l3_table_offset(va);
+
+        if ( !(l3[i3] & _PAGE_PRESENT) )
+        {
+            va += 1UL << L3_PT_SHIFT;
+            continue;
+        }
+
+        uint64_t *l2 = maddr_to_virt(pte_to_paddr(l3[i3]));
+        unsigned int i2 = l2_table_offset(va);
+
+        if ( !(l2[i2] & _PAGE_PRESENT) )
+        {
+            va += 1UL << L2_PT_SHIFT;
+            continue;
+        }
+
+        uint64_t *l1 = maddr_to_virt(pte_to_paddr(l2[i2]));
+        unsigned int i1 = l1_table_offset(va);
+
+        if ( !(l1[i1] & _PAGE_PRESENT) )
+        {
+            va += 1UL << L1_PT_SHIFT;
+            continue;
+        }
+
+        if ( !(l1[i1] & _PAGE_USER) )
+        {
+            rc = hypercall_update_va_mapping(_p(va), l1[i1] | _PAGE_USER, 2);
+            if ( rc )
+                panic("update_va_mapping(%p, 0x%016"PRIx64") failed: %d\n",
+                      _p(va), l1[i1] | _PAGE_USER, rc);
+        }
+
+        va += 1UL << L1_PT_SHIFT;
+    }
 #endif
 }
diff --git a/include/arch/x86/mm.h b/include/arch/x86/mm.h
index 0706728..900468f 100644
--- a/include/arch/x86/mm.h
+++ b/include/arch/x86/mm.h
@@ -33,6 +33,11 @@ static inline void *mfn_to_virt(unsigned long mfn)
 {
     return pfn_to_virt(m2p[mfn]);
 }
 
+static inline void *maddr_to_virt(uint64_t maddr)
+{
+    return mfn_to_virt(maddr >> PAGE_SHIFT) + (maddr & ~PAGE_MASK);
+}
+
 #undef m2p
 
 #endif /* CONFIG_ENV_pv */
diff --git a/include/arch/x86/page.h b/include/arch/x86/page.h
index adb8016..1bda7d0 100644
--- a/include/arch/x86/page.h
+++ b/include/arch/x86/page.h
@@ -22,11 +22,35 @@
 #define _PAGE_USER 0x004
 #define _PAGE_PSE 0x080
 
+#define L1_PT_SHIFT 12
+#define L2_PT_SHIFT 21
+#define L3_PT_SHIFT 30
+#define L4_PT_SHIFT 39
+
 #define L1_PT_ENTRIES 512
 #define L2_PT_ENTRIES 512
 #define L3_PT_ENTRIES 512
 #define L4_PT_ENTRIES 512
 
+#ifndef __ASSEMBLY__
+
+static inline unsigned int l1_table_offset(unsigned long va)
+{ return (va >> L1_PT_SHIFT) & (L1_PT_ENTRIES - 1); }
+static inline unsigned int l2_table_offset(unsigned long va)
+{ return (va >> L2_PT_SHIFT) & (L2_PT_ENTRIES - 1); }
+static inline unsigned int l3_table_offset(unsigned long va)
+{ return (va >> L3_PT_SHIFT) & (L3_PT_ENTRIES - 1); }
+#ifdef __x86_64__
+static inline unsigned int l4_table_offset(unsigned long va)
+{ return (va >> L4_PT_SHIFT) & (L4_PT_ENTRIES - 1); }
+#endif /* __x86_64__ */
+
+
+static inline uint64_t pte_to_paddr(uint64_t pte)
+{ return pte & 0x000ffffffffff000ULL; }
+
+#endif /* !__ASSEMBLY__ */
+
 #endif /* XTF_X86_PAGE_H */
 
 /*
diff --git a/include/arch/x86/traps.h b/include/arch/x86/traps.h
index d6c0d0c..f2d0450 100644
--- a/include/arch/x86/traps.h
+++ b/include/arch/x86/traps.h
@@ -18,6 +18,12 @@ void __noreturn arch_crash_hard(void);
 
 extern uint8_t boot_stack[2 * PAGE_SIZE];
 
+#if defined(CONFIG_ENV_pv)
+#include <xen/xen.h>
+
+extern struct start_info *start_info;
+#endif
+
 #endif /* XTF_X86_TRAPS_H */
 
 /*
diff --git a/include/xen/arch-x86/xen-x86_32.h b/include/xen/arch-x86/xen-x86_32.h
index 57a559d..efe4134 100644
--- a/include/xen/arch-x86/xen-x86_32.h
+++ b/include/xen/arch-x86/xen-x86_32.h
@@ -15,6 +15,8 @@
 
 #define MACH2PHYS_VIRT_START 0xF5800000UL
 
+#define __HYPERVISOR_VIRT_START_PAE 0xF5800000UL
+
 #endif /* XEN_PUBLIC_ARCH_X86_XEN_X86_32_H */
 
 /*
diff --git a/include/xtf/hypercall.h b/include/xtf/hypercall.h
index 5102912..8a43f87 100644
--- a/include/xtf/hypercall.h
+++ b/include/xtf/hypercall.h
@@ -46,6 +46,17 @@ static inline long hypercall_stack_switch(const unsigned int ss,
                                           const void *sp)
 {
     return HYPERCALL2(long, __HYPERVISOR_stack_switch, ss, sp);
 }
 
+static inline long hypercall_update_va_mapping(void *va, uint64_t npte,
+                                               unsigned int flags)
+{
+#ifdef __x86_64__
+    return HYPERCALL3(long, __HYPERVISOR_update_va_mapping, va, npte, flags);
+#else
+    return HYPERCALL4(long, __HYPERVISOR_update_va_mapping,
+                      va, npte, npte >> 32, flags);
+#endif
+}
+
 static inline long hypercall_mmuext_op(const mmuext_op_t ops[],
                                        unsigned int count,
                                        unsigned int *done,
diff --git a/include/xtf/types.h b/include/xtf/types.h
index 1f4b5f4..4e4b153 100644
--- a/include/xtf/types.h
+++ b/include/xtf/types.h
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include <inttypes.h>
 
 #endif /* XTF_TYPES_H */
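For reference, a worked example of the new page.h helpers (illustrative only,
not part of the patch; the address is arbitrary):

    /*
     * Decompose a PAE virtual address into its per-level table slots,
     * matching the (va >> shift) & 511 arithmetic added above:
     * 0x00402000 -> L3 slot 0, L2 slot 2, L1 slot 2.
     */
    static inline bool offsets_sane(void)
    {
        unsigned long va = 0x00402000UL;

        return l3_table_offset(va) == 0 &&
               l2_table_offset(va) == 2 &&
               l1_table_offset(va) == 2;
    }

pte_to_paddr() masks a PTE down to bits 51:12 (the address portion), which
maddr_to_virt() then turns back into a pointer via the M2P-derived mapping,
allowing the walk to descend from one pagetable level to the next.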