* Implement hypercall_update_va_mapping().
* Walk the live pagetables setting _PAGE_USER.
For now, 32bit PV XTF guests need to be run on a hypervisor booted with
"smep=0 smap=0" to prevent Xen's %cr4 settings from interfering.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
#include <arch/x86/processor.h>
#include <arch/x86/segment.h>
+#include <arch/x86/mm.h>
/* Real entry points */
void entry_DE(void);
    if ( rc )
        panic("Failed to set user %%cr3: %d\n", rc);
+#elif defined(__i386__)
+    /*
+     * Walk the live pagetables and set _PAGE_USER
+     *
+     * XTF uses a shared user/kernel address space, and _PAGE_USER must be set
+     * to permit cpl3 access to the virtual addresses without taking a
+     * pagefault.
+     *
+     * !!! WARNING !!!
+     *
+     * Because PV guests and Xen share CR4, Xen's setting of CR4.{SMEP,SMAP}
+     * interferes with 32bit PV guests. For now, 32bit PV XTF tests require
+     * the hypervisor to be booted with "smep=0 smap=0".
+     *
+     * TODO - figure out how to work around this restriction while maintaining
+     * a shared user/kernel address space for the framework.
+     *
+     * !!! WARNING !!!
+     */
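+    /* 32bit PV guests use 3-level PAE paging; pt_base points at the L3 table. */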
+    uint64_t *l3 = _p(start_info->pt_base);
+    unsigned long va = 0;
+
+    while ( va < __HYPERVISOR_VIRT_START_PAE )
+    {
+        unsigned int i3 = l3_table_offset(va);
+
+        if ( !(l3[i3] & _PAGE_PRESENT) )
+        {
+            va += 1UL << L3_PT_SHIFT;
+            continue;
+        }
+
+        uint64_t *l2 = maddr_to_virt(pte_to_paddr(l3[i3]));
+        unsigned int i2 = l2_table_offset(va);
+
+        if ( !(l2[i2] & _PAGE_PRESENT) )
+        {
+            va += 1UL << L2_PT_SHIFT;
+            continue;
+        }
+
+        uint64_t *l1 = maddr_to_virt(pte_to_paddr(l2[i2]));
+        unsigned int i1 = l1_table_offset(va);
+
+        if ( !(l1[i1] & _PAGE_PRESENT) )
+        {
+            va += 1UL << L1_PT_SHIFT;
+            continue;
+        }
+
+        if ( !(l1[i1] & _PAGE_USER) )
+        {
+            rc = hypercall_update_va_mapping(_p(va), l1[i1] | _PAGE_USER,
+                                             2 /* UVMF_INVLPG */);
+            if ( rc )
+                panic("update_va_mapping(%p, 0x%016"PRIx64") failed: %d\n",
+                      _p(va), l1[i1] | _PAGE_USER, rc);
+        }
+
+        va += 1UL << L1_PT_SHIFT;
+    }
#endif
}
    return pfn_to_virt(m2p[mfn]);
}
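+
+/* Convert a machine address to a virtual address, via the M2P. */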
+static inline void *maddr_to_virt(uint64_t maddr)
+{
+    return mfn_to_virt(maddr >> PAGE_SHIFT) + (maddr & ~PAGE_MASK);
+}
+
#undef m2p
#endif /* CONFIG_ENV_pv */
#define _PAGE_USER 0x004
#define _PAGE_PSE 0x080
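+
+/* Virtual address bit at which each pagetable level's index begins. */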
+#define L1_PT_SHIFT 12
+#define L2_PT_SHIFT 21
+#define L3_PT_SHIFT 30
+#define L4_PT_SHIFT 39
+
#define L1_PT_ENTRIES 512
#define L2_PT_ENTRIES 512
#define L3_PT_ENTRIES 512
#define L4_PT_ENTRIES 512
+#ifndef __ASSEMBLY__
+
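+/* Calculate the index into the given pagetable level for a virtual address. */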
+static inline unsigned int l1_table_offset(unsigned long va)
+{ return (va >> L1_PT_SHIFT) & (L1_PT_ENTRIES - 1); }
+static inline unsigned int l2_table_offset(unsigned long va)
+{ return (va >> L2_PT_SHIFT) & (L2_PT_ENTRIES - 1); }
+static inline unsigned int l3_table_offset(unsigned long va)
+{ return (va >> L3_PT_SHIFT) & (L3_PT_ENTRIES - 1); }
+#ifdef __x86_64__
+static inline unsigned int l4_table_offset(unsigned long va)
+{ return (va >> L4_PT_SHIFT) & (L4_PT_ENTRIES - 1); }
+#endif /* __x86_64__ */
+
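+/* Strip the flags from a PTE, leaving just the address it references (bits 12-51). */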
+static inline uint64_t pte_to_paddr(uint64_t pte)
+{ return pte & 0x000ffffffffff000ULL; }
+
+#endif /* !__ASSEMBLY__ */
+
#endif /* XTF_X86_PAGE_H */
/*
extern uint8_t boot_stack[2 * PAGE_SIZE];
+#if defined(CONFIG_ENV_pv)
+#include <xen/xen.h>
+
+extern struct start_info *start_info;
+#endif
+
#endif /* XTF_X86_TRAPS_H */
/*
#define MACH2PHYS_VIRT_START 0xF5800000UL
+#define __HYPERVISOR_VIRT_START_PAE 0xF5800000UL
+
#endif /* XEN_PUBLIC_ARCH_X86_XEN_X86_32_H */
/*
    return HYPERCALL2(long, __HYPERVISOR_stack_switch, ss, sp);
}
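+
+/*
+ * On 32bit, the 64bit new PTE is passed to the hypervisor as two 32bit
+ * arguments (low half, then high half).
+ */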
+static inline long hypercall_update_va_mapping(void *va, uint64_t npte,
+                                               unsigned int flags)
+{
+#ifdef __x86_64__
+    return HYPERCALL3(long, __HYPERVISOR_update_va_mapping, va, npte, flags);
+#else
+    return HYPERCALL4(long, __HYPERVISOR_update_va_mapping,
+                      va, npte, npte >> 32, flags);
+#endif
+}
+
static inline long hypercall_mmuext_op(const mmuext_op_t ops[],
                                       unsigned int count,
                                       unsigned int *done,
#include <stdbool.h>
#include <stdarg.h>
#include <stddef.h>
+#include <inttypes.h>
#endif /* XTF_TYPES_H */