return ex;
}
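+/*
+ * CPL=3 variant of stub_vmxon(), placed in __user_text so it can be invoked
+ * via exec_user().
+ */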
+exinfo_t __user_text stub_vmxon_user(uint64_t paddr)
+{
+ exinfo_t ex = 0;
+ bool fail_valid = false, fail_invalid = false;
+
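+ /*
+ * vmxon signals failure via the arithmetic flags: CF for VMfailInvalid,
+ * ZF for VMfailValid. Capture them with flag-output constraints where
+ * available (setcc otherwise), while any fault is recorded into %edi (ex)
+ * by the exception table handler.
+ */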
+ asm volatile ("1: vmxon %[paddr];"
+ ASM_FLAG_OUT(, "setc %[fail_invalid];")
+ ASM_FLAG_OUT(, "setz %[fail_valid];")
+ "2:"
+ _ASM_EXTABLE_HANDLER(1b, 2b, ex_record_fault_edi)
+ : "+D" (ex),
+ ASM_FLAG_OUT("=@ccc", [fail_invalid] "+rm") (fail_invalid),
+ ASM_FLAG_OUT("=@ccz", [fail_valid] "+rm") (fail_valid)
+ : [paddr] "m" (paddr),
+ "X" (ex_record_fault_edi));
+
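+ /* Only VMfailValid leaves an error code in the current VMCS. */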
+ if ( fail_invalid )
+ return VMERR_INVALID;
+ else if ( fail_valid )
+ return get_vmx_insn_err();
+ else
+ return ex;
+}
+
/*
* Local variables:
* mode: C
#include "test.h"
+/* vmxon region which shouldn't be latched in the hardware vmxon pointer. */
+static uint8_t vmxon_region_unused[PAGE_SIZE] __page_aligned_bss;
+
/**
* vmxon with CR4.VMXE cleared
*
check(__func__, stub_vmxon(0), EXINFO_SYM(UD, 0));
}
+/*
+ * Wrapper around stub_vmxon_user(). This stub should always fault for
+ * control or permission reasons, but pointing it at a supervisor frame is
+ * useful to check that Xen doesn't dereference the instruction's parameter.
+ */
+static unsigned long __user_text vmxon_in_user(void)
+{
+ return stub_vmxon_user(_u(vmxon_region_unused));
+}
+
+/**
+ * vmxon in CPL=3 with CR4.VMXE cleared
+ *
+ * Expect: @#UD
+ */
+static void test_vmxon_novmxe_in_user(void)
+{
+ exinfo_t ex = exec_user(vmxon_in_user);
+
+ check(__func__, ex, EXINFO_SYM(UD, 0));
+}
+
+/**
+ * vmxon in CPL=3 with CR4.VMXE set
+ *
+ * Expect: @#GP(0)
+ */
+static void test_vmxon_in_user(void)
+{
+ exinfo_t ex = exec_user(vmxon_in_user);
+
+ check(__func__, ex, EXINFO_SYM(GP, 0));
+}
+
void test_vmxon(void)
{
unsigned long cr4 = read_cr4();
printk("Test: vmxon\n");
test_vmxon_novmxe();
+ test_vmxon_novmxe_in_user();
+
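+ /* Enable CR4.VMXE so vmxon now fails for permission rather than #UD reasons. */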
+ write_cr4(cr4 |= X86_CR4_VMXE);
+
+ test_vmxon_in_user();
}
/*