/* VMX instruction stubs, wrapped to return exinfo_t information. */
exinfo_t stub_vmxon(uint64_t paddr);
+exinfo_t stub_vmptrld(uint64_t paddr);
exinfo_t stub_vmxon_user(uint64_t paddr);
/* Test routines. */
        return ex;
}
+exinfo_t stub_vmptrld(uint64_t paddr)
+{
+    exinfo_t ex = 0;
+    bool fail_valid = false, fail_invalid = false;
+
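+    /*
+     * Faults taken on the vmptrld are recorded into %edi ('ex', via the
+     * "+D" constraint) by the ex_record_fault_edi exception table handler.
+     * ASM_FLAG_OUT() selects flag-output constraints where the compiler
+     * supports them, and the setcc fallbacks otherwise.
+     */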
+    asm volatile ("1: vmptrld %[paddr];"
+                  ASM_FLAG_OUT(, "setc %[fail_invalid];")
+                  ASM_FLAG_OUT(, "setz %[fail_valid];")
+                  "2:"
+                  _ASM_EXTABLE_HANDLER(1b, 2b, ex_record_fault_edi)
+                  : "+D" (ex),
+                    ASM_FLAG_OUT("=@ccc", [fail_invalid] "+rm") (fail_invalid),
+                    ASM_FLAG_OUT("=@ccz", [fail_valid]   "+rm") (fail_valid)
+                  : [paddr] "m" (paddr),
+                    "X" (ex_record_fault_edi));
+
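+    /*
+     * VMX failure convention: CF set => VMfailInvalid (no usable error
+     * code), ZF set => VMfailValid (error code in the current VMCS's
+     * VM-instruction error field).
+     */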
+    if ( fail_invalid )
+        return VMERR_INVALID;
+    else if ( fail_valid )
+        return get_vmx_insn_err();
+    else
+        return ex;
+}
+
exinfo_t __user_text stub_vmxon_user(uint64_t paddr)
{
    exinfo_t ex = 0;
/* vmxon region which gets latched in hardware. */
static uint8_t vmxon_region_real[PAGE_SIZE] __page_aligned_bss;
+/* Loaded VMCS, to recover VM Instruction Errors. */
+static uint8_t vmcs[PAGE_SIZE] __page_aligned_bss;
+
/**
* vmxon with CR4.VMXE cleared
*
    check(__func__, ex, EXINFO_SYM(GP, 0));
}

+/**
+ * vmxon in VMX root w/ CPL = 0 and w/ current VMCS
+ *
+ * Expect: VMfailvalid()
+ */
+static void test_vmxon_in_root_cpl0(void)
+{
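+    /*
+     * vmxon in root mode with a current VMCS should fail with
+     * VMfailValid before this (deliberately unused) region gets latched.
+     */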
+    clear_vmcs(vmxon_region_unused, vmcs_revid);
+    exinfo_t ex = stub_vmxon(_u(vmxon_region_unused));
+
+    check(__func__, ex, VMERR_VALID(VMERR_VMXON_IN_ROOT));
+}
+
void test_vmxon(void)
{
    unsigned long cr4 = read_cr4();
+    exinfo_t ex;

    if ( cr4 & X86_CR4_VMXE )
        write_cr4(cr4 &= ~X86_CR4_VMXE);

    test_vmxon_novmcs_in_root_cpl0();
    test_vmxon_novmcs_in_root_user();
+
+    /* Load a real VMCS to recover VM Instruction Errors. */
+    clear_vmcs(vmcs, vmcs_revid);
+    ex = stub_vmptrld(_u(vmcs));
+    if ( ex )
+        return xtf_failure("Fail: unexpected vmptrld failure %08x\n", ex);
+
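+    /*
+     * Subsequent tests expect VMfailValid, whose error code is read back
+     * from the current VMCS loaded above.
+     */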
+    test_vmxon_in_root_cpl0();
}
/*