x86/Intel: virtualize support for cpuid faulting
author    Kyle Huey <me@kylehuey.com>
          Thu, 20 Oct 2016 13:44:28 +0000 (06:44 -0700)
committer Andrew Cooper <andrew.cooper3@citrix.com>
          Thu, 20 Oct 2016 14:28:07 +0000 (15:28 +0100)
On HVM guests, the cpuid instruction triggers a VM exit, so we can check the
emulated faulting state in vmx_do_cpuid and hvmemul_cpuid. A new function,
hvm_check_cpuid_faulting, checks whether cpuid faulting is enabled and the
CPL > 0. When it returns true, the cpuid handling functions inject a GP(0).
Notably, explicit hardware support for faulting on cpuid is not necessary to
emulate support for an HVM guest.

On PV guests, hardware support is required so that userspace cpuid will trap
to Xen. Xen already enables cpuid faulting on supported CPUs for PV guests
that aren't the control domain (see the comment in
intel_ctxt_switch_levelling). Every PV guest cpuid will trap via a GP(0) to
emulate_privileged_op (via do_general_protection). Once there, we simply
decline to emulate cpuid if the CPL > 0 and faulting is enabled, leaving the
GP(0) for the guest kernel to handle.
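
As a concrete illustration of the guest-visible interface this patch
virtualizes (a minimal sketch, not part of the patch: rdmsr()/wrmsr() stand
in for a guest kernel's CPL0 MSR accessors, and the MSR indices and bit
positions follow the Intel SDM):

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_PLATFORM_INFO            0x000000ce
    #define PLATFORM_INFO_CPUID_FAULTING (1ULL << 31)
    #define MSR_MISC_FEATURES_ENABLES    0x00000140
    #define MISC_FEATURES_CPUID_FAULTING (1ULL << 0)

    uint64_t rdmsr(uint32_t msr);            /* assumed CPL0 helpers */
    void wrmsr(uint32_t msr, uint64_t val);

    static bool enable_cpuid_faulting(void)
    {
        /* Support is advertised via MSR_PLATFORM_INFO bit 31. */
        if ( !(rdmsr(MSR_PLATFORM_INFO) & PLATFORM_INFO_CPUID_FAULTING) )
            return false;

        /* Once set, cpuid at CPL > 0 raises GP(0) instead of executing. */
        wrmsr(MSR_MISC_FEATURES_ENABLES,
              rdmsr(MSR_MISC_FEATURES_ENABLES) | MISC_FEATURES_CPUID_FAULTING);
        return true;
    }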

Signed-off-by: Kyle Huey <khuey@kylehuey.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Release-acked-by: Wei Liu <wei.liu2@citrix.com>
xen/arch/x86/hvm/emulate.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/traps.c
xen/include/asm-x86/domain.h
xen/include/asm-x86/hvm/hvm.h

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 6ed74866deb2447a35835bbb9ad09ec6ed73a6b2..70c8d44716ca0701314a841cb33ca39829466e93 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1549,6 +1549,26 @@ static int hvmemul_cpuid(
     unsigned int *edx,
     struct x86_emulate_ctxt *ctxt)
 {
+    /*
+     * x86_emulate uses this function to query CPU features for its own internal
+     * use. Make sure we're actually emulating CPUID before emulating CPUID
+     * faulting.
+     */
+    if ( ctxt->opcode == X86EMUL_OPC(0x0f, 0xa2) &&
+         hvm_check_cpuid_faulting(current) )
+    {
+        struct hvm_emulate_ctxt *hvmemul_ctxt =
+            container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+
+        hvmemul_ctxt->exn_pending = 1;
+        hvmemul_ctxt->trap.vector = TRAP_gp_fault;
+        hvmemul_ctxt->trap.type = X86_EVENTTYPE_HW_EXCEPTION;
+        hvmemul_ctxt->trap.error_code = 0;
+        hvmemul_ctxt->trap.insn_len = 0;
+
+        return X86EMUL_EXCEPTION;
+    }
+
     hvm_funcs.cpuid_intercept(eax, ebx, ecx, edx);
     return X86EMUL_OKAY;
 }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 3c90ecd44bbb99c5605274149a2b1ee1071cad23..11e2b8238e929853d34625c2cd152c3253947fc7 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3680,6 +3680,20 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
     }
 }
 
+bool hvm_check_cpuid_faulting(struct vcpu *v)
+{
+    struct segment_register sreg;
+
+    if ( !v->arch.cpuid_faulting )
+        return false;
+
+    hvm_get_segment_register(v, x86_seg_ss, &sreg);
+    if ( sreg.attr.fields.dpl == 0 )
+        return false;
+
+    return true;
+}
+
 static uint64_t _hvm_rdtsc_intercept(void)
 {
     struct vcpu *curr = current;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index db12cdb43b847dd7b754bb84e421c542109c705e..4d30eae2a79370db1b7b0e9e715a35eda4838b92 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2433,6 +2433,12 @@ static int vmx_do_cpuid(struct cpu_user_regs *regs)
     unsigned int eax, ebx, ecx, edx;
     unsigned int leaf, subleaf;
 
+    if ( hvm_check_cpuid_faulting(current) )
+    {
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
+        return 1;  /* Don't advance the guest IP! */
+    }
+
     eax = regs->eax;
     ebx = regs->ebx;
     ecx = regs->ecx;
@@ -2699,9 +2705,13 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         break;
 
     case MSR_INTEL_PLATFORM_INFO:
-        if ( rdmsr_safe(MSR_INTEL_PLATFORM_INFO, *msr_content) )
-            goto gp_fault;
+        *msr_content = MSR_PLATFORM_INFO_CPUID_FAULTING;
+        break;
+
+    case MSR_INTEL_MISC_FEATURES_ENABLES:
         *msr_content = 0;
+        if ( current->arch.cpuid_faulting )
+            *msr_content |= MSR_MISC_FEATURES_CPUID_FAULTING;
         break;
 
     default:
@@ -2930,6 +2940,13 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
             goto gp_fault;
         break;
 
+    case MSR_INTEL_MISC_FEATURES_ENABLES:
+        if ( msr_content & ~MSR_MISC_FEATURES_CPUID_FAULTING )
+            goto gp_fault;
+        v->arch.cpuid_faulting =
+            !!(msr_content & MSR_MISC_FEATURES_CPUID_FAULTING);
+        break;
+
     default:
         if ( passive_domain_do_wrmsr(msr, msr_content) )
             return X86EMUL_OKAY;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 293ff8d5a72cb99d20a5040525b2791a89c5b563..63765ec5cec9002cd9ecb160402bac5af49d2de9 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1320,6 +1320,15 @@ static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
     }
     if ( memcmp(instr, "\xf\xa2", sizeof(instr)) )
         return 0;
+
+    /* If cpuid faulting is enabled and CPL>0 inject a #GP in place of #UD. */
+    if ( current->arch.cpuid_faulting && !guest_kernel_mode(current, regs) )
+    {
+        regs->eip = eip;
+        do_guest_trap(TRAP_gp_fault, regs);
+        return EXCRET_fault_fixed;
+    }
+
     eip += sizeof(instr);
 
     pv_cpuid(regs);
@@ -2479,6 +2488,17 @@ static int priv_op_read_msr(unsigned int reg, uint64_t *val,
              rdmsr_safe(MSR_INTEL_PLATFORM_INFO, *val) )
             break;
         *val = 0;
+        if ( this_cpu(cpuid_faulting_enabled) )
+            *val |= MSR_PLATFORM_INFO_CPUID_FAULTING;
+        return X86EMUL_OKAY;
+
+    case MSR_INTEL_MISC_FEATURES_ENABLES:
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+             rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES, *val) )
+            break;
+        *val = 0;
+        if ( curr->arch.cpuid_faulting )
+            *val |= MSR_MISC_FEATURES_CPUID_FAULTING;
         return X86EMUL_OKAY;
 
     case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
@@ -2682,6 +2702,17 @@ static int priv_op_write_msr(unsigned int reg, uint64_t val,
             break;
         return X86EMUL_OKAY;
 
+    case MSR_INTEL_MISC_FEATURES_ENABLES:
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+             (val & ~MSR_MISC_FEATURES_CPUID_FAULTING) ||
+             rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES, temp) )
+            break;
+        if ( (val & MSR_MISC_FEATURES_CPUID_FAULTING) &&
+             !this_cpu(cpuid_faulting_enabled) )
+            break;
+        curr->arch.cpuid_faulting = !!(val & MSR_MISC_FEATURES_CPUID_FAULTING);
+        return X86EMUL_OKAY;
+
     case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
     case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
     case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
@@ -3191,6 +3222,10 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
         break;
 
     case 0xa2: /* CPUID */
+        /* If cpuid faulting is enabled and CPL>0 leave the #GP untouched. */
+        if ( v->arch.cpuid_faulting && !guest_kernel_mode(v, regs) )
+            goto fail;
+
         pv_cpuid(regs);
         break;
 
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 5807a1f37b3acfd268c6bf88fb03b6b215697388..f6a40eb8812ad818b2020e0be3a9093c2adce5bd 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -557,6 +557,9 @@ struct arch_vcpu
      * and thus should be saved/restored. */
     bool_t nonlazy_xstate_used;
 
+    /* Has the guest enabled CPUID faulting? */
+    bool cpuid_faulting;
+
     /*
      * The SMAP check policy when updating runstate_guest(v) and the
      * secondary system time.
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index b1be6bd43ed42f06a021235707436807291e0e23..7e7462e79d038ff64ba4264d0102308764d23f40 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -414,6 +414,7 @@ void hvm_hypervisor_cpuid_leaf(uint32_t sub_idx,
                                uint32_t *ecx, uint32_t *edx);
 void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
                                    unsigned int *ecx, unsigned int *edx);
+bool hvm_check_cpuid_faulting(struct vcpu *v);
 void hvm_migrate_timers(struct vcpu *v);
 void hvm_do_resume(struct vcpu *v);
 void hvm_migrate_pirqs(struct vcpu *v);
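
A hedged way to observe the new behaviour from inside a guest (hypothetical
test program, not part of this patch): once the guest kernel has set the
faulting bit for the vCPU, cpuid at CPL > 0 raises GP(0), which a Unix-like
guest kernel typically delivers to userspace as SIGSEGV.

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>

    static sigjmp_buf env;

    static void on_sigsegv(int sig)
    {
        siglongjmp(env, 1);
    }

    int main(void)
    {
        unsigned int eax = 0, ebx, ecx = 0, edx;

        signal(SIGSEGV, on_sigsegv);

        if ( sigsetjmp(env, 1) )
        {
            puts("cpuid faulted: faulting is active");
            return 0;
        }

        /* Leaf 0; faults with GP(0) when cpuid faulting is enabled. */
        asm volatile ( "cpuid"
                       : "+a" (eax), "=b" (ebx), "+c" (ecx), "=d" (edx) );
        puts("cpuid executed: faulting not active");
        return 0;
    }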