x86: replace arch_vcpu::cpuid_faulting with msr_vcpu_policy
author		Sergey Dyasli <sergey.dyasli@citrix.com>
		Mon, 25 Sep 2017 08:55:23 +0000 (10:55 +0200)
committer	Jan Beulich <jbeulich@suse.com>
		Mon, 25 Sep 2017 08:55:23 +0000 (10:55 +0200)
Since each vCPU now has a struct msr_vcpu_policy, use the cpuid_faulting
bit from there in the existing logic and remove arch_vcpu::cpuid_faulting.

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
xen/arch/x86/cpu/intel.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/pv/emul-inv-op.c
xen/arch/x86/pv/emul-priv-op.c
xen/include/asm-x86/domain.h
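
For reference, a minimal sketch of the per-vCPU MSR policy object this patch
switches to. The real definition lives in xen/include/asm-x86/msr.h; only the
single field touched below is shown, so the surrounding layout is an
assumption:

    /* Sketch only -- see xen/include/asm-x86/msr.h for the real layout. */
    struct msr_vcpu_policy
    {
        /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
        struct {
            bool cpuid_faulting; /* Has the guest enabled CPUID faulting? */
        } misc_features_enables;
    };

Each struct arch_vcpu carries a pointer to such an object (v->arch.msr), so
every access below takes the form
v->arch.msr->misc_features_enables.cpuid_faulting, making the standalone bool
removed from domain.h in the final hunk redundant.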

diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 6a4133416808cc1de16270df33eecb0277d0d168..218236d0944b993faccd6495089d08d1423828ff 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -156,6 +156,7 @@ static void intel_ctxt_switch_levelling(const struct vcpu *next)
        struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
        const struct domain *nextd = next ? next->domain : NULL;
        const struct cpuidmasks *masks;
+       const struct msr_vcpu_policy *vp = next->arch.msr;
 
        if (cpu_has_cpuid_faulting) {
                /*
@@ -176,7 +177,7 @@ static void intel_ctxt_switch_levelling(const struct vcpu *next)
                 */
                set_cpuid_faulting(nextd && !is_control_domain(nextd) &&
                                   (is_pv_domain(nextd) ||
-                                   next->arch.cpuid_faulting));
+                                   vp->misc_features_enables.cpuid_faulting));
                return;
        }
 
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 93394c1fb6b62f3d71dde0f6721b49cf97e2ed57..afe5609ea7e9d3dfef2e93c8145be3f7c6a18dac 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3314,7 +3314,9 @@ unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
 
 bool hvm_check_cpuid_faulting(struct vcpu *v)
 {
-    if ( !v->arch.cpuid_faulting )
+    const struct msr_vcpu_policy *vp = v->arch.msr;
+
+    if ( !vp->misc_features_enables.cpuid_faulting )
         return false;
 
     return hvm_get_cpl(v) > 0;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 9cb6279aecc4eabedf3880982337a9b2e4ca33a2..a25d4540735107d701ab301ca9482b63e7efc218 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2913,7 +2913,7 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
 
     case MSR_INTEL_MISC_FEATURES_ENABLES:
         *msr_content = 0;
-        if ( current->arch.cpuid_faulting )
+        if ( current->arch.msr->misc_features_enables.cpuid_faulting )
             *msr_content |= MSR_MISC_FEATURES_CPUID_FAULTING;
         break;
 
@@ -3145,15 +3145,17 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
 
     case MSR_INTEL_MISC_FEATURES_ENABLES:
     {
-        bool old_cpuid_faulting = v->arch.cpuid_faulting;
+        struct msr_vcpu_policy *vp = v->arch.msr;
+        bool old_cpuid_faulting = vp->misc_features_enables.cpuid_faulting;
 
         if ( msr_content & ~MSR_MISC_FEATURES_CPUID_FAULTING )
             goto gp_fault;
 
-        v->arch.cpuid_faulting = msr_content & MSR_MISC_FEATURES_CPUID_FAULTING;
+        vp->misc_features_enables.cpuid_faulting =
+            msr_content & MSR_MISC_FEATURES_CPUID_FAULTING;
 
         if ( cpu_has_cpuid_faulting &&
-             (old_cpuid_faulting ^ v->arch.cpuid_faulting) )
+             (old_cpuid_faulting ^ vp->misc_features_enables.cpuid_faulting) )
             ctxt_switch_levelling(v);
         break;
     }
diff --git a/xen/arch/x86/pv/emul-inv-op.c b/xen/arch/x86/pv/emul-inv-op.c
index 415d294c5307383129c94b7c93a15229bbc0b8d0..f8944170d51d7a48b92826d5f4b10df97ae537f9 100644
--- a/xen/arch/x86/pv/emul-inv-op.c
+++ b/xen/arch/x86/pv/emul-inv-op.c
@@ -66,6 +66,7 @@ static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
     char sig[5], instr[2];
     unsigned long eip, rc;
     struct cpuid_leaf res;
+    const struct msr_vcpu_policy *vp = current->arch.msr;
 
     eip = regs->rip;
 
@@ -89,7 +90,8 @@ static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
         return 0;
 
     /* If cpuid faulting is enabled and CPL>0 inject a #GP in place of #UD. */
-    if ( current->arch.cpuid_faulting && !guest_kernel_mode(current, regs) )
+    if ( vp->misc_features_enables.cpuid_faulting &&
+         !guest_kernel_mode(current, regs) )
     {
         regs->rip = eip;
         pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index b5599c186993ccab0b57d0a694d1af5224488abd..f0fa43da5330e9735c7a7f1bb6c47f22b0e98ae4 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -955,7 +955,7 @@ static int read_msr(unsigned int reg, uint64_t *val,
              rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES, *val) )
             break;
         *val = 0;
-        if ( curr->arch.cpuid_faulting )
+        if ( curr->arch.msr->misc_features_enables.cpuid_faulting )
             *val |= MSR_MISC_FEATURES_CPUID_FAULTING;
         return X86EMUL_OKAY;
 
@@ -1161,7 +1161,8 @@ static int write_msr(unsigned int reg, uint64_t val,
         if ( (val & MSR_MISC_FEATURES_CPUID_FAULTING) &&
              !this_cpu(cpuid_faulting_enabled) )
             break;
-        curr->arch.cpuid_faulting = !!(val & MSR_MISC_FEATURES_CPUID_FAULTING);
+        curr->arch.msr->misc_features_enables.cpuid_faulting =
+            !!(val & MSR_MISC_FEATURES_CPUID_FAULTING);
         return X86EMUL_OKAY;
 
     case MSR_P6_PERFCTR(0) ... MSR_P6_PERFCTR(7):
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 028bbd2ed3b719ffb209bf23c8ee8fc541732940..57da0fc0b6b9c3eb8de77f23f5efe8ef45ac9372 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -557,9 +557,6 @@ struct arch_vcpu
      * and thus should be saved/restored. */
     bool_t nonlazy_xstate_used;
 
-    /* Has the guest enabled CPUID faulting? */
-    bool cpuid_faulting;
-
     /*
      * The SMAP check policy when updating runstate_guest(v) and the
      * secondary system time.