xenbits.xensource.com Git - people/royger/xen.git/commitdiff
VMX: correct feature checks for MPX and XSAVES
author: Jan Beulich <jbeulich@suse.com>
Wed, 7 Sep 2016 10:34:43 +0000 (12:34 +0200)
committer: Jan Beulich <jbeulich@suse.com>
Wed, 7 Sep 2016 10:34:43 +0000 (12:34 +0200)
Their VMCS fields aren't tied to the respective base CPU feature flags
but instead to VMX specific ones.

Note that while the VMCS GUEST_BNDCFGS field exists if either of the
two respective features is available, MPX continues to get exposed to
guests only with both features present.

Also add the so far missing handling of
- GUEST_BNDCFGS in construct_vmcs()
- MSR_IA32_BNDCFGS in vmx_msr_{read,write}_intercept()
and mirror the extra correctness checks during MSR write to
vmx_load_msr().

Reported-by: "Rockosov, Dmitry" <dmitry.rockosov@intel.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Tested-by: "Rockosov, Dmitry" <dmitry.rockosov@intel.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
xen/arch/x86/cpuid.c
xen/arch/x86/hvm/vmx/vmcs.c
xen/arch/x86/hvm/vmx/vmx.c
xen/include/asm-x86/hvm/vmx/vmcs.h
xen/include/asm-x86/msr-index.h

index 38e34bdf9e0238c7540bc75a2642f2646ff51383..63b2db99b86475deac6822946c2c1356d36d9d74 100644 (file)
@@ -168,8 +168,7 @@ static void __init calculate_hvm_featureset(void)
      */
     if ( cpu_has_vmx )
     {
-        if ( !(vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) ||
-             !(vmx_vmentry_control & VM_ENTRY_LOAD_BNDCFGS) )
+        if ( !cpu_has_vmx_mpx )
             __clear_bit(X86_FEATURE_MPX, hvm_featureset);
 
         if ( !cpu_has_vmx_xsaves )
index 1bd875ad08caba5ca16ead5fec59770d57926036..0995496b09ee3ca9ca6e69b72da32e28cf68c2ec 100644 (file)
@@ -1261,6 +1261,8 @@ static int construct_vmcs(struct vcpu *v)
         __vmwrite(HOST_PAT, host_pat);
         __vmwrite(GUEST_PAT, guest_pat);
     }
+    if ( cpu_has_vmx_mpx )
+        __vmwrite(GUEST_BNDCFGS, 0);
     if ( cpu_has_vmx_xsaves )
         __vmwrite(XSS_EXIT_BITMAP, 0);
 
index bb7a329ab8f9001b8b3af8c2cd733c98bfd623eb..306f48271aab59df766d71391c6c145166e2770d 100644 (file)
@@ -788,14 +788,15 @@ static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
 
 static unsigned int __init vmx_init_msr(void)
 {
-    return !!cpu_has_mpx + !!cpu_has_xsaves;
+    return (cpu_has_mpx && cpu_has_vmx_mpx) +
+           (cpu_has_xsaves && cpu_has_vmx_xsaves);
 }
 
 static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
 {
     vmx_vmcs_enter(v);
 
-    if ( cpu_has_mpx )
+    if ( cpu_has_mpx && cpu_has_vmx_mpx )
     {
         __vmread(GUEST_BNDCFGS, &ctxt->msr[ctxt->count].val);
         if ( ctxt->msr[ctxt->count].val )
@@ -804,7 +805,7 @@ static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
 
     vmx_vmcs_exit(v);
 
-    if ( cpu_has_xsaves )
+    if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
     {
         ctxt->msr[ctxt->count].val = v->arch.hvm_vcpu.msr_xss;
         if ( ctxt->msr[ctxt->count].val )
@@ -824,13 +825,15 @@ static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
         switch ( ctxt->msr[i].index )
         {
         case MSR_IA32_BNDCFGS:
-            if ( cpu_has_mpx )
+            if ( cpu_has_mpx && cpu_has_vmx_mpx &&
+                 is_canonical_address(ctxt->msr[i].val) &&
+                 !(ctxt->msr[i].val & IA32_BNDCFGS_RESERVED) )
                 __vmwrite(GUEST_BNDCFGS, ctxt->msr[i].val);
             else if ( ctxt->msr[i].val )
                 err = -ENXIO;
             break;
         case MSR_IA32_XSS:
-            if ( cpu_has_xsaves )
+            if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
                 v->arch.hvm_vcpu.msr_xss = ctxt->msr[i].val;
             else
                 err = -ENXIO;
@@ -2647,6 +2650,11 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     case MSR_IA32_DEBUGCTLMSR:
         __vmread(GUEST_IA32_DEBUGCTL, msr_content);
         break;
+    case MSR_IA32_BNDCFGS:
+        if ( !cpu_has_mpx || !cpu_has_vmx_mpx )
+            goto gp_fault;
+        __vmread(GUEST_BNDCFGS, msr_content);
+        break;
     case MSR_IA32_FEATURE_CONTROL:
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_VMFUNC:
         if ( !nvmx_msr_read_intercept(msr, msr_content) )
@@ -2873,6 +2881,13 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
 
         break;
     }
+    case MSR_IA32_BNDCFGS:
+        if ( !cpu_has_mpx || !cpu_has_vmx_mpx ||
+             !is_canonical_address(msr_content) ||
+             (msr_content & IA32_BNDCFGS_RESERVED) )
+            goto gp_fault;
+        __vmwrite(GUEST_BNDCFGS, msr_content);
+        break;
     case MSR_IA32_FEATURE_CONTROL:
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_TRUE_ENTRY_CTLS:
         if ( !nvmx_msr_write_intercept(msr, msr_content) )
index 1e33d9c19541370f676f783d1a48ef81f9b5069d..997f4f55fa78dce8ffad518d302ba1b7cfc98579 100644 (file)
@@ -375,6 +375,9 @@ extern u64 vmx_ept_vpid_cap;
     (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS)
 #define cpu_has_vmx_pml \
     (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_PML)
+#define cpu_has_vmx_mpx \
+    ((vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) && \
+     (vmx_vmentry_control & VM_ENTRY_LOAD_BNDCFGS))
 #define cpu_has_vmx_xsaves \
     (vmx_secondary_exec_control & SECONDARY_EXEC_XSAVES)
 #define cpu_has_vmx_tsc_scaling \
index 1095c819908244d37fa75fb6a2b97c742dac89ae..deb82a771c1d9d451d306e3ea71761ceb6ead7e8 100644 (file)
 #define MSR_IA32_DS_AREA               0x00000600
 #define MSR_IA32_PERF_CAPABILITIES     0x00000345
 
-#define MSR_IA32_BNDCFGS               0x00000D90
+#define MSR_IA32_BNDCFGS               0x00000d90
+#define IA32_BNDCFGS_ENABLE            0x00000001
+#define IA32_BNDCFGS_PRESERVE          0x00000002
+#define IA32_BNDCFGS_RESERVED          0x00000ffc
 
 #define MSR_IA32_XSS                   0x00000da0