xenbits.xensource.com Git - xen.git/commitdiff
x86/msr: Virtualise MSR_FLUSH_CMD for guests
author Andrew Cooper <andrew.cooper3@citrix.com>
Fri, 13 Apr 2018 15:34:01 +0000 (15:34 +0000)
committer Andrew Cooper <andrew.cooper3@citrix.com>
Tue, 14 Aug 2018 16:39:57 +0000 (17:39 +0100)
Guests (outside of the nested virt case, which isn't supported yet) don't need
L1D_FLUSH for their L1TF mitigations, but offering/emulating MSR_FLUSH_CMD is
easy and doesn't pose an issue for Xen.

The MSR is offered to HVM guests only.  PV guests attempting to use it would
trap for emulation, and the L1D cache would fill long before the return to
guest context.  As such, PV guests can't make any use of the L1D_FLUSH
functionality.

This is part of XSA-273 / CVE-2018-3646.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
(cherry picked from commit fd9823faf9df057a69a9a53c2e100691d3f4267c)

xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/vmx/vmcs.c
xen/arch/x86/traps.c

index 873a3fe404f4570576e892d2b7bc461912a54630..702dd1a87240b7a70b4bd0dbb5a12438e186167e 100644 (file)
@@ -4829,6 +4829,7 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         break;
 
     case MSR_PRED_CMD:
+    case MSR_FLUSH_CMD:
         /* Write-only */
         goto gp_fault;
 
@@ -5019,6 +5020,17 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
         wrmsrl(MSR_PRED_CMD, msr_content);
         break;
 
+    case MSR_FLUSH_CMD:
+        hvm_cpuid(7, NULL, NULL, NULL, &edx);
+        if ( !(edx & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) )
+            goto gp_fault; /* MSR available? */
+
+        if ( msr_content & ~FLUSH_CMD_L1D )
+            goto gp_fault; /* Rsvd bit set? */
+
+        wrmsrl(MSR_FLUSH_CMD, msr_content);
+        break;
+
     case MSR_ARCH_CAPABILITIES:
         /* Read-only */
         goto gp_fault;
index 732b7e08ea447482f329b961cd433f2bfa59edac..e99c0a54a24813fef586881707707cfbc12ce7f1 100644 (file)
@@ -1777,6 +1777,14 @@ void vmx_do_resume(struct vcpu *v)
             vmx_enable_intercept_for_msr(v, MSR_PRED_CMD,
                                          MSR_TYPE_R | MSR_TYPE_W);
 
+        /* MSR_FLUSH_CMD is safe to pass through if the guest knows about it. */
+        if ( (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) )
+            vmx_disable_intercept_for_msr(v, MSR_FLUSH_CMD,
+                                          MSR_TYPE_R | MSR_TYPE_W);
+        else
+            vmx_enable_intercept_for_msr(v, MSR_FLUSH_CMD,
+                                         MSR_TYPE_R | MSR_TYPE_W);
+
         v->arch.flags |= TF_launched;
     }
 
index 598a7a715196e0cf395066632c173c2c2efc2961..58450558241cb0d8f6d3e5cd64cc79a50825dfdf 100644 (file)
@@ -2769,6 +2769,10 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             wrmsrl(MSR_PRED_CMD, msr_content);
             break;
 
+        case MSR_FLUSH_CMD:
+            /* Not available to PV guests. */
+            break;
+
         case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
         case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
         case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
@@ -2900,6 +2904,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             break;
 
         case MSR_PRED_CMD:
+        case MSR_FLUSH_CMD:
             /* Write-only */
             goto fail;