x86/msr: Virtualise MSR_FLUSH_CMD for guests
author    Andrew Cooper <andrew.cooper3@citrix.com>
          Fri, 13 Apr 2018 15:34:01 +0000 (15:34 +0000)
committer Andrew Cooper <andrew.cooper3@citrix.com>
          Tue, 14 Aug 2018 16:15:42 +0000 (17:15 +0100)
Guests (outside of the nested virt case, which isn't supported yet) don't need
L1D_FLUSH for their L1TF mitigations, but offering/emulating MSR_FLUSH_CMD is
easy and doesn't pose an issue for Xen.

The MSR is offered to HVM guests only.  PV guests attempting to use it would
trap for emulation, and the L1D cache would fill long before the return to
guest context.  As such, PV guests can't make any use of the L1D_FLUSH
functionality.
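
For illustration only (not part of this patch): a guest kernel which sees the
L1D_FLUSH CPUID bit would trigger a flush roughly as below.  The Linux-style
wrmsrl() helper and the MSR_IA32_FLUSH_CMD/L1D_FLUSH constants are assumed to
come from the guest's own headers.

    /*
     * Hypothetical guest-side use of the virtualised MSR.  Bit 0 of
     * MSR_IA32_FLUSH_CMD (0x10b) requests an L1D flush; all other bits
     * are reserved and #GP if set.
     */
    static inline void flush_l1d(void)
    {
        wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
    }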

This is part of XSA-273 / CVE-2018-3646.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
(cherry picked from commit fd9823faf9df057a69a9a53c2e100691d3f4267c)

xen/arch/x86/domctl.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/msr.c
xen/include/public/arch-x86/cpufeatureset.h

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index fa82b6744e1593e648a502cd901c5bad870106fe..dd91038a67d27503574837874d6d28a6da727823 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -225,7 +225,8 @@ static int update_domain_cpuid_info(struct domain *d,
          */
         call_policy_changed = (is_hvm_domain(d) &&
                                ((old_7d0 ^ p->feat.raw[0].d) &
-                                cpufeat_mask(X86_FEATURE_IBRSB)));
+                                (cpufeat_mask(X86_FEATURE_IBRSB) |
+                                 cpufeat_mask(X86_FEATURE_L1D_FLUSH))));
         break;
 
     case 0xa:
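
For context, cpufeat_mask() maps a feature index to a bit within its 32-bit
featureset word.  A simplified sketch of the helpers (the real definitions
live in Xen's asm-x86 headers and use _AC()):

    #define cpufeat_word(idx) ((idx) / 32)
    #define cpufeat_bit(idx)  ((idx) % 32)
    #define cpufeat_mask(idx) (1u << cpufeat_bit(idx))

    /* X86_FEATURE_L1D_FLUSH is 9*32+28: word 9 (CPUID.7[0].edx, i.e. the
     * old_7d0/p->feat.raw[0].d values above), bit 28. */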
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index c7cf3a8fbca5a48a7754076ac9b0fc3955c1e335..b0fababedef5d6a551bee364a031a621567c2700 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -583,6 +583,12 @@ static void vmx_cpuid_policy_changed(struct vcpu *v)
         vmx_clear_msr_intercept(v, MSR_PRED_CMD,  VMX_MSR_RW);
     else
         vmx_set_msr_intercept(v, MSR_PRED_CMD,  VMX_MSR_RW);
+
+    /* MSR_FLUSH_CMD is safe to pass through if the guest knows about it. */
+    if ( cp->feat.l1d_flush )
+        vmx_clear_msr_intercept(v, MSR_FLUSH_CMD, VMX_MSR_RW);
+    else
+        vmx_set_msr_intercept(v, MSR_FLUSH_CMD, VMX_MSR_RW);
 }
 
 int vmx_guest_x86_mode(struct vcpu *v)
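
vmx_{set,clear}_msr_intercept() operate on the per-vCPU VMX MSR bitmap.  As a
rough conceptual sketch only (Xen's real helpers also cover the high MSR range
at 0xc0000000 and track the read and write halves separately):

    /* Conceptual sketch, not Xen's actual implementation. */
    static void clear_write_intercept(unsigned long *msr_bitmap, uint32_t msr)
    {
        /* The 4K bitmap page holds write-intercept bits for MSRs
         * 0 - 0x1fff starting at byte offset 0x800 (bit offset 0x4000). */
        if ( msr < 0x2000 )
            clear_bit(0x4000 + msr, msr_bitmap);
    }

With the bit clear, a guest WRMSR to MSR_FLUSH_CMD reaches hardware directly,
without a VM exit.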
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 1e12ccb729297abdcd4dfb694bb77cc4fe8dfac0..1a591dd2b558eccb312a5a0ac136b8fadb9700a2 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -150,6 +150,7 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
     case MSR_AMD_PATCHLOADER:
     case MSR_IA32_UCODE_WRITE:
     case MSR_PRED_CMD:
+    case MSR_FLUSH_CMD:
         /* Write-only */
         goto gp_fault;
 
@@ -254,6 +255,17 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
             wrmsrl(MSR_PRED_CMD, val);
         break;
 
+    case MSR_FLUSH_CMD:
+        if ( !cp->feat.l1d_flush )
+            goto gp_fault; /* MSR available? */
+
+        if ( val & ~FLUSH_CMD_L1D )
+            goto gp_fault; /* Rsvd bit set? */
+
+        if ( v == curr )
+            wrmsrl(MSR_FLUSH_CMD, val);
+        break;
+
     case MSR_INTEL_MISC_FEATURES_ENABLES:
     {
         bool old_cpuid_faulting = vp->misc_features_enables.cpuid_faulting;
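
To summarise the new handler, assuming the guest's policy has l1d_flush set
(the outcomes follow directly from the checks in the hunk above):

    /*
     * wrmsr(MSR_FLUSH_CMD, FLUSH_CMD_L1D) -> L1D flushed on the current CPU
     * wrmsr(MSR_FLUSH_CMD, 0)             -> accepted; no flush performed
     * wrmsr(MSR_FLUSH_CMD, 0x2)           -> #GP (reserved bit set)
     * rdmsr(MSR_FLUSH_CMD)                -> #GP (write-only, per guest_rdmsr)
     */

The "v == curr" check suffices because the MSR carries no state: its only
effect is an immediate flush on the CPU performing the write, so a write
emulated on behalf of a not-currently-running vCPU can simply be dropped.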
diff --git a/xen/include/public/arch-x86/cpufeatureset.h b/xen/include/public/arch-x86/cpufeatureset.h
index 9f4c8246a9d80263c9a100f5b6ffb200eb84d849..6c82816fd3545d3b95408816f5884ce0314d844e 100644
--- a/xen/include/public/arch-x86/cpufeatureset.h
+++ b/xen/include/public/arch-x86/cpufeatureset.h
@@ -244,7 +244,7 @@ XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network Instructions *
 XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation Single Precision */
 XEN_CPUFEATURE(IBRSB,         9*32+26) /*A  IBRS and IBPB support (used by Intel) */
 XEN_CPUFEATURE(STIBP,         9*32+27) /*A  STIBP */
-XEN_CPUFEATURE(L1D_FLUSH,     9*32+28) /*   MSR_FLUSH_CMD and L1D flush. */
+XEN_CPUFEATURE(L1D_FLUSH,     9*32+28) /*S  MSR_FLUSH_CMD and L1D flush. */
 XEN_CPUFEATURE(ARCH_CAPS,     9*32+29) /*   IA32_ARCH_CAPABILITIES MSR */
 XEN_CPUFEATURE(SSBD,          9*32+31) /*A  MSR_SPEC_CTRL.SSBD available */
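
The annotation change is what actually exposes the feature.  Paraphrasing the
key at the top of cpufeatureset.h (see the file header for the authoritative
wording):

    /* 'A'     - exposed to all guests (PV and HVM) by default.
     * 'S'     - exposed to all HVM guests (shadow or HAP), but not PV.
     * 'H'     - exposed to HAP HVM guests only.
     * (blank) - not exposed to guests by default.
     */

Tagging L1D_FLUSH 'S' therefore matches "The MSR is offered to HVM guests
only" in the commit message above.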