xenbits.xensource.com Git - xen.git/commitdiff
x86/spec-ctrl: Use common MSR_SPEC_CTRL logic for AMD
author Andrew Cooper <andrew.cooper3@citrix.com>
Fri, 21 Jan 2022 15:59:03 +0000 (15:59 +0000)
committer Andrew Cooper <andrew.cooper3@citrix.com>
Tue, 12 Jul 2022 15:33:19 +0000 (16:33 +0100)
Currently, amd_init_ssbd() works by being the only writer of MSR_SPEC_CTRL in
the system.  This ceases to be true when using the common logic.

Include AMD MSR_SPEC_CTRL in has_spec_ctrl to activate the common paths, and
introduce an AMD specific block to control alternatives.  Also update the
boot/resume paths to configure default_xen_spec_ctrl.

svm.h needs an adjustment to remove a dependency on include order.

For now, only activate alternatives for HVM - PV will require more work.  No
functional change, as no alternatives are defined for HVM yet.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
(cherry picked from commit 378f2e6df31442396f0afda19794c5c6091d96f9)
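
As a rough illustration of the common-path activation described above, the
following standalone sketch probes the two CPUID bits that now both feed
has_spec_ctrl.  It is not Xen code; the bit positions (Intel leaf 7 EDX bit 26
for IBRSB, AMD leaf 0x80000008 EBX bit 14 for IBRS) are my reading of the
vendor manuals rather than anything taken from the patch.

#include <cpuid.h>
#include <stdio.h>

/*
 * Standalone sketch (not Xen code): probe the two CPUID bits which, after
 * this change, both feed has_spec_ctrl.  Bit positions are assumptions
 * based on the Intel SDM / AMD APM, not taken from the patch itself.
 */
int main(void)
{
    unsigned int eax, ebx, ecx, edx;
    int intel_ibrsb = 0, amd_ibrs = 0;

    /* Intel: CPUID.(EAX=7,ECX=0):EDX bit 26 enumerates IBRS/IBPB (X86_FEATURE_IBRSB). */
    if ( __get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) )
        intel_ibrsb = !!(edx & (1u << 26));

    /* AMD: CPUID.0x80000008:EBX bit 14 enumerates IBRS (X86_FEATURE_IBRS). */
    if ( __get_cpuid(0x80000008u, &eax, &ebx, &ecx, &edx) )
        amd_ibrs = !!(ebx & (1u << 14));

    printf("has_spec_ctrl (either vendor's enumeration): %d\n",
           intel_ibrsb || amd_ibrs);
    return 0;
}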

xen/arch/x86/acpi/power.c
xen/arch/x86/cpu/amd.c
xen/arch/x86/smpboot.c
xen/arch/x86/spec_ctrl.c
xen/include/asm-x86/hvm/svm/svm.h

diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c
index 6424a9021293782b3fb71650b251290b40cef046..df77b2ca5cfb6363c68e8e6d64f575ad3adb0374 100644
@@ -295,7 +295,7 @@ static int enter_state(u32 state)
     /* Re-enabled default NMI/#MC use of MSR_SPEC_CTRL. */
     ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_ist_wrmsr);
 
-    if ( boot_cpu_has(X86_FEATURE_IBRSB) )
+    if ( boot_cpu_has(X86_FEATURE_IBRSB) || boot_cpu_has(X86_FEATURE_IBRS) )
         wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
 
     if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index de0389810b25c19de397b194709688e5719edb45..d5dece37cdeb5ead4135a56023868ae37b5b4bfd 100644
@@ -552,7 +552,7 @@ void amd_init_ssbd(const struct cpuinfo_x86 *c)
                return;
 
        if (cpu_has_amd_ssbd) {
-               wrmsrl(MSR_SPEC_CTRL, opt_ssbd ? SPEC_CTRL_SSBD : 0);
+               /* Handled by common MSR_SPEC_CTRL logic */
                return;
        }
 
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 2d525c8b2c9cd1ed6c1afe027c12ccbdf45e8d06..91e2ae6263006dc8b089d81e013b8ac6f3fdf3e2 100644
@@ -365,7 +365,7 @@ void start_secondary(void *unused)
      * settings.  Note: These MSRs may only become available after loading
      * microcode.
      */
-    if ( boot_cpu_has(X86_FEATURE_IBRSB) )
+    if ( boot_cpu_has(X86_FEATURE_IBRSB) || boot_cpu_has(X86_FEATURE_IBRS) )
         wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
     if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
         wrmsrl(MSR_MCU_OPT_CTRL, default_xen_mcu_opt_ctrl);
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index 422714f3a2114bd642eb87b3b54112abfed3dfda..a768c57b45e082134f0860d6080ca5bc0b6b026c 100644
@@ -21,6 +21,7 @@
 #include <xen/lib.h>
 #include <xen/warning.h>
 
+#include <asm/hvm/svm/svm.h>
 #include <asm/microcode.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
@@ -932,7 +933,8 @@ void __init init_speculation_mitigations(void)
 
     hw_smt_enabled = check_smt_enabled();
 
-    has_spec_ctrl = boot_cpu_has(X86_FEATURE_IBRSB);
+    has_spec_ctrl = (boot_cpu_has(X86_FEATURE_IBRSB) ||
+                     boot_cpu_has(X86_FEATURE_IBRS));
 
     /*
      * Has the user specified any custom BTI mitigations?  If so, follow their
@@ -1013,12 +1015,32 @@ void __init init_speculation_mitigations(void)
         }
     }
 
+    /* AMD hardware: MSR_SPEC_CTRL alternatives setup. */
+    if ( boot_cpu_has(X86_FEATURE_IBRS) )
+    {
+        /*
+         * Virtualising MSR_SPEC_CTRL for guests depends on SVM support, which
+         * on real hardware matches the availability of MSR_SPEC_CTRL in the
+         * first place.
+         *
+         * No need for SCF_ist_wrmsr because Xen's value is restored
+         * atomically WRT NMIs in the VMExit path.
+         *
+         * TODO: Adjust cpu_has_svm_spec_ctrl to be usable earlier on boot.
+         */
+        if ( opt_msr_sc_hvm &&
+             (boot_cpu_data.extended_cpuid_level >= 0x8000000a) &&
+             (cpuid_edx(0x8000000a) & (1u << SVM_FEATURE_SPEC_CTRL)) )
+            setup_force_cpu_cap(X86_FEATURE_SC_MSR_HVM);
+    }
+
     /* If we have IBRS available, see whether we should use it. */
     if ( has_spec_ctrl && ibrs )
         default_xen_spec_ctrl |= SPEC_CTRL_IBRS;
 
     /* If we have SSBD available, see whether we should use it. */
-    if ( boot_cpu_has(X86_FEATURE_SSBD) && opt_ssbd )
+    if ( opt_ssbd && (boot_cpu_has(X86_FEATURE_SSBD) ||
+                      boot_cpu_has(X86_FEATURE_AMD_SSBD)) )
         default_xen_spec_ctrl |= SPEC_CTRL_SSBD;
 
     /*
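
For reference, the SVM feature probe gating X86_FEATURE_SC_MSR_HVM above can
be exercised from user space with a sketch like the one below.  The EDX bit
position (bit 20) is assumed here to correspond to SVM_FEATURE_SPEC_CTRL per
the AMD APM; treat it as an assumption, not something stated by the patch.

#include <cpuid.h>
#include <stdio.h>

/*
 * Standalone sketch of the CPUID 0x8000000a probe used above.  The bit
 * position (EDX bit 20, SpecCtrl virtualisation) is an assumption based on
 * the AMD APM, mirroring Xen's SVM_FEATURE_SPEC_CTRL.
 */
int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* Check the extended CPUID level first, as the patch does. */
    if ( __get_cpuid(0x80000000u, &eax, &ebx, &ecx, &edx) &&
         eax >= 0x8000000au &&
         __get_cpuid(0x8000000au, &eax, &ebx, &ecx, &edx) )
        printf("SVM MSR_SPEC_CTRL virtualisation: %s\n",
               (edx & (1u << 20)) ? "yes" : "no");
    else
        printf("CPUID leaf 0x8000000a not available\n");

    return 0;
}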
diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h
index 2b522ee79b177d74207b9c30893f6503370f344d..f28b40996ca05b27481890df374291ae42b2ceb6 100644
@@ -45,6 +45,9 @@ static inline void svm_invlpga(unsigned long linear, uint32_t asid)
         "a" (linear), "c" (asid));
 }
 
+struct cpu_user_regs;
+struct vcpu;
+
 unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
 void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len);
 void svm_update_guest_cr(struct vcpu *, unsigned int cr, unsigned int flags);
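
The svm.h hunk above is the usual cure for an include-order dependency:
forward-declare the struct types that are only ever used through pointers, so
the header no longer relies on some earlier #include having defined them.  A
minimal sketch of the pattern, with hypothetical names:

/* example.h -- minimal sketch of the forward-declaration pattern. */
#ifndef EXAMPLE_H
#define EXAMPLE_H

/*
 * Forward declarations: the full definitions are not needed here because
 * this header only uses pointers to these types, so it no longer depends
 * on being included after the headers that define them.
 */
struct cpu_user_regs;
struct vcpu;

void example_update_eip(struct cpu_user_regs *regs, unsigned int inst_len);
void example_update_cr(struct vcpu *v, unsigned int cr);

#endif /* EXAMPLE_H */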