error = 0;
ci = get_cpu_info();
- /* Avoid NMI/#MC using MSR_SPEC_CTRL until we've reloaded microcode. */
- ci->spec_ctrl_flags &= ~SCF_ist_wrmsr;
+ /* Avoid NMI/#MC using unsafe MSRs until we've reloaded microcode. */
+ ci->spec_ctrl_flags &= ~SCF_IST_MASK;
ACPI_FLUSH_CPU_CACHE();
if ( !recheck_cpu_features(0) )
panic("Missing previously available feature(s)\n");
- /* Re-enabled default NMI/#MC use of MSR_SPEC_CTRL. */
- ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_ist_wrmsr);
+ /* Re-enable default NMI/#MC use of MSRs now that microcode is loaded. */
+ ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_IST_MASK);
if ( boot_cpu_has(X86_FEATURE_IBRSB) || boot_cpu_has(X86_FEATURE_IBRS) )
wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
}
}
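
For context: the IST entry path gates its MSR_SPEC_CTRL write on these flags,
which is what makes an NMI/#MC taken mid-resume safe. Roughly, in C (a sketch
only; the real guard is the SPEC_CTRL_ENTRY_FROM_INTR_IST assembly macro):

    /* Sketch: illustrative C rendering of the assembly IST guard. */
    struct cpu_info *ci = get_cpu_info();

    if ( ci->spec_ctrl_flags & SCF_ist_wrmsr )
        wrmsrl(MSR_SPEC_CTRL, ci->xen_spec_ctrl);
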
- /* Update the top-of-stack block with the VERW disposition. */
- info->spec_ctrl_flags &= ~SCF_verw;
- if ( nextd->arch.verw )
- info->spec_ctrl_flags |= SCF_verw;
+ /* Update the top-of-stack block with the new spec_ctrl settings. */
+ info->spec_ctrl_flags =
+ (info->spec_ctrl_flags & ~SCF_DOM_MASK) |
+ (nextd->arch.spec_ctrl_flags & SCF_DOM_MASK);
}
sched_context_switched(prev, next);
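
With SCF_DOM_MASK currently containing only SCF_verw, the masked merge above
reduces to the logic it replaces; spelled out for clarity (a sketch of the
equivalence):

    /* Before this patch: */
    info->spec_ctrl_flags &= ~SCF_verw;
    if ( nextd->arch.verw )
        info->spec_ctrl_flags |= SCF_verw;

    /* After, with SCF_DOM_MASK == SCF_verw, the same masked merge: */
    info->spec_ctrl_flags =
        (info->spec_ctrl_flags & ~SCF_verw) |
        (nextd->arch.spec_ctrl_flags & SCF_verw);
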
{
bool pv = is_pv_domain(d);
- d->arch.verw =
- (pv ? opt_md_clear_pv : opt_md_clear_hvm) ||
- (opt_fb_clear_mmio && is_iommu_enabled(d));
+ bool verw = ((pv ? opt_md_clear_pv : opt_md_clear_hvm) ||
+ (opt_fb_clear_mmio && is_iommu_enabled(d)));
+
+ d->arch.spec_ctrl_flags =
+ (verw ? SCF_verw : 0) |
+ 0;
}
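
The trailing "| 0;" leaves room to OR in further per-domain bits without
reshuffling lines; a future flag would slot in as another conditional term
(SCF_example and the example bool below are illustrative, not real defines):

    d->arch.spec_ctrl_flags =
        (verw    ? SCF_verw    : 0) |
        (example ? SCF_example : 0) |   /* hypothetical future bit */
        0;
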
void __init init_speculation_mitigations(void)
uint32_t pci_cf8;
uint8_t cmos_idx;
- /* Use VERW on return-to-guest for its flushing side effect. */
- bool verw;
+ uint8_t spec_ctrl_flags; /* See SCF_DOM_MASK */
union {
struct pv_domain pv;
#ifndef __X86_SPEC_CTRL_H__
#define __X86_SPEC_CTRL_H__
-/* Encoding of cpuinfo.spec_ctrl_flags */
+/*
+ * Encoding of:
+ * cpuinfo.spec_ctrl_flags
+ * default_spec_ctrl_flags
+ * domain.spec_ctrl_flags
+ *
+ * Live settings are in the top-of-stack block, because they need to be
+ * accessible when XPTI is active. Some settings are fixed from boot, some
+ * context switched per domain, and some inhibited in the S3 path.
+ */
#define SCF_use_shadow (1 << 0)
#define SCF_ist_wrmsr (1 << 1)
#define SCF_ist_rsb (1 << 2)
#define SCF_verw (1 << 3)
+/*
+ * The IST paths (NMI/#MC) can interrupt any arbitrary context. Some
+ * functionality requires updated microcode to work.
+ *
+ * On boot, this is easy; we load microcode before figuring out which
+ * speculative protections to apply. However, on the S3 resume path, we must
+ * be able to disable the configured mitigations until microcode is reloaded.
+ *
+ * These are the controls to inhibit on the S3 resume path until microcode has
+ * been reloaded.
+ */
+#define SCF_IST_MASK (SCF_ist_wrmsr)
+
+/*
+ * Some speculative protections are per-domain. These settings are merged
+ * into the top-of-stack block in the context switch path.
+ */
+#define SCF_DOM_MASK (SCF_verw)
+
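
For reference, the two consumers of these masks introduced by this patch, both
visible in the hunks above (a condensed sketch, not new logic):

    /* S3 suspend: inhibit IST-unsafe flags until microcode is reloaded. */
    ci->spec_ctrl_flags &= ~SCF_IST_MASK;

    /* S3 resume: restore the boot-time defaults. */
    ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_IST_MASK);

    /* Context switch: adopt the incoming domain's per-domain bits. */
    info->spec_ctrl_flags =
        (info->spec_ctrl_flags & ~SCF_DOM_MASK) |
        (nextd->arch.spec_ctrl_flags & SCF_DOM_MASK);
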
#ifndef __ASSEMBLY__
#include <asm/alternative.h>
/*
* Use in IST interrupt/exception context. May interrupt Xen or PV context.
- * Fine grain control of SCF_ist_wrmsr is needed for safety in the S3 resume
- * path to avoid using MSR_SPEC_CTRL before the microcode introducing it has
- * been reloaded.
*/
.macro SPEC_CTRL_ENTRY_FROM_INTR_IST
/*