{
if ( opt_msr_sc_pv )
{
- default_spec_ctrl_flags |= SCF_ist_wrmsr;
+ default_spec_ctrl_flags |= SCF_ist_sc_msr;
setup_force_cpu_cap(X86_FEATURE_SC_MSR_PV);
}
* Xen's value is not restored atomically. An early NMI hitting
* the VMExit path needs to restore Xen's value for safety.
*/
- default_spec_ctrl_flags |= SCF_ist_wrmsr;
+ default_spec_ctrl_flags |= SCF_ist_sc_msr;
setup_force_cpu_cap(X86_FEATURE_SC_MSR_HVM);
}
}
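
For context on why the HVM side needs this flag at all: an NMI or #MC taken on the VMExit path can run after the guest's MSR_SPEC_CTRL value is live but before Xen's has been written back, so the IST entry logic must redo the restore itself. Below is a minimal C sketch of that recovery, not part of the patch; the helper name and the cpu_info fields used (spec_ctrl_flags, xen_spec_ctrl) are assumptions for illustration.

    /*
     * Illustrative sketch only: what the IST entry path does when
     * SCF_ist_sc_msr is set.  Helper and field names are assumed.
     */
    static void ist_restore_xen_spec_ctrl(struct cpu_info *info)
    {
        if ( info->spec_ctrl_flags & SCF_ist_sc_msr )
        {
            /* Stop deferring to the guest's shadow value ... */
            info->spec_ctrl_flags &= ~SCF_use_shadow;

            /* ... and put Xen's choice of MSR_SPEC_CTRL back in place. */
            wrmsrl(MSR_SPEC_CTRL, info->xen_spec_ctrl);
        }
    }
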
* on real hardware matches the availability of MSR_SPEC_CTRL in the
* first place.
*
- * No need for SCF_ist_wrmsr because Xen's value is restored
+ * No need for SCF_ist_sc_msr because Xen's value is restored
* atomically WRT NMIs in the VMExit path.
*
* TODO: Adjust cpu_has_svm_spec_ctrl to be usable earlier on boot.
* context switched per domain, and some inhibited in the S3 path.
*/
#define SCF_use_shadow (1 << 0)
-#define SCF_ist_wrmsr (1 << 1)
+#define SCF_ist_sc_msr (1 << 1)
#define SCF_ist_rsb (1 << 2)
#define SCF_verw (1 << 3)
* These are the controls to inhibit on the S3 resume path until microcode has
* been reloaded.
*/
-#define SCF_IST_MASK (SCF_ist_wrmsr)
+#define SCF_IST_MASK (SCF_ist_sc_msr)
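
Grouping the IST-relevant controls behind one mask is what lets the S3 path inhibit them in a single operation and re-enable them once microcode is back. A rough sketch of that use, not taken from the patch; the helper name is an assumption, while default_spec_ctrl_flags is the variable adjusted in the hunks above.

    /*
     * Illustrative sketch only: inhibit the IST-path controls across
     * S3 until microcode has been reloaded.  Helper name is assumed.
     */
    static void s3_spec_ctrl(struct cpu_info *info, bool resuming)
    {
        if ( resuming )
            /* Microcode reloaded: re-enable the IST-path controls. */
            info->spec_ctrl_flags |= default_spec_ctrl_flags & SCF_IST_MASK;
        else
            /* Going to sleep: inhibit them until microcode is back. */
            info->spec_ctrl_flags &= ~SCF_IST_MASK;
    }
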
/*
* Some speculative protections are per-domain. These settings are merged
.L\@_skip_rsb:
- test $SCF_ist_wrmsr, %al
- jz .L\@_skip_wrmsr
+ test $SCF_ist_sc_msr, %al
+ jz .L\@_skip_msr_spec_ctrl
xor %edx, %edx
testb $3, UREGS_cs(%rsp)
* to speculate around the WRMSR. As a result, we need a dispatch
* serialising instruction in the else clause.
*/
-.L\@_skip_wrmsr:
+.L\@_skip_msr_spec_ctrl:
lfence
UNLIKELY_END(\@_serialise)
.endm
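
The comment above is the key constraint in this hunk: whichever way the SCF_ist_sc_msr test goes, later instructions must not be dispatched speculatively under the wrong MSR_SPEC_CTRL value. The taken path ends in WRMSR, which serialises; the skip path needs an explicit LFENCE. A C rendering of the same shape, purely illustrative and simplified (it omits the shadow-flag handling, and the cpu_info field names are assumptions).

    /*
     * Illustrative sketch only: both halves of the branch end in
     * something that stops speculative dispatch.  WRMSR serialises;
     * the skip path needs an explicit LFENCE.
     */
    static void ist_spec_ctrl_sketch(struct cpu_info *info)
    {
        if ( info->spec_ctrl_flags & SCF_ist_sc_msr )
            wrmsrl(MSR_SPEC_CTRL, info->xen_spec_ctrl); /* serialising */
        else
            asm volatile ( "lfence" ::: "memory" );     /* dispatch-serialising */
    }
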
* Requires %rbx=stack_end
* Clobbers %rax, %rcx, %rdx
*/
- testb $SCF_ist_wrmsr, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%rbx)
+ testb $SCF_ist_sc_msr, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%rbx)
jz .L\@_skip
DO_SPEC_CTRL_EXIT_TO_XEN