In some cases, writes to MSR_SPEC_CTRL do not have interesting side effects,
and we should implement lazy context switching like we do with other MSRs.

In the short term, this will be used by the SVM infrastructure, but I expect
to extend it to other contexts in due course.

Introduce cpu_info.last_spec_ctrl for the purpose, and cache writes made from
the boot/resume paths. The value can't live in regular per-cpu data because,
when it is eventually used for PV guests, XPTI might be active.
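
For illustration only (not part of this patch), a future consumer such as the
SVM entry path could use the cached value to skip a redundant write when it
has no interesting side effects; the helper name below is hypothetical:

    static void lazy_write_spec_ctrl(unsigned int val)
    {
        struct cpu_info *info = get_cpu_info();

        /* Skip the WRMSR entirely when the MSR already holds this value. */
        if ( val != info->last_spec_ctrl )
        {
            wrmsrl(MSR_SPEC_CTRL, val);
            info->last_spec_ctrl = val;
        }
    }
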
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
(cherry picked from commit 00f2992b6c7a9d4090443c1a85bf83224a87eeb9)
ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_ist_wrmsr);
if ( boot_cpu_has(X86_FEATURE_IBRSB) )
+ {
wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
+ ci->last_spec_ctrl = default_xen_spec_ctrl;
+ }
if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
wrmsrl(MSR_MCU_OPT_CTRL, default_xen_mcu_opt_ctrl);
if ( bsp_delay_spec_ctrl )
{
- get_cpu_info()->spec_ctrl_flags &= ~SCF_use_shadow;
+ struct cpu_info *info = get_cpu_info();
+
+ info->spec_ctrl_flags &= ~SCF_use_shadow;
barrier();
wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
+ info->last_spec_ctrl = default_xen_spec_ctrl;
}
/* Jump to the 1:1 virtual mappings of cpu0_stack. */
void start_secondary(void *unused)
{
+ struct cpu_info *info = get_cpu_info();
+
/*
* Dont put anything before smp_callin(), SMP booting is so fragile that we
* want to limit the things done here to the most necessary things.
* microcode.
*/
if ( boot_cpu_has(X86_FEATURE_IBRSB) )
+ {
wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
+ info->last_spec_ctrl = default_xen_spec_ctrl;
+ }
if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
wrmsrl(MSR_MCU_OPT_CTRL, default_xen_mcu_opt_ctrl);
*/
if ( has_spec_ctrl )
{
+ struct cpu_info *info = get_cpu_info();
+ unsigned int val;
+
bsp_delay_spec_ctrl = !cpu_has_hypervisor && default_xen_spec_ctrl;
/*
*/
if ( bsp_delay_spec_ctrl )
{
- struct cpu_info *info = get_cpu_info();
-
info->shadow_spec_ctrl = 0;
barrier();
info->spec_ctrl_flags |= SCF_use_shadow;
barrier();
}
- wrmsrl(MSR_SPEC_CTRL, bsp_delay_spec_ctrl ? 0 : default_xen_spec_ctrl);
+ val = bsp_delay_spec_ctrl ? 0 : default_xen_spec_ctrl;
+
+ wrmsrl(MSR_SPEC_CTRL, val);
+ info->last_spec_ctrl = val;
}
if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
/* See asm-x86/spec_ctrl_asm.h for usage. */
unsigned int shadow_spec_ctrl;
uint8_t xen_spec_ctrl;
+ uint8_t last_spec_ctrl;
uint8_t spec_ctrl_flags;
/*
*/
bool use_pv_cr3;
- unsigned long __pad;
/* get_stack_bottom() must be 16-byte aligned */
};
* steps 2 and 6 will restore the shadow value rather than leaving Xen's value
* loaded and corrupting the value used in guest context.
*
+ * Additionally, in some cases it is safe to skip writes to MSR_SPEC_CTRL when
+ * we don't require any of the side effects of an identical write. Maintain a
+ * per-cpu last_spec_ctrl value for this purpose.
+ *
* The following ASM fragments implement this algorithm. See their local
* comments for further details.
* - SPEC_CTRL_ENTRY_FROM_PV