thunk == THUNK_RETPOLINE ? "RETPOLINE" :
thunk == THUNK_LFENCE ? "LFENCE" :
thunk == THUNK_JMP ? "JMP" : "?",
- boot_cpu_has(X86_FEATURE_XEN_IBRS_SET) ? " IBRS+" :
- boot_cpu_has(X86_FEATURE_XEN_IBRS_CLEAR) ? " IBRS-" : "",
+ boot_cpu_has(X86_FEATURE_SC_MSR) ?
+ default_xen_spec_ctrl & SPEC_CTRL_IBRS ? " IBRS+" :
+ " IBRS-" : "",
opt_ibpb ? " IBPB" : "",
boot_cpu_has(X86_FEATURE_RSB_NATIVE) ? " RSB_NATIVE" : "",
boot_cpu_has(X86_FEATURE_RSB_VMEXIT) ? " RSB_VMEXIT" : "");
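
For reference, a stand-alone sketch (not Xen code) of how the reworked reporting logic picks between " IBRS+" and " IBRS-": the single SC_MSR feature now only says whether Xen manages MSR_SPEC_CTRL at all, and default_xen_spec_ctrl carries the value.

#include <stdbool.h>
#include <stdio.h>

#define SPEC_CTRL_IBRS (1u << 0)   /* IBRS is bit 0 of MSR_SPEC_CTRL */

static const char *ibrs_report(bool sc_msr, unsigned int default_xen_spec_ctrl)
{
    return sc_msr ? (default_xen_spec_ctrl & SPEC_CTRL_IBRS ? " IBRS+"
                                                            : " IBRS-")
                  : "";
}

int main(void)
{
    printf("Xen settings:%s\n", ibrs_report(true, SPEC_CTRL_IBRS)); /* " IBRS+" */
    printf("Xen settings:%s\n", ibrs_report(true, 0));              /* " IBRS-" */
    printf("Xen settings:%s\n", ibrs_report(false, 0));             /* "" */
    return 0;
}
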
* need the IBRS entry/exit logic to virtualise IBRS support for
* guests.
*/
+ setup_force_cpu_cap(X86_FEATURE_SC_MSR);
+
if ( ibrs )
- {
default_xen_spec_ctrl |= SPEC_CTRL_IBRS;
- setup_force_cpu_cap(X86_FEATURE_XEN_IBRS_SET);
- }
- else
- setup_force_cpu_cap(X86_FEATURE_XEN_IBRS_CLEAR);
default_spec_ctrl_flags |= SCF_ist_wrmsr;
}
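
Restated as a toy model: SC_MSR is now forced whenever the hardware offers MSR_SPEC_CTRL (IBRSB), independently of whether the admin wants IBRS set while in Xen; that choice only determines the value stored in default_xen_spec_ctrl. The scaffolding below is illustrative, not the Xen function (the SCF_ist_wrmsr line is left out of the sketch).

#include <stdbool.h>
#include <stdio.h>

#define SPEC_CTRL_IBRS (1u << 0)

static bool sc_msr;                        /* models X86_FEATURE_SC_MSR */
static unsigned int default_xen_spec_ctrl; /* value Xen runs with */

static void select_mitigations(bool has_ibrsb, bool ibrs)
{
    if ( !has_ibrsb )
        return;                  /* no MSR_SPEC_CTRL, nothing to manage */

    sc_msr = true;               /* entry/exit logic patched in either way */
    if ( ibrs )
        default_xen_spec_ctrl |= SPEC_CTRL_IBRS;
}

int main(void)
{
    select_mitigations(true, false);
    printf("SC_MSR=%d, default_xen_spec_ctrl=%#x\n",
           sc_msr, default_xen_spec_ctrl);   /* SC_MSR=1, 0x0 */
    return 0;
}
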
XEN_CPUFEATURE(IND_THUNK_LFENCE,(FSCAPINTS+0)*32+13) /* Use IND_THUNK_LFENCE */
XEN_CPUFEATURE(IND_THUNK_JMP, (FSCAPINTS+0)*32+14) /* Use IND_THUNK_JMP */
XEN_CPUFEATURE(XEN_IBPB, (FSCAPINTS+0)*32+15) /* IBRSB || IBPB */
-XEN_CPUFEATURE(XEN_IBRS_SET, (FSCAPINTS+0)*32+16) /* IBRSB && IBRS set in Xen */
-XEN_CPUFEATURE(XEN_IBRS_CLEAR, (FSCAPINTS+0)*32+17) /* IBRSB && IBRS clear in Xen */
+XEN_CPUFEATURE(SC_MSR, (FSCAPINTS+0)*32+16) /* MSR_SPEC_CTRL used by Xen */
XEN_CPUFEATURE(RSB_NATIVE, (FSCAPINTS+0)*32+18) /* RSB overwrite needed for native */
XEN_CPUFEATURE(RSB_VMEXIT, (FSCAPINTS+0)*32+19) /* RSB overwrite needed for vmexit */
XEN_CPUFEATURE(NO_XPTI, (FSCAPINTS+0)*32+20) /* XPTI mitigation not in use */
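
Note the numbering: SC_MSR reuses XEN_IBRS_SET's bit 16 and bit 17 (the old XEN_IBRS_CLEAR slot) is left vacant, while RSB_NATIVE onwards keep their positions. A small sketch of the synthetic-bit arithmetic; the FSCAPINTS value here is a placeholder, not taken from the source.

#include <stdio.h>

#define FSCAPINTS 9   /* placeholder; the real count comes from the featureset */
#define X86_FEATURE_SC_MSR ((FSCAPINTS + 0) * 32 + 16)

int main(void)
{
    printf("SC_MSR lives in word %d, bit %d\n",
           X86_FEATURE_SC_MSR / 32, X86_FEATURE_SC_MSR % 32); /* word 9, bit 16 */
    return 0;
}
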
barrier();
info->spec_ctrl_flags |= SCF_use_shadow;
barrier();
- asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_XEN_IBRS_SET)
+ asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_SC_MSR)
:: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" );
}
/* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */
static always_inline void spec_ctrl_exit_idle(struct cpu_info *info)
{
- uint32_t val = SPEC_CTRL_IBRS;
+ uint32_t val = info->xen_spec_ctrl;
/*
* Disable shadowing before updating the MSR.  There are no SMP issues
* here; only local processor ordering concerns.
*/
info->spec_ctrl_flags &= ~SCF_use_shadow;
barrier();
- asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_XEN_IBRS_SET)
+ asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_SC_MSR)
:: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" );
}
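
A plain-C model of the two idle-path functions and their ordering rules: the shadow value is latched before SCF_use_shadow is set (the initial store to shadow_spec_ctrl sits just above the quoted enter-idle lines), and the flag is cleared before the MSR is restored, so an NMI or interrupt landing between the steps always sees a self-consistent (shadow value, flag) pair. Field layout and the flag's bit position below are assumptions for illustration.

#include <stdint.h>

#define barrier() asm volatile ( "" ::: "memory" )  /* compiler barrier */
#define SCF_use_shadow (1u << 0)                    /* bit assumed */

struct cpu_info {
    uint32_t shadow_spec_ctrl;   /* last value the shadow protocol latched */
    uint8_t  xen_spec_ctrl;      /* value Xen wants while it runs */
    uint8_t  spec_ctrl_flags;
};

static void wrmsr_spec_ctrl(uint32_t val) { (void)val; /* stand-in for wrmsr */ }

static void enter_idle_model(struct cpu_info *info)
{
    uint32_t val = 0;

    info->shadow_spec_ctrl = val;              /* 1: latch the shadow value */
    barrier();
    info->spec_ctrl_flags |= SCF_use_shadow;   /* 2: only then enable shadowing */
    barrier();
    wrmsr_spec_ctrl(val);                      /* 3: finally drop IBRS */
}

static void exit_idle_model(struct cpu_info *info)
{
    info->spec_ctrl_flags &= ~SCF_use_shadow;  /* 1: stop trusting the shadow */
    barrier();
    wrmsr_spec_ctrl(info->xen_spec_ctrl);      /* 2: restore Xen's value */
}

int main(void)
{
    struct cpu_info info = { .xen_spec_ctrl = 1 /* SPEC_CTRL_IBRS */ };

    enter_idle_model(&info);
    exit_idle_model(&info);
    return 0;
}
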
mov %\tmp, %rsp /* Restore old %rsp */
.endm
-.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT ibrs_val:req
+.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT
/*
* Requires %rbx=current, %rsp=regs/cpuinfo
* Clobbers %rax, %rcx, %rdx
andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)
/* Load Xen's intended value. */
- mov $\ibrs_val, %eax
+ movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
wrmsr
.endm
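
The interesting change is `mov $imm` becoming `movzbl` from cpu_info: the intended MSR value is now a one-byte per-CPU field, zero-extended into %eax at entry, instead of an immediate baked into the patched blob. A stand-alone demonstration of that load (x86-only; the variable merely models the CPUINFO_xen_spec_ctrl field):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t xen_spec_ctrl = 0x01;  /* e.g. SPEC_CTRL_IBRS */
    uint32_t eax;

    /* movzbl: load the byte and zero-extend to 32 bits, as in the macro. */
    asm ( "movzbl %1, %0" : "=r" (eax) : "m" (xen_spec_ctrl) );

    printf("eax = %#x\n", eax);    /* 0x1 */
    return 0;
}
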
-.macro DO_SPEC_CTRL_ENTRY maybexen:req ibrs_val:req
+.macro DO_SPEC_CTRL_ENTRY maybexen:req
/*
* Requires %rsp=regs (also cpuinfo if !maybexen)
* Requires %r14=stack_end (if maybexen)
setnz %al
not %eax
and %al, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
+ movzbl STACK_CPUINFO_FIELD(xen_spec_ctrl)(%r14), %eax
.else
andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)
+ movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
.endif
- /* Load Xen's intended value. */
- mov $\ibrs_val, %eax
wrmsr
.endm
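
The maybexen path's setnz/not/and sequence builds a byte mask from the interrupted context's privilege level (in the full macro, ZF comes from testing the low bits of the saved %cs; that instruction is above this excerpt): interrupting a guest yields 0xfe, clearing SCF_use_shadow, while interrupting Xen yields 0xff, leaving the flags, and hence an in-progress idle shadow, untouched. In C terms:

#include <stdint.h>
#include <stdio.h>

#define SCF_use_shadow (1u << 0)   /* bit assumed for illustration */

static uint8_t entry_flags(uint16_t saved_cs, uint8_t flags)
{
    uint8_t al = (saved_cs & 3) != 0;  /* setnz %al: 1 iff from guest */

    al = ~al;                          /* not: 0xfe (guest) / 0xff (Xen) */
    return flags & al;                 /* and: clear use_shadow iff guest */
}

int main(void)
{
    printf("%#x\n", entry_flags(3, SCF_use_shadow));  /* 0x0: from guest */
    printf("%#x\n", entry_flags(0, SCF_use_shadow));  /* 0x1: from Xen */
    return 0;
}
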
/* Use after a VMEXIT from an HVM guest. */
#define SPEC_CTRL_ENTRY_FROM_VMEXIT \
ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_RSB_VMEXIT; \
- ALTERNATIVE_2 "", \
- __stringify(DO_SPEC_CTRL_ENTRY_FROM_VMEXIT \
- ibrs_val=SPEC_CTRL_IBRS), \
- X86_FEATURE_XEN_IBRS_SET, \
- __stringify(DO_SPEC_CTRL_ENTRY_FROM_VMEXIT \
- ibrs_val=0), \
- X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE "", DO_SPEC_CTRL_ENTRY_FROM_VMEXIT, \
+ X86_FEATURE_SC_MSR
/* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */
#define SPEC_CTRL_ENTRY_FROM_PV \
ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE; \
- ALTERNATIVE_2 "", \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=0 \
- ibrs_val=SPEC_CTRL_IBRS), \
- X86_FEATURE_XEN_IBRS_SET, \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=0 ibrs_val=0), \
- X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE "", __stringify(DO_SPEC_CTRL_ENTRY maybexen=0), \
+ X86_FEATURE_SC_MSR
/* Use in interrupt/exception context. May interrupt Xen or PV context. */
#define SPEC_CTRL_ENTRY_FROM_INTR \
ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE; \
- ALTERNATIVE_2 "", \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=1 \
- ibrs_val=SPEC_CTRL_IBRS), \
- X86_FEATURE_XEN_IBRS_SET, \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=1 ibrs_val=0), \
- X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE "", __stringify(DO_SPEC_CTRL_ENTRY maybexen=1), \
+ X86_FEATURE_SC_MSR
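
All three entry points shrink the same way: the old ALTERNATIVE_2 needed two synthetic features because each selected a body with a different immediate baked in, whereas once the value is fetched from cpu_info at run time, a single SC_MSR-gated body serves both configurations. A sketch of the resulting split between "is the MSR managed?" and "with what value?":

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void wrmsr_spec_ctrl(uint32_t val) { printf("wrmsr %#x\n", val); }

/* Old: the feature bit itself encoded the value (two patched variants). */
static void entry_old(bool ibrs_set, bool ibrs_clear)
{
    if ( ibrs_set )
        wrmsr_spec_ctrl(0x1 /* SPEC_CTRL_IBRS */);
    else if ( ibrs_clear )
        wrmsr_spec_ctrl(0);
}

/* New: one bit gates the code; the value is data (one patched variant). */
static void entry_new(bool sc_msr, uint8_t xen_spec_ctrl)
{
    if ( sc_msr )
        wrmsr_spec_ctrl(xen_spec_ctrl);
}

int main(void)
{
    entry_old(false, true);   /* wrmsr 0 */
    entry_new(true, 0);       /* wrmsr 0 */
    return 0;
}
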
/* Use when exiting to Xen context. */
#define SPEC_CTRL_EXIT_TO_XEN \
- ALTERNATIVE_2 "", \
- DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_XEN_IBRS_SET, \
- DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE "", \
+ DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_SC_MSR
/* Use when exiting to guest context. */
#define SPEC_CTRL_EXIT_TO_GUEST \
- ALTERNATIVE_2 "", \
- DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_XEN_IBRS_SET, \
- DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE "", \
+ DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
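
The exit-path defines were already degenerate: ALTERNATIVE_2 listed the same body under both XEN_IBRS_SET and XEN_IBRS_CLEAR, meaning "patch in if either bit is set". With a single SC_MSR bit that idiom disappears. A one-line model of the predicate change:

#include <stdbool.h>

/* Old exit-path patch-in condition: either of two bits. */
static inline bool patch_in_old(bool ibrs_set, bool ibrs_clear)
{
    return ibrs_set || ibrs_clear;
}

/* New condition: the single SC_MSR bit carries the same meaning. */
static inline bool patch_in_new(bool sc_msr)
{
    return sc_msr;
}

int main(void)
{
    return patch_in_old(false, true) == patch_in_new(true) ? 0 : 1;
}
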
/* TODO: Drop these when the alternatives infrastructure is NMI/#MC safe. */
.macro SPEC_CTRL_ENTRY_FROM_INTR_IST