if (test_bit(X86_FEATURE_IND_THUNK_JMP,
boot_cpu_data.x86_capability))
__set_bit(X86_FEATURE_IND_THUNK_JMP, c->x86_capability);
- if (test_bit(X86_FEATURE_XEN_IBRS_SET,
+ if (test_bit(X86_FEATURE_SC_MSR,
boot_cpu_data.x86_capability))
- __set_bit(X86_FEATURE_XEN_IBRS_SET, c->x86_capability);
- if (test_bit(X86_FEATURE_XEN_IBRS_CLEAR,
- boot_cpu_data.x86_capability))
- __set_bit(X86_FEATURE_XEN_IBRS_CLEAR,
- c->x86_capability);
+ __set_bit(X86_FEATURE_SC_MSR, c->x86_capability);
if (test_bit(X86_FEATURE_RSB_NATIVE,
boot_cpu_data.x86_capability))
__set_bit(X86_FEATURE_RSB_NATIVE, c->x86_capability);
thunk == THUNK_RETPOLINE ? "RETPOLINE" :
thunk == THUNK_LFENCE ? "LFENCE" :
thunk == THUNK_JMP ? "JMP" : "?",
- boot_cpu_has(X86_FEATURE_XEN_IBRS_SET) ? " IBRS+" :
- boot_cpu_has(X86_FEATURE_XEN_IBRS_CLEAR) ? " IBRS-" : "",
+ boot_cpu_has(X86_FEATURE_SC_MSR) ?
+ default_xen_spec_ctrl & SPEC_CTRL_IBRS ? " IBRS+" :
+ " IBRS-" : "",
opt_ibpb ? " IBPB" : "",
boot_cpu_has(X86_FEATURE_RSB_NATIVE) ? " RSB_NATIVE" : "",
boot_cpu_has(X86_FEATURE_RSB_VMEXIT) ? " RSB_VMEXIT" : "");
* need the IBRS entry/exit logic to virtualise IBRS support for
* guests.
*/
+ __set_bit(X86_FEATURE_SC_MSR, boot_cpu_data.x86_capability);
+
if ( ibrs )
- {
default_xen_spec_ctrl |= SPEC_CTRL_IBRS;
- __set_bit(X86_FEATURE_XEN_IBRS_SET, boot_cpu_data.x86_capability);
- }
- else
- __set_bit(X86_FEATURE_XEN_IBRS_CLEAR, boot_cpu_data.x86_capability);
default_spec_ctrl_flags |= SCF_ist_wrmsr;
}
barrier();
info->spec_ctrl_flags |= SCF_use_shadow;
barrier();
- asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_XEN_IBRS_SET)
- :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" );
+ asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", %c3)
+ :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0),
+ "i" (X86_FEATURE_SC_MSR)
+ : "memory" );
}
/* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */
static always_inline void spec_ctrl_exit_idle(struct cpu_info *info)
{
- uint32_t val = SPEC_CTRL_IBRS;
+ uint32_t val = info->xen_spec_ctrl;
/*
* Disable shadowing before updating the MSR. There are no SMP issues
* here; only local processor ordering concerns.
*/
info->spec_ctrl_flags &= ~SCF_use_shadow;
barrier();
- asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_XEN_IBRS_SET)
- :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" );
+ asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", %c3)
+ :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0),
+ "i" (X86_FEATURE_SC_MSR)
+ : "memory" );
}
#endif /* !__X86_SPEC_CTRL_H__ */
mov %\tmp, %rsp /* Restore old %rsp */
.endm
-.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT ibrs_val:req
+.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT
/*
* Requires %rbx=current, %rsp=regs/cpuinfo
* Clobbers %rax, %rcx, %rdx
andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)
/* Load Xen's intended value. */
- mov $\ibrs_val, %eax
+ movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
wrmsr
.endm
-.macro DO_SPEC_CTRL_ENTRY maybexen:req ibrs_val:req
+.macro DO_SPEC_CTRL_ENTRY maybexen:req
/*
* Requires %rsp=regs (also cpuinfo if !maybexen)
* Requires %r14=stack_end (if maybexen)
setnz %al
not %eax
and %al, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
+ movzbl STACK_CPUINFO_FIELD(xen_spec_ctrl)(%r14), %eax
.else
andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)
+ movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
.endif
- /* Load Xen's intended value. */
- mov $\ibrs_val, %eax
wrmsr
.endm
#define SPEC_CTRL_ENTRY_FROM_VMEXIT \
ALTERNATIVE __stringify(ASM_NOP40), \
DO_OVERWRITE_RSB, X86_FEATURE_RSB_VMEXIT; \
- ALTERNATIVE_2 __stringify(ASM_NOP32), \
- __stringify(DO_SPEC_CTRL_ENTRY_FROM_VMEXIT \
- ibrs_val=SPEC_CTRL_IBRS), \
- X86_FEATURE_XEN_IBRS_SET, \
- __stringify(DO_SPEC_CTRL_ENTRY_FROM_VMEXIT \
- ibrs_val=0), \
- X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE __stringify(ASM_NOP35), \
+ DO_SPEC_CTRL_ENTRY_FROM_VMEXIT, X86_FEATURE_SC_MSR
/* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */
#define SPEC_CTRL_ENTRY_FROM_PV \
ALTERNATIVE __stringify(ASM_NOP40), \
DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE; \
- ALTERNATIVE_2 __stringify(ASM_NOP22), \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=0 \
- ibrs_val=SPEC_CTRL_IBRS), \
- X86_FEATURE_XEN_IBRS_SET, \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=0 ibrs_val=0), \
- X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE __stringify(ASM_NOP25), \
+ __stringify(DO_SPEC_CTRL_ENTRY maybexen=0), X86_FEATURE_SC_MSR
/* Use in interrupt/exception context. May interrupt Xen or PV context. */
#define SPEC_CTRL_ENTRY_FROM_INTR \
ALTERNATIVE __stringify(ASM_NOP40), \
DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE; \
- ALTERNATIVE_2 __stringify(ASM_NOP36), \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=1 \
- ibrs_val=SPEC_CTRL_IBRS), \
- X86_FEATURE_XEN_IBRS_SET, \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=1 ibrs_val=0), \
- X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE __stringify(ASM_NOP39), \
+ __stringify(DO_SPEC_CTRL_ENTRY maybexen=1), X86_FEATURE_SC_MSR
/* Use when exiting to Xen context. */
#define SPEC_CTRL_EXIT_TO_XEN \
- ALTERNATIVE_2 __stringify(ASM_NOP23), \
- DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_XEN_IBRS_SET, \
- DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE __stringify(ASM_NOP23), \
+ DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_SC_MSR
/* Use when exiting to guest context. */
#define SPEC_CTRL_EXIT_TO_GUEST \
- ALTERNATIVE_2 __stringify(ASM_NOP24), \
- DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_XEN_IBRS_SET, \
- DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE __stringify(ASM_NOP24), \
+ DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
/* TODO: Drop these when the alternatives infrastructure is NMI/#MC safe. */
.macro SPEC_CTRL_ENTRY_FROM_INTR_IST