mov VCPU_arch_spec_ctrl(%rbx), %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
- SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+ SPEC_CTRL_EXIT_TO_HVM /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
pop %r15
pop %r14
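Context: this fragment is the SVM exit-to-guest path; the VMX side receives the identical rename further down. SPEC_CTRL_EXIT_TO_HVM loads the guest's chosen MSR_SPEC_CTRL value immediately before VM entry, which is why `ret`, `call *` and `jmp *` are flagged unsafe beyond that point: once a possibly-IBRS-clear guest value is live, Xen's remaining indirect branches are no longer protected against branch-target injection. A minimal C sketch of the work the macro patches in; the helper name is hypothetical, only the architectural MSR index is assumed:

    #include <stdint.h>

    #define MSR_SPEC_CTRL 0x00000048   /* architectural index */

    /*
     * Hypothetical C rendering of DO_SPEC_CTRL_EXIT_TO_GUEST's effect:
     * install the guest's SPEC_CTRL choice (stashed in %eax by the
     * `mov VCPU_arch_spec_ctrl(%rbx), %eax` above) just before VM entry.
     */
    static inline void spec_ctrl_exit_to_guest_sketch(uint32_t guest_spec_ctrl)
    {
        asm volatile ( "wrmsr"
                       :: "c" (MSR_SPEC_CTRL),
                          "a" (guest_spec_ctrl),   /* low 32 bits  */
                          "d" (0)                  /* high 32 bits */
                       : "memory" );
    }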
GET_CURRENT(bx)
- SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+ SPEC_CTRL_ENTRY_FROM_HVM /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
mov VCPU_svm_vmcb(%rbx),%rcx
movb $1,VCPU_vmx_launched(%rbx)
mov %rax,VCPU_hvm_guest_cr2(%rbx)
- SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+ SPEC_CTRL_ENTRY_FROM_HVM /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
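The two fragments above are the SVM and VMX vmexit paths respectively (note VCPU_svm_vmcb versus VCPU_vmx_launched); both switch from SPEC_CTRL_ENTRY_FROM_VMEXIT to the renamed SPEC_CTRL_ENTRY_FROM_HVM as soon as the current vcpu pointer is available. Conceptually the patched-in work restores Xen's own MSR_SPEC_CTRL value (and overwrites the RSB, see the DO_OVERWRITE_RSB sketch further down) before any `ret` or indirect branch executes. A sketch, reusing default_xen_spec_ctrl from the spec_ctrl.c hunk below; the function name is hypothetical:

    #include <stdint.h>

    #define MSR_SPEC_CTRL 0x00000048

    extern uint8_t default_xen_spec_ctrl;   /* declared in spec_ctrl.c below */

    /* Hypothetical C rendering of DO_SPEC_CTRL_ENTRY_FROM_HVM's effect:
     * move MSR_SPEC_CTRL back to Xen's default right after VMEXIT. */
    static inline void spec_ctrl_enter_from_hvm_sketch(void)
    {
        asm volatile ( "wrmsr"
                       :: "c" (MSR_SPEC_CTRL),
                          "a" ((uint32_t)default_xen_spec_ctrl),
                          "d" (0)
                       : "memory" );
    }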
mov %rsp,%rdi
mov VCPU_arch_spec_ctrl(%rbx), %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
- SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+ SPEC_CTRL_EXIT_TO_HVM /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
mov VCPU_hvm_guest_cr2(%rbx),%rax
THUNK_JMP,
} opt_thunk __initdata = THUNK_DEFAULT;
static int8_t __initdata opt_ibrs = -1;
-static bool __initdata opt_rsb_native = true;
-static bool __initdata opt_rsb_vmexit = true;
+static bool __initdata opt_rsb_pv = true;
+static bool __initdata opt_rsb_hvm = true;
bool __read_mostly opt_ibpb = true;
uint8_t __read_mostly default_xen_spec_ctrl;
uint8_t __read_mostly default_spec_ctrl_flags;
else if ( (val = parse_boolean("ibpb", s, ss)) >= 0 )
opt_ibpb = val;
else if ( (val = parse_boolean("rsb_native", s, ss)) >= 0 )
- opt_rsb_native = val;
+ opt_rsb_pv = val;
else if ( (val = parse_boolean("rsb_vmexit", s, ss)) >= 0 )
- opt_rsb_vmexit = val;
+ opt_rsb_hvm = val;
else
rc = -EINVAL;
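For reference, parse_boolean(name, s, ss) matches one comma-separated token of the option string against the given name and returns the parsed boolean, or -1 when the token is not this option. A deliberately cut-down sketch of that matching logic; the real Xen helper lives in common code and also understands "no-" prefixes and further boolean spellings:

    #include <string.h>

    /* Simplified sketch of the parse_boolean("rsb_pv", s, ss) pattern used
     * above.  s..e delimits one token; illustration only. */
    static int parse_boolean_sketch(const char *name, const char *s,
                                    const char *e)
    {
        size_t nlen = strlen(name), slen = e - s;

        if ( slen == nlen && !strncmp(s, name, nlen) )
            return 1;                       /* "rsb_pv"   -> true  */

        if ( slen == nlen + 2 && !strncmp(s, name, nlen) && s[nlen] == '=' )
            return s[nlen + 1] != '0';      /* "rsb_pv=0" -> false */

        return -1;                          /* not this option */
    }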
default_xen_spec_ctrl & SPEC_CTRL_IBRS ? " IBRS+" :
" IBRS-" : "",
opt_ibpb ? " IBPB" : "",
- boot_cpu_has(X86_FEATURE_RSB_NATIVE) ? " RSB_NATIVE" : "",
- boot_cpu_has(X86_FEATURE_RSB_VMEXIT) ? " RSB_VMEXIT" : "");
+ boot_cpu_has(X86_FEATURE_SC_RSB_PV) ? " RSB_NATIVE" : "",
+ boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ? " RSB_VMEXIT" : "");
printk("XPTI: %s\n",
boot_cpu_has(X86_FEATURE_NO_XPTI) ? "disabled" : "enabled");
* If a processor speculates to 32bit PV guest kernel mappings, it is
* speculating in 64bit supervisor mode, and can leak data.
*/
- if ( opt_rsb_native )
+ if ( opt_rsb_pv )
{
- setup_force_cpu_cap(X86_FEATURE_RSB_NATIVE);
+ setup_force_cpu_cap(X86_FEATURE_SC_RSB_PV);
default_spec_ctrl_flags |= SCF_ist_rsb;
}
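setup_force_cpu_cap() pins a synthetic feature bit in the boot CPU's capability bitmap so that the boot-time alternatives pass (see the spec_ctrl_asm.h hunks below) patches in the matching code sites. A sketch of the mechanism, with hypothetical structure and field names standing in for Xen's real ones:

    #include <stdint.h>

    #define NWORDS_SKETCH 10   /* assumption: FSCAPINTS + 1 synthetic word */

    struct cpuinfo_sketch {
        uint32_t x86_capability[NWORDS_SKETCH];
    } boot_cpu_data_sketch;

    /* Sketch of setup_force_cpu_cap(): set the bit unconditionally, so
     * later feature recalculation cannot clear it again. */
    static void setup_force_cpu_cap_sketch(unsigned int feature)
    {
        boot_cpu_data_sketch.x86_capability[feature / 32] |=
            1u << (feature % 32);
    }

With the cpufeatures.h hunk below, X86_FEATURE_SC_RSB_PV is feature number (FSCAPINTS+0)*32+20, i.e. bit 20 of the synthetic word.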
* HVM guests can always poison the RSB to point at Xen supervisor
* mappings.
*/
- if ( opt_rsb_vmexit )
- setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ if ( opt_rsb_hvm )
+ setup_force_cpu_cap(X86_FEATURE_SC_RSB_HVM);
/* Check we have hardware IBPB support before using it... */
if ( !boot_cpu_has(X86_FEATURE_IBRSB) && !boot_cpu_has(X86_FEATURE_IBPB) )
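The truncated condition above clamps opt_ibpb when neither CPUID enumeration is present: IBPB is advertised either through the Intel IBRS/IBPB bit (X86_FEATURE_IBRSB) or AMD's dedicated IBPB bit (X86_FEATURE_IBPB). A hedged sketch of the implied logic, including how the synthetic XEN_IBPB bit ("IBRSB || IBPB" in the cpufeatures.h hunk below) plausibly gets established; all names here are stand-ins:

    #include <stdbool.h>

    extern bool has_feature(unsigned int feature);     /* ~ boot_cpu_has() */
    extern void force_feature(unsigned int feature);   /* ~ setup_force_cpu_cap() */

    enum { FEAT_IBRSB, FEAT_IBPB, FEAT_XEN_IBPB };     /* stand-in numbers */

    bool opt_ibpb_sketch = true;

    /* Sketch: without either hardware enumeration, IBPB cannot be used
     * regardless of what the command line requested; otherwise the
     * synthetic collective bit is forced for the rest of the code. */
    static void init_ibpb_sketch(void)
    {
        if ( !has_feature(FEAT_IBRSB) && !has_feature(FEAT_IBPB) )
            opt_ibpb_sketch = false;

        if ( opt_ibpb_sketch &&
             (has_feature(FEAT_IBRSB) || has_feature(FEAT_IBPB)) )
            force_feature(FEAT_XEN_IBPB);
    }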
mov VCPU_arch_spec_ctrl(%rbx), %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
- SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+ SPEC_CTRL_EXIT_TO_PV /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
RESTORE_ALL adj=8 compat=1
.Lft0: iretq
mov %r15d, %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
- SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+ SPEC_CTRL_EXIT_TO_PV /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
RESTORE_ALL
testw $TRAP_syscall,4(%rsp)
XEN_CPUFEATURE(IND_THUNK_JMP, (FSCAPINTS+0)*32+16) /* Use IND_THUNK_JMP */
XEN_CPUFEATURE(XEN_IBPB, (FSCAPINTS+0)*32+17) /* IBRSB || IBPB */
XEN_CPUFEATURE(SC_MSR, (FSCAPINTS+0)*32+18) /* MSR_SPEC_CTRL used by Xen */
-XEN_CPUFEATURE(RSB_NATIVE, (FSCAPINTS+0)*32+20) /* RSB overwrite needed for native */
-XEN_CPUFEATURE(RSB_VMEXIT, (FSCAPINTS+0)*32+21) /* RSB overwrite needed for vmexit */
+XEN_CPUFEATURE(SC_RSB_PV, (FSCAPINTS+0)*32+20) /* RSB overwrite needed for PV */
+XEN_CPUFEATURE(SC_RSB_HVM, (FSCAPINTS+0)*32+21) /* RSB overwrite needed for HVM */
XEN_CPUFEATURE(NO_XPTI, (FSCAPINTS+0)*32+22) /* XPTI mitigation not in use */
#define NCAPINTS (FSCAPINTS + 1) /* N 32-bit words worth of info */
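The renamed bits keep their positions: only the names change from RSB_NATIVE/RSB_VMEXIT to SC_RSB_PV/SC_RSB_HVM, matching the SC_ (speculation control) prefix already used by SC_MSR. As a worked example of the numbering scheme; the FSCAPINTS value below is an assumption for illustration:

    #include <assert.h>

    #define FSCAPINTS 10   /* assumption: count of hardware-defined words */

    /* Xen-synthetic features occupy word FSCAPINTS; bit 20 of that word
     * is SC_RSB_PV and bit 21 is SC_RSB_HVM, exactly as in the hunk. */
    #define X86_FEATURE_SC_RSB_PV  ((FSCAPINTS + 0) * 32 + 20)
    #define X86_FEATURE_SC_RSB_HVM ((FSCAPINTS + 0) * 32 + 21)

    int main(void)
    {
        assert(X86_FEATURE_SC_RSB_PV  / 32 == FSCAPINTS); /* synthetic word */
        assert(X86_FEATURE_SC_RSB_PV  % 32 == 20);        /* bit in word   */
        assert(X86_FEATURE_SC_RSB_HVM % 32 == 21);
        return 0;
    }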
*
* The following ASM fragments implement this algorithm. See their local
* comments for further details.
- * - SPEC_CTRL_ENTRY_FROM_VMEXIT
+ * - SPEC_CTRL_ENTRY_FROM_HVM
* - SPEC_CTRL_ENTRY_FROM_PV
* - SPEC_CTRL_ENTRY_FROM_INTR
+ * - SPEC_CTRL_ENTRY_FROM_INTR_IST
+ * - SPEC_CTRL_EXIT_TO_XEN_IST
* - SPEC_CTRL_EXIT_TO_XEN
- * - SPEC_CTRL_EXIT_TO_GUEST
+ * - SPEC_CTRL_EXIT_TO_PV
+ * - SPEC_CTRL_EXIT_TO_HVM
*/
.macro DO_OVERWRITE_RSB tmp=rax
mov %\tmp, %rsp /* Restore old %rsp */
.endm
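DO_OVERWRITE_RSB (body elided above) refills the Return Stack Buffer with 32 harmless entries, so that any subsequent `ret` executed in Xen predicts into a benign capture loop rather than a guest-poisoned target; the final `mov %\tmp, %rsp` shown above discards the 32 accumulated stack frames in one move. A self-contained C sketch of the technique, assuming 32 entries cover the RSB depth; the function name is hypothetical:

    /*
     * Sketch of the RSB-overwrite technique: 16 iterations x 2 calls
     * = 32 RSB entries, each whose return address points at a
     * pause;lfence;jmp capture loop that is never executed
     * architecturally.  Hypervisor-context code: assumes no red zone
     * (e.g. built with -mno-red-zone).
     */
    static inline void overwrite_rsb_sketch(void)
    {
        unsigned long tmp;

        asm volatile ( "mov %%rsp, %0     \n\t"  /* preserve %rsp        */
                       "mov $16, %%ecx    \n"
                       "1: call 2f        \n"    /* push RSB entry #1    */
                       "3: pause; lfence  \n\t"
                       "   jmp 3b         \n"    /* capture speculation  */
                       "2: call 4f        \n"    /* push RSB entry #2    */
                       "5: pause; lfence  \n\t"
                       "   jmp 5b         \n"
                       "4: sub $1, %%ecx  \n\t"
                       "   jnz 1b         \n\t"
                       "mov %0, %%rsp     \n\t"  /* drop the 32 frames   */
                       : "=&r" (tmp) :: "ecx", "cc", "memory" );
    }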
-.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT
+.macro DO_SPEC_CTRL_ENTRY_FROM_HVM
/*
* Requires %rbx=current, %rsp=regs/cpuinfo
* Clobbers %rax, %rcx, %rdx
.endm
/* Use after a VMEXIT from an HVM guest. */
-#define SPEC_CTRL_ENTRY_FROM_VMEXIT \
+#define SPEC_CTRL_ENTRY_FROM_HVM \
ALTERNATIVE __stringify(ASM_NOP40), \
- DO_OVERWRITE_RSB, X86_FEATURE_RSB_VMEXIT; \
+ DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM; \
ALTERNATIVE __stringify(ASM_NOP36), \
- DO_SPEC_CTRL_ENTRY_FROM_VMEXIT, X86_FEATURE_SC_MSR
+ DO_SPEC_CTRL_ENTRY_FROM_HVM, X86_FEATURE_SC_MSR
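Each of these SPEC_CTRL_* wrappers (this one and the FROM_PV/FROM_INTR variants that follow) is an ALTERNATIVE: the assembler emits a NOP sled of the stated size (ASM_NOP40, ASM_NOP36, ...) plus a table entry, and at boot the patcher copies the replacement code over the NOPs only when the named synthetic feature was forced. A structural sketch with hypothetical field names; Xen's real struct alt_instr uses relative offsets rather than raw pointers:

    #include <stdint.h>
    #include <string.h>
    #include <stdbool.h>

    /* Hypothetical flattening of one alternatives-table entry. */
    struct alt_entry_sketch {
        uint8_t *site;         /* the NOP sled emitted inline         */
        const uint8_t *repl;   /* replacement bytes, e.g. wrmsr code  */
        uint16_t feature;      /* e.g. X86_FEATURE_SC_RSB_HVM         */
        uint8_t len;           /* bytes to copy                       */
    };

    extern bool boot_cpu_has_sketch(unsigned int feature);

    /* Boot-time pass: leave the NOPs alone unless the feature is set. */
    static void apply_alternatives_sketch(struct alt_entry_sketch *a,
                                          unsigned int n)
    {
        for ( unsigned int i = 0; i < n; i++ )
            if ( boot_cpu_has_sketch(a[i].feature) )
                memcpy(a[i].site, a[i].repl, a[i].len);
    }

This is why the renamed entry/exit points cost only a run of NOPs on hardware that needs no mitigation.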
/* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */
#define SPEC_CTRL_ENTRY_FROM_PV \
ALTERNATIVE __stringify(ASM_NOP40), \
- DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE; \
+ DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV; \
ALTERNATIVE __stringify(ASM_NOP25), \
__stringify(DO_SPEC_CTRL_ENTRY maybexen=0), X86_FEATURE_SC_MSR
/* Use in interrupt/exception context. May interrupt Xen or PV context. */
#define SPEC_CTRL_ENTRY_FROM_INTR \
ALTERNATIVE __stringify(ASM_NOP40), \
- DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE; \
+ DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV; \
ALTERNATIVE __stringify(ASM_NOP33), \
__stringify(DO_SPEC_CTRL_ENTRY maybexen=1), X86_FEATURE_SC_MSR
ALTERNATIVE __stringify(ASM_NOP17), \
DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_SC_MSR
-/* Use when exiting to guest context. */
-#define SPEC_CTRL_EXIT_TO_GUEST \
+/* Use when exiting to PV guest context. */
+#define SPEC_CTRL_EXIT_TO_PV \
ALTERNATIVE __stringify(ASM_NOP24), \
DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
-/* TODO: Drop these when the alternatives infrastructure is NMI/#MC safe. */
+/* Use when exiting to HVM guest context. */
+#define SPEC_CTRL_EXIT_TO_HVM \
+ ALTERNATIVE __stringify(ASM_NOP24), \
+ DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
+
+/*
+ * Use in IST interrupt/exception context. May interrupt Xen or PV context.
+ * Fine-grained control of SCF_ist_wrmsr is needed for safety in the S3 resume
+ * path to avoid using MSR_SPEC_CTRL before the microcode introducing it has
+ * been reloaded.
+ */
.macro SPEC_CTRL_ENTRY_FROM_INTR_IST
/*
* Requires %rsp=regs, %r14=stack_end
UNLIKELY_END(\@_serialise)
.endm
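Unlike the wrappers above, the IST variants cannot rely on boot-time patching (an NMI or #MC can interrupt the alternatives patcher mid-copy), so they test flag bits at run time instead; SCF_ist_wrmsr in particular lets the S3 resume path keep MSR_SPEC_CTRL untouched until the microcode introducing it is reloaded. A conceptual sketch of that runtime gate; the flag value is an assumption, and the real code reads per-CPU state rather than the default:

    #include <stdint.h>

    #define SCF_ist_wrmsr (1 << 1)   /* assumption: real bit may differ */
    #define MSR_SPEC_CTRL 0x00000048

    extern uint8_t default_spec_ctrl_flags, default_xen_spec_ctrl;

    /* Runtime (not boot-patched) gate used conceptually by the IST
     * paths: only write MSR_SPEC_CTRL once the flag says it is safe. */
    static void spec_ctrl_ist_entry_sketch(void)
    {
        if ( default_spec_ctrl_flags & SCF_ist_wrmsr )
            asm volatile ( "wrmsr"
                           :: "c" (MSR_SPEC_CTRL),
                              "a" ((uint32_t)default_xen_spec_ctrl),
                              "d" (0)
                           : "memory" );
    }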
+/* Use when exiting to Xen in IST context. */
.macro SPEC_CTRL_EXIT_TO_XEN_IST
/*
* Requires %rbx=stack_end