#include <xen/init.h>
#include <xen/lib.h>
+#include <asm/msr-index.h>
#include <asm/processor.h>
#include <asm/spec_ctrl.h>
+#include <asm/spec_ctrl_asm.h>
static enum ind_thunk {
THUNK_DEFAULT, /* Decide which thunk to use at boot time. */
print_details(thunk);
}
+static void __init __maybe_unused build_assertions(void)
+{
+ /*
+  * The optimised IST assembly masks bti_ist_info directly with
+  * BTI_IST_IBRS to produce the value written to MSR_SPEC_CTRL, so
+  * bit 0 of bti_ist_info must encode SPEC_CTRL_IBRS exactly.
+  */
+ BUILD_BUG_ON(BTI_IST_IBRS != SPEC_CTRL_IBRS);
+}
+
/*
* Local variables:
* mode: C
UNLIKELY_END(exit_cr3)
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
- SPEC_CTRL_EXIT_TO_XEN /* Req: %rbx=end, Clob: acd */
+ SPEC_CTRL_EXIT_TO_XEN_IST /* Req: %rbx=end, Clob: acd */
RESTORE_ALL adj=8
iretq
GET_STACK_END(14)
- SPEC_CTRL_ENTRY_FROM_INTR /* Req: %rsp=regs, %r14=end, Clob: acd */
+ SPEC_CTRL_ENTRY_FROM_INTR_IST /* Req: %rsp=regs, %r14=end, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
mov STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rbx
GET_STACK_END(14)
- SPEC_CTRL_ENTRY_FROM_INTR /* Req: %rsp=regs, %r14=end, Clob: acd */
+ SPEC_CTRL_ENTRY_FROM_INTR_IST /* Req: %rsp=regs, %r14=end, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
mov STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
#ifndef __X86_SPEC_CTRL_ASM_H__
#define __X86_SPEC_CTRL_ASM_H__
+/* Encoding of the bottom bits in cpuinfo.bti_ist_info */
+/*
+ * NOTE: BTI_IST_IBRS deliberately aliases SPEC_CTRL_IBRS (bit 0), so the
+ * IST entry path can mask bti_ist_info straight into the WRMSR value for
+ * MSR_SPEC_CTRL.  build_assertions() enforces the alias at compile time.
+ */
+#define BTI_IST_IBRS (1 << 0)
+/* Set when the IST path must write MSR_SPEC_CTRL on entry/exit. */
+#define BTI_IST_WRMSR (1 << 1)
+/* Set when the IST path must overwrite the RSB on entry. */
+#define BTI_IST_RSB (1 << 2)
+
#ifdef __ASSEMBLY__
#include <asm/msr-index.h>
DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_XEN_IBRS_SET, \
DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_XEN_IBRS_CLEAR
+/* TODO: Drop these when the alternatives infrastructure is NMI/#MC safe. */
+.macro SPEC_CTRL_ENTRY_FROM_INTR_IST
+/*
+ * Requires %rsp=regs, %r14=stack_end
+ * Clobbers %rax, %rcx, %rdx
+ *
+ * This is logical merge of DO_OVERWRITE_RSB and DO_SPEC_CTRL_ENTRY
+ * maybexen=1, but with conditionals rather than alternatives.
+ */
+ movzbl STACK_CPUINFO_FIELD(bti_ist_info)(%r14), %eax
+
+ testb $BTI_IST_RSB, %al
+ jz .L\@_skip_rsb
+
+ DO_OVERWRITE_RSB
+
+.L\@_skip_rsb:
+
+ testb $BTI_IST_WRMSR, %al
+ jz .L\@_skip_wrmsr
+
+ xor %edx, %edx
+ testb $3, UREGS_cs(%rsp)
+ setz %dl
+ and %dl, STACK_CPUINFO_FIELD(use_shadow_spec_ctrl)(%r14)
+
+.L\@_entry_from_xen:
+ /*
+  * Load Xen's intended value. SPEC_CTRL_IBRS vs 0 is encoded in the
+  * bottom bit of bti_ist_info, via a deliberate alias with BTI_IST_IBRS.
+  */
+ mov $MSR_SPEC_CTRL, %ecx
+ and $BTI_IST_IBRS, %eax
+ /*
+  * Re-zero %edx before WRMSR.  The setz above may have left %dl == 1
+  * (IST event interrupted Xen), and the `and` with a memory destination
+  * does not modify %dl.  WRMSR writes %edx:%eax, and nonzero reserved
+  * high bits in MSR_SPEC_CTRL would raise #GP in the NMI/#MC path.
+  */
+ xor %edx, %edx
+ wrmsr
+
+ /* Opencoded UNLIKELY_START() with no condition. */
+UNLIKELY_DISPATCH_LABEL(\@_serialise):
+ .subsection 1
+ /*
+  * In the case that we might need to set SPEC_CTRL.IBRS for safety, we
+  * need to ensure that an attacker can't poison the `jz .L\@_skip_wrmsr`
+  * to speculate around the WRMSR. As a result, we need a dispatch
+  * serialising instruction in the else clause.
+  */
+.L\@_skip_wrmsr:
+ lfence
+ UNLIKELY_END(\@_serialise)
+.endm
+
+.macro SPEC_CTRL_EXIT_TO_XEN_IST
+/*
+ * Requires %rbx=stack_end
+ * Clobbers %rax, %rcx, %rdx
+ *
+ * Companion to SPEC_CTRL_ENTRY_FROM_INTR_IST: uses a runtime conditional
+ * on bti_ist_info rather than alternatives, so it is usable from the
+ * NMI/#MC exit paths.  The WRMSR work in DO_SPEC_CTRL_EXIT_TO_XEN is
+ * only performed when BTI_IST_WRMSR was in effect at entry.
+ */
+ testb $BTI_IST_WRMSR, STACK_CPUINFO_FIELD(bti_ist_info)(%rbx)
+ jz .L\@_skip
+
+ DO_SPEC_CTRL_EXIT_TO_XEN
+
+.L\@_skip:
+.endm
+
#endif /* __ASSEMBLY__ */
#endif /* !__X86_SPEC_CTRL_ASM_H__ */