AFLAGS-y += -D__ASSEMBLY__ -include $(BASEDIR)/include/xen/config.h
-# Clang's built-in assembler can't handle .code16/.code32/.code64 yet
-AFLAGS-$(clang) += -no-integrated-as
+# Clang's built-in assembler can't handle embedded .include's
+CFLAGS-$(clang) += -no-integrated-as
ALL_OBJS := $(ALL_OBJS-y)
export CONFIG_INDIRECT_THUNK=y
endif
+# Set up the assembler include path properly for older GCC toolchains. Clang
+# objects to the argument being passed however.
+ifneq ($(clang),y)
+CFLAGS += -Wa,-I$(BASEDIR)/include
+endif
+
CFLAGS-$(shadow-paging) += -DCONFIG_SHADOW_PAGING
CFLAGS-$(bigmem) += -DCONFIG_BIGMEM
.code64
start64:
/* Jump to high mappings. */
- movabs $__high_start,%rax
- jmpq *%rax
+ movabs $__high_start, %rdi
+
+#ifdef CONFIG_INDIRECT_THUNK
+ /*
+ * If booting virtualised, or hot-onlining a CPU, sibling threads can
+ * attempt Branch Target Injection against this jmp.
+ *
+ * We've got no usable stack so can't use a RETPOLINE thunk, and are
+ * further than disp32 from the high mappings so couldn't use
+ * JUMP_THUNK even if it was a non-RETPOLINE thunk. Furthermore, an
+ * LFENCE isn't necessarily safe to use at this point.
+ *
+ * As this isn't a hotpath, use a fully serialising event to reduce
+ * the speculation window as much as possible. %ebx needs preserving
+ * for __high_start.
+ */
+ mov %ebx, %esi
+ cpuid
+ mov %esi, %ebx
+#endif
+
+ jmpq *%rdi
.code32
trampoline_boot_cpu_entry:
return val;
}
+void __x86_indirect_thunk_rcx(void);
+
/* Instruction fetch with error handling. */
#define insn_fetch(type, base, eip, limit) \
({ unsigned long _rc, _ptr = (base) + (eip); \
unsigned long code_base, code_limit;
char *io_emul_stub = NULL;
void (*io_emul)(struct cpu_user_regs *) __attribute__((__regparm__(1)));
+ struct stubs *this_stubs = &this_cpu(stubs);
+ unsigned long stub_va = this_stubs->addr + STUB_BUF_SIZE / 2;
uint64_t val;
bool_t vpmu_msr;
* context. This is needed for some systems which (ab)use IN/OUT
* to communicate with BIOS code in system-management mode.
*/
- io_emul_stub = map_domain_page(_mfn(this_cpu(stubs.mfn))) +
- (this_cpu(stubs.addr) & ~PAGE_MASK) +
- STUB_BUF_SIZE / 2;
+ io_emul_stub = map_domain_page(_mfn(this_stubs->mfn)) +
+ (stub_va & ~PAGE_MASK);
/* movq $host_to_guest_gpr_switch,%rcx */
io_emul_stub[0] = 0x48;
io_emul_stub[1] = 0xb9;
*(void **)&io_emul_stub[2] = (void *)host_to_guest_gpr_switch;
+
+#ifdef CONFIG_INDIRECT_THUNK
+ /* callq __x86_indirect_thunk_rcx */
+ io_emul_stub[10] = 0xe8;
+ *(int32_t *)&io_emul_stub[11] =
+ (long)__x86_indirect_thunk_rcx - (stub_va + 11 + 4);
+#else
/* callq *%rcx */
io_emul_stub[10] = 0xff;
io_emul_stub[11] = 0xd1;
+ /* TODO: untangle ideal_nops from init/livepatch Kconfig options. */
+ memcpy(&io_emul_stub[12], "\x0f\x1f\x00", 3); /* P6_NOP3 */
+#endif
+
/* data16 or nop */
- io_emul_stub[12] = (op_bytes != 2) ? 0x90 : 0x66;
+ io_emul_stub[15] = (op_bytes != 2) ? 0x90 : 0x66;
/* <io-access opcode> */
- io_emul_stub[13] = opcode;
+ io_emul_stub[16] = opcode;
/* imm8 or nop */
- io_emul_stub[14] = 0x90;
+ io_emul_stub[17] = 0x90;
/* ret (jumps to guest_to_host_gpr_switch) */
- io_emul_stub[15] = 0xc3;
- BUILD_BUG_ON(STUB_BUF_SIZE / 2 < 16);
+ io_emul_stub[18] = 0xc3;
+ BUILD_BUG_ON(STUB_BUF_SIZE / 2 < 19);
/* Handy function-typed pointer to the stub. */
- io_emul = (void *)(this_cpu(stubs.addr) + STUB_BUF_SIZE / 2);
+ io_emul = (void *)stub_va;
if ( ioemul_handle_quirk )
- ioemul_handle_quirk(opcode, &io_emul_stub[12], regs);
+ {
+ BUILD_BUG_ON(STUB_BUF_SIZE / 2 < 15 + 10);
+ ioemul_handle_quirk(opcode, &io_emul_stub[15], regs);
+ }
/* I/O Port and Interrupt Flag instructions. */
switch ( opcode )
op_bytes = 1;
case 0xe5: /* IN imm8,%eax */
port = insn_fetch(u8, code_base, eip, code_limit);
- io_emul_stub[14] = port; /* imm8 */
+ io_emul_stub[17] = port; /* imm8 */
exec_in:
if ( !guest_io_okay(port, op_bytes, v, regs) )
goto fail;
op_bytes = 1;
case 0xe7: /* OUT %eax,imm8 */
port = insn_fetch(u8, code_base, eip, code_limit);
- io_emul_stub[14] = port; /* imm8 */
+ io_emul_stub[17] = port; /* imm8 */
exec_out:
if ( !guest_io_okay(port, op_bytes, v, regs) )
goto fail;
movzbl UREGS_entry_vector(%rsp),%eax
leaq exception_table(%rip),%rdx
PERFC_INCR(exceptions, %rax, %rbx)
- callq *(%rdx,%rax,8)
+ mov (%rdx, %rax, 8), %rdx
+ INDIRECT_CALL %rdx
mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
testb $3,UREGS_cs(%rsp)
jz restore_all_xen
1: movq %rsp,%rdi
movzbl UREGS_entry_vector(%rsp),%eax
leaq exception_table(%rip),%rdx
- callq *(%rdx,%rax,8)
+ mov (%rdx, %rax, 8), %rdx
+ INDIRECT_CALL %rdx
mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
cmpb $TRAP_nmi,UREGS_entry_vector(%rsp)
jne ret_from_intr
if ( !rc )
{
copy_REX_VEX(buf, rex_prefix, vex);
- asm volatile ( "call *%0" : : "r" (stub.func), "a" (mmvalp)
- : "memory" );
+ asm volatile ( "INDIRECT_CALL %0" : : "r" (stub.func), "a" (mmvalp)
+ : "memory" );
}
put_fpu(&fic);
put_stub(stub);
if ( !rc )
{
copy_REX_VEX(buf, rex_prefix, vex);
- asm volatile ( "call *%0" : : "r" (stub.func), "a" (mmvalp)
- : "memory" );
+ asm volatile ( "INDIRECT_CALL %0" : : "r" (stub.func), "a" (mmvalp)
+ : "memory" );
}
put_fpu(&fic);
put_stub(stub);
/*
* Hand-rolled longjmp(). Returns to the pointer on the top of
- * wqv->stack, and lands on a `rep movs` instruction.
+ * wqv->stack, and lands on a `rep movs` instruction. All other GPRs are
+ * restored from the stack, so are available for use here.
*/
asm volatile (
- "mov %1,%%"__OP"sp; jmp *(%0)"
+ "mov %1,%%"__OP"sp; INDIRECT_JMP %[ip]"
: : "S" (wqv->stack), "D" (wqv->esp),
- "c" ((char *)get_cpu_info() - (char *)wqv->esp)
+ "c" ((char *)get_cpu_info() - (char *)wqv->esp),
+ [ip] "r" (*(unsigned long *)wqv->stack)
: "memory" );
unreachable();
}
#include <asm/cpufeature.h>
#include <asm/alternative.h>
+#ifdef __ASSEMBLY__
+# include <asm/indirect_thunk_asm.h>
+#else
+# ifdef CONFIG_INDIRECT_THUNK
+asm ( "\t.equ CONFIG_INDIRECT_THUNK, 1" );
+# else
+asm ( "\t.equ CONFIG_INDIRECT_THUNK, 0" );
+# endif
+asm ( "\t.include \"asm/indirect_thunk_asm.h\"" );
+#endif
+
#ifndef __ASSEMBLY__
void ret_from_intr(void);
#endif
--- /dev/null
+/*
+ * Warning! This file is included at an assembler level for .c files, causing
+ * the usual #ifdef'ary to turn into comments.
+ */
+
+.macro INDIRECT_BRANCH insn:req arg:req
+/*
+ * Create an indirect branch. insn is one of call/jmp, arg is a single
+ * register.
+ *
+ * With no compiler support, this degrades into a plain indirect call/jmp.
+ * With compiler support, dispatch to the correct __x86_indirect_thunk_*
+ */
+ .if CONFIG_INDIRECT_THUNK == 1
+
+ $done = 0
+ .irp reg, ax, cx, dx, bx, bp, si, di, 8, 9, 10, 11, 12, 13, 14, 15
+ .ifeqs "\arg", "%r\reg"
+ \insn __x86_indirect_thunk_r\reg
+ $done = 1
+ .exitm
+ .endif
+ .endr
+
+ .if $done != 1
+ .error "Bad register arg \arg"
+ .endif
+
+ .else
+ \insn *\arg
+ .endif
+.endm
+
+/* Convenience wrappers. */
+.macro INDIRECT_CALL arg:req
+ INDIRECT_BRANCH call \arg
+.endm
+
+.macro INDIRECT_JMP arg:req
+ INDIRECT_BRANCH jmp \arg
+.endm