dead_idle();
}
-static void idle_loop(void)
+static void noreturn idle_loop(void)
{
unsigned int cpu = smp_processor_id();
reset_stack_and_jump(idle_loop);
}
-static void noreturn continue_idle_domain(struct vcpu *v)
-{
- reset_stack_and_jump(idle_loop);
-}
-
void init_hypercall_page(struct domain *d, void *ptr)
{
memset(ptr, 0xcc, PAGE_SIZE);
static const struct arch_csw idle_csw = {
.from = paravirt_ctxt_switch_from,
.to = paravirt_ctxt_switch_to,
- .tail = continue_idle_domain,
+ .tail = idle_loop,
};
d->arch.ctxt_switch = &idle_csw;
/* Ensure that the vcpu has an up-to-date time base. */
update_vcpu_system_time(next);
- /*
- * Schedule tail *should* be a terminal function pointer, but leave a
- * bug frame around just in case it returns, to save going back into the
- * context switching code and leaving a far more subtle crash to diagnose.
- */
- nextd->arch.ctxt_switch->tail(next);
- BUG();
+ reset_stack_and_jump_ind(nextd->arch.ctxt_switch->tail);
}
void continue_running(struct vcpu *same)
{
- /* See the comment above. */
- same->domain->arch.ctxt_switch->tail(same);
- BUG();
+ reset_stack_and_jump_ind(same->domain->arch.ctxt_switch->tail);
}
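For reference, every tail handler now has the same shape: a noreturn function taking no arguments that looks up the vcpu via current itself, so it can be entered with a plain stack-resetting jump. A minimal sketch follows; example_tail and do_resume_work are made-up names, while current, reset_stack_and_jump() and ret_from_intr are the existing Xen symbols used elsewhere in this patch:

static void noreturn example_tail(void)
{
    struct vcpu *v = current;   /* handlers now look up the vcpu themselves */

    do_resume_work(v);          /* hypothetical per-vcpu resume work */

    reset_stack_and_jump(ret_from_intr);
}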
int __sync_local_execstate(void)
wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
}
-static void noreturn svm_do_resume(struct vcpu *v)
+static void noreturn svm_do_resume(void)
{
+ struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
bool debug_state = (v->domain->debugger_attached ||
v->domain->arch.monitor.software_breakpoint_enabled ||
domain_crash(curr->domain);
}
-void vmx_do_resume(struct vcpu *v)
+void vmx_do_resume(void)
{
+ struct vcpu *v = current;
bool_t debug_state;
unsigned long host_cr4;
return rc;
}
-static void noreturn continue_nonidle_domain(struct vcpu *v)
+static void noreturn continue_nonidle_domain(void)
{
check_wakeup_from_wait();
reset_stack_and_jump(ret_from_intr);
# define SHADOW_STACK_WORK ""
#endif
-#define reset_stack_and_jump(fn) \
+#define switch_stack_and_jump(fn, instr, constr) \
({ \
unsigned int tmp; \
__asm__ __volatile__ ( \
SHADOW_STACK_WORK \
"mov %[stk], %%rsp;" \
CHECK_FOR_LIVEPATCH_WORK \
- "jmp %c[fun];" \
+ instr "[fun]" \
: [val] "=&r" (tmp), \
[ssp] "=&r" (tmp) \
: [stk] "r" (guest_cpu_user_regs()), \
- [fun] "i" (fn), \
+ [fun] constr (fn), \
[skstk_base] "i" \
((PRIMARY_SHSTK_SLOT + 1) * PAGE_SIZE - 8), \
[stack_mask] "i" (STACK_SIZE - 1), \
unreachable(); \
})
+#define reset_stack_and_jump(fn) \
+ switch_stack_and_jump(fn, "jmp %c", "i")
+
+/* The constraint may only specify non-call-clobbered registers. */
+#define reset_stack_and_jump_ind(fn) \
+ switch_stack_and_jump(fn, "INDIRECT_JMP %", "b")
+
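The note about non-call-clobbered registers matters because the asm body can make calls (e.g. CHECK_FOR_LIVEPATCH_WORK) before the final jump, and a call-clobbered register holding the target could be overwritten by then. Below is a standalone userspace sketch of the same idea, not the Xen macro itself; new_stack, tail_fn and switch_stack_and_jump_ind are invented names for illustration:

#include <stdio.h>
#include <stdlib.h>

#define noreturn __attribute__((__noreturn__))

/* A private, 16-byte aligned stack for the jumped-to function. */
static unsigned char new_stack[64 * 1024] __attribute__((aligned(16)));

static void noreturn tail_fn(void)
{
    puts("running on the fresh stack");
    exit(0);
}

static void noreturn switch_stack_and_jump_ind(void (*fn)(void))
{
    /*
     * "b" pins fn into %rbx, which is callee-saved on x86-64, so the
     * target would survive any calls made before the final jump (as the
     * real macro may do via CHECK_FOR_LIVEPATCH_WORK).  A call-clobbered
     * register ("a", "c", "d", ...) could be overwritten by such a call.
     */
    __asm__ __volatile__ ( "mov %[stk], %%rsp\n\t"
                           "jmp *%[fun]"
                           :: [stk] "r" (new_stack + sizeof(new_stack) - 8),
                              [fun] "b" (fn)
                           : "memory" );
    __builtin_unreachable();
}

int main(void)
{
    switch_stack_and_jump_ind(tail_fn);
}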
/*
* Which VCPU's state is currently running on each CPU?
* This is not necessarily the same as 'current' as a CPU may be
const struct arch_csw {
void (*from)(struct vcpu *);
void (*to)(struct vcpu *);
- void (*tail)(struct vcpu *);
+ void noreturn (*tail)(void);
} *ctxt_switch;
#ifdef CONFIG_HVM
void vmx_asm_vmexit_handler(struct cpu_user_regs);
void vmx_asm_do_vmentry(void);
void vmx_intr_assist(void);
-void noreturn vmx_do_resume(struct vcpu *);
+void noreturn vmx_do_resume(void);
void vmx_vlapic_msr_changed(struct vcpu *v);
struct hvm_emulate_ctxt;
void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt);