reset_stack_and_jump(idle_loop);
}
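(The excerpt opens mid-file in xen/arch/x86/domain.c; the two lines above appear to be the tail of startup_cpu_idle_loop(). Throughout, noreturn is Xen's shorthand for GCC's __attribute__((noreturn)): the point of the patch is to let the compiler check that these scheduling tails really never return, instead of relying on runtime BUG() nets.)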
-static void continue_idle_domain(struct vcpu *v)
+static void noreturn continue_idle_domain(struct vcpu *v)
{
reset_stack_and_jump(idle_loop);
}
-static void continue_nonidle_domain(struct vcpu *v)
+static void noreturn continue_nonidle_domain(struct vcpu *v)
{
check_wakeup_from_wait();
mark_regs_dirty(guest_cpu_user_regs());
reset_stack_and_jump(ret_from_intr);
}

A separate hunk further down domain.c drops the BUG() safety net at the end of context_switch(), since schedule_tail() is about to become a visibly noreturn call:

update_vcpu_system_time(next);
schedule_tail(next);
- BUG();
}
void continue_running(struct vcpu *same)
{
schedule_tail(same);
- BUG();
}
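The two deleted BUG() calls existed only to catch a schedule_tail() that unexpectedly returned. Once the underlying function pointer is annotated noreturn (the domain.h hunk below), the compiler itself knows nothing after the call is reachable, so the nets are redundant. A minimal standalone sketch of that pointer annotation, assuming GCC; tail_fn, demo_tail and dispatch are invented names, not Xen code:

    #include <stdlib.h>

    /* GCC honours noreturn on a function pointer type and treats
     * calls made through it as never returning. */
    typedef void __attribute__((noreturn)) (*tail_fn)(int);

    static void __attribute__((noreturn)) demo_tail(int code)
    {
        exit(code);
    }

    static void __attribute__((noreturn)) dispatch(tail_fn tail)
    {
        tail(1);
        /* Unreachable: no "function does return" warning here, and a
         * BUG()-style trap at this point would be dead code. */
    }

    int main(void)
    {
        dispatch(demo_tail);
    }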
int __sync_local_execstate(void)
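(int __sync_local_execstate(void) above is only trailing context; it is the function that follows continue_running() in domain.c. The next hunk switches to xen/arch/x86/hvm/svm/svm.c, where the wrmsrl() line is, as best I can tell, the tail of svm_ctxt_switch_to(), and svm_do_resume() — which ends by calling reset_stack_and_jump() — picks up the same annotation.)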
wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
}
-static void svm_do_resume(struct vcpu *v)
+static void noreturn svm_do_resume(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
bool_t debug_state = v->domain->debugger_attached;
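The boot path is annotated the same way in xen/arch/x86/setup.c (the return p; below is the end of the helper preceding __start_xen(), shown only as context): once __start_xen() hands control to the first domain, it never comes back.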
return p;
}
-void __init __start_xen(unsigned long mbi_p)
+void __init noreturn __start_xen(unsigned long mbi_p)
{
char *memmap_type = NULL;
char *cmdline, *kextra, *loader;
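All of the above rests on one change in xen/include/asm-x86/current.h (the stack-bottom arithmetic in the next two lines is untouched context). A bare asm statement is a statement, not an expression, and gives the compiler no control-flow information, so reset_stack_and_jump() is rewrapped in a GNU statement expression whose last step is unreachable() — Xen's wrapper around __builtin_unreachable():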
((sp & (~(STACK_SIZE-1))) + \
(STACK_SIZE - sizeof(struct cpu_info) - sizeof(unsigned long)))
-#define reset_stack_and_jump(__fn) \
- __asm__ __volatile__ ( \
- "mov %0,%%"__OP"sp; jmp %c1" \
- : : "r" (guest_cpu_user_regs()), "i" (__fn) : "memory" )
+#define reset_stack_and_jump(__fn) \
+ ({ \
+ __asm__ __volatile__ ( \
+ "mov %0,%%"__OP"sp; jmp %c1" \
+ : : "r" (guest_cpu_user_regs()), "i" (__fn) : "memory" ); \
+ unreachable(); \
+ })
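Without that trailing unreachable(), GCC cannot see that the inline asm never falls through, and every noreturn function ending in reset_stack_and_jump() would draw a "'noreturn' function does return" warning. A compilable x86 sketch of the same pattern — the names jump_to, target and run are made up for illustration, this is not Xen code:

    #include <stdlib.h>

    /* Jump somewhere the compiler cannot analyse, then tell it
     * explicitly that control never comes back. */
    #define jump_to(fn)                                        \
        ({                                                     \
            __asm__ __volatile__ ( "jmp *%0" : : "r" (fn) );   \
            __builtin_unreachable();                           \
        })

    static void target(void)
    {
        exit(0);
    }

    static void __attribute__((noreturn)) run(void)
    {
        jump_to(target);
        /* No trailing return or BUG() needed: the
         * __builtin_unreachable() hint satisfies the noreturn
         * contract. */
    }

    int main(void)
    {
        run();
    }

The real macro additionally loads a fresh stack pointer (mov %0,%%rsp) before the jump, which is why it carries the "memory" clobber; the sketch leaves the stack alone.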
#define schedule_tail(vcpu) (((vcpu)->arch.schedule_tail)(vcpu))
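schedule_tail() is only a macro around the per-vcpu function pointer, so the annotation has to live on the pointer itself, in struct arch_vcpu in xen/include/asm-x86/domain.h: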
unsigned long flags; /* TF_ */
- void (*schedule_tail) (struct vcpu *);
+ void noreturn (*schedule_tail) (struct vcpu *);
void (*ctxt_switch_from) (struct vcpu *);
void (*ctxt_switch_to) (struct vcpu *);
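ctxt_switch_from() and ctxt_switch_to() genuinely return, so they stay unannotated. Last, the declaration of vmx_do_resume() in xen/include/asm-x86/hvm/vmx/vmx.h is brought in line with its definition (svm_do_resume() is static, so updating its definition above sufficed):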
void vmx_asm_vmexit_handler(struct cpu_user_regs);
void vmx_asm_do_vmentry(void);
void vmx_intr_assist(void);
-void vmx_do_resume(struct vcpu *);
+void noreturn vmx_do_resume(struct vcpu *);
void vmx_vlapic_msr_changed(struct vcpu *v);
void vmx_realmode(struct cpu_user_regs *regs);
void vmx_update_debug_state(struct vcpu *v);
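With that, the set is closed: svm_do_resume() and vmx_do_resume() both end in reset_stack_and_jump(), the macro now ends in unreachable(), and every noreturn annotation in the patch is one the compiler can actually verify.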