dead_idle();
}
-static void noreturn idle_loop(void)
+static void noreturn cf_check idle_loop(void)
{
unsigned int cpu = smp_processor_id();
/*
}
}
-void paravirt_ctxt_switch_from(struct vcpu *v)
+void cf_check paravirt_ctxt_switch_from(struct vcpu *v)
{
save_segments(v);
write_debugreg(7, 0);
}
-void paravirt_ctxt_switch_to(struct vcpu *v)
+void cf_check paravirt_ctxt_switch_to(struct vcpu *v)
{
root_pgentry_t *root_pgt = this_cpu(root_pgt);
wrmsrl(MSR_AMD64_TSC_RATIO, hvm_tsc_scaling_ratio(v->domain));
}
-static void svm_ctxt_switch_from(struct vcpu *v)
+static void cf_check svm_ctxt_switch_from(struct vcpu *v)
{
int cpu = smp_processor_id();
enable_each_ist(idt_tables[cpu]);
}
-static void svm_ctxt_switch_to(struct vcpu *v)
+static void cf_check svm_ctxt_switch_to(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
int cpu = smp_processor_id();
wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
}
-static void noreturn svm_do_resume(void)
+static void noreturn cf_check svm_do_resume(void)
{
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
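The functions gaining cf_check in these hunks are reached through function pointers (Xen's per-domain context-switch hooks: the ->from/->to callbacks and the ->tail resume paths), so once CET-IBT is enabled their entry points must begin with an ENDBR64 landing pad; the cf_check annotation is what asks the toolchain to emit one. A minimal standalone sketch of the mechanism, with made-up names rather than Xen's real hook table:

    /*
     * Illustration only -- not Xen code.  With GCC's
     * -fcf-protection=branch -mmanual-endbr, an ENDBR64 landing pad is
     * emitted only for functions carrying the cf_check attribute, which is
     * what makes them legal targets of an indirect call or jump under IBT.
     */
    #ifdef __CET__
    # define cf_check __attribute__((cf_check))
    #else
    # define cf_check                /* no IBT: the annotation compiles away */
    #endif

    struct vcpu;                     /* opaque, for the sketch */

    struct demo_ctxt_switch {        /* hypothetical hook table */
        void (*from)(struct vcpu *v);
        void (*to)(struct vcpu *v);
    };

    static void cf_check demo_from(struct vcpu *v) { (void)v; } /* gets ENDBR64 */
    static void cf_check demo_to(struct vcpu *v)   { (void)v; } /* gets ENDBR64 */

    static const struct demo_ctxt_switch demo_ops = { demo_from, demo_to };

    void demo_context_switch(struct vcpu *prev, struct vcpu *next)
    {
        demo_ops.from(prev);         /* indirect call: must land on ENDBR64 */
        demo_ops.to(next);
    }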
void noreturn vmx_asm_do_vmentry(void);
-void vmx_do_resume(void)
+void cf_check vmx_do_resume(void)
{
struct vcpu *v = current;
bool_t debug_state;
static bool_t __initdata opt_force_ept;
boolean_param("force-ept", opt_force_ept);
-static void vmx_ctxt_switch_from(struct vcpu *v);
-static void vmx_ctxt_switch_to(struct vcpu *v);
+static void cf_check vmx_ctxt_switch_from(struct vcpu *v);
+static void cf_check vmx_ctxt_switch_to(struct vcpu *v);
static int alloc_vlapic_mapping(void);
static void vmx_install_vlapic_mapping(struct vcpu *v);
}
}
-static void vmx_ctxt_switch_from(struct vcpu *v)
+static void cf_check vmx_ctxt_switch_from(struct vcpu *v)
{
/*
* Return early if trying to do a context switch without VMX enabled,
vmx_pi_switch_from(v);
}
-static void vmx_ctxt_switch_to(struct vcpu *v)
+static void cf_check vmx_ctxt_switch_to(struct vcpu *v)
{
vmx_restore_guest_msrs(v);
vmx_restore_dr(v);
#define switch_stack_and_jump(fn, instr, constr) \
({ \
unsigned int tmp; \
- (void)((fn) == (void (*)(void))NULL); \
BUILD_BUG_ON(!ssaj_has_attr_noreturn(fn)); \
__asm__ __volatile__ ( \
SHADOW_STACK_WORK \
/* The constraint may only specify non-call-clobbered registers. */
#define reset_stack_and_jump_ind(fn) \
- switch_stack_and_jump(fn, "INDIRECT_JMP %", "b")
+ ({ \
+ (void)((fn) == (void (*)(void))NULL); \
+ switch_stack_and_jump(fn, "INDIRECT_JMP %", "b"); \
+ })
/*
* Which VCPU's state is currently running on each CPU?
void vmx_asm_vmexit_handler(struct cpu_user_regs);
void vmx_intr_assist(void);
-void noreturn vmx_do_resume(void);
+void noreturn cf_check vmx_do_resume(void);
void vmx_vlapic_msr_changed(struct vcpu *v);
struct hvm_emulate_ctxt;
void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt);
#endif /* CONFIG_PV */
-void paravirt_ctxt_switch_from(struct vcpu *v);
-void paravirt_ctxt_switch_to(struct vcpu *v);
+void cf_check paravirt_ctxt_switch_from(struct vcpu *v);
+void cf_check paravirt_ctxt_switch_to(struct vcpu *v);
#endif /* __X86_PV_DOMAIN_H__ */
FREE_XENHEAP_PAGE(d->arch.pv.gdt_ldt_l1tab);
}
-void noreturn continue_pv_domain(void);
+void noreturn cf_check continue_pv_domain(void);
int pv_domain_initialise(struct domain *d)
{
/* No special register assumptions. */
#ifdef CONFIG_PV
ENTRY(continue_pv_domain)
+ ENDBR64
call check_wakeup_from_wait
ret_from_intr:
GET_CURRENT(bx)
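continue_pv_domain is one of these indirect-jump targets too, but it is written in assembly, so the compiler cannot place its landing pad: the ENDBR64 is added by hand at the symbol, matching the cf_check put on its C declaration above. Under CET-IBT, an indirect call or jmp that does not land on an ENDBR64 raises a control-protection fault (#CP). A sketch of the same pattern, with illustrative names rather than Xen's entry.S, expressed as a file-scope asm block in C:

    /*
     * Illustration only.  A hand-written assembly entry point reached via a
     * function pointer must carry its own ENDBR64; the compiler only adds
     * landing pads to the C functions it emits.
     */
    __asm__ (
        "    .text\n"
        "    .globl demo_asm_tail\n"
        "    .type  demo_asm_tail, @function\n"
        "demo_asm_tail:\n"
        "    endbr64\n"              /* landing pad for the indirect jump */
        "1:  pause\n"
        "    jmp 1b\n"               /* spin: stands in for the real tail */
        "    .size  demo_asm_tail, . - demo_asm_tail\n"
    );

    void demo_asm_tail(void);                       /* C-visible prototype */

    void (*demo_tail_hook)(void) = demo_asm_tail;   /* reached indirectly */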