/* PV guests by default have a 100Hz ticker. */
v->periodic_period = MILLISECS(10);
}
-
- v->arch.schedule_tail = continue_nonidle_domain;
- v->arch.ctxt_switch_from = paravirt_ctxt_switch_from;
- v->arch.ctxt_switch_to = paravirt_ctxt_switch_to;
-
- if ( is_idle_domain(d) )
- {
- v->arch.schedule_tail = continue_idle_domain;
- v->arch.cr3 = __pa(idle_pg_table);
- }
+ else
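+ /* Idle vCPUs run on the idle page tables. */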
+ v->arch.cr3 = __pa(idle_pg_table);
v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
goto fail;
}
else
+ {
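+ /*
+ * The context switch hooks depend only on the type of domain being
+ * created, so a single static const table per type can be shared by
+ * all domains of that type, rather than duplicating three pointers
+ * in every vCPU.
+ */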
+ static const struct arch_csw pv_csw = {
+ .from = paravirt_ctxt_switch_from,
+ .to = paravirt_ctxt_switch_to,
+ .tail = continue_nonidle_domain,
+ };
+ static const struct arch_csw idle_csw = {
+ .from = paravirt_ctxt_switch_from,
+ .to = paravirt_ctxt_switch_to,
+ .tail = continue_idle_domain,
+ };
+
+ d->arch.ctxt_switch = is_idle_domain(d) ? &idle_csw : &pv_csw;
+
/* 64-bit PV guest by default. */
d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
+ }
/* initialize default tsc behavior in case tools don't */
tsc_set_info(d, TSC_MODE_DEFAULT, 0UL, 0, 0);
{
memcpy(&p->arch.user_regs, stack_regs, CTXT_SWITCH_STACK_BYTES);
vcpu_save_fpu(p);
- p->arch.ctxt_switch_from(p);
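+ /* Invoke the outgoing domain's context switch "from" hook. */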
+ pd->arch.ctxt_switch->from(p);
}
set_msr_xss(n->arch.hvm_vcpu.msr_xss);
}
vcpu_restore_fpu_eager(n);
- n->arch.ctxt_switch_to(n);
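+ /* Invoke the incoming domain's context switch "to" hook. */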
+ nd->arch.ctxt_switch->to(n);
}
psr_ctxt_switch_to(nd);
/* Ensure that the vcpu has an up-to-date time base. */
update_vcpu_system_time(next);
- schedule_tail(next);
+ /*
+ * Schedule tail *should* be a terminal function pointer, but leave a
+ * bug frame around just in case it returns, to save going back into the
+ * context switching code and leaving a far more subtle crash to diagnose.
+ */
+ nextd->arch.ctxt_switch->tail(next);
+ BUG();
}
void continue_running(struct vcpu *same)
{
- schedule_tail(same);
+ /* See the comment above. */
+ same->domain->arch.ctxt_switch->tail(same);
+ BUG();
}
int __sync_local_execstate(void)
static int svm_domain_initialise(struct domain *d)
{
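+ /*
+ * All SVM vCPUs use the same context switch hooks, so a single
+ * static const table can be installed at domain creation time.
+ */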
+ static const struct arch_csw csw = {
+ .from = svm_ctxt_switch_from,
+ .to = svm_ctxt_switch_to,
+ .tail = svm_do_resume,
+ };
+
+ d->arch.ctxt_switch = &csw;
+
return 0;
}
{
int rc;
- v->arch.schedule_tail = svm_do_resume;
- v->arch.ctxt_switch_from = svm_ctxt_switch_from;
- v->arch.ctxt_switch_to = svm_ctxt_switch_to;
-
v->arch.hvm_svm.launch_core = -1;
if ( (rc = svm_create_vmcb(v)) != 0 )
static int vmx_domain_initialise(struct domain *d)
{
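+ /*
+ * As for SVM, every VMX vCPU shares the same context switch hooks,
+ * installed once at domain creation.
+ */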
+ static const struct arch_csw csw = {
+ .from = vmx_ctxt_switch_from,
+ .to = vmx_ctxt_switch_to,
+ .tail = vmx_do_resume,
+ };
int rc;
+ d->arch.ctxt_switch = &csw;
+
if ( !has_vlapic(d) )
return 0;
INIT_LIST_HEAD(&v->arch.hvm_vmx.pi_blocking.list);
- v->arch.schedule_tail = vmx_do_resume;
- v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
- v->arch.ctxt_switch_to = vmx_ctxt_switch_to;
-
if ( (rc = vmx_create_vmcs(v)) != 0 )
{
dprintk(XENLOG_WARNING,
} relmem;
struct page_list_head relmem_list;
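+ /*
+ * Context switch hooks. Set once when the domain is created, based
+ * on the domain type, and shared by all of its vCPUs:
+ * from - save state of the vCPU being switched away from,
+ * to - restore state of the vCPU being switched to,
+ * tail - resume execution of the new vCPU; must not return.
+ */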
+ const struct arch_csw {
+ void (*from)(struct vcpu *);
+ void (*to)(struct vcpu *);
+ void (*tail)(struct vcpu *);
+ } *ctxt_switch;
+
/* nestedhvm: translate l2 guest physical to host physical */
struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
mm_lock_t nested_p2m_lock;
unsigned long flags; /* TF_ */
- void (*schedule_tail) (struct vcpu *);
-
- void (*ctxt_switch_from) (struct vcpu *);
- void (*ctxt_switch_to) (struct vcpu *);
-
struct vpmu_struct vpmu;
/* Virtual Machine Extensions */