This prepares for making the different schedulers vcpu-agnostic.

Note that some scheduler-specific accessor functions are misnamed after
this patch; this will be corrected in later patches.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Dario Faggioli <dfaggioli@suse.com>
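
For reference, a minimal sketch of the resulting layout. These are not the
real Xen definitions (the field lists are heavily trimmed and the helper
sched_unit_priv() is purely illustrative), but they show how scheduler
private data is now reached through the unit rather than the vcpu:

struct sched_unit;

struct vcpu {
    struct sched_unit *sched_unit;   /* unit this vcpu belongs to */
    /* ... other fields elided; sched_priv is gone after this patch ... */
};

struct sched_unit {
    struct vcpu *vcpu_list;          /* vcpu(s) grouped in this unit */
    void *priv;                      /* scheduler private data, formerly v->sched_priv */
};

/* Accessor pattern the per-scheduler helpers now follow. */
static inline void *sched_unit_priv(const struct vcpu *v)
{
    return v->sched_unit->priv;
}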
* Return a pointer to the ARINC 653-specific scheduler data information
* associated with the given VCPU (vc)
*/
-#define AVCPU(vc) ((arinc653_vcpu_t *)(vc)->sched_priv)
+#define AVCPU(vc) ((arinc653_vcpu_t *)(vc)->sched_unit->priv)
/**
* Return the global scheduler private data given the scheduler ops pointer
ASSERT(!pdata && svc && is_idle_vcpu(svc->vc));
- idle_vcpu[cpu]->sched_priv = vdata;
+ idle_vcpu[cpu]->sched_unit->priv = vdata;
return &sd->_lock;
}
((struct csched_private *)((_ops)->sched_data))
#define CSCHED_PCPU(_c) \
((struct csched_pcpu *)per_cpu(schedule_data, _c).sched_priv)
-#define CSCHED_VCPU(_vcpu) ((struct csched_vcpu *) (_vcpu)->sched_priv)
+#define CSCHED_VCPU(_vcpu) ((struct csched_vcpu *) (_vcpu)->sched_unit->priv)
#define CSCHED_DOM(_dom) ((struct csched_dom *) (_dom)->sched_priv)
#define RUNQ(_cpu) (&(CSCHED_PCPU(_cpu)->runq))
ASSERT(svc && is_idle_vcpu(svc->vcpu));
- idle_vcpu[cpu]->sched_priv = vdata;
+ idle_vcpu[cpu]->sched_unit->priv = vdata;
/*
* We are holding the runqueue lock already (it's been taken in
csched_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
{
struct vcpu *vc = unit->vcpu_list;
- struct csched_vcpu *svc = vc->sched_priv;
+ struct csched_vcpu *svc = unit->priv;
spinlock_t *lock;
BUG_ON( is_idle_vcpu(vc) );
static inline struct csched2_vcpu *csched2_vcpu(const struct vcpu *v)
{
- return v->sched_priv;
+ return v->sched_unit->priv;
}
static inline struct csched2_dom *csched2_dom(const struct domain *d)
static void
runq_assign(const struct scheduler *ops, struct vcpu *vc)
{
- struct csched2_vcpu *svc = vc->sched_priv;
+ struct csched2_vcpu *svc = vc->sched_unit->priv;
ASSERT(svc->rqd == NULL);
static void
runq_deassign(const struct scheduler *ops, struct vcpu *vc)
{
- struct csched2_vcpu *svc = vc->sched_priv;
+ struct csched2_vcpu *svc = vc->sched_unit->priv;
ASSERT(svc->rqd == c2rqd(ops, vc->processor));
csched2_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
{
struct vcpu *vc = unit->vcpu_list;
- struct csched2_vcpu *svc = vc->sched_priv;
+ struct csched2_vcpu *svc = unit->priv;
struct csched2_dom * const sdom = svc->sdom;
spinlock_t *lock;
ASSERT(!local_irq_is_enabled());
write_lock(&prv->lock);
- idle_vcpu[cpu]->sched_priv = vdata;
+ idle_vcpu[cpu]->sched_unit->priv = vdata;
rqi = init_pdata(prv, pdata, cpu);
static inline struct null_vcpu *null_vcpu(const struct vcpu *v)
{
- return v->sched_priv;
+ return v->sched_unit->priv;
}
static inline bool vcpu_check_affinity(struct vcpu *v, unsigned int cpu,
ASSERT(nvc && is_idle_vcpu(nvc->vcpu));
- idle_vcpu[cpu]->sched_priv = vdata;
+ idle_vcpu[cpu]->sched_unit->priv = vdata;
/*
* We are holding the runqueue lock already (it's been taken in
static inline struct rt_vcpu *rt_vcpu(const struct vcpu *vcpu)
{
- return vcpu->sched_priv;
+ return vcpu->sched_unit->priv;
}
static inline struct list_head *rt_runq(const struct scheduler *ops)
dprintk(XENLOG_DEBUG, "RTDS: timer initialized on cpu %u\n", cpu);
}
- idle_vcpu[cpu]->sched_priv = vdata;
+ idle_vcpu[cpu]->sched_unit->priv = vdata;
return &prv->lock;
}
sched_idle_switch_sched(struct scheduler *new_ops, unsigned int cpu,
void *pdata, void *vdata)
{
- idle_vcpu[cpu]->sched_priv = NULL;
+ idle_vcpu[cpu]->sched_unit->priv = NULL;
return &sched_free_cpu_lock;
}
init_timer(&v->poll_timer, poll_timer_fn,
v, v->processor);
- v->sched_priv = sched_alloc_udata(dom_scheduler(d), unit, d->sched_priv);
- if ( v->sched_priv == NULL )
+ unit->priv = sched_alloc_udata(dom_scheduler(d), unit, d->sched_priv);
+ if ( unit->priv == NULL )
{
xfree(unit);
return 1;
{
spinlock_t *lock;
- vcpudata = v->sched_priv;
+ vcpudata = v->sched_unit->priv;
migrate_timer(&v->periodic_timer, new_p);
migrate_timer(&v->singleshot_timer, new_p);
*/
spin_unlock_irq(lock);
- v->sched_priv = vcpu_priv[v->vcpu_id];
+ v->sched_unit->priv = vcpu_priv[v->vcpu_id];
if ( !d->is_dying )
sched_move_irqs(v);
if ( test_and_clear_bool(v->is_urgent) )
atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
sched_remove_unit(vcpu_scheduler(v), unit);
- sched_free_udata(vcpu_scheduler(v), v->sched_priv);
+ sched_free_udata(vcpu_scheduler(v), unit->priv);
v->sched_unit = NULL;
xfree(unit);
}
*/
old_lock = pcpu_schedule_lock_irqsave(cpu, &flags);
- vpriv_old = idle->sched_priv;
+ vpriv_old = idle->sched_unit->priv;
ppriv_old = sd->sched_priv;
new_lock = sched_switch_sched(new_ops, cpu, ppriv, vpriv);
struct timer poll_timer; /* timeout for SCHEDOP_poll */
struct sched_unit *sched_unit;
- void *sched_priv; /* scheduler-specific data */
struct vcpu_runstate_info runstate;
#ifndef CONFIG_COMPAT
struct sched_unit {
struct domain *domain;
struct vcpu *vcpu_list;
+ void *priv; /* scheduler private data */
unsigned int unit_id;
};