* order. If no per-pCPU memory was allocated, there is no need to
* provide an implementation of free_pdata. deinit_pdata may, however,
* be necessary/useful in this case too (e.g., it can undo something done
- * on scheduler wide data structure during init_pdata). Both deinit_pdata
+ * on a scheduler-wide data structure during switch_sched). Both deinit_pdata
* and free_pdata are called during CPU_DEAD.
*
- * If someting goes wrong during bringup, we go to CPU_UP_CANCELLED.
+ * If something goes wrong during bringup, we go to CPU_UP_CANCELLED.
*/
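/*
 * Illustrative recap (not part of the patch) of the lifecycle the
 * comment above describes, in hotplug order; the hook names are the
 * ones discussed above, everything else is sketch:
 *
 *   CPU_UP_PREPARE:      data = sched_alloc_pdata(s, cpu);   allocate only
 *   sched_switch_sched:  target scheduler initializes data, with proper
 *                        ordering and locking
 *   CPU_DEAD:            sched_deinit_pdata(s, data, cpu);
 *                        s->free_pdata(s, data, cpu);
 *   CPU_UP_CANCELLED:    s->free_pdata(s, data, cpu);        undo the alloc
 */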
switch ( action )
{
* To setup the cpu for the new scheduler we need:
* - a valid instance of per-CPU scheduler specific data, as it is
* allocated by sched_alloc_pdata(). Note that we do not want to
- * initialize it yet (i.e., we are not calling sched_init_pdata()).
- * That will be done by the target scheduler, in sched_switch_sched(),
- * in proper ordering and with locking.
+ * initialize it yet, as that will be done by the target scheduler,
+ * in sched_switch_sched(), with proper ordering and locking.
* - a valid instance of per-vCPU scheduler specific data, for the idle
* vCPU of cpu. That is what the target scheduler will use for the
* sched_priv field of the per-vCPU info of the idle domain.
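A sketch of the CPU_UP_PREPARE handling the two requirements above translate
into; sched_alloc_pdata() and the ERR_PTR(-err)/NULL return convention appear
elsewhere in this patch, while the variable names and the idle-unit allocation
call are illustrative assumptions:

    case CPU_UP_PREPARE:
        /* Allocate per-pCPU data; initialization is deliberately left
         * to the target scheduler, in sched_switch_sched(). */
        pdata = sched_alloc_pdata(s, cpu);
        if ( IS_ERR(pdata) )
            return PTR_ERR(pdata);   /* ERR_PTR(-err): bringup fails */

        /* Per-vCPU data for cpu's idle vCPU; the target scheduler will
         * use it as the idle unit's sched_priv.  (Hypothetical call:
         * the real hook signature is not shown in this hunk.) */
        vdata = sched_alloc_udata(s, idle_unit, pdata);
        break;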
spc->nr_runnable = 0;
}
-static void
-csched_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
-{
- unsigned long flags;
- struct csched_private *prv = CSCHED_PRIV(ops);
-
- spin_lock_irqsave(&prv->lock, flags);
- init_pdata(prv, pdata, cpu);
- spin_unlock_irqrestore(&prv->lock, flags);
-}
-
/* Change the scheduler of cpu to us (Credit). */
static spinlock_t *
csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
.alloc_udata = csched_alloc_udata,
.free_udata = csched_free_udata,
.alloc_pdata = csched_alloc_pdata,
- .init_pdata = csched_init_pdata,
.deinit_pdata = csched_deinit_pdata,
.free_pdata = csched_free_pdata,
.switch_sched = csched_switch_sched,
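With csched_init_pdata gone, its body must now run from csched_switch_sched.
That function's body is not part of this hunk, so the following is only a
sketch of where the deleted logic plausibly lands, reusing the calls from the
deleted hook verbatim:

    /* Inside csched_switch_sched(new_ops, cpu, ...) -- sketch only: */
    struct csched_private *prv = CSCHED_PRIV(new_ops);
    unsigned long flags;

    spin_lock_irqsave(&prv->lock, flags);
    init_pdata(prv, pdata, cpu);     /* same init, now done at switch time */
    spin_unlock_irqrestore(&prv->lock, flags);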
return spc->runq_id;
}
-static void
-csched2_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
-{
- struct csched2_private *prv = csched2_priv(ops);
- spinlock_t *old_lock;
- unsigned long flags;
- unsigned rqi;
-
- write_lock_irqsave(&prv->lock, flags);
- old_lock = pcpu_schedule_lock(cpu);
-
- rqi = init_pdata(prv, pdata, cpu);
- /* Move the scheduler lock to the new runq lock. */
- get_sched_res(cpu)->schedule_lock = &prv->rqd[rqi].lock;
-
- /* _Not_ pcpu_schedule_unlock(): schedule_lock may have changed! */
- spin_unlock(old_lock);
- write_unlock_irqrestore(&prv->lock, flags);
-}
-
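The deleted hook above documents the lock handoff that csched2_switch_sched
has to preserve, and the raw spin_unlock() deserves one extra sentence:
pcpu_schedule_unlock() goes through get_sched_res(cpu)->schedule_lock, and
that pointer has just been repointed at the new runqueue lock, so the lock
actually held must be dropped directly. A minimal restatement of the pattern,
using only names from the deleted code (the surrounding function is assumed):

    old_lock = pcpu_schedule_lock(cpu);          /* lock via the pointer */
    rqi = init_pdata(prv, pdata, cpu);
    /* Repoint the per-CPU scheduler lock to the runqueue lock... */
    get_sched_res(cpu)->schedule_lock = &prv->rqd[rqi].lock;
    /* ...so only a raw unlock of what we actually hold is safe now. */
    spin_unlock(old_lock);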
/* Change the scheduler of cpu to us (Credit2). */
static spinlock_t *
csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
.alloc_udata = csched2_alloc_udata,
.free_udata = csched2_free_udata,
.alloc_pdata = csched2_alloc_pdata,
- .init_pdata = csched2_init_pdata,
.deinit_pdata = csched2_deinit_pdata,
.free_pdata = csched2_free_pdata,
.switch_sched = csched2_switch_sched,
npc->unit = NULL;
}
-static void null_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
-{
- struct null_private *prv = null_priv(ops);
-
- ASSERT(pdata);
-
- init_pdata(prv, pdata, cpu);
-}
-
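The null scheduler is the degenerate case: its deleted hook only asserted
pdata and called init_pdata, so null_switch_sched presumably gains just those
two lines. A sketch (the function body is not in this hunk):

    ASSERT(pdata);
    init_pdata(prv, pdata, cpu);     /* now done at switch time */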
static void null_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
{
struct null_private *prv = null_priv(ops);
.deinit = null_deinit,
.alloc_pdata = null_alloc_pdata,
.free_pdata = null_free_pdata,
- .init_pdata = null_init_pdata,
.switch_sched = null_switch_sched,
.deinit_pdata = null_deinit_pdata,
struct sched_unit *, void *);
void (*free_pdata) (const struct scheduler *, void *, int);
void * (*alloc_pdata) (const struct scheduler *, int);
- void (*init_pdata) (const struct scheduler *, void *, int);
void (*deinit_pdata) (const struct scheduler *, void *, int);
/* Returns ERR_PTR(-err) for error, NULL for 'nothing needed'. */
s->free_pdata(s, data, cpu);
}
-static inline void sched_init_pdata(const struct scheduler *s, void *data,
- int cpu)
-{
- if ( s->init_pdata )
- s->init_pdata(s, data, cpu);
-}
-
static inline void sched_deinit_pdata(const struct scheduler *s, void *data,
int cpu)
{
xfree(prv);
}
-/*
- * Point per_cpu spinlock to the global system lock;
- * All cpu have same global system lock
- */
-static void
-rt_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
-{
- struct rt_private *prv = rt_priv(ops);
- spinlock_t *old_lock;
- unsigned long flags;
-
- old_lock = pcpu_schedule_lock_irqsave(cpu, &flags);
-
- /*
- * TIMER_STATUS_invalid means we are the first cpu that sees the timer
- * allocated but not initialized, and so it's up to us to initialize it.
- */
- if ( prv->repl_timer.status == TIMER_STATUS_invalid )
- {
- init_timer(&prv->repl_timer, repl_timer_handler, (void *)ops, cpu);
- dprintk(XENLOG_DEBUG, "RTDS: timer initialized on cpu %u\n", cpu);
- }
-
- /* Move the scheduler lock to our global runqueue lock. */
- get_sched_res(cpu)->schedule_lock = &prv->lock;
-
- /* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
- spin_unlock_irqrestore(old_lock, flags);
-}
-
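rt_init_pdata had two jobs, both of which must now happen during the switch:
lazily initializing the shared replenishment timer (the first CPU to see
TIMER_STATUS_invalid wins) and pointing the per-CPU scheduler lock at RTDS'
single global lock. A sketch of what rt_switch_sched plausibly absorbs, built
from the deleted code above (the function body is not part of this hunk):

    /* First CPU to see the timer allocated-but-invalid initializes it. */
    if ( prv->repl_timer.status == TIMER_STATUS_invalid )
        init_timer(&prv->repl_timer, repl_timer_handler, (void *)new_ops, cpu);

    /* Every CPU shares the one global RTDS lock. */
    get_sched_res(cpu)->schedule_lock = &prv->lock;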
/* Change the scheduler of cpu to us (RTDS). */
static spinlock_t *
rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
.dump_settings = rt_dump,
.init = rt_init,
.deinit = rt_deinit,
- .init_pdata = rt_init_pdata,
.switch_sched = rt_switch_sched,
.deinit_pdata = rt_deinit_pdata,
.alloc_domdata = rt_alloc_domdata,