static void
csched_vcpu_acct(struct csched_private *prv, unsigned int cpu)
{
- struct csched_unit * const svc = CSCHED_UNIT(current->sched_unit);
+ struct sched_unit *currunit = current->sched_unit;
+ struct csched_unit * const svc = CSCHED_UNIT(currunit);
const struct scheduler *ops = per_cpu(scheduler, cpu);
ASSERT( current->processor == cpu );
{
unsigned int new_cpu;
unsigned long flags;
- spinlock_t *lock = vcpu_schedule_lock_irqsave(current, &flags);
+ spinlock_t *lock = unit_schedule_lock_irqsave(currunit, &flags);
/*
* If it's been active a while, check if we'd be better off
*/
new_cpu = _csched_cpu_pick(ops, current, 0);
- vcpu_schedule_unlock_irqrestore(lock, flags, current);
+ unit_schedule_unlock_irqrestore(lock, flags, currunit);
if ( new_cpu != cpu )
{
BUG_ON( is_idle_vcpu(vc) );
/* csched_res_pick() looks in vc->processor's runq, so we need the lock. */
- lock = vcpu_schedule_lock_irq(vc);
+ lock = unit_schedule_lock_irq(unit);
unit->res = csched_res_pick(ops, unit);
vc->processor = unit->res->master_cpu;
spin_unlock_irq(lock);
- lock = vcpu_schedule_lock_irq(vc);
+ lock = unit_schedule_lock_irq(unit);
if ( !__vcpu_on_runq(svc) && vcpu_runnable(vc) && !vc->is_running )
runq_insert(svc);
- vcpu_schedule_unlock_irq(lock, vc);
+ unit_schedule_unlock_irq(lock, unit);
SCHED_STAT_CRANK(vcpu_insert);
}
spinlock_t *lock;
svc = list_entry(iter_svc, struct csched_unit, active_vcpu_elem);
- lock = vcpu_schedule_lock(svc->vcpu);
+ lock = unit_schedule_lock(svc->vcpu->sched_unit);
printk("\t%3d: ", ++loop);
csched_dump_vcpu(svc);
- vcpu_schedule_unlock(lock, svc->vcpu);
+ unit_schedule_unlock(lock, svc->vcpu->sched_unit);
}
}
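The credit1 and credit2 insert/migrate hunks above all follow the same discipline: the unit's lock is whatever schedule_lock its current resource points at, so after re-picking the resource the old lock is dropped with a plain spin_unlock_irq() and the lock is re-acquired through unit_schedule_lock_irq(), which now resolves to the new resource's lock. A minimal sketch of that discipline, using the helpers shown in this patch; res_pick_for() is a hypothetical stand-in for csched_res_pick()/csched2_res_pick():

/*
 * Sketch only, not part of the patch.  Lock, pick a (possibly different)
 * resource, drop the old lock directly, then relock through the unit so
 * the new resource's lock is held before touching its runqueue.
 */
static void example_unit_insert(const struct scheduler *ops,
                                struct sched_unit *unit)
{
    spinlock_t *lock = unit_schedule_lock_irq(unit);    /* old resource */

    unit->res = res_pick_for(ops, unit);                /* hypothetical */
    unit->vcpu_list->processor = unit->res->master_cpu;
    spin_unlock_irq(lock);                 /* helper would mismatch now */

    lock = unit_schedule_lock_irq(unit);   /* new resource's lock       */
    /* ... insert the unit on its new runqueue ... */
    unit_schedule_unlock_irq(lock, unit);
}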
* - runqueue lock
* + it is per-runqueue, so:
* * cpus in a runqueue take the runqueue lock, when using
- * pcpu_schedule_lock() / vcpu_schedule_lock() (and friends),
+ * pcpu_schedule_lock() / unit_schedule_lock() (and friends),
* * a cpu may (try to) take a "remote" runqueue lock, e.g., for
* load balancing;
* + serializes runqueue operations (removing and inserting vcpus);
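The comment above is credit2's locking model: every CPU in a runqueue shares that runqueue's spinlock, which is why pcpu_schedule_lock() on any of those CPUs and unit_schedule_lock() on any unit assigned there serialize on the same lock. A hedged sketch of the mechanism, using only the sched_res->schedule_lock pointer referred to elsewhere in this patch; the runqueue type and helper name are illustrative:

/* Illustrative runqueue carrying the per-runqueue lock the comment means. */
struct example_runqueue_data {
    spinlock_t lock;
    /* ... queue contents ... */
};

/*
 * Sketch only: when a CPU joins the runqueue, its resource's schedule_lock
 * is repointed at the runqueue lock (this must be done while holding the
 * CPU's current lock, not shown).  From then on, pcpu_schedule_lock(cpu)
 * and unit_schedule_lock(unit) for units on this runqueue take &rqd->lock.
 */
static void example_attach_cpu(struct sched_resource *sr,
                               struct example_runqueue_data *rqd)
{
    sr->schedule_lock = &rqd->lock;
}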
unsigned long flags;
s_time_t now;
- lock = vcpu_schedule_lock_irqsave(svc->vcpu, &flags);
+ lock = unit_schedule_lock_irqsave(svc->vcpu->sched_unit, &flags);
__clear_bit(_VPF_parked, &svc->vcpu->pause_flags);
if ( unlikely(svc->flags & CSFLAG_scheduled) )
}
list_del_init(&svc->parked_elem);
- vcpu_schedule_unlock_irqrestore(lock, flags, svc->vcpu);
+ unit_schedule_unlock_irqrestore(lock, flags, svc->vcpu->sched_unit);
}
}
{
struct vcpu *vc = unit->vcpu_list;
struct csched2_unit * const svc = csched2_unit(unit);
- spinlock_t *lock = vcpu_schedule_lock_irq(vc);
+ spinlock_t *lock = unit_schedule_lock_irq(unit);
s_time_t now = NOW();
LIST_HEAD(were_parked);
else if ( !is_idle_vcpu(vc) )
update_load(ops, svc->rqd, svc, -1, now);
- vcpu_schedule_unlock_irq(lock, vc);
+ unit_schedule_unlock_irq(lock, unit);
unpark_parked_vcpus(ops, &were_parked);
}
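Note the shape of the hunk above: the vCPUs to unpark are collected on the local were_parked list while the unit lock is held, and unpark_parked_vcpus() runs only after that lock is dropped, since unparking takes each parked vCPU's own unit lock (see the hunk just before this one) and doing so while still holding this lock would risk nesting, or even retaking, the same per-runqueue lock. A sketch of the collect-then-process shape (budget handling elided):

/* Sketch only: gather work under one unit lock, perform it after unlock. */
static void example_replenish(const struct scheduler *ops,
                              struct sched_unit *unit)
{
    LIST_HEAD(were_parked);
    spinlock_t *lock = unit_schedule_lock_irq(unit);

    /* ... replenish budget, move newly-unparked entries to were_parked ... */

    unit_schedule_unlock_irq(lock, unit);

    /* Takes each entry's own unit lock internally, one at a time. */
    unpark_parked_vcpus(ops, &were_parked);
}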
for_each_vcpu ( d, v )
{
struct csched2_unit *svc = csched2_unit(v->sched_unit);
- spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
+ spinlock_t *lock = unit_schedule_lock(svc->vcpu->sched_unit);
ASSERT(svc->rqd == c2rqd(ops, svc->vcpu->processor));
svc->weight = sdom->weight;
update_max_weight(svc->rqd, svc->weight, old_weight);
- vcpu_schedule_unlock(lock, svc->vcpu);
+ unit_schedule_unlock(lock, svc->vcpu->sched_unit);
}
}
/* Cap */
for_each_vcpu ( d, v )
{
svc = csched2_unit(v->sched_unit);
- lock = vcpu_schedule_lock(svc->vcpu);
+ lock = unit_schedule_lock(svc->vcpu->sched_unit);
/*
* Too small quotas would in theory cause a lot of overhead,
* which then won't happen because, in csched2_runtime(),
*/
svc->budget_quota = max(sdom->tot_budget / sdom->nr_vcpus,
CSCHED2_MIN_TIMER);
- vcpu_schedule_unlock(lock, svc->vcpu);
+ unit_schedule_unlock(lock, svc->vcpu->sched_unit);
}
if ( sdom->cap == 0 )
for_each_vcpu ( d, v )
{
svc = csched2_unit(v->sched_unit);
- lock = vcpu_schedule_lock(svc->vcpu);
+ lock = unit_schedule_lock(svc->vcpu->sched_unit);
if ( v->is_running )
{
unsigned int cpu = v->processor;
cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
}
svc->budget = 0;
- vcpu_schedule_unlock(lock, svc->vcpu);
+ unit_schedule_unlock(lock, svc->vcpu->sched_unit);
}
}
for_each_vcpu ( d, v )
{
struct csched2_unit *svc = csched2_unit(v->sched_unit);
- spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
+ spinlock_t *lock = unit_schedule_lock(svc->vcpu->sched_unit);
svc->budget = STIME_MAX;
svc->budget_quota = 0;
- vcpu_schedule_unlock(lock, svc->vcpu);
+ unit_schedule_unlock(lock, svc->vcpu->sched_unit);
}
sdom->cap = 0;
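The weight, quota and cap hunks above all reduce to one loop shape: walk the domain's vCPUs, take each vCPU's unit lock, update that unit's scheduler data, and release the lock before moving to the next vCPU, so only one unit lock is ever held at a time. The shape in isolation (update_one() is a hypothetical callback standing in for the weight/budget updates):

/* Sketch only: per-unit update under that unit's lock, one at a time. */
static void example_domain_update(struct domain *d,
                                  void (*update_one)(struct sched_unit *))
{
    struct vcpu *v;

    for_each_vcpu ( d, v )
    {
        spinlock_t *lock = unit_schedule_lock(v->sched_unit);

        update_one(v->sched_unit);          /* e.g. new weight or quota */
        unit_schedule_unlock(lock, v->sched_unit);
    }
}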
/*
ASSERT(list_empty(&svc->runq_elem));
/* csched2_res_pick() expects the pcpu lock to be held */
- lock = vcpu_schedule_lock_irq(vc);
+ lock = unit_schedule_lock_irq(unit);
unit->res = csched2_res_pick(ops, unit);
vc->processor = unit->res->master_cpu;
spin_unlock_irq(lock);
- lock = vcpu_schedule_lock_irq(vc);
+ lock = unit_schedule_lock_irq(unit);
/* Add vcpu to runqueue of initial processor */
runq_assign(ops, vc);
- vcpu_schedule_unlock_irq(lock, vc);
+ unit_schedule_unlock_irq(lock, unit);
sdom->nr_vcpus++;
SCHED_STAT_CRANK(vcpu_remove);
/* Remove from runqueue */
- lock = vcpu_schedule_lock_irq(vc);
+ lock = unit_schedule_lock_irq(unit);
runq_deassign(ops, vc);
- vcpu_schedule_unlock_irq(lock, vc);
+ unit_schedule_unlock_irq(lock, unit);
svc->sdom->nr_vcpus--;
}
struct csched2_unit * const svc = csched2_unit(v->sched_unit);
spinlock_t *lock;
- lock = vcpu_schedule_lock(svc->vcpu);
+ lock = unit_schedule_lock(svc->vcpu->sched_unit);
printk("\t%3d: ", ++loop);
csched2_dump_vcpu(prv, svc);
- vcpu_schedule_unlock(lock, svc->vcpu);
+ unit_schedule_unlock(lock, svc->vcpu->sched_unit);
}
}
* all the pCPUs are busy.
*
* In fact, there must always be something sane in v->processor, or
- * vcpu_schedule_lock() and friends won't work. This is not a problem,
+ * unit_schedule_lock() and friends won't work. This is not a problem,
* as we will actually assign the vCPU to the pCPU we return from here,
* only if the pCPU is free.
*/
ASSERT(!is_idle_vcpu(v));
- lock = vcpu_schedule_lock_irq(v);
+ lock = unit_schedule_lock_irq(unit);
if ( unlikely(!is_vcpu_online(v)) )
{
- vcpu_schedule_unlock_irq(lock, v);
+ unit_schedule_unlock_irq(lock, unit);
return;
}
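In the null scheduler hunks above the online check is done under the unit lock, so the early bail-out and whatever pCPU assignment would otherwise follow are decided against a consistent view of the vCPU. The early-out shape, sketched (assignment logic elided; function name illustrative):

/* Sketch only: check the gating state under the lock, then act or bail. */
static void example_null_insert(struct sched_unit *unit)
{
    struct vcpu *v = unit->vcpu_list;
    spinlock_t *lock = unit_schedule_lock_irq(unit);

    if ( unlikely(!is_vcpu_online(v)) )
    {
        unit_schedule_unlock_irq(lock, unit);
        return;                       /* nothing to place on a pCPU */
    }

    /* ... assign the unit to a free pCPU or park it on the waitqueue ... */
    unit_schedule_unlock_irq(lock, unit);
}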
spin_unlock(lock);
- lock = vcpu_schedule_lock(v);
+ lock = unit_schedule_lock(unit);
cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
cpupool_domain_cpumask(v->domain));
ASSERT(!is_idle_vcpu(v));
- lock = vcpu_schedule_lock_irq(v);
+ lock = unit_schedule_lock_irq(unit);
/* If offline, the vcpu shouldn't be assigned, nor in the waitqueue */
if ( unlikely(!is_vcpu_online(v)) )
vcpu_deassign(prv, v);
out:
- vcpu_schedule_unlock_irq(lock, v);
+ unit_schedule_unlock_irq(lock, unit);
SCHED_STAT_CRANK(vcpu_remove);
}
struct null_unit * const nvc = null_unit(v->sched_unit);
spinlock_t *lock;
- lock = vcpu_schedule_lock(nvc->vcpu);
+ lock = unit_schedule_lock(nvc->vcpu->sched_unit);
printk("\t%3d: ", ++loop);
dump_vcpu(prv, nvc);
printk("\n");
- vcpu_schedule_unlock(lock, nvc->vcpu);
+ unit_schedule_unlock(lock, nvc->vcpu->sched_unit);
}
}
/*
* System-wide private data, including the global RunQueue/DepletedQ
* Global lock is referenced by sched_res->schedule_lock from all
- * physical cpus. It can be grabbed via vcpu_schedule_lock_irq()
+ * physical cpus. It can be grabbed via unit_schedule_lock_irq()
*/
struct rt_private {
spinlock_t lock; /* the global coarse-grained lock */
unit->res = rt_res_pick(ops, unit);
vc->processor = unit->res->master_cpu;
- lock = vcpu_schedule_lock_irq(vc);
+ lock = unit_schedule_lock_irq(unit);
now = NOW();
if ( now >= svc->cur_deadline )
if ( !vc->is_running )
runq_insert(ops, svc);
}
- vcpu_schedule_unlock_irq(lock, vc);
+ unit_schedule_unlock_irq(lock, unit);
SCHED_STAT_CRANK(vcpu_insert);
}
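RTDS takes the sharing described for credit2 to its limit: as the rt_private comment above says, every resource's schedule_lock references the one global lock, so unit_schedule_lock_irq() on any RTDS unit serializes the whole scheduler. A hedged sketch of installing that lock when RTDS takes over a CPU (function name illustrative; the repointing must happen under the lock the CPU currently uses, which is omitted):

/* Sketch only: point the CPU's resource at the scheduler-wide lock. */
static void example_rt_attach_cpu(struct rt_private *prv,
                                  struct sched_resource *sr)
{
    sr->schedule_lock = &prv->lock;
    /* From here, pcpu_schedule_lock*() on this CPU and unit_schedule_lock*()
     * on any unit whose resource is this CPU all contend on prv->lock. */
}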
static void
rt_unit_remove(const struct scheduler *ops, struct sched_unit *unit)
{
- struct vcpu *vc = unit->vcpu_list;
struct rt_unit * const svc = rt_unit(unit);
struct rt_dom * const sdom = svc->sdom;
spinlock_t *lock;
BUG_ON( sdom == NULL );
- lock = vcpu_schedule_lock_irq(vc);
+ lock = unit_schedule_lock_irq(unit);
if ( vcpu_on_q(svc) )
q_remove(svc);
if ( vcpu_on_replq(svc) )
replq_remove(ops,svc);
- vcpu_schedule_unlock_irq(lock, vc);
+ unit_schedule_unlock_irq(lock, unit);
}
/*
{
struct vcpu *vc = unit->vcpu_list;
struct rt_unit *svc = rt_unit(unit);
- spinlock_t *lock = vcpu_schedule_lock_irq(vc);
+ spinlock_t *lock = unit_schedule_lock_irq(unit);
__clear_bit(__RTDS_scheduled, &svc->flags);
/* do not insert the idle vcpu into the runq */
replq_remove(ops, svc);
out:
- vcpu_schedule_unlock_irq(lock, vc);
+ unit_schedule_unlock_irq(lock, unit);
}
/*
void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
{
- spinlock_t *lock = likely(v == current) ? NULL : vcpu_schedule_lock_irq(v);
+ spinlock_t *lock = likely(v == current)
+ ? NULL : unit_schedule_lock_irq(v->sched_unit);
s_time_t delta;
memcpy(runstate, &v->runstate, sizeof(*runstate));
runstate->time[runstate->state] += delta;
if ( unlikely(lock != NULL) )
- vcpu_schedule_unlock_irq(lock, v);
+ unit_schedule_unlock_irq(lock, v->sched_unit);
}
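vcpu_runstate_get() above takes the unit lock only when reading another vCPU's runstate; for current no lock is needed, because a running vCPU's runstate cannot change underneath itself. The conditional-lock idiom in isolation (a sketch; the delta accounting of the real function is elided):

/* Sketch only: remote reads lock, self reads do not. */
static void example_runstate_snapshot(struct vcpu *v,
                                      struct vcpu_runstate_info *out)
{
    spinlock_t *lock = likely(v == current)
                       ? NULL : unit_schedule_lock_irq(v->sched_unit);

    memcpy(out, &v->runstate, sizeof(*out));    /* consistent snapshot */

    if ( unlikely(lock != NULL) )
        unit_schedule_unlock_irq(lock, v->sched_unit);
}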
uint64_t get_cpu_idle_time(unsigned int cpu)
migrate_timer(&v->singleshot_timer, new_p);
migrate_timer(&v->poll_timer, new_p);
- lock = vcpu_schedule_lock_irq(v);
+ lock = unit_schedule_lock_irq(v->sched_unit);
sched_set_affinity(v, &cpumask_all, &cpumask_all);
/*
* With v->processor modified we must not
* - make any further changes assuming we hold the scheduler lock,
- * - use vcpu_schedule_unlock_irq().
+ * - use unit_schedule_unlock_irq().
*/
spin_unlock_irq(lock);
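The comment above encodes the rule behind the raw spin_unlock_irq(): the unlock helpers generated at the end of this patch are keyed on the unit's current resource (their cpu argument is i->res->master_cpu), so once v->processor / unit->res has been changed while holding the old lock, unit_schedule_unlock_irq() no longer corresponds to the lock actually held and the saved pointer must be released directly. A sketch of the rule (illustrative; queue bookkeeping elided):

/* Sketch only: after moving the unit, drop exactly the lock we took. */
static void example_move_unit(struct sched_unit *unit,
                              struct sched_resource *new_res)
{
    spinlock_t *lock = unit_schedule_lock_irq(unit);   /* old resource */

    unit->res = new_res;
    unit->vcpu_list->processor = new_res->master_cpu;

    /* Not unit_schedule_unlock_irq(lock, unit): that is keyed on new_res. */
    spin_unlock_irq(lock);
}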
TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
- lock = vcpu_schedule_lock_irqsave(v, &flags);
+ lock = unit_schedule_lock_irqsave(v->sched_unit, &flags);
vcpu_sleep_nosync_locked(v);
- vcpu_schedule_unlock_irqrestore(lock, flags, v);
+ unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
}
void vcpu_sleep_sync(struct vcpu *v)
TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
- lock = vcpu_schedule_lock_irqsave(v, &flags);
+ lock = unit_schedule_lock_irqsave(v->sched_unit, &flags);
if ( likely(vcpu_runnable(v)) )
{
vcpu_runstate_change(v, RUNSTATE_offline, NOW());
}
- vcpu_schedule_unlock_irqrestore(lock, flags, v);
+ unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
}
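vcpu_sleep_nosync() and vcpu_wake() use the _irqsave/_irqrestore flavour of the unit lock rather than the plain _irq one because they may be called with interrupts already disabled; saving and restoring the flags preserves the caller's interrupt state instead of unconditionally re-enabling IRQs on unlock. The wake side reduced to that choice (a sketch; runqueue work elided):

/* Sketch only: preserve the caller's interrupt state around the lock. */
static void example_wake(struct vcpu *v)
{
    unsigned long flags;
    spinlock_t *lock = unit_schedule_lock_irqsave(v->sched_unit, &flags);

    if ( likely(vcpu_runnable(v)) )
    {
        /* ... put the unit on a runqueue, raise SCHEDULE_SOFTIRQ ... */
    }

    unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
}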
void vcpu_unblock(struct vcpu *v)
* These steps are encapsulated in the following two functions; they
* should be called like this:
*
- * lock = vcpu_schedule_lock_irq(v);
+ * lock = unit_schedule_lock_irq(unit);
* vcpu_migrate_start(v);
- * vcpu_schedule_unlock_irq(lock, v)
+ * unit_schedule_unlock_irq(lock, unit)
* vcpu_migrate_finish(v);
*
* vcpu_migrate_finish() will do the work now if it can, or simply
* set v->processor of each of their vCPUs to something that will
* make sense for the scheduler of the cpupool in which they are in.
*/
- lock = vcpu_schedule_lock_irq(v);
+ lock = unit_schedule_lock_irq(v->sched_unit);
cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
cpupool_domain_cpumask(d));
spin_unlock_irq(lock);
/* v->processor might have changed, so reacquire the lock. */
- lock = vcpu_schedule_lock_irq(v);
+ lock = unit_schedule_lock_irq(v->sched_unit);
v->sched_unit->res = sched_pick_resource(vcpu_scheduler(v),
v->sched_unit);
v->processor = v->sched_unit->res->master_cpu;
for_each_vcpu ( d, v )
{
unsigned long flags;
- spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);
+ spinlock_t *lock = unit_schedule_lock_irqsave(v->sched_unit, &flags);
cpumask_and(&online_affinity, v->cpu_hard_affinity, c->cpu_valid);
if ( cpumask_empty(&online_affinity) &&
if ( v->affinity_broken )
{
/* The vcpu is temporarily pinned, can't move it. */
- vcpu_schedule_unlock_irqrestore(lock, flags, v);
+ unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
ret = -EADDRINUSE;
break;
}
if ( v->processor != cpu )
{
/* The vcpu is not on this cpu, so we can move on. */
- vcpu_schedule_unlock_irqrestore(lock, flags, v);
+ unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
continue;
}
* things would have failed before getting in here.
*/
vcpu_migrate_start(v);
- vcpu_schedule_unlock_irqrestore(lock, flags, v);
+ unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
vcpu_migrate_finish(v);
spinlock_t *lock;
int ret = 0;
- lock = vcpu_schedule_lock_irq(v);
+ lock = unit_schedule_lock_irq(v->sched_unit);
if ( v->affinity_broken )
ret = -EBUSY;
vcpu_migrate_start(v);
}
- vcpu_schedule_unlock_irq(lock, v);
+ unit_schedule_unlock_irq(lock, v->sched_unit);
domain_update_node_affinity(v->domain);
long vcpu_yield(void)
{
struct vcpu *v = current;
- spinlock_t *lock = vcpu_schedule_lock_irq(v);
+ spinlock_t *lock = unit_schedule_lock_irq(v->sched_unit);
sched_yield(vcpu_scheduler(v), v->sched_unit);
- vcpu_schedule_unlock_irq(lock, v);
+ unit_schedule_unlock_irq(lock, v->sched_unit);
SCHED_STAT_CRANK(vcpu_yield);
int ret = -EINVAL;
bool migrate;
- lock = vcpu_schedule_lock_irq(v);
+ lock = unit_schedule_lock_irq(v->sched_unit);
if ( cpu == NR_CPUS )
{
if ( migrate )
vcpu_migrate_start(v);
- vcpu_schedule_unlock_irq(lock, v);
+ unit_schedule_unlock_irq(lock, v->sched_unit);
if ( migrate )
vcpu_migrate_finish(v);
#define EXTRA_TYPE(arg)
sched_lock(pcpu, unsigned int cpu, cpu, )
-sched_lock(vcpu, const struct vcpu *v, v->processor, )
+sched_lock(unit, const struct sched_unit *i, i->res->master_cpu, )
sched_lock(pcpu, unsigned int cpu, cpu, _irq)
-sched_lock(vcpu, const struct vcpu *v, v->processor, _irq)
+sched_lock(unit, const struct sched_unit *i, i->res->master_cpu, _irq)
sched_unlock(pcpu, unsigned int cpu, cpu, )
-sched_unlock(vcpu, const struct vcpu *v, v->processor, )
+sched_unlock(unit, const struct sched_unit *i, i->res->master_cpu, )
sched_unlock(pcpu, unsigned int cpu, cpu, _irq)
-sched_unlock(vcpu, const struct vcpu *v, v->processor, _irq)
+sched_unlock(unit, const struct sched_unit *i, i->res->master_cpu, _irq)
#undef EXTRA_TYPE
#define EXTRA_TYPE(arg) , unsigned long arg
#define spin_unlock_irqsave spin_unlock_irqrestore
sched_lock(pcpu, unsigned int cpu, cpu, _irqsave, *flags)
-sched_lock(vcpu, const struct vcpu *v, v->processor, _irqsave, *flags)
+sched_lock(unit, const struct sched_unit *i, i->res->master_cpu, _irqsave, *flags)
#undef spin_unlock_irqsave
sched_unlock(pcpu, unsigned int cpu, cpu, _irqrestore, flags)
-sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
+sched_unlock(unit, const struct sched_unit *i, i->res->master_cpu, _irqrestore, flags)
#undef EXTRA_TYPE
#undef sched_unlock
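For reference, the unit variants declared above generate helpers that resolve the lock through the unit's resource instead of through a struct vcpu. A hand-expanded sketch of roughly what unit_schedule_lock_irq() becomes; get_sched_res() stands in for however the per-CPU sched_resource is looked up, and the retry loop reflects that the resource's schedule_lock may be repointed (as in the credit2/RTDS sharing shown earlier) while we were waiting for it:

/*
 * Sketch of the generated helper, not a verbatim macro expansion.  Fetch
 * the lock of the CPU backing the unit's resource; if it changed while we
 * were spinning, drop it and retry against the new one.
 */
static inline spinlock_t *unit_schedule_lock_irq(const struct sched_unit *i)
{
    for ( ; ; )
    {
        spinlock_t *lock = get_sched_res(i->res->master_cpu)->schedule_lock;

        spin_lock_irq(lock);
        if ( likely(lock == get_sched_res(i->res->master_cpu)->schedule_lock) )
            return lock;
        spin_unlock_irq(lock);
    }
}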