static void vcpu_destroy(struct vcpu *v)
{
- free_cpumask_var(v->cpu_hard_affinity);
- free_cpumask_var(v->cpu_hard_affinity_saved);
- free_cpumask_var(v->cpu_soft_affinity);
-
free_vcpu_struct(v);
}
grant_table_init_vcpu(v);
- if ( !zalloc_cpumask_var(&v->cpu_hard_affinity) ||
- !zalloc_cpumask_var(&v->cpu_hard_affinity_saved) ||
- !zalloc_cpumask_var(&v->cpu_soft_affinity) )
- goto fail;
-
if ( is_idle_domain(d) )
{
v->runstate.state = RUNSTATE_running;
sched_destroy_vcpu(v);
fail_wq:
destroy_waitqueue_vcpu(v);
- fail:
vcpu_destroy(v);
return NULL;
*/
for_each_vcpu ( d, v )
{
- cpumask_or(dom_cpumask, dom_cpumask, v->cpu_hard_affinity);
+ cpumask_or(dom_cpumask, dom_cpumask,
+ v->sched_unit->cpu_hard_affinity);
cpumask_or(dom_cpumask_soft, dom_cpumask_soft,
- v->cpu_soft_affinity);
+ v->sched_unit->cpu_soft_affinity);
}
/* Filter out non-online cpus */
cpumask_and(dom_cpumask, dom_cpumask, online);
case XEN_DOMCTL_getvcpuaffinity:
{
struct vcpu *v;
+ const struct sched_unit *unit;
struct xen_domctl_vcpuaffinity *vcpuaff = &op->u.vcpuaffinity;
ret = -EINVAL;
if ( (v = d->vcpu[vcpuaff->vcpu]) == NULL )
break;
+ unit = v->sched_unit;
ret = -EINVAL;
if ( vcpuaffinity_params_invalid(vcpuaff) )
break;
ret = -ENOMEM;
break;
}
- cpumask_copy(old_affinity, v->cpu_hard_affinity);
+ cpumask_copy(old_affinity, unit->cpu_hard_affinity);
if ( !alloc_cpumask_var(&new_affinity) )
{
* For hard affinity, what we return is the intersection of
* cpupool's online mask and the new hard affinity.
*/
- cpumask_and(new_affinity, online, v->cpu_hard_affinity);
+ cpumask_and(new_affinity, online, unit->cpu_hard_affinity);
ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_hard,
new_affinity);
}
* hard affinity.
*/
cpumask_and(new_affinity, new_affinity, online);
- cpumask_and(new_affinity, new_affinity, v->cpu_hard_affinity);
+ cpumask_and(new_affinity, new_affinity,
+ unit->cpu_hard_affinity);
ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_soft,
new_affinity);
}
{
if ( vcpuaff->flags & XEN_VCPUAFFINITY_HARD )
ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_hard,
- v->cpu_hard_affinity);
+ unit->cpu_hard_affinity);
if ( vcpuaff->flags & XEN_VCPUAFFINITY_SOFT )
ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_soft,
- v->cpu_soft_affinity);
+ unit->cpu_soft_affinity);
}
break;
}
static void dump_domains(unsigned char key)
{
struct domain *d;
+ const struct sched_unit *unit;
struct vcpu *v;
s_time_t now = NOW();
printk("VCPU information and callbacks for domain %u:\n",
d->domain_id);
- for_each_vcpu ( d, v )
- {
- if ( !(v->vcpu_id & 0x3f) )
- process_pending_softirqs();
- printk(" VCPU%d: CPU%d [has=%c] poll=%d "
- "upcall_pend=%02x upcall_mask=%02x ",
- v->vcpu_id, v->processor,
- v->is_running ? 'T':'F', v->poll_evtchn,
- vcpu_info(v, evtchn_upcall_pending),
- !vcpu_event_delivery_is_enabled(v));
- if ( vcpu_cpu_dirty(v) )
- printk("dirty_cpu=%u", v->dirty_cpu);
- printk("\n");
- printk(" cpu_hard_affinity={%*pbl} cpu_soft_affinity={%*pbl}\n",
- CPUMASK_PR(v->cpu_hard_affinity),
- CPUMASK_PR(v->cpu_soft_affinity));
- printk(" pause_count=%d pause_flags=%lx\n",
- atomic_read(&v->pause_count), v->pause_flags);
- arch_dump_vcpu_info(v);
-
- if ( v->periodic_period == 0 )
- printk("No periodic timer\n");
- else
- printk("%"PRI_stime" Hz periodic timer (period %"PRI_stime" ms)\n",
- 1000000000 / v->periodic_period,
- v->periodic_period / 1000000);
+ for_each_sched_unit ( d, unit )
+ {
+ printk(" UNIT%d affinities: hard={%*pbl} soft={%*pbl}\n",
+ unit->unit_id, CPUMASK_PR(unit->cpu_hard_affinity),
+ CPUMASK_PR(unit->cpu_soft_affinity));
+
+ for_each_sched_unit_vcpu ( unit, v )
+ {
+ if ( !(v->vcpu_id & 0x3f) )
+ process_pending_softirqs();
+
+ printk(" VCPU%d: CPU%d [has=%c] poll=%d "
+ "upcall_pend=%02x upcall_mask=%02x ",
+ v->vcpu_id, v->processor,
+ v->is_running ? 'T':'F', v->poll_evtchn,
+ vcpu_info(v, evtchn_upcall_pending),
+ !vcpu_event_delivery_is_enabled(v));
+ if ( vcpu_cpu_dirty(v) )
+ printk("dirty_cpu=%u", v->dirty_cpu);
+ printk("\n");
+ printk(" pause_count=%d pause_flags=%lx\n",
+ atomic_read(&v->pause_count), v->pause_flags);
+ arch_dump_vcpu_info(v);
+
+ if ( v->periodic_period == 0 )
+ printk("No periodic timer\n");
+ else
+ printk("%"PRI_stime" Hz periodic timer (period %"PRI_stime" ms)\n",
+ 1000000000 / v->periodic_period,
+ v->periodic_period / 1000000);
+ }
}
}
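For orientation: the dump now walks scheduling units first and the vcpus inside each unit. A rough, purely illustrative equivalent of that nested walk, assuming d->sched_unit_list heads the per-domain unit list and each unit still carries a single vcpu (unit->vcpu_list) at this point in the series; this is not the actual for_each_sched_unit / for_each_sched_unit_vcpu definition:

    for ( unit = d->sched_unit_list; unit != NULL; unit = unit->next_in_list )
    {
        struct vcpu *vc = unit->vcpu_list;   /* single vcpu per unit for now */

        printk(" UNIT%d hard={%*pbl}\n",
               unit->unit_id, CPUMASK_PR(unit->cpu_hard_affinity));
        printk("   VCPU%d on CPU%d\n", vc->vcpu_id, vc->processor);
    }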
static inline void __runq_tickle(struct csched_unit *new)
{
unsigned int cpu = new->vcpu->processor;
+ struct sched_unit *unit = new->vcpu->sched_unit;
struct csched_unit * const cur = CSCHED_UNIT(curr_on_cpu(cpu));
struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
cpumask_t mask, idle_mask, *online;
if ( unlikely(test_bit(CSCHED_FLAG_VCPU_PINNED, &new->flags) &&
cpumask_test_cpu(cpu, &idle_mask)) )
{
- ASSERT(cpumask_cycle(cpu, new->vcpu->cpu_hard_affinity) == cpu);
+ ASSERT(cpumask_cycle(cpu, unit->cpu_hard_affinity) == cpu);
SCHED_STAT_CRANK(tickled_idle_cpu_excl);
__cpumask_set_cpu(cpu, &mask);
goto tickle;
int new_idlers_empty;
if ( balance_step == BALANCE_SOFT_AFFINITY
- && !has_soft_affinity(new->vcpu) )
+ && !has_soft_affinity(unit) )
continue;
/* Are there idlers suitable for new (for this balance step)? */
- affinity_balance_cpumask(new->vcpu, balance_step,
+ affinity_balance_cpumask(unit, balance_step,
cpumask_scratch_cpu(cpu));
cpumask_and(cpumask_scratch_cpu(cpu),
cpumask_scratch_cpu(cpu), &idle_mask);
*/
if ( new_idlers_empty && new->pri > cur->pri )
{
- if ( cpumask_intersects(cur->vcpu->cpu_hard_affinity,
- &idle_mask) )
+ if ( cpumask_intersects(unit->cpu_hard_affinity, &idle_mask) )
{
SCHED_VCPU_STAT_CRANK(cur, kicked_away);
SCHED_VCPU_STAT_CRANK(cur, migrate_r);
for_each_affinity_balance_step( balance_step )
{
- affinity_balance_cpumask(vc, balance_step, cpus);
+ affinity_balance_cpumask(vc->sched_unit, balance_step, cpus);
cpumask_and(cpus, online, cpus);
/*
* We want to pick up a pcpu among the ones that are online and
* balancing step all together.
*/
if ( balance_step == BALANCE_SOFT_AFFINITY &&
- (!has_soft_affinity(vc) || cpumask_empty(cpus)) )
+ (!has_soft_affinity(vc->sched_unit) || cpumask_empty(cpus)) )
continue;
/* If present, prefer vc's current processor */
* or counter.
*/
if ( vc->is_running || (balance_step == BALANCE_SOFT_AFFINITY &&
- !has_soft_affinity(vc)) )
+ !has_soft_affinity(vc->sched_unit)) )
continue;
- affinity_balance_cpumask(vc, balance_step, cpumask_scratch);
+ affinity_balance_cpumask(vc->sched_unit, balance_step, cpumask_scratch);
if ( __csched_vcpu_is_migrateable(prv, vc, cpu, cpumask_scratch) )
{
/* We got a candidate. Grab it! */
{
int cpu = v->processor;
- if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v) )
+ if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v->sched_unit) )
continue;
- affinity_balance_cpumask(v, bs, cpumask_scratch_cpu(cpu));
+ affinity_balance_cpumask(v->sched_unit, bs, cpumask_scratch_cpu(cpu));
cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
cpupool_domain_cpumask(v->domain));
*/
if ( score > 0 )
{
- if ( cpumask_test_cpu(cpu, new->vcpu->cpu_soft_affinity) )
+ if ( cpumask_test_cpu(cpu, new->vcpu->sched_unit->cpu_soft_affinity) )
score += CSCHED2_CREDIT_INIT;
- if ( !cpumask_test_cpu(cpu, cur->vcpu->cpu_soft_affinity) )
+ if ( !cpumask_test_cpu(cpu, cur->vcpu->sched_unit->cpu_soft_affinity) )
score += CSCHED2_CREDIT_INIT;
}
{
int i, ipid = -1;
s_time_t max = 0;
+ struct sched_unit *unit = new->vcpu->sched_unit;
unsigned int bs, cpu = new->vcpu->processor;
struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
cpumask_t *online = cpupool_domain_cpumask(new->vcpu->domain);
cpumask_test_cpu(cpu, &rqd->idle) &&
!cpumask_test_cpu(cpu, &rqd->tickled)) )
{
- ASSERT(cpumask_cycle(cpu, new->vcpu->cpu_hard_affinity) == cpu);
+ ASSERT(cpumask_cycle(cpu, unit->cpu_hard_affinity) == cpu);
SCHED_STAT_CRANK(tickled_idle_cpu_excl);
ipid = cpu;
goto tickle;
for_each_affinity_balance_step( bs )
{
/* Just skip first step, if we don't have a soft affinity */
- if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(new->vcpu) )
+ if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(unit) )
continue;
- affinity_balance_cpumask(new->vcpu, bs, cpumask_scratch_cpu(cpu));
+ affinity_balance_cpumask(unit, bs, cpumask_scratch_cpu(cpu));
/*
* First of all, consider idle cpus, checking if we can just
ipid = cpu;
/* If this is in new's soft affinity, just take it */
- if ( cpumask_test_cpu(cpu, new->vcpu->cpu_soft_affinity) )
+ if ( cpumask_test_cpu(cpu, unit->cpu_soft_affinity) )
{
SCHED_STAT_CRANK(tickled_busy_cpu);
goto tickle;
goto out;
}
- cpumask_and(cpumask_scratch_cpu(cpu), vc->cpu_hard_affinity,
+ cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
cpupool_domain_cpumask(vc->domain));
/*
*
* Find both runqueues in one pass.
*/
- has_soft = has_soft_affinity(vc);
+ has_soft = has_soft_affinity(unit);
for_each_cpu(i, &prv->active_queues)
{
struct csched2_runqueue_data *rqd;
cpumask_t mask;
cpumask_and(&mask, cpumask_scratch_cpu(cpu), &rqd->active);
- if ( cpumask_intersects(&mask, svc->vcpu->cpu_soft_affinity) )
+ if ( cpumask_intersects(&mask, unit->cpu_soft_affinity) )
{
min_s_avgload = rqd_avgload;
min_s_rqi = i;
* Note that, to obtain the soft-affinity mask, we "just" put what we
- * have in cpumask_scratch in && with vc->cpu_soft_affinity. This is
+ * have in cpumask_scratch in && with unit->cpu_soft_affinity. This is
* ok because:
- * - we know that vc->cpu_hard_affinity and vc->cpu_soft_affinity have
+ * - we know that unit->cpu_hard_affinity and ->cpu_soft_affinity have
* a non-empty intersection (because has_soft is true);
- * - we have vc->cpu_hard_affinity & cpupool_domain_cpumask() already
+ * - we have unit->cpu_hard_affinity & cpupool_domain_cpumask() already
* in cpumask_scratch, we do save a lot doing like this.
*
* It's kind of like open coding affinity_balance_cpumask() but, in
* cpumask operations.
*/
cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
- vc->cpu_soft_affinity);
+ unit->cpu_soft_affinity);
cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
&prv->rqd[min_s_rqi].active);
}
s_time_t now)
{
int cpu = svc->vcpu->processor;
+ struct sched_unit *unit = svc->vcpu->sched_unit;
if ( unlikely(tb_init_done) )
{
}
_runq_deassign(svc);
- cpumask_and(cpumask_scratch_cpu(cpu), svc->vcpu->cpu_hard_affinity,
+ cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
cpupool_domain_cpumask(svc->vcpu->domain));
cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
&trqd->active);
struct vcpu *v = svc->vcpu;
int cpu = svc->vcpu->processor;
- cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+ cpumask_and(cpumask_scratch_cpu(cpu), v->sched_unit->cpu_hard_affinity,
cpupool_domain_cpumask(v->domain));
return !(svc->flags & CSFLAG_runq_migrate_request) &&
/* If here, new_cpu must be a valid Credit2 pCPU, and in our affinity. */
ASSERT(cpumask_test_cpu(new_cpu, &csched2_priv(ops)->initialized));
- ASSERT(cpumask_test_cpu(new_cpu, vc->cpu_hard_affinity));
+ ASSERT(cpumask_test_cpu(new_cpu, unit->cpu_hard_affinity));
trqd = c2rqd(ops, new_cpu);
}
/* If scurr has a soft-affinity, let's check whether cpu is part of it */
- if ( has_soft_affinity(scurr->vcpu) )
+ if ( has_soft_affinity(scurr->vcpu->sched_unit) )
{
- affinity_balance_cpumask(scurr->vcpu, BALANCE_SOFT_AFFINITY,
+ affinity_balance_cpumask(scurr->vcpu->sched_unit, BALANCE_SOFT_AFFINITY,
cpumask_scratch);
if ( unlikely(!cpumask_test_cpu(cpu, cpumask_scratch)) )
{
}
/* Only consider vcpus that are allowed to run on this processor. */
- if ( !cpumask_test_cpu(cpu, svc->vcpu->cpu_hard_affinity) )
+ if ( !cpumask_test_cpu(cpu, svc->vcpu->sched_unit->cpu_hard_affinity) )
{
(*skipped)++;
continue;
static inline bool vcpu_check_affinity(struct vcpu *v, unsigned int cpu,
unsigned int balance_step)
{
- affinity_balance_cpumask(v, balance_step, cpumask_scratch_cpu(cpu));
+ affinity_balance_cpumask(v->sched_unit, balance_step,
+ cpumask_scratch_cpu(cpu));
cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
cpupool_domain_cpumask(v->domain));
for_each_affinity_balance_step( bs )
{
- if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v) )
+ if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(unit) )
continue;
- affinity_balance_cpumask(v, bs, cpumask_scratch_cpu(cpu));
+ affinity_balance_cpumask(unit, bs, cpumask_scratch_cpu(cpu));
cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu), cpus);
/*
* as we will actually assign the vCPU to the pCPU we return from here,
* only if the pCPU is free.
*/
- cpumask_and(cpumask_scratch_cpu(cpu), cpus, v->cpu_hard_affinity);
+ cpumask_and(cpumask_scratch_cpu(cpu), cpus, unit->cpu_hard_affinity);
new_cpu = cpumask_any(cpumask_scratch_cpu(cpu));
out:
{
list_for_each_entry( wvc, &prv->waitq, waitq_elem )
{
- if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(wvc->vcpu) )
+ if ( bs == BALANCE_SOFT_AFFINITY &&
+ !has_soft_affinity(wvc->vcpu->sched_unit) )
continue;
if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
lock = unit_schedule_lock(unit);
- cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+ cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
cpupool_domain_cpumask(v->domain));
/* If the pCPU is free, we assign v to it */
list_add_tail(&nvc->waitq_elem, &prv->waitq);
spin_unlock(&prv->waitq_lock);
- cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+ cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
cpupool_domain_cpumask(v->domain));
if ( !cpumask_intersects(&prv->cpus_free, cpumask_scratch_cpu(cpu)) )
list_for_each_entry( wvc, &prv->waitq, waitq_elem )
{
if ( bs == BALANCE_SOFT_AFFINITY &&
- !has_soft_affinity(wvc->vcpu) )
+ !has_soft_affinity(wvc->vcpu->sched_unit) )
continue;
if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
mask = cpumask_scratch_cpu(svc->vcpu->processor);
cpupool_mask = cpupool_domain_cpumask(svc->vcpu->domain);
- cpumask_and(mask, cpupool_mask, svc->vcpu->cpu_hard_affinity);
+ cpumask_and(mask, cpupool_mask, svc->vcpu->sched_unit->cpu_hard_affinity);
printk("[%5d.%-2u] cpu %u, (%"PRI_stime", %"PRI_stime"),"
" cur_b=%"PRI_stime" cur_d=%"PRI_stime" last_start=%"PRI_stime"\n"
" \t\t priority_level=%d has_extratime=%d\n"
int cpu;
online = cpupool_domain_cpumask(vc->domain);
- cpumask_and(&cpus, online, vc->cpu_hard_affinity);
+ cpumask_and(&cpus, online, unit->cpu_hard_affinity);
cpu = cpumask_test_cpu(vc->processor, &cpus)
? vc->processor
/* mask cpu_hard_affinity & cpupool & mask */
online = cpupool_domain_cpumask(iter_svc->vcpu->domain);
- cpumask_and(&cpu_common, online, iter_svc->vcpu->cpu_hard_affinity);
+ cpumask_and(&cpu_common, online,
+ iter_svc->vcpu->sched_unit->cpu_hard_affinity);
cpumask_and(&cpu_common, mask, &cpu_common);
if ( cpumask_empty(&cpu_common) )
continue;
return;
online = cpupool_domain_cpumask(new->vcpu->domain);
- cpumask_and(&not_tickled, online, new->vcpu->cpu_hard_affinity);
+ cpumask_and(&not_tickled, online, new->vcpu->sched_unit->cpu_hard_affinity);
cpumask_andnot(&not_tickled, &not_tickled, &prv->tickled);
/*
}
unit->vcpu_list->sched_unit = NULL;
+
+ free_cpumask_var(unit->cpu_hard_affinity);
+ free_cpumask_var(unit->cpu_hard_affinity_saved);
+ free_cpumask_var(unit->cpu_soft_affinity);
+
xfree(unit);
}
unit->next_in_list = *prev_unit;
*prev_unit = unit;
+ if ( !zalloc_cpumask_var(&unit->cpu_hard_affinity) ||
+ !zalloc_cpumask_var(&unit->cpu_hard_affinity_saved) ||
+ !zalloc_cpumask_var(&unit->cpu_soft_affinity) )
+ goto fail;
+
v->sched_unit = unit;
return unit;
+
+ fail:
+ sched_free_unit(unit);
+ return NULL;
}
int sched_init_vcpu(struct vcpu *v, unsigned int processor)
*/
if ( pick_called &&
(new_lock == get_sched_res(new_cpu)->schedule_lock) &&
- cpumask_test_cpu(new_cpu, v->cpu_hard_affinity) &&
+ cpumask_test_cpu(new_cpu, v->sched_unit->cpu_hard_affinity) &&
cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
break;
{
spinlock_t *lock;
unsigned int old_cpu = v->processor;
+ struct sched_unit *unit = v->sched_unit;
ASSERT(!vcpu_runnable(v));
* set v->processor of each of their vCPUs to something that will
* make sense for the scheduler of the cpupool in which they are in.
*/
- lock = unit_schedule_lock_irq(v->sched_unit);
+ lock = unit_schedule_lock_irq(unit);
- cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+ cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
cpupool_domain_cpumask(d));
if ( cpumask_empty(cpumask_scratch_cpu(cpu)) )
{
if ( v->affinity_broken )
{
- sched_set_affinity(v, v->cpu_hard_affinity_saved, NULL);
+ sched_set_affinity(v, unit->cpu_hard_affinity_saved, NULL);
v->affinity_broken = 0;
- cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+ cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
cpupool_domain_cpumask(d));
}
{
printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
sched_set_affinity(v, &cpumask_all, NULL);
- cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+ cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
cpupool_domain_cpumask(d));
}
}
v->processor = cpumask_any(cpumask_scratch_cpu(cpu));
- v->sched_unit->res = get_sched_res(v->processor);
+ unit->res = get_sched_res(v->processor);
spin_unlock_irq(lock);
/* v->processor might have changed, so reacquire the lock. */
- lock = unit_schedule_lock_irq(v->sched_unit);
- v->sched_unit->res = sched_pick_resource(vcpu_scheduler(v),
- v->sched_unit);
- v->processor = v->sched_unit->res->master_cpu;
+ lock = unit_schedule_lock_irq(unit);
+ unit->res = sched_pick_resource(vcpu_scheduler(v), unit);
+ v->processor = unit->res->master_cpu;
spin_unlock_irq(lock);
if ( old_cpu != v->processor )
for_each_vcpu ( d, v )
{
unsigned long flags;
- spinlock_t *lock = unit_schedule_lock_irqsave(v->sched_unit, &flags);
+ struct sched_unit *unit = v->sched_unit;
+ spinlock_t *lock = unit_schedule_lock_irqsave(unit, &flags);
- cpumask_and(&online_affinity, v->cpu_hard_affinity, c->cpu_valid);
+ cpumask_and(&online_affinity, unit->cpu_hard_affinity, c->cpu_valid);
if ( cpumask_empty(&online_affinity) &&
- cpumask_test_cpu(cpu, v->cpu_hard_affinity) )
+ cpumask_test_cpu(cpu, unit->cpu_hard_affinity) )
{
if ( v->affinity_broken )
{
/* The vcpu is temporarily pinned, can't move it. */
- unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
+ unit_schedule_unlock_irqrestore(lock, flags, unit);
ret = -EADDRINUSE;
break;
}
if ( v->processor != cpu )
{
/* The vcpu is not on this cpu, so we can move on. */
- unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
+ unit_schedule_unlock_irqrestore(lock, flags, unit);
continue;
}
* things would have failed before getting in here.
*/
vcpu_migrate_start(v);
- unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
+ unit_schedule_unlock_irqrestore(lock, flags, unit);
vcpu_migrate_finish(v);
void sched_set_affinity(
struct vcpu *v, const cpumask_t *hard, const cpumask_t *soft)
{
- sched_adjust_affinity(dom_scheduler(v->domain), v->sched_unit, hard, soft);
+ struct sched_unit *unit = v->sched_unit;
+
+ sched_adjust_affinity(dom_scheduler(v->domain), unit, hard, soft);
if ( hard )
- cpumask_copy(v->cpu_hard_affinity, hard);
+ cpumask_copy(unit->cpu_hard_affinity, hard);
if ( soft )
- cpumask_copy(v->cpu_soft_affinity, soft);
+ cpumask_copy(unit->cpu_soft_affinity, soft);
- v->soft_aff_effective = !cpumask_subset(v->cpu_hard_affinity,
- v->cpu_soft_affinity) &&
- cpumask_intersects(v->cpu_soft_affinity,
- v->cpu_hard_affinity);
+ unit->soft_aff_effective = !cpumask_subset(unit->cpu_hard_affinity,
+ unit->cpu_soft_affinity) &&
+ cpumask_intersects(unit->cpu_soft_affinity,
+ unit->cpu_hard_affinity);
}
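A minimal worked sketch of the soft_aff_effective expression above, using made-up on-stack masks (illustration only, not code from the patch): with hard = {0-3} and soft = {2-5}, hard is not a subset of soft and the two overlap on {2,3}, so soft affinity stays effective; with soft = {0-7} (a superset of hard) the subset test makes it ineffective.

    cpumask_t hard, soft;
    bool effective;

    cpumask_clear(&hard);
    cpumask_clear(&soft);
    cpumask_set_cpu(0, &hard); cpumask_set_cpu(1, &hard);
    cpumask_set_cpu(2, &hard); cpumask_set_cpu(3, &hard);   /* hard = {0-3} */
    cpumask_set_cpu(2, &soft); cpumask_set_cpu(3, &soft);
    cpumask_set_cpu(4, &soft); cpumask_set_cpu(5, &soft);   /* soft = {2-5} */

    /* Same expression as assigned to unit->soft_aff_effective above. */
    effective = !cpumask_subset(&hard, &soft) &&
                cpumask_intersects(&soft, &hard);
    /* effective == true here; soft = {0-7} would flip it to false. */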
static int vcpu_set_affinity(
struct vcpu *v, const cpumask_t *affinity, const cpumask_t *which)
{
+ struct sched_unit *unit = v->sched_unit;
spinlock_t *lock;
int ret = 0;
- lock = unit_schedule_lock_irq(v->sched_unit);
+ lock = unit_schedule_lock_irq(unit);
if ( v->affinity_broken )
ret = -EBUSY;
* Tell the scheduler we changed something about affinity,
* and ask to re-evaluate vcpu placement.
*/
- if ( which == v->cpu_hard_affinity )
+ if ( which == unit->cpu_hard_affinity )
{
sched_set_affinity(v, affinity, NULL);
}
else
{
- ASSERT(which == v->cpu_soft_affinity);
+ ASSERT(which == unit->cpu_soft_affinity);
sched_set_affinity(v, NULL, affinity);
}
vcpu_migrate_start(v);
}
- unit_schedule_unlock_irq(lock, v->sched_unit);
+ unit_schedule_unlock_irq(lock, unit);
domain_update_node_affinity(v->domain);
if ( cpumask_empty(&online_affinity) )
return -EINVAL;
- return vcpu_set_affinity(v, affinity, v->cpu_hard_affinity);
+ return vcpu_set_affinity(v, affinity, v->sched_unit->cpu_hard_affinity);
}
int vcpu_set_soft_affinity(struct vcpu *v, const cpumask_t *affinity)
{
- return vcpu_set_affinity(v, affinity, v->cpu_soft_affinity);
+ return vcpu_set_affinity(v, affinity, v->sched_unit->cpu_soft_affinity);
}
/* Block the currently-executing domain until a pertinent event occurs. */
*/
int vcpu_temporary_affinity(struct vcpu *v, unsigned int cpu, uint8_t reason)
{
+ struct sched_unit *unit = v->sched_unit;
spinlock_t *lock;
int ret = -EINVAL;
bool migrate;
- lock = unit_schedule_lock_irq(v->sched_unit);
+ lock = unit_schedule_lock_irq(unit);
if ( cpu == NR_CPUS )
{
v->affinity_broken &= ~reason;
}
if ( !ret && !v->affinity_broken )
- sched_set_affinity(v, v->cpu_hard_affinity_saved, NULL);
+ sched_set_affinity(v, unit->cpu_hard_affinity_saved, NULL);
}
else if ( cpu < nr_cpu_ids )
{
{
if ( !v->affinity_broken )
{
- cpumask_copy(v->cpu_hard_affinity_saved, v->cpu_hard_affinity);
+ cpumask_copy(unit->cpu_hard_affinity_saved,
+ unit->cpu_hard_affinity);
sched_set_affinity(v, cpumask_of(cpu), NULL);
}
v->affinity_broken |= reason;
}
}
- migrate = !ret && !cpumask_test_cpu(v->processor, v->cpu_hard_affinity);
+ migrate = !ret && !cpumask_test_cpu(v->processor, unit->cpu_hard_affinity);
if ( migrate )
vcpu_migrate_start(v);
- unit_schedule_unlock_irq(lock, v->sched_unit);
+ unit_schedule_unlock_irq(lock, unit);
if ( migrate )
vcpu_migrate_finish(v);
* * The hard affinity is not a subset of soft affinity
* * There is an overlap between the soft and hard affinity masks
*/
-static inline int has_soft_affinity(const struct vcpu *v)
+static inline int has_soft_affinity(const struct sched_unit *unit)
{
- return v->soft_aff_effective &&
- !cpumask_subset(cpupool_domain_cpumask(v->domain),
- v->cpu_soft_affinity);
+ return unit->soft_aff_effective &&
+ !cpumask_subset(cpupool_domain_cpumask(unit->domain),
+ unit->cpu_soft_affinity);
}
/*
* to avoid running a vcpu where it would like, but is not allowed to!
*/
static inline void
-affinity_balance_cpumask(const struct vcpu *v, int step, cpumask_t *mask)
+affinity_balance_cpumask(const struct sched_unit *unit, int step,
+ cpumask_t *mask)
{
if ( step == BALANCE_SOFT_AFFINITY )
{
- cpumask_and(mask, v->cpu_soft_affinity, v->cpu_hard_affinity);
+ cpumask_and(mask, unit->cpu_soft_affinity, unit->cpu_hard_affinity);
if ( unlikely(cpumask_empty(mask)) )
- cpumask_copy(mask, v->cpu_hard_affinity);
+ cpumask_copy(mask, unit->cpu_hard_affinity);
}
else /* step == BALANCE_HARD_AFFINITY */
- cpumask_copy(mask, v->cpu_hard_affinity);
+ cpumask_copy(mask, unit->cpu_hard_affinity);
}
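The two helpers above are consumed together in the two-step balancing loop that recurs throughout this patch (see the credit, credit2 and null hunks earlier); a condensed sketch of that recurring pattern, using the same names the diff already uses:

    int step;

    for_each_affinity_balance_step( step )
    {
        /* The soft step is pointless if soft affinity cannot matter. */
        if ( step == BALANCE_SOFT_AFFINITY && !has_soft_affinity(unit) )
            continue;

        affinity_balance_cpumask(unit, step, cpumask_scratch);
        cpumask_and(cpumask_scratch, cpumask_scratch,
                    cpupool_domain_cpumask(unit->domain));

        if ( !cpumask_empty(cpumask_scratch) )
            break;   /* found a usable set of pcpus at this step */
    }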
void sched_rm_cpu(unsigned int cpu);
bool hcall_compat;
#endif
- /* Does soft affinity actually play a role (given hard affinity)? */
- bool soft_aff_effective;
-
/* The CPU, if any, which is holding onto this VCPU's state. */
#define VCPU_CPU_CLEAN (~0u)
unsigned int dirty_cpu;
evtchn_port_t virq_to_evtchn[NR_VIRQS];
spinlock_t virq_lock;
- /* Bitmask of CPUs on which this VCPU may run. */
- cpumask_var_t cpu_hard_affinity;
- /* Used to save affinity during temporary pinning. */
- cpumask_var_t cpu_hard_affinity_saved;
-
- /* Bitmask of CPUs on which this VCPU prefers to run. */
- cpumask_var_t cpu_soft_affinity;
-
/* Tasklet for continue_hypercall_on_cpu(). */
struct tasklet continue_hypercall_tasklet;
struct sched_unit *next_in_list;
struct sched_resource *res;
unsigned int unit_id;
+
+ /* Does soft affinity actually play a role (given hard affinity)? */
+ bool soft_aff_effective;
+ /* Bitmask of CPUs on which this VCPU may run. */
+ cpumask_var_t cpu_hard_affinity;
+ /* Used to save affinity during temporary pinning. */
+ cpumask_var_t cpu_hard_affinity_saved;
+ /* Bitmask of CPUs on which this VCPU prefers to run. */
+ cpumask_var_t cpu_soft_affinity;
};
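With the masks now living in struct sched_unit, every former v->cpu_*_affinity access in the schedulers becomes v->sched_unit->cpu_*_affinity, as the hunks above show. A hypothetical convenience accessor, not part of the patch, would simply be:

    /* Hypothetical helper, for illustration only; the patch open-codes this. */
    static inline const cpumask_t *vcpu_hard_affinity(const struct vcpu *v)
    {
        return v->sched_unit->cpu_hard_affinity;
    }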
#define for_each_sched_unit(d, u) \
static inline bool is_hwdom_pinned_vcpu(const struct vcpu *v)
{
return (is_hardware_domain(v->domain) &&
- cpumask_weight(v->cpu_hard_affinity) == 1);
+ cpumask_weight(v->sched_unit->cpu_hard_affinity) == 1);
}
static inline bool is_vcpu_online(const struct vcpu *v)