xen: sched: get rid of cpupool_scheduler_cpumask()

and of (almost every) direct use of cpupool_online_cpumask().

In fact, what we really want, most of the time, is the set of valid
pCPUs of the cpupool a certain domain is part of. Furthermore, when
called with a NULL pool as argument, cpupool_scheduler_cpumask() does
more harm than good, by returning the bitmask of free pCPUs!
This commit, therefore:
* gets rid of cpupool_scheduler_cpumask(), in favour of
cpupool_domain_cpumask(), which makes it more evident
what we are after, and accommodates some sanity checking;
* replaces some of the calls to cpupool_online_cpumask()
with calls to the new function as well.
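As a minimal sketch of the two behaviours (d standing for any
non-idle domain's struct domain *):

    cpumask_t *online;

    /* Old: a NULL d->cpupool silently yields the mask of free pCPUs. */
    online = cpupool_scheduler_cpumask(d->cpupool);

    /* New: the intent is explicit, and a NULL d->cpupool trips the
     * ASSERT() in cpupool_domain_cpumask() rather than misbehaving. */
    online = cpupool_domain_cpumask(d);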
Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Acked-by: Juergen Gross <jgross@suse.com>
Acked-by: Joshua Whitehead <josh.whitehead@dornerworks.com>
Reviewed-by: Meng Xu <mengxu@cis.upenn.edu>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
/* Must be called after making new vcpu visible to for_each_vcpu(). */
vcpu_check_shutdown(v);
- domain_update_node_affinity(d);
+ if ( !is_idle_domain(d) )
+ domain_update_node_affinity(d);
return v;
}
return;
}
- online = cpupool_online_cpumask(d->cpupool);
+ online = cpupool_domain_cpumask(d);
spin_lock(&d->node_affinity_lock);
goto maxvcpu_out;
ret = -ENOMEM;
- online = cpupool_online_cpumask(d->cpupool);
+ online = cpupool_domain_cpumask(d);
if ( max > d->max_vcpus )
{
struct vcpu **vcpus;
if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
{
cpumask_var_t new_affinity, old_affinity;
- cpumask_t *online = cpupool_online_cpumask(v->domain->cpupool);;
+ cpumask_t *online = cpupool_domain_cpumask(v->domain);
/*
* We want to be able to restore hard affinity if we are trying
* If present, prefer vc's current processor, else
just find the first valid pCPU.
*/
- online = cpupool_scheduler_cpumask(vc->domain->cpupool);
+ online = cpupool_domain_cpumask(vc->domain);
cpu = cpumask_first(online);
static inline int __vcpu_has_soft_affinity(const struct vcpu *vc,
const cpumask_t *mask)
{
- return !cpumask_subset(cpupool_online_cpumask(vc->domain->cpupool),
+ return !cpumask_subset(cpupool_domain_cpumask(vc->domain),
vc->cpu_soft_affinity) &&
!cpumask_subset(vc->cpu_hard_affinity, vc->cpu_soft_affinity) &&
cpumask_intersects(vc->cpu_soft_affinity, mask);
ASSERT(cur);
cpumask_clear(&mask);
- /* cpu is vc->processor, so it must be in a cpupool. */
- ASSERT(per_cpu(cpupool, cpu) != NULL);
- online = cpupool_online_cpumask(per_cpu(cpupool, cpu));
+ online = cpupool_domain_cpumask(new->sdom->dom);
cpumask_and(&idle_mask, prv->idlers, online);
idlers_empty = cpumask_empty(&idle_mask);
int balance_step;
/* Store in cpus the mask of online cpus on which the domain can run */
- online = cpupool_scheduler_cpumask(vc->domain->cpupool);
+ online = cpupool_domain_cpumask(vc->domain);
cpumask_and(&cpus, vc->cpu_hard_affinity, online);
for_each_csched_balance_step( balance_step )
*/
mask = _cpumask_scratch[svc->vcpu->processor];
- cpupool_mask = cpupool_scheduler_cpumask(svc->vcpu->domain->cpupool);
+ cpupool_mask = cpupool_domain_cpumask(svc->vcpu->domain);
cpumask_and(mask, cpupool_mask, svc->vcpu->cpu_hard_affinity);
cpulist_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), mask);
printk("[%5d.%-2u] cpu %u, (%"PRI_stime", %"PRI_stime"),"
cpumask_t *online;
int cpu;
- online = cpupool_scheduler_cpumask(vc->domain->cpupool);
+ online = cpupool_domain_cpumask(vc->domain);
cpumask_and(&cpus, online, vc->cpu_hard_affinity);
cpu = cpumask_test_cpu(vc->processor, &cpus)
iter_svc = __q_elem(iter);
/* mask cpu_hard_affinity & cpupool & mask */
- online = cpupool_scheduler_cpumask(iter_svc->vcpu->domain->cpupool);
+ online = cpupool_domain_cpumask(iter_svc->vcpu->domain);
cpumask_and(&cpu_common, online, iter_svc->vcpu->cpu_hard_affinity);
cpumask_and(&cpu_common, mask, &cpu_common);
if ( cpumask_empty(&cpu_common) )
if ( new == NULL || is_idle_vcpu(new->vcpu) )
return;
- online = cpupool_scheduler_cpumask(new->vcpu->domain->cpupool);
+ online = cpupool_domain_cpumask(new->vcpu->domain);
+ cpumask_and(&not_tickled, online, new->vcpu->cpu_hard_affinity);
cpumask_andnot(&not_tickled, &not_tickled, &prv->tickled);
ASSERT(!list_empty(&prv->sdom));
sdom = list_entry(prv->sdom.next, struct rt_dom, sdom_elem);
- online = cpupool_scheduler_cpumask(sdom->dom->cpupool);
+ online = cpupool_domain_cpumask(sdom->dom);
snext = __runq_pick(ops, online); /* pick snext from ALL valid cpus */
runq_tickle(ops, snext);
ASSERT(!list_empty(&prv->sdom));
sdom = list_entry(prv->sdom.next, struct rt_dom, sdom_elem);
- online = cpupool_scheduler_cpumask(sdom->dom->cpupool);
+ online = cpupool_domain_cpumask(sdom->dom);
snext = __runq_pick(ops, online); /* pick snext from ALL cpus */
runq_tickle(ops, snext);
#define DOM2OP(_d) (((_d)->cpupool == NULL) ? &ops : ((_d)->cpupool->sched))
#define VCPU2OP(_v) (DOM2OP((_v)->domain))
-#define VCPU2ONLINE(_v) cpupool_online_cpumask((_v)->domain->cpupool)
+#define VCPU2ONLINE(_v) cpupool_domain_cpumask((_v)->domain)
static inline void trace_runstate_change(struct vcpu *v, int new_state)
{
atomic_t refcnt;
};
-#define cpupool_scheduler_cpumask(_pool) \
- (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
#define cpupool_online_cpumask(_pool) \
(((_pool) == NULL) ? &cpu_online_map : (_pool)->cpu_valid)
+static inline cpumask_t* cpupool_domain_cpumask(struct domain *d)
+{
+ /*
+ * d->cpupool is NULL only for the idle domain, and no one should
+ * be interested in calling this for the idle domain.
+ */
+ ASSERT(d->cpupool != NULL);
+ return d->cpupool->cpu_valid;
+}
+
#endif /* __XEN_SCHED_IF_H__ */
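For reference, a minimal sketch of the intended call pattern, as used
in the scheduler hunks above (vc standing for a struct vcpu *):

    cpumask_t cpus;
    cpumask_t *online = cpupool_domain_cpumask(vc->domain);

    /* pCPUs vc can actually run on: its cpupool AND its hard affinity. */
    cpumask_and(&cpus, online, vc->cpu_hard_affinity);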