@@ ... @@ void domain_update_node_affinity(struct domain *d)
         cpus_or(cpumask, cpumask, v->cpu_affinity);
 
     for_each_online_node ( node )
-    {
         if ( cpus_intersects(node_to_cpumask(node), cpumask) )
             node_set(node, nodemask);
-        else
-            node_clear(node, nodemask);
-    }
 
     d->node_affinity = nodemask;
     spin_unlock(&d->node_affinity_lock);
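The else branch removed above was dead code: assuming nodemask is initialised empty (NODE_MASK_NONE) at the top of the function, which is what makes the removal safe, the rebuild only ever sets bits, so there is never anything to clear, and dropping the else lets the braces go too. A toy model of the rebuild in plain C, not Xen code, with node_to_cpumask() reduced to an array and the masks to 64-bit integers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Toy stand-ins for the Xen masks: bit n = CPU n / node n. */
        uint64_t cpumask = 0x0f;                      /* union of all vcpu affinities */
        uint64_t node_to_cpumask[2] = { 0x03, 0x30 }; /* CPUs owned by each node */
        uint64_t nodemask = 0;                        /* starts empty, as NODE_MASK_NONE */

        for ( int node = 0; node < 2; node++ )
            if ( node_to_cpumask[node] & cpumask )    /* cpus_intersects() */
                nodemask |= UINT64_C(1) << node;      /* node_set() */

        /* Node 0 (CPUs 0-1) intersects cpumask, node 1 (CPUs 4-5) does not. */
        printf("nodemask = %#llx\n", (unsigned long long)nodemask); /* 0x1 */
        return 0;
    }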
@@ ... @@ int sched_move_domain(struct domain *d, struct cpupool *c)
         SCHED_OP(VCPU2OP(v), destroy_vcpu, v);
 
         cpus_setall(v->cpu_affinity);
-        domain_update_node_affinity(d);
         v->processor = new_p;
         v->sched_priv = vcpu_priv[v->vcpu_id];
         evtchn_move_pirqs(v);
 
         new_p = cycle_cpu(new_p, c->cpu_valid);
     }
+    domain_update_node_affinity(d);
 
     d->cpupool = c;
     SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);
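The hunk above hoists the update out of the vcpu loop: the domain's node affinity depends only on the final cpu_affinity of every vcpu, so one call after the loop yields the same result as the per-vcpu calls it replaces, at a fraction of the cost. A minimal sketch of that hoisting pattern, with toy names rather than the Xen API:

    #include <stdio.h>

    #define NR_VCPUS 4

    struct toy_domain {
        unsigned int vcpu_affinity[NR_VCPUS]; /* one CPU bitmask per vcpu */
        unsigned int node_affinity;           /* derived from all of the above */
    };

    /* Derived state depends only on the final affinities, so it is
     * recomputed once after the loop instead of once per vcpu. */
    static void update_node_affinity(struct toy_domain *d)
    {
        unsigned int m = 0;
        for ( int i = 0; i < NR_VCPUS; i++ )
            m |= d->vcpu_affinity[i];
        d->node_affinity = m;
    }

    int main(void)
    {
        struct toy_domain d = { { 0 }, 0 };

        for ( int i = 0; i < NR_VCPUS; i++ )
            d.vcpu_affinity[i] = 1u << i;    /* mutate every vcpu... */
        update_node_affinity(&d);            /* ...then derive state once */

        printf("node_affinity = %#x\n", d.node_affinity); /* 0xf */
        return 0;
    }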
@@ ... @@ int cpu_disable_scheduler(unsigned int cpu)
     struct vcpu *v;
     struct cpupool *c;
     int ret = 0;
+    bool_t affinity_broken;
 
     c = per_cpu(cpupool, cpu);
     if ( c == NULL )
@@ ... @@ int cpu_disable_scheduler(unsigned int cpu)
         if ( d->cpupool != c )
             continue;
 
+        affinity_broken = 0;
+
         for_each_vcpu ( d, v )
         {
             vcpu_schedule_lock_irq(v);
@@ ... @@ int cpu_disable_scheduler(unsigned int cpu)
                 printk("Breaking vcpu affinity for domain %d vcpu %d\n",
                        v->domain->domain_id, v->vcpu_id);
                 cpus_setall(v->cpu_affinity);
-                domain_update_node_affinity(d);
+                affinity_broken = 1;
             }
 
             if ( v->processor == cpu )
@@ ... @@ int cpu_disable_scheduler(unsigned int cpu)
             if ( v->processor == cpu )
                 ret = -EAGAIN;
         }
+
+        if ( affinity_broken )
+            domain_update_node_affinity(d);
     }
+
     return ret;
 }
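In cpu_disable_scheduler() the removed call sat inside the region guarded by vcpu_schedule_lock_irq(v), while domain_update_node_affinity() takes d->node_affinity_lock of its own (see the first hunk). The new affinity_broken flag just records, under the vcpu lock, that an affinity was widened; the real update then runs at most once per domain, after the loop and outside the lock. A minimal sketch of that defer-out-of-the-critical-section pattern, using a pthread mutex in place of the Xen locks; every name here is a toy stand-in:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t vcpu_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int affinity[4];      /* all-zero: every affinity "broken" */

    /* Stand-in for the once-per-domain update the patch defers; in Xen it
     * takes another lock, so it must not run inside the vcpu lock. */
    static void update_node_affinity(void)
    {
        puts("recomputing node affinity (outside the vcpu lock)");
    }

    int main(void)
    {
        bool affinity_broken = false;

        for ( int i = 0; i < 4; i++ )
        {
            pthread_mutex_lock(&vcpu_lock);
            if ( affinity[i] == 0 )       /* affinity would become empty */
            {
                affinity[i] = ~0u;        /* cpus_setall() equivalent */
                affinity_broken = true;   /* only record it under the lock */
            }
            pthread_mutex_unlock(&vcpu_lock);
        }

        if ( affinity_broken )            /* one deferred update, no lock held */
            update_node_affinity();
        return 0;
    }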