* by alloc_cpu_rm_data() is modified only when the cpu in question is
* being moved from or to a cpupool.
*/
-struct cpu_rm_data *alloc_cpu_rm_data(unsigned int cpu)
+struct cpu_rm_data *alloc_cpu_rm_data(unsigned int cpu, bool aff_alloc)
{
struct cpu_rm_data *data;
const struct sched_resource *sr;
if ( !data )
goto out;
+ if ( aff_alloc )
+ {
+ if ( !alloc_affinity_masks(&data->affinity) )
+ {
+ XFREE(data);
+ goto out;
+ }
+ }
+ else
+ memset(&data->affinity, 0, sizeof(data->affinity));
+
data->old_ops = sr->scheduler;
data->vpriv_old = idle_vcpu[cpu]->sched_unit->priv;
data->ppriv_old = sr->sched_priv;
{
while ( idx > 0 )
sched_res_free(&data->sr[--idx]->rcu);
+ free_affinity_masks(&data->affinity);
XFREE(data);
goto out;
}
{
sched_free_udata(mem->old_ops, mem->vpriv_old);
sched_free_pdata(mem->old_ops, mem->ppriv_old, cpu);
+ free_affinity_masks(&mem->affinity);
xfree(mem);
}
* The cpu is already marked as "free" and not valid any longer for its
* cpupool.
*/
-int schedule_cpu_rm(unsigned int cpu)
+int schedule_cpu_rm(unsigned int cpu, struct cpu_rm_data *data)
{
struct sched_resource *sr;
- struct cpu_rm_data *data;
struct sched_unit *unit;
spinlock_t *old_lock;
unsigned long flags;
int idx = 0;
unsigned int cpu_iter;
+ bool free_data = !data;
- data = alloc_cpu_rm_data(cpu);
+ if ( !data )
+ data = alloc_cpu_rm_data(cpu, false);
if ( !data )
return -ENOMEM;
sched_deinit_pdata(data->old_ops, data->ppriv_old, cpu);
rcu_read_unlock(&sched_res_rculock);
- free_cpu_rm_data(data, cpu);
+ if ( free_data )
+ free_cpu_rm_data(data, cpu);
return 0;
}
}
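Taken together, the alloc_cpu_rm_data()/schedule_cpu_rm() changes implement a simple ownership rule: if the caller hands in preallocated cpu_rm_data it stays responsible for freeing it, otherwise schedule_cpu_rm() allocates and frees its own copy. A minimal, self-contained sketch of that idiom (hypothetical names, plain calloc()/free() in place of the hypervisor's allocators) could look like this:

#include <stdlib.h>

struct work_data {                /* stand-in for struct cpu_rm_data */
    int scratch;
};

/*
 * If data is NULL, allocate a private copy and free it before returning;
 * otherwise the caller keeps ownership of the buffer it passed in.
 * Mirrors "bool free_data = !data;" in schedule_cpu_rm().
 */
int do_work(struct work_data *data)
{
    int free_data = !data;

    if ( !data )
        data = calloc(1, sizeof(*data));
    if ( !data )
        return -1;                /* -ENOMEM in the hypervisor */

    data->scratch = 42;           /* ... the actual work would go here ... */

    if ( free_data )
        free(data);

    return 0;
}

int main(void)
{
    struct work_data pre = { 0 };

    do_work(&pre);                /* caller-owned buffer, not freed here */
    do_work(NULL);                /* allocated and freed internally */
    return 0;
}

A caller on the hot-unplug path preallocates the structure up front and passes it in, so no allocation has to happen while the removal itself runs.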
/* Update affinities of all domains in a cpupool. */
-static void cpupool_update_node_affinity(const struct cpupool *c)
+static void cpupool_update_node_affinity(const struct cpupool *c,
+ struct affinity_masks *masks)
{
- struct affinity_masks masks;
+ struct affinity_masks local_masks;
struct domain *d;
- if ( !alloc_affinity_masks(&masks) )
- return;
+ if ( !masks )
+ {
+ if ( !alloc_affinity_masks(&local_masks) )
+ return;
+ masks = &local_masks;
+ }
rcu_read_lock(&domlist_read_lock);
for_each_domain_in_cpupool(d, c)
- domain_update_node_aff(d, &masks);
+ domain_update_node_aff(d, masks);
rcu_read_unlock(&domlist_read_lock);
- free_affinity_masks(&masks);
+ if ( masks == &local_masks )
+ free_affinity_masks(masks);
}
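cpupool_update_node_affinity() follows the same pattern with a stack-allocated fallback instead of an ownership flag: it uses caller-supplied scratch masks when given some and only allocates (and later frees) a local set when passed NULL. A self-contained sketch of that fallback and its pointer-comparison cleanup, with hypothetical names and standard allocators standing in for alloc_affinity_masks()/free_affinity_masks():

#include <stdlib.h>

struct scratch_masks {            /* stand-in for struct affinity_masks */
    unsigned long *hard;
    unsigned long *soft;
};

/* Allocate both members or none. */
int scratch_alloc(struct scratch_masks *m)
{
    m->hard = calloc(8, sizeof(*m->hard));
    m->soft = calloc(8, sizeof(*m->soft));
    if ( !m->hard || !m->soft )
    {
        free(m->hard);
        free(m->soft);
        return 0;
    }
    return 1;
}

void scratch_free(struct scratch_masks *m)
{
    free(m->hard);
    free(m->soft);
}

/* Use the caller's masks if provided, else fall back to a local set. */
void update_affinity(struct scratch_masks *masks)
{
    struct scratch_masks local;

    if ( !masks )
    {
        if ( !scratch_alloc(&local) )
            return;
        masks = &local;
    }

    /* ... walk the domains using masks->hard / masks->soft ... */

    if ( masks == &local )        /* free only the fallback we created */
        scratch_free(masks);
}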
/*
rcu_read_unlock(&sched_res_rculock);
- cpupool_update_node_affinity(c);
+ cpupool_update_node_affinity(c, NULL);
return 0;
}
-static int cpupool_unassign_cpu_finish(struct cpupool *c)
+static int cpupool_unassign_cpu_finish(struct cpupool *c,
+ struct cpu_rm_data *mem)
{
int cpu = cpupool_moving_cpu;
const cpumask_t *cpus;
+ struct affinity_masks *masks = mem ? &mem->affinity : NULL;
int ret;
if ( c != cpupool_cpu_moving )
*/
if ( !ret )
{
- ret = schedule_cpu_rm(cpu);
+ ret = schedule_cpu_rm(cpu, mem);
if ( ret )
cpumask_andnot(&cpupool_free_cpus, &cpupool_free_cpus, cpus);
else
}
rcu_read_unlock(&sched_res_rculock);
- cpupool_update_node_affinity(c);
+ cpupool_update_node_affinity(c, masks);
return ret;
}
cpupool_cpu_moving->cpupool_id, cpupool_moving_cpu);
spin_lock(&cpupool_lock);
- ret = cpupool_unassign_cpu_finish(c);
+ ret = cpupool_unassign_cpu_finish(c, NULL);
spin_unlock(&cpupool_lock);
debugtrace_printk("cpupool_unassign_cpu ret=%ld\n", ret);
* This function is called in stop_machine context, so we can be sure no
* non-idle vcpu is active on the system.
*/
-static void cpupool_cpu_remove(unsigned int cpu)
+static void cpupool_cpu_remove(unsigned int cpu, struct cpu_rm_data *mem)
{
int ret;
if ( !cpumask_test_cpu(cpu, &cpupool_free_cpus) )
{
- ret = cpupool_unassign_cpu_finish(cpupool0);
+ ret = cpupool_unassign_cpu_finish(cpupool0, mem);
BUG_ON(ret);
}
cpumask_clear_cpu(cpu, &cpupool_free_cpus);
{
ret = cpupool_unassign_cpu_start(c, master_cpu);
BUG_ON(ret);
- ret = cpupool_unassign_cpu_finish(c);
+ ret = cpupool_unassign_cpu_finish(c, NULL);
BUG_ON(ret);
}
}
static int cpu_callback(
struct notifier_block *nfb, unsigned long action, void *hcpu)
{
+ static struct cpu_rm_data *mem;
+
unsigned int cpu = (unsigned long)hcpu;
int rc = 0;
switch ( action )
{
case CPU_DOWN_FAILED:
+ if ( system_state <= SYS_STATE_active )
+ {
+ if ( mem )
+ {
+ free_cpu_rm_data(mem, cpu);
+ mem = NULL;
+ }
+ rc = cpupool_cpu_add(cpu);
+ }
+ break;
case CPU_ONLINE:
if ( system_state <= SYS_STATE_active )
rc = cpupool_cpu_add(cpu);
break;
case CPU_DOWN_PREPARE:
/* Suspend/Resume don't change assignments of cpus to cpupools. */
if ( system_state <= SYS_STATE_active )
+ {
rc = cpupool_cpu_remove_prologue(cpu);
+ if ( !rc )
+ {
+ ASSERT(!mem);
+ mem = alloc_cpu_rm_data(cpu, true);
+ rc = mem ? 0 : -ENOMEM;
+ }
+ }
break;
case CPU_DYING:
/* Suspend/Resume don't change assignments of cpus to cpupools. */
if ( system_state <= SYS_STATE_active )
- cpupool_cpu_remove(cpu);
+ {
+ ASSERT(mem);
+ cpupool_cpu_remove(cpu, mem);
+ }
+ break;
+ case CPU_DEAD:
+ if ( system_state <= SYS_STATE_active )
+ {
+ ASSERT(mem);
+ free_cpu_rm_data(mem, cpu);
+ mem = NULL;
+ }
break;
case CPU_RESUME_FAILED:
cpupool_cpu_remove_forced(cpu);
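The notifier changes are the heart of the fix: memory for the removal is allocated in CPU_DOWN_PREPARE, where allocation is still allowed, merely consumed in CPU_DYING (which runs in stop_machine context and must not allocate), and released in CPU_DEAD, or in CPU_DOWN_FAILED if the unplug is aborted. A self-contained sketch of that lifecycle, using hypothetical names and a plain enum in place of the notifier actions:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

enum hotplug_event { DOWN_PREPARE, DYING, DEAD, DOWN_FAILED };

struct rm_data { int cookie; };   /* stand-in for struct cpu_rm_data */

static struct rm_data *mem;       /* lives across successive events */

void on_event(enum hotplug_event ev)
{
    switch ( ev )
    {
    case DOWN_PREPARE:            /* process context: allocation is fine */
        assert(!mem);
        mem = calloc(1, sizeof(*mem));
        if ( !mem )
            exit(EXIT_FAILURE);
        break;
    case DYING:                   /* stop_machine-like context: use only */
        assert(mem);
        mem->cookie = 1;          /* ... remove the cpu using mem ... */
        break;
    case DEAD:                    /* normal context again: release */
    case DOWN_FAILED:             /* unplug aborted: release as well */
        free(mem);
        mem = NULL;
        break;
    }
}

int main(void)
{
    /* Successful unplug: prepare -> dying -> dead. */
    on_event(DOWN_PREPARE);
    on_event(DYING);
    on_event(DEAD);

    /* Aborted unplug: prepare -> down_failed. */
    on_event(DOWN_PREPARE);
    on_event(DOWN_FAILED);

    printf("lifecycle ok\n");
    return 0;
}

A single static pointer suffices because CPU hotplug operations are serialized, so only one removal can be in flight at a time.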
/* Memory allocation related data for schedule_cpu_rm(). */
struct cpu_rm_data {
+ struct affinity_masks affinity;
const struct scheduler *old_ops;
void *ppriv_old;
void *vpriv_old;
void scheduler_free(struct scheduler *sched);
int cpu_disable_scheduler(unsigned int cpu);
int schedule_cpu_add(unsigned int cpu, struct cpupool *c);
-struct cpu_rm_data *alloc_cpu_rm_data(unsigned int cpu);
+struct cpu_rm_data *alloc_cpu_rm_data(unsigned int cpu, bool aff_alloc);
void free_cpu_rm_data(struct cpu_rm_data *mem, unsigned int cpu);
-int schedule_cpu_rm(unsigned int cpu);
+int schedule_cpu_rm(unsigned int cpu, struct cpu_rm_data *mem);
int sched_move_domain(struct domain *d, struct cpupool *c);
struct cpupool *cpupool_get_by_id(unsigned int poolid);
void cpupool_put(struct cpupool *pool);