struct list_head sdom; /* Used mostly for dump keyhandler. */
- int runq_map[NR_CPUS];
cpumask_t active_queues; /* Queues which may have active cpus */
struct csched2_runqueue_data *rqd;
unsigned ratelimit_us; /* each cpupool can have its own ratelimit */
};
+/*
+ * Physical CPU
+ *
+ * The only per-pCPU information we need to maintain is which runqueue
+ * each CPU is part of.
+ */
+static DEFINE_PER_CPU(int, runq_map);
+
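For context, a minimal sketch of how Xen's per-CPU primitives behave
(DEFINE_PER_CPU, per_cpu and this_cpu are the real accessors; the two
helper functions are hypothetical, for illustration only):

    #include <xen/percpu.h>

    /* One private int per physical CPU. */
    static DEFINE_PER_CPU(int, runq_map);

    /* Hypothetical: write the copy of the variable owned by @cpu. */
    static void assign_runq(unsigned int cpu, int rqi)
    {
        per_cpu(runq_map, cpu) = rqi;
    }

    /* Hypothetical: read the copy owned by the CPU we are running on. */
    static int my_runq(void)
    {
        return this_cpu(runq_map);
    }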
/*
* Virtual CPU
*/
}
/* CPU to runq_id helper */
-static inline int c2r(const struct scheduler *ops, unsigned int cpu)
+static inline int c2r(unsigned int cpu)
{
- return csched2_priv(ops)->runq_map[(cpu)];
+ return per_cpu(runq_map, cpu);
}
/* CPU to runqueue struct helper */
static inline struct csched2_runqueue_data *c2rqd(const struct scheduler *ops,
unsigned int cpu)
{
- return &csched2_priv(ops)->rqd[c2r(ops, cpu)];
+ return &csched2_priv(ops)->rqd[c2r(cpu)];
}
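With the ops parameter gone from c2r(), the CPU-to-runqueue-id lookup can
be done anywhere a CPU number is at hand, without a scheduler pointer. A
hypothetical caller, for illustration only:

    /* Hypothetical: do two pCPUs share a runqueue? */
    static inline bool same_runq(unsigned int cpu_a, unsigned int cpu_b)
    {
        return c2r(cpu_a) == c2r(cpu_b);
    }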
/*
ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
ASSERT(!vcpu_on_runq(svc));
- ASSERT(c2r(ops, cpu) == c2r(ops, svc->vcpu->processor));
+ ASSERT(c2r(cpu) == c2r(svc->vcpu->processor));
ASSERT(&svc->rqd->runq == runq);
ASSERT(!is_idle_vcpu(svc->vcpu));
if ( min_rqi == -1 )
{
new_cpu = get_fallback_cpu(svc);
- min_rqi = c2r(ops, new_cpu);
+ min_rqi = c2r(new_cpu);
min_avgload = prv->rqd[min_rqi].b_avgload;
goto out_up;
}
unsigned tasklet:8, idle:8, smt_idle:8, tickled:8;
} d;
d.cpu = cpu;
- d.rq_id = c2r(ops, cpu);
+ d.rq_id = c2r(cpu);
d.tasklet = tasklet_work_scheduled;
d.idle = is_idle_vcpu(current);
d.smt_idle = cpumask_test_cpu(cpu, &rqd->smt_idle);
#define cpustr keyhandler_scratch
cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
- printk("CPU[%02d] runq=%d, sibling=%s, ", cpu, c2r(ops, cpu), cpustr);
+ printk("CPU[%02d] runq=%d, sibling=%s, ", cpu, c2r(cpu), cpustr);
cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
printk("core=%s\n", cpustr);
}
/* Set the runqueue map */
- prv->runq_map[cpu] = rqi;
+ per_cpu(runq_map, cpu) = rqi;
__cpumask_set_cpu(cpu, &rqd->idle);
__cpumask_set_cpu(cpu, &rqd->active);
ASSERT(!pcpu && cpumask_test_cpu(cpu, &prv->initialized));
/* Find the old runqueue and remove this cpu from it */
- rqi = prv->runq_map[cpu];
+ rqi = per_cpu(runq_map, cpu);
rqd = prv->rqd + rqi;
else if ( rqd->pick_bias == cpu )
rqd->pick_bias = cpumask_first(&rqd->active);
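+ /* This CPU is leaving the runqueue: invalidate its map entry. */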
+ per_cpu(runq_map, cpu) = -1;
+
spin_unlock(&rqd->lock);
__cpumask_clear_cpu(cpu, &prv->initialized);
return -ENOMEM;
}
for ( i = 0; i < nr_cpu_ids; i++ )
- {
- prv->runq_map[i] = -1;
prv->rqd[i].id = -1;
- }
+
/* initialize ratelimit */
prv->ratelimit_us = sched_ratelimit_us;