integer_param("sched_ratelimit_us", sched_ratelimit_us);
-/* Number of vcpus per struct sched_unit. */
-static unsigned int __read_mostly sched_granularity = 1;
bool __read_mostly sched_disable_smt_switching;
const cpumask_t *sched_res_mask = &cpumask_all;
{
struct sched_unit *unit, **prev_unit;
struct domain *d = v->domain;
+ unsigned int gran = cpupool_get_granularity(d->cpupool);
for_each_sched_unit ( d, unit )
- if ( unit->unit_id / sched_granularity ==
- v->vcpu_id / sched_granularity )
+ if ( unit->unit_id / gran == v->vcpu_id / gran )
break;
if ( unit )
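As an aside, the unit lookup above works by putting a unit and a vcpu into the same bucket when their ids, divided by the granularity, match; this assumes a unit's unit_id is derived from the id of its first vcpu, which is what the division test implies. A minimal standalone sketch, for illustration only and not part of the patch (the unit_id values are an assumption following that convention):

    #include <stdio.h>

    int main(void)
    {
        unsigned int gran = 2;                 /* vcpus per sched_unit */
        unsigned int unit_ids[] = { 0, 2, 4 }; /* unit_id = id of the unit's first vcpu */
        unsigned int vcpu_id, i;

        /* Same test as in the hunk above: matching quotients mean "same unit". */
        for ( vcpu_id = 0; vcpu_id < 6; vcpu_id++ )
            for ( i = 0; i < 3; i++ )
                if ( unit_ids[i] / gran == vcpu_id / gran )
                {
                    printf("vcpu %u -> unit_id %u\n", vcpu_id, unit_ids[i]);
                    break;
                }

        return 0;
    }

With gran = 2 this prints vcpus 0 and 1 mapping to unit_id 0, vcpus 2 and 3 to unit_id 2, and vcpus 4 and 5 to unit_id 4.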
void *unitdata;
struct scheduler *old_ops;
void *old_domdata;
+ unsigned int gran = cpupool_get_granularity(c);
for_each_vcpu ( d, v )
{
if ( IS_ERR(domdata) )
return PTR_ERR(domdata);
- unit_priv = xzalloc_array(void *,
- DIV_ROUND_UP(d->max_vcpus, sched_granularity));
+ unit_priv = xzalloc_array(void *, DIV_ROUND_UP(d->max_vcpus, gran));
if ( unit_priv == NULL )
{
sched_free_domdata(c->sched, domdata);
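Worked example for the allocation above: unit_priv holds one private-data pointer per sched_unit rather than per vcpu, so with granularity 2 and d->max_vcpus = 5 it allocates DIV_ROUND_UP(5, 2) = (5 + 2 - 1) / 2 = 3 slots, covering the units of vcpus {0,1}, {2,3} and {4}.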
if ( is_idle_unit(prev) )
{
prev->runstate_cnt[RUNSTATE_running] = 0;
- prev->runstate_cnt[RUNSTATE_runnable] = sched_granularity;
+ prev->runstate_cnt[RUNSTATE_runnable] = sr->granularity;
}
if ( is_idle_unit(next) )
{
- next->runstate_cnt[RUNSTATE_running] = sched_granularity;
+ next->runstate_cnt[RUNSTATE_running] = sr->granularity;
next->runstate_cnt[RUNSTATE_runnable] = 0;
}
}
else
{
vcpu_context_saved(vprev, vnext);
- if ( sched_granularity == 1 )
+ if ( sr->granularity == 1 )
unit_context_saved(sr);
}
{
struct sched_unit *next;
struct vcpu *v;
+ unsigned int gran = get_sched_res(cpu)->granularity;
if ( !--prev->rendezvous_in_cnt )
{
next = do_schedule(prev, now, cpu);
- atomic_set(&next->rendezvous_out_cnt, sched_granularity + 1);
+ atomic_set(&next->rendezvous_out_cnt, gran + 1);
return next;
}
struct sched_resource *sr;
spinlock_t *lock;
int cpu = smp_processor_id();
+ unsigned int gran = get_sched_res(cpu)->granularity;
ASSERT_NOT_IN_ATOMIC();
now = NOW();
- if ( sched_granularity > 1 )
+ if ( gran > 1 )
{
cpumask_t mask;
- prev->rendezvous_in_cnt = sched_granularity;
+ prev->rendezvous_in_cnt = gran;
cpumask_andnot(&mask, sr->cpus, cpumask_of(cpu));
cpumask_raise_softirq(&mask, SCHED_SLAVE_SOFTIRQ);
next = sched_wait_rendezvous_in(prev, &lock, cpu, now);
init_timer(&sr->s_timer, s_timer_fn, NULL, cpu);
atomic_set(&per_cpu(sched_urgent_count, cpu), 0);
+ /* We start with cpu granularity. */
+ sr->granularity = 1;
+
/* Boot CPU is dealt with later in scheduler_init(). */
if ( cpu == 0 )
return 0;
sched_free_udata(old_ops, vpriv_old);
sched_free_pdata(old_ops, ppriv_old, cpu);
+ get_sched_res(cpu)->granularity = cpupool_get_granularity(c);
get_sched_res(cpu)->cpupool = c;
/* When a cpu is added to a pool, trigger it to go pick up some work */
if ( c != NULL )
/* Scheduling resource mask. */
extern const cpumask_t *sched_res_mask;
+/* Scheduling granularity: how many vcpus are grouped into one struct sched_unit. */
+enum sched_gran {
+ SCHED_GRAN_cpu,
+ SCHED_GRAN_core,
+ SCHED_GRAN_socket
+};
+
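The enum only names a grouping level; it becomes a vcpu count per sched_unit once applied to the host topology. As a hedged sketch (the helper name and its use of the per-cpu topology masks are assumptions, not part of this patch), the translation could look like:

    /*
     * Hypothetical helper: number of cpus (and thus vcpus) grouped into one
     * sched_unit for a given granularity level, derived from cpu's topology.
     */
    static unsigned int sched_gran_count(enum sched_gran gran, unsigned int cpu)
    {
        switch ( gran )
        {
        case SCHED_GRAN_core:
            /* All hyperthreads of one core form a unit. */
            return cpumask_weight(per_cpu(cpu_sibling_mask, cpu));
        case SCHED_GRAN_socket:
            /* All cpus of one socket form a unit. */
            return cpumask_weight(per_cpu(cpu_core_mask, cpu));
        case SCHED_GRAN_cpu:
        default:
            return 1;
        }
    }

A real implementation would additionally have to verify that the chosen level yields the same count on every online cpu (i.e. a symmetric topology), since the per-resource granularity is a single number.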
/*
* In order to allow a scheduler to remap the lock->cpu mapping,
* we have a per-cpu pointer, along with a pre-allocated set of
/* Cpu with lowest id in scheduling resource. */
unsigned int master_cpu;
+ /* Number of vcpus grouped into one sched_unit on this resource. */
+ unsigned int granularity;
const cpumask_t *cpus; /* cpus covered by this struct */
};
struct cpupool *next;
struct scheduler *sched;
atomic_t refcnt;
+ enum sched_gran gran; /* scheduling granularity of this pool */
};
#define cpupool_online_cpumask(_pool) \
return d->cpupool->res_valid;
}
+unsigned int cpupool_get_granularity(const struct cpupool *c);
+
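The implementation of cpupool_get_granularity() is not part of this excerpt. A minimal sketch, assuming cpus outside any pool (c == NULL) keep cpu granularity and reusing the hypothetical sched_gran_count() helper sketched above:

    /* Hypothetical sketch only; assumes a symmetric topology. */
    unsigned int cpupool_get_granularity(const struct cpupool *c)
    {
        return c != NULL ? sched_gran_count(c->gran, smp_processor_id()) : 1;
    }

The c == NULL case matches the callers above, which pass a possibly NULL cpupool pointer and, going by the "We start with cpu granularity" initialisation, expect free cpus to stay at one vcpu per sched_unit.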
/*
* Hard and soft affinity load balancing.
*