Make use of the const qualifier more often in scheduling code.
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Dario Faggioli <dfaggioli@suse.com>
Acked-by: Meng Xu <mengxu@cis.upenn.edu>
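
A standalone sketch of the pattern being applied (hypothetical names, not
code taken from the schedulers): wherever a function only reads through a
pointer parameter or local, the pointee is qualified as const, so the
read-only contract shows up in the prototype and the compiler rejects
accidental writes, while callers keep passing non-const pointers unchanged.

    #include <stdio.h>

    /* Stand-in for a scheduler-private per-unit structure. */
    struct unit {
        int weight;
        int credit;
    };

    /*
     * Read-only helper: the pointee is const, so the prototype documents
     * that the unit is not modified and the compiler rejects any
     * accidental write through 'svc'.
     */
    static int unit_score(const struct unit *svc)
    {
        return svc->weight + svc->credit;
    }

    /* Mutating helper keeps the plain (non-const) pointer. */
    static void unit_charge(struct unit *svc, int amount)
    {
        svc->credit -= amount;
    }

    int main(void)
    {
        struct unit u = { .weight = 256, .credit = 100 };

        unit_charge(&u, 10);
        /*
         * Passing a non-const pointer where a pointer-to-const is
         * expected is an implicit, safe conversion; the reverse
         * direction needs a cast, which is exactly what the qualifier
         * is meant to flag.
         */
        printf("score = %d\n", unit_score(&u));
        return 0;
    }

The diff below applies the same idea to function parameters, local pointers
and cpumask pointers across the credit, credit2, null, RT and ARINC653
schedulers as well as the common scheduling code.
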
a653sched_pick_resource(const struct scheduler *ops,
const struct sched_unit *unit)
{
- cpumask_t *online;
+ const cpumask_t *online;
unsigned int cpu;
/*
void *pdata, void *vdata)
{
struct sched_resource *sr = get_sched_res(cpu);
- arinc653_unit_t *svc = vdata;
+ const arinc653_unit_t *svc = vdata;
ASSERT(!pdata && svc && is_idle_unit(svc->unit));
static inline struct scheduler *unit_scheduler(const struct sched_unit *unit)
{
- struct domain *d = unit->domain;
+ const struct domain *d = unit->domain;
if ( likely(d->cpupool != NULL) )
return d->cpupool->sched;
}
#define VCPU2ONLINE(_v) cpupool_domain_master_cpumask((_v)->domain)
-static inline void trace_runstate_change(struct vcpu *v, int new_state)
+static inline void trace_runstate_change(const struct vcpu *v, int new_state)
{
struct { uint32_t vcpu:16, domain:16; } d;
uint32_t event;
__trace_var(event, 1/*tsc*/, sizeof(d), &d);
}
-static inline void trace_continue_running(struct vcpu *v)
+static inline void trace_continue_running(const struct vcpu *v)
{
struct { uint32_t vcpu:16, domain:16; } d;
atomic_dec(&per_cpu(sched_urgent_count, cpu));
}
-void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
+void vcpu_runstate_get(const struct vcpu *v,
+ struct vcpu_runstate_info *runstate)
{
spinlock_t *lock;
s_time_t delta;
uint64_t get_cpu_idle_time(unsigned int cpu)
{
struct vcpu_runstate_info state = { 0 };
- struct vcpu *v = idle_vcpu[cpu];
+ const struct vcpu *v = idle_vcpu[cpu];
if ( cpu_online(cpu) && v )
vcpu_runstate_get(v, &state);
static void sched_free_unit(struct sched_unit *unit, struct vcpu *v)
{
- struct vcpu *vunit;
+ const struct vcpu *vunit;
unsigned int cnt = 0;
/* Don't count to be released vcpu, might be not in vcpu list yet. */
int sched_init_vcpu(struct vcpu *v)
{
- struct domain *d = v->domain;
+ const struct domain *d = v->domain;
struct sched_unit *unit;
unsigned int processor;
unsigned int new_cpu)
{
unsigned int old_cpu = unit->res->master_cpu;
- struct vcpu *v;
+ const struct vcpu *v;
rcu_read_lock(&sched_res_rculock);
return false;
}
-static void sched_reset_affinity_broken(struct sched_unit *unit)
+static void sched_reset_affinity_broken(const struct sched_unit *unit)
{
struct vcpu *v;
int cpu_disable_scheduler(unsigned int cpu)
{
struct domain *d;
- struct cpupool *c;
+ const struct cpupool *c;
cpumask_t online_affinity;
int ret = 0;
static int cpu_disable_scheduler_check(unsigned int cpu)
{
struct domain *d;
- struct vcpu *v;
- struct cpupool *c;
+ const struct vcpu *v;
+ const struct cpupool *c;
c = get_sched_res(cpu)->cpupool;
if ( c == NULL )
static void csched_acct(void *dummy);
static inline int
-__unit_on_runq(struct csched_unit *svc)
+__unit_on_runq(const struct csched_unit *svc)
{
return !list_empty(&svc->runq_elem);
}
DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
-static inline void __runq_tickle(struct csched_unit *new)
+static inline void __runq_tickle(const struct csched_unit *new)
{
unsigned int cpu = sched_unit_master(new->unit);
- struct sched_resource *sr = get_sched_res(cpu);
- struct sched_unit *unit = new->unit;
+ const struct sched_resource *sr = get_sched_res(cpu);
+ const struct sched_unit *unit = new->unit;
struct csched_unit * const cur = CSCHED_UNIT(curr_on_cpu(cpu));
struct csched_private *prv = CSCHED_PRIV(sr->scheduler);
cpumask_t mask, idle_mask, *online;
static void
csched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
{
- struct csched_private *prv = CSCHED_PRIV(ops);
+ const struct csched_private *prv = CSCHED_PRIV(ops);
/*
* pcpu either points to a valid struct csched_pcpu, or is NULL, if we're
#ifndef NDEBUG
static inline void
-__csched_unit_check(struct sched_unit *unit)
+__csched_unit_check(const struct sched_unit *unit)
{
struct csched_unit * const svc = CSCHED_UNIT(unit);
struct csched_dom * const sdom = svc->sdom;
static inline int
__csched_unit_is_migrateable(const struct csched_private *prv,
- struct sched_unit *unit,
- int dest_cpu, cpumask_t *mask)
+ const struct sched_unit *unit,
+ int dest_cpu, const cpumask_t *mask)
{
const struct csched_unit *svc = CSCHED_UNIT(unit);
/*
/* We must always use cpu's scratch space */
cpumask_t *cpus = cpumask_scratch_cpu(cpu);
cpumask_t idlers;
- cpumask_t *online = cpupool_domain_master_cpumask(unit->domain);
+ const cpumask_t *online = cpupool_domain_master_cpumask(unit->domain);
struct csched_pcpu *spc = NULL;
int balance_step;
{
struct sched_unit *currunit = current->sched_unit;
struct csched_unit * const svc = CSCHED_UNIT(currunit);
- struct sched_resource *sr = get_sched_res(cpu);
+ const struct sched_resource *sr = get_sched_res(cpu);
const struct scheduler *ops = sr->scheduler;
ASSERT( sched_unit_master(currunit) == cpu );
{
struct csched_unit * const svc = CSCHED_UNIT(unit);
unsigned int cpu = sched_unit_master(unit);
- struct sched_resource *sr = get_sched_res(cpu);
+ const struct sched_resource *sr = get_sched_res(cpu);
SCHED_STAT_CRANK(unit_sleep);
csched_tick(void *_cpu)
{
unsigned int cpu = (unsigned long)_cpu;
- struct sched_resource *sr = get_sched_res(cpu);
+ const struct sched_resource *sr = get_sched_res(cpu);
struct csched_pcpu *spc = CSCHED_PCPU(cpu);
struct csched_private *prv = CSCHED_PRIV(sr->scheduler);
static struct csched_unit *
csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
{
- struct sched_resource *sr = get_sched_res(cpu);
+ const struct sched_resource *sr = get_sched_res(cpu);
const struct csched_private * const prv = CSCHED_PRIV(sr->scheduler);
const struct csched_pcpu * const peer_pcpu = CSCHED_PCPU(peer_cpu);
struct csched_unit *speer;
csched_load_balance(struct csched_private *prv, int cpu,
struct csched_unit *snext, bool *stolen)
{
- struct cpupool *c = get_sched_res(cpu)->cpupool;
+ const struct cpupool *c = get_sched_res(cpu)->cpupool;
struct csched_unit *speer;
cpumask_t workers;
- cpumask_t *online = c->res_valid;
+ const cpumask_t *online = c->res_valid;
int peer_cpu, first_cpu, peer_node, bstep;
int node = cpu_to_node(cpu);
}
static void
-csched_dump_unit(struct csched_unit *svc)
+csched_dump_unit(const struct csched_unit *svc)
{
struct csched_dom * const sdom = svc->sdom;
static void
csched_dump_pcpu(const struct scheduler *ops, int cpu)
{
- struct list_head *runq, *iter;
+ const struct list_head *runq;
+ struct list_head *iter;
struct csched_private *prv = CSCHED_PRIV(ops);
- struct csched_pcpu *spc;
- struct csched_unit *svc;
+ const struct csched_pcpu *spc;
+ const struct csched_unit *svc;
spinlock_t *lock;
unsigned long flags;
int loop;
loop = 0;
list_for_each( iter_sdom, &prv->active_sdom )
{
- struct csched_dom *sdom;
+ const struct csched_dom *sdom;
+
sdom = list_entry(iter_sdom, struct csched_dom, active_sdom_elem);
list_for_each( iter_svc, &sdom->active_unit )
{
- struct csched_unit *svc;
+ const struct csched_unit *svc;
spinlock_t *lock;
svc = list_entry(iter_svc, struct csched_unit, active_unit_elem);
*/
static int get_fallback_cpu(struct csched2_unit *svc)
{
- struct sched_unit *unit = svc->unit;
+ const struct sched_unit *unit = svc->unit;
unsigned int bs;
SCHED_STAT_CRANK(need_fallback_cpu);
*
* FIXME: Do pre-calculated division?
*/
-static void t2c_update(struct csched2_runqueue_data *rqd, s_time_t time,
+static void t2c_update(const struct csched2_runqueue_data *rqd, s_time_t time,
struct csched2_unit *svc)
{
uint64_t val = time * rqd->max_weight + svc->residual;
svc->credit -= val;
}
-static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct csched2_unit *svc)
+static s_time_t c2t(const struct csched2_runqueue_data *rqd, s_time_t credit,
+ const struct csched2_unit *svc)
{
return credit * svc->weight / rqd->max_weight;
}
* Runqueue related code.
*/
-static inline int unit_on_runq(struct csched2_unit *svc)
+static inline int unit_on_runq(const struct csched2_unit *svc)
{
return !list_empty(&svc->runq_elem);
}
}
static unsigned int
-cpu_to_runqueue(struct csched2_private *prv, unsigned int cpu)
+cpu_to_runqueue(const struct csched2_private *prv, unsigned int cpu)
{
- struct csched2_runqueue_data *rqd;
+ const struct csched2_runqueue_data *rqd;
unsigned int rqi;
for ( rqi = 0; rqi < nr_cpu_ids; rqi++ )
list_for_each( iter, &rqd->svc )
{
- struct csched2_unit * svc = list_entry(iter, struct csched2_unit, rqd_elem);
+ const struct csched2_unit * svc = list_entry(iter, struct csched2_unit, rqd_elem);
if ( svc->weight > max_weight )
max_weight = svc->weight;
}
static void
-runq_assign(const struct scheduler *ops, struct sched_unit *unit)
+runq_assign(const struct scheduler *ops, const struct sched_unit *unit)
{
struct csched2_unit *svc = unit->priv;
}
static void
-runq_deassign(const struct scheduler *ops, struct sched_unit *unit)
+runq_deassign(const struct scheduler *ops, const struct sched_unit *unit)
{
struct csched2_unit *svc = unit->priv;
update_svc_load(const struct scheduler *ops,
struct csched2_unit *svc, int change, s_time_t now)
{
- struct csched2_private *prv = csched2_priv(ops);
+ const struct csched2_private *prv = csched2_priv(ops);
s_time_t delta, unit_load;
unsigned int P, W;
* Within the same class, the highest difference of credit.
*/
static s_time_t tickle_score(const struct scheduler *ops, s_time_t now,
- struct csched2_unit *new, unsigned int cpu)
+ const struct csched2_unit *new, unsigned int cpu)
{
struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
struct csched2_unit * cur = csched2_unit(curr_on_cpu(cpu));
- struct csched2_private *prv = csched2_priv(ops);
+ const struct csched2_private *prv = csched2_priv(ops);
s_time_t score;
/*
struct sched_unit *unit = new->unit;
unsigned int bs, cpu = sched_unit_master(unit);
struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
- cpumask_t *online = cpupool_domain_master_cpumask(unit->domain);
+ const cpumask_t *online = cpupool_domain_master_cpumask(unit->domain);
cpumask_t mask;
ASSERT(new->rqd == rqd);
#ifndef NDEBUG
static inline void
-csched2_unit_check(struct sched_unit *unit)
+csched2_unit_check(const struct sched_unit *unit)
{
struct csched2_unit * const svc = csched2_unit(unit);
struct csched2_dom * const sdom = svc->sdom;
* - svc is not already flagged to migrate,
* - if svc is allowed to run on at least one of the pcpus of rqd.
*/
-static bool unit_is_migrateable(struct csched2_unit *svc,
- struct csched2_runqueue_data *rqd)
+static bool unit_is_migrateable(const struct csched2_unit *svc,
+ const struct csched2_runqueue_data *rqd)
{
struct sched_unit *unit = svc->unit;
int cpu = sched_unit_master(unit);
static void
csched2_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
{
- struct csched2_unit *svc = unit->priv;
+ const struct csched2_unit *svc = unit->priv;
struct csched2_dom * const sdom = svc->sdom;
spinlock_t *lock;
int rt_credit; /* Proposed runtime measured in credits */
struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
struct list_head *runq = &rqd->runq;
- struct csched2_private *prv = csched2_priv(ops);
+ const struct csched2_private *prv = csched2_priv(ops);
/*
* If we're idle, just stay so. Others (or external events)
unsigned int *skipped)
{
struct list_head *iter, *temp;
- struct sched_resource *sr = get_sched_res(cpu);
+ const struct sched_resource *sr = get_sched_res(cpu);
struct csched2_unit *snext = NULL;
struct csched2_private *prv = csched2_priv(sr->scheduler);
bool yield = false, soft_aff_preempt = false;
}
static void
-csched2_dump_unit(struct csched2_private *prv, struct csched2_unit *svc)
+csched2_dump_unit(const struct csched2_private *prv,
+ const struct csched2_unit *svc)
{
printk("[%i.%i] flags=%x cpu=%i",
svc->unit->domain->domain_id,
static inline void
dump_pcpu(const struct scheduler *ops, int cpu)
{
- struct csched2_private *prv = csched2_priv(ops);
- struct csched2_unit *svc;
+ const struct csched2_private *prv = csched2_priv(ops);
+ const struct csched2_unit *svc;
printk("CPU[%02d] runq=%d, sibling={%*pbl}, core={%*pbl}\n",
cpu, c2r(cpu),
loop = 0;
list_for_each( iter_sdom, &prv->sdom )
{
- struct csched2_dom *sdom;
- struct sched_unit *unit;
+ const struct csched2_dom *sdom;
+ const struct sched_unit *unit;
sdom = list_entry(iter_sdom, struct csched2_dom, sdom_elem);
printk("RUNQ:\n");
list_for_each( iter, runq )
{
- struct csched2_unit *svc = runq_elem(iter);
+ const struct csched2_unit *svc = runq_elem(iter);
if ( svc )
{
* So this is not part of any hot path.
*/
static struct sched_resource *
-pick_res(struct null_private *prv, const struct sched_unit *unit)
+pick_res(const struct null_private *prv, const struct sched_unit *unit)
{
unsigned int bs;
unsigned int cpu = sched_unit_master(unit), new_cpu;
- cpumask_t *cpus = cpupool_domain_master_cpumask(unit->domain);
- struct null_pcpu *npc = get_sched_res(cpu)->sched_priv;
+ const cpumask_t *cpus = cpupool_domain_master_cpumask(unit->domain);
+ const struct null_pcpu *npc = get_sched_res(cpu)->sched_priv;
ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
}
/* Returns true if a cpu was tickled */
-static bool unit_deassign(struct null_private *prv, struct sched_unit *unit)
+static bool unit_deassign(struct null_private *prv,
+ const struct sched_unit *unit)
{
unsigned int bs;
unsigned int cpu = sched_unit_master(unit);
{
struct sched_resource *sr = get_sched_res(cpu);
struct null_private *prv = null_priv(new_ops);
- struct null_unit *nvc = vdata;
+ const struct null_unit *nvc = vdata;
ASSERT(nvc && is_idle_unit(nvc->unit));
prev->next_task->migrated = false;
}
-static inline void dump_unit(struct null_private *prv, struct null_unit *nvc)
+static inline void dump_unit(const struct null_private *prv,
+ const struct null_unit *nvc)
{
printk("[%i.%i] pcpu=%d", nvc->unit->domain->domain_id,
nvc->unit->unit_id, list_empty(&nvc->waitq_elem) ?
static void null_dump_pcpu(const struct scheduler *ops, int cpu)
{
struct null_private *prv = null_priv(ops);
- struct null_pcpu *npc = get_sched_res(cpu)->sched_priv;
- struct null_unit *nvc;
+ const struct null_pcpu *npc = get_sched_res(cpu)->sched_priv;
+ const struct null_unit *nvc;
spinlock_t *lock;
unsigned long flags;
rt_dump_pcpu(const struct scheduler *ops, int cpu)
{
struct rt_private *prv = rt_priv(ops);
- struct rt_unit *svc;
+ const struct rt_unit *svc;
unsigned long flags;
spin_lock_irqsave(&prv->lock, flags);
{
struct list_head *runq, *depletedq, *replq, *iter;
struct rt_private *prv = rt_priv(ops);
- struct rt_unit *svc;
- struct rt_dom *sdom;
+ const struct rt_unit *svc;
+ const struct rt_dom *sdom;
unsigned long flags;
spin_lock_irqsave(&prv->lock, flags);
printk("Domain info:\n");
list_for_each ( iter, &prv->sdom )
{
- struct sched_unit *unit;
+ const struct sched_unit *unit;
sdom = list_entry(iter, struct rt_dom, sdom_elem);
printk("\tdomain: %d\n", sdom->dom->domain_id);
list_for_each ( iter, queue )
{
- struct rt_unit * iter_svc = (*qelem)(iter);
+ const struct rt_unit * iter_svc = (*qelem)(iter);
if ( compare_unit_priority(svc, iter_svc) > 0 )
break;
first = false;
*/
if ( !list_empty(replq) )
{
- struct rt_unit *svc_next = replq_elem(replq->next);
+ const struct rt_unit *svc_next = replq_elem(replq->next);
set_timer(&prv->repl_timer, svc_next->cur_deadline);
}
else
replq_reinsert(const struct scheduler *ops, struct rt_unit *svc)
{
struct list_head *replq = rt_replq(ops);
- struct rt_unit *rearm_svc = svc;
+ const struct rt_unit *rearm_svc = svc;
bool rearm = false;
ASSERT( unit_on_replq(svc) );
rt_res_pick_locked(const struct sched_unit *unit, unsigned int locked_cpu)
{
cpumask_t *cpus = cpumask_scratch_cpu(locked_cpu);
- cpumask_t *online;
+ const cpumask_t *online;
int cpu;
online = cpupool_domain_master_cpumask(unit->domain);
struct rt_unit *svc = NULL;
struct rt_unit *iter_svc = NULL;
cpumask_t *cpu_common = cpumask_scratch_cpu(cpu);
- cpumask_t *online;
+ const cpumask_t *online;
list_for_each ( iter, runq )
{
* lock is grabbed before calling this function
*/
static void
-runq_tickle(const struct scheduler *ops, struct rt_unit *new)
+runq_tickle(const struct scheduler *ops, const struct rt_unit *new)
{
struct rt_private *prv = rt_priv(ops);
- struct rt_unit *latest_deadline_unit = NULL; /* lowest priority */
- struct rt_unit *iter_svc;
- struct sched_unit *iter_unit;
+ const struct rt_unit *latest_deadline_unit = NULL; /* lowest priority */
+ const struct rt_unit *iter_svc;
+ const struct sched_unit *iter_unit;
int cpu = 0, cpu_to_tickle = 0;
cpumask_t *not_tickled = cpumask_scratch_cpu(smp_processor_id());
- cpumask_t *online;
+ const cpumask_t *online;
if ( new == NULL || is_idle_unit(new->unit) )
return;
{
struct rt_private *prv = rt_priv(ops);
struct rt_unit *svc;
- struct sched_unit *unit;
+ const struct sched_unit *unit;
unsigned long flags;
int rc = 0;
struct xen_domctl_schedparam_vcpu local_sched;
*/
static void repl_timer_handler(void *data){
s_time_t now;
- struct scheduler *ops = data;
+ const struct scheduler *ops = data;
struct rt_private *prv = rt_priv(ops);
struct list_head *replq = rt_replq(ops);
struct list_head *runq = rt_runq(ops);
extern struct domain *domain_list;
/* Caller must hold the domlist_read_lock or domlist_update_lock. */
-static inline struct domain *first_domain_in_cpupool( struct cpupool *c)
+static inline struct domain *first_domain_in_cpupool(const struct cpupool *c)
{
struct domain *d;
for (d = rcu_dereference(domain_list); d && d->cpupool != c;
return d;
}
static inline struct domain *next_domain_in_cpupool(
- struct domain *d, struct cpupool *c)
+ struct domain *d, const struct cpupool *c)
{
for (d = rcu_dereference(d->next_in_list); d && d->cpupool != c;
d = rcu_dereference(d->next_in_list));
int vcpu_affinity_domctl(struct domain *d, uint32_t cmd,
struct xen_domctl_vcpuaffinity *vcpuaff);
-void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
+void vcpu_runstate_get(const struct vcpu *v,
+ struct vcpu_runstate_info *runstate);
uint64_t get_cpu_idle_time(unsigned int cpu);
void sched_guest_idle(void (*idle) (void), unsigned int cpu);
void scheduler_enable(void);