Scheduling code has several places using int or bool_t instead of bool.
Switch those.
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Meng Xu <mengxu@cis.upenn.edu>
Reviewed-by: Dario Faggioli <dfaggioli@suse.com>
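For reference, the conversion pattern applied throughout is roughly the
following standalone C99 sketch (not code from the Xen tree; bool_t is
stood in for by a char typedef here, and bool comes from <stdbool.h>
rather than Xen's own headers):

#include <stdbool.h>

typedef char bool_t;            /* illustrative stand-in only            */

static bool_t old_flag = 0;     /* old style: numeric 0/1 obscure intent */
static bool new_flag = false;   /* new style: true/false state the intent
                                   and give the compiler a chance to warn
                                   about suspect integer conversions     */

static bool is_positive(int x)  /* bool in the prototype documents the
                                   yes/no contract of the function       */
{
    return x > 0;
}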
* arinc653_unit_t pointer. */
struct sched_unit * unit;
/* awake holds whether the UNIT has been woken with vcpu_wake() */
- bool_t awake;
+ bool awake;
/* list holds the linked list information for the list this UNIT
* is stored in */
struct list_head list;
* will mark the UNIT awake.
*/
svc->unit = unit;
- svc->awake = 0;
+ svc->awake = false;
if ( !is_idle_unit(unit) )
list_add(&svc->list, &SCHED_PRIV(ops)->unit_list);
update_schedule_units(ops);
a653sched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
{
if ( AUNIT(unit) != NULL )
- AUNIT(unit)->awake = 0;
+ AUNIT(unit)->awake = false;
/*
* If the UNIT being put to sleep is the same one that is currently
a653sched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
{
if ( AUNIT(unit) != NULL )
- AUNIT(unit)->awake = 1;
+ AUNIT(unit)->awake = true;
cpu_raise_softirq(sched_unit_master(unit), SCHEDULE_SOFTIRQ);
}
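The awake flag toggled above is the only per-unit runnability state this
scheduler keeps; the scheduling hook can then run the unit assigned to the
current slot of the static schedule only if that flag is set. A
simplified, hypothetical sketch of that selection step (not the actual
a653sched_do_schedule()):

#include <stdbool.h>
#include <stddef.h>

struct a653_unit { bool awake; };

/* Run the slot's assigned unit only if it has been woken; otherwise
 * fall back to the idle unit. */
static struct a653_unit *pick_for_slot(struct a653_unit *assigned,
                                       struct a653_unit *idle)
{
    return (assigned != NULL && assigned->awake) ? assigned : idle;
}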
* scheduler will give preference to a partially idle package compared to
* a fully idle package, when picking a pCPU to schedule a vCPU.
*/
-bool_t sched_smt_power_savings = 0;
+bool sched_smt_power_savings;
boolean_param("sched_smt_power_savings", sched_smt_power_savings);
/* Default scheduling rate limit: 1ms
{
get_sched_res(v->processor)->curr = unit;
get_sched_res(v->processor)->sched_unit_idle = unit;
- v->is_running = 1;
+ v->is_running = true;
unit->is_running = true;
unit->state_entry_time = NOW();
}
unsigned long flags;
unsigned int old_cpu, new_cpu;
spinlock_t *old_lock, *new_lock;
- bool_t pick_called = 0;
+ bool pick_called = false;
struct vcpu *v;
/*
if ( (new_lock == get_sched_res(new_cpu)->schedule_lock) &&
cpumask_test_cpu(new_cpu, unit->domain->cpupool->cpu_valid) )
break;
- pick_called = 1;
+ pick_called = true;
}
else
{
* We do not hold the scheduler lock appropriate for this vCPU.
* Thus we cannot select a new CPU on this iteration. Try again.
*/
- pick_called = 0;
+ pick_called = false;
}
sched_spin_unlock_double(old_lock, new_lock, flags);
vcpu_runstate_change(vnext, vnext->new_state, now);
}
- vnext->is_running = 1;
+ vnext->is_running = true;
if ( is_idle_vcpu(vnext) )
vnext->sched_unit = next;
smp_wmb();
if ( vprev != vnext )
- vprev->is_running = 0;
+ vprev->is_running = false;
}
static void unit_context_saved(struct sched_resource *sr)
* the searched id is returned
* returns NULL if not found.
*/
-static struct cpupool *__cpupool_find_by_id(int id, int exact)
+static struct cpupool *__cpupool_find_by_id(int id, bool exact)
{
struct cpupool **q;
static struct cpupool *cpupool_find_by_id(int poolid)
{
- return __cpupool_find_by_id(poolid, 1);
+ return __cpupool_find_by_id(poolid, true);
}
-static struct cpupool *__cpupool_get_by_id(int poolid, int exact)
+static struct cpupool *__cpupool_get_by_id(int poolid, bool exact)
{
struct cpupool *c;
spin_lock(&cpupool_lock);
struct cpupool *cpupool_get_by_id(int poolid)
{
- return __cpupool_get_by_id(poolid, 1);
+ return __cpupool_get_by_id(poolid, true);
}
static struct cpupool *cpupool_get_next_by_id(int poolid)
{
- return __cpupool_get_by_id(poolid, 0);
+ return __cpupool_get_by_id(poolid, false);
}
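The exact flag distinguishes "return the pool with exactly this id" from
"return the first pool whose id is not below the one asked for", the
latter being what an enumeration helper like cpupool_get_next_by_id()
wants. Assuming the pool list is kept sorted by ascending id, the lookup
reduces to a shape like this standalone sketch (hypothetical types, not
the Xen implementation):

#include <stdbool.h>
#include <stddef.h>

struct pool { int id; struct pool *next; };

static struct pool *pool_find(struct pool *head, int id, bool exact)
{
    struct pool *p;

    for ( p = head; p != NULL; p = p->next )
        if ( p->id >= id )
            break;

    /* Non-exact lookups also accept the next higher id. */
    return (!exact || p == NULL || p->id == id) ? p : NULL;
}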
void cpupool_put(struct cpupool *pool)
}
/* Is the first element of cpu's runq (if any) cpu's idle unit? */
-static inline bool_t is_runq_idle(unsigned int cpu)
+static inline bool is_runq_idle(unsigned int cpu)
{
/*
* We're peeking at cpu's runq, we must hold the proper lock.
svc->start_time += (credits * MILLISECS(1)) / CSCHED_CREDITS_PER_MSEC;
}
-static bool_t __read_mostly opt_tickle_one_idle = 1;
+static bool __read_mostly opt_tickle_one_idle = true;
boolean_param("tickle_one_idle_cpu", opt_tickle_one_idle);
DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
static int
_csched_cpu_pick(const struct scheduler *ops, const struct sched_unit *unit,
- bool_t commit)
+ bool commit)
{
int cpu = sched_unit_master(unit);
/* We must always use cpu's scratch space */
* get boosted, which we don't deserve as we are "only" migrating.
*/
set_bit(CSCHED_FLAG_UNIT_MIGRATING, &svc->flags);
- return get_sched_res(_csched_cpu_pick(ops, unit, 1));
+ return get_sched_res(_csched_cpu_pick(ops, unit, true));
}
static inline void
* migrating it to run elsewhere (see multi-core and multi-thread
* support in csched_res_pick()).
*/
- new_cpu = _csched_cpu_pick(ops, currunit, 0);
+ new_cpu = _csched_cpu_pick(ops, currunit, false);
unit_schedule_unlock_irqrestore(lock, flags, currunit);
csched_unit_wake(const struct scheduler *ops, struct sched_unit *unit)
{
struct csched_unit * const svc = CSCHED_UNIT(unit);
- bool_t migrating;
+ bool migrating;
BUG_ON( is_idle_unit(unit) );
* * The hard affinity is not a subset of soft affinity
* * There is an overlap between the soft and hard affinity masks
*/
-static inline int has_soft_affinity(const struct sched_unit *unit)
+static inline bool has_soft_affinity(const struct sched_unit *unit)
{
return unit->soft_aff_effective &&
!cpumask_subset(cpupool_domain_master_cpumask(unit->domain),
static inline bool
deadline_queue_remove(struct list_head *queue, struct list_head *elem)
{
- int pos = 0;
+ bool first = true;
if ( queue->next != elem )
- pos = 1;
+ first = false;
list_del_init(elem);
- return !pos;
+ return first;
}
static inline bool
struct list_head *queue)
{
struct list_head *iter;
- int pos = 0;
+ bool first = true;
list_for_each ( iter, queue )
{
struct rt_unit * iter_svc = (*qelem)(iter);
if ( compare_unit_priority(svc, iter_svc) > 0 )
break;
- pos++;
+ first = false;
}
list_add_tail(elem, iter);
- return !pos;
+ return first;
}
#define deadline_runq_insert(...) \
deadline_queue_insert(&q_elem, ##__VA_ARGS__)
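Both helpers report whether the head of the deadline-sorted queue changed,
which is what tells the caller that a timer keyed to the old head (such as
the replenishment timer handled by the rearm logic below) has gone stale.
A standalone sketch of that idiom, with hypothetical names and a
simplified singly-linked list:

#include <stdbool.h>
#include <stddef.h>

struct item { long long deadline; struct item *next; };

/* Insert in ascending deadline order; return true if the new item became
 * the head, i.e. the caller should reprogram whatever was armed for the
 * previous head. */
static bool sorted_insert(struct item **head, struct item *it)
{
    struct item **pp = head;

    while ( *pp != NULL && (*pp)->deadline <= it->deadline )
        pp = &(*pp)->next;

    it->next = *pp;
    *pp = it;

    return pp == head;
}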
{
struct list_head *replq = rt_replq(ops);
struct rt_unit *rearm_svc = svc;
- bool_t rearm = 0;
+ bool rearm = false;
ASSERT( unit_on_replq(svc) );
{
deadline_replq_insert(svc, &svc->replq_elem, replq);
rearm_svc = replq_elem(replq->next);
- rearm = 1;
+ rearm = true;
}
else
rearm = deadline_replq_insert(svc, &svc->replq_elem, replq);
{
struct rt_unit * const svc = rt_unit(unit);
s_time_t now;
- bool_t missed;
+ bool missed;
BUG_ON( is_idle_unit(unit) );
* Use this when you don't have an existing reference to @d. It returns
* FALSE if @d is being destroyed.
*/
-static always_inline int get_domain(struct domain *d)
+static always_inline bool get_domain(struct domain *d)
{
int old, seen = atomic_read(&d->refcnt);
do
{
old = seen;
if ( unlikely(old & DOMAIN_DESTROYED) )
- return 0;
+ return false;
seen = atomic_cmpxchg(&d->refcnt, old, old + 1);
}
while ( unlikely(seen != old) );
- return 1;
+ return true;
}
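The loop above is the usual "take a reference unless a destroyed flag is
already set" compare-and-swap pattern. The same idea in standalone C11,
with hypothetical names and <stdatomic.h> in place of Xen's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>

#define OBJ_DESTROYED (1u << 31)      /* high bit doubles as the flag */

struct obj { atomic_uint refcnt; };

static bool obj_get(struct obj *o)
{
    unsigned int old = atomic_load(&o->refcnt);

    do {
        if ( old & OBJ_DESTROYED )
            return false;             /* torn down, do not resurrect  */
        /* on failure, old is reloaded with the value actually seen   */
    } while ( !atomic_compare_exchange_weak(&o->refcnt, &old, old + 1) );

    return true;
}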
/*