{
unsigned int cpu = smp_processor_id();
- if ( cpumask_test_and_clear_cpu(cpu, mask) )
+ if ( __cpumask_test_and_clear_cpu(cpu, mask) )
raise_softirq(TIMER_SOFTIRQ);
cpuidle_wakeup_mwait(mask);
continue;
if ( deadline <= now )
- cpumask_set_cpu(cpu, &mask);
+ __cpumask_set_cpu(cpu, &mask);
else if ( deadline < next_event )
next_event = deadline;
}
cpumask_copy(&cpu_eoi_map, action->cpu_eoi_map);
- if ( cpumask_test_and_clear_cpu(smp_processor_id(), &cpu_eoi_map) )
+ if ( __cpumask_test_and_clear_cpu(smp_processor_id(), &cpu_eoi_map) )
{
__set_eoi_ready(desc);
spin_unlock(&desc->lock);
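
The non-atomic test-and-clear is safe in the hunk above because cpu_eoi_map
is a private on-stack copy taken via cpumask_copy() two lines earlier, so no
other CPU can race with the update. A minimal sketch of the pattern, assuming
a hypothetical shared_map argument:

/* Snapshot a shared mask, then modify the private copy without
 * LOCK-prefixed read-modify-write operations. */
static void private_copy_sketch(const cpumask_t *shared_map)
{
    cpumask_t local;    /* on-stack: visible to this CPU only */

    cpumask_copy(&local, shared_map);
    if ( __cpumask_test_and_clear_cpu(smp_processor_id(), &local) )
    {
        /* Handle our own bit; the remaining bits stay set in local. */
    }
}
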
for_each_online_cpu(cpu)
if ( !cpumask_intersects(&mask,
per_cpu(cpu_sibling_mask, cpu)) )
- cpumask_set_cpu(cpu, &mask);
+ __cpumask_set_cpu(cpu, &mask);
flush_mask(&mask, FLUSH_CACHE);
}
else
if ( !idletime )
{
- cpumask_clear_cpu(cpu, cpumap);
+ __cpumask_clear_cpu(cpu, cpumap);
continue;
}
if ( cpumask_test_cpu(cpu, &mask) )
{
- cpumask_clear_cpu(cpu, &mask);
+ __cpumask_clear_cpu(cpu, &mask);
raise_softirq(TIMER_SOFTIRQ);
}
if ( core_weight < core_tmp )
{
core_weight = core_tmp;
- cpumask_clear(&core_candidate_map);
- cpumask_set_cpu(cpu, &core_candidate_map);
+ cpumask_copy(&core_candidate_map, cpumask_of(cpu));
}
else if ( core_weight == core_tmp )
- cpumask_set_cpu(cpu, &core_candidate_map);
+ __cpumask_set_cpu(cpu, &core_candidate_map);
}
for_each_cpu(cpu, &core_candidate_map)
if ( sibling_weight < sibling_tmp )
{
sibling_weight = sibling_tmp;
- cpumask_clear(&sibling_candidate_map);
- cpumask_set_cpu(cpu, &sibling_candidate_map);
+ cpumask_copy(&sibling_candidate_map, cpumask_of(cpu));
}
else if ( sibling_weight == sibling_tmp )
- cpumask_set_cpu(cpu, &sibling_candidate_map);
+ __cpumask_set_cpu(cpu, &sibling_candidate_map);
}
cpu = cpumask_first(&sibling_candidate_map);
if ( core_weight > core_tmp )
{
core_weight = core_tmp;
- cpumask_clear(&core_candidate_map);
- cpumask_set_cpu(cpu, &core_candidate_map);
+ cpumask_copy(&core_candidate_map, cpumask_of(cpu));
}
else if ( core_weight == core_tmp )
- cpumask_set_cpu(cpu, &core_candidate_map);
+ __cpumask_set_cpu(cpu, &core_candidate_map);
}
for_each_cpu(cpu, &core_candidate_map)
if ( sibling_weight > sibling_tmp )
{
sibling_weight = sibling_tmp;
- cpumask_clear(&sibling_candidate_map);
- cpumask_set_cpu(cpu, &sibling_candidate_map);
+ cpumask_copy(&sibling_candidate_map, cpumask_of(cpu));
}
else if ( sibling_weight == sibling_tmp )
- cpumask_set_cpu(cpu, &sibling_candidate_map);
+ __cpumask_set_cpu(cpu, &sibling_candidate_map);
}
cpu = cpumask_first(&sibling_candidate_map);
break;
}
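
The hunks above replace each cpumask_clear()/cpumask_set_cpu() pair with a
single cpumask_copy() from cpumask_of(). cpumask_of(cpu) returns a read-only
singleton mask containing just cpu, so one copy both empties the destination
and sets exactly the one bit, with no atomic operation needed:

/* Sketch: reset a candidate map to exactly { cpu } in one call. */
cpumask_copy(&core_candidate_map, cpumask_of(cpu));
/* ...semantically equivalent to, but cheaper than:
 *     cpumask_clear(&core_candidate_map);
 *     cpumask_set_cpu(cpu, &core_candidate_map);   (atomic RMW, unneeded)
 */
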
- cpumask_set_cpu(cpu, &frozen_cpus);
+ __cpumask_set_cpu(cpu, &frozen_cpus);
}
BUG_ON(!error && (num_online_cpus() != 1));
if ( cpumask_intersects(dest, per_cpu(cpu_sibling_mask, i)) )
continue;
cpu = cpumask_first(per_cpu(cpu_sibling_mask, i));
- cpumask_set_cpu(cpu, dest);
+ __cpumask_set_cpu(cpu, dest);
}
return cpumask_weight(dest);
}
cpus = find_non_smt(best_node, &node_cpus);
if ( cpus == 0 )
{
- cpumask_set_cpu(smp_processor_id(), &node_cpus);
+ __cpumask_set_cpu(smp_processor_id(), &node_cpus);
cpus = 1;
}
/* We already have the node information from round #0. */
{
if ( cur->pri != CSCHED_PRI_IDLE )
SCHED_STAT_CRANK(tickle_idlers_none);
- cpumask_set_cpu(cpu, &mask);
+ __cpumask_set_cpu(cpu, &mask);
}
else if ( !idlers_empty )
{
SCHED_VCPU_STAT_CRANK(cur, migrate_r);
SCHED_STAT_CRANK(migrate_kicked_away);
set_bit(_VPF_migrating, &cur->vcpu->pause_flags);
- cpumask_set_cpu(cpu, &mask);
+ __cpumask_set_cpu(cpu, &mask);
}
else if ( !new_idlers_empty )
{
{
this_cpu(last_tickle_cpu) =
cpumask_cycle(this_cpu(last_tickle_cpu), &idle_mask);
- cpumask_set_cpu(this_cpu(last_tickle_cpu), &mask);
+ __cpumask_set_cpu(this_cpu(last_tickle_cpu), &mask);
}
else
cpumask_or(&mask, &mask, &idle_mask);
*/
cpumask_and(&idlers, &cpu_online_map, CSCHED_PRIV(ops)->idlers);
if ( vc->processor == cpu && IS_RUNQ_IDLE(cpu) )
- cpumask_set_cpu(cpu, &idlers);
+ __cpumask_set_cpu(cpu, &idlers);
cpumask_and(&cpus, &cpus, &idlers);
/*
*/
if ( !cpumask_test_cpu(cpu, &cpus) && !cpumask_empty(&cpus) )
cpu = cpumask_cycle(cpu, &cpus);
- cpumask_clear_cpu(cpu, &cpus);
+ __cpumask_clear_cpu(cpu, &cpus);
while ( !cpumask_empty(&cpus) )
{
/* Find out which CPUs are not idle in this node */
cpumask_andnot(&workers, online, prv->idlers);
cpumask_and(&workers, &workers, &node_to_cpumask(peer_node));
- cpumask_clear_cpu(cpu, &workers);
+ __cpumask_clear_cpu(cpu, &workers);
peer_cpu = cpumask_first(&workers);
if ( peer_cpu >= nr_cpu_ids )
* lock is grabbed before calling this function
*/
static struct rt_vcpu *
-__runq_pick(const struct scheduler *ops, cpumask_t *mask)
+__runq_pick(const struct scheduler *ops, const cpumask_t *mask)
{
struct list_head *runq = rt_runq(ops);
struct list_head *iter;
}
else
{
- cpumask_t cur_cpu;
- cpumask_clear(&cur_cpu);
- cpumask_set_cpu(cpu, &cur_cpu);
- snext = __runq_pick(ops, &cur_cpu);
+ snext = __runq_pick(ops, cpumask_of(cpu));
if ( snext == NULL )
snext = rt_vcpu(idle_vcpu[cpu]);
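
Constifying __runq_pick()'s mask parameter is what enables the simplification
above: cpumask_of() yields a const cpumask_t *, so it can be passed straight
through instead of first building an equivalent on-stack cur_cpu mask with a
clear/set pair. A brief sketch of both call styles:

/* A const mask parameter accepts writable masks and cpumask_of()'s
 * read-only singleton alike. */
snext = __runq_pick(ops, cpumask_of(cpu));      /* pick for this CPU only */
snext = __runq_pick(ops, &cpu_online_map);      /* pick across online CPUs */
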
if ( !test_and_set_bit(nr, &softirq_pending(cpu)) &&
cpu != this_cpu &&
!arch_skip_send_event_check(cpu) )
- cpumask_set_cpu(cpu, raise_mask);
+ __cpumask_set_cpu(cpu, raise_mask);
if ( raise_mask == &send_mask )
smp_send_event_check_mask(raise_mask);
if ( !per_cpu(batching, this_cpu) || in_irq() )
smp_send_event_check_cpu(cpu);
else
- set_bit(nr, &per_cpu(batch_mask, this_cpu));
+ __cpumask_set_cpu(cpu, &per_cpu(batch_mask, this_cpu));
}
void cpu_raise_softirq_batch_begin(void)
ASSERT(per_cpu(batching, this_cpu));
for_each_cpu ( cpu, mask )
if ( !softirq_pending(cpu) )
- cpumask_clear_cpu(cpu, mask);
+ __cpumask_clear_cpu(cpu, mask);
smp_send_event_check_mask(mask);
cpumask_clear(mask);
--per_cpu(batching, this_cpu);
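
With the fix above (the bit recorded in batch_mask must be the target cpu,
not the softirq number nr), the batching path reads as follows: while
batching is active, cpu_raise_softirq() accumulates target CPUs in the
per-CPU batch_mask instead of sending an IPI each time, and
cpu_raise_softirq_batch_finish() prunes CPUs whose softirq was already
serviced before issuing a single event-check IPI for the rest. A hedged
usage sketch, with targets as a hypothetical caller-owned mask:

/* Raise TIMER_SOFTIRQ on many CPUs with at most one batched IPI. */
unsigned int cpu;

cpu_raise_softirq_batch_begin();
for_each_cpu ( cpu, targets )
    cpu_raise_softirq(cpu, TIMER_SOFTIRQ);   /* accumulates into batch_mask */
cpu_raise_softirq_batch_finish();            /* one smp_send_event_check_mask() */

The non-atomic __cpumask_set_cpu()/__cpumask_clear_cpu() suffice here because
batch_mask is per-CPU and only ever touched by its owning CPU.
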
set_bit(cpumask_check(cpu), dstp->bits);
}
+static inline void __cpumask_set_cpu(int cpu, cpumask_t *dstp)
+{
+ __set_bit(cpumask_check(cpu), dstp->bits);
+}
+
static inline void cpumask_clear_cpu(int cpu, volatile cpumask_t *dstp)
{
clear_bit(cpumask_check(cpu), dstp->bits);
}
+static inline void __cpumask_clear_cpu(int cpu, cpumask_t *dstp)
+{
+ __clear_bit(cpumask_check(cpu), dstp->bits);
+}
+
static inline void cpumask_setall(cpumask_t *dstp)
{
bitmap_fill(dstp->bits, nr_cpumask_bits);
#define cpumask_test_cpu(cpu, cpumask) \
test_bit(cpumask_check(cpu), (cpumask)->bits)
-static inline int cpumask_test_and_set_cpu(int cpu, cpumask_t *addr)
+static inline int cpumask_test_and_set_cpu(int cpu, volatile cpumask_t *addr)
{
return test_and_set_bit(cpumask_check(cpu), addr->bits);
}
-static inline int cpumask_test_and_clear_cpu(int cpu, cpumask_t *addr)
+static inline int __cpumask_test_and_set_cpu(int cpu, cpumask_t *addr)
+{
+ return __test_and_set_bit(cpumask_check(cpu), addr->bits);
+}
+
+static inline int cpumask_test_and_clear_cpu(int cpu, volatile cpumask_t *addr)
{
return test_and_clear_bit(cpumask_check(cpu), addr->bits);
}
+static inline int __cpumask_test_and_clear_cpu(int cpu, cpumask_t *addr)
+{
+ return __test_and_clear_bit(cpumask_check(cpu), addr->bits);
+}
+
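
The new double-underscore helpers follow the Linux __set_bit() convention:
identical semantics, but a plain rather than LOCK-prefixed read-modify-write.
They are valid only when no other CPU can update the mask concurrently, which
is exactly the property the converted call sites rely on. A rule-of-thumb
sketch (local, guarded and shared are illustrative names):

/*
 *   cpumask_t local;                      on-stack, private to this CPU
 *   __cpumask_set_cpu(cpu, &local);       OK: no concurrent writers
 *
 *   spin_lock(&lock);
 *   __cpumask_clear_cpu(cpu, &guarded);   OK: the lock serialises writers
 *   spin_unlock(&lock);
 *
 *   cpumask_set_cpu(cpu, &shared);        atomic needed: unserialised RMW
 */
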
static inline void cpumask_and(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p)
{