cpu = (i == 0) ?
    default_vcpu0_location() :
-    (d->vcpu[i-1]->processor + 1) % num_online_cpus();
+    cycle_cpu(d->vcpu[i-1]->processor, cpu_online_map);
if ( alloc_vcpu(d, i, cpu) == NULL )
    goto maxvcpu_out;
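Presumably the motivation for the hunk above: the old arithmetic assumes online CPU IDs are dense in [0, num_online_cpus()), so once CPUs are offlined or numbered sparsely it can hand back an offline CPU, whereas cycle_cpu() walks cpu_online_map itself and can only land on an online one. A minimal standalone sketch of the difference (an 8-bit unsigned stands in for cpumask_t, and next_set()/first_set()/cycle() are hypothetical stand-ins for next_cpu()/first_cpu()/cycle_cpu()):

    #include <stdio.h>

    /* Sketch only: an 8-bit mask models cpumask_t; 8 plays NR_CPUS. */
    static int next_set(int cpu, unsigned mask)   /* cf. next_cpu() */
    {
        for (int i = cpu + 1; i < 8; i++)
            if (mask & (1u << i))
                return i;
        return 8;
    }

    static int first_set(unsigned mask)           /* cf. first_cpu() */
    {
        return next_set(-1, mask);
    }

    static int cycle(int cpu, unsigned mask)      /* cf. cycle_cpu() */
    {
        int nxt = next_set(cpu, mask);
        return (nxt == 8) ? first_set(mask) : nxt;
    }

    int main(void)
    {
        unsigned online = 0x0d;  /* CPUs 0, 2, 3 online; CPU 1 offline */
        printf("old: (3 + 1) %% 3 -> CPU %d (offline)\n", (3 + 1) % 3);
        printf("new: cycle(3)    -> CPU %d (online)\n", cycle(3, online));
        return 0;
    }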
static void csched_tick(void *_cpu);
-static inline int
-__cycle_cpu(int cpu, const cpumask_t *mask)
-{
-    int nxt = next_cpu(cpu, *mask);
-    if (nxt == NR_CPUS)
-        nxt = first_cpu(*mask);
-    return nxt;
-}
-
static inline int
__vcpu_on_runq(struct csched_vcpu *svc)
{
cpus_and(cpus, cpu_online_map, vc->cpu_affinity);
cpu = cpu_isset(vc->processor, cpus)
        ? vc->processor
-        : __cycle_cpu(vc->processor, &cpus);
+        : cycle_cpu(vc->processor, cpus);
ASSERT( !cpus_empty(cpus) && cpu_isset(cpu, cpus) );
/*
cpumask_t nxt_idlers;
int nxt;
- nxt = __cycle_cpu(cpu, &cpus);
+ nxt = cycle_cpu(cpu, cpus);
if ( cpu_isset(cpu, cpu_core_map[nxt]) )
{
while ( !cpus_empty(workers) )
{
-    peer_cpu = __cycle_cpu(peer_cpu, &workers);
+    peer_cpu = cycle_cpu(peer_cpu, workers);
    cpu_clear(peer_cpu, workers);
/*
*
* int first_cpu(mask)             Number lowest set bit, or NR_CPUS
* int next_cpu(cpu, mask)         Next cpu past 'cpu', or NR_CPUS
+ * int last_cpu(mask)            Number highest set bit, or NR_CPUS
+ * int cycle_cpu(cpu, mask)      Next cpu cycling from 'cpu', or NR_CPUS
*
* cpumask_t cpumask_of_cpu(cpu)   Return cpumask with bit 'cpu' set
* CPU_MASK_ALL                    Initializer - all bits set
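Putting the documented semantics together for a mask with CPUs {1, 4, 6} set (illustrative fragment only; 'mask' is a hypothetical cpumask_t in hypervisor context):

    int c;
    c = first_cpu(mask);     /* 1: lowest set bit */
    c = next_cpu(4, mask);   /* 6: next bit past 4 */
    c = next_cpu(6, mask);   /* NR_CPUS: no bit past 6 */
    c = last_cpu(mask);      /* 6: highest set bit */
    c = cycle_cpu(6, mask);  /* 1: wraps around to the first set bit */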
#define last_cpu(src) __last_cpu(&(src), NR_CPUS)
static inline int __last_cpu(const cpumask_t *srcp, int nbits)
{
-    int cpu, pcpu = NR_CPUS;
-    for (cpu = first_cpu(*srcp); cpu < NR_CPUS; cpu = next_cpu(cpu, *srcp))
+    int cpu, pcpu = nbits;
+    for (cpu = __first_cpu(srcp, nbits);
+         cpu < nbits;
+         cpu = __next_cpu(cpu, srcp, nbits))
        pcpu = cpu;
    return pcpu;
}
+#define cycle_cpu(n, src) __cycle_cpu((n), &(src), NR_CPUS)
+static inline int __cycle_cpu(int n, const cpumask_t *srcp, int nbits)
+{
+    int nxt = __next_cpu(n, srcp, nbits);
+    if (nxt == nbits)
+        nxt = __first_cpu(srcp, nbits);
+    return nxt;
+}
+
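Two edge cases of the new helper fall straight out of the definition: if n is the only set bit, __next_cpu() returns nbits and the __first_cpu() fallback hands back n itself; on an empty mask both calls return nbits, so cycle_cpu() yields NR_CPUS. The scheduler call sites above never see the latter, since they guard with !cpus_empty() or assert it. Illustrative fragment (hypervisor context assumed):

    cpumask_t m = cpumask_of_cpu(5);  /* only CPU 5 set */
    int c = cycle_cpu(5, m);          /* 5: wraps back to itself */
    cpus_clear(m);
    c = cycle_cpu(5, m);              /* NR_CPUS: empty mask */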
#define cpumask_of_cpu(cpu) \
({ \
    typeof(_unused_cpumask_arg_) m; \