spinlock_t *lock;
unsigned long flags;
int loop;
-#define cpustr keyhandler_scratch
/*
* We need both locks:
spc = CSCHED_PCPU(cpu);
runq = &spc->runq;
- cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
- printk("CPU[%02d] nr_run=%d, sort=%d, sibling=%s, ",
- cpu, spc->nr_runnable, spc->runq_sort_last, cpustr);
- cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
- printk("core=%s\n", cpustr);
+ printk("CPU[%02d] nr_run=%d, sort=%d, sibling=%*pb, core=%*pb\n",
+ cpu, spc->nr_runnable, spc->runq_sort_last,
+ nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)),
+ nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
/* current VCPU (nothing to say if that's the idle vcpu). */
svc = CSCHED_VCPU(curr_on_cpu(cpu));
pcpu_schedule_unlock(lock, cpu);
spin_unlock_irqrestore(&prv->lock, flags);
-#undef cpustr
}
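Every hunk in this patch applies the same conversion, so it is worth spelling out once. Xen's vsnprintf() understands a %pb extension for bitmaps: the field width ('*', supplied as an argument) gives the number of significant bits, and the pointer argument is the array of unsigned longs backing the bitmap. A minimal sketch of the old and new idioms, assuming a variable 'mask' of type const cpumask_t * (the buffer and its size are illustrative):

    /* Old: render the mask into a scratch buffer, then print the buffer. */
    char buf[128];
    cpumask_scnprintf(buf, sizeof(buf), mask);
    printk("mask=%s\n", buf);

    /* New: hand the bitmap straight to printk().  %*pb consumes two
     * arguments: nr_cpu_ids as the bit count, via the '*' width, and
     * cpumask_bits(mask), the unsigned longs underlying the cpumask. */
    printk("mask=%*pb\n", nr_cpu_ids, cpumask_bits(mask));

This removes the intermediate copy and, with it, each dump function's need for the keyhandler_scratch buffer that the deleted #define/#undef pairs were aliasing.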
static void
spin_lock_irqsave(&prv->lock, flags);
-#define idlers_buf keyhandler_scratch
-
printk("info:\n"
"\tncpus = %u\n"
"\tmaster = %u\n"
prv->ticks_per_tslice,
prv->vcpu_migr_delay / MICROSECS(1));
- cpumask_scnprintf(idlers_buf, sizeof(idlers_buf), prv->idlers);
- printk("idlers: %s\n", idlers_buf);
+ printk("idlers: %*pb\n", nr_cpu_ids, cpumask_bits(prv->idlers));
printk("active vcpus:\n");
loop = 0;
vcpu_schedule_unlock(lock, svc->vcpu);
}
}
-#undef idlers_buf
spin_unlock_irqrestore(&prv->lock, flags);
}
{
struct csched2_private *prv = csched2_priv(ops);
struct csched2_vcpu *svc;
-#define cpustr keyhandler_scratch
- cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
- printk("CPU[%02d] runq=%d, sibling=%s, ", cpu, c2r(cpu), cpustr);
- cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
- printk("core=%s\n", cpustr);
+ printk("CPU[%02d] runq=%d, sibling=%*pb, core=%*pb\n",
+ cpu, c2r(cpu),
+ nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)),
+ nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
/* current VCPU (nothing to say if that's the idle vcpu) */
svc = csched2_vcpu(curr_on_cpu(cpu));
printk("\trun: ");
csched2_dump_vcpu(prv, svc);
}
-#undef cpustr
}
static void
struct csched2_private *prv = csched2_priv(ops);
unsigned long flags;
unsigned int i, j, loop;
-#define cpustr keyhandler_scratch
/*
* We need the private scheduler lock as we access global
fraction = (prv->rqd[i].avgload * 100) >> prv->load_precision_shift;
- cpulist_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].active);
printk("Runqueue %d:\n"
"\tncpus = %u\n"
- "\tcpus = %s\n"
+ "\tcpus = %*pbl\n"
"\tmax_weight = %u\n"
"\tpick_bias = %u\n"
"\tinstload = %d\n"
"\taveload = %"PRI_stime" (~%"PRI_stime"%%)\n",
i,
cpumask_weight(&prv->rqd[i].active),
- cpustr,
+ nr_cpu_ids, cpumask_bits(&prv->rqd[i].active),
prv->rqd[i].max_weight,
prv->rqd[i].pick_bias,
prv->rqd[i].load,
prv->rqd[i].avgload,
fraction);
- cpumask_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].idle);
- printk("\tidlers: %s\n", cpustr);
- cpumask_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].tickled);
- printk("\ttickled: %s\n", cpustr);
- cpumask_scnprintf(cpustr, sizeof(cpustr), &prv->rqd[i].smt_idle);
- printk("\tfully idle cores: %s\n", cpustr);
+ printk("\tidlers: %*pb\n"
+ "\ttickled: %*pb\n"
+ "\tfully idle cores: %*pb\n",
+ nr_cpu_ids, cpumask_bits(&prv->rqd[i].idle),
+ nr_cpu_ids, cpumask_bits(&prv->rqd[i].tickled),
+ nr_cpu_ids, cpumask_bits(&prv->rqd[i].smt_idle));
}
printk("Domain info:\n");
}
read_unlock_irqrestore(&prv->lock, flags);
-#undef cpustr
}
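The credit2 and null hunks also retire cpulist_scnprintf(): appending 'l' to the specifier (%*pbl) selects list notation rather than a hex mask, preserving the old cpulist-style output. A hedged illustration of the two forms, assuming nr_cpu_ids is 16 and 'mask' has bits 0-3 and 8 set:

    printk("%*pb\n",  nr_cpu_ids, cpumask_bits(mask)); /* e.g. "010f" */
    printk("%*pbl\n", nr_cpu_ids, cpumask_bits(mask)); /* e.g. "0-3,8" */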
static void *
struct null_vcpu *nvc;
spinlock_t *lock;
unsigned long flags;
-#define cpustr keyhandler_scratch
lock = pcpu_schedule_lock_irqsave(cpu, &flags);
- cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
- printk("CPU[%02d] sibling=%s, ", cpu, cpustr);
- cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
- printk("core=%s", cpustr);
+ printk("CPU[%02d] sibling=%*pb, core=%*pb",
+ cpu,
+ nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)),
+ nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
if ( per_cpu(npc, cpu).vcpu != NULL )
printk(", vcpu=%pv", per_cpu(npc, cpu).vcpu);
printk("\n");
}
pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
-#undef cpustr
}
static void null_dump(const struct scheduler *ops)
struct list_head *iter;
unsigned long flags;
unsigned int loop;
-#define cpustr keyhandler_scratch
spin_lock_irqsave(&prv->lock, flags);
- cpulist_scnprintf(cpustr, sizeof(cpustr), &prv->cpus_free);
- printk("\tcpus_free = %s\n", cpustr);
+ printk("\tcpus_free = %*pbl\n", nr_cpu_ids, cpumask_bits(&prv->cpus_free));
printk("Domain info:\n");
loop = 0;
spin_unlock(&prv->waitq_lock);
spin_unlock_irqrestore(&prv->lock, flags);
-#undef cpustr
}
const struct scheduler sched_null_def = {
cpupool_mask = cpupool_domain_cpumask(svc->vcpu->domain);
cpumask_and(mask, cpupool_mask, svc->vcpu->cpu_hard_affinity);
- cpulist_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), mask);
printk("[%5d.%-2u] cpu %u, (%"PRI_stime", %"PRI_stime"),"
" cur_b=%"PRI_stime" cur_d=%"PRI_stime" last_start=%"PRI_stime"\n"
" \t\t priority_level=%d has_extratime=%d\n"
- " \t\t onQ=%d runnable=%d flags=%x effective hard_affinity=%s\n",
+ " \t\t onQ=%d runnable=%d flags=%x effective hard_affinity=%*pbl\n",
svc->vcpu->domain->domain_id,
svc->vcpu->vcpu_id,
svc->vcpu->processor,
vcpu_on_q(svc),
vcpu_runnable(svc->vcpu),
svc->flags,
- keyhandler_scratch);
+ nr_cpu_ids, cpumask_bits(mask));
}
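A closing note on the design: keyhandler_scratch is a single shared global buffer, which is why the old code had to #define a local-looking name over it and #undef it again at the end of each dump function. Since %*pb formatting happens inside printk() itself, the patch drops that aliasing dance entirely, and these dump paths no longer depend on exclusive use of the shared scratch space.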
static void