snprintf(ebuf, sizeof(ebuf),
"MCE: Fatal error happened on CPUs %*pb",
- nr_cpu_ids, cpumask_bits(&mce_fatal_cpus));
+ CPUMASK_PR(&mce_fatal_cpus));
mc_panic(ebuf);
}
printk("Shot down all CPUs\n");
else
printk("Failed to shoot down CPUs {%*pbl}\n",
- nr_cpu_ids, cpumask_bits(&waiting_to_crash));
+ CPUMASK_PR(&waiting_to_crash));
/*
* Try to crash shutdown IOMMU functionality as some old crashdump
SET_DEST(entry, logical, cpu_mask_to_apicid(mask));
} else {
printk(XENLOG_ERR "IRQ%d: no target CPU (%*pb vs %*pb)\n",
- irq, nr_cpu_ids, cpumask_bits(desc->arch.cpu_mask),
- nr_cpu_ids, cpumask_bits(TARGET_CPUS));
+ irq, CPUMASK_PR(desc->arch.cpu_mask), CPUMASK_PR(TARGET_CPUS));
desc->status |= IRQ_DISABLED;
}
else
{
gprintk(XENLOG_ERR, "IRQ%d: no target CPU (%*pb vs %*pb)\n",
- irq, nr_cpu_ids, cpumask_bits(desc->arch.cpu_mask),
- nr_cpu_ids, cpumask_bits(TARGET_CPUS));
+ irq, CPUMASK_PR(desc->arch.cpu_mask), CPUMASK_PR(TARGET_CPUS));
desc->status |= IRQ_DISABLED;
rte.mask = 1;
}
spin_lock_irqsave(&desc->lock, flags);
printk(" IRQ:%4d aff:{%*pbl}/{%*pbl} vec:%02x %-15s status=%03x ",
- irq, nr_cpu_ids, cpumask_bits(desc->affinity),
- nr_cpu_ids, cpumask_bits(desc->arch.cpu_mask),
+ irq, CPUMASK_PR(desc->affinity), CPUMASK_PR(desc->arch.cpu_mask),
desc->arch.vector, desc->handler->typename, desc->status);
if ( ssid )
printk("Cannot set affinity for IRQ%u\n", irq);
else if ( break_affinity )
printk("Broke affinity for IRQ%u, new: %*pb\n",
- irq, nr_cpu_ids, cpumask_bits(affinity));
+ irq, CPUMASK_PR(affinity));
}
/* That doesn't seem sufficient. Give it 1ms. */
if ( !ret )
printk(XENLOG_INFO "SMT %s - online CPUs 0x%*pb\n",
- up ? "enabled" : "disabled",
- nr_cpu_ids, cpumask_bits(&cpu_online_map));
+ up ? "enabled" : "disabled", CPUMASK_PR(&cpu_online_map));
return ret;
}
sched_smt_power_savings? "enabled":"disabled");
printk("NOW=%"PRI_stime"\n", now);
- printk("Online Cpus: %*pbl\n", nr_cpu_ids, cpumask_bits(&cpu_online_map));
+ printk("Online Cpus: %*pbl\n", CPUMASK_PR(&cpu_online_map));
if ( !cpumask_empty(&cpupool_free_cpus) )
{
- printk("Free Cpus: %*pbl\n",
- nr_cpu_ids, cpumask_bits(&cpupool_free_cpus));
+ printk("Free Cpus: %*pbl\n", CPUMASK_PR(&cpupool_free_cpus));
schedule_dump(NULL);
}
for_each_cpupool(c)
{
printk("Cpupool %d:\n", (*c)->cpupool_id);
- printk("Cpus: %*pbl\n", nr_cpu_ids, cpumask_bits((*c)->cpu_valid));
+ printk("Cpus: %*pbl\n", CPUMASK_PR((*c)->cpu_valid));
schedule_dump(*c);
}
printk(" nr_pages=%d xenheap_pages=%d shared_pages=%u paged_pages=%u "
"dirty_cpus={%*pbl} max_pages=%u\n",
d->tot_pages, d->xenheap_pages, atomic_read(&d->shr_pages),
- atomic_read(&d->paged_pages), nr_cpu_ids,
- cpumask_bits(d->dirty_cpumask), d->max_pages);
+ atomic_read(&d->paged_pages), CPUMASK_PR(d->dirty_cpumask),
+ d->max_pages);
printk(" handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
"%02x%02x-%02x%02x%02x%02x%02x%02x vm_assist=%08lx\n",
d->handle[ 0], d->handle[ 1], d->handle[ 2], d->handle[ 3],
printk("dirty_cpu=%u", v->dirty_cpu);
printk("\n");
printk(" cpu_hard_affinity={%*pbl} cpu_soft_affinity={%*pbl}\n",
- nr_cpu_ids, cpumask_bits(v->cpu_hard_affinity),
- nr_cpu_ids, cpumask_bits(v->cpu_soft_affinity));
+ CPUMASK_PR(v->cpu_hard_affinity),
+ CPUMASK_PR(v->cpu_soft_affinity));
printk(" pause_count=%d pause_flags=%lx\n",
atomic_read(&v->pause_count), v->pause_flags);
arch_dump_vcpu_info(v);
printk("CPU[%02d] nr_run=%d, sort=%d, sibling=%*pb, core=%*pb\n",
cpu, spc->nr_runnable, spc->runq_sort_last,
- nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)),
- nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
+ CPUMASK_PR(per_cpu(cpu_sibling_mask, cpu)),
+ CPUMASK_PR(per_cpu(cpu_core_mask, cpu)));
/* current VCPU (nothing to say if that's the idle vcpu). */
svc = CSCHED_VCPU(curr_on_cpu(cpu));
prv->ticks_per_tslice,
prv->vcpu_migr_delay/ MICROSECS(1));
- printk("idlers: %*pb\n", nr_cpu_ids, cpumask_bits(prv->idlers));
+ printk("idlers: %*pb\n", CPUMASK_PR(prv->idlers));
printk("active vcpus:\n");
loop = 0;
printk("CPU[%02d] runq=%d, sibling=%*pb, core=%*pb\n",
cpu, c2r(cpu),
- nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)),
- nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
+ CPUMASK_PR(per_cpu(cpu_sibling_mask, cpu)),
+ CPUMASK_PR(per_cpu(cpu_core_mask, cpu)));
/* current VCPU (nothing to say if that's the idle vcpu) */
svc = csched2_vcpu(curr_on_cpu(cpu));
"\taveload = %"PRI_stime" (~%"PRI_stime"%%)\n",
i,
prv->rqd[i].nr_cpus,
- nr_cpu_ids, cpumask_bits(&prv->rqd[i].active),
+ CPUMASK_PR(&prv->rqd[i].active),
prv->rqd[i].max_weight,
prv->rqd[i].pick_bias,
prv->rqd[i].load,
printk("\tidlers: %*pb\n"
"\ttickled: %*pb\n"
"\tfully idle cores: %*pb\n",
- nr_cpu_ids, cpumask_bits(&prv->rqd[i].idle),
- nr_cpu_ids, cpumask_bits(&prv->rqd[i].tickled),
- nr_cpu_ids, cpumask_bits(&prv->rqd[i].smt_idle));
+ CPUMASK_PR(&prv->rqd[i].idle),
+ CPUMASK_PR(&prv->rqd[i].tickled),
+ CPUMASK_PR(&prv->rqd[i].smt_idle));
}
printk("Domain info:\n");
lock = pcpu_schedule_lock_irqsave(cpu, &flags);
printk("CPU[%02d] sibling=%*pb, core=%*pb",
- cpu,
- nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)),
- nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
+ cpu, CPUMASK_PR(per_cpu(cpu_sibling_mask, cpu)),
+ CPUMASK_PR(per_cpu(cpu_core_mask, cpu)));
if ( per_cpu(npc, cpu).vcpu != NULL )
printk(", vcpu=%pv", per_cpu(npc, cpu).vcpu);
printk("\n");
spin_lock_irqsave(&prv->lock, flags);
- printk("\tcpus_free = %*pbl\n", nr_cpu_ids, cpumask_bits(&prv->cpus_free));
+ printk("\tcpus_free = %*pbl\n", CPUMASK_PR(&prv->cpus_free));
printk("Domain info:\n");
loop = 0;
has_extratime(svc),
vcpu_on_q(svc),
vcpu_runnable(svc->vcpu),
- svc->flags,
- nr_cpu_ids, cpumask_bits(mask));
+ svc->flags, CPUMASK_PR(mask));
}
static void
typedef struct cpumask{ DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+/*
+ * printf arguments for a cpumask. Shorthand for using '%*pb[l]' when
+ * printing a cpumask.
+ */
+#define CPUMASK_PR(src) nr_cpu_ids, cpumask_bits(src)
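+/*
+ * Illustrative use, mirroring the call sites converted above (sketch only,
+ * not an additional hunk of this patch): a dump can now be written as
+ *   printk("Online Cpus: %*pbl\n", CPUMASK_PR(&cpu_online_map));
+ * which expands to
+ *   printk("Online Cpus: %*pbl\n", nr_cpu_ids, cpumask_bits(&cpu_online_map));
+ * '%*pb' renders the mask as a hex bitmap, '%*pbl' as a CPU range list.
+ */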
+
extern unsigned int nr_cpu_ids;
#if NR_CPUS > 4 * BITS_PER_LONG