xen/cpumask: Introduce a CPUMASK_PR() wrapper for printing
author     Andrew Cooper <andrew.cooper3@citrix.com>
Tue, 25 Jun 2019 09:48:22 +0000 (10:48 +0100)
committer  Andrew Cooper <andrew.cooper3@citrix.com>
Wed, 31 Jul 2019 13:19:02 +0000 (14:19 +0100)
Having to specify 'nr_cpu_ids, cpumask_bits(foo)' for all printing operations
is quite repetitive.  Introduce a wrapper to help.
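
For example (mirroring the xen/common/cpupool.c hunk below), a call of the form

    printk("Online Cpus: %*pbl\n", nr_cpu_ids, cpumask_bits(&cpu_online_map));

becomes

    printk("Online Cpus: %*pbl\n", CPUMASK_PR(&cpu_online_map));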

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
12 files changed:
xen/arch/x86/cpu/mcheck/mce.c
xen/arch/x86/crash.c
xen/arch/x86/io_apic.c
xen/arch/x86/irq.c
xen/arch/x86/sysctl.c
xen/common/cpupool.c
xen/common/keyhandler.c
xen/common/sched_credit.c
xen/common/sched_credit2.c
xen/common/sched_null.c
xen/common/sched_rt.c
xen/include/xen/cpumask.h

diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index 2a9747ed199d2552ce3b3a19a987d8079a2ed981..28ad7dd659b00ae9fe0c6a21a585cda6765faacf 100644
@@ -547,7 +547,7 @@ void mcheck_cmn_handler(const struct cpu_user_regs *regs)
 
             snprintf(ebuf, sizeof(ebuf),
                      "MCE: Fatal error happened on CPUs %*pb",
-                     nr_cpu_ids, cpumask_bits(&mce_fatal_cpus));
+                     CPUMASK_PR(&mce_fatal_cpus));
 
             mc_panic(ebuf);
         }
diff --git a/xen/arch/x86/crash.c b/xen/arch/x86/crash.c
index a9f3e1890c5a1b4966b0c46f9758ac6e135d1180..32132e4cb9744b57fd8409101a9765d395b9d9ce 100644
@@ -160,7 +160,7 @@ static void nmi_shootdown_cpus(void)
         printk("Shot down all CPUs\n");
     else
         printk("Failed to shoot down CPUs {%*pbl}\n",
-               nr_cpu_ids, cpumask_bits(&waiting_to_crash));
+               CPUMASK_PR(&waiting_to_crash));
 
     /*
      * Try to crash shutdown IOMMU functionality as some old crashdump
diff --git a/xen/arch/x86/io_apic.c b/xen/arch/x86/io_apic.c
index f93f7110515d9959b3d1d1c3ec4df2d9f98dc86f..5d25862bd8bbd05b11c54dcfc75d7e09abb0ab1c 100644
@@ -2238,8 +2238,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
         SET_DEST(entry, logical, cpu_mask_to_apicid(mask));
     } else {
         printk(XENLOG_ERR "IRQ%d: no target CPU (%*pb vs %*pb)\n",
-               irq, nr_cpu_ids, cpumask_bits(desc->arch.cpu_mask),
-               nr_cpu_ids, cpumask_bits(TARGET_CPUS));
+               irq, CPUMASK_PR(desc->arch.cpu_mask), CPUMASK_PR(TARGET_CPUS));
         desc->status |= IRQ_DISABLED;
     }
 
@@ -2437,8 +2436,7 @@ int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
     else
     {
         gprintk(XENLOG_ERR, "IRQ%d: no target CPU (%*pb vs %*pb)\n",
-               irq, nr_cpu_ids, cpumask_bits(desc->arch.cpu_mask),
-               nr_cpu_ids, cpumask_bits(TARGET_CPUS));
+               irq, CPUMASK_PR(desc->arch.cpu_mask), CPUMASK_PR(TARGET_CPUS));
         desc->status |= IRQ_DISABLED;
         rte.mask = 1;
     }
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 668a1f5b367d6117c3323b9319475117991379c7..0ee33464d2d72fba55b4f62fe449559b48db1459 100644
@@ -2398,8 +2398,7 @@ static void dump_irqs(unsigned char key)
         spin_lock_irqsave(&desc->lock, flags);
 
         printk("   IRQ:%4d aff:{%*pbl}/{%*pbl} vec:%02x %-15s status=%03x ",
-               irq, nr_cpu_ids, cpumask_bits(desc->affinity),
-               nr_cpu_ids, cpumask_bits(desc->arch.cpu_mask),
+               irq, CPUMASK_PR(desc->affinity), CPUMASK_PR(desc->arch.cpu_mask),
                desc->arch.vector, desc->handler->typename, desc->status);
 
         if ( ssid )
@@ -2563,7 +2562,7 @@ void fixup_irqs(const cpumask_t *mask, bool verbose)
             printk("Cannot set affinity for IRQ%u\n", irq);
         else if ( break_affinity )
             printk("Broke affinity for IRQ%u, new: %*pb\n",
-                   irq, nr_cpu_ids, cpumask_bits(affinity));
+                   irq, CPUMASK_PR(affinity));
     }
 
     /* That doesn't seem sufficient.  Give it 1ms. */
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index 3f06fecbd888460225dbcb926755bfbcdb06f5b6..c50d910a1c3ec8d0007f65baaae44301a1966f94 100644
@@ -150,8 +150,7 @@ static long smt_up_down_helper(void *data)
 
     if ( !ret )
         printk(XENLOG_INFO "SMT %s - online CPUs 0x%*pb\n",
-               up ? "enabled" : "disabled",
-               nr_cpu_ids, cpumask_bits(&cpu_online_map));
+               up ? "enabled" : "disabled", CPUMASK_PR(&cpu_online_map));
 
     return ret;
 }
diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index 31ac323e40288f393e938b1ee3cf8e26807cd207..f90e496edafe147058c4e01e87bf10a59d7eb258 100644
@@ -712,18 +712,17 @@ void dump_runq(unsigned char key)
             sched_smt_power_savings? "enabled":"disabled");
     printk("NOW=%"PRI_stime"\n", now);
 
-    printk("Online Cpus: %*pbl\n", nr_cpu_ids, cpumask_bits(&cpu_online_map));
+    printk("Online Cpus: %*pbl\n", CPUMASK_PR(&cpu_online_map));
     if ( !cpumask_empty(&cpupool_free_cpus) )
     {
-        printk("Free Cpus: %*pbl\n",
-               nr_cpu_ids, cpumask_bits(&cpupool_free_cpus));
+        printk("Free Cpus: %*pbl\n", CPUMASK_PR(&cpupool_free_cpus));
         schedule_dump(NULL);
     }
 
     for_each_cpupool(c)
     {
         printk("Cpupool %d:\n", (*c)->cpupool_id);
-        printk("Cpus: %*pbl\n", nr_cpu_ids, cpumask_bits((*c)->cpu_valid));
+        printk("Cpus: %*pbl\n", CPUMASK_PR((*c)->cpu_valid));
         schedule_dump(*c);
     }
 
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index 4f4a660b0cf035e57cdc6b30c8a9e5446ed5ac1a..a5e95e2fe90037d86396d1efe96ad1cec6b3946c 100644
@@ -272,8 +272,8 @@ static void dump_domains(unsigned char key)
         printk("    nr_pages=%d xenheap_pages=%d shared_pages=%u paged_pages=%u "
                "dirty_cpus={%*pbl} max_pages=%u\n",
                d->tot_pages, d->xenheap_pages, atomic_read(&d->shr_pages),
-               atomic_read(&d->paged_pages), nr_cpu_ids,
-               cpumask_bits(d->dirty_cpumask), d->max_pages);
+               atomic_read(&d->paged_pages), CPUMASK_PR(d->dirty_cpumask),
+               d->max_pages);
         printk("    handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
                "%02x%02x-%02x%02x%02x%02x%02x%02x vm_assist=%08lx\n",
                d->handle[ 0], d->handle[ 1], d->handle[ 2], d->handle[ 3],
@@ -312,8 +312,8 @@ static void dump_domains(unsigned char key)
                 printk("dirty_cpu=%u", v->dirty_cpu);
             printk("\n");
             printk("    cpu_hard_affinity={%*pbl} cpu_soft_affinity={%*pbl}\n",
-                   nr_cpu_ids, cpumask_bits(v->cpu_hard_affinity),
-                   nr_cpu_ids, cpumask_bits(v->cpu_soft_affinity));
+                   CPUMASK_PR(v->cpu_hard_affinity),
+                   CPUMASK_PR(v->cpu_soft_affinity));
             printk("    pause_count=%d pause_flags=%lx\n",
                    atomic_read(&v->pause_count), v->pause_flags);
             arch_dump_vcpu_info(v);
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 3c0d7c72670e4a595dbcb0ddc5930869c604e409..81dee5e4724a49bb6ec6251ef8e15d4a5cc40350 100644
@@ -2057,8 +2057,8 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
 
     printk("CPU[%02d] nr_run=%d, sort=%d, sibling=%*pb, core=%*pb\n",
            cpu, spc->nr_runnable, spc->runq_sort_last,
-           nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)),
-           nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
+           CPUMASK_PR(per_cpu(cpu_sibling_mask, cpu)),
+           CPUMASK_PR(per_cpu(cpu_core_mask, cpu)));
 
     /* current VCPU (nothing to say if that's the idle vcpu). */
     svc = CSCHED_VCPU(curr_on_cpu(cpu));
@@ -2119,7 +2119,7 @@ csched_dump(const struct scheduler *ops)
            prv->ticks_per_tslice,
            prv->vcpu_migr_delay/ MICROSECS(1));
 
-    printk("idlers: %*pb\n", nr_cpu_ids, cpumask_bits(prv->idlers));
+    printk("idlers: %*pb\n", CPUMASK_PR(prv->idlers));
 
     printk("active vcpus:\n");
     loop = 0;
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index fbdc4618cbe02e05bf508269e9265d5b765b43c6..6fff210ca6e0c010c5d37d526834f181f78d9894 100644
@@ -3654,8 +3654,8 @@ dump_pcpu(const struct scheduler *ops, int cpu)
 
     printk("CPU[%02d] runq=%d, sibling=%*pb, core=%*pb\n",
            cpu, c2r(cpu),
-           nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)),
-           nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
+           CPUMASK_PR(per_cpu(cpu_sibling_mask, cpu)),
+           CPUMASK_PR(per_cpu(cpu_core_mask, cpu)));
 
     /* current VCPU (nothing to say if that's the idle vcpu) */
     svc = csched2_vcpu(curr_on_cpu(cpu));
@@ -3699,7 +3699,7 @@ csched2_dump(const struct scheduler *ops)
                "\taveload            = %"PRI_stime" (~%"PRI_stime"%%)\n",
                i,
                prv->rqd[i].nr_cpus,
-               nr_cpu_ids, cpumask_bits(&prv->rqd[i].active),
+               CPUMASK_PR(&prv->rqd[i].active),
                prv->rqd[i].max_weight,
                prv->rqd[i].pick_bias,
                prv->rqd[i].load,
@@ -3709,9 +3709,9 @@ csched2_dump(const struct scheduler *ops)
         printk("\tidlers: %*pb\n"
                "\ttickled: %*pb\n"
                "\tfully idle cores: %*pb\n",
-               nr_cpu_ids, cpumask_bits(&prv->rqd[i].idle),
-               nr_cpu_ids, cpumask_bits(&prv->rqd[i].tickled),
-               nr_cpu_ids, cpumask_bits(&prv->rqd[i].smt_idle));
+               CPUMASK_PR(&prv->rqd[i].idle),
+               CPUMASK_PR(&prv->rqd[i].tickled),
+               CPUMASK_PR(&prv->rqd[i].smt_idle));
     }
 
     printk("Domain info:\n");
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index c47c1b5aaeea54d85bc9d42bfe0674d23a45b65d..5aec9f17bde49e8a2eb52ac20db1b67b6729cafd 100644
@@ -782,9 +782,8 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
     lock = pcpu_schedule_lock_irqsave(cpu, &flags);
 
     printk("CPU[%02d] sibling=%*pb, core=%*pb",
-           cpu,
-           nr_cpu_ids, cpumask_bits(per_cpu(cpu_sibling_mask, cpu)),
-           nr_cpu_ids, cpumask_bits(per_cpu(cpu_core_mask, cpu)));
+           cpu, CPUMASK_PR(per_cpu(cpu_sibling_mask, cpu)),
+           CPUMASK_PR(per_cpu(cpu_core_mask, cpu)));
     if ( per_cpu(npc, cpu).vcpu != NULL )
         printk(", vcpu=%pv", per_cpu(npc, cpu).vcpu);
     printk("\n");
@@ -810,7 +809,7 @@ static void null_dump(const struct scheduler *ops)
 
     spin_lock_irqsave(&prv->lock, flags);
 
-    printk("\tcpus_free = %*pbl\n", nr_cpu_ids, cpumask_bits(&prv->cpus_free));
+    printk("\tcpus_free = %*pbl\n", CPUMASK_PR(&prv->cpus_free));
 
     printk("Domain info:\n");
     loop = 0;
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 0acfc3d7029605254ed52604444201d633c6a2ac..e0e350bdf33110bafc94486aed59ff73a271f2da 100644
@@ -344,8 +344,7 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
             has_extratime(svc),
             vcpu_on_q(svc),
             vcpu_runnable(svc->vcpu),
-            svc->flags,
-            nr_cpu_ids, cpumask_bits(mask));
+            svc->flags, CPUMASK_PR(mask));
 }
 
 static void
diff --git a/xen/include/xen/cpumask.h b/xen/include/xen/cpumask.h
index ae902275c83061683d76b8eba0ae613e4cb3674e..256b60b106a3d44643ee16859d22152fb2c339a6 100644
 
 typedef struct cpumask{ DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
 
+/*
+ * printf arguments for a cpumask.  Shorthand for using '%*pb[l]' when
+ * printing a cpumask.
+ */
+#define CPUMASK_PR(src) nr_cpu_ids, cpumask_bits(src)
+
 extern unsigned int nr_cpu_ids;
 
 #if NR_CPUS > 4 * BITS_PER_LONG