]> xenbits.xensource.com Git - people/dwmw2/xen.git/commitdiff
eliminate first_cpu() etc
authorJan Beulich <jbeulich@suse.com>
Tue, 8 Nov 2011 09:36:10 +0000 (10:36 +0100)
committerJan Beulich <jbeulich@suse.com>
Tue, 8 Nov 2011 09:36:10 +0000 (10:36 +0100)
This includes the conversion from for_each_cpu_mask() to for_each_cpu().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
37 files changed:
xen/arch/ia64/linux-xen/iosapic.c
xen/arch/ia64/linux-xen/mca.c
xen/arch/ia64/linux-xen/smp.c
xen/arch/ia64/linux-xen/smpboot.c
xen/arch/ia64/vmx/vacpi.c
xen/arch/ia64/xen/dom0_ops.c
xen/arch/ia64/xen/domain.c
xen/arch/ia64/xen/vhpt.c
xen/arch/x86/acpi/cpu_idle.c
xen/arch/x86/acpi/cpufreq/cpufreq.c
xen/arch/x86/acpi/cpufreq/powernow.c
xen/arch/x86/genapic/x2apic.c
xen/arch/x86/hpet.c
xen/arch/x86/irq.c
xen/arch/x86/microcode.c
xen/arch/x86/platform_hypercall.c
xen/arch/x86/setup.c
xen/arch/x86/smp.c
xen/arch/x86/smpboot.c
xen/arch/x86/sysctl.c
xen/common/cpu.c
xen/common/cpupool.c
xen/common/domctl.c
xen/common/keyhandler.c
xen/common/perfc.c
xen/common/sched_credit2.c
xen/common/sched_sedf.c
xen/common/schedule.c
xen/common/softirq.c
xen/common/stop_machine.c
xen/common/timer.c
xen/drivers/acpi/pmstat.c
xen/drivers/cpufreq/cpufreq_ondemand.c
xen/drivers/passthrough/vtd/iommu.c
xen/include/asm-ia64/linux-xen/asm/acpi.h
xen/include/asm-x86/flushtlb.h
xen/include/xen/cpumask.h

index f9a5608f383c50bb8012e8d57bb688d1c6fa75b0..b5e42717fbe6f9c92368e0af6ce2ae5b6e57e986 100644 (file)
@@ -704,7 +704,7 @@ get_target_cpu (unsigned int gsi, int vector)
 
                cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
 
-               for_each_cpu_mask(numa_cpu, cpu_mask) {
+               for_each_cpu(numa_cpu, &cpu_mask) {
                        if (!cpu_online(numa_cpu))
                                cpumask_clear_cpu(numa_cpu, &cpu_mask);
                }
@@ -717,8 +717,8 @@ get_target_cpu (unsigned int gsi, int vector)
                /* Use vector assigment to distribute across cpus in node */
                cpu_index = vector % num_cpus;
 
-               for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
-                       numa_cpu = next_cpu(numa_cpu, cpu_mask);
+               for (numa_cpu = cpumask_first(&cpu_mask) ; i < cpu_index ; i++)
+                       numa_cpu = cpumask_next(numa_cpu, &cpu_mask);
 
                if (numa_cpu != NR_CPUS)
                        return cpu_physical_id(numa_cpu);
index 8722492edcba6b269cec2b3278e1efefdc8bb223..7d50fa4f067eff7181319a5c49d78b4d84b6c1ed 100644 (file)
@@ -1415,7 +1415,7 @@ ia64_mca_cmc_poll (void *dummy)
 #endif
 {
        /* Trigger a CMC interrupt cascade  */
-       platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
+       platform_send_ipi(cpumask_first(&cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
 }
 
 /*
@@ -1505,7 +1505,7 @@ ia64_mca_cpe_poll (void *dummy)
 #endif
 {
        /* Trigger a CPE interrupt cascade  */
-       platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
+       platform_send_ipi(cpumask_first(&cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
 }
 
 #endif /* CONFIG_ACPI */
index 5fe6651e205c33598e7efbb05444b2c34866413b..526e6eec018642c9364f807580770708011b937e 100644 (file)
@@ -462,7 +462,7 @@ on_selected_cpus(const cpumask_t *selected, void (*func) (void *info),
        call_data = &data;
        wmb();
 
-       for_each_cpu_mask(cpu, *selected)
+       for_each_cpu(cpu, selected)
                send_IPI_single(cpu, IPI_CALL_FUNC);
 
        while (atomic_read(wait ? &data.finished : &data.started) != nr_cpus)
index 9e2de6b70882e6f7e7781fe851e6cfce5e54fafd..f9ee4fd531b9096e72b8197eaaa52a6362d0b6db 100644 (file)
@@ -687,9 +687,9 @@ clear_cpu_sibling_map(int cpu)
 {
        int i;
 
-       for_each_cpu_mask(i, *per_cpu(cpu_sibling_mask, cpu))
+       for_each_cpu(i, per_cpu(cpu_sibling_mask, cpu))
                cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, i));
-       for_each_cpu_mask(i, *per_cpu(cpu_core_mask, cpu))
+       for_each_cpu(i, per_cpu(cpu_core_mask, cpu))
                cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, i));
 
        cpumask_clear(per_cpu(cpu_sibling_mask, cpu));
index d71ec011c29d0ae631b0744c7ea7da4a86590a47..1720aebcc7e6effc1d7e82c4ceed3166ab5ebd21 100644 (file)
@@ -191,7 +191,7 @@ void vacpi_init(struct domain *d)
        s->last_gtime = NOW();
 
        /* Set up callback to fire SCIs when the MSB of TMR_VAL changes */
-       init_timer(&s->timer, pmt_timer_callback, d, first_cpu(cpu_online_map));
+       init_timer(&s->timer, pmt_timer_callback, d, cpumask_first(&cpu_online_map));
        pmt_timer_callback(d);
 }
 
index 65dab551e38619e0fb7d0b7df170123f91b8b53c..92425096f419b3d6b05f192e650d14d00d255816 100644 (file)
@@ -618,7 +618,7 @@ long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
         XEN_GUEST_HANDLE_64(uint32) arr;
         uint32_t i, val, max_array_ent = ti->max_cpu_index;
 
-        ti->max_cpu_index = last_cpu(cpu_online_map);
+        ti->max_cpu_index = cpumask_last(&cpu_online_map);
         max_array_ent = min(max_array_ent, ti->max_cpu_index);
 
         arr = ti->cpu_to_core;
index e48e31876be7483e56d9768fccdb9934522ecd9a..d439e0a1b17d5baa4e5387d9b246daaa2950f296 100644 (file)
@@ -501,7 +501,7 @@ int vcpu_initialise(struct vcpu *v)
 
        if (!VMX_DOMAIN(v))
                init_timer(&v->arch.hlt_timer, hlt_timer_fn, v,
-                          first_cpu(cpu_online_map));
+                          cpumask_any(&cpu_online_map));
 
        return 0;
 }
index 5af17ba7ffc21586a9fc4037dd3815348765d2a8..684c748f43f389f46e5f410239a2a31bbca44f1c 100644 (file)
@@ -463,7 +463,7 @@ __domain_flush_vtlb_track_entry(struct domain* d,
                                local_purge = 0;
                }
        } else {
-               for_each_cpu_mask(cpu, entry->pcpu_dirty_mask) {
+               for_each_cpu(cpu, &entry->pcpu_dirty_mask) {
                        /* Invalidate VHPT entries.  */
                        cpu_flush_vhpt_range(cpu, vaddr, 1L << ps);
 
@@ -559,7 +559,7 @@ void flush_tlb_mask(const cpumask_t *mask)
     if (cpumask_subset(mask, cpumask_of(cpu)))
         return;
 
-    for_each_cpu_mask (cpu, *mask)
+    for_each_cpu (cpu, mask)
         if (cpu != smp_processor_id())
             smp_call_function_single
                 (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1);
index 805cd13053bfb7b073b041b76658ba00207e62fe..17b198731d9f9c234ac65d3cb0ba249cfed44e88 100644 (file)
@@ -251,7 +251,7 @@ void cpuidle_wakeup_mwait(cpumask_t *mask)
     cpumask_and(&target, mask, &cpuidle_mwait_flags);
 
     /* CPU is MWAITing on the cpuidle_mwait_wakeup flag. */
-    for_each_cpu_mask(cpu, target)
+    for_each_cpu(cpu, &target)
         mwait_wakeup(cpu) = 0;
 
     cpumask_andnot(mask, mask, &target);
index 5fb7cfae9e81c42bd88a08603e2e1d7c33fd0428..b868f164ff77230192d8332023424a683cf3f486 100644 (file)
@@ -487,7 +487,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
         return -EAGAIN;
     }
 
-    for_each_cpu_mask(j, online_policy_cpus)
+    for_each_cpu(j, &online_policy_cpus)
         cpufreq_statistic_update(j, perf->state, next_perf_state);
 
     perf->state = next_perf_state;
index 743321aac8a818f6c42dba5906549e745759edd7..fb174fa771a2bad1254b9ee9919b8f94718bf26b 100644 (file)
@@ -130,7 +130,7 @@ static int powernow_cpufreq_target(struct cpufreq_policy *policy,
 
     on_selected_cpus(cmd.mask, transition_pstate, &cmd, 1);
 
-    for_each_cpu_mask(j, online_policy_cpus)
+    for_each_cpu(j, &online_policy_cpus)
         cpufreq_statistic_update(j, perf->state, next_perf_state);
 
     perf->state = next_perf_state;
index acc6a026df971bc88b7904f32f42bae9be5f2810..bfddc1e32813d184c8b315c93a1cda79cc39ada1 100644 (file)
@@ -72,7 +72,7 @@ static void __send_IPI_mask_x2apic(
 
     local_irq_save(flags);
 
-    for_each_cpu_mask ( cpu, *cpumask )
+    for_each_cpu ( cpu, cpumask )
     {
         if ( !cpu_online(cpu) || (cpu == smp_processor_id()) )
             continue;
index ece6654b33cbaf33c9b2ba43d3a295574317893d..c503790cb555d1b285cb30c3a20ba8aa58c2b08a 100644 (file)
@@ -182,7 +182,7 @@ again:
     now = NOW();
 
     /* find all expired events */
-    for_each_cpu_mask(cpu, *ch->cpumask)
+    for_each_cpu(cpu, ch->cpumask)
     {
         s_time_t deadline;
 
index c0137cab0127f9c595c568e3702db314b0a25d5f..0271be93dcc3bd6744e18732899e236820e02b09 100644 (file)
@@ -125,7 +125,7 @@ static int __init __bind_irq_vector(int irq, int vector, const cpumask_t *cpu_ma
     if ( desc->arch.vector != IRQ_VECTOR_UNASSIGNED )
         return -EBUSY;
     trace_irq_mask(TRC_HW_IRQ_BIND_VECTOR, irq, vector, &online_mask);
-    for_each_cpu_mask(cpu, online_mask)
+    for_each_cpu(cpu, &online_mask)
         per_cpu(vector_irq, cpu)[vector] = irq;
     desc->arch.vector = vector;
     cpumask_copy(desc->arch.cpu_mask, &online_mask);
@@ -223,7 +223,7 @@ static void __clear_irq_vector(int irq)
     vector = desc->arch.vector;
     cpumask_and(&tmp_mask, desc->arch.cpu_mask, &cpu_online_map);
 
-    for_each_cpu_mask(cpu, tmp_mask) {
+    for_each_cpu(cpu, &tmp_mask) {
         ASSERT( per_cpu(vector_irq, cpu)[vector] == irq );
         per_cpu(vector_irq, cpu)[vector] = -1;
     }
@@ -248,7 +248,7 @@ static void __clear_irq_vector(int irq)
     old_vector = desc->arch.old_vector;
     cpumask_and(&tmp_mask, desc->arch.old_cpu_mask, &cpu_online_map);
 
-    for_each_cpu_mask(cpu, tmp_mask) {
+    for_each_cpu(cpu, &tmp_mask) {
         ASSERT( per_cpu(vector_irq, cpu)[old_vector] == irq );
         TRACE_3D(TRC_HW_IRQ_MOVE_FINISH, irq, old_vector, cpu);
         per_cpu(vector_irq, cpu)[old_vector] = -1;
@@ -451,7 +451,7 @@ static int __assign_irq_vector(
     else
         irq_used_vectors = irq_get_used_vector_mask(irq);
 
-    for_each_cpu_mask(cpu, *mask) {
+    for_each_cpu(cpu, mask) {
         int new_cpu;
         int vector, offset;
 
@@ -481,7 +481,7 @@ next:
             && test_bit(vector, irq_used_vectors) )
             goto next;
 
-        for_each_cpu_mask(new_cpu, tmp_mask)
+        for_each_cpu(new_cpu, &tmp_mask)
             if (per_cpu(vector_irq, new_cpu)[vector] != -1)
                 goto next;
         /* Found one! */
@@ -493,7 +493,7 @@ next:
             desc->arch.old_vector = desc->arch.vector;
         }
         trace_irq_mask(TRC_HW_IRQ_ASSIGN_VECTOR, irq, vector, &tmp_mask);
-        for_each_cpu_mask(new_cpu, tmp_mask)
+        for_each_cpu(new_cpu, &tmp_mask)
             per_cpu(vector_irq, new_cpu)[vector] = irq;
         desc->arch.vector = vector;
         cpumask_copy(desc->arch.cpu_mask, &tmp_mask);
index 12594b8c233a70651e766eadf185da6129fb7397..ffe625bc44b954b510864284966ab0e0819ca173 100644 (file)
@@ -125,7 +125,7 @@ static long do_microcode_update(void *_info)
     if ( error )
         info->error = error;
 
-    info->cpu = next_cpu(info->cpu, cpu_online_map);
+    info->cpu = cpumask_next(info->cpu, &cpu_online_map);
     if ( info->cpu < nr_cpu_ids )
         return continue_hypercall_on_cpu(info->cpu, do_microcode_update, info);
 
@@ -158,7 +158,7 @@ int microcode_update(XEN_GUEST_HANDLE(const_void) buf, unsigned long len)
 
     info->buffer_size = len;
     info->error = 0;
-    info->cpu = first_cpu(cpu_online_map);
+    info->cpu = cpumask_first(&cpu_online_map);
 
     return continue_hypercall_on_cpu(info->cpu, do_microcode_update, info);
 }
index c1ad9ef6365fb2ad58dadcb3eb44356ab8c841e0..79b5ec2ccccf6c03cea5b6134a6d0a4f69994e5a 100644 (file)
@@ -366,7 +366,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
             goto out;
         guest_from_compat_handle(idletimes, op->u.getidletime.idletime);
 
-        for_each_cpu_mask ( cpu, *cpumap )
+        for_each_cpu ( cpu, cpumap )
         {
             if ( idle_vcpu[cpu] == NULL )
                 cpumask_clear_cpu(cpu, cpumap);
@@ -460,7 +460,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
                 g_info->flags |= XEN_PCPU_FLAGS_ONLINE;
         }
 
-        g_info->max_present = last_cpu(cpu_present_map);
+        g_info->max_present = cpumask_last(&cpu_present_map);
 
         put_cpu_maps();
 
index 8bc77b0fc84d5dbf3b77e079e8da5855d884377f..bca36ed1aa70ad3ab496722483ca6d14bbc89775 100644 (file)
@@ -229,9 +229,9 @@ static void __init normalise_cpu_order(void)
          * Find remaining CPU with longest-prefix match on APIC ID.
          * Among identical longest-prefix matches, pick the smallest APIC ID.
          */
-        for ( j = next_cpu(i, cpu_present_map);
+        for ( j = cpumask_next(i, &cpu_present_map);
               j < nr_cpu_ids;
-              j = next_cpu(j, cpu_present_map) )
+              j = cpumask_next(j, &cpu_present_map) )
         {
             diff = x86_cpu_to_apicid[j] ^ apicid;
             while ( diff & (diff-1) )
@@ -248,12 +248,12 @@ static void __init normalise_cpu_order(void)
         /* If no match then there must be no CPUs remaining to consider. */
         if ( min_cpu >= nr_cpu_ids )
         {
-            BUG_ON(next_cpu(i, cpu_present_map) < nr_cpu_ids);
+            BUG_ON(cpumask_next(i, &cpu_present_map) < nr_cpu_ids);
             break;
         }
 
         /* Switch the best-matching CPU with the next CPU in logical order. */
-        j = next_cpu(i, cpu_present_map);
+        j = cpumask_next(i, &cpu_present_map);
         apicid = x86_cpu_to_apicid[min_cpu];
         x86_cpu_to_apicid[min_cpu] = x86_cpu_to_apicid[j];
         x86_cpu_to_apicid[j] = apicid;
index be804150340fa3f2fd04da29501af1f0293a585d..77daca5ce2f0b2a15f1055f3b994f80958360ef2 100644 (file)
@@ -182,7 +182,7 @@ void send_IPI_mask_phys(const cpumask_t *mask, int vector)
 
     local_irq_save(flags);
 
-    for_each_cpu_mask ( query_cpu, *mask )
+    for_each_cpu ( query_cpu, mask )
     {
         if ( !cpu_online(query_cpu) || (query_cpu == smp_processor_id()) )
             continue;
index 454d75b969154b5ff5551375d8ab04463867287e..e14354f29035d7efee1504f62676cb18f0ac74b0 100644 (file)
@@ -248,7 +248,7 @@ static void set_cpu_sibling_map(int cpu)
 
     if ( c[cpu].x86_num_siblings > 1 )
     {
-        for_each_cpu_mask ( i, cpu_sibling_setup_map )
+        for_each_cpu ( i, &cpu_sibling_setup_map )
         {
             if ( cpu_has(c, X86_FEATURE_TOPOEXT) ) {
                 if ( (c[cpu].phys_proc_id == c[i].phys_proc_id) &&
@@ -273,7 +273,7 @@ static void set_cpu_sibling_map(int cpu)
         return;
     }
 
-    for_each_cpu_mask ( i, cpu_sibling_setup_map )
+    for_each_cpu ( i, &cpu_sibling_setup_map )
     {
         if ( c[cpu].phys_proc_id == c[i].phys_proc_id )
         {
@@ -814,7 +814,7 @@ remove_siblinginfo(int cpu)
     int sibling;
     struct cpuinfo_x86 *c = cpu_data;
 
-    for_each_cpu_mask ( sibling, *per_cpu(cpu_core_mask, cpu) )
+    for_each_cpu ( sibling, per_cpu(cpu_core_mask, cpu) )
     {
         cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, sibling));
         /* Last thread sibling in this cpu core going down. */
@@ -822,7 +822,7 @@ remove_siblinginfo(int cpu)
             c[sibling].booted_cores--;
     }
    
-    for_each_cpu_mask(sibling, *per_cpu(cpu_sibling_mask, cpu))
+    for_each_cpu(sibling, per_cpu(cpu_sibling_mask, cpu))
         cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, sibling));
     cpumask_clear(per_cpu(cpu_sibling_mask, cpu));
     cpumask_clear(per_cpu(cpu_core_mask, cpu));
index 723d623ca58e8c113226c8a86218448ec669cf47..738e5172fea33a2cea66700be2befd10533d29cb 100644 (file)
@@ -103,7 +103,7 @@ long arch_do_sysctl(
         uint32_t i, max_cpu_index, last_online_cpu;
         xen_sysctl_topologyinfo_t *ti = &sysctl->u.topologyinfo;
 
-        last_online_cpu = last_cpu(cpu_online_map);
+        last_online_cpu = cpumask_last(&cpu_online_map);
         max_cpu_index = min_t(uint32_t, ti->max_cpu_index, last_online_cpu);
         ti->max_cpu_index = last_online_cpu;
 
index c4fadef344371e575401764abe76f3c98e930d8c..79abdb7b0926d3d0c5e38a43dbbab4431a570d0d 100644 (file)
@@ -205,7 +205,7 @@ void enable_nonboot_cpus(void)
 
     printk("Enabling non-boot CPUs  ...\n");
 
-    for_each_cpu_mask ( cpu, frozen_cpus )
+    for_each_cpu ( cpu, &frozen_cpus )
     {
         if ( (error = cpu_up(cpu)) )
         {
index e8da05be2ad33ee1237ad9dff62656966d743fe5..fcc44b1e57277dbdab49604f2f35ff3ddadc4cb9 100644 (file)
@@ -494,7 +494,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
                         op->cpupool_id, cpu);
         spin_lock(&cpupool_lock);
         if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
-            cpu = first_cpu(cpupool_free_cpus);
+            cpu = cpumask_first(&cpupool_free_cpus);
         ret = -EINVAL;
         if ( cpu >= nr_cpu_ids )
             goto addcpu_out;
index 74664f4ebc3ddca0e1093d6e30a4f7145ade8783..6705a573df03fbc5cb85d9706aa61e470b63dae7 100644 (file)
@@ -190,7 +190,7 @@ static unsigned int default_vcpu0_location(cpumask_t *online)
     cpu = cpumask_first(&cpu_exclude_map);
     if ( cpumask_weight(&cpu_exclude_map) > 1 )
         cpu = cpumask_next(cpu, &cpu_exclude_map);
-    for_each_cpu_mask(i, *online)
+    for_each_cpu(i, online)
     {
         if ( cpumask_test_cpu(i, &cpu_exclude_map) )
             continue;
@@ -541,7 +541,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
 
             cpu = (i == 0) ?
                 default_vcpu0_location(online) :
-                cycle_cpu(d->vcpu[i-1]->processor, *online);
+                cpumask_cycle(d->vcpu[i-1]->processor, online);
 
             if ( alloc_vcpu(d, i, cpu) == NULL )
                 goto maxvcpu_out;
index 86e6b25e21b32d9245886b226211e7c307a35bd0..a8f256a4101d72d43b07579769a8a794bb3cb73e 100644 (file)
@@ -128,7 +128,7 @@ static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
         return;
 
     /* Normal handling: synchronously dump the remaining CPUs' states. */
-    for_each_cpu_mask ( cpu, dump_execstate_mask )
+    for_each_cpu ( cpu, &dump_execstate_mask )
     {
         smp_send_state_dump(cpu);
         while ( cpumask_test_cpu(cpu, &dump_execstate_mask) )
index 81b41ba3d9619cee082d06112436f73c875f28ad..215e074438c5398ddd8897e2facd58f1d7a13c63 100644 (file)
@@ -211,14 +211,14 @@ static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc,
         {
         case TYPE_SINGLE:
         case TYPE_S_SINGLE:
-            for_each_cpu_mask ( cpu, perfc_cpumap )
+            for_each_cpu ( cpu, &perfc_cpumap )
                 perfc_vals[v++] = per_cpu(perfcounters, cpu)[j];
             ++j;
             break;
         case TYPE_ARRAY:
         case TYPE_S_ARRAY:
             memset(perfc_vals + v, 0, perfc_d[i].nr_vals * sizeof(*perfc_vals));
-            for_each_cpu_mask ( cpu, perfc_cpumap )
+            for_each_cpu ( cpu, &perfc_cpumap )
             {
                 perfc_t *counters = per_cpu(perfcounters, cpu) + j;
                 unsigned int k;
index 9314121092e783707ce5aefc5a9daf43bb420710..86c4439300be84a6bfbdb895559a9830e0bd3b58 100644 (file)
@@ -521,7 +521,7 @@ runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched_vcpu *n
     cpumask_andnot(&mask, &rqd->active, &rqd->idle);
     cpumask_andnot(&mask, &mask, &rqd->tickled);
 
-    for_each_cpu_mask(i, mask)
+    for_each_cpu(i, &mask)
     {
         struct csched_vcpu * cur;
 
@@ -1051,7 +1051,7 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
         else
         {
             d2printk("d%dv%d +\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id);
-            new_cpu = first_cpu(svc->migrate_rqd->active);
+            new_cpu = cpumask_first(&svc->migrate_rqd->active);
             goto out_up;
         }
     }
@@ -1061,7 +1061,7 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
     min_avgload = MAX_LOAD;
 
     /* Find the runqueue with the lowest instantaneous load */
-    for_each_cpu_mask(i, prv->active_queues)
+    for_each_cpu(i, &prv->active_queues)
     {
         struct csched_runqueue_data *rqd;
         s_time_t rqd_avgload;
@@ -1099,7 +1099,7 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
     else
     {
         BUG_ON(cpumask_empty(&prv->rqd[min_rqi].active));
-        new_cpu = first_cpu(prv->rqd[min_rqi].active);
+        new_cpu = cpumask_first(&prv->rqd[min_rqi].active);
     }
 
 out_up:
@@ -1179,7 +1179,7 @@ void migrate(const struct scheduler *ops,
             on_runq=1;
         }
         __runq_deassign(svc);
-        svc->vcpu->processor = first_cpu(trqd->active);
+        svc->vcpu->processor = cpumask_first(&trqd->active);
         __runq_assign(svc, trqd);
         if ( on_runq )
         {
@@ -1219,7 +1219,7 @@ retry:
 
     st.load_delta = 0;
 
-    for_each_cpu_mask(i, prv->active_queues)
+    for_each_cpu(i, &prv->active_queues)
     {
         s_time_t delta;
         
@@ -1618,7 +1618,7 @@ csched_schedule(
         {
             int rq;
             other_rqi = -2;
-            for_each_cpu_mask ( rq, CSCHED_PRIV(ops)->active_queues )
+            for_each_cpu ( rq, &CSCHED_PRIV(ops)->active_queues )
             {
                 if ( scurr->rqd == &CSCHED_PRIV(ops)->rqd[rq] )
                 {
@@ -1803,7 +1803,7 @@ csched_dump(const struct scheduler *ops)
            "\tdefault-weight     = %d\n",
            cpumask_weight(&prv->active_queues),
            CSCHED_DEFAULT_WEIGHT);
-    for_each_cpu_mask(i, prv->active_queues)
+    for_each_cpu(i, &prv->active_queues)
     {
         s_time_t fraction;
         
index dfb7ceef1442b7321d35b11ca7b78bd3633c5c96..76b0e9d68c4a7f78b4c0873f397ef4823cfac8f0 100644 (file)
@@ -442,7 +442,7 @@ static int sedf_pick_cpu(const struct scheduler *ops, struct vcpu *v)
 
     online = SEDF_CPUONLINE(v->domain->cpupool);
     cpumask_and(&online_affinity, v->cpu_affinity, online);
-    return first_cpu(online_affinity);
+    return cpumask_first(&online_affinity);
 }
 
 /*
@@ -1322,7 +1322,7 @@ static int sedf_adjust_weights(struct cpupool *c, struct xen_domctl_scheduler_op
 {
     struct vcpu *p;
     struct domain      *d;
-    unsigned int        cpu, nr_cpus = last_cpu(cpu_online_map) + 1;
+    unsigned int        cpu, nr_cpus = cpumask_last(&cpu_online_map) + 1;
     int                *sumw = xzalloc_array(int, nr_cpus);
     s_time_t           *sumt = xzalloc_array(s_time_t, nr_cpus);
 
index b22cf67fe5090b9b0bceb41f8d6a89b8689cae14..c07d6f0f98b63a87ada4beb748497ddf0e37d2d0 100644 (file)
@@ -1450,7 +1450,7 @@ void schedule_dump(struct cpupool *c)
     printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
     SCHED_OP(sched, dump_settings);
 
-    for_each_cpu_mask (i, *cpus)
+    for_each_cpu (i, cpus)
     {
         pcpu_schedule_lock(i);
         printk("CPU[%02d] ", i);
index 8634bafef59bb9a8221f30a39aa1507152c6f485..3f1b3029897bf71dd1e65c78128b2ea710ab996d 100644 (file)
@@ -74,7 +74,7 @@ void cpumask_raise_softirq(const cpumask_t *mask, unsigned int nr)
     cpumask_t send_mask;
 
     cpumask_clear(&send_mask);
-    for_each_cpu_mask(cpu, *mask)
+    for_each_cpu(cpu, mask)
         if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
             cpumask_set_cpu(cpu, &send_mask);
 
index eb38da58f8faf8695382d4db23a8c6368bc6c289..05905047723b090629baa7b3b1329f83e29c25bb 100644 (file)
@@ -101,7 +101,7 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
 
     smp_wmb();
 
-    for_each_cpu_mask ( i, allbutself )
+    for_each_cpu ( i, &allbutself )
         tasklet_schedule_on_cpu(&per_cpu(stopmachine_tasklet, i), i);
 
     stopmachine_set_state(STOPMACHINE_PREPARE);
index 1e51ce3c58ecffb8e5bd9cdc53f374bdcb5ecaa1..0547ea31a75798e3682663ca5b41189a88531f66 100644 (file)
@@ -548,7 +548,7 @@ static struct keyhandler dump_timerq_keyhandler = {
 
 static void migrate_timers_from_cpu(unsigned int old_cpu)
 {
-    unsigned int new_cpu = first_cpu(cpu_online_map);
+    unsigned int new_cpu = cpumask_any(&cpu_online_map);
     struct timers *old_ts, *new_ts;
     struct timer *t;
     bool_t notify = 0;
index c94a40665326e6987afd38c8dc93d1ce6bc2137c..dbc1c44a613f3ff19626e7cb96602e3e6b0e8e1d 100644 (file)
@@ -223,7 +223,7 @@ static int get_cpufreq_para(struct xen_sysctl_pm_op *op)
 
     if ( !(affected_cpus = xzalloc_array(uint32_t, op->u.get_para.cpu_num)) )
         return -ENOMEM;
-    for_each_cpu_mask(cpu, *policy->cpus)
+    for_each_cpu(cpu, policy->cpus)
         affected_cpus[j++] = cpu;
     ret = copy_to_guest(op->u.get_para.affected_cpus,
                        affected_cpus, op->u.get_para.cpu_num);
index 4323cc5e9f2d5c7209dae474c2a750f769d2bfc2..c9378adc4f349c38c232ea43e7077dd497c2250c 100644 (file)
@@ -122,7 +122,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
         return;
 
     /* Get Idle Time */
-    for_each_cpu_mask(j, *policy->cpus) {
+    for_each_cpu(j, policy->cpus) {
         uint64_t idle_ns, total_idle_ns;
         uint64_t load, load_freq, freq_avg;
         struct cpu_dbs_info_s *j_dbs_info;
@@ -233,7 +233,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
 
         dbs_enable++;
 
-        for_each_cpu_mask(j, *policy->cpus) {
+        for_each_cpu(j, policy->cpus) {
             struct cpu_dbs_info_s *j_dbs_info;
             j_dbs_info = &per_cpu(cpu_dbs_info, j);
             j_dbs_info->cur_policy = policy;
index 2d6f101f277679971e96aa1901db8b8a67050a30..349f7ed058c16a76623e45fd7072f3c2e05ce856 100644 (file)
@@ -1033,7 +1033,7 @@ static void dma_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
     msg.address_lo = (MSI_ADDRESS_HEADER << (MSI_ADDRESS_HEADER_SHIFT + 8));
     msg.address_lo |= MSI_PHYSICAL_MODE << 2;
     msg.address_lo |= MSI_REDIRECTION_HINT_MODE << 3;
-    dest = cpu_physical_id(first_cpu(mask));
+    dest = cpu_physical_id(cpumask_first(mask));
     msg.address_lo |= dest << MSI_TARGET_CPU_SHIFT;
 #endif
 
index 60067149c98318c180942ca2aecaa75f5218aed4..ab07b9acea41d34869155f2b354a9096ef6b9b5b 100644 (file)
@@ -139,7 +139,7 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
 #ifdef CONFIG_ACPI_NUMA
 extern cpumask_t early_cpu_possible_map;
 #define for_each_possible_early_cpu(cpu)  \
-       for_each_cpu_mask((cpu), early_cpu_possible_map)
+       for_each_cpu(cpu, &early_cpu_possible_map)
 
 static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
 {
index c1519d5358eaec2bdaeafad64e2930251c02a58e..7f46632c1112bb2cfd58ff82110f1d19cf1b8a3d 100644 (file)
@@ -52,7 +52,7 @@ static inline int NEED_FLUSH(u32 cpu_stamp, u32 lastuse_stamp)
 #define tlbflush_filter(mask, page_timestamp)                           \
 do {                                                                    \
     unsigned int cpu;                                                   \
-    for_each_cpu_mask ( cpu, mask )                                     \
+    for_each_cpu ( cpu, &(mask) )                                       \
         if ( !NEED_FLUSH(per_cpu(tlbflush_time, cpu), page_timestamp) ) \
             cpumask_clear_cpu(cpu, &(mask));                            \
 } while ( 0 )
index 8e69ed9b58b161769d3df81fda92e14cf82dfff3..db539486638ef00617ac5583552caf1c7e3456e0 100644 (file)
  * void cpumask_shift_right(dst, src, n) Shift right
  * void cpumask_shift_left(dst, src, n)        Shift left
  *
- * int first_cpu(mask)                 Number lowest set bit, or NR_CPUS
- * int next_cpu(cpu, mask)             Next cpu past 'cpu', or NR_CPUS
- * int last_cpu(mask)                  Number highest set bit, or NR_CPUS
- * int cycle_cpu(cpu, mask)            Next cpu cycling from 'cpu', or NR_CPUS
+ * int cpumask_first(mask)             Number lowest set bit, or NR_CPUS
+ * int cpumask_next(cpu, mask)         Next cpu past 'cpu', or NR_CPUS
+ * int cpumask_last(mask)              Number highest set bit, or NR_CPUS
+ * int cpumask_any(mask)               Any cpu in mask, or NR_CPUS
+ * int cpumask_cycle(cpu, mask)                Next cpu cycling from 'cpu', or NR_CPUS
  *
- * cpumask_t cpumask_of_cpu(cpu)       Return cpumask with bit 'cpu' set
+ * const cpumask_t *cpumask_of(cpu)    Return cpumask with bit 'cpu' set
  * unsigned long *cpumask_bits(mask)   Array of unsigned long's in mask
  *
  * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
  * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
  *
- * for_each_cpu_mask(cpu, mask)                for-loop cpu over mask
+ * for_each_cpu(cpu, mask)             for-loop cpu over mask
  *
  * int num_online_cpus()               Number of online CPUs
  * int num_possible_cpus()             Number of all possible CPUs
@@ -210,42 +211,43 @@ static inline void cpumask_shift_left(cpumask_t *dstp,
        bitmap_shift_left(dstp->bits, srcp->bits, n, nr_cpumask_bits);
 }
 
-#define cpumask_first(src) __first_cpu(src, nr_cpu_ids)
-#define first_cpu(src) __first_cpu(&(src), nr_cpu_ids)
-static inline int __first_cpu(const cpumask_t *srcp, int nbits)
+static inline int cpumask_first(const cpumask_t *srcp)
 {
-       return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
+       return min_t(int, nr_cpu_ids, find_first_bit(srcp->bits, nr_cpu_ids));
 }
 
-#define cpumask_next(n, src) __next_cpu(n, src, nr_cpu_ids)
-#define next_cpu(n, src) __next_cpu((n), &(src), nr_cpu_ids)
-static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
+static inline int cpumask_next(int n, const cpumask_t *srcp)
 {
-       return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
+       /* -1 is a legal arg here. */
+       if (n != -1)
+               cpumask_check(n);
+
+       return min_t(int, nr_cpu_ids,
+                     find_next_bit(srcp->bits, nr_cpu_ids, n + 1));
 }
 
-#define cpumask_last(src) __last_cpu(src, nr_cpu_ids)
-#define last_cpu(src) __last_cpu(&(src), nr_cpu_ids)
-static inline int __last_cpu(const cpumask_t *srcp, int nbits)
+static inline int cpumask_last(const cpumask_t *srcp)
 {
-       int cpu, pcpu = nbits;
-       for (cpu = __first_cpu(srcp, nbits);
-            cpu < nbits;
-            cpu = __next_cpu(cpu, srcp, nbits))
+       int cpu, pcpu = nr_cpu_ids;
+
+       for (cpu = cpumask_first(srcp);
+            cpu < nr_cpu_ids;
+            cpu = cpumask_next(cpu, srcp))
                pcpu = cpu;
        return pcpu;
 }
 
-#define cpumask_cycle(n, src) __cycle_cpu(n, src, nr_cpu_ids)
-#define cycle_cpu(n, src) __cycle_cpu((n), &(src), nr_cpu_ids)
-static inline int __cycle_cpu(int n, const cpumask_t *srcp, int nbits)
+static inline int cpumask_cycle(int n, const cpumask_t *srcp)
 {
-    int nxt = __next_cpu(n, srcp, nbits);
-    if (nxt == nbits)
-        nxt = __first_cpu(srcp, nbits);
+    int nxt = cpumask_next(n, srcp);
+
+    if (nxt == nr_cpu_ids)
+        nxt = cpumask_first(srcp);
     return nxt;
 }
 
+#define cpumask_any(srcp) cpumask_first(srcp)
+
 /*
  * Special-case data structure for "single bit set only" constant CPU masks.
  *
@@ -262,8 +264,6 @@ static inline const cpumask_t *cpumask_of(unsigned int cpu)
        return (const cpumask_t *)(p - cpu / BITS_PER_LONG);
 }
 
-#define cpumask_of_cpu(cpu) (*cpumask_of(cpu))
-
 #if defined(__ia64__) /* XXX needs cleanup */
 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
 
@@ -366,12 +366,13 @@ static inline void free_cpumask_var(cpumask_var_t mask)
 #endif
 
 #if NR_CPUS > 1
-#define for_each_cpu_mask(cpu, mask)           \
-       for ((cpu) = first_cpu(mask);           \
-               (cpu) < nr_cpu_ids;             \
-               (cpu) = next_cpu((cpu), (mask)))
+#define for_each_cpu(cpu, mask)                        \
+       for ((cpu) = cpumask_first(mask);       \
+            (cpu) < nr_cpu_ids;                \
+            (cpu) = cpumask_next(cpu, mask))
 #else /* NR_CPUS == 1 */
-#define for_each_cpu_mask(cpu, mask) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_cpu(cpu, mask)                        \
+       for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)(mask))
 #endif /* NR_CPUS */
 
 /*
@@ -450,18 +451,9 @@ extern cpumask_t cpu_present_map;
 #define cpu_present(cpu)       ((cpu) == 0)
 #endif
 
-#define any_online_cpu(mask)                   \
-({                                             \
-       int cpu;                                \
-       for_each_cpu_mask(cpu, (mask))          \
-               if (cpu_online(cpu))            \
-                       break;                  \
-       cpu;                                    \
-})
-
-#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
-#define for_each_online_cpu(cpu)   for_each_cpu_mask((cpu), cpu_online_map)
-#define for_each_present_cpu(cpu)  for_each_cpu_mask((cpu), cpu_present_map)
+#define for_each_possible_cpu(cpu) for_each_cpu(cpu, &cpu_possible_map)
+#define for_each_online_cpu(cpu)   for_each_cpu(cpu, &cpu_online_map)
+#define for_each_present_cpu(cpu)  for_each_cpu(cpu, &cpu_present_map)
 
 /* Copy to/from cpumap provided by control tools. */
 struct xenctl_cpumap;