introduce and use relaxed cpumask bitops
author    Jan Beulich <jbeulich@suse.com>
          Wed, 18 Feb 2015 15:55:17 +0000 (16:55 +0100)
committer Jan Beulich <jbeulich@suse.com>
          Wed, 18 Feb 2015 15:55:17 +0000 (16:55 +0100)
Using atomic (LOCKed on x86) bitops for certain operations on
cpumask_t is overkill when the variables aren't concurrently accessible
(e.g. local function variables, or variables protected by explicit
locking). Introduce alternatives using non-atomic bitops and use them
where appropriate.

Note that this
- adds a volatile qualifier to cpumask_test_and_{clear,set}_cpu()
  (which should have been there from the beginning, as is the case for
  cpumask_{clear,set}_cpu())
- replaces several cpumask_clear()+cpumask_set_cpu(, n) pairs with the
  simpler cpumask_copy(, cpumask_of(n)) (or just cpumask_of(n) where we
  can do without copying)

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
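
For context, the distinction the patch draws can be shown in a short
sketch against <xen/cpumask.h> (illustrative only; the function below is
made up and not part of the patch):

    /* A function-local mask is invisible to other CPUs, so the new
     * relaxed (double-underscore, non-LOCKed) bitops are sufficient. */
    static void build_local_mask(void)
    {
        cpumask_t mask;                    /* on-stack, single-CPU access */
        unsigned int cpu;

        cpumask_clear(&mask);
        for_each_online_cpu(cpu)
            __cpumask_set_cpu(cpu, &mask); /* no LOCK prefix emitted */

        /* A mask reachable from other CPUs still needs the atomic form,
         * e.g. cpumask_set_cpu(cpu, &some_shared_mask). */
    }
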
12 files changed:
xen/arch/x86/hpet.c
xen/arch/x86/irq.c
xen/arch/x86/mm.c
xen/arch/x86/platform_hypercall.c
xen/arch/x86/time.c
xen/common/core_parking.c
xen/common/cpu.c
xen/common/page_alloc.c
xen/common/sched_credit.c
xen/common/sched_rt.c
xen/common/softirq.c
xen/include/xen/cpumask.h

diff --git a/xen/arch/x86/hpet.c b/xen/arch/x86/hpet.c
index 7aa740f6fa433c09387ea7e81a1f13decc19bba7..8f36f6fb2f459fd615aebbd7c48c1b1427e67b8b 100644
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -158,7 +158,7 @@ static void evt_do_broadcast(cpumask_t *mask)
 {
     unsigned int cpu = smp_processor_id();
 
-    if ( cpumask_test_and_clear_cpu(cpu, mask) )
+    if ( __cpumask_test_and_clear_cpu(cpu, mask) )
         raise_softirq(TIMER_SOFTIRQ);
 
     cpuidle_wakeup_mwait(mask);
@@ -197,7 +197,7 @@ again:
             continue;
 
         if ( deadline <= now )
-            cpumask_set_cpu(cpu, &mask);
+            __cpumask_set_cpu(cpu, &mask);
         else if ( deadline < next_event )
             next_event = deadline;
     }
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index f21407240513d2e85f89d42c68a1787bad821c6a..92e1854dce6cfe1236182f0810c4a8c16a1e5e28 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1450,7 +1450,7 @@ void desc_guest_eoi(struct irq_desc *desc, struct pirq *pirq)
         
     cpumask_copy(&cpu_eoi_map, action->cpu_eoi_map);
 
-    if ( cpumask_test_and_clear_cpu(smp_processor_id(), &cpu_eoi_map) )
+    if ( __cpumask_test_and_clear_cpu(smp_processor_id(), &cpu_eoi_map) )
     {
         __set_eoi_ready(desc);
         spin_unlock(&desc->lock);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 12e5006678163cf60a71a430116e00564a2f6d3b..9ab7a264b94b4600a73f954df40715b9a2635772 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3218,7 +3218,7 @@ long do_mmuext_op(
                 for_each_online_cpu(cpu)
                     if ( !cpumask_intersects(&mask,
                                              per_cpu(cpu_sibling_mask, cpu)) )
-                        cpumask_set_cpu(cpu, &mask);
+                        __cpumask_set_cpu(cpu, &mask);
                 flush_mask(&mask, FLUSH_CACHE);
             }
             else
diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c
index b427852c67ceeb774bd16fb8ab24397826f86e59..c7255012bc56e4a2261dd98afb1719d59ef21af6 100644
--- a/xen/arch/x86/platform_hypercall.c
+++ b/xen/arch/x86/platform_hypercall.c
@@ -489,7 +489,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op)
 
             if ( !idletime )
             {
-                cpumask_clear_cpu(cpu, cpumap);
+                __cpumask_clear_cpu(cpu, cpumap);
                 continue;
             }
 
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index b6099389b432128cfbe359059a66fb6c13e1d5d3..becff9917c99fe2f205497f068c9227949baaf81 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -179,7 +179,7 @@ static void smp_send_timer_broadcast_ipi(void)
 
     if ( cpumask_test_cpu(cpu, &mask) )
     {
-        cpumask_clear_cpu(cpu, &mask);
+        __cpumask_clear_cpu(cpu, &mask);
         raise_softirq(TIMER_SOFTIRQ);
     }
 
diff --git a/xen/common/core_parking.c b/xen/common/core_parking.c
index 3190fb7a30ae36fe404d8ce19b0095af16a6fada..de269e06c21113ebc2f580143b504600d8c638c9 100644
--- a/xen/common/core_parking.c
+++ b/xen/common/core_parking.c
@@ -75,11 +75,10 @@ static unsigned int core_parking_performance(unsigned int event)
             if ( core_weight < core_tmp )
             {
                 core_weight = core_tmp;
-                cpumask_clear(&core_candidate_map);
-                cpumask_set_cpu(cpu, &core_candidate_map);
+                cpumask_copy(&core_candidate_map, cpumask_of(cpu));
             }
             else if ( core_weight == core_tmp )
-                cpumask_set_cpu(cpu, &core_candidate_map);
+                __cpumask_set_cpu(cpu, &core_candidate_map);
         }
 
         for_each_cpu(cpu, &core_candidate_map)
@@ -88,11 +87,10 @@ static unsigned int core_parking_performance(unsigned int event)
             if ( sibling_weight < sibling_tmp )
             {
                 sibling_weight = sibling_tmp;
-                cpumask_clear(&sibling_candidate_map);
-                cpumask_set_cpu(cpu, &sibling_candidate_map);
+                cpumask_copy(&sibling_candidate_map, cpumask_of(cpu));
             }
             else if ( sibling_weight == sibling_tmp )
-                cpumask_set_cpu(cpu, &sibling_candidate_map);
+                __cpumask_set_cpu(cpu, &sibling_candidate_map);
         }
 
         cpu = cpumask_first(&sibling_candidate_map);
@@ -135,11 +133,10 @@ static unsigned int core_parking_power(unsigned int event)
             if ( core_weight > core_tmp )
             {
                 core_weight = core_tmp;
-                cpumask_clear(&core_candidate_map);
-                cpumask_set_cpu(cpu, &core_candidate_map);
+                cpumask_copy(&core_candidate_map, cpumask_of(cpu));
             }
             else if ( core_weight == core_tmp )
-                cpumask_set_cpu(cpu, &core_candidate_map);
+                __cpumask_set_cpu(cpu, &core_candidate_map);
         }
 
         for_each_cpu(cpu, &core_candidate_map)
@@ -148,11 +145,10 @@ static unsigned int core_parking_power(unsigned int event)
             if ( sibling_weight > sibling_tmp )
             {
                 sibling_weight = sibling_tmp;
-                cpumask_clear(&sibling_candidate_map);
-                cpumask_set_cpu(cpu, &sibling_candidate_map);
+                cpumask_copy(&sibling_candidate_map, cpumask_of(cpu));
             }
             else if ( sibling_weight == sibling_tmp )
-                cpumask_set_cpu(cpu, &sibling_candidate_map);
+                __cpumask_set_cpu(cpu, &sibling_candidate_map);
         }
 
         cpu = cpumask_first(&sibling_candidate_map);
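
The core_parking.c hunks above are direct instances of the second note in
the commit message: resetting a candidate map to a single CPU shrinks from
a clear-plus-atomic-set pair to a single copy. The idiom in isolation:

    /* Before: two calls, the second a LOCKed bitop. */
    cpumask_clear(&core_candidate_map);
    cpumask_set_cpu(cpu, &core_candidate_map);

    /* After: one copy from the canonical read-only single-CPU mask. */
    cpumask_copy(&core_candidate_map, cpumask_of(cpu));
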
diff --git a/xen/common/cpu.c b/xen/common/cpu.c
index 630881e5ab06f35e38c701632a8dc81e989d87ec..47e8b5bb5745dee46b5892f3b3c4099598e391f1 100644
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -192,7 +192,7 @@ int disable_nonboot_cpus(void)
             break;
         }
 
-        cpumask_set_cpu(cpu, &frozen_cpus);
+        __cpumask_set_cpu(cpu, &frozen_cpus);
     }
 
     BUG_ON(!error && (num_online_cpus() != 1));
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index be6821b1ef32fbb4493dac7152c1dd0c8ce1bf73..3c27af96ea786e6afe1d02a1738695749f653075 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1337,7 +1337,7 @@ static int __init find_non_smt(unsigned int node, cpumask_t *dest)
         if ( cpumask_intersects(dest, per_cpu(cpu_sibling_mask, i)) )
             continue;
         cpu = cpumask_first(per_cpu(cpu_sibling_mask, i));
-        cpumask_set_cpu(cpu, dest);
+        __cpumask_set_cpu(cpu, dest);
     }
     return cpumask_weight(dest);
 }
@@ -1449,7 +1449,7 @@ void __init scrub_heap_pages(void)
         cpus = find_non_smt(best_node, &node_cpus);
         if ( cpus == 0 )
         {
-            cpumask_set_cpu(smp_processor_id(), &node_cpus);
+            __cpumask_set_cpu(smp_processor_id(), &node_cpus);
             cpus = 1;
         }
         /* We already have the node information from round #0. */
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 8b02b7b19289ac99c1c810c67726c928450cd494..156b43bc5b002ff99af27805bb1f4d191204e39c 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -372,7 +372,7 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
     {
         if ( cur->pri != CSCHED_PRI_IDLE )
             SCHED_STAT_CRANK(tickle_idlers_none);
-        cpumask_set_cpu(cpu, &mask);
+        __cpumask_set_cpu(cpu, &mask);
     }
     else if ( !idlers_empty )
     {
@@ -422,7 +422,7 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
                 SCHED_VCPU_STAT_CRANK(cur, migrate_r);
                 SCHED_STAT_CRANK(migrate_kicked_away);
                 set_bit(_VPF_migrating, &cur->vcpu->pause_flags);
-                cpumask_set_cpu(cpu, &mask);
+                __cpumask_set_cpu(cpu, &mask);
             }
             else if ( !new_idlers_empty )
             {
@@ -432,7 +432,7 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
                 {
                     this_cpu(last_tickle_cpu) =
                         cpumask_cycle(this_cpu(last_tickle_cpu), &idle_mask);
-                    cpumask_set_cpu(this_cpu(last_tickle_cpu), &mask);
+                    __cpumask_set_cpu(this_cpu(last_tickle_cpu), &mask);
                 }
                 else
                     cpumask_or(&mask, &mask, &idle_mask);
@@ -675,7 +675,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
          */
         cpumask_and(&idlers, &cpu_online_map, CSCHED_PRIV(ops)->idlers);
         if ( vc->processor == cpu && IS_RUNQ_IDLE(cpu) )
-            cpumask_set_cpu(cpu, &idlers);
+            __cpumask_set_cpu(cpu, &idlers);
         cpumask_and(&cpus, &cpus, &idlers);
 
         /*
@@ -692,7 +692,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
          */
         if ( !cpumask_test_cpu(cpu, &cpus) && !cpumask_empty(&cpus) )
             cpu = cpumask_cycle(cpu, &cpus);
-        cpumask_clear_cpu(cpu, &cpus);
+        __cpumask_clear_cpu(cpu, &cpus);
 
         while ( !cpumask_empty(&cpus) )
         {
@@ -1536,7 +1536,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
             /* Find out what the !idle are in this node */
             cpumask_andnot(&workers, online, prv->idlers);
             cpumask_and(&workers, &workers, &node_to_cpumask(peer_node));
-            cpumask_clear_cpu(cpu, &workers);
+            __cpumask_clear_cpu(cpu, &workers);
 
             peer_cpu = cpumask_first(&workers);
             if ( peer_cpu >= nr_cpu_ids )
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index e70d6c733f7670cf148097233acde0776c736290..df4adacbcc87f21502ad1752ac8ba512e8a42720 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -663,7 +663,7 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
  * lock is grabbed before calling this function
  */
 static struct rt_vcpu *
-__runq_pick(const struct scheduler *ops, cpumask_t *mask)
+__runq_pick(const struct scheduler *ops, const cpumask_t *mask)
 {
     struct list_head *runq = rt_runq(ops);
     struct list_head *iter;
@@ -780,10 +780,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
     }
     else
     {
-        cpumask_t cur_cpu;
-        cpumask_clear(&cur_cpu);
-        cpumask_set_cpu(cpu, &cur_cpu);
-        snext = __runq_pick(ops, &cur_cpu);
+        snext = __runq_pick(ops, cpumask_of(cpu));
         if ( snext == NULL )
             snext = rt_vcpu(idle_vcpu[cpu]);
 
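In sched_rt.c the copy disappears altogether: since __runq_pick() now
takes a const cpumask_t *, the caller can pass cpumask_of(cpu) directly
rather than materialising an on-stack mask, as the hunk above shows. The
caller-side simplification in isolation:

    /* Before: build a throwaway single-CPU mask on the stack. */
    cpumask_t cur_cpu;
    cpumask_clear(&cur_cpu);
    cpumask_set_cpu(cpu, &cur_cpu);
    snext = __runq_pick(ops, &cur_cpu);

    /* After: pass the canonical mask for this CPU, with no copy at all. */
    snext = __runq_pick(ops, cpumask_of(cpu));
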
diff --git a/xen/common/softirq.c b/xen/common/softirq.c
index 22e417ada79e05571d2bab60017d99221b09ea45..bb79aacf7845a6b17a28006ce61bd1bf0516d5d3 100644
--- a/xen/common/softirq.c
+++ b/xen/common/softirq.c
@@ -88,7 +88,7 @@ void cpumask_raise_softirq(const cpumask_t *mask, unsigned int nr)
         if ( !test_and_set_bit(nr, &softirq_pending(cpu)) &&
              cpu != this_cpu &&
              !arch_skip_send_event_check(cpu) )
-            cpumask_set_cpu(cpu, raise_mask);
+            __cpumask_set_cpu(cpu, raise_mask);
 
     if ( raise_mask == &send_mask )
         smp_send_event_check_mask(raise_mask);
@@ -106,7 +106,7 @@ void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
     if ( !per_cpu(batching, this_cpu) || in_irq() )
         smp_send_event_check_cpu(cpu);
     else
-        set_bit(nr, &per_cpu(batch_mask, this_cpu));
+        __cpumask_set_cpu(nr, &per_cpu(batch_mask, this_cpu));
 }
 
 void cpu_raise_softirq_batch_begin(void)
@@ -122,7 +122,7 @@ void cpu_raise_softirq_batch_finish(void)
     ASSERT(per_cpu(batching, this_cpu));
     for_each_cpu ( cpu, mask )
         if ( !softirq_pending(cpu) )
-            cpumask_clear_cpu(cpu, mask);
+            __cpumask_clear_cpu(cpu, mask);
     smp_send_event_check_mask(mask);
     cpumask_clear(mask);
     --per_cpu(batching, this_cpu);
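
The softirq.c changes extend the relaxed ops beyond on-stack masks:
batch_mask is a per-CPU variable that only its owning CPU ever touches, so
atomicity buys nothing there either. A sketch of the guarantee being
relied on (the helper below is hypothetical; the declaration mirrors the
file's own):

    static DEFINE_PER_CPU(cpumask_t, batch_mask);

    /* Hypothetical helper: runs on the owning CPU only, so the relaxed
     * form is safe even though the mask is not function-local. */
    static void note_batched_cpu(unsigned int cpu)
    {
        __cpumask_set_cpu(cpu, &this_cpu(batch_mask));
    }
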
diff --git a/xen/include/xen/cpumask.h b/xen/include/xen/cpumask.h
index 850b4a22d7a67808def25f7a2407da346c063fcf..0e7108c6995dec47264f3350e8e11a95eb6f4128 100644
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -103,11 +103,21 @@ static inline void cpumask_set_cpu(int cpu, volatile cpumask_t *dstp)
        set_bit(cpumask_check(cpu), dstp->bits);
 }
 
+static inline void __cpumask_set_cpu(int cpu, cpumask_t *dstp)
+{
+       __set_bit(cpumask_check(cpu), dstp->bits);
+}
+
 static inline void cpumask_clear_cpu(int cpu, volatile cpumask_t *dstp)
 {
        clear_bit(cpumask_check(cpu), dstp->bits);
 }
 
+static inline void __cpumask_clear_cpu(int cpu, cpumask_t *dstp)
+{
+       __clear_bit(cpumask_check(cpu), dstp->bits);
+}
+
 static inline void cpumask_setall(cpumask_t *dstp)
 {
        bitmap_fill(dstp->bits, nr_cpumask_bits);
@@ -122,16 +132,26 @@ static inline void cpumask_clear(cpumask_t *dstp)
 #define cpumask_test_cpu(cpu, cpumask) \
        test_bit(cpumask_check(cpu), (cpumask)->bits)
 
-static inline int cpumask_test_and_set_cpu(int cpu, cpumask_t *addr)
+static inline int cpumask_test_and_set_cpu(int cpu, volatile cpumask_t *addr)
 {
        return test_and_set_bit(cpumask_check(cpu), addr->bits);
 }
 
-static inline int cpumask_test_and_clear_cpu(int cpu, cpumask_t *addr)
+static inline int __cpumask_test_and_set_cpu(int cpu, cpumask_t *addr)
+{
+       return __test_and_set_bit(cpumask_check(cpu), addr->bits);
+}
+
+static inline int cpumask_test_and_clear_cpu(int cpu, volatile cpumask_t *addr)
 {
        return test_and_clear_bit(cpumask_check(cpu), addr->bits);
 }
 
+static inline int __cpumask_test_and_clear_cpu(int cpu, cpumask_t *addr)
+{
+       return __test_and_clear_bit(cpumask_check(cpu), addr->bits);
+}
+
 static inline void cpumask_and(cpumask_t *dstp, const cpumask_t *src1p,
                               const cpumask_t *src2p)
 {
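
Finally, the volatile qualifier added to cpumask_test_and_{clear,set}_cpu()
above brings them in line with cpumask_{clear,set}_cpu(), so the atomic
forms can be applied to volatile-qualified masks without a cast. A sketch
(the mask below is hypothetical; the pattern mirrors the hpet.c hunk):

    static volatile cpumask_t pending_mask;   /* hypothetical shared mask */

    static void consume_pending(void)
    {
        /* Other CPUs may set bits concurrently, so the LOCKed form stays. */
        if ( cpumask_test_and_clear_cpu(smp_processor_id(), &pending_mask) )
            raise_softirq(TIMER_SOFTIRQ);
    }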