xenbits.xensource.com Git - people/vhanquez/xen-unstable.git/commitdiff
scheduler: Introduce pcpu_schedule_lock
author    Keir Fraser <keir@xen.org>
Fri, 24 Dec 2010 08:26:59 +0000 (08:26 +0000)
committer Keir Fraser <keir@xen.org>
Fri, 24 Dec 2010 08:26:59 +0000 (08:26 +0000)
Many places in Xen, particularly schedule.c, grab the per-cpu scheduler
spinlock directly rather than through vcpu_schedule_lock().  Since the lock
pointer may change between the time it is read and the time the lock is
successfully acquired, the caller must re-check after acquiring the lock
that the pcpu's lock pointer has not changed underneath it, due to cpu
initialization or cpupool activity.  Introduce pcpu_schedule_lock() and
friends to encapsulate that check.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
xen/arch/ia64/vmx/vmmu.c
xen/common/sched_credit.c
xen/common/schedule.c
xen/include/xen/sched-if.h
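
The pattern the patch relies on is: read the per-cpu lock pointer, acquire the
lock, then re-read the pointer and retry if it changed in the meantime.  As a
standalone illustration only (not Xen code), here is a minimal sketch of that
acquire-then-revalidate idiom, using a hypothetical cpu_lock[] table of pthread
mutexes in place of per_cpu(schedule_data, cpu).schedule_lock:

#include <pthread.h>
#include <stdatomic.h>

#define NR_CPUS 2

/* Hypothetical stand-in for per_cpu(schedule_data, cpu).schedule_lock:
 * each cpu publishes a pointer to the lock that currently protects its
 * scheduling data.  Code that already holds the old lock may repoint an
 * entry, as a cpupool move does in Xen. */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t *_Atomic cpu_lock[NR_CPUS] = { &pool_lock, &pool_lock };

/* Acquire whichever lock is currently published for 'cpu'.  The pointer
 * can change between reading it and acquiring the lock, so re-check after
 * locking and retry until the lock we hold is still the published one. */
static pthread_mutex_t *acquire_cpu_lock(int cpu)
{
    for ( ; ; )
    {
        pthread_mutex_t *lock = atomic_load(&cpu_lock[cpu]);

        pthread_mutex_lock(lock);
        if ( lock == atomic_load(&cpu_lock[cpu]) )
            return lock;            /* still current; caller unlocks later */
        pthread_mutex_unlock(lock); /* repointed while we waited: retry */
    }
}

int main(void)
{
    pthread_mutex_t *lock = acquire_cpu_lock(0);
    /* ... touch cpu 0's scheduling data ... */
    pthread_mutex_unlock(lock);
    return 0;
}

The pcpu_schedule_trylock() helper added in sched-if.h below follows the same
shape with a single attempt: if the published pointer has moved after
spin_trylock() succeeds, it drops the lock and reports failure.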

diff --git a/xen/arch/ia64/vmx/vmmu.c b/xen/arch/ia64/vmx/vmmu.c
index 9751a12d1f2a792f7bbe65e5a64b22a5c25b68c6..d92c7c73ff5e1598b42425ad9db24a9bcc4ca79d 100644
@@ -394,7 +394,7 @@ static void ptc_ga_remote_func (void *varg)
     if (cpu != current->processor)
         return;
     local_irq_save(flags);
-    if (!spin_trylock(per_cpu(schedule_data, cpu).schedule_lock))
+    if (!pcpu_schedule_trylock(cpu))
         goto bail2;
     if (v->processor != cpu)
         goto bail1;
@@ -416,7 +416,7 @@ static void ptc_ga_remote_func (void *varg)
     ia64_dv_serialize_data();
     args->vcpu = NULL;
 bail1:
-    spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
+    pcpu_schedule_unlock(cpu);
 bail2:
     local_irq_restore(flags);
 }
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index aad47706a9ad45050087f8474e181573a9f4a1c5..bfe20d30d08dd82a8d8b184224b8a554fb55199e 100644
@@ -905,7 +905,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
 
     spc->runq_sort_last = sort_epoch;
 
-    spin_lock_irqsave(per_cpu(schedule_data, cpu).schedule_lock, flags);
+    pcpu_schedule_lock_irqsave(cpu, flags);
 
     runq = &spc->runq;
     elem = runq->next;
@@ -930,7 +930,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
         elem = next;
     }
 
-    spin_unlock_irqrestore(per_cpu(schedule_data, cpu).schedule_lock, flags);
+    pcpu_schedule_unlock_irqrestore(cpu, flags);
 }
 
 static void
@@ -1259,7 +1259,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
          * cause a deadlock if the peer CPU is also load balancing and trying
          * to lock this CPU.
          */
-        if ( !spin_trylock(per_cpu(schedule_data, peer_cpu).schedule_lock) )
+        if ( !pcpu_schedule_trylock(peer_cpu) )
         {
             CSCHED_STAT_CRANK(steal_trylock_failed);
             continue;
@@ -1269,7 +1269,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
          * Any work over there to steal?
          */
         speer = csched_runq_steal(peer_cpu, cpu, snext->pri);
-        spin_unlock(per_cpu(schedule_data, peer_cpu).schedule_lock);
+        pcpu_schedule_unlock(peer_cpu);
         if ( speer != NULL )
         {
             *stolen = 1;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 812b0d1a4f0dacdf969d3f258a56c501240ef1e8..69996b2f601ef5561be4b2a999635d826de5445d 100644
@@ -424,7 +424,8 @@ static void vcpu_migrate(struct vcpu *v)
         atomic_dec(&per_cpu(schedule_data, old_cpu).urgent_count);
     }
 
-    /* Switch to new CPU, then unlock old CPU. */
+    /* Switch to new CPU, then unlock old CPU.  This is safe because
+     * the lock pointer can't change while the current lock is held. */
     v->processor = new_cpu;
     spin_unlock_irqrestore(
         per_cpu(schedule_data, old_cpu).schedule_lock, flags);
@@ -1302,7 +1303,7 @@ void schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
     vpriv = SCHED_OP(new_ops, alloc_vdata, idle, idle->domain->sched_priv);
 
-    spin_lock_irqsave(per_cpu(schedule_data, cpu).schedule_lock, flags);
+    pcpu_schedule_lock_irqsave(cpu, flags);
 
     SCHED_OP(old_ops, tick_suspend, cpu);
     vpriv_old = idle->sched_priv;
@@ -1313,7 +1314,7 @@ void schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     SCHED_OP(new_ops, tick_resume, cpu);
     SCHED_OP(new_ops, insert_vcpu, idle);
 
-    spin_unlock_irqrestore(per_cpu(schedule_data, cpu).schedule_lock, flags);
+    pcpu_schedule_unlock_irqrestore(cpu, flags);
 
     SCHED_OP(old_ops, free_vdata, vpriv_old);
     SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
@@ -1369,10 +1370,10 @@ void schedule_dump(struct cpupool *c)
 
     for_each_cpu_mask (i, *cpus)
     {
-        spin_lock(per_cpu(schedule_data, i).schedule_lock);
+        pcpu_schedule_lock(i);
         printk("CPU[%02d] ", i);
         SCHED_OP(sched, dump_cpu_state, i);
-        spin_unlock(per_cpu(schedule_data, i).schedule_lock);
+        pcpu_schedule_unlock(i);
     }
 }
 
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 84f7f5a1c8c448b49ee4225977212bb5f9dec563..e8f0262a6bf15f968be67e20a309ef12a36ba98d 100644
@@ -39,6 +39,57 @@ DECLARE_PER_CPU(struct schedule_data, schedule_data);
 DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);
 
+static inline spinlock_t *pcpu_schedule_lock(int cpu)
+{
+    spinlock_t *lock;
+
+    for ( ; ; )
+    {
+        /* The cpu's schedule_lock pointer may change under us, e.g. if
+         * moving the cpu to another cpupool switches its scheduler lock.
+         * Retry until the lock we acquired is still the published one.
+         */
+        lock = per_cpu(schedule_data, cpu).schedule_lock;
+
+        spin_lock(lock);
+        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) )
+            break;
+        spin_unlock(lock);
+    }
+    return lock;
+}
+
+static inline int pcpu_schedule_trylock(int cpu)
+{
+    spinlock_t *lock;
+
+    lock = per_cpu(schedule_data, cpu).schedule_lock;
+    if ( !spin_trylock(lock) )
+        return 0;
+    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
+        return 1;
+    else
+    {
+        spin_unlock(lock);
+        return 0;
+    }
+}
+
+#define pcpu_schedule_lock_irq(p) \
+    do { local_irq_disable(); pcpu_schedule_lock(p); } while ( 0 )
+#define pcpu_schedule_lock_irqsave(p, flags) \
+    do { local_irq_save(flags); pcpu_schedule_lock(p); } while ( 0 )
+
+static inline void pcpu_schedule_unlock(int cpu)
+{
+    spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
+}
+
+#define pcpu_schedule_unlock_irq(p) \
+    do { pcpu_schedule_unlock(p); local_irq_enable(); } while ( 0 )
+#define pcpu_schedule_unlock_irqrestore(p, flags) \
+    do { pcpu_schedule_unlock(p); local_irq_restore(flags); } while ( 0 )
+
 static inline void vcpu_schedule_lock(struct vcpu *v)
 {
     spinlock_t * lock;