xenbits.xensource.com Git - people/tklengyel/xen.git/commitdiff
cpupool: Avoid race when moving cpu between cpupools
author: Juergen Gross <juergen.gross@ts.fujitsu.com>
Fri, 25 Feb 2011 11:28:15 +0000 (11:28 +0000)
committer: Juergen Gross <juergen.gross@ts.fujitsu.com>
Fri, 25 Feb 2011 11:28:15 +0000 (11:28 +0000)
Moving cpus between cpupools is done under the schedule lock of the
moved cpu.  When checking whether a cpu is a member of a cpupool, this must be
done with the lock of that cpu being held.  Hot-unplugging of physical
cpus might encounter the same problems, but this should happen only
very rarely.

Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
Acked-by: Andre Przywara <andre.przywara@amd.com>
Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
xen/common/sched_credit.c
xen/common/schedule.c

index b53c2ad31fce7977dda60ad8f5fc21682926bd26..d4852fd7071c9ea45df4162026706dd2ed419903 100644 (file)
@@ -1268,7 +1268,8 @@ csched_load_balance(struct csched_private *prv, int cpu,
         /*
          * Any work over there to steal?
          */
-        speer = csched_runq_steal(peer_cpu, cpu, snext->pri);
+        speer = cpu_isset(peer_cpu, *online) ?
+            csched_runq_steal(peer_cpu, cpu, snext->pri) : NULL;
         pcpu_schedule_unlock(peer_cpu);
         if ( speer != NULL )
         {
index 5f192d272ca5a203a631c2ece64c90c24b459b6e..b6ef4769d187ce561c05cd1f53e8ac94b20de021 100644 (file)
@@ -394,8 +394,32 @@ static void vcpu_migrate(struct vcpu *v)
 {
     unsigned long flags;
     int old_cpu, new_cpu;
+    int same_lock;
 
-    vcpu_schedule_lock_irqsave(v, flags);
+    for (;;)
+    {
+        vcpu_schedule_lock_irqsave(v, flags);
+
+        /* Select new CPU. */
+        old_cpu = v->processor;
+        new_cpu = SCHED_OP(VCPU2OP(v), pick_cpu, v);
+        same_lock = (per_cpu(schedule_data, new_cpu).schedule_lock ==
+                     per_cpu(schedule_data, old_cpu).schedule_lock);
+
+        if ( same_lock )
+            break;
+
+        if ( !pcpu_schedule_trylock(new_cpu) )
+        {
+            vcpu_schedule_unlock_irqrestore(v, flags);
+            continue;
+        }
+        if ( cpu_isset(new_cpu, v->domain->cpupool->cpu_valid) )
+            break;
+
+        pcpu_schedule_unlock(new_cpu);
+        vcpu_schedule_unlock_irqrestore(v, flags);
+    }
 
     /*
      * NB. Check of v->running happens /after/ setting migration flag
@@ -405,14 +429,13 @@ static void vcpu_migrate(struct vcpu *v)
     if ( v->is_running ||
          !test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
     {
+        if ( !same_lock )
+            pcpu_schedule_unlock(new_cpu);
+
         vcpu_schedule_unlock_irqrestore(v, flags);
         return;
     }
 
-    /* Select new CPU. */
-    old_cpu = v->processor;
-    new_cpu = SCHED_OP(VCPU2OP(v), pick_cpu, v);
-
     /*
      * Transfer urgency status to new CPU before switching CPUs, as once
      * the switch occurs, v->is_urgent is no longer protected by the per-CPU
@@ -424,9 +447,15 @@ static void vcpu_migrate(struct vcpu *v)
         atomic_dec(&per_cpu(schedule_data, old_cpu).urgent_count);
     }
 
-    /* Switch to new CPU, then unlock old CPU.  This is safe because
-     * the lock pointer cant' change while the current lock is held. */
+    /*
+     * Switch to new CPU, then unlock new and old CPU.  This is safe because
+     * the lock pointer can't change while the current lock is held.
+     */
     v->processor = new_cpu;
+
+    if ( !same_lock )
+        pcpu_schedule_unlock(new_cpu);
+
     spin_unlock_irqrestore(
         per_cpu(schedule_data, old_cpu).schedule_lock, flags);