x86/S3: Restore broken vcpu affinity on resume
author    Ben Guthro <benjamin.guthro@citrix.com>
          Tue, 9 Apr 2013 14:05:52 +0000 (16:05 +0200)
committer Jan Beulich <jbeulich@suse.com>
          Tue, 9 Apr 2013 14:05:52 +0000 (16:05 +0200)
When in SYS_STATE_suspend and going through the cpu_disable_scheduler
path, save a copy of the current cpu affinity and set a flag to
restore it later.

Later, in the resume process, when enabling non-boot CPUs, restore
these affinities.

Signed-off-by: Ben Guthro <benjamin.guthro@citrix.com>
Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
Acked-by: Keir Fraser <keir@xen.org>
master commit: 41e71c2607e036f1ac00df898b8f4acb2d4df7ee
master date: 2013-04-02 09:52:32 +0200
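
In outline: while CPUs are taken offline with system_state == SYS_STATE_suspend,
cpu_disable_scheduler() stashes any affinity it has to break; once the non-boot
CPUs are back online, thaw_domains() puts the stashed masks back via
restore_vcpu_affinity() before unpausing the domains. The following is a
minimal, self-contained C model of that handshake, not Xen code: cpumasks are
reduced to a single word, and break_affinity()/restore_affinity() are
hypothetical stand-ins for the logic the patch adds to cpu_disable_scheduler()
and restore_vcpu_affinity().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model: CPUs are bits in one word, one vCPU.  Field names mirror
 * the ones the patch adds to struct vcpu. */
struct toy_vcpu {
    uint32_t cpu_affinity;        /* CPUs the vCPU may run on */
    uint32_t cpu_affinity_saved;  /* copy taken when affinity is broken */
    bool     affinity_broken;     /* "restore me on resume" flag */
};

static bool suspending;  /* stands in for system_state == SYS_STATE_suspend */

/* Suspend side: called as a CPU goes offline.  If no CPU in the vCPU's
 * affinity mask remains online, widen the mask so the vCPU can still be
 * scheduled somewhere, but remember the original mask first. */
static void break_affinity(struct toy_vcpu *v, uint32_t online_cpus)
{
    if ( (v->cpu_affinity & online_cpus) == 0 )
    {
        if ( suspending )
        {
            v->cpu_affinity_saved = v->cpu_affinity;
            v->affinity_broken = true;
        }
        v->cpu_affinity = ~0u;              /* cpumask_setall() */
    }
}

/* Resume side: what restore_vcpu_affinity() does for each vCPU. */
static void restore_affinity(struct toy_vcpu *v)
{
    if ( v->affinity_broken )
    {
        v->cpu_affinity = v->cpu_affinity_saved;
        v->affinity_broken = false;
    }
}

int main(void)
{
    struct toy_vcpu v = { .cpu_affinity = 1u << 3 };  /* pinned to CPU 3 */

    suspending = true;
    break_affinity(&v, 1u << 0);          /* only CPU 0 is still online */
    printf("during suspend: affinity=%#x broken=%d\n",
           (unsigned)v.cpu_affinity, (int)v.affinity_broken);

    suspending = false;
    restore_affinity(&v);
    printf("after resume:   affinity=%#x broken=%d\n",
           (unsigned)v.cpu_affinity, (int)v.affinity_broken);
    return 0;
}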

xen/arch/x86/acpi/power.c
xen/common/domain.c
xen/common/schedule.c
xen/include/xen/sched.h

diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c
index 9e1f98904f40c4023734c7ef7b8d8a1dcdc10779..72adc2fae846d5bf4ae7d1d8430d166d5d9604eb 100644
--- a/xen/arch/x86/acpi/power.c
+++ b/xen/arch/x86/acpi/power.c
@@ -96,7 +96,10 @@ static void thaw_domains(void)
 
     rcu_read_lock(&domlist_read_lock);
     for_each_domain ( d )
+    {
+        restore_vcpu_affinity(d);
         domain_unpause(d);
+    }
     rcu_read_unlock(&domlist_read_lock);
 }
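
For readability, the post-patch thaw_domains() reads roughly as below. This is
reconstructed from the hunk above (the local variable declaration is assumed);
the point of the ordering is that saved affinities are restored, and the
affected vCPUs queued for migration, before domain_unpause() lets them run
again.

static void thaw_domains(void)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
    {
        restore_vcpu_affinity(d);   /* put any saved affinity back first */
        domain_unpause(d);          /* then let the domain run again */
    }
    rcu_read_unlock(&domlist_read_lock);
}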
 
diff --git a/xen/common/domain.c b/xen/common/domain.c
index e728819ac338ee54833122e709a278195eeea27c..c09fb73b8a0032d7820e5b441baf0769c452362b 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -126,6 +126,7 @@ struct vcpu *alloc_vcpu(
 
     if ( !zalloc_cpumask_var(&v->cpu_affinity) ||
          !zalloc_cpumask_var(&v->cpu_affinity_tmp) ||
+         !zalloc_cpumask_var(&v->cpu_affinity_saved) ||
          !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
         goto fail_free;
 
@@ -155,6 +156,7 @@ struct vcpu *alloc_vcpu(
  fail_free:
         free_cpumask_var(v->cpu_affinity);
         free_cpumask_var(v->cpu_affinity_tmp);
+        free_cpumask_var(v->cpu_affinity_saved);
         free_cpumask_var(v->vcpu_dirty_cpumask);
         free_vcpu_struct(v);
         return NULL;
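
The alloc_vcpu() hunks just extend the existing pattern: every per-vCPU
cpumask allocated up front must also be released on the fail_free path. As a
generic, self-contained illustration of that allocate-several,
free-all-on-failure idiom (plain calloc/malloc/free here, not Xen's
cpumask_var_t helpers; widget_alloc() is a made-up example):

#include <stdlib.h>

struct widget {
    void *a, *b, *c;   /* stand-ins for the per-vCPU masks */
};

/* Allocate everything; on any failure, free whatever was obtained.
 * calloc() zeroes the pointers and free(NULL) is a no-op, so a single
 * cleanup label suffices, the same shape as alloc_vcpu()'s fail_free. */
struct widget *widget_alloc(void)
{
    struct widget *w = calloc(1, sizeof(*w));

    if ( w == NULL )
        return NULL;

    if ( (w->a = malloc(64)) == NULL ||
         (w->b = malloc(64)) == NULL ||
         (w->c = malloc(64)) == NULL )   /* the newly added third mask */
        goto fail_free;

    return w;

 fail_free:
    free(w->a);
    free(w->b);
    free(w->c);
    free(w);
    return NULL;
}
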
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index c2cd9d5327c7b4fe4bf40571827198e2c41efc6f..7f298d827f0624801848dfcc55584b969f5e26ff 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -538,6 +538,38 @@ void vcpu_force_reschedule(struct vcpu *v)
     }
 }
 
+void restore_vcpu_affinity(struct domain *d)
+{
+    struct vcpu *v;
+
+    for_each_vcpu ( d, v )
+    {
+        vcpu_schedule_lock_irq(v);
+
+        if ( v->affinity_broken )
+        {
+            printk(XENLOG_DEBUG "Restoring affinity for d%dv%d\n",
+                   d->domain_id, v->vcpu_id);
+            cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
+            v->affinity_broken = 0;
+        }
+
+        if ( v->processor == smp_processor_id() )
+        {
+            set_bit(_VPF_migrating, &v->pause_flags);
+            vcpu_schedule_unlock_irq(v);
+            vcpu_sleep_nosync(v);
+            vcpu_migrate(v);
+        }
+        else
+        {
+            vcpu_schedule_unlock_irq(v);
+        }
+    }
+
+    domain_update_node_affinity(d);
+}
+
 /*
  * This function is used by cpu_hotplug code from stop_machine context
  * and from cpupools to switch schedulers on a cpu.
@@ -551,7 +583,7 @@ int cpu_disable_scheduler(unsigned int cpu)
     int    ret = 0;
 
     c = per_cpu(cpupool, cpu);
-    if ( (c == NULL) || (system_state == SYS_STATE_suspend) )
+    if ( c == NULL )
         return ret;
 
     for_each_domain_in_cpupool ( d, c )
@@ -564,8 +596,15 @@ int cpu_disable_scheduler(unsigned int cpu)
             if ( cpumask_empty(&online_affinity) &&
                  cpumask_test_cpu(cpu, v->cpu_affinity) )
             {
-                printk("Breaking vcpu affinity for domain %d vcpu %d\n",
-                        v->domain->domain_id, v->vcpu_id);
+                printk(XENLOG_DEBUG "Breaking affinity for d%dv%d\n",
+                        d->domain_id, v->vcpu_id);
+
+                if (system_state == SYS_STATE_suspend)
+                {
+                    cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity);
+                    v->affinity_broken = 1;
+                }
+
                 cpumask_setall(v->cpu_affinity);
             }
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 53804c87d58725ecf2173f93018f4704d70e2fc1..b619269befc40f699aa4eb2f83cc64777724cedf 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -140,6 +140,9 @@ struct vcpu
     bool_t           defer_shutdown;
     /* VCPU is paused following shutdown request (d->is_shutting_down)? */
     bool_t           paused_for_shutdown;
+    /* VCPU need affinity restored */
+    bool_t           affinity_broken;
+
 
     /*
      * > 0: a single port is being polled;
@@ -162,6 +165,8 @@ struct vcpu
     cpumask_var_t    cpu_affinity;
     /* Used to change affinity temporarily. */
     cpumask_var_t    cpu_affinity_tmp;
+    /* Used to restore affinity across S3. */
+    cpumask_var_t    cpu_affinity_saved;
 
     /* Bitmask of CPUs which are holding onto this VCPU's state. */
     cpumask_var_t    vcpu_dirty_cpumask;
@@ -666,6 +671,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
 void vcpu_force_reschedule(struct vcpu *v);
 int cpu_disable_scheduler(unsigned int cpu);
 int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity);
+void restore_vcpu_affinity(struct domain *d);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
 uint64_t get_cpu_idle_time(unsigned int cpu);