xen/sched: disable scheduling when entering ACPI deep sleep states
author Juergen Gross <jgross@suse.com>
Wed, 2 Oct 2019 07:27:43 +0000 (09:27 +0200)
committer Jan Beulich <jbeulich@suse.com>
Fri, 4 Oct 2019 11:03:07 +0000 (13:03 +0200)
When entering deep sleep states, all domains are paused, so all CPUs
run only idle vcpus. This allows scheduling to be stopped completely,
avoiding synchronization problems with core scheduling when individual
CPUs are offlined.

The scheduler is disabled by replacing the scheduling softirq handlers
with a dummy routine that only allows tasklets to run.

Signed-off-by: Juergen Gross <jgross@suse.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Dario Faggioli <dfaggioli@suse.com>
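
The mechanism works because softirq handlers live in a function-pointer
table keyed by softirq number, so disabling the scheduler amounts to
re-pointing two slots at a stub. A minimal, self-contained C sketch of
that technique (not Xen code; the handler table, the open_softirq() shim
and the main() driver are illustrative stand-ins):

    #include <stdio.h>

    typedef void (*softirq_handler)(void);

    enum { SCHEDULE_SOFTIRQ, SCHED_SLAVE_SOFTIRQ, NR_SOFTIRQS };

    static softirq_handler handlers[NR_SOFTIRQS];

    /* Stand-in for Xen's open_softirq(): register a softirq handler. */
    static void open_softirq(int nr, softirq_handler h) { handlers[nr] = h; }

    static void schedule(void)       { puts("full context switch path"); }
    static void sched_slave(void)    { puts("slave rendezvous path"); }
    static void schedule_dummy(void) { puts("dummy: check tasklets only"); }

    /* Point both scheduling softirqs at the dummy routine. */
    static void scheduler_disable(void)
    {
        open_softirq(SCHEDULE_SOFTIRQ, schedule_dummy);
        open_softirq(SCHED_SLAVE_SOFTIRQ, schedule_dummy);
    }

    /* Restore the real handlers. */
    static void scheduler_enable(void)
    {
        open_softirq(SCHEDULE_SOFTIRQ, schedule);
        open_softirq(SCHED_SLAVE_SOFTIRQ, sched_slave);
    }

    int main(void)
    {
        scheduler_enable();
        handlers[SCHEDULE_SOFTIRQ]();   /* runs schedule() */
        scheduler_disable();
        handlers[SCHEDULE_SOFTIRQ]();   /* runs schedule_dummy() */
        return 0;
    }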
xen/arch/x86/acpi/power.c
xen/common/schedule.c
xen/include/xen/sched.h

index 01e6aec4e8ad5a45020d9a04ef893d7210bc1ece..807835231242476365286b9881ac9c6eb7dea4f7 100644 (file)
@@ -145,12 +145,16 @@ static void freeze_domains(void)
     for_each_domain ( d )
         domain_pause(d);
     rcu_read_unlock(&domlist_read_lock);
+
+    scheduler_disable();
 }
 
 static void thaw_domains(void)
 {
     struct domain *d;
 
+    scheduler_enable();
+
     rcu_read_lock(&domlist_read_lock);
     for_each_domain ( d )
     {
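
The ordering in this hunk is deliberate: domains are paused while the
scheduler is still running (pausing requires a working scheduler), and on
resume the scheduler is re-enabled before any domain is unpaused. A hedged
sketch of the surrounding suspend flow (the real caller is enter_state()
in this file; the steps between the two calls are elided here):

    /* Sketch of the S3 path around these hunks; details elided. */
    static int enter_state_sketch(void)
    {
        freeze_domains();   /* pause all domains, then scheduler_disable() */

        /* ... offline non-boot CPUs, enter the sleep state, resume ... */

        thaw_domains();     /* scheduler_enable(), then unpause all domains */
        return 0;
    }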
index 5fecd7f61fc857f0eba79f493a2774196228c8a4..217fcb09ce4ba4f9bd967eab2b796667cd4b72ed 100644 (file)
@@ -91,6 +91,8 @@ extern const struct scheduler *__start_schedulers_array[], *__end_schedulers_array[];
 
 static struct scheduler __read_mostly ops;
 
+static bool scheduler_active;
+
 static void sched_set_affinity(
     struct sched_unit *unit, const cpumask_t *hard, const cpumask_t *soft);
 
@@ -2277,6 +2279,13 @@ static struct sched_unit *sched_wait_rendezvous_in(struct sched_unit *prev,
         cpu_relax();
 
         *lock = pcpu_schedule_lock_irq(cpu);
+
+        if ( unlikely(!scheduler_active) )
+        {
+            ASSERT(is_idle_unit(prev));
+            atomic_set(&prev->next_task->rendezvous_out_cnt, 0);
+            prev->rendezvous_in_cnt = 0;
+        }
     }
 
     return prev->next_task;
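
Why the counters must be reset: with core scheduling, every sibling CPU of
a core decrements rendezvous_in_cnt and spins until all siblings have
arrived before the core switches context. During suspend some siblings are
parked and will never arrive, so without this escape hatch the remaining
idle unit would spin forever. A self-contained illustration of that
deadlock and its resolution (not Xen code; single-threaded, with the lock
dance and cpu_relax() elided):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int rendezvous_in_cnt;
    static bool scheduler_active;

    /* Returns true if the rendezvous completed, false if aborted. */
    static bool wait_rendezvous(void)
    {
        while ( rendezvous_in_cnt )
        {
            /* In the real loop, sibling CPUs decrement the counter here. */
            if ( !scheduler_active )
            {
                rendezvous_in_cnt = 0;   /* abort: let the idle unit go */
                return false;
            }
        }
        return true;
    }

    int main(void)
    {
        rendezvous_in_cnt = 1;      /* one sibling still outstanding */
        scheduler_active = false;   /* suspend path: sibling is parked */
        printf("rendezvous %s\n",
               wait_rendezvous() ? "completed" : "aborted");
        return 0;
    }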
@@ -2633,14 +2642,32 @@ const cpumask_t *sched_get_opt_cpumask(enum sched_gran opt, unsigned int cpu)
     return mask;
 }
 
+static void schedule_dummy(void)
+{
+    sched_tasklet_check_cpu(smp_processor_id());
+}
+
+void scheduler_disable(void)
+{
+    scheduler_active = false;
+    open_softirq(SCHEDULE_SOFTIRQ, schedule_dummy);
+    open_softirq(SCHED_SLAVE_SOFTIRQ, schedule_dummy);
+}
+
+void scheduler_enable(void)
+{
+    open_softirq(SCHEDULE_SOFTIRQ, schedule);
+    open_softirq(SCHED_SLAVE_SOFTIRQ, sched_slave);
+    scheduler_active = true;
+}
+
 /* Initialise the data structures. */
 void __init scheduler_init(void)
 {
     struct domain *idle_domain;
     int i;
 
-    open_softirq(SCHEDULE_SOFTIRQ, schedule);
-    open_softirq(SCHED_SLAVE_SOFTIRQ, sched_slave);
+    scheduler_enable();
 
     for ( i = 0; i < NUM_SCHEDULERS; i++)
     {
index a40bd5fb56f653f0ca13e4ac5e54a4127333e802..629a4c52e082686515f75f38ebdbb7ad1bc91d54 100644 (file)
@@ -933,6 +933,8 @@ void restore_vcpu_affinity(struct domain *d);
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
 uint64_t get_cpu_idle_time(unsigned int cpu);
 void sched_guest_idle(void (*idle) (void), unsigned int cpu);
+void scheduler_enable(void);
+void scheduler_disable(void);
 
 /*
  * Used by idle loop to decide whether there is work to do: