ia64/xen-unstable

changeset 19462:f6a2bf60d49c

cpuidle: suspend/resume scheduler tick timer during cpu idle state entry/exit

cpuidle can collaborate with the scheduler to reduce unnecessary timer
interrupts. For example, the credit scheduler's accounting timer
doesn't need to be active at idle time, so it can be stopped at
cpuidle entry and resumed at cpuidle exit. This patch implements this
function by adding two ops to the scheduler: tick_suspend/tick_resume, and
implements them for the credit scheduler

Signed-off-by: Yu Ke <ke.yu@intel.com>
Signed-off-by: Tian Kevin <kevin.tian@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Mar 31 11:51:56 2009 +0100 (2009-03-31)
parents ee3c5a08f80e
children 57b733f66531
files xen/arch/x86/acpi/cpu_idle.c xen/common/sched_credit.c xen/common/schedule.c xen/include/xen/sched-if.h xen/include/xen/sched.h
line diff
     1.1 --- a/xen/arch/x86/acpi/cpu_idle.c	Tue Mar 31 11:49:56 2009 +0100
     1.2 +++ b/xen/arch/x86/acpi/cpu_idle.c	Tue Mar 31 11:51:56 2009 +0100
     1.3 @@ -195,6 +195,15 @@ static void acpi_processor_idle(void)
     1.4      int sleep_ticks = 0;
     1.5      u32 t1, t2 = 0;
     1.6  
     1.7 +    sched_tick_suspend();
     1.8 +    /*
     1.9 +     * sched_tick_suspend may raise TIMER_SOFTIRQ by __stop_timer,
     1.10 +     * which will break the later assumption of no softirq pending,
    1.11 +     * so add do_softirq
    1.12 +     */
    1.13 +    if ( softirq_pending(smp_processor_id()) )
    1.14 +        do_softirq();
    1.15 +
    1.16      /*
    1.17       * Interrupts must be disabled during bus mastering calculations and
    1.18       * for C2/C3 transitions.
    1.19 @@ -204,6 +213,7 @@ static void acpi_processor_idle(void)
    1.20      if ( softirq_pending(smp_processor_id()) )
    1.21      {
    1.22          local_irq_enable();
    1.23 +        sched_tick_resume();
    1.24          return;
    1.25      }
    1.26  
    1.27 @@ -223,6 +233,7 @@ static void acpi_processor_idle(void)
    1.28              pm_idle_save();
    1.29          else
    1.30              acpi_safe_halt();
    1.31 +        sched_tick_resume();
    1.32          return;
    1.33      }
    1.34  
    1.35 @@ -329,6 +340,7 @@ static void acpi_processor_idle(void)
    1.36  
    1.37      default:
    1.38          local_irq_enable();
    1.39 +        sched_tick_resume();
    1.40          return;
    1.41      }
    1.42  
    1.43 @@ -339,6 +351,8 @@ static void acpi_processor_idle(void)
    1.44          cx->time += sleep_ticks;
    1.45      }
    1.46  
    1.47 +    sched_tick_resume();
    1.48 +
    1.49      if ( cpuidle_current_governor->reflect )
    1.50          cpuidle_current_governor->reflect(power);
    1.51  }
     2.1 --- a/xen/common/sched_credit.c	Tue Mar 31 11:49:56 2009 +0100
     2.2 +++ b/xen/common/sched_credit.c	Tue Mar 31 11:51:56 2009 +0100
     2.3 @@ -154,6 +154,7 @@ struct csched_private {
     2.4      spinlock_t lock;
     2.5      struct list_head active_sdom;
     2.6      uint32_t ncpus;
     2.7 +    struct timer  master_ticker;
     2.8      unsigned int master;
     2.9      cpumask_t idlers;
    2.10      uint32_t weight;
    2.11 @@ -757,7 +758,7 @@ csched_runq_sort(unsigned int cpu)
    2.12  }
    2.13  
    2.14  static void
    2.15 -csched_acct(void)
    2.16 +csched_acct(void* dummy)
    2.17  {
    2.18      unsigned long flags;
    2.19      struct list_head *iter_vcpu, *next_vcpu;
    2.20 @@ -792,7 +793,7 @@ csched_acct(void)
    2.21          csched_priv.credit_balance = 0;
    2.22          spin_unlock_irqrestore(&csched_priv.lock, flags);
    2.23          CSCHED_STAT_CRANK(acct_no_work);
    2.24 -        return;
    2.25 +        goto out;
    2.26      }
    2.27  
    2.28      CSCHED_STAT_CRANK(acct_run);
    2.29 @@ -950,6 +951,10 @@ csched_acct(void)
    2.30  
    2.31      /* Inform each CPU that its runq needs to be sorted */
    2.32      csched_priv.runq_sort++;
    2.33 +
    2.34 +out:
    2.35 +    set_timer( &csched_priv.master_ticker, NOW() +
    2.36 +            MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT );
    2.37  }
    2.38  
    2.39  static void
    2.40 @@ -967,18 +972,6 @@ csched_tick(void *_cpu)
    2.41          csched_vcpu_acct(cpu);
    2.42  
    2.43      /*
    2.44 -     * Host-wide accounting duty
    2.45 -     *
    2.46 -     * Note: Currently, this is always done by the master boot CPU. Eventually,
    2.47 -     * we could distribute or at the very least cycle the duty.
    2.48 -     */
    2.49 -    if ( (csched_priv.master == cpu) &&
    2.50 -         (spc->tick % CSCHED_TICKS_PER_ACCT) == 0 )
    2.51 -    {
    2.52 -        csched_acct();
    2.53 -    }
    2.54 -
    2.55 -    /*
    2.56       * Check if runq needs to be sorted
    2.57       *
    2.58       * Every physical CPU resorts the runq after the accounting master has
    2.59 @@ -1310,10 +1303,35 @@ static __init int csched_start_tickers(v
    2.60          set_timer(&spc->ticker, NOW() + MILLISECS(CSCHED_MSECS_PER_TICK));
    2.61      }
    2.62  
    2.63 +    init_timer( &csched_priv.master_ticker, csched_acct, NULL,
    2.64 +                    csched_priv.master);
    2.65 +
    2.66 +    set_timer( &csched_priv.master_ticker, NOW() +
    2.67 +            MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT );
    2.68 +
    2.69      return 0;
    2.70  }
    2.71  __initcall(csched_start_tickers);
    2.72  
    2.73 +static void csched_tick_suspend(void)
    2.74 +{
    2.75 +    struct csched_pcpu *spc;
    2.76 +
    2.77 +    spc = CSCHED_PCPU(smp_processor_id());
    2.78 +
    2.79 +    stop_timer(&spc->ticker);
    2.80 +}
    2.81 +
    2.82 +static void csched_tick_resume(void)
    2.83 +{
    2.84 +    struct csched_pcpu *spc;
    2.85 +    uint64_t now = NOW();
    2.86 +
    2.87 +    spc = CSCHED_PCPU(smp_processor_id());
    2.88 +
    2.89 +    set_timer(&spc->ticker, now + MILLISECS(CSCHED_MSECS_PER_TICK)
    2.90 +            - now % MILLISECS(CSCHED_MSECS_PER_TICK) );
    2.91 +}
    2.92  
    2.93  struct scheduler sched_credit_def = {
    2.94      .name           = "SMP Credit Scheduler",
    2.95 @@ -1337,4 +1355,7 @@ struct scheduler sched_credit_def = {
    2.96      .dump_cpu_state = csched_dump_pcpu,
    2.97      .dump_settings  = csched_dump,
    2.98      .init           = csched_init,
    2.99 +
   2.100 +    .tick_suspend   = csched_tick_suspend,
   2.101 +    .tick_resume    = csched_tick_resume,
   2.102  };
     3.1 --- a/xen/common/schedule.c	Tue Mar 31 11:49:56 2009 +0100
     3.2 +++ b/xen/common/schedule.c	Tue Mar 31 11:51:56 2009 +0100
     3.3 @@ -964,6 +964,16 @@ void dump_runq(unsigned char key)
     3.4      local_irq_restore(flags);
     3.5  }
     3.6  
     3.7 +void sched_tick_suspend(void)
     3.8 +{
     3.9 +    SCHED_OP(tick_suspend);
    3.10 +}
    3.11 +
    3.12 +void sched_tick_resume(void)
    3.13 +{
    3.14 +    SCHED_OP(tick_resume);
    3.15 +}
    3.16 +
    3.17  #ifdef CONFIG_COMPAT
    3.18  #include "compat/schedule.c"
    3.19  #endif
     4.1 --- a/xen/include/xen/sched-if.h	Tue Mar 31 11:49:56 2009 +0100
     4.2 +++ b/xen/include/xen/sched-if.h	Tue Mar 31 11:51:56 2009 +0100
     4.3 @@ -77,6 +77,9 @@ struct scheduler {
     4.4                                      struct xen_domctl_scheduler_op *);
     4.5      void         (*dump_settings)  (void);
     4.6      void         (*dump_cpu_state) (int);
     4.7 +
     4.8 +    void         (*tick_suspend)    (void);
     4.9 +    void         (*tick_resume)     (void);
    4.10  };
    4.11  
    4.12  #endif /* __XEN_SCHED_IF_H__ */
     5.1 --- a/xen/include/xen/sched.h	Tue Mar 31 11:49:56 2009 +0100
     5.2 +++ b/xen/include/xen/sched.h	Tue Mar 31 11:51:56 2009 +0100
     5.3 @@ -428,6 +428,8 @@ int  sched_init_domain(struct domain *d)
     5.4  void sched_destroy_domain(struct domain *d);
     5.5  long sched_adjust(struct domain *, struct xen_domctl_scheduler_op *);
     5.6  int  sched_id(void);
     5.7 +void sched_tick_suspend(void);
     5.8 +void sched_tick_resume(void);
     5.9  void vcpu_wake(struct vcpu *d);
    5.10  void vcpu_sleep_nosync(struct vcpu *d);
    5.11  void vcpu_sleep_sync(struct vcpu *d);