ia64/xen-unstable

changeset 19369:09253da8f1c1

x86, cpuidle: disable ARB_DISABLE access for latest Intel platforms

ARB_DISABLE is a nop on all recent Intel platforms. Disable
ARB_DISABLE and the attached c3_lock on C3 entry/exit for such platforms.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Wei Gang <gang.wei@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Mar 17 10:50:16 2009 +0000 (2009-03-17)
parents 27e15492aa25
children 2dcdd2fcb945
files xen/arch/x86/acpi/cpu_idle.c
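
As background for the diff below: the new check skips ARB_DISABLE based purely
on CPU family and model. The following standalone sketch (not part of the
changeset) reproduces that predicate; the struct and its fields are stand-ins
for Xen's struct cpuinfo_x86, and the sample CPUs in the comments are
illustrative.

#include <stdio.h>

/* Stand-in for the relevant fields of Xen's struct cpuinfo_x86. */
struct cpu_id {
    unsigned int family;   /* c->x86 */
    unsigned int model;    /* c->x86_model */
};

/* Mirrors the test added in acpi_processor_power_init_bm_check():
 * family > 6 (P4 and beyond), or family 6 with model >= 14
 * (Core and beyond). */
static int arb_disable_is_nop(const struct cpu_id *c)
{
    return c->family > 6 || (c->family == 6 && c->model >= 14);
}

int main(void)
{
    struct cpu_id pentium_m = {  6, 13 };  /* keeps ARB_DISABLE */
    struct cpu_id core_duo  = {  6, 14 };  /* ARB_DISABLE is a nop */
    struct cpu_id pentium4  = { 15,  2 };  /* ARB_DISABLE is a nop */

    printf("family 6, model 13: %d\n", arb_disable_is_nop(&pentium_m));
    printf("family 6, model 14: %d\n", arb_disable_is_nop(&core_duo));
    printf("family 15, model 2: %d\n", arb_disable_is_nop(&pentium4));
    return 0;
}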
line diff
--- a/xen/arch/x86/acpi/cpu_idle.c	Tue Mar 17 10:49:42 2009 +0000
+++ b/xen/arch/x86/acpi/cpu_idle.c	Tue Mar 17 10:50:16 2009 +0000
@@ -470,12 +470,22 @@ static void acpi_processor_power_init_bm
     else if ( c->x86_vendor == X86_VENDOR_INTEL )
     {
         /*
-         * Today all CPUs that support C3 share cache.
-         * TBD: This needs to look at cache shared map, once
-         * multi-core detection patch makes to the base.
+         * Today all MP CPUs that support C3 share cache.
+         * And caches should not be flushed by software while
+         * entering C3 type state.
          */
         flags->bm_check = 1;
     }
+
+    /*
+     * On all recent platforms, ARB_DISABLE is a nop.
+     * So, set bm_control to zero to indicate that ARB_DISABLE
+     * is not required while entering C3 type state on
+     * P4, Core and beyond CPUs
+     */
+    if ( c->x86_vendor == X86_VENDOR_INTEL &&
+        (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)) )
+            flags->bm_control = 0;
 }
 
 #define VENDOR_INTEL                   (1)
@@ -483,7 +493,8 @@ static void acpi_processor_power_init_bm
 
 static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx)
 {
-    static int bm_check_flag;
+    static int bm_check_flag = -1;
+    static int bm_control_flag = -1;
 
     switch ( cx->reg.space_id )
     {
@@ -529,15 +540,17 @@ static int check_cx(struct acpi_processo
         }
 
         /* All the logic here assumes flags.bm_check is same across all CPUs */
-        if ( !bm_check_flag )
+        if ( bm_check_flag == -1 )
         {
             /* Determine whether bm_check is needed based on CPU  */
             acpi_processor_power_init_bm_check(&(power->flags));
             bm_check_flag = power->flags.bm_check;
+            bm_control_flag = power->flags.bm_control;
         }
         else
         {
             power->flags.bm_check = bm_check_flag;
+            power->flags.bm_control = bm_control_flag;
         }
 
         if ( power->flags.bm_check )
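
The other half of the patch changes how check_cx() caches the result of the
per-CPU probe. Previously bm_check_flag started at 0, so "not yet probed" was
indistinguishable from a probed value of 0; initializing both flags to -1
gives an explicit sentinel, and bm_control is now cached alongside bm_check.
A minimal standalone sketch of that pattern follows (probe_once() is a
hypothetical stand-in for acpi_processor_power_init_bm_check()):

#include <stdio.h>

/* Hypothetical stand-in for the real hardware probe. */
static int probe_once(void)
{
    printf("probing...\n");
    return 0;   /* a legitimate result, which 0-as-sentinel would mask */
}

static int get_flag(void)
{
    static int cached = -1;    /* -1 means "not yet computed" */

    if ( cached == -1 )
        cached = probe_once(); /* first caller computes the value */
    return cached;             /* later callers reuse it */
}

int main(void)
{
    printf("%d\n", get_flag()); /* probes once, prints 0 */
    printf("%d\n", get_flag()); /* cached, prints 0 without re-probing */
    return 0;
}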