From 2227448abe33893ac9512bc71da5222ca66bec92 Mon Sep 17 00:00:00 2001
From: Keir Fraser
Date: Tue, 17 Mar 2009 14:54:25 +0000
Subject: [PATCH] x86, cpuidle: disable ARB_DISABLE access for latest Intel
 platforms

ARB_DISABLE is a nop on all of the recent Intel platforms.
Disable ARB_DISABLE and the attached c3_lock on C3 entry/exit
for such platforms.

Signed-off-by: Venkatesh Pallipadi
Signed-off-by: Wei Gang
xen-unstable changeset: 19364:09253da8f1c17d125127895154bc851ac6d40aba
xen-unstable date: Tue Mar 17 10:50:16 2009 +0000
---
 xen/arch/x86/acpi/cpu_idle.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c
index 1577d1828..aa91efbb4 100644
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -722,12 +722,22 @@ static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flag
     else if ( c->x86_vendor == X86_VENDOR_INTEL )
     {
         /*
-         * Today all CPUs that support C3 share cache.
-         * TBD: This needs to look at cache shared map, once
-         * multi-core detection patch makes to the base.
+         * Today all MP CPUs that support C3 share cache.
+         * And caches should not be flushed by software while
+         * entering C3 type state.
          */
         flags->bm_check = 1;
     }
+
+    /*
+     * On all recent platforms, ARB_DISABLE is a nop.
+     * So, set bm_control to zero to indicate that ARB_DISABLE
+     * is not required while entering C3 type state on
+     * P4, Core and beyond CPUs
+     */
+    if ( c->x86_vendor == X86_VENDOR_INTEL &&
+         (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)) )
+        flags->bm_control = 0;
 }
 
 #define VENDOR_INTEL (1)
@@ -735,7 +745,8 @@ static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flag
 
 static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx)
 {
-    static int bm_check_flag;
+    static int bm_check_flag = -1;
+    static int bm_control_flag = -1;
 
     switch ( cx->reg.space_id )
     {
@@ -784,15 +795,17 @@ static int check_cx(struct acpi_processor_power *power, xen_processor_cx_t *cx)
         }
 
         /* All the logic here assumes flags.bm_check is same across all CPUs */
-        if ( !bm_check_flag )
+        if ( bm_check_flag == -1 )
         {
             /* Determine whether bm_check is needed based on CPU */
             acpi_processor_power_init_bm_check(&(power->flags));
             bm_check_flag = power->flags.bm_check;
+            bm_control_flag = power->flags.bm_control;
         }
         else
         {
             power->flags.bm_check = bm_check_flag;
+            power->flags.bm_control = bm_control_flag;
         }
 
         if ( power->flags.bm_check )
-- 
2.39.5