*/
spec_ctrl_enter_idle(get_cpu_info());
wbinvd();
- for ( ; ; )
- halt();
+ halt();
+ spec_ctrl_exit_idle(get_cpu_info());
}
-static void play_dead(void)
+void play_dead(void)
{
+ unsigned int cpu = smp_processor_id();
+
local_irq_disable();
+ /* Change the NMI handler to a nop (see comment below). */
+ _set_gate_lower(&idt_tables[cpu][TRAP_nmi], SYS_DESC_irq_gate, 0,
+ &trap_nop);
+
/*
 * NOTE: After cpu_exit_clear, per-cpu variables may no longer be accessible,
 * as they may be freed at any time if offline CPUs don't get parked.
 * Consider very carefully when adding code to *dead_idle. Most hypervisor
 * subsystems are unsafe to call.
*/
- cpu_exit_clear(smp_processor_id());
+ cpu_exit_clear(cpu);
- (*dead_idle)();
+ for ( ; ; )
+ dead_idle();
}
static void idle_loop(void)
#include <xen/serial.h>
#include <xen/numa.h>
#include <xen/cpu.h>
+#include <asm/cpuidle.h>
#include <asm/current.h>
#include <asm/mc146818rtc.h>
#include <asm/desc.h>
halt:
clear_local_APIC();
spin_debug_enable();
- cpu_exit_clear(cpu);
- (*dead_idle)();
+ play_dead();
}
/* Allow the master to continue. */
int cpuidle_init_cpu(unsigned int cpu);
void default_dead_idle(void);
void acpi_dead_idle(void);
+void play_dead(void);
void trace_exit_reason(u32 *irq_traced);
void update_idle_stats(struct acpi_processor_power *,
struct acpi_processor_cx *, uint64_t, uint64_t);