#include <asm/mwait.h>
#include <xen/notifier.h>
#include <xen/cpu.h>
+#include <asm/spec_ctrl.h>
/*#define DEBUG_PM_CX*/
*/
if ( (expires > NOW() || expires == 0) && !softirq_pending(cpu) )
{
+ struct cpu_info *info = get_cpu_info();
+
cpumask_set_cpu(cpu, &cpuidle_mwait_flags);
+
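+ /* Clear MSR_SPEC_CTRL across the wait; it is restored before any indirect branch or return. */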
+ spec_ctrl_enter_idle(info);
__mwait(eax, ecx);
+ spec_ctrl_exit_idle(info);
+
cpumask_clear_cpu(cpu, &cpuidle_mwait_flags);
}
static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
+ struct cpu_info *info = get_cpu_info();
+
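+ /* The FFH (mwait) case is already covered inside acpi_processor_ffh_cstate_enter(); the remaining blocking entry methods are bracketed below. */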
switch ( cx->entry_method )
{
case ACPI_CSTATE_EM_FFH:
acpi_processor_ffh_cstate_enter(cx);
return;
case ACPI_CSTATE_EM_SYSIO:
+ spec_ctrl_enter_idle(info);
/* IO port based C-state */
inb(cx->address);
/* Dummy wait op - must do something useless after P_LVL2 read
because chipsets cannot guarantee that STPCLK# signal
gets asserted in time to freeze execution properly. */
inl(pmtmr_ioport);
+ spec_ctrl_exit_idle(info);
return;
case ACPI_CSTATE_EM_HALT:
+ spec_ctrl_enter_idle(info);
safe_halt();
+ spec_ctrl_exit_idle(info);
local_irq_disable();
return;
}
if ( pm_idle_save )
pm_idle_save();
else
+ {
+ struct cpu_info *info = get_cpu_info();
+
+ spec_ctrl_enter_idle(info);
safe_halt();
+ spec_ctrl_exit_idle(info);
+ }
return;
}
* Otherwise, CPU may still hold dirty data, breaking cache coherency,
* leading to strange errors.
*/
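+ /* No matching spec_ctrl_exit_idle(): this CPU never leaves the loop below. */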
+ spec_ctrl_enter_idle(get_cpu_info());
wbinvd();
while ( 1 )
u32 address = cx->address;
u32 pmtmr_ioport_local = pmtmr_ioport;
+ spec_ctrl_enter_idle(get_cpu_info());
wbinvd();
while ( 1 )
#include <asm/hpet.h>
#include <asm/mwait.h>
#include <asm/msr.h>
+#include <asm/spec_ctrl.h>
#include <acpi/cpufreq/cpufreq.h>
#define MWAIT_IDLE_VERSION "0.4.1"
if (pm_idle_save)
pm_idle_save();
else
+ {
+ struct cpu_info *info = get_cpu_info();
+
+ spec_ctrl_enter_idle(info);
safe_halt();
+ spec_ctrl_exit_idle(info);
+ }
return;
}
#include <asm/hvm/viridian.h>
#include <asm/debugreg.h>
#include <asm/msr.h>
+#include <asm/spec_ctrl.h>
#include <asm/traps.h>
#include <asm/nmi.h>
#include <asm/mce.h>
static void default_idle(void)
{
+ struct cpu_info *info = get_cpu_info();
+
local_irq_disable();
if ( cpu_is_haltable(smp_processor_id()) )
+ {
+ spec_ctrl_enter_idle(info);
safe_halt();
+ spec_ctrl_exit_idle(info);
+ }
else
local_irq_enable();
}
* held by the CPUs spinning here indefinitely, and get discarded by
* a subsequent INIT.
*/
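+ /* No matching spec_ctrl_exit_idle() here either: the CPU spins until a subsequent INIT. */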
+ spec_ctrl_enter_idle(get_cpu_info());
wbinvd();
for ( ; ; )
halt();
#ifndef __X86_SPEC_CTRL_H__
#define __X86_SPEC_CTRL_H__
+#include <asm/alternative.h>
#include <asm/current.h>
+#include <asm/msr-index.h>
void init_speculation_mitigations(void);
info->bti_ist_info = default_bti_ist_info;
}
+/* WARNING! `ret`, `call *`, `jmp *` not safe after this call. */
+static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
+{
+ uint32_t val = 0;
+
+ /*
+ * Latch the new shadow value, then enable shadowing, then update the MSR.
+ * There are no SMP issues here; only local processor ordering concerns.
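+ * The barrier()s are only compiler barriers; that is all the ordering needed here.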
+ */
+ info->shadow_spec_ctrl = val;
+ barrier();
+ info->use_shadow_spec_ctrl = true;
+ barrier();
+ asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_XEN_IBRS_SET)
+ :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" );
+}
+
+/* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */
+static always_inline void spec_ctrl_exit_idle(struct cpu_info *info)
+{
+ uint32_t val = SPEC_CTRL_IBRS;
+
+ /*
+ * Disable shadowing before updating the MSR. There are no SMP issues
+ * here; only local processor ordering concerns.
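+ * The stale shadow value can be left alone, as it is only consulted while use_shadow_spec_ctrl is set.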
+ */
+ info->use_shadow_spec_ctrl = false;
+ barrier();
+ asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_XEN_IBRS_SET)
+ :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" );
+}
+
#endif /* !__X86_SPEC_CTRL_H__ */
/*