#include "cpu.h"
+/*
+ * Processors with self-snooping capability can handle conflicting memory
+ * types across CPUs by snooping their own caches. However, there exist
+ * CPU models on which conflicting memory types still lead to unpredictable
+ * behavior, machine check errors, or hangs. Don't set the synthetic
+ * self-snoop feature on machines with known errata, so Xen never relies
+ * on self-snooping there.
+ */
+static void __init check_memory_type_self_snoop_errata(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_SS))
+ return;
+
+ switch (boot_cpu_data.x86_model) {
+ case 0x0f: /* Merom */
+ case 0x16: /* Merom L */
+ case 0x17: /* Penryn */
+ case 0x1d: /* Dunnington */
+ case 0x1e: /* Nehalem */
+ case 0x1f: /* Auburndale / Havendale */
+ case 0x1a: /* Nehalem EP */
+ case 0x2e: /* Nehalem EX */
+ case 0x25: /* Westmere */
+ case 0x2c: /* Westmere EP */
+ case 0x2a: /* SandyBridge */
+ return;
+ }
+
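+ /* Not on the errata list: safe for Xen to rely on self-snooping. */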
+ setup_force_cpu_cap(X86_FEATURE_XEN_SELFSNOOP);
+}
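
A minimal sketch of how the new synthetic bit might be consumed (illustrative only; this call site is an assumption and not part of the patch): code that would otherwise flush caches around a memory-type change, such as an MTRR or PAT update, could test the bit first and skip the flush on CPUs that safely self-snoop.

    /* Hypothetical consumer: only flush when the CPU cannot safely self-snoop. */
    if ( !boot_cpu_has(X86_FEATURE_XEN_SELFSNOOP) )
        wbinvd();

This mirrors the comment above: self-snooping CPUs without the listed errata tolerate the transient memory-type conflict, so the expensive WBINVD can be avoided.
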
+
/*
* Set caps in expected_levelling_cap, probe a specific masking MSR, and set
* caps in levelling_caps if it is found, or clobber the MSR index if missing.
(boot_cpu_data.x86_mask == 3 || boot_cpu_data.x86_mask == 4))
paddr_bits = 36;
- if (c == &boot_cpu_data)
+ if (c == &boot_cpu_data) {
+ check_memory_type_self_snoop_errata();
+
intel_init_levelling();
+ }
ctxt_switch_levelling(NULL);
}
XEN_CPUFEATURE(SC_MSR_HVM, (FSCAPINTS+0)*32+17) /* MSR_SPEC_CTRL used by Xen for HVM */
XEN_CPUFEATURE(SC_RSB_PV, (FSCAPINTS+0)*32+18) /* RSB overwrite needed for PV */
XEN_CPUFEATURE(SC_RSB_HVM, (FSCAPINTS+0)*32+19) /* RSB overwrite needed for HVM */
+XEN_CPUFEATURE(XEN_SELFSNOOP, (FSCAPINTS+0)*32+20) /* SELFSNOOP used by Xen itself */
XEN_CPUFEATURE(SC_MSR_IDLE, (FSCAPINTS+0)*32+21) /* (SC_MSR_PV || SC_MSR_HVM) && default_xen_spec_ctrl */
XEN_CPUFEATURE(XEN_LBR, (FSCAPINTS+0)*32+22) /* Xen uses MSR_DEBUGCTL.LBR */
XEN_CPUFEATURE(SC_VERW_PV, (FSCAPINTS+0)*32+23) /* VERW used by Xen for PV */
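
An illustrative companion definition (an assumption, not part of this hunk): Xen's synthetic bits are usually paired with a cpu_has_* shorthand in asm-x86/cpufeature.h so callers need not spell out boot_cpu_has() everywhere. For this bit it might look like:

    /* Hypothetical shorthand for the new synthetic bit; the name is illustrative. */
    #define cpu_has_xen_selfsnoop boot_cpu_has(X86_FEATURE_XEN_SELFSNOOP)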