/* Signal whether the ACPI C1E quirk is required. */
bool __read_mostly amd_acpi_c1e_quirk;
+bool __ro_after_init amd_legacy_ssbd;
static inline int rdmsr_amd_safe(unsigned int msr, unsigned int *lo,
unsigned int *hi)
* Refer to the AMD Speculative Store Bypass whitepaper:
* https://developer.amd.com/wp-content/resources/124441_AMD64_SpeculativeStoreBypassDisable_Whitepaper_final.pdf
*/
-void amd_init_ssbd(const struct cpuinfo_x86 *c)
+static bool set_legacy_ssbd(const struct cpuinfo_x86 *c, bool enable)
{
int bit = -1;
- if (cpu_has_ssb_no)
- return;
-
- if (cpu_has_amd_ssbd) {
- /* Handled by common MSR_SPEC_CTRL logic */
- return;
- }
-
- if (cpu_has_virt_ssbd) {
- wrmsrl(MSR_VIRT_SPEC_CTRL, opt_ssbd ? SPEC_CTRL_SSBD : 0);
- return;
- }
-
switch (c->x86) {
case 0x15: bit = 54; break;
case 0x16: bit = 33; break;
if (rdmsr_safe(MSR_AMD64_LS_CFG, val) ||
({
val &= ~mask;
- if (opt_ssbd)
+ if (enable)
val |= mask;
false;
}) ||
wrmsr_safe(MSR_AMD64_LS_CFG, val) ||
({
rdmsrl(MSR_AMD64_LS_CFG, val);
- (val & mask) != (opt_ssbd * mask);
+ (val & mask) != (enable * mask);
}))
bit = -1;
}
- if (bit < 0)
+ return bit >= 0;
+}
+
+void amd_init_ssbd(const struct cpuinfo_x86 *c)
+{
+ if (cpu_has_ssb_no)
+ return;
+
+ if (cpu_has_amd_ssbd) {
+ /* Handled by common MSR_SPEC_CTRL logic */
+ return;
+ }
+
+ if (cpu_has_virt_ssbd) {
+ wrmsrl(MSR_VIRT_SPEC_CTRL, opt_ssbd ? SPEC_CTRL_SSBD : 0);
+ return;
+ }
+
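+ /* No SPEC_CTRL or VIRT_SPEC_CTRL interface: use the legacy LS_CFG control. */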
+ if (!set_legacy_ssbd(c, opt_ssbd)) {
printk_once(XENLOG_ERR "No SSBD controls available\n");
+ if (amd_legacy_ssbd)
+ panic("CPU feature mismatch: no legacy SSBD\n");
+ } else if (c == &boot_cpu_data)
+ amd_legacy_ssbd = true;
+}
+
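+/*
+ * Per-core tracking of the legacy LS_CFG SSBD control on Fam17h/18h, where
+ * the setting is shared between the threads of a core and hence needs
+ * reference counting.
+ */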
+static struct ssbd_ls_cfg {
+ bool locked;
+ unsigned int count;
+} __cacheline_aligned *ssbd_ls_cfg;
+static unsigned int __ro_after_init ssbd_max_cores;
+#define AMD_FAM17H_MAX_SOCKETS 2
+
+bool __init amd_setup_legacy_ssbd(void)
+{
+ unsigned int i;
+
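+ /* Per-core coordination is only needed on Fam17h/18h parts with SMT. */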
+ if ((boot_cpu_data.x86 != 0x17 && boot_cpu_data.x86 != 0x18) ||
+ boot_cpu_data.x86_num_siblings <= 1)
+ return true;
+
+ /*
+ * One could be forgiven for thinking that c->x86_max_cores is the
+ * correct value to use here.
+ *
+ * However, that value is derived from the current configuration, and
+ * c->cpu_core_id is sparse on all but the top end CPUs. Derive
+ * ssbd_max_cores from ApicIdCoreIdSize, which will cover any sparseness.
+ */
+ if (boot_cpu_data.extended_cpuid_level >= 0x80000008) {
+ ssbd_max_cores = 1u << MASK_EXTR(cpuid_ecx(0x80000008), 0xf000);
+ ssbd_max_cores /= boot_cpu_data.x86_num_siblings;
+ }
+ if (!ssbd_max_cores)
+ return false;
+
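+ /* One slot per possible core on each of the (at most 2) supported sockets. */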
+ ssbd_ls_cfg = xzalloc_array(struct ssbd_ls_cfg,
+ ssbd_max_cores * AMD_FAM17H_MAX_SOCKETS);
+ if (!ssbd_ls_cfg)
+ return false;
+
+ if (opt_ssbd)
+ for (i = 0; i < ssbd_max_cores * AMD_FAM17H_MAX_SOCKETS; i++)
+ /* Set initial state, applies to any (hotplug) CPU. */
+ ssbd_ls_cfg[i].count = boot_cpu_data.x86_num_siblings;
+
+ return true;
+}
+
+/*
+ * Executed from GIF==0 context: avoid using BUG/ASSERT or other functionality
+ * that relies on exceptions, as exception handling is not expected to work in
+ * GIF==0 context.
+ */
+void amd_set_legacy_ssbd(bool enable)
+{
+ const struct cpuinfo_x86 *c = &current_cpu_data;
+ struct ssbd_ls_cfg *status;
+
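+ /* Parts without a shared control can update MSR_AMD64_LS_CFG directly. */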
+ if ((c->x86 != 0x17 && c->x86 != 0x18) || c->x86_num_siblings <= 1) {
+ set_legacy_ssbd(c, enable);
+ return;
+ }
+
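+ /* Threads of the same core share a single slot (and the underlying MSR). */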
+ status = &ssbd_ls_cfg[c->phys_proc_id * ssbd_max_cores +
+ c->cpu_core_id];
+
+ /*
+ * Open code a very simple spinlock: this function is used with GIF==0
+ * and different IF values, so would trigger the checklock detector.
+ * Instead of trying to work around the detector, use a very simple lock
+ * implementation: it's better to reduce the amount of code executed
+ * with GIF==0.
+ */
+ while (test_and_set_bool(status->locked))
+ cpu_relax();
+ status->count += enable ? 1 : -1;
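+ /* Only touch the MSR on the first enable or the last disable for this core. */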
+ if (enable ? status->count == 1 : !status->count)
+ set_legacy_ssbd(c, enable);
+ barrier();
+ write_atomic(&status->locked, false);
}
/*