### tsc (x86)
> `= unstable | skewed | stable:socket`
+### tsx
+ = <bool>
+
+ Applicability: x86
+ Default: true
+
+Controls for the use of Transactional Synchronization eXtensions.
+
+On Intel parts released in Q3 2019 (with updated microcode), and future parts,
+a control has been introduced which allows TSX to be turned off.
+
+On systems with the ability to turn TSX off, this boolean offers system wide
+control of whether TSX is enabled or disabled.
+
### ucode (x86)
> `= List of [ <integer> | scan=<bool>, nmi=<bool> ]`
obj-y += time.o
obj-y += trace.o
obj-y += traps.o
+obj-y += tsx.o
obj-y += usercopy.o
obj-y += x86_emulate.o
obj-$(CONFIG_TBOOT) += tboot.o
if ( cpu_has_itsc && (d->disable_migrate || d->arch.vtsc) )
__set_bit(X86_FEATURE_ITSC, max_fs);
+ /*
+ * On hardware with MSR_TSX_CTRL, the admin may have elected to disable
+ * TSX and hide the feature bits. Migrating-in VMs may have been booted
+ * pre-mitigation when the TSX features were visible.
+ *
+ * This situation is compatible (albeit with a perf hit to any TSX code in
+ * the guest), so allow the feature bits to remain set.
+ */
+ if ( cpu_has_tsx_ctrl )
+ {
+ __set_bit(X86_FEATURE_HLE, max_fs);
+ __set_bit(X86_FEATURE_RTM, max_fs);
+ }
+
/* Clamp the toolstacks choices to reality. */
for ( i = 0; i < ARRAY_SIZE(fs); i++ )
fs[i] &= max_fs[i];
case MSR_FLUSH_CMD:
/* Write-only */
case MSR_TSX_FORCE_ABORT:
+ case MSR_TSX_CTRL:
case MSR_AMD64_LWP_CFG:
case MSR_AMD64_LWP_CBADDR:
/* Not offered to guests. */
case MSR_ARCH_CAPABILITIES:
/* Read-only */
case MSR_TSX_FORCE_ABORT:
+ case MSR_TSX_CTRL:
case MSR_AMD64_LWP_CFG:
case MSR_AMD64_LWP_CBADDR:
/* Not offered to guests. */
early_microcode_init();
+ tsx_init(); /* Needs microcode. May change HLE/RTM feature bits. */
+
identify_cpu(&boot_cpu_data);
set_in_cr4(X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT);
if ( boot_cpu_has(X86_FEATURE_IBRSB) )
wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
+ tsx_init(); /* Needs microcode. May change HLE/RTM feature bits. */
+
if ( xen_guest )
hypervisor_ap_setup();
--- /dev/null
+#include <xen/init.h>
+#include <asm/msr.h>
+
+/*
+ * Valid values:
+ * 1 => Explicit tsx=1
+ * 0 => Explicit tsx=0
+ * -1 => Default, implicit tsx=1
+ *
+ * This is arranged such that the bottom bit encodes whether TSX is actually
+ * disabled, while identifying various explicit (>=0) and implicit (<0)
+ * conditions.
+ */
+int8_t __read_mostly opt_tsx = -1;
+int8_t __read_mostly cpu_has_tsx_ctrl = -1;
+
+/*
+ * Parser for the "tsx=" command line option.  Accepts boolean values only;
+ * anything else is rejected with -EINVAL.  A successful parse overwrites the
+ * default (-1) in opt_tsx with an explicit 0 or 1.
+ */
+static int __init parse_tsx(const char *s)
+{
+    int rc = 0, val = parse_bool(s, NULL);
+
+    if ( val >= 0 )
+        opt_tsx = val; /* Explicit tsx=0 or tsx=1. */
+    else
+        rc = -EINVAL; /* Not a recognised boolean string. */
+
+    return rc;
+}
+custom_param("tsx", parse_tsx);
+
+/*
+ * Apply the TSX configuration.  Invoked on the boot CPU after microcode
+ * loading (and therefore after MSR_TSX_CTRL may have appeared), and again on
+ * each AP during bring-up, so every CPU ends up with the same MSR_TSX_CTRL
+ * setting.  Idempotent: safe to call repeatedly.
+ */
+void tsx_init(void)
+{
+    /*
+     * This function is first called between microcode being loaded, and CPUID
+     * being scanned generally. Calculate from raw data whether MSR_TSX_CTRL
+     * is available.
+     */
+    if ( unlikely(cpu_has_tsx_ctrl < 0) )
+    {
+        uint64_t caps = 0;
+
+        /*
+         * MSR_TSX_CTRL's availability is enumerated by the TSX_CTRL bit in
+         * MSR_ARCH_CAPABILITIES, which itself is enumerated by CPUID leaf
+         * 7, subleaf 0, %edx.  Read the raw CPUID data as the cached feature
+         * flags have not been populated yet at this point.
+         */
+        if ( boot_cpu_data.cpuid_level >= 7 &&
+             (cpuid_count_edx(7, 0) & cpufeat_mask(X86_FEATURE_ARCH_CAPS)) )
+            rdmsrl(MSR_ARCH_CAPABILITIES, caps);
+
+        cpu_has_tsx_ctrl = !!(caps & ARCH_CAPS_TSX_CTRL);
+    }
+
+    if ( cpu_has_tsx_ctrl )
+    {
+        uint64_t val;
+
+        rdmsrl(MSR_TSX_CTRL, val);
+
+        /* Default to TSX enabled: clear both disable controls... */
+        val &= ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR);
+        /* Check bottom bit only. Higher bits are various sentinels. */
+        if ( !(opt_tsx & 1) )
+            /* ...and set them both again when TSX is to be disabled. */
+            val |= TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR;
+
+        wrmsrl(MSR_TSX_CTRL, val);
+    }
+    else if ( opt_tsx >= 0 )
+        /* An explicit tsx= was given but the hardware can't honour it. */
+        printk_once(XENLOG_WARNING
+                    "MSR_TSX_CTRL not available - Ignoring tsx= setting\n");
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
#define ARCH_CAPS_SSB_NO (_AC(1, ULL) << 4)
#define ARCH_CAPS_MDS_NO (_AC(1, ULL) << 5)
#define ARCH_CAPS_IF_PSCHANGE_MC_NO (_AC(1, ULL) << 6)
+#define ARCH_CAPS_TSX_CTRL (_AC(1, ULL) << 7)
#define MSR_FLUSH_CMD 0x0000010b
#define FLUSH_CMD_L1D (_AC(1, ULL) << 0)
#define MSR_TSX_FORCE_ABORT 0x0000010f
#define TSX_FORCE_ABORT_RTM (_AC(1, ULL) << 0)
+#define MSR_TSX_CTRL 0x00000122
+#define TSX_CTRL_RTM_DISABLE (_AC(1, ULL) << 0)
+#define TSX_CTRL_CPUID_CLEAR (_AC(1, ULL) << 1)
+
/* Intel MSRs. Some also available on other CPUs */
#define MSR_IA32_PERFCTR0 0x000000c1
#define MSR_IA32_A_PERFCTR0 0x000004c1
return ebx;
}
+/* Execute CPUID for @leaf/@subleaf and return %edx, discarding %eax/%ebx/%ecx. */
+static always_inline unsigned int cpuid_count_edx(
+    unsigned int leaf, unsigned int subleaf)
+{
+    unsigned int edx, tmp;
+
+    cpuid_count(leaf, subleaf, &tmp, &tmp, &tmp, &edx);
+
+    return edx;
+}
+
+
static inline unsigned long read_cr0(void)
{
unsigned long cr0;
return fam;
}
+extern int8_t opt_tsx, cpu_has_tsx_ctrl;
+void tsx_init(void);
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_X86_PROCESSOR_H */