raw_cpuid_policy.basic.sep )
__set_bit(X86_FEATURE_SEP, hvm_featureset);
+ /*
+ * VIRT_SSBD is exposed in the default policy as a result of
+ * VIRT_SC_MSR_HVM being set; it also needs exposing in the max policy.
+ */
+ if ( boot_cpu_has(X86_FEATURE_VIRT_SC_MSR_HVM) )
+ __set_bit(X86_FEATURE_VIRT_SSBD, hvm_featureset);
+
/*
* If Xen isn't virtualising MSR_SPEC_CTRL for HVM guests (functional
* availability, or admin choice), hide the feature.
guest_common_feature_adjustments(hvm_featureset);
guest_common_default_feature_adjustments(hvm_featureset);
+ /*
+ * Only expose VIRT_SSBD if AMD_SSBD is not available, and thus
+ * VIRT_SC_MSR_HVM is set.
+ */
+ if ( boot_cpu_has(X86_FEATURE_VIRT_SC_MSR_HVM) )
+ __set_bit(X86_FEATURE_VIRT_SSBD, hvm_featureset);
+
sanitise_featureset(hvm_featureset);
cpuid_featureset_to_policy(hvm_featureset, p);
recalculate_xstate(p);
.file "svm/entry.S"
+#include <xen/lib.h> /* For STR(). */
+
#include <asm/asm_defns.h>
#include <asm/page.h>
clgi
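+ /*
+ * Load the guest's VIRT_SPEC_CTRL selection (cached on its last
+ * intercepted write) into the MSR before entering the guest.
+ */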
+ ALTERNATIVE "", STR(call vmentry_virt_spec_ctrl), \
+ X86_FEATURE_VIRT_SC_MSR_HVM
+
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
/* SPEC_CTRL_EXIT_TO_SVM Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
.macro svm_vmentry_spec_ctrl
ALTERNATIVE "", svm_vmexit_spec_ctrl, X86_FEATURE_SC_MSR_HVM
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
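+
+ /*
+ * Restore Xen's own VIRT_SPEC_CTRL setting. The guest's value was
+ * cached by the write intercept, so no rdmsr is needed here.
+ */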
+ ALTERNATIVE "", STR(call vmexit_virt_spec_ctrl), \
+ X86_FEATURE_VIRT_SC_MSR_HVM
+
/*
* STGI is executed unconditionally, and is sufficiently serialising
* to safely resolve any Spectre-v1 concerns in the above logic.
#include <asm/hvm/svm/svmdebug.h>
#include <asm/hvm/svm/nestedsvm.h>
#include <asm/hvm/nestedhvm.h>
+#include <asm/spec_ctrl.h>
#include <asm/x86_emulate.h>
#include <public/sched.h>
#include <asm/hvm/vpt.h>
svm_intercept_msr(v, MSR_SPEC_CTRL,
cp->extd.ibrs ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
+ /*
+ * Always trap write accesses to VIRT_SPEC_CTRL in order to cache the
+ * guest setting, and hence avoid having to perform a rdmsr on vmexit
+ * to fetch it, even when VIRT_SSBD is also available for Xen's own use.
+ */
+ svm_intercept_msr(v, MSR_VIRT_SPEC_CTRL,
+ cp->extd.virt_ssbd && cpu_has_virt_ssbd &&
+ !cpu_has_amd_ssbd ?
+ MSR_INTERCEPT_WRITE : MSR_INTERCEPT_RW);
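+ /*
+ * Hence reads are passed through only when the guest sees VIRT_SSBD,
+ * the hardware implements it, and AMD_SSBD isn't in use; any other
+ * combination intercepts both directions and handles the MSR in
+ * guest_{rd,wr}msr() instead.
+ */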
+
/* Give access to MSR_PRED_CMD if the guest has been told about it. */
svm_intercept_msr(v, MSR_PRED_CMD,
cp->extd.ibpb ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
vmcb_set_vintr(vmcb, intr);
}
+/* Called with GIF=0: restore Xen's own VIRT_SPEC_CTRL setting on vmexit. */
+void vmexit_virt_spec_ctrl(void)
+{
+ unsigned int val = opt_ssbd ? SPEC_CTRL_SSBD : 0;
+
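+ /*
+ * The guest's selection was cached by the write intercept, so no rdmsr
+ * is needed; skip the wrmsr when the guest already runs with Xen's
+ * preferred setting. Note VIRT_SC_MSR_HVM implies cpu_has_virt_ssbd,
+ * so the check below is purely defensive.
+ */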
+ if ( val == current->arch.msrs->virt_spec_ctrl.raw )
+ return;
+
+ if ( cpu_has_virt_ssbd )
+ wrmsr(MSR_VIRT_SPEC_CTRL, val, 0);
+}
+
+/* Called with GIF=0: load the guest's VIRT_SPEC_CTRL setting on vmentry. */
+void vmentry_virt_spec_ctrl(void)
+{
+ unsigned int val = current->arch.msrs->virt_spec_ctrl.raw;
+
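+ /* Skip the wrmsr when the guest's selection matches Xen's setting. */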
+ if ( val == (opt_ssbd ? SPEC_CTRL_SSBD : 0) )
+ return;
+
+ if ( cpu_has_virt_ssbd )
+ wrmsr(MSR_VIRT_SPEC_CTRL, val, 0);
+}
+
/*
* Local variables:
* mode: C
XEN_CPUFEATURE(MFENCE_RDTSC, X86_SYNTH( 9)) /* MFENCE synchronizes RDTSC */
XEN_CPUFEATURE(XEN_SMEP, X86_SYNTH(10)) /* SMEP gets used by Xen itself */
XEN_CPUFEATURE(XEN_SMAP, X86_SYNTH(11)) /* SMAP gets used by Xen itself */
-/* Bit 12 - unused. */
+XEN_CPUFEATURE(VIRT_SC_MSR_HVM, X86_SYNTH(12)) /* MSR_VIRT_SPEC_CTRL exposed to HVM */
XEN_CPUFEATURE(IND_THUNK_LFENCE, X86_SYNTH(13)) /* Use IND_THUNK_LFENCE */
XEN_CPUFEATURE(IND_THUNK_JMP, X86_SYNTH(14)) /* Use IND_THUNK_JMP */
XEN_CPUFEATURE(SC_NO_BRANCH_HARDEN, X86_SYNTH(15)) /* (Disable) Conditional branch hardening */
*/
uint32_t tsc_aux;
+ /*
+ * 0xc001011f - MSR_VIRT_SPEC_CTRL (if !X86_FEATURE_AMD_SSBD)
+ *
+ * AMD only. Guest selected value, context switched on guest VM
+ * entry/exit.
+ */
+ struct {
+ uint32_t raw;
+ } virt_spec_ctrl;
+
/*
* 0xc00110{27,19-1b} MSR_AMD64_DR{0-3}_ADDRESS_MASK
*
if ( !cp->extd.virt_ssbd )
goto gp_fault;
- *val = msrs->spec_ctrl.raw & SPEC_CTRL_SSBD;
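+ /* With AMD_SSBD the guest's SSBD selection lives in SPEC_CTRL. */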
+ if ( cpu_has_amd_ssbd )
+ *val = msrs->spec_ctrl.raw & SPEC_CTRL_SSBD;
+ else
+ *val = msrs->virt_spec_ctrl.raw;
break;
case MSR_AMD64_DE_CFG:
goto gp_fault;
/* Only supports SSBD bit, the rest are ignored. */
- if ( val & SPEC_CTRL_SSBD )
- msrs->spec_ctrl.raw |= SPEC_CTRL_SSBD;
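+ /*
+ * With AMD_SSBD the SSBD selection is folded into the architectural
+ * SPEC_CTRL; otherwise it's cached for the vmentry/vmexit handlers.
+ */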
+ if ( cpu_has_amd_ssbd )
+ {
+ if ( val & SPEC_CTRL_SSBD )
+ msrs->spec_ctrl.raw |= SPEC_CTRL_SSBD;
+ else
+ msrs->spec_ctrl.raw &= ~SPEC_CTRL_SSBD;
+ }
else
- msrs->spec_ctrl.raw &= ~SPEC_CTRL_SSBD;
+ msrs->virt_spec_ctrl.raw = val & SPEC_CTRL_SSBD;
break;
case MSR_AMD64_DE_CFG:
(boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ||
boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ||
boot_cpu_has(X86_FEATURE_IBPB_ENTRY_HVM) ||
+ boot_cpu_has(X86_FEATURE_VIRT_SC_MSR_HVM) ||
opt_eager_fpu || opt_md_clear_hvm) ? "" : " None",
boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ? " MSR_SPEC_CTRL" : "",
- boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ? " MSR_VIRT_SPEC_CTRL" : "",
+ (boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ||
+ boot_cpu_has(X86_FEATURE_VIRT_SC_MSR_HVM)) ? " MSR_VIRT_SPEC_CTRL"
+ : "",
boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ? " RSB" : "",
opt_eager_fpu ? " EAGER_FPU" : "",
opt_md_clear_hvm ? " MD_CLEAR" : "",
setup_force_cpu_cap(X86_FEATURE_SC_MSR_HVM);
}
+ /* Support VIRT_SPEC_CTRL.SSBD if AMD_SSBD is not available. */
+ if ( opt_msr_sc_hvm && !cpu_has_amd_ssbd && cpu_has_virt_ssbd )
+ setup_force_cpu_cap(X86_FEATURE_VIRT_SC_MSR_HVM);
+
/* Figure out default_xen_spec_ctrl. */
if ( has_spec_ctrl && ibrs )
{