                 vmsrs->msr_count = nr_msrs;
             else
             {
+                uint32_t edx, dummy;
+
                 i = 0;
                 vcpu_pause(v);

                     }
                 }
+                domain_cpuid(d, 7, 0, &dummy, &dummy, &dummy, &edx);
+                if ( (edx & cpufeat_mask(X86_FEATURE_IBRSB)) &&
+                     v->arch.spec_ctrl )
+                {
+                    if ( i < vmsrs->msr_count && !ret )
+                    {
+                        msr.index = MSR_SPEC_CTRL;
+                        msr.reserved = 0;
+                        msr.value = v->arch.spec_ctrl;
+                        if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
+                            ret = -EFAULT;
+                    }
+                    ++i;
+                }
+
                 vcpu_unpause(v);
                 if ( i > vmsrs->msr_count && !ret )
                     ret = -ENOBUFS;
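
The get path above only emits MSR_SPEC_CTRL into the stream when the guest's CPUID policy exposes IBRSB (CPUID.(EAX=7,ECX=0):EDX bit 26) and the vCPU holds a non-zero value, presumably because zero is the reset value and needs no record. A minimal standalone sketch of that gate, with the bit mask redefined locally and the helper name purely illustrative (not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* CPUID.(EAX=7,ECX=0):EDX bit 26 enumerates IBRS/IBPB (X86_FEATURE_IBRSB). */
#define CPUID7_0_EDX_IBRSB (1u << 26)

/* Save MSR_SPEC_CTRL only if the guest can see it and the value is non-zero. */
bool want_spec_ctrl_record(uint32_t guest_cpuid7_edx, uint64_t spec_ctrl)
{
    return (guest_cpuid7_edx & CPUID7_0_EDX_IBRSB) && spec_ctrl != 0;
}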

                 switch ( msr.index )
                 {
+                case MSR_SPEC_CTRL:
+                    if ( !boot_cpu_has(X86_FEATURE_IBRSB) )
+                        break; /* MSR available? */
+
+                    /*
+                     * Note: SPEC_CTRL_STIBP is specified as safe to use (i.e.
+                     * ignored) when STIBP isn't enumerated in hardware.
+                     */
+
+                    if ( msr.value & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP) )
+                        break;
+                    v->arch.spec_ctrl = msr.value;
+                    continue;
+
                 case MSR_AMD64_DR0_ADDRESS_MASK:
                     if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
                          (msr.value >> 32) )
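
On the restore side (the domctl case above, and vmx_load_msr() further below), an incoming MSR_SPEC_CTRL value is accepted only if it sets no bits beyond IBRS and STIBP; per the comment, STIBP is tolerated even when not enumerated because hardware then ignores it. A minimal standalone sketch of that acceptance test, with the masks redefined locally and the helper name purely illustrative:

#include <stdbool.h>
#include <stdint.h>

#define SPEC_CTRL_IBRS  (1ull << 0)  /* MSR_SPEC_CTRL bit 0: IBRS */
#define SPEC_CTRL_STIBP (1ull << 1)  /* MSR_SPEC_CTRL bit 1: STIBP */

/* Accept a value iff no bit outside IBRS|STIBP is set. */
bool spec_ctrl_value_ok(uint64_t val)
{
    return !(val & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP));
}

A rejected value surfaces as an error through the surrounding loop in the domctl case and as -ENXIO from vmx_load_msr(). The remaining hunks adjust the VMX MSR save/load hooks (vmx_init_msr(), vmx_save_msr(), vmx_load_msr()).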
 static unsigned int __init vmx_init_msr(void)
 {
-    return cpu_has_mpx && cpu_has_vmx_mpx;
+    return !!boot_cpu_has(X86_FEATURE_IBRSB) +
+           (cpu_has_mpx && cpu_has_vmx_mpx);
 }
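
The count returned by vmx_init_msr() above feeds the sizing of the per-vCPU MSR save record: one slot when IBRSB is available plus one when MPX is usable. vmx_save_msr() below then appends { index, value } entries and bumps ctxt->count, which vmx_load_msr() later walks. A rough standalone sketch of that record shape and append idiom, using simplified hypothetical types rather than Xen's actual hvm_msr layout:

#include <stdint.h>

#define MSR_SPEC_CTRL 0x00000048  /* architectural IA32_SPEC_CTRL index */

/* Simplified, hypothetical stand-in for the per-vCPU MSR save record. */
struct msr_entry {
    uint32_t index;
    uint32_t reserved;
    uint64_t val;
};

struct msr_record {
    uint32_t count;           /* number of valid entries */
    struct msr_entry msr[8];  /* capacity comes from the init_msr() count */
};

/* Append one MSR, mirroring the ctxt->msr[ctxt->count++] pattern below. */
void record_msr(struct msr_record *ctxt, uint32_t index, uint64_t val)
{
    ctxt->msr[ctxt->count].index = index;
    ctxt->msr[ctxt->count].reserved = 0;
    ctxt->msr[ctxt->count++].val = val;
}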
 static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
 {
+    uint32_t edx, dummy;
+
     vmx_vmcs_enter(v);
+    domain_cpuid(v->domain, 7, 0, &dummy, &dummy, &dummy, &edx);
+    if ( (edx & cpufeat_mask(X86_FEATURE_IBRSB)) && v->arch.spec_ctrl )
+    {
+        ctxt->msr[ctxt->count].index = MSR_SPEC_CTRL;
+        ctxt->msr[ctxt->count++].val = v->arch.spec_ctrl;
+    }
+
     if ( cpu_has_mpx && cpu_has_vmx_mpx )
     {
         __vmread(GUEST_BNDCFGS, &ctxt->msr[ctxt->count].val);

     {
         switch ( ctxt->msr[i].index )
         {
+        case MSR_SPEC_CTRL:
+            if ( !boot_cpu_has(X86_FEATURE_IBRSB) )
+                err = -ENXIO; /* MSR available? */
+            /*
+             * Note: SPEC_CTRL_STIBP is specified as safe to use (i.e.
+             * ignored) when STIBP isn't enumerated in hardware.
+             */
+            else if ( ctxt->msr[i].val &
+                      ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP) )
+                err = -ENXIO;
+            else
+                v->arch.spec_ctrl = ctxt->msr[i].val;
+            break;
         case MSR_IA32_BNDCFGS:
             if ( !vmx_set_guest_bndcfgs(v, ctxt->msr[i].val) &&
                  ctxt->msr[i].val )