struct cpuid_policy *p = d->arch.cpuid;
const struct cpuid_leaf leaf = { ctl->eax, ctl->ebx, ctl->ecx, ctl->edx };
int old_vendor = p->x86_vendor;
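+    /*
+     * Snapshot of the feature words containing IBRSB (leaf 7:0 edx) and
+     * IBPB (leaf 0x80000008 ebx).  XORing these against the updated values
+     * below isolates any bits which this hypercall has changed.
+     */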
+ unsigned int old_7d0 = p->feat.raw[0].d, old_e8b = p->extd.raw[8].b;
bool call_policy_changed = false; /* Avoid for_each_vcpu() unnecessarily */
d->arch.pv_domain.cpuidmasks->_7ab0 = mask;
}
+
+        /*
+         * If the IBRSB policy has changed, we need to recalculate the MSR
+         * interception bitmaps.
+         */
+ call_policy_changed = (is_hvm_domain(d) &&
+ ((old_7d0 ^ p->feat.raw[0].d) &
+ cpufeat_mask(X86_FEATURE_IBRSB)));
break;
case 0xa:
d->arch.pv_domain.cpuidmasks->e1cd = mask;
}
break;
+
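+    /* CPUID 0x80000008 ebx bit 12 is the (AMD) IBPB enumeration. */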
+ case 0x80000008:
+ /*
+ * If the IBPB policy has changed, we need to recalculate the MSR
+ * interception bitmaps.
+ */
+ call_policy_changed = (is_hvm_domain(d) &&
+ ((old_e8b ^ p->extd.raw[8].b) &
+ cpufeat_mask(X86_FEATURE_IBPB)));
+ break;
}
if ( call_policy_changed )
{

static void svm_cpuid_policy_changed(struct vcpu *v)
{
    struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
struct vmcb_struct *vmcb = arch_svm->vmcb;
+ const struct cpuid_policy *cp = v->domain->arch.cpuid;
u32 bitmap = vmcb_get_exception_intercepts(vmcb);
    if ( opt_hvm_fep ||
         (v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
        bitmap |= (1U << TRAP_invalid_op);
    else
        bitmap &= ~(1U << TRAP_invalid_op);
    vmcb_set_exception_intercepts(vmcb, bitmap);
+
+ /* Give access to MSR_PRED_CMD if the guest has been told about it. */
+ svm_intercept_msr(v, MSR_PRED_CMD,
+ cp->extd.ibpb ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
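+    /*
+     * MSR_INTERCEPT_NONE drops both read and write intercepts in the MSR
+     * permission map, while MSR_INTERCEPT_RW re-arms them, so hiding IBPB
+     * from the guest again also revokes the pass-through.
+     */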
}
static void svm_sync_vmcb(struct vcpu *v)
static void vmx_cpuid_policy_changed(struct vcpu *v)
{
+ const struct cpuid_policy *cp = v->domain->arch.cpuid;
+
    if ( opt_hvm_fep ||
         (v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
        v->arch.hvm_vmx.exception_bitmap |= (1U << TRAP_invalid_op);
    else
        v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_invalid_op);
vmx_vmcs_enter(v);
vmx_update_exception_bitmap(v);
vmx_vmcs_exit(v);
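+
+    /*
+     * The MSR intercept updates below sit outside the vmx_vmcs_enter/exit
+     * section deliberately: they edit the MSR bitmap page directly rather
+     * than any VMCS field.
+     */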
+
+ /*
+ * We can safely pass MSR_SPEC_CTRL through to the guest, even if STIBP
+ * isn't enumerated in hardware, as SPEC_CTRL_STIBP is ignored.
+ */
+ if ( cp->feat.ibrsb )
+ vmx_clear_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
+ else
+ vmx_set_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
+
+ /* MSR_PRED_CMD is safe to pass through if the guest knows about it. */
+ if ( cp->feat.ibrsb || cp->extd.ibpb )
+ vmx_clear_msr_intercept(v, MSR_PRED_CMD, VMX_MSR_RW);
+ else
+ vmx_set_msr_intercept(v, MSR_PRED_CMD, VMX_MSR_RW);
}
int vmx_guest_x86_mode(struct vcpu *v)