static const uint32_t msrs_to_send[] = {
MSR_SPEC_CTRL,
MSR_INTEL_MISC_FEATURES_ENABLES,
+ MSR_IA32_BNDCFGS,
MSR_AMD64_DR0_ADDRESS_MASK,
MSR_AMD64_DR1_ADDRESS_MASK,
MSR_AMD64_DR2_ADDRESS_MASK,
case MSR_SPEC_CTRL:
case MSR_INTEL_MISC_FEATURES_ENABLES:
+ case MSR_IA32_BNDCFGS:
case MSR_AMD64_DR0_ADDRESS_MASK:
case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
rc = guest_wrmsr(v, ctxt->msr[i].index, ctxt->msr[i].val);
*msr_content = v->arch.hvm.msr_xss;
break;
- case MSR_IA32_BNDCFGS:
- if ( !d->arch.cpuid->feat.mpx ||
- !hvm_get_guest_bndcfgs(v, msr_content) )
- goto gp_fault;
- break;
-
case MSR_K8_ENABLE_C1E:
case MSR_AMD64_NB_CFG:
/*
v->arch.hvm.msr_xss = msr_content;
break;
- case MSR_IA32_BNDCFGS:
- if ( !d->arch.cpuid->feat.mpx ||
- !hvm_set_guest_bndcfgs(v, msr_content) )
- goto gp_fault;
- break;
-
case MSR_AMD64_NB_CFG:
/* ignore the write */
break;
static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
{
- vmx_vmcs_enter(v);
-
- if ( cpu_has_mpx && cpu_has_vmx_mpx )
- {
- __vmread(GUEST_BNDCFGS, &ctxt->msr[ctxt->count].val);
- if ( ctxt->msr[ctxt->count].val )
- ctxt->msr[ctxt->count++].index = MSR_IA32_BNDCFGS;
- }
-
- vmx_vmcs_exit(v);
-
if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
{
ctxt->msr[ctxt->count].val = v->arch.hvm.msr_xss;
{
switch ( ctxt->msr[i].index )
{
- case MSR_IA32_BNDCFGS:
- if ( cpu_has_mpx && cpu_has_vmx_mpx &&
- is_canonical_address(ctxt->msr[i].val) &&
- !(ctxt->msr[i].val & IA32_BNDCFGS_RESERVED) )
- __vmwrite(GUEST_BNDCFGS, ctxt->msr[i].val);
- else if ( ctxt->msr[i].val )
- err = -ENXIO;
- break;
case MSR_IA32_XSS:
if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
v->arch.hvm.msr_xss = ctxt->msr[i].val;
return true;
}
-static bool vmx_get_guest_bndcfgs(struct vcpu *v, u64 *val)
+static bool vmx_get_guest_bndcfgs(const struct vcpu *cv, u64 *val)
{
+ /* Look the vcpu up again via its domain to obtain a non-const pointer for vmx_vmcs_enter(). */
+ struct vcpu *v = cv->domain->vcpu[cv->vcpu_id];
+
ASSERT(cpu_has_mpx && cpu_has_vmx_mpx);
vmx_vmcs_enter(v);
ret = guest_rdmsr_x2apic(v, msr, val);
break;
+ case MSR_IA32_BNDCFGS:
+ if ( !cp->feat.mpx )
+ goto gp_fault;
+
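+ /* MPX is only ever exposed to HVM guests, hence the ASSERT() below. */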
+ ASSERT(is_hvm_domain(d));
+ if ( !hvm_get_guest_bndcfgs(v, val) )
+ goto gp_fault;
+
+ break;
+
case 0x40000000 ... 0x400001ff:
if ( is_viridian_domain(d) )
{
ret = guest_wrmsr_x2apic(v, msr, val);
break;
+ case MSR_IA32_BNDCFGS:
+ if ( !cp->feat.mpx )
+ goto gp_fault;
+
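+ /* As on the read side, a set cp->feat.mpx implies an HVM guest. */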
+ ASSERT(is_hvm_domain(d));
+ if ( !hvm_set_guest_bndcfgs(v, val) )
+ goto gp_fault;
+
+ break;
+
case 0x40000000 ... 0x400001ff:
if ( is_viridian_domain(d) )
{
int (*get_guest_pat)(struct vcpu *v, u64 *);
int (*set_guest_pat)(struct vcpu *v, u64);
- bool (*get_guest_bndcfgs)(struct vcpu *v, u64 *);
+ bool (*get_guest_bndcfgs)(const struct vcpu *v, u64 *);
bool (*set_guest_bndcfgs)(struct vcpu *v, u64);
void (*set_tsc_offset)(struct vcpu *v, u64 offset, u64 at_tsc);
return hvm_funcs.get_shadow_gs_base(v);
}
-static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
+static inline bool hvm_get_guest_bndcfgs(const struct vcpu *v, u64 *val)
{
return hvm_funcs.get_guest_bndcfgs &&
hvm_funcs.get_guest_bndcfgs(v, val);