From: Jan Beulich
Date: Tue, 20 Jun 2017 14:39:41 +0000 (+0200)
Subject: x86: avoid leaking BND* between vCPU-s
X-Git-Tag: RELEASE-4.6.6~11
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=86f4ece7c6b0851d276e5fb42dcdedc6b4e9bb10;p=xen.git

x86: avoid leaking BND* between vCPU-s

For MPX (BND<n>, BNDCFGU, and BNDSTATUS) the situation is less clear,
and the SDM has not entirely consistent information for that case.
While experimentally the instructions don't change register state as
long as the two XCR0 bits aren't both 1, be on the safe side and enable
both if BNDCFGS.EN is being set the first time.

This is XSA-220.

Reported-by: Andrew Cooper
Signed-off-by: Jan Beulich
Reviewed-by: Andrew Cooper
master commit: de20bb6c4f65c4161e0931402613f9ffac86302d
master date: 2017-06-20 14:36:51 +0200
---

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 01c331e5b4..5aac3f2fc0 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -31,6 +31,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -625,6 +626,45 @@ static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
     return 0;
 }
 
+static bool_t vmx_set_guest_bndcfgs(struct vcpu *v, u64 val)
+{
+    if ( !cpu_has_mpx || !cpu_has_vmx_mpx ||
+         !is_canonical_address(val) ||
+         (val & IA32_BNDCFGS_RESERVED) )
+        return 0;
+
+    /*
+     * While MPX instructions are supposed to be gated on XCR0.BND*, let's
+     * nevertheless force the relevant XCR0 bits on when the feature is being
+     * enabled in BNDCFGS.
+     */
+    if ( (val & IA32_BNDCFGS_ENABLE) &&
+         !(v->arch.xcr0_accum & (XSTATE_BNDREGS | XSTATE_BNDCSR)) )
+    {
+        uint64_t xcr0 = get_xcr0();
+        int rc;
+
+        if ( v != current )
+            return 0;
+
+        rc = handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
+                           xcr0 | XSTATE_BNDREGS | XSTATE_BNDCSR);
+
+        if ( rc )
+        {
+            HVM_DBG_LOG(DBG_LEVEL_1, "Failed to force XCR0.BND*: %d", rc);
+            return 0;
+        }
+
+        if ( handle_xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0) )
+            /* nothing, best effort only */;
+    }
+
+    __vmwrite(GUEST_BNDCFGS, val);
+
+    return 1;
+}
+
 static unsigned int __init vmx_init_msr(void)
 {
     return cpu_has_mpx && cpu_has_vmx_mpx;
@@ -656,11 +696,8 @@ static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
         switch ( ctxt->msr[i].index )
         {
         case MSR_IA32_BNDCFGS:
-            if ( cpu_has_mpx && cpu_has_vmx_mpx &&
-                 is_canonical_address(ctxt->msr[i].val) &&
-                 !(ctxt->msr[i].val & IA32_BNDCFGS_RESERVED) )
-                __vmwrite(GUEST_BNDCFGS, ctxt->msr[i].val);
-            else
+            if ( !vmx_set_guest_bndcfgs(v, ctxt->msr[i].val) &&
+                 ctxt->msr[i].val )
                 err = -ENXIO;
             break;
         default:
@@ -2552,11 +2589,8 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         break;
     }
     case MSR_IA32_BNDCFGS:
-        if ( !cpu_has_mpx || !cpu_has_vmx_mpx ||
-             !is_canonical_address(msr_content) ||
-             (msr_content & IA32_BNDCFGS_RESERVED) )
+        if ( !vmx_set_guest_bndcfgs(v, msr_content) )
             goto gp_fault;
-        __vmwrite(GUEST_BNDCFGS, msr_content);
         break;
     case IA32_FEATURE_CONTROL_MSR:
     case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_TRUE_ENTRY_CTLS:
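
For illustration, below is a minimal stand-alone sketch (not Xen code) of the
validity check vmx_set_guest_bndcfgs() applies before writing GUEST_BNDCFGS:
the value must have a canonical address portion and must not set reserved
bits. The constants mirror the architectural layout of IA32_BNDCFGS (EN =
bit 0, BNDPRESERVE = bit 1, bits 2-11 reserved, bound directory base in bits
12-63); bndcfgs_valid() is a hypothetical helper, and is_canonical_address()
here assumes 48-bit virtual addresses.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IA32_BNDCFGS_ENABLE    0x0000000000000001ULL
#define IA32_BNDCFGS_PRESERVE  0x0000000000000002ULL
#define IA32_BNDCFGS_RESERVED  0x0000000000000ffcULL

/* Canonical for 48-bit VAs: bits 63..47 are a sign extension of bit 47. */
static bool is_canonical_address(uint64_t addr)
{
    return ((int64_t)addr >> 47) == ((int64_t)addr >> 63);
}

/* Hypothetical stand-in for the first check in vmx_set_guest_bndcfgs(). */
static bool bndcfgs_valid(uint64_t val)
{
    return is_canonical_address(val) && !(val & IA32_BNDCFGS_RESERVED);
}

int main(void)
{
    /* Enabled, bound directory at 0x1000: accepted. */
    printf("%d\n", bndcfgs_valid(0x1000 | IA32_BNDCFGS_ENABLE)); /* 1 */
    /* Reserved bit 2 set: rejected (the MSR intercept raises #GP). */
    printf("%d\n", bndcfgs_valid(0x4));                          /* 0 */
    /* Non-canonical base address: rejected. */
    printf("%d\n", bndcfgs_valid(1ULL << 63));                   /* 0 */
    return 0;
}

Note that the validation is only the entry condition; the part of
vmx_set_guest_bndcfgs() that actually closes the leak is the XCR0 handling,
which forces XSTATE_BNDREGS and XSTATE_BNDCSR on via handle_xsetbv() the
first time BNDCFGS.EN is set, so that BND0-3/BNDCFGU/BNDSTATUS become part
of the vCPU's managed xsave state; the subsequent restore of the original
XCR0 is best effort only, as the comment in the patch notes.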