hvm_{get,set}_guest_bndcfgs() are thin wrappers around accessing MSR_BNDCFGS.
MPX was implemented in Skylake-uarch CPUs and dropped from subsequent CPUs,
and is disabled by default in Xen VMs.
It would be nice to move all the logic into vmx_msr_{read,write}_intercept(),
but the common HVM migration code uses guest_{rd,wr}msr(). Therefore, use
{get,set}_regs() to reduce the quantity of "common" HVM code.
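
For reference, the {get,set}_regs() path delegates the access to per-vcpu-type
logic. A minimal sketch of the read side, assuming the pre-existing
hvm_funcs.get_reg hook and the get_reg label at the tail of guest_rdmsr()
(illustrative only, not verbatim Xen code):

     get_reg: /* Delegate register access to per-vcpu-type logic. */
        if ( is_pv_domain(d) )
            *val = pv_get_reg(v, msr);
        else
            *val = hvm_get_reg(v, msr);
        ret = X86EMUL_OKAY;
        break;

    /* hvm_get_reg() simply forwards to the vendor hook: */
    uint64_t hvm_get_reg(struct vcpu *v, unsigned int reg)
    {
        ASSERT(v == current || !vcpu_runnable(v));

        return alternative_call(hvm_funcs.get_reg, v, reg);
    }
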
In lieu of having hvm_set_guest_bndcfgs() split out, use some #ifdef
CONFIG_HVM in guest_wrmsr(). In vmx_{get,set}_regs(), split the switch
statements into two, depending on whether they require remote VMCS
acquisition or not.
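
In outline, vmx_get_reg() ends up with the following shape (a sketch of the
structure which the hunks below produce; the set side is symmetrical, using
__vmwrite() instead of __vmread()):

    uint64_t vmx_get_reg(struct vcpu *v, unsigned int reg)
    {
        uint64_t val = 0;

        /* Logic which doesn't require remote VMCS acquisition. */
        switch ( reg )
        {
        case MSR_SPEC_CTRL:
            /* ... handled without touching the VMCS ... */
            return val;
        }

        /* Logic which maybe requires remote VMCS acquisition. */
        vmx_vmcs_enter(v);
        switch ( reg )
        {
        case MSR_IA32_BNDCFGS:
            __vmread(GUEST_BNDCFGS, &val);
            break;
        }
        vmx_vmcs_exit(v);

        return val;
    }
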
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
return 1;
}
-bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val)
-{
- if ( !hvm_funcs.set_guest_bndcfgs ||
- !is_canonical_address(val) ||
- (val & IA32_BNDCFGS_RESERVED) )
- return false;
-
- /*
- * While MPX instructions are supposed to be gated on XCR0.BND*, let's
- * nevertheless force the relevant XCR0 bits on when the feature is being
- * enabled in BNDCFGS.
- */
- if ( (val & IA32_BNDCFGS_ENABLE) &&
- !(v->arch.xcr0_accum & (X86_XCR0_BNDREGS | X86_XCR0_BNDCSR)) )
- {
- uint64_t xcr0 = get_xcr0();
- int rc;
-
- if ( v != current )
- return false;
-
- rc = handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
- xcr0 | X86_XCR0_BNDREGS | X86_XCR0_BNDCSR);
-
- if ( rc )
- {
- HVM_DBG_LOG(DBG_LEVEL_1, "Failed to force XCR0.BND*: %d", rc);
- return false;
- }
-
- if ( handle_xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0) )
- /* nothing, best effort only */;
- }
-
- return alternative_call(hvm_funcs.set_guest_bndcfgs, v, val);
-}
-
/*
* Get the ratio to scale host TSC frequency to gtsc_khz. zero will be
* returned if TSC scaling is unavailable or ratio cannot be handled
return 1;
}
-static bool vmx_set_guest_bndcfgs(struct vcpu *v, u64 val)
-{
- ASSERT(cpu_has_mpx && cpu_has_vmx_mpx);
-
- vmx_vmcs_enter(v);
- __vmwrite(GUEST_BNDCFGS, val);
- vmx_vmcs_exit(v);
-
- return true;
-}
-
-static bool vmx_get_guest_bndcfgs(struct vcpu *v, u64 *val)
-{
- ASSERT(cpu_has_mpx && cpu_has_vmx_mpx);
-
- vmx_vmcs_enter(v);
- __vmread(GUEST_BNDCFGS, val);
- vmx_vmcs_exit(v);
-
- return true;
-}
-
static void vmx_handle_cd(struct vcpu *v, unsigned long value)
{
if ( !paging_mode_hap(v->domain) )
uint64_t val = 0;
int rc;
+ /* Logic which doesn't require remote VMCS acquisition. */
switch ( reg )
{
case MSR_SPEC_CTRL:
domain_crash(d);
}
return val;
+ }
+
+ /* Logic which maybe requires remote VMCS acquisition. */
+ vmx_vmcs_enter(v);
+ switch ( reg )
+ {
+ case MSR_IA32_BNDCFGS:
+ __vmread(GUEST_BNDCFGS, &val);
+ break;
default:
printk(XENLOG_G_ERR "%s(%pv, 0x%08x) Bad register\n",
__func__, v, reg);
domain_crash(d);
- return 0;
+ break;
}
+ vmx_vmcs_exit(v);
+
+ return val;
}
static void vmx_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
struct domain *d = v->domain;
int rc;
+ /* Logic which doesn't require remote VMCS acquisition. */
switch ( reg )
{
case MSR_SPEC_CTRL:
__func__, v, reg, rc);
domain_crash(d);
}
+ return;
+ }
+
+ /* Logic which maybe requires remote VMCS acquisition. */
+ vmx_vmcs_enter(v);
+ switch ( reg )
+ {
+ case MSR_IA32_BNDCFGS:
+ __vmwrite(GUEST_BNDCFGS, val);
break;
default:
__func__, v, reg, val);
domain_crash(d);
}
+ vmx_vmcs_exit(v);
}
static struct hvm_function_table __initdata vmx_function_table = {
vmx_function_table.tsc_scaling.setup = vmx_setup_tsc_scaling;
}
- if ( cpu_has_mpx && cpu_has_vmx_mpx )
- {
- vmx_function_table.set_guest_bndcfgs = vmx_set_guest_bndcfgs;
- vmx_function_table.get_guest_bndcfgs = vmx_get_guest_bndcfgs;
- }
-
lbr_tsx_fixup_check();
ler_to_fixup_check();
int (*get_guest_pat)(struct vcpu *v, u64 *);
int (*set_guest_pat)(struct vcpu *v, u64);
- bool (*get_guest_bndcfgs)(struct vcpu *v, u64 *);
- bool (*set_guest_bndcfgs)(struct vcpu *v, u64);
-
void (*set_tsc_offset)(struct vcpu *v, u64 offset, u64 at_tsc);
void (*inject_event)(const struct x86_event *event);
void hvm_set_info_guest(struct vcpu *v);
-bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val);
-
int hvm_vmexit_cpuid(struct cpu_user_regs *regs, unsigned int inst_len);
void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);
return alternative_call(hvm_funcs.get_shadow_gs_base, v);
}
-static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
-{
- return hvm_funcs.get_guest_bndcfgs &&
- alternative_call(hvm_funcs.get_guest_bndcfgs, v, val);
-}
-
#define has_hvm_params(d) \
((d)->arch.hvm.params != NULL)
unsigned long hvm_get_shadow_gs_base(struct vcpu *v);
void hvm_cpuid_policy_changed(struct vcpu *v);
void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset, uint64_t at_tsc);
-bool hvm_get_guest_bndcfgs(struct vcpu *v, uint64_t *val);
/* End of prototype list */
#include <asm/msr.h>
#include <asm/pv/domain.h>
#include <asm/setup.h>
+#include <asm/xstate.h>
#include <public/hvm/params.h>
break;
case MSR_IA32_BNDCFGS:
- if ( !cp->feat.mpx || !is_hvm_domain(d) ||
- !hvm_get_guest_bndcfgs(v, val) )
+ if ( !cp->feat.mpx ) /* Implies Intel HVM only */
goto gp_fault;
- break;
+ goto get_reg;
case MSR_IA32_XSS:
if ( !cp->xstate.xsaves )
ret = guest_wrmsr_x2apic(v, msr, val);
break;
+#ifdef CONFIG_HVM
case MSR_IA32_BNDCFGS:
- if ( !cp->feat.mpx || !is_hvm_domain(d) ||
- !hvm_set_guest_bndcfgs(v, val) )
+ if ( !cp->feat.mpx || /* Implies Intel HVM only */
+ !is_canonical_address(val) || (val & IA32_BNDCFGS_RESERVED) )
goto gp_fault;
- break;
+
+ /*
+ * While MPX instructions are supposed to be gated on XCR0.BND*, let's
+ * nevertheless force the relevant XCR0 bits on when the feature is
+ * being enabled in BNDCFGS.
+ */
+ if ( (val & IA32_BNDCFGS_ENABLE) &&
+ !(v->arch.xcr0_accum & (X86_XCR0_BNDREGS | X86_XCR0_BNDCSR)) )
+ {
+ uint64_t xcr0 = get_xcr0();
+
+ if ( v != current ||
+ handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
+ xcr0 | X86_XCR0_BNDREGS | X86_XCR0_BNDCSR) )
+ goto gp_fault;
+
+ if ( handle_xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0) )
+ /* nothing, best effort only */;
+ }
+
+ goto set_reg;
+#endif /* CONFIG_HVM */
case MSR_IA32_XSS:
if ( !cp->xstate.xsaves )