- /* SPEC_CTRL_ENTRY_FROM_VMX Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+ /* SPEC_CTRL_ENTRY_FROM_VMX Req: %rsp=regs/cpuinfo, Clob: acd */
ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM
- ALTERNATIVE "", DO_SPEC_CTRL_ENTRY_FROM_HVM, X86_FEATURE_SC_MSR_HVM
+
+ .macro restore_spec_ctrl
+ mov $MSR_SPEC_CTRL, %ecx
+ movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
+ xor %edx, %edx
+ wrmsr
+ .endm
+ ALTERNATIVE "", restore_spec_ctrl, X86_FEATURE_SC_MSR_HVM
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
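For readers not steeped in the alternatives framework: the restore_spec_ctrl block above boils down to the C below. This is an illustrative sketch only; get_cpu_info() and wrmsrl() are the usual Xen helpers, and xen_spec_ctrl is the cpu_info field behind the CPUINFO_xen_spec_ctrl offset referenced by the asm.

    /* Sketch: C equivalent of the restore_spec_ctrl asm block. */
    static void restore_spec_ctrl_sketch(void)
    {
        const struct cpu_info *info = get_cpu_info();

        /* Reload Xen's choice of MSR_SPEC_CTRL; the upper 32 bits are zero. */
        wrmsrl(MSR_SPEC_CTRL, info->xen_spec_ctrl);
    }

Unlike the dropped DO_SPEC_CTRL_ENTRY_FROM_HVM path, nothing is read from hardware or stashed in current here: the guest value has already been saved by the VMEXIT MSR list, so only Xen's value needs writing.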
/* Hardware clears MSR_DEBUGCTL on VMExit. Reinstate it if debugging Xen. */
mov VCPUMSR_spec_ctrl_raw(%rax), %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
- /* SPEC_CTRL_EXIT_TO_VMX Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
- ALTERNATIVE "", DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_HVM
+ /* SPEC_CTRL_EXIT_TO_VMX Req: %rsp=regs/cpuinfo Clob: */
ALTERNATIVE "", __stringify(verw CPUINFO_verw_sel(%rsp)), X86_FEATURE_SC_VERW_HVM
mov VCPU_hvm_guest_cr2(%rbx),%rax
SAVE_ALL
/*
- * PV variant needed here as no guest code has executed (so
- * MSR_SPEC_CTRL can't have changed value), and NMIs/MCEs are liable
- * to hit (in which case the HVM variant might corrupt things).
+ * SPEC_CTRL_ENTRY notes
+ *
+ * If we end up here, no guest code has executed. The MSR lists have
+ * not been processed, so we still have Xen's choice of MSR_SPEC_CTRL
+ * in context, and the RSB is unchanged.
*/
- SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo Clob: acd */
- /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
call vmx_vmentry_failure
jmp .Lvmx_process_softirqs
static void vmx_cpuid_policy_changed(struct vcpu *v)
{
const struct cpuid_policy *cp = v->domain->arch.cpuid;
+ int rc = 0;
if ( opt_hvm_fep ||
(v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
vmx_vmcs_enter(v);
vmx_update_exception_bitmap(v);
- vmx_vmcs_exit(v);
/*
* We can safely pass MSR_SPEC_CTRL through to the guest, even if STIBP
* isn't enumerated in hardware, as SPEC_CTRL_STIBP is ignored.
*/
if ( cp->feat.ibrsb )
+ {
vmx_clear_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
+
+ rc = vmx_add_guest_msr(v, MSR_SPEC_CTRL, 0);
+ if ( rc )
+ goto out;
+ }
else
+ {
vmx_set_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
+ rc = vmx_del_msr(v, MSR_SPEC_CTRL, VMX_MSR_GUEST);
+ if ( rc && rc != -ESRCH )
+ goto out;
+ rc = 0; /* Tolerate -ESRCH */
+ }
+
    /* MSR_PRED_CMD is safe to pass through if the guest knows about it. */
    if ( cp->feat.ibrsb || cp->extd.ibpb )
        vmx_clear_msr_intercept(v, MSR_PRED_CMD, VMX_MSR_RW);
    else
        vmx_set_msr_intercept(v, MSR_PRED_CMD, VMX_MSR_RW);

    /* MSR_FLUSH_CMD is safe to pass through if the guest knows about it. */
    if ( cp->feat.l1d_flush )
        vmx_clear_msr_intercept(v, MSR_FLUSH_CMD, VMX_MSR_RW);
    else
        vmx_set_msr_intercept(v, MSR_FLUSH_CMD, VMX_MSR_RW);
+
+ out:
+ vmx_vmcs_exit(v);
+
+ if ( rc )
+ {
+ printk(XENLOG_G_ERR "%pv MSR list error: %d\n", v, rc);
+ domain_crash(v->domain);
+ }
}
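For reference, each slot that vmx_add_guest_msr() adds to (and vmx_del_msr() removes from) the guest load/save area has the layout below; the field names follow Xen's struct vmx_msr_entry, and the layout itself is fixed by the Intel SDM, so treat this as a reader's sketch rather than new code. On VMENTRY the processor loads data into the named MSR, and on VMEXIT it stores the current MSR value back into data, which is what lets the guest's MSR_SPEC_CTRL be captured with no instructions executing after the VMEXIT.

    /* One slot in a VMX MSR load/save area. */
    struct vmx_msr_entry {
        uint32_t index;   /* MSR index, e.g. MSR_SPEC_CTRL */
        uint32_t mbz;     /* Reserved; must be zero */
        uint64_t data;    /* Loaded on VMENTRY, saved on VMEXIT */
    };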
static uint64_t vmx_get_reg(struct vcpu *v, unsigned int reg)
{
struct domain *d = v->domain;
+ uint64_t val = 0;
+ int rc;
switch ( reg )
{
case MSR_SPEC_CTRL:
- return v->arch.msrs->spec_ctrl.raw;
+ rc = vmx_read_guest_msr(v, reg, &val);
+ if ( rc )
+ {
+ printk(XENLOG_G_ERR "%s(%pv, 0x%08x) MSR list error: %d\n",
+ __func__, v, reg, rc);
+ domain_crash(d);
+ }
+ return val;
default:
printk(XENLOG_G_ERR "%s(%pv, 0x%08x) Bad register\n",
static void vmx_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
{
struct domain *d = v->domain;
+ int rc;
switch ( reg )
{
case MSR_SPEC_CTRL:
- v->arch.msrs->spec_ctrl.raw = val;
+ rc = vmx_write_guest_msr(v, reg, val);
+ if ( rc )
+ {
+ printk(XENLOG_G_ERR "%s(%pv, 0x%08x) MSR list error: %d\n",
+ __func__, v, reg, rc);
+ domain_crash(d);
+ }
break;
default:
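The vmx_read_guest_msr()/vmx_write_guest_msr() calls used by the two hooks above are, conceptually, lookups into the load/save area sketched earlier. A deliberately naive illustration follows; Xen's real helpers are more involved (among other things they handle the separate host and guest areas), so this is a sketch, not the actual implementation.

    /* Sketch: find an MSR's slot in a guest load/save area, or NULL. */
    static struct vmx_msr_entry *find_msr_slot(struct vmx_msr_entry *area,
                                               unsigned int nr_entries,
                                               uint32_t index)
    {
        unsigned int i;

        for ( i = 0; i < nr_entries; i++ )
            if ( area[i].index == index )
                return &area[i];

        return NULL; /* Callers see this as -ESRCH, tolerated in the del path above. */
    }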
/* Container object for per-vCPU MSRs */
struct vcpu_msrs
{
- /* 0x00000048 - MSR_SPEC_CTRL */
+ /*
+ * 0x00000048 - MSR_SPEC_CTRL
+ *
+ * For PV guests, this holds the guest kernel value. It is accessed on
+ * every entry/exit path.
+ *
+ * For VT-x guests, the guest value is held in the MSR guest load/save
+ * list.
+ */
struct {
uint32_t raw;
} spec_ctrl;
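The practical upshot of the comment above is that common code can no longer treat spec_ctrl.raw as authoritative for every guest type. A minimal dispatch sketch, assuming accesses for HVM vCPUs are routed through the hvm_get_reg() wrapper and hence to vmx_get_reg() shown earlier (the helper name here is hypothetical):

    /* Sketch: where the guest's MSR_SPEC_CTRL value actually lives. */
    static uint64_t guest_spec_ctrl(struct vcpu *v)
    {
        if ( is_hvm_vcpu(v) )
            return hvm_get_reg(v, MSR_SPEC_CTRL); /* VT-x: MSR load/save list */

        return v->arch.msrs->spec_ctrl.raw;       /* PV: accessed on entry/exit */
    }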
* path, or late in the exit path after restoring the guest value. This
* will corrupt the guest value.
*
- * Factor 1 is dealt with by relying on NMIs/MCEs being blocked immediately
- * after VMEXIT. The VMEXIT-specific code reads MSR_SPEC_CTRL and updates
- * current before loading Xen's MSR_SPEC_CTRL setting.
+ * Factor 1 is dealt with:
+ * - On VMX by using MSR load/save lists to have vmentry/exit atomically
+ * load/save the guest value. Xen's value is loaded in regular code, and
+ * there is no need to use the shadow logic (below).
*
* Factor 2 is harder. We maintain a shadow_spec_ctrl value, and a use_shadow
* boolean in the per cpu spec_ctrl_flags. The synchronous use is:
#endif
.endm
-.macro DO_SPEC_CTRL_ENTRY_FROM_HVM
-/*
- * Requires %rbx=current, %rsp=regs/cpuinfo
- * Clobbers %rax, %rcx, %rdx
- *
- * The common case is that a guest has direct access to MSR_SPEC_CTRL, at
- * which point we need to save the guest value before setting IBRS for Xen.
- * Unilaterally saving the guest value is shorter and faster than checking.
- */
- mov $MSR_SPEC_CTRL, %ecx
- rdmsr
-
- /* Stash the value from hardware. */
- mov VCPU_arch_msrs(%rbx), %rdx
- mov %eax, VCPUMSR_spec_ctrl_raw(%rdx)
- xor %edx, %edx
-
- /* Clear SPEC_CTRL shadowing *before* loading Xen's value. */
- andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)
-
- /* Load Xen's intended value. */
- movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
- wrmsr
-.endm
-
.macro DO_SPEC_CTRL_ENTRY maybexen:req
/*
* Requires %rsp=regs (also cpuinfo if !maybexen)