*msr_content = var_range_base[index];
break;
+ case MSR_PRED_CMD:
+ /* Write-only */
+ goto gp_fault;
+
+ case MSR_SPEC_CTRL:
+ hvm_cpuid(7, NULL, NULL, NULL, &edx);
+ if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) )
+ goto gp_fault;
+ *msr_content = v->arch.spec_ctrl;
+ break;
+
+ case MSR_ARCH_CAPABILITIES:
+ /* Not implemented yet. */
+ goto gp_fault;
+
case MSR_K8_ENABLE_C1E:
case MSR_AMD64_NB_CFG:
/*
{
struct vcpu *v = current;
bool_t mtrr;
- unsigned int edx, index;
+ unsigned int edx, ebx, index;
int ret = X86EMUL_OKAY;
    struct arch_domain *currad = &current->domain->arch;
goto gp_fault;
break;
+ case MSR_SPEC_CTRL:
+ hvm_cpuid(7, NULL, NULL, NULL, &edx);
+ if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) )
+ goto gp_fault; /* MSR available? */
+
+ /*
+ * Note: SPEC_CTRL_STIBP is specified as safe to use (i.e. ignored)
+ * when STIBP isn't enumerated in hardware.
+ */
+
+ if ( msr_content & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP) )
+ goto gp_fault; /* Rsvd bit set? */
+
+ v->arch.spec_ctrl = msr_content;
+ break;
+
+ case MSR_PRED_CMD:
+ hvm_cpuid(7, NULL, NULL, NULL, &edx);
+ hvm_cpuid(0x80000008, NULL, &ebx, NULL, NULL);
+ if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) &&
+ !(ebx & cpufeat_mask(X86_FEATURE_IBPB)) )
+ goto gp_fault; /* MSR available? */
+
+ /*
+ * The only defined behaviour is when writing PRED_CMD_IBPB. In
+ * practice, real hardware accepts any value without faulting.
+ */
+ if ( msr_content & PRED_CMD_IBPB )
+ wrmsrl(MSR_PRED_CMD, PRED_CMD_IBPB);
+ break;
+
+ case MSR_ARCH_CAPABILITIES:
+ /* Read-only */
+ goto gp_fault;
+
case MSR_AMD64_NB_CFG:
/* ignore the write */
break;
vpmu_msr = 0;
switch ( regs->_ecx )
{
+ uint32_t ebx, dummy;
+
case MSR_FS_BASE:
if ( is_pv_32bit_domain(currd) ||
!is_canonical_address(msr_content) )
break;
case MSR_INTEL_PLATFORM_INFO:
+ case MSR_ARCH_CAPABILITIES:
/* The MSR is read-only. */
goto fail;
+ case MSR_SPEC_CTRL:
+ domain_cpuid(currd, 7, 0, &dummy, &dummy, &dummy, &edx);
+ if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) )
+ goto fail; /* MSR available? */
+
+ /*
+ * Note: SPEC_CTRL_STIBP is specified as safe to use (i.e. ignored)
+ * when STIBP isn't enumerated in hardware.
+ */
+
+ if ( eax & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP) )
+ goto fail; /* Rsvd bit set? */
+
+ v->arch.spec_ctrl = eax;
+ break;
+
+ case MSR_PRED_CMD:
+ domain_cpuid(currd, 7, 0, &dummy, &dummy, &dummy, &edx);
+ domain_cpuid(currd, 0x80000008, 0, &dummy, &ebx, &dummy, &dummy);
+ if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) &&
+ !(ebx & cpufeat_mask(X86_FEATURE_IBPB)) )
+ goto fail; /* MSR available? */
+
+ /*
+ * The only defined behaviour is when writing PRED_CMD_IBPB. In
+ * practice, real hardware accepts any value without faulting.
+ */
+ if ( eax & PRED_CMD_IBPB )
+ wrmsrl(MSR_PRED_CMD, PRED_CMD_IBPB);
+ break;
+
case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
vpmu_msr = 0;
switch ( regs->_ecx )
{
+ uint32_t edx, dummy;
+
case MSR_FS_BASE:
if ( is_pv_32bit_domain(currd) )
goto fail;
regs->eax = regs->edx = 0;
break;
+ case MSR_PRED_CMD:
+ /* Write-only */
+ goto fail;
+
+ case MSR_SPEC_CTRL:
+ domain_cpuid(currd, 7, 0, &dummy, &dummy, &dummy, &edx);
+ if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) )
+ goto fail;
+ regs->eax = v->arch.spec_ctrl;
+ regs->edx = 0;
+ break;
+
case MSR_INTEL_PLATFORM_INFO:
if ( !boot_cpu_has(X86_FEATURE_MSR_PLATFORM_INFO) )
goto fail;
regs->eax = regs->edx = 0;
break;
+ case MSR_ARCH_CAPABILITIES:
+ /* Not implemented yet. */
+ goto fail;
+
case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2: