    struct vcpu *v = current;
    struct domain *d = v->domain;
    uint64_t *var_range_base, *fixed_range_base;
-    int ret = X86EMUL_OKAY;
+    int ret;
    var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
    fixed_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.fixed_ranges;
+    if ( (ret = guest_rdmsr(v, msr, msr_content)) != X86EMUL_UNHANDLEABLE )
+        return ret;
+
+    ret = X86EMUL_OKAY;
+
    switch ( msr )
    {
        unsigned int index;
            goto gp_fault;
        break;
-    case MSR_INTEL_PLATFORM_INFO:
-        *msr_content = MSR_PLATFORM_INFO_CPUID_FAULTING;
-        break;
-
-    case MSR_INTEL_MISC_FEATURES_ENABLES:
-        *msr_content = 0;
-        if ( current->arch.msr->misc_features_enables.cpuid_faulting )
-            *msr_content |= MSR_MISC_FEATURES_CPUID_FAULTING;
-        break;
-
    default:
        if ( passive_domain_do_rdmsr(msr, msr_content) )
            goto done;
        return 0;
    }
+int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
+{
+    const struct msr_domain_policy *dp = v->domain->arch.msr;
+    const struct msr_vcpu_policy *vp = v->arch.msr;
+
+    switch ( msr )
+    {
+    case MSR_INTEL_PLATFORM_INFO:
+        if ( !dp->plaform_info.available )
+            goto gp_fault;
+        *val = (uint64_t)dp->plaform_info.cpuid_faulting <<
+               _MSR_PLATFORM_INFO_CPUID_FAULTING;
+        break;
+
+    case MSR_INTEL_MISC_FEATURES_ENABLES:
+        if ( !vp->misc_features_enables.available )
+            goto gp_fault;
+        *val = (uint64_t)vp->misc_features_enables.cpuid_faulting <<
+               _MSR_MISC_FEATURES_CPUID_FAULTING;
+        break;
+
+    default:
+        return X86EMUL_UNHANDLEABLE;
+    }
+
+    return X86EMUL_OKAY;
+
+ gp_fault:
+    return X86EMUL_EXCEPTION;
+}
+
/*
 * Local variables:
 * mode: C
    const struct vcpu *curr = current;
    const struct domain *currd = curr->domain;
    bool vpmu_msr = false;
+    int ret;
+
+    if ( (ret = guest_rdmsr(curr, reg, val)) != X86EMUL_UNHANDLEABLE )
+        return ret;
    switch ( reg )
    {
        *val = 0;
        return X86EMUL_OKAY;
-    case MSR_INTEL_PLATFORM_INFO:
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
-             rdmsr_safe(MSR_INTEL_PLATFORM_INFO, *val) )
-            break;
-        *val = 0;
-        if ( this_cpu(cpuid_faulting_enabled) )
-            *val |= MSR_PLATFORM_INFO_CPUID_FAULTING;
-        return X86EMUL_OKAY;
-
-    case MSR_INTEL_MISC_FEATURES_ENABLES:
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
-             rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES, *val) )
-            break;
-        *val = 0;
-        if ( curr->arch.msr->misc_features_enables.cpuid_faulting )
-            *val |= MSR_MISC_FEATURES_CPUID_FAULTING;
-        return X86EMUL_OKAY;
-
    case MSR_P6_PERFCTR(0) ... MSR_P6_PERFCTR(7):
    case MSR_P6_EVNTSEL(0) ... MSR_P6_EVNTSEL(3):
    case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR2:
int init_domain_msr_policy(struct domain *d);
int init_vcpu_msr_policy(struct vcpu *v);
+/*
+ * The below functions can return X86EMUL_UNHANDLEABLE, which means that the
+ * MSR is not (yet) handled by them and must be processed by the legacy
+ * handlers. Such behaviour is needed during the transition period until all
+ * rd/wrmsr accesses are handled by the new MSR infrastructure.
+ */
+int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val);
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_MSR_H */
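
For clarity, here is a minimal sketch of the calling convention described in the comment above, mirroring the hvm and pv hunks: guest_rdmsr() is consulted first, and only an X86EMUL_UNHANDLEABLE result falls through to the caller's existing MSR switch. The wrapper function and the legacy_read_msr() fallback are hypothetical names used for illustration only; they are not part of this patch.

static int example_read_msr(const struct vcpu *v, uint32_t msr, uint64_t *val)
{
    int ret;

    /* Try the new, policy-based handler first. */
    if ( (ret = guest_rdmsr(v, msr, val)) != X86EMUL_UNHANDLEABLE )
        return ret;             /* X86EMUL_OKAY or X86EMUL_EXCEPTION */

    /* Not covered yet: fall back to the legacy per-caller handling
     * (hypothetical helper standing in for the existing switch). */
    return legacy_read_msr(msr, val);
}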