@@ ... @@ vmx_msr_read_intercept
        if ( vpmu_do_rdmsr(msr, msr_content) )
            goto gp_fault;
        break;
+
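+    /*
+     * Probe the host for MSR_INTEL_PLATFORM_INFO: fault if it is absent,
+     * otherwise let the guest read the MSR as 0, advertising no platform
+     * features.
+     */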
+    case MSR_INTEL_PLATFORM_INFO:
+        if ( rdmsr_safe(MSR_INTEL_PLATFORM_INFO, *msr_content) )
+            goto gp_fault;
+        *msr_content = 0;
+        break;
+
    default:
        if ( passive_domain_do_rdmsr(msr, msr_content) )
            goto done;
@@ ... @@ vmx_msr_write_intercept
        if ( vpmu_do_wrmsr(msr, msr_content, 0) )
            goto gp_fault;
        break;
+
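+    /*
+     * Tolerate writes of exactly 0, and only when the MSR exists on the
+     * host; any other write still faults.
+     */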
+    case MSR_INTEL_PLATFORM_INFO:
+        if ( msr_content ||
+             rdmsr_safe(MSR_INTEL_PLATFORM_INFO, msr_content) )
+            goto gp_fault;
+        break;
+
    default:
        if ( passive_domain_do_wrmsr(msr, msr_content) )
            return X86EMUL_OKAY;
@@ ... @@
        if ( v->arch.debugreg[7] & DR7_ACTIVE_MASK )
            wrmsrl(regs->_ecx, msr_content);
        break;
+
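+    /*
+     * This PV path is vendor-common, so additionally insist on Intel
+     * hardware before tolerating a write of 0 to an MSR which exists.
+     */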
+    case MSR_INTEL_PLATFORM_INFO:
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+             msr_content ||
+             rdmsr_safe(MSR_INTEL_PLATFORM_INFO, msr_content) )
+            goto fail;
+        break;
+
    case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
    case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
    case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
@@ ... @@
        /* No extra capabilities are supported */
        regs->eax = regs->edx = 0;
        break;
+
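+    /*
+     * As for writes: Intel hardware with the MSR present only.  Reads
+     * are tolerated but yield 0, advertising no platform features.
+     */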
+    case MSR_INTEL_PLATFORM_INFO:
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+             rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val) )
+            goto fail;
+        regs->eax = regs->edx = 0;
+        break;
+
    case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
    case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
    case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2: