         /* Not offered to guests. */
         goto gp_fault;
+    case MSR_AMD_PATCHLEVEL:
+        BUILD_BUG_ON(MSR_IA32_UCODE_REV != MSR_AMD_PATCHLEVEL);
+        /*
+         * AMD and Intel use the same MSR for the current microcode version.
+         *
+         * There is no need to jump through the SDM-provided hoops for Intel.
+         * A guest might itself perform the "write 0, CPUID, read" sequence,
+         * but servicing the CPUID for the guest typically won't result in
+         * actually executing a CPUID instruction.
+         *
+         * As a guest can't influence the value of this MSR, the value will be
+         * from Xen's last microcode load, which can be forwarded straight to
+         * the guest.
+         */
+        if ( (cp->x86_vendor != X86_VENDOR_INTEL &&
+              cp->x86_vendor != X86_VENDOR_AMD) ||
+             (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
+              boot_cpu_data.x86_vendor != X86_VENDOR_AMD) ||
+             rdmsr_safe(MSR_AMD_PATCHLEVEL, *val) )
+            goto gp_fault;
+        break;
+
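For context, a minimal guest-side sketch of the "write 0, CPUID, read" sequence referenced in the comment above, as documented in the Intel SDM; wrmsr(), rdmsr() and cpuid_eax() here are hypothetical stand-ins for whatever accessors the guest OS uses:

    #include <stdint.h>

    /*
     * Hypothetical guest accessors: wrmsr()/rdmsr() wrap the WRMSR/RDMSR
     * instructions; cpuid_eax() executes CPUID with the given leaf in EAX.
     */
    static uint32_t intel_ucode_rev(void)
    {
        wrmsr(MSR_IA32_UCODE_REV, 0); /* SDM step 1: write 0 to the MSR. */
        cpuid_eax(1);                 /* SDM step 2: execute CPUID leaf 1. */

        /* SDM step 3: Intel reports the update signature in bits 63:32. */
        return rdmsr(MSR_IA32_UCODE_REV) >> 32;
    }

Under Xen, step 2 is emulated without a real CPUID instruction being executed, and step 3 observes the value from Xen's last microcode load, which is why the handler above can forward the MSR straight to the guest.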
     case MSR_SPEC_CTRL:
         if ( !cp->feat.ibrsb )
             goto gp_fault;
         /* Not offered to guests. */
         goto gp_fault;
+    case MSR_AMD_PATCHLEVEL:
+        BUILD_BUG_ON(MSR_IA32_UCODE_REV != MSR_AMD_PATCHLEVEL);
+        /*
+         * AMD and Intel use the same MSR for the current microcode version.
+         *
+         * Both document it as read-only.  However, Intel also document that,
+         * for backwards compatibility, the OS should write 0 to it before
+         * trying to access the current microcode version.
+         */
+        if ( d->arch.cpuid->x86_vendor != X86_VENDOR_INTEL || val != 0 )
+            goto gp_fault;
+        break;
+
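Both hunks hinge on the two vendor-specific names aliasing the same MSR index, which the BUILD_BUG_ON asserts at compile time; the definitions amount to the following (0x8b, per the Intel SDM's IA32_BIOS_SIGN_ID and AMD's patch level MSR):

    #define MSR_IA32_UCODE_REV   0x0000008b /* Intel name: IA32_BIOS_SIGN_ID. */
    #define MSR_AMD_PATCHLEVEL   0x0000008b /* AMD name: current patch level. */

A single case label therefore serves both vendors, with the vendor checks supplying the differing semantics: only an Intel guest may perform the backwards-compatibility write, and only with a value of 0; everything else takes the #GP path, as both vendors document the MSR as read-only.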
     case MSR_AMD_PATCHLOADER:
         /*
          * See note on MSR_IA32_UCODE_WRITE below, which may or may not apply
         *val = 0;
         return X86EMUL_OKAY;
-    case MSR_IA32_UCODE_REV:
-        BUILD_BUG_ON(MSR_IA32_UCODE_REV != MSR_AMD_PATCHLEVEL);
-        if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
-        {
-            if ( wrmsr_safe(MSR_IA32_UCODE_REV, 0) )
-                break;
-            /* As documented in the SDM: Do a CPUID 1 here */
-            cpuid_eax(1);
-        }
-        goto normal;
-
     case MSR_FAM10H_MMIO_CONF_BASE:
         if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ||
              boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 >= 0x17 )
             return X86EMUL_OKAY;
         break;
-    case MSR_IA32_UCODE_REV:
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
-            break;
-        if ( !is_hardware_domain(currd) || !is_pinned_vcpu(curr) )
-            return X86EMUL_OKAY;
-        if ( rdmsr_safe(reg, temp) )
-            break;
-        if ( val )
-            goto invalid;
-        return X86EMUL_OKAY;
-
     case MSR_IA32_MISC_ENABLE:
         rdmsrl(reg, temp);
         if ( val != guest_misc_enable(temp) )