d->arch.cpuid->x86_vendor == X86_VENDOR_AMD &&
(x86_fam = get_cpu_family(
d->arch.cpuid->basic.raw_fms, NULL, NULL)) > 0x10 &&
- x86_fam <= 0x17 )
+ x86_fam < 0x17 )
{
uint64_t msr_val;
/* AMD extended configuration space access? */
if ( CF8_ADDR_HI(currd->arch.pci_cf8) &&
boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
- boot_cpu_data.x86 >= 0x10 && boot_cpu_data.x86 <= 0x17 )
+ boot_cpu_data.x86 >= 0x10 && boot_cpu_data.x86 < 0x17 )
{
uint64_t msr_val;
}
goto normal;
+ case MSR_FAM10H_MMIO_CONF_BASE:
+ if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ||
+ boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 >= 0x17 )
+ break;
+ /* fall through */
+ case MSR_AMD64_NB_CFG:
+ if ( !is_hardware_domain(currd) || !is_pinned_vcpu(curr) )
+ {
+ *val = 0;
+ return X86EMUL_OKAY;
+ }
+ goto normal;
+
case MSR_IA32_MISC_ENABLE:
rdmsrl(reg, *val);
*val = guest_misc_enable(*val);
break;
case MSR_AMD64_NB_CFG:
- if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ||
- boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 > 0x17 )
- break;
if ( !is_hardware_domain(currd) || !is_pinned_vcpu(curr) )
return X86EMUL_OKAY;
if ( (rdmsr_safe(MSR_AMD64_NB_CFG, temp) != 0) ||
case MSR_FAM10H_MMIO_CONF_BASE:
if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ||
- boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 > 0x17 )
+ boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 >= 0x17 )
break;
if ( !is_hardware_domain(currd) || !is_pinned_vcpu(curr) )
return X86EMUL_OKAY;