The PV and HVM code paths here are similar, but not similar enough.
Synchronize the two so that
- in the HVM case we don't unconditionally try to access extended
config space
- in the PV case we pass a correct range to the XSM hook
- in the PV case we don't needlessly deny access when the operation
isn't really on PCI config space
All of this is done while also sharing the macros HVM already had here, by moving them into a common header.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
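For reference, a minimal standalone sketch (not part of the patch) of what the CF8_* macros being moved into the shared x86 pci.h header decode out of a value written to the 0xCF8 config address port; the example CF8 value is made up for illustration:

#include <stdio.h>
#include <stdint.h>

/* Copies of the shared macros, for illustration only. */
#define CF8_BDF(cf8)     (  ((cf8) & 0x00ffff00) >> 8)   /* bus/device/function */
#define CF8_ADDR_LO(cf8) (   (cf8) & 0x000000fc)         /* register bits 2..7 */
#define CF8_ADDR_HI(cf8) (  ((cf8) & 0x0f000000) >> 16)  /* register bits 8..11 (AMD ECS) */
#define CF8_ENABLED(cf8) (!!((cf8) & 0x80000000))        /* config space decode enable */

int main(void)
{
    /* Hypothetical write to 0xCF8: enable bit, bus 0, device 3, function 1,
     * (extended) register 0x140. */
    uint32_t cf8 = 0x80000000 | (3 << 11) | (1 << 8) | (0x1 << 24) | 0x40;

    printf("enabled=%d bdf=%04x reg=%#x\n",
           CF8_ENABLED(cf8), (unsigned int)CF8_BDF(cf8),
           (unsigned int)(CF8_ADDR_HI(cf8) | CF8_ADDR_LO(cf8)));
    /* Prints: enabled=1 bdf=0019 reg=0x140 */
    return 0;
}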
check_syscfg_dram_mod_en();
}
-static struct cpu_dev amd_cpu_dev __cpuinitdata = {
+static const struct cpu_dev amd_cpu_dev = {
.c_vendor = "AMD",
.c_ident = { "AuthenticAMD" },
.c_init = init_amd,
init_c3(c);
}
-static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
+static const struct cpu_dev centaur_cpu_dev = {
.c_vendor = "Centaur",
.c_ident = { "CentaurHauls" },
.c_init = init_centaur,
unsigned int __devinitdata opt_cpuid_mask_ext_edx = ~0u;
integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
-struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+const struct cpu_dev *__read_mostly cpu_devs[X86_VENDOR_NUM] = {};
unsigned int paddr_bits __read_mostly = 36;
__clear_bit(X86_FEATURE_SEP, c->x86_capability);
}
-static struct cpu_dev default_cpu = {
+static const struct cpu_dev default_cpu = {
.c_init = default_init,
.c_vendor = "Unknown",
};
-static struct cpu_dev * this_cpu = &default_cpu;
+static const struct cpu_dev *this_cpu = &default_cpu;
bool_t opt_cpu_info;
boolean_param("cpuinfo", opt_cpu_info);
l2size, ecx & 0xFF);
}
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+int get_cpu_vendor(const char v[], enum get_cpu_vendor mode)
{
- char *v = c->x86_vendor_id;
int i;
static int printed;
if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
(cpu_devs[i]->c_ident[1] &&
!strcmp(v,cpu_devs[i]->c_ident[1]))) {
- c->x86_vendor = i;
- if (!early)
+ if (mode == gcv_host_late)
this_cpu = cpu_devs[i];
- return;
+ return i;
}
}
}
+ if (mode == gcv_guest)
+ return X86_VENDOR_UNKNOWN;
if (!printed) {
printed++;
printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
printk(KERN_ERR "CPU: Your system may be unstable.\n");
}
- c->x86_vendor = X86_VENDOR_UNKNOWN;
this_cpu = &default_cpu;
+
+ return X86_VENDOR_UNKNOWN;
}
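A note on the new interface (sketch, not the patched code): gcv_host_early only maps the vendor string to an X86_VENDOR_* index, gcv_host_late additionally points this_cpu at the matching cpu_dev, and gcv_guest skips the "vendor unknown" warning/fallback entirely. The 12-character string being matched is the CPUID leaf 0 output in EBX/EDX/ECX order, e.g.:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    /* Registers CPUID.0 returns on an Intel part: "Genu" "ineI" "ntel". */
    uint32_t ebx = 0x756e6547, edx = 0x49656e69, ecx = 0x6c65746e;
    char vendor_id[13];

    /* Same layout the callers of get_cpu_vendor() assemble (little endian). */
    memcpy(&vendor_id[0], &ebx, 4);
    memcpy(&vendor_id[4], &edx, 4);
    memcpy(&vendor_id[8], &ecx, 4);
    vendor_id[12] = '\0';

    printf("%s\n", vendor_id);   /* GenuineIntel */
    return 0;
}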
static inline u32 _phys_pkg_id(u32 cpuid_apic, int index_msb)
(int *)&c->x86_vendor_id[8],
(int *)&c->x86_vendor_id[4]);
- get_cpu_vendor(c, 1);
+ c->x86_vendor = get_cpu_vendor(c->x86_vendor_id, gcv_host_early);
cpuid(0x00000001, &tfms, &misc, &cap4, &cap0);
c->x86 = (tfms >> 8) & 15;
(int *)&c->x86_vendor_id[8],
(int *)&c->x86_vendor_id[4]);
- get_cpu_vendor(c, 0);
+ c->x86_vendor = get_cpu_vendor(c->x86_vendor_id, gcv_host_late);
/* Initialize the standard set of capabilities */
/* Note that the vendor-specific code below might override */
void (*c_init)(struct cpuinfo_x86 * c);
};
-extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
+extern const struct cpu_dev *cpu_devs[X86_VENDOR_NUM];
extern bool_t opt_arat;
extern unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
set_bit(X86_FEATURE_ARAT, c->x86_capability);
}
-static struct cpu_dev intel_cpu_dev __cpuinitdata = {
+static const struct cpu_dev intel_cpu_dev = {
.c_vendor = "Intel",
.c_ident = { "GenuineIntel" },
.c_init = init_intel,
d->arch.cpuids[i].input[1] = XEN_CPUID_INPUT_UNUSED;
}
+ d->arch.x86_vendor = boot_cpu_data.x86_vendor;
+ d->arch.x86 = boot_cpu_data.x86;
+ d->arch.x86_model = boot_cpu_data.x86_model;
+
d->arch.ioport_caps =
rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
rc = -ENOMEM;
*unused = *ctl;
else
ret = -ENOENT;
+
+ if ( !ret )
+ {
+ switch ( ctl->input[0] )
+ {
+ case 0: {
+ union {
+ typeof(boot_cpu_data.x86_vendor_id) str;
+ struct {
+ uint32_t ebx, edx, ecx;
+ } reg;
+ } vendor_id = {
+ .reg = {
+ .ebx = ctl->ebx,
+ .edx = ctl->edx,
+ .ecx = ctl->ecx
+ }
+ };
+
+ d->arch.x86_vendor = get_cpu_vendor(vendor_id.str, gcv_guest);
+ break;
+ }
+ case 1:
+ d->arch.x86 = (ctl->eax >> 8) & 0xf;
+ if ( d->arch.x86 == 0xf )
+ d->arch.x86 += (ctl->eax >> 20) & 0xff;
+ d->arch.x86_model = (ctl->eax >> 4) & 0xf;
+ if ( d->arch.x86 >= 0x6 )
+ d->arch.x86_model |= (ctl->eax >> 12) & 0xf0;
+ break;
+ }
+ }
break;
}
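A worked example of the family/model folding added above (standalone sketch; the leaf value 0x000306c3 is just a sample CPUID.1 EAX, an Intel family 6 / model 0x3c part):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t eax = 0x000306c3;
    unsigned int family = (eax >> 8) & 0xf;     /* 0x6 */
    unsigned int model  = (eax >> 4) & 0xf;     /* 0xc */

    if ( family == 0xf )                        /* extended family only for 0xf */
        family += (eax >> 20) & 0xff;
    if ( family >= 0x6 )                        /* fold in extended model */
        model |= (eax >> 12) & 0xf0;            /* 0xc | 0x30 */

    printf("family %#x model %#x\n", family, model);   /* family 0x6 model 0x3c */
    return 0;
}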
struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
ioreq_t *p)
{
-#define CF8_BDF(cf8) (((cf8) & 0x00ffff00) >> 8)
-#define CF8_ADDR_LO(cf8) ((cf8) & 0x000000fc)
-#define CF8_ADDR_HI(cf8) (((cf8) & 0x0f000000) >> 16)
-#define CF8_ENABLED(cf8) (!!((cf8) & 0x80000000))
-
struct hvm_ioreq_server *s;
uint32_t cf8;
uint8_t type;
type = IOREQ_TYPE_PCI_CONFIG;
addr = ((uint64_t)sbdf << 32) |
- CF8_ADDR_HI(cf8) |
CF8_ADDR_LO(cf8) |
(p->addr & 3);
+ /* AMD extended configuration space access? */
+ if ( CF8_ADDR_HI(cf8) &&
+ d->arch.x86_vendor == X86_VENDOR_AMD &&
+ d->arch.x86 >= 0x10 && d->arch.x86 <= 0x17 )
+ {
+ uint64_t msr_val;
+
+ if ( !rdmsr_safe(MSR_AMD64_NB_CFG, msr_val) &&
+ (msr_val & (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT)) )
+ addr |= CF8_ADDR_HI(cf8);
+ }
}
else
{
}
return d->arch.hvm_domain.default_ioreq_server;
-
-#undef CF8_ADDR_ENABLED
-#undef CF8_ADDR_HI
-#undef CF8_ADDR_LO
-#undef CF8_BDF
}
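To make the new HVM behaviour easier to follow, a compilable sketch (assumptions: the ecs_ok flag stands in for the AMD vendor/family check plus the MSR_AMD64_NB_CFG read done in the real code, and pci_config_ioreq_addr() is a made-up name):

#include <stdio.h>
#include <stdint.h>

#define CF8_ADDR_LO(cf8) ((cf8) & 0x000000fc)
#define CF8_ADDR_HI(cf8) (((cf8) & 0x0f000000) >> 16)

/* Compose a PCI_CONFIG ioreq address the way hvm_select_ioreq_server()
 * now does: SBDF in the upper half, the low register offset always, and
 * the extended (AMD ECS) register bits only when ecs_ok. */
static uint64_t pci_config_ioreq_addr(uint32_t sbdf, uint32_t cf8,
                                      uint64_t port_addr, int ecs_ok)
{
    uint64_t addr = ((uint64_t)sbdf << 32) | CF8_ADDR_LO(cf8) | (port_addr & 3);

    if ( ecs_ok )
        addr |= CF8_ADDR_HI(cf8);

    return addr;
}

int main(void)
{
    printf("%#llx\n", (unsigned long long)
           pci_config_ioreq_addr(0x0019, 0x81000040, 0xcfe, 1));
    return 0;
}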
int hvm_buffered_io_send(ioreq_t *p)
return ioports_access_permitted(d, port, port + bytes - 1);
}
-static bool_t pci_cfg_ok(struct domain *currd, bool_t write, unsigned int size)
+static bool_t pci_cfg_ok(struct domain *currd, bool_t write,
+ unsigned int start, unsigned int size)
{
uint32_t machine_bdf;
- unsigned int start;
if ( !is_hardware_domain(currd) )
return 0;
- machine_bdf = (currd->arch.pci_cf8 >> 8) & 0xFFFF;
+ if ( !CF8_ENABLED(currd->arch.pci_cf8) )
+ return 1;
+
+ machine_bdf = CF8_BDF(currd->arch.pci_cf8);
if ( write )
{
const unsigned long *ro_map = pci_get_ro_map(0);
if ( ro_map && test_bit(machine_bdf, ro_map) )
return 0;
}
- start = currd->arch.pci_cf8 & 0xFF;
+ start |= CF8_ADDR_LO(currd->arch.pci_cf8);
/* AMD extended configuration space access? */
- if ( (currd->arch.pci_cf8 & 0x0F000000) &&
+ if ( CF8_ADDR_HI(currd->arch.pci_cf8) &&
boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
boot_cpu_data.x86 >= 0x10 && boot_cpu_data.x86 <= 0x17 )
{
if ( rdmsr_safe(MSR_AMD64_NB_CFG, msr_val) )
return 0;
if ( msr_val & (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT) )
- start |= (currd->arch.pci_cf8 >> 16) & 0xF00;
+ start |= CF8_ADDR_HI(currd->arch.pci_cf8);
}
return !xsm_pci_config_permission(XSM_HOOK, currd, machine_bdf,
size = min(bytes, 4 - (port & 3));
if ( size == 3 )
size = 2;
- if ( pci_cfg_ok(currd, 0, size) )
+ if ( pci_cfg_ok(currd, 0, port & 3, size) )
sub_data = pci_conf_read(currd->arch.pci_cf8, port & 3, size);
}
size = min(bytes, 4 - (port & 3));
if ( size == 3 )
size = 2;
- if ( pci_cfg_ok(currd, 1, size) )
+ if ( pci_cfg_ok(currd, 1, port & 3, size) )
pci_conf_write(currd->arch.pci_cf8, port & 3, size, data);
}
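A worked example of the range the PV side now passes to the XSM hook (sketch only; the CF8 value 0x80000340 and the 2-byte access at port 0xCFD are hypothetical):

#include <stdio.h>
#include <stdint.h>

#define CF8_ADDR_LO(cf8) ((cf8) & 0x000000fc)

int main(void)
{
    uint32_t cf8 = 0x80000340;            /* enabled, register 0x40 */
    unsigned int port = 0xcfd, bytes = 2; /* 2-byte access at dword offset 1 */
    unsigned int size = bytes < 4 - (port & 3) ? bytes : 4 - (port & 3);
    unsigned int start;

    if ( size == 3 )                      /* same 3 -> 2 clamp as the caller */
        size = 2;
    start = (port & 3) | CF8_ADDR_LO(cf8);

    printf("range [%#x, %#x]\n", start, start + size - 1);  /* [0x41, 0x42] */
    return 0;
}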
/* Is PHYSDEVOP_eoi to automatically unmask the event channel? */
bool_t auto_unmask;
+ /* Values snooped from updates to cpuids[] (below). */
+ u8 x86; /* CPU family */
+ u8 x86_vendor; /* CPU vendor */
+ u8 x86_model; /* CPU model */
+
cpuid_input_t *cpuids;
struct PITState vpit;
#ifndef __X86_PCI_H__
#define __X86_PCI_H__
+#define CF8_BDF(cf8)     (  ((cf8) & 0x00ffff00) >> 8)
+#define CF8_ADDR_LO(cf8) (   (cf8) & 0x000000fc)
+#define CF8_ADDR_HI(cf8) (  ((cf8) & 0x0f000000) >> 16)
+#define CF8_ENABLED(cf8) (!!((cf8) & 0x80000000))
+
#define IS_SNB_GFX(id) (id == 0x01068086 || id == 0x01168086 \
|| id == 0x01268086 || id == 0x01028086 \
|| id == 0x01128086 || id == 0x01228086 \
int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void), unsigned long len);
int microcode_resume_cpu(int cpu);
+enum get_cpu_vendor {
+ gcv_host_early,
+ gcv_host_late,
+ gcv_guest
+};
+
+int get_cpu_vendor(const char vendor_id[], enum get_cpu_vendor);
void pv_cpuid(struct cpu_user_regs *regs);
#endif /* !__ASSEMBLY__ */