int node, nr_nodes;
/* Read the number of nodes from the first Northbridge. */
- nr_nodes = ((pci_conf_read32(0, 0, 0x18, 0x0, 0x60)>>4)&0x07)+1;
+ nr_nodes = ((pci_conf_read32(PCI_SBDF_T(0, 0, 0x18, 0),
+ 0x60)>>4)&0x07)+1;
for (node = 0; node < nr_nodes; node++) {
+ const pci_sbdf_t sbdf = {
+ .dev = 0x18 + node,
+ .func = 0x3
+ };
+
/* PMM7: bus=0, dev=0x18+node, function=0x3, register=0x87. */
- pmm7 = pci_conf_read8(0, 0, 0x18+node, 0x3, 0x87);
+ pmm7 = pci_conf_read8(sbdf, 0x87);
/* Invalid read means we've updated every Northbridge. */
if (pmm7 == 0xFF)
break;
pmm7 &= 0xFC; /* clear pmm7[1:0] */
- pci_conf_write8(0, 0, 0x18+node, 0x3, 0x87, pmm7);
+ pci_conf_write8(sbdf, 0x87, pmm7);
printk ("AMD: Disabling C1 Clock Ramping Node #%x\n", node);
}
}
if (c->x86 == 0x16 && c->x86_model <= 0xf) {
if (c == &boot_cpu_data) {
- l = pci_conf_read32(0, 0, 0x18, 0x3, 0x58);
- h = pci_conf_read32(0, 0, 0x18, 0x3, 0x5c);
+ const pci_sbdf_t sbdf = {
+ .dev = 0x18,
+ .func = 0x3,
+ };
+
+ l = pci_conf_read32(sbdf, 0x58);
+ h = pci_conf_read32(sbdf, 0x5c);
/* Erratum 792: report exactly which of D18F3x58[4:0] / D18F3x5C[0] get cleared. */
if ((l & 0x1f) | (h & 0x1))
	printk(KERN_WARNING
	       "Applying workaround for erratum 792: %s%s%s\n",
	       /* Three "%s" conversions require three arguments. */
	       (l & 0x1f) ? "clearing D18F3x58[4:0]" : "",
	       ((l & 0x1f) && (h & 0x1)) ? " and " : "",
	       (h & 0x1) ? "clearing D18F3x5C[0]" : "");
if (l & 0x1f)
- pci_conf_write32(0, 0, 0x18, 0x3, 0x58,
- l & ~0x1f);
+ pci_conf_write32(sbdf, 0x58, l & ~0x1f);
if (h & 0x1)
- pci_conf_write32(0, 0, 0x18, 0x3, 0x5c,
- h & ~0x1);
+ pci_conf_write32(sbdf, 0x5c, h & ~0x1);
}
rdmsrl(MSR_AMD64_LS_CFG, value);
static int __init ich10_bios_quirk(struct dmi_system_id *d)
{
u32 port, smictl;
+ const pci_sbdf_t sbdf = {
+ .dev = 0x1f,
+ };
- if ( pci_conf_read16(0, 0, 0x1f, 0, PCI_VENDOR_ID) != 0x8086 )
+ if ( pci_conf_read16(sbdf, PCI_VENDOR_ID) != 0x8086 )
return 0;
- switch ( pci_conf_read16(0, 0, 0x1f, 0, PCI_DEVICE_ID) ) {
+ switch ( pci_conf_read16(sbdf, PCI_DEVICE_ID) ) {
case 0x3a14:
case 0x3a16:
case 0x3a18:
case 0x3a1a:
- port = (pci_conf_read16(0, 0, 0x1f, 0, 0x40) & 0xff80) + 0x30;
+ port = (pci_conf_read16(sbdf, 0x40) & 0xff80) + 0x30;
smictl = inl(port);
/* turn off LEGACY_USB{,2}_EN if enabled */
if ( smictl & 0x20008 )
}
*array_size = ARRAY_SIZE(snb_bad_pages);
- igd_id = pci_conf_read32(0, 0, 2, 0, 0);
+ igd_id = pci_conf_read32(PCI_SBDF_T(0, 0, 2, 0), 0);
if ( IS_SNB_GFX(igd_id) )
return snb_bad_pages;
static bool memory_decoded(const struct pci_dev *dev)
{
- uint8_t bus, slot, func;
+ /* Start from the device's own SBDF (seg/bus/dev/func). */
+ pci_sbdf_t sbdf = dev->sbdf;
- if ( !dev->info.is_virtfn )
+ /*
+ * A VF has no standalone PCI_COMMAND of interest here: memory decoding
+ * is governed by its physical function, so redirect to the PF address.
+ */
+ if ( dev->info.is_virtfn )
{
- bus = dev->sbdf.bus;
- slot = dev->sbdf.dev;
- func = dev->sbdf.func;
- }
- else
- {
- bus = dev->info.physfn.bus;
- slot = PCI_SLOT(dev->info.physfn.devfn);
- func = PCI_FUNC(dev->info.physfn.devfn);
+ sbdf.bus = dev->info.physfn.bus;
+ /* extfunc covers the whole devfn (dev + func) in one assignment. */
+ sbdf.extfunc = dev->info.physfn.devfn;
}
- return !!(pci_conf_read16(dev->sbdf.seg, bus, slot, func, PCI_COMMAND) &
- PCI_COMMAND_MEMORY);
+ return !!(pci_conf_read16(sbdf, PCI_COMMAND) & PCI_COMMAND_MEMORY);
}
static bool msix_memory_decoded(const struct pci_dev *dev, unsigned int pos)
{
- u16 control = pci_conf_read16(dev->sbdf.seg, dev->sbdf.bus, dev->sbdf.dev,
- dev->sbdf.func, msix_control_reg(pos));
+ u16 control = pci_conf_read16(dev->sbdf, msix_control_reg(pos));
if ( !(control & PCI_MSIX_FLAGS_ENABLE) )
return false;
{
struct pci_dev *dev = entry->dev;
int pos = entry->msi_attrib.pos;
- uint16_t data, seg = dev->sbdf.seg;
- uint8_t bus = dev->sbdf.bus;
- uint8_t slot = dev->sbdf.dev;
- uint8_t func = dev->sbdf.func;
+ uint16_t data;
- msg->address_lo = pci_conf_read32(seg, bus, slot, func,
+ msg->address_lo = pci_conf_read32(dev->sbdf,
msi_lower_address_reg(pos));
if ( entry->msi_attrib.is_64 )
{
- msg->address_hi = pci_conf_read32(seg, bus, slot, func,
+ msg->address_hi = pci_conf_read32(dev->sbdf,
msi_upper_address_reg(pos));
- data = pci_conf_read16(seg, bus, slot, func,
- msi_data_reg(pos, 1));
+ data = pci_conf_read16(dev->sbdf, msi_data_reg(pos, 1));
}
else
{
msg->address_hi = 0;
- data = pci_conf_read16(seg, bus, slot, func,
- msi_data_reg(pos, 0));
+ data = pci_conf_read16(dev->sbdf, msi_data_reg(pos, 0));
}
msg->data = data;
break;
{
struct pci_dev *dev = entry->dev;
int pos = entry->msi_attrib.pos;
- uint16_t seg = dev->sbdf.seg;
- uint8_t bus = dev->sbdf.bus;
- uint8_t slot = dev->sbdf.dev;
- uint8_t func = dev->sbdf.func;
int nr = entry->msi_attrib.entry_nr;
ASSERT((msg->data & (entry[-nr].msi.nvec - 1)) == nr);
if ( nr )
return 0;
- pci_conf_write32(seg, bus, slot, func, msi_lower_address_reg(pos),
- msg->address_lo);
+ pci_conf_write32(dev->sbdf, msi_lower_address_reg(pos), msg->address_lo);
if ( entry->msi_attrib.is_64 )
{
- pci_conf_write32(seg, bus, slot, func, msi_upper_address_reg(pos),
+ pci_conf_write32(dev->sbdf, msi_upper_address_reg(pos),
msg->address_hi);
- pci_conf_write16(seg, bus, slot, func, msi_data_reg(pos, 1),
+ pci_conf_write16(dev->sbdf, msi_data_reg(pos, 1),
msg->data);
}
else
- pci_conf_write16(seg, bus, slot, func, msi_data_reg(pos, 0),
- msg->data);
+ pci_conf_write16(dev->sbdf, msi_data_reg(pos, 0), msg->data);
break;
}
case PCI_CAP_ID_MSIX:
void __msi_set_enable(u16 seg, u8 bus, u8 slot, u8 func, int pos, int enable)
{
- u16 control = pci_conf_read16(seg, bus, slot, func, pos + PCI_MSI_FLAGS);
+ /* Callers still pass discrete seg/bus/slot/func; pack them once here. */
+ const pci_sbdf_t sbdf = {
+ .seg = seg,
+ .bus = bus,
+ .dev = slot,
+ .func = func,
+ };
+ uint16_t control = pci_conf_read16(sbdf, pos + PCI_MSI_FLAGS);
+ /* Read-modify-write the MSI enable bit in the capability's control word. */
control &= ~PCI_MSI_FLAGS_ENABLE;
if ( enable )
control |= PCI_MSI_FLAGS_ENABLE;
- pci_conf_write16(seg, bus, slot, func, pos + PCI_MSI_FLAGS, control);
+ pci_conf_write16(sbdf, pos + PCI_MSI_FLAGS, control);
}
static void msi_set_enable(struct pci_dev *dev, int enable)
pos = pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSIX);
if ( pos )
{
- control = pci_conf_read16(seg, bus, slot, func, msix_control_reg(pos));
+ control = pci_conf_read16(dev->sbdf, msix_control_reg(pos));
control &= ~PCI_MSIX_FLAGS_ENABLE;
if ( enable )
control |= PCI_MSIX_FLAGS_ENABLE;
- pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos), control);
+ pci_conf_write16(dev->sbdf, msix_control_reg(pos), control);
}
}
{
u32 mask_bits;
- mask_bits = pci_conf_read32(seg, bus, slot, func, entry->msi.mpos);
+ mask_bits = pci_conf_read32(pdev->sbdf, entry->msi.mpos);
mask_bits &= ~((u32)1 << entry->msi_attrib.entry_nr);
mask_bits |= (u32)flag << entry->msi_attrib.entry_nr;
- pci_conf_write32(seg, bus, slot, func, entry->msi.mpos, mask_bits);
+ pci_conf_write32(pdev->sbdf, entry->msi.mpos, mask_bits);
}
break;
case PCI_CAP_ID_MSIX:
maskall = pdev->msix->host_maskall;
- control = pci_conf_read16(seg, bus, slot, func,
+ control = pci_conf_read16(pdev->sbdf,
msix_control_reg(entry->msi_attrib.pos));
if ( unlikely(!(control & PCI_MSIX_FLAGS_ENABLE)) )
{
pdev->msix->host_maskall = 1;
- pci_conf_write16(seg, bus, slot, func,
+ pci_conf_write16(pdev->sbdf,
msix_control_reg(entry->msi_attrib.pos),
control | (PCI_MSIX_FLAGS_ENABLE |
PCI_MSIX_FLAGS_MASKALL));
pdev->msix->host_maskall = maskall;
if ( maskall || pdev->msix->guest_maskall )
control |= PCI_MSIX_FLAGS_MASKALL;
- pci_conf_write16(seg, bus, slot, func,
+ pci_conf_write16(pdev->sbdf,
msix_control_reg(entry->msi_attrib.pos), control);
return flag;
default:
case PCI_CAP_ID_MSI:
if ( !entry->msi_attrib.maskbit )
break;
- return (pci_conf_read32(entry->dev->sbdf.seg, entry->dev->sbdf.bus,
- entry->dev->sbdf.dev, entry->dev->sbdf.func,
- entry->msi.mpos) >>
+ return (pci_conf_read32(entry->dev->sbdf, entry->msi.mpos) >>
entry->msi_attrib.entry_nr) & 1;
case PCI_CAP_ID_MSIX:
if ( unlikely(!msix_memory_decoded(entry->dev,
if ( msidesc->msi_attrib.type == PCI_CAP_ID_MSIX )
{
- control = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus,
- pdev->sbdf.dev, pdev->sbdf.func, cpos);
+ control = pci_conf_read16(pdev->sbdf, cpos);
if ( !(control & PCI_MSIX_FLAGS_ENABLE) )
- pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, cpos,
+ pci_conf_write16(pdev->sbdf, cpos,
control | (PCI_MSIX_FLAGS_ENABLE |
PCI_MSIX_FLAGS_MASKALL));
}
: &pci_msi_nonmaskable);
if ( !(control & PCI_MSIX_FLAGS_ENABLE) )
- pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, cpos, control);
+ pci_conf_write16(pdev->sbdf, cpos, control);
return rc;
}
pos = pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSI);
if ( !pos )
return -ENODEV;
- control = pci_conf_read16(seg, bus, slot, func, msi_control_reg(pos));
+ control = pci_conf_read16(dev->sbdf, msi_control_reg(pos));
maxvec = multi_msi_capable(control);
if ( nvec > maxvec )
return maxvec;
u32 maskbits;
/* All MSIs are unmasked by default, Mask them all */
- maskbits = pci_conf_read32(seg, bus, slot, func, mpos);
+ maskbits = pci_conf_read32(dev->sbdf, mpos);
maskbits |= ~(u32)0 >> (32 - maxvec);
- pci_conf_write32(seg, bus, slot, func, mpos, maskbits);
+ pci_conf_write32(dev->sbdf, mpos, maskbits);
}
list_add_tail(&entry->list, &dev->msi_list);
pci_intx(dev, false);
control |= PCI_MSI_FLAGS_ENABLE;
}
- pci_conf_write16(seg, bus, slot, func, msi_control_reg(pos), control);
+ pci_conf_write16(dev->sbdf, msi_control_reg(pos), control);
return 0;
}
u8 limit;
u32 addr, base = PCI_BASE_ADDRESS_0;
u64 disp = 0;
+ const pci_sbdf_t sbdf = {
+ .seg = seg,
+ .bus = bus,
+ .dev = slot,
+ .func = func,
+ };
if ( vf >= 0 )
{
unsigned int pos = pci_find_ext_capability(seg, bus,
PCI_DEVFN(slot, func),
PCI_EXT_CAP_ID_SRIOV);
- u16 ctrl = pci_conf_read16(seg, bus, slot, func, pos + PCI_SRIOV_CTRL);
- u16 num_vf = pci_conf_read16(seg, bus, slot, func,
- pos + PCI_SRIOV_NUM_VF);
- u16 offset = pci_conf_read16(seg, bus, slot, func,
- pos + PCI_SRIOV_VF_OFFSET);
- u16 stride = pci_conf_read16(seg, bus, slot, func,
- pos + PCI_SRIOV_VF_STRIDE);
+ u16 ctrl = pci_conf_read16(sbdf, pos + PCI_SRIOV_CTRL);
+ u16 num_vf = pci_conf_read16(sbdf, pos + PCI_SRIOV_NUM_VF);
+ u16 offset = pci_conf_read16(sbdf, pos + PCI_SRIOV_VF_OFFSET);
+ u16 stride = pci_conf_read16(sbdf, pos + PCI_SRIOV_VF_STRIDE);
if ( !pdev || !pos ||
!(ctrl & PCI_SRIOV_CTRL_VFE) ||
disp = vf * pdev->vf_rlen[bir];
limit = PCI_SRIOV_NUM_BARS;
}
- else switch ( pci_conf_read8(seg, bus, slot, func,
- PCI_HEADER_TYPE) & 0x7f )
+ else switch ( pci_conf_read8(sbdf, PCI_HEADER_TYPE) & 0x7f )
{
case PCI_HEADER_TYPE_NORMAL:
limit = 6;
if ( bir >= limit )
return 0;
- addr = pci_conf_read32(seg, bus, slot, func, base + bir * 4);
+ addr = pci_conf_read32(sbdf, base + bir * 4);
if ( (addr & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO )
return 0;
if ( (addr & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64 )
if ( ++bir >= limit )
return 0;
return addr + disp +
- ((u64)pci_conf_read32(seg, bus, slot, func,
- base + bir * 4) << 32);
+ ((uint64_t)pci_conf_read32(sbdf, base + bir * 4) << 32);
}
return (addr & PCI_BASE_ADDRESS_MEM_MASK) + disp;
}
ASSERT(pcidevs_locked());
- control = pci_conf_read16(seg, bus, slot, func, msix_control_reg(pos));
+ control = pci_conf_read16(dev->sbdf, msix_control_reg(pos));
/*
* Ensure MSI-X interrupts are masked during setup. Some devices require
* MSI-X to be enabled before we can touch the MSI-X registers. We need
* fully set up.
*/
msix->host_maskall = 1;
- pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos),
+ pci_conf_write16(dev->sbdf, msix_control_reg(pos),
control | (PCI_MSIX_FLAGS_ENABLE |
PCI_MSIX_FLAGS_MASKALL));
if ( unlikely(!memory_decoded(dev)) )
{
- pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos),
+ pci_conf_write16(dev->sbdf, msix_control_reg(pos),
control & ~PCI_MSIX_FLAGS_ENABLE);
return -ENXIO;
}
entry = alloc_msi_entry(1);
if ( !entry )
{
- pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos),
+ pci_conf_write16(dev->sbdf, msix_control_reg(pos),
control & ~PCI_MSIX_FLAGS_ENABLE);
return -ENOMEM;
}
}
/* Locate MSI-X table region */
- table_offset = pci_conf_read32(seg, bus, slot, func,
- msix_table_offset_reg(pos));
+ table_offset = pci_conf_read32(dev->sbdf, msix_table_offset_reg(pos));
bir = (u8)(table_offset & PCI_MSIX_BIRMASK);
table_offset &= ~PCI_MSIX_BIRMASK;
{
if ( !msi || !msi->table_base )
{
- pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos),
+ pci_conf_write16(dev->sbdf, msix_control_reg(pos),
control & ~PCI_MSIX_FLAGS_ENABLE);
xfree(entry);
return -ENXIO;
WARN_ON(rangeset_overlaps_range(mmio_ro_ranges, msix->table.first,
msix->table.last));
- pba_offset = pci_conf_read32(seg, bus, slot, func,
- msix_pba_offset_reg(pos));
+ pba_offset = pci_conf_read32(dev->sbdf, msix_pba_offset_reg(pos));
bir = (u8)(pba_offset & PCI_MSIX_BIRMASK);
pba_paddr = read_pci_mem_bar(seg, pbus, pslot, pfunc, bir, vf);
WARN_ON(!pba_paddr);
if ( idx < 0 )
{
- pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos),
+ pci_conf_write16(dev->sbdf, msix_control_reg(pos),
control & ~PCI_MSIX_FLAGS_ENABLE);
xfree(entry);
return idx;
maskall = 0;
}
msix->host_maskall = maskall;
- pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos), control);
+ pci_conf_write16(dev->sbdf, msix_control_reg(pos), control);
return 0;
}
if ( !pdev || !pos )
return -ENODEV;
- control = pci_conf_read16(msi->seg, msi->bus, slot, func,
- msix_control_reg(pos));
+ control = pci_conf_read16(pdev->sbdf, msix_control_reg(pos));
nr_entries = multi_msix_capable(control);
if ( msi->entry_nr >= nr_entries )
return -EINVAL;
uint8_t func = dev->sbdf.func;
unsigned int pos = pci_find_cap_offset(seg, bus, slot, func,
PCI_CAP_ID_MSIX);
- u16 control = pci_conf_read16(seg, bus, slot, func,
- msix_control_reg(entry->msi_attrib.pos));
+ uint16_t control = pci_conf_read16(dev->sbdf,
+ msix_control_reg(entry->msi_attrib.pos));
bool maskall = dev->msix->host_maskall;
if ( unlikely(!(control & PCI_MSIX_FLAGS_ENABLE)) )
{
dev->msix->host_maskall = 1;
- pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos),
+ pci_conf_write16(dev->sbdf, msix_control_reg(pos),
control | (PCI_MSIX_FLAGS_ENABLE |
PCI_MSIX_FLAGS_MASKALL));
}
dev->msix->host_maskall = maskall;
if ( maskall || dev->msix->guest_maskall )
control |= PCI_MSIX_FLAGS_MASKALL;
- pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos), control);
+ pci_conf_write16(dev->sbdf, msix_control_reg(pos), control);
_pci_cleanup_msix(dev->msix);
}
}
else
{
- u16 control = pci_conf_read16(seg, bus, slot, func,
- msix_control_reg(pos));
+ uint16_t control = pci_conf_read16(pdev->sbdf, msix_control_reg(pos));
rc = msix_capability_init(pdev, pos, NULL, NULL,
multi_msix_capable(control));
if ( reg < entry->msi.mpos || reg >= entry->msi.mpos + 4 || size != 4 )
return -EACCES;
- cntl = pci_conf_read16(seg, bus, slot, func, msi_control_reg(pos));
+ cntl = pci_conf_read16(pdev->sbdf, msi_control_reg(pos));
unused = ~(uint32_t)0 >> (32 - multi_msi_capable(cntl));
for ( pos = 0; pos < entry->msi.nvec; ++pos, ++entry )
{
pdev->sbdf.seg, pdev->sbdf.bus, slot, func, i);
spin_unlock_irqrestore(&desc->lock, flags);
if ( type == PCI_CAP_ID_MSIX )
- pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, slot, func,
- msix_control_reg(pos),
+ pci_conf_write16(pdev->sbdf, msix_control_reg(pos),
control & ~PCI_MSIX_FLAGS_ENABLE);
return -EINVAL;
}
}
else if ( !type && entry->msi_attrib.type == PCI_CAP_ID_MSIX )
{
- control = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus, slot,
- func, msix_control_reg(pos));
- pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, slot, func,
- msix_control_reg(pos),
+ control = pci_conf_read16(pdev->sbdf, msix_control_reg(pos));
+ pci_conf_write16(pdev->sbdf, msix_control_reg(pos),
control | (PCI_MSIX_FLAGS_ENABLE |
PCI_MSIX_FLAGS_MASKALL));
if ( unlikely(!memory_decoded(pdev)) )
{
spin_unlock_irqrestore(&desc->lock, flags);
- pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, slot, func,
- msix_control_reg(pos),
+ pci_conf_write16(pdev->sbdf, msix_control_reg(pos),
control & ~PCI_MSIX_FLAGS_ENABLE);
return -ENXIO;
}
{
unsigned int cpos = msi_control_reg(pos);
- control = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus, slot,
- func, cpos) & ~PCI_MSI_FLAGS_QSIZE;
+ control = pci_conf_read16(pdev->sbdf, cpos) & ~PCI_MSI_FLAGS_QSIZE;
multi_msi_enable(control, entry->msi.nvec);
- pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, slot, func, cpos,
- control);
+ pci_conf_write16(pdev->sbdf, cpos, control);
msi_set_enable(pdev, 1);
}
}
if ( type == PCI_CAP_ID_MSIX )
- pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, slot, func,
- msix_control_reg(pos),
+ pci_conf_write16(pdev->sbdf, msix_control_reg(pos),
control | PCI_MSIX_FLAGS_ENABLE);
return 0;
for (bus = 0; bus < 256; bus++) {
for (dev = 0; dev < 32; dev++) {
for (func = 0; func < 8; func++) {
- id = pci_conf_read32(0, bus, dev, func, PCI_VENDOR_ID);
+ const pci_sbdf_t sbdf = {
+ .bus = bus,
+ .dev = dev,
+ .func = func,
+ };
+
+ id = pci_conf_read32(sbdf, PCI_VENDOR_ID);
vendor_id = id & 0xffff;
dev_id = (id >> 16) & 0xffff;
if ((vendor_id == PCI_VENDOR_ID_AMD) &&
(dev_id == PCI_DEVICE_ID_AMD_10H_NB_MISC)) {
- pci_conf_write32(0, bus, dev, func, IBSCTL,
+ pci_conf_write32(sbdf, IBSCTL,
IBSCTL_LVTOFFSETVAL | APIC_EILVT_LVTOFF_IBS);
- value = pci_conf_read32(0, bus, dev, func, IBSCTL);
+ value = pci_conf_read32(sbdf, IBSCTL);
if (value != (IBSCTL_LVTOFFSETVAL |
APIC_EILVT_LVTOFF_IBS)) {
#define BASE_VALID(b) ((b) + SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40))
static void __init get_fam10h_pci_mmconf_base(void)
{
- unsigned int i, j, bus, slot, hi_mmio_num;
+ unsigned int i, j, hi_mmio_num;
u32 address;
u64 val, tom2, start, end;
struct range {
u64 start, end;
} range[8];
+ pci_sbdf_t sbdf = { };
for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
u32 id;
u16 device;
u16 vendor;
- bus = pci_probes[i].bus;
- slot = pci_probes[i].slot;
- id = pci_conf_read32(0, bus, slot, 0, PCI_VENDOR_ID);
+ sbdf.bus = pci_probes[i].bus;
+ sbdf.dev = pci_probes[i].slot;
+ id = pci_conf_read32(sbdf, PCI_VENDOR_ID);
vendor = id & 0xffff;
device = (id>>16) & 0xffff;
* above 4G
*/
+ /*
+ * The MMIO base/limit register pairs (0x80/0x84) live at function 1 of
+ * the Northbridge, while the vendor-ID probe above used function 0.
+ * The zero-initialized sbdf carries func 0, so make func 1 explicit.
+ */
for (hi_mmio_num = i = 0; i < 8; i++) {
- val = pci_conf_read32(0, bus, slot, 1, 0x80 + (i << 3));
+ val = pci_conf_read32(PCI_SBDF_T(0, sbdf.bus, sbdf.dev, 1),
+ 0x80 + (i << 3));
if (!(val & 3))
continue;
start = (val & 0xffffff00) << 8; /* 39:16 on 31:8*/
- val = pci_conf_read32(0, bus, slot, 1, 0x84 + (i << 3));
+ val = pci_conf_read32(PCI_SBDF_T(0, sbdf.bus, sbdf.dev, 1),
+ 0x84 + (i << 3));
end = ((val & 0xffffff00) << 8) | 0xffff; /* 39:16 on 31:8*/
if (end < tom2)
static const char __init *pci_mmcfg_e7520(void)
{
- u32 win;
- win = pci_conf_read16(0, 0, 0, 0, 0xce);
+ uint32_t win = pci_conf_read16(PCI_SBDF_T(0, 0, 0, 0), 0xce) & 0xf000;
- win = win & 0xf000;
if(win == 0x0000 || win == 0xf000)
pci_mmcfg_config_num = 0;
else {
pci_mmcfg_config_num = 1;
- pciexbar = pci_conf_read32(0, 0, 0, 0, 0x48);
+ pciexbar = pci_conf_read32(PCI_SBDF_T(0, 0, 0, 0), 0x48);
/* Enable bit */
if (!(pciexbar & 1))
for (i = bus = 0; bus < 256; bus++) {
u32 l, extcfg;
u16 vendor, device;
+ const pci_sbdf_t sbdf = {
+ .bus = bus,
+ };
- l = pci_conf_read32(0, bus, 0, 0, 0);
+ l = pci_conf_read32(sbdf, 0);
vendor = l & 0xffff;
device = (l >> 16) & 0xffff;
if (PCI_VENDOR_ID_NVIDIA != vendor || 0x0369 != device)
continue;
- extcfg = pci_conf_read32(0, bus, 0, 0, extcfg_regnum);
+ extcfg = pci_conf_read32(sbdf, extcfg_regnum);
if (extcfg & extcfg_enable_mask)
i++;
u32 l, extcfg;
u16 vendor, device;
int size_index;
+ const pci_sbdf_t sbdf = {
+ .bus = bus,
+ };
- l = pci_conf_read32(0, bus, 0, 0, 0);
+ l = pci_conf_read32(sbdf, 0);
vendor = l & 0xffff;
device = (l >> 16) & 0xffff;
if (PCI_VENDOR_ID_NVIDIA != vendor || 0x0369 != device)
continue;
- extcfg = pci_conf_read32(0, bus, 0, 0, extcfg_regnum);
+ extcfg = pci_conf_read32(sbdf, extcfg_regnum);
if (!(extcfg & extcfg_enable_mask))
continue;
static int __init pci_mmcfg_check_hostbridge(void)
{
u32 l;
- u32 bus, devfn;
u16 vendor, device;
int i;
const char *name;
name = NULL;
for (i = 0; !name && i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
- bus = pci_mmcfg_probes[i].bus;
- devfn = pci_mmcfg_probes[i].devfn;
- l = pci_conf_read32(0, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), 0);
+ l = pci_conf_read32(PCI_SBDF3_T(0, pci_mmcfg_probes[i].bus,
+ pci_mmcfg_probes[i].devfn), 0);
vendor = l & 0xffff;
device = (l >> 16) & 0xffff;
#include <xen/pci.h>
#include <asm/io.h>
-#define PCI_CONF_ADDRESS(bus, dev, func, reg) \
- (0x80000000 | (bus << 16) | (dev << 11) | (func << 8) | (reg & ~3))
+/*
+ * CF8h config-address encoding: enable bit | BDF in bits 23:8 | dword-
+ * aligned register. Parenthesize macro arguments so expression arguments
+ * cannot change precedence.
+ */
+#define PCI_CONF_ADDRESS(bdf, reg) \
+ (0x80000000 | ((bdf) << 8) | ((reg) & ~3))
#define GEN_PCI_CONF_READ(s) \
- uint ## s ## _t pci_conf_read ## s (unsigned int seg, unsigned int bus, \
- unsigned int dev, unsigned int func, \
- unsigned int reg) \
+ uint ## s ## _t pci_conf_read ## s (pci_sbdf_t sbdf, unsigned int reg) \
{ \
uint32_t value; \
\
BUILD_BUG_ON(s != 8 && s != 16 && s != 32); \
- if ( seg || reg > 255 ) \
- pci_mmcfg_read(seg, bus, PCI_DEVFN(dev, func), reg, s / 8, &value);\
+ if ( sbdf.seg || reg > 255 ) \
+ pci_mmcfg_read(sbdf.seg, sbdf.bus, sbdf.extfunc, reg, s / 8, \
+ &value); \
else \
- { \
- BUG_ON((bus > 255) || (dev > 31) || (func > 7)); \
- value = pci_conf_read(PCI_CONF_ADDRESS(bus, dev, func, reg), \
+ value = pci_conf_read(PCI_CONF_ADDRESS(sbdf.bdf, reg), \
reg & (4 - s / 8), s / 8); \
- } \
\
return value; \
}
#undef GEN_PCI_CONF_READ
#define GEN_PCI_CONF_WRITE(s) \
- void pci_conf_write ## s (unsigned int seg, unsigned int bus, \
- unsigned int dev, unsigned int func, \
- unsigned int reg, uint ## s ## _t data) \
+ void pci_conf_write ## s (pci_sbdf_t sbdf, unsigned int reg, \
+ uint ## s ## _t data) \
{ \
BUILD_BUG_ON(s != 8 && s != 16 && s != 32); \
- if ( seg || reg > 255 ) \
- pci_mmcfg_write(seg, bus, PCI_DEVFN(dev, func), reg, s / 8, data); \
+ if ( sbdf.seg || reg > 255 ) \
+ pci_mmcfg_write(sbdf.seg, sbdf.bus, sbdf.extfunc, reg, s / 8, \
+ data); \
else \
- { \
- BUG_ON((bus > 255) || (dev > 31) || (func > 7)); \
- pci_conf_write(PCI_CONF_ADDRESS(bus, dev, func, reg), \
+ pci_conf_write(PCI_CONF_ADDRESS(sbdf.bdf, reg), \
reg & (4 - s / 8), s / 8, data); \
- } \
}
/* Grep fodder */
case ACPI_ADR_SPACE_PCI_CONFIG:
printk("Resetting with ACPI PCI RESET_REG.\n");
/* Write the value that resets us. */
- pci_conf_write8(0, 0,
- (rr->address >> 32) & 31,
- (rr->address >> 16) & 7,
- (rr->address & 255),
- reset_value);
+ pci_conf_write8(PCI_SBDF_T(0, 0, (rr->address >> 32) & 31,
+ (rr->address >> 16) & 7),
+ (rr->address & 255), reset_value);
break;
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
case ACPI_ADR_SPACE_SYSTEM_IO:
static unsigned int __init __find_dbgp(u8 bus, u8 slot, u8 func)
{
- u32 class = pci_conf_read32(0, bus, slot, func, PCI_CLASS_REVISION);
+ uint32_t class = pci_conf_read32(PCI_SBDF_T(0, bus, slot, func),
+ PCI_CLASS_REVISION);
if ( (class >> 8) != PCI_CLASS_SERIAL_USB_EHCI )
return 0;
cap = __find_dbgp(bus, slot, func);
if ( !cap || ehci_num-- )
{
- if ( !func && !(pci_conf_read8(0, bus, slot, func,
- PCI_HEADER_TYPE) & 0x80) )
+ if ( !func &&
+ !(pci_conf_read8(PCI_SBDF_T(0, bus, slot, func),
+ PCI_HEADER_TYPE) & 0x80) )
break;
continue;
}
static void nvidia_set_debug_port(struct ehci_dbgp *dbgp, unsigned int port)
{
- u32 dword = pci_conf_read32(0, dbgp->bus, dbgp->slot, dbgp->func, 0x74);
+ const pci_sbdf_t sbdf = {
+ .bus = dbgp->bus,
+ .dev = dbgp->slot,
+ .func = dbgp->func,
+ };
+ uint32_t dword = pci_conf_read32(sbdf, 0x74);
+ /* Select the debug port: replace bits 15:12 of config register 0x74. */
dword &= ~(0x0f << 12);
dword |= (port & 0x0f) << 12;
- pci_conf_write32(0, dbgp->bus, dbgp->slot, dbgp->func, 0x74, dword);
+ pci_conf_write32(sbdf, 0x74, dword);
dbgp_printk("set debug port to %u\n", port);
}
static void __init detect_set_debug_port(struct ehci_dbgp *dbgp)
{
- if ( pci_conf_read16(0, dbgp->bus, dbgp->slot, dbgp->func,
+ if ( pci_conf_read16(PCI_SBDF_T(0, dbgp->bus, dbgp->slot, dbgp->func),
PCI_VENDOR_ID) == 0x10de )
{
dbgp_printk("using nvidia set_debug_port\n");
u32 cap;
unsigned int offset = HCC_EXT_CAPS(hcc_params);
int msec;
+ const pci_sbdf_t sbdf = {
+ .bus = dbgp->bus,
+ .dev = dbgp->slot,
+ .func = dbgp->func,
+ };
if ( !offset )
return;
- cap = pci_conf_read32(0, dbgp->bus, dbgp->slot, dbgp->func, offset);
+ cap = pci_conf_read32(sbdf, offset);
dbgp_printk("dbgp: EHCI BIOS state %08x\n", cap);
if ( (cap & 0xff) == 1 && (cap & EHCI_USBLEGSUP_BIOS) )
{
dbgp_printk("dbgp: BIOS handoff\n");
- pci_conf_write8(0, dbgp->bus, dbgp->slot, dbgp->func, offset + 3, 1);
+ pci_conf_write8(sbdf, offset + 3, 1);
}
/* if boot firmware now owns EHCI, spin till it hands it over. */
{
mdelay(10);
msec -= 10;
- cap = pci_conf_read32(0, dbgp->bus, dbgp->slot, dbgp->func, offset);
+ cap = pci_conf_read32(sbdf, offset);
}
if ( cap & EHCI_USBLEGSUP_BIOS )
/* well, possibly buggy BIOS... try to shut it down,
* and hope nothing goes too wrong */
dbgp_printk("dbgp: BIOS handoff failed: %08x\n", cap);
- pci_conf_write8(0, dbgp->bus, dbgp->slot, dbgp->func, offset + 2, 0);
+ pci_conf_write8(sbdf, offset + 2, 0);
}
/* just in case, always disable EHCI SMIs */
- pci_conf_write8(0, dbgp->bus, dbgp->slot, dbgp->func,
- offset + EHCI_USBLEGCTLSTS, 0);
+ pci_conf_write8(sbdf, offset + EHCI_USBLEGCTLSTS, 0);
}
static int ehci_dbgp_setup(struct ehci_dbgp *dbgp)
struct ehci_dbgp *dbgp = port->uart;
u32 debug_port, offset;
void __iomem *ehci_bar;
+ const pci_sbdf_t sbdf = {
+ .bus = dbgp->bus,
+ .dev = dbgp->slot,
+ .func = dbgp->func,
+ };
- debug_port = pci_conf_read32(0, dbgp->bus, dbgp->slot, dbgp->func,
- dbgp->cap);
+ debug_port = pci_conf_read32(sbdf, dbgp->cap);
offset = (debug_port >> 16) & 0xfff;
/* double check if the mem space is enabled */
- dbgp->pci_cr = pci_conf_read8(0, dbgp->bus, dbgp->slot, dbgp->func,
- PCI_COMMAND);
+ dbgp->pci_cr = pci_conf_read8(sbdf, PCI_COMMAND);
if ( !(dbgp->pci_cr & PCI_COMMAND_MEMORY) )
{
dbgp->pci_cr |= PCI_COMMAND_MEMORY;
- pci_conf_write16(0, dbgp->bus, dbgp->slot, dbgp->func, PCI_COMMAND,
- dbgp->pci_cr);
+ pci_conf_write16(sbdf, PCI_COMMAND, dbgp->pci_cr);
dbgp_printk("MMIO for EHCI enabled\n");
}
stop_timer(&dbgp->timer);
dbgp->timer.expires = 0;
- dbgp->pci_cr = pci_conf_read16(0, dbgp->bus, dbgp->slot, dbgp->func,
- PCI_COMMAND);
+ dbgp->pci_cr = pci_conf_read16(PCI_SBDF_T(0, dbgp->bus, dbgp->slot,
+ dbgp->func), PCI_COMMAND);
dbgp->state = dbgp_unsafe;
}
static void ehci_dbgp_resume(struct serial_port *port)
{
struct ehci_dbgp *dbgp = port->uart;
+ const pci_sbdf_t sbdf = {
+ .bus = dbgp->bus,
+ .dev = dbgp->slot,
+ .func = dbgp->func,
+ };
if ( !dbgp->ehci_debug )
return;
- pci_conf_write32(0, dbgp->bus, dbgp->slot, dbgp->func, dbgp->bar,
- dbgp->bar_val);
- pci_conf_write16(0, dbgp->bus, dbgp->slot, dbgp->func,
- PCI_COMMAND, dbgp->pci_cr);
+ pci_conf_write32(sbdf, dbgp->bar, dbgp->bar_val);
+ pci_conf_write16(sbdf, PCI_COMMAND, dbgp->pci_cr);
ehci_dbgp_setup_preirq(dbgp);
ehci_dbgp_setup_postirq(dbgp);
struct ehci_dbgp *dbgp = &ehci_dbgp;
u32 debug_port, offset, bar_val;
const char *e;
+ const pci_sbdf_t sbdf = {
+ .bus = dbgp->bus,
+ .dev = dbgp->slot,
+ .func = dbgp->func,
+ };
if ( strncmp(opt_dbgp, "ehci", 4) )
return;
else
return;
- debug_port = pci_conf_read32(0, dbgp->bus, dbgp->slot, dbgp->func,
- dbgp->cap);
+ debug_port = pci_conf_read32(sbdf, dbgp->cap);
dbgp->bar = (debug_port >> 29) & 0x7;
dbgp->bar = ((dbgp->bar - 1) * 4) + PCI_BASE_ADDRESS_0;
offset = (debug_port >> 16) & 0xfff;
return;
}
- dbgp->bar_val = bar_val = pci_conf_read32(0, dbgp->bus, dbgp->slot,
- dbgp->func, dbgp->bar);
+ dbgp->bar_val = bar_val = pci_conf_read32(sbdf, dbgp->bar);
dbgp_printk("bar_val: %08x\n", bar_val);
if ( bar_val & ~PCI_BASE_ADDRESS_MEM_MASK )
{
static void pci_serial_early_init(struct ns16550 *uart)
{
#ifdef CONFIG_HAS_PCI
+ /*
+ * Two distinct devices are programmed here: the parent bridge
+ * (uart->pb_bdf) gets its I/O window, and the serial device itself
+ * (uart->ps_bdf) gets its BAR and command register. They must NOT
+ * share a single pci_sbdf_t.
+ */
+ const pci_sbdf_t ps_sbdf = {
+ .bus = uart->ps_bdf[0],
+ .dev = uart->ps_bdf[1],
+ .func = uart->ps_bdf[2],
+ };
+
if ( !uart->ps_bdf_enable || uart->io_base >= 0x10000 )
return;
if ( uart->pb_bdf_enable )
- pci_conf_write16(0, uart->pb_bdf[0], uart->pb_bdf[1], uart->pb_bdf[2],
- PCI_IO_BASE,
+ pci_conf_write16(PCI_SBDF_T(0, uart->pb_bdf[0], uart->pb_bdf[1],
+ uart->pb_bdf[2]), PCI_IO_BASE,
(uart->io_base & 0xF000) |
((uart->io_base & 0xF000) >> 8));
- pci_conf_write32(0, uart->ps_bdf[0], uart->ps_bdf[1], uart->ps_bdf[2],
- PCI_BASE_ADDRESS_0,
+ pci_conf_write32(ps_sbdf, PCI_BASE_ADDRESS_0,
uart->io_base | PCI_BASE_ADDRESS_SPACE_IO);
- pci_conf_write16(0, uart->ps_bdf[0], uart->ps_bdf[1], uart->ps_bdf[2],
- PCI_COMMAND, PCI_COMMAND_IO);
+ pci_conf_write16(ps_sbdf, PCI_COMMAND, PCI_COMMAND_IO);
#endif
}
#ifdef CONFIG_HAS_PCI
if ( uart->bar )
- uart->cr = pci_conf_read16(0, uart->ps_bdf[0], uart->ps_bdf[1],
- uart->ps_bdf[2], PCI_COMMAND);
+ uart->cr = pci_conf_read16(PCI_SBDF_T(0, uart->ps_bdf[0],
+ uart->ps_bdf[1], uart->ps_bdf[2]),
+ PCI_COMMAND);
#endif
}
{
#ifdef CONFIG_HAS_PCI
struct ns16550 *uart = port->uart;
+ const pci_sbdf_t sbdf = {
+ .bus = uart->ps_bdf[0],
+ .dev = uart->ps_bdf[1],
+ .func = uart->ps_bdf[2],
+ };
if ( uart->bar )
{
- pci_conf_write32(0, uart->ps_bdf[0], uart->ps_bdf[1], uart->ps_bdf[2],
- PCI_BASE_ADDRESS_0 + uart->bar_idx*4, uart->bar);
+ pci_conf_write32(sbdf, PCI_BASE_ADDRESS_0 + uart->bar_idx*4, uart->bar);
/* If 64 bit BAR, write higher 32 bits to BAR+4 */
if ( uart->bar & PCI_BASE_ADDRESS_MEM_TYPE_64 )
- pci_conf_write32(0, uart->ps_bdf[0],
- uart->ps_bdf[1], uart->ps_bdf[2],
- PCI_BASE_ADDRESS_0 + (uart->bar_idx+1)*4, uart->bar64);
+ pci_conf_write32(sbdf, PCI_BASE_ADDRESS_0 + (uart->bar_idx+1)*4,
+ uart->bar64);
- pci_conf_write16(0, uart->ps_bdf[0], uart->ps_bdf[1], uart->ps_bdf[2],
- PCI_COMMAND, uart->cr);
+ pci_conf_write16(sbdf, PCI_COMMAND, uart->cr);
}
#endif
uint32_t bar, bar_64 = 0, len, len_64;
u64 size = 0;
const struct ns16550_config_param *param = uart_param;
+ const pci_sbdf_t sbdf = {
+ .bus = b,
+ .dev = d,
+ .func = f,
+ };
- nextf = (f || (pci_conf_read16(0, b, d, f, PCI_HEADER_TYPE) &
+ nextf = (f || (pci_conf_read16(sbdf, PCI_HEADER_TYPE) &
0x80)) ? f + 1 : 8;
- switch ( pci_conf_read16(0, b, d, f, PCI_CLASS_DEVICE) )
+ switch ( pci_conf_read16(sbdf, PCI_CLASS_DEVICE) )
{
case 0x0700: /* single port serial */
case 0x0702: /* multi port serial */
/* Check for params in uart_config lookup table */
for ( i = 0; i < ARRAY_SIZE(uart_config); i++ )
{
- u16 vendor = pci_conf_read16(0, b, d, f, PCI_VENDOR_ID);
- u16 device = pci_conf_read16(0, b, d, f, PCI_DEVICE_ID);
+ uint16_t vendor = pci_conf_read16(sbdf, PCI_VENDOR_ID);
+ uint16_t device = pci_conf_read16(sbdf, PCI_DEVICE_ID);
if ( uart_config[i].vendor_id == vendor &&
uart_config[i].dev_id == device )
}
uart->io_base = 0;
- bar = pci_conf_read32(0, b, d, f,
- PCI_BASE_ADDRESS_0 + bar_idx*4);
+ bar = pci_conf_read32(sbdf, PCI_BASE_ADDRESS_0 + bar_idx*4);
/* MMIO based */
if ( param->mmio && !(bar & PCI_BASE_ADDRESS_SPACE_IO) )
{
- pci_conf_write32(0, b, d, f,
- PCI_BASE_ADDRESS_0 + bar_idx*4, ~0u);
- len = pci_conf_read32(0, b, d, f, PCI_BASE_ADDRESS_0 + bar_idx*4);
- pci_conf_write32(0, b, d, f,
- PCI_BASE_ADDRESS_0 + bar_idx*4, bar);
+ pci_conf_write32(sbdf, PCI_BASE_ADDRESS_0 + bar_idx*4, ~0u);
+ len = pci_conf_read32(sbdf, PCI_BASE_ADDRESS_0 + bar_idx*4);
+ pci_conf_write32(sbdf, PCI_BASE_ADDRESS_0 + bar_idx*4, bar);
/* Handle 64 bit BAR if found */
if ( bar & PCI_BASE_ADDRESS_MEM_TYPE_64 )
{
- bar_64 = pci_conf_read32(0, b, d, f,
+ bar_64 = pci_conf_read32(sbdf,
PCI_BASE_ADDRESS_0 + (bar_idx+1)*4);
- pci_conf_write32(0, b, d, f,
+ pci_conf_write32(sbdf,
PCI_BASE_ADDRESS_0 + (bar_idx+1)*4, ~0u);
- len_64 = pci_conf_read32(0, b, d, f,
+ len_64 = pci_conf_read32(sbdf,
PCI_BASE_ADDRESS_0 + (bar_idx+1)*4);
- pci_conf_write32(0, b, d, f,
+ pci_conf_write32(sbdf,
PCI_BASE_ADDRESS_0 + (bar_idx+1)*4, bar_64);
size = ((u64)~0 << 32) | PCI_BASE_ADDRESS_MEM_MASK;
size &= ((u64)len_64 << 32) | len;
/* IO based */
else if ( !param->mmio && (bar & PCI_BASE_ADDRESS_SPACE_IO) )
{
- pci_conf_write32(0, b, d, f,
- PCI_BASE_ADDRESS_0 + bar_idx*4, ~0u);
- len = pci_conf_read32(0, b, d, f, PCI_BASE_ADDRESS_0);
- pci_conf_write32(0, b, d, f,
- PCI_BASE_ADDRESS_0 + bar_idx*4, bar);
+ pci_conf_write32(sbdf, PCI_BASE_ADDRESS_0 + bar_idx*4, ~0u);
+ len = pci_conf_read32(sbdf, PCI_BASE_ADDRESS_0);
+ pci_conf_write32(sbdf, PCI_BASE_ADDRESS_0 + bar_idx*4, bar);
size = len & PCI_BASE_ADDRESS_IO_MASK;
uart->io_base = bar & ~PCI_BASE_ADDRESS_SPACE_IO;
uart->bar64 = bar_64;
uart->io_size = max(8U << param->reg_shift,
param->uart_offset);
- uart->irq = pci_conf_read8(0, b, d, f, PCI_INTERRUPT_PIN) ?
- pci_conf_read8(0, b, d, f, PCI_INTERRUPT_LINE) : 0;
+ uart->irq = pci_conf_read8(sbdf, PCI_INTERRUPT_PIN) ?
+ pci_conf_read8(sbdf, PCI_INTERRUPT_LINE) : 0;
return 0;
}
{
u8 type;
- iommu->cap.header = pci_conf_read32(seg, bus, dev, func, cap_ptr);
+ iommu->cap.header = pci_conf_read32(PCI_SBDF_T(seg, bus, dev, func),
+ cap_ptr);
type = get_field_from_reg_u32(iommu->cap.header, PCI_CAP_TYPE_MASK,
PCI_CAP_TYPE_SHIFT);
PCI_SLOT(iommu->bdf), PCI_FUNC(iommu->bdf));
return 0;
}
- control = pci_conf_read16(iommu->seg, PCI_BUS(iommu->bdf),
- PCI_SLOT(iommu->bdf), PCI_FUNC(iommu->bdf),
+ control = pci_conf_read16(iommu->msi.dev->sbdf,
iommu->msi.msi_attrib.pos + PCI_MSI_FLAGS);
iommu->msi.msi.nvec = 1;
if ( is_mask_bit_support(control) )
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
u32 value;
+ const pci_sbdf_t sbdf = {
+ .seg = iommu->seg,
+ .bdf = iommu->bdf,
+ };
u8 bus = PCI_BUS(iommu->bdf);
u8 dev = PCI_SLOT(iommu->bdf);
u8 func = PCI_FUNC(iommu->bdf);
(boot_cpu_data.x86_model > 0x1f) )
return;
- pci_conf_write32(iommu->seg, bus, dev, func, 0xf0, 0x90);
- value = pci_conf_read32(iommu->seg, bus, dev, func, 0xf4);
+ pci_conf_write32(sbdf, 0xf0, 0x90);
+ value = pci_conf_read32(sbdf, 0xf4);
if ( value & (1 << 2) )
return;
/* Select NB indirect register 0x90 and enable writing */
- pci_conf_write32(iommu->seg, bus, dev, func, 0xf0, 0x90 | (1 << 8));
+ pci_conf_write32(sbdf, 0xf0, 0x90 | (1 << 8));
- pci_conf_write32(iommu->seg, bus, dev, func, 0xf4, value | (1 << 2));
+ pci_conf_write32(sbdf, 0xf4, value | (1 << 2));
printk(XENLOG_INFO
"AMD-Vi: Applying erratum 746 workaround for IOMMU at %04x:%02x:%02x.%u\n",
iommu->seg, bus, dev, func);
/* Clear the enable writing bit */
- pci_conf_write32(iommu->seg, bus, dev, func, 0xf0, 0x90);
+ pci_conf_write32(sbdf, 0xf0, 0x90);
}
static void enable_iommu(struct amd_iommu *iommu)
for (bus = 0; bus < 256; bus++)
{
- id = pci_conf_read32(0, bus, 0x14, 0, PCI_VENDOR_ID);
+ const pci_sbdf_t sbdf = {
+ .bus = bus,
+ .dev = 0x14,
+ };
+
+ id = pci_conf_read32(sbdf, PCI_VENDOR_ID);
vendor_id = id & 0xffff;
dev_id = (id >> 16) & 0xffff;
if (vendor_id != 0x1002 || dev_id != 0x4385)
continue;
- byte = pci_conf_read8(0, bus, 0x14, 0, 0xad);
+ byte = pci_conf_read8(sbdf, 0xad);
if ( (byte >> 3) & 1 )
{
printk(XENLOG_WARNING "AMD-Vi: SP5100 erratum 28 detected, disabling IOMMU.\n"
pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);
BUG_ON(!pos);
- value = pci_conf_read16(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
- pos + ATS_REG_CTL);
- value = pci_conf_read16(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
- pos + ATS_REG_CTL);
+ value = pci_conf_read16(PCI_SBDF3_T(seg, bus, devfn), pos + ATS_REG_CTL);
return value & ATS_ENABLE;
}
if ( command_mask )
{
- val = pci_conf_read16(seg, bus, dev, func, PCI_COMMAND);
+ val = pci_conf_read16(pdev->sbdf, PCI_COMMAND);
if ( val & command_mask )
- pci_conf_write16(seg, bus, dev, func, PCI_COMMAND,
- val & ~command_mask);
- val = pci_conf_read16(seg, bus, dev, func, PCI_STATUS);
+ pci_conf_write16(pdev->sbdf, PCI_COMMAND, val & ~command_mask);
+ val = pci_conf_read16(pdev->sbdf, PCI_STATUS);
if ( val & PCI_STATUS_CHECK )
{
printk(XENLOG_INFO "%04x:%02x:%02x.%u status %04x -> %04x\n",
seg, bus, dev, func, val, val & ~PCI_STATUS_CHECK);
- pci_conf_write16(seg, bus, dev, func, PCI_STATUS,
- val & PCI_STATUS_CHECK);
+ pci_conf_write16(pdev->sbdf, PCI_STATUS, val & PCI_STATUS_CHECK);
}
}
- switch ( pci_conf_read8(seg, bus, dev, func, PCI_HEADER_TYPE) & 0x7f )
+ switch ( pci_conf_read8(pdev->sbdf, PCI_HEADER_TYPE) & 0x7f )
{
case PCI_HEADER_TYPE_BRIDGE:
if ( !bridge_ctl_mask )
break;
- val = pci_conf_read16(seg, bus, dev, func, PCI_BRIDGE_CONTROL);
+ val = pci_conf_read16(pdev->sbdf, PCI_BRIDGE_CONTROL);
if ( val & bridge_ctl_mask )
- pci_conf_write16(seg, bus, dev, func, PCI_BRIDGE_CONTROL,
+ pci_conf_write16(pdev->sbdf, PCI_BRIDGE_CONTROL,
val & ~bridge_ctl_mask);
- val = pci_conf_read16(seg, bus, dev, func, PCI_SEC_STATUS);
+ val = pci_conf_read16(pdev->sbdf, PCI_SEC_STATUS);
if ( val & PCI_STATUS_CHECK )
{
printk(XENLOG_INFO
"%04x:%02x:%02x.%u secondary status %04x -> %04x\n",
seg, bus, dev, func, val, val & ~PCI_STATUS_CHECK);
- pci_conf_write16(seg, bus, dev, func, PCI_SEC_STATUS,
+ pci_conf_write16(pdev->sbdf, PCI_SEC_STATUS,
val & PCI_STATUS_CHECK);
}
break;
static void apply_quirks(struct pci_dev *pdev)
{
- uint16_t vendor = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus,
- pdev->sbdf.dev, pdev->sbdf.func,
- PCI_VENDOR_ID);
- uint16_t device = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus,
- pdev->sbdf.dev, pdev->sbdf.func,
- PCI_DEVICE_ID);
+ uint16_t vendor = pci_conf_read16(pdev->sbdf, PCI_VENDOR_ID);
+ uint16_t device = pci_conf_read16(pdev->sbdf, PCI_DEVICE_ID);
static const struct {
uint16_t vendor, device;
} ignore_bars[] = {
case DEV_TYPE_PCIe2PCI_BRIDGE:
case DEV_TYPE_LEGACY_PCI_BRIDGE:
- sec_bus = pci_conf_read8(pseg->nr, bus, PCI_SLOT(devfn),
- PCI_FUNC(devfn), PCI_SECONDARY_BUS);
- sub_bus = pci_conf_read8(pseg->nr, bus, PCI_SLOT(devfn),
- PCI_FUNC(devfn), PCI_SUBORDINATE_BUS);
+ sec_bus = pci_conf_read8(pdev->sbdf, PCI_SECONDARY_BUS);
+ sub_bus = pci_conf_read8(pdev->sbdf, PCI_SUBORDINATE_BUS);
spin_lock(&pseg->bus2bridge_lock);
for ( ; sec_bus <= sub_bus; sec_bus++ )
pos = pci_find_cap_offset(pseg->nr, bus, PCI_SLOT(devfn),
PCI_FUNC(devfn), PCI_CAP_ID_EXP);
BUG_ON(!pos);
- cap = pci_conf_read16(pseg->nr, bus, PCI_SLOT(devfn),
- PCI_FUNC(devfn), pos + PCI_EXP_DEVCAP);
+ cap = pci_conf_read16(pdev->sbdf, pos + PCI_EXP_DEVCAP);
if ( cap & PCI_EXP_DEVCAP_PHANTOM )
{
pdev->phantom_stride = 8 >> MASK_EXTR(cap,
case DEV_TYPE_PCIe2PCI_BRIDGE:
case DEV_TYPE_LEGACY_PCI_BRIDGE:
- sec_bus = pci_conf_read8(pseg->nr, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, PCI_SECONDARY_BUS);
- sub_bus = pci_conf_read8(pseg->nr, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, PCI_SUBORDINATE_BUS);
+ sec_bus = pci_conf_read8(pdev->sbdf, PCI_SECONDARY_BUS);
+ sub_bus = pci_conf_read8(pdev->sbdf, PCI_SUBORDINATE_BUS);
spin_lock(&pseg->bus2bridge_lock);
for ( ; sec_bus <= sub_bus; sec_bus++ )
int pos;
uint16_t cap, ctrl, seg = pdev->sbdf.seg;
uint8_t bus = pdev->sbdf.bus;
- uint8_t dev = pdev->sbdf.dev;
- uint8_t func = pdev->sbdf.func;
if ( !iommu_enabled )
return;
if (!pos)
return;
- cap = pci_conf_read16(seg, bus, dev, func, pos + PCI_ACS_CAP);
- ctrl = pci_conf_read16(seg, bus, dev, func, pos + PCI_ACS_CTRL);
+ cap = pci_conf_read16(pdev->sbdf, pos + PCI_ACS_CAP);
+ ctrl = pci_conf_read16(pdev->sbdf, pos + PCI_ACS_CTRL);
/* Source Validation */
ctrl |= (cap & PCI_ACS_SV);
/* Upstream Forwarding */
ctrl |= (cap & PCI_ACS_UF);
- pci_conf_write16(seg, bus, dev, func, pos + PCI_ACS_CTRL, ctrl);
+ pci_conf_write16(pdev->sbdf, pos + PCI_ACS_CTRL, ctrl);
}
static int iommu_add_device(struct pci_dev *pdev);
uint64_t *paddr, uint64_t *psize,
unsigned int flags)
{
- uint32_t hi = 0, bar = pci_conf_read32(sbdf.seg, sbdf.bus, sbdf.dev,
- sbdf.func, pos);
+ uint32_t hi = 0, bar = pci_conf_read32(sbdf, pos);
uint64_t size;
bool is64bits = !(flags & PCI_BAR_ROM) &&
(bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64;
ASSERT(!((flags & PCI_BAR_VF) && (flags & PCI_BAR_ROM)));
ASSERT((flags & PCI_BAR_ROM) ||
(bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY);
- pci_conf_write32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, pos, ~0);
+ pci_conf_write32(sbdf, pos, ~0);
if ( is64bits )
{
if ( flags & PCI_BAR_LAST )
*psize = 0;
return 1;
}
- hi = pci_conf_read32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, pos + 4);
- pci_conf_write32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, pos + 4, ~0);
+ hi = pci_conf_read32(sbdf, pos + 4);
+ pci_conf_write32(sbdf, pos + 4, ~0);
}
- size = pci_conf_read32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func,
- pos) & mask;
+ size = pci_conf_read32(sbdf, pos) & mask;
if ( is64bits )
{
- size |= (uint64_t)pci_conf_read32(sbdf.seg, sbdf.bus, sbdf.dev,
- sbdf.func, pos + 4) << 32;
- pci_conf_write32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, pos + 4, hi);
+ size |= (uint64_t)pci_conf_read32(sbdf, pos + 4) << 32;
+ pci_conf_write32(sbdf, pos + 4, hi);
}
else if ( size )
size |= (uint64_t)~0 << 32;
- pci_conf_write32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, pos, bar);
+ pci_conf_write32(sbdf, pos, bar);
size = -size;
if ( paddr )
{
unsigned int pos = pci_find_ext_capability(seg, bus, devfn,
PCI_EXT_CAP_ID_SRIOV);
- u16 ctrl = pci_conf_read16(seg, bus, slot, func, pos + PCI_SRIOV_CTRL);
+ u16 ctrl = pci_conf_read16(pdev->sbdf, pos + PCI_SRIOV_CTRL);
if ( !pos )
/* Nothing */;
for ( i = 0; i < PCI_SRIOV_NUM_BARS; )
{
unsigned int idx = pos + PCI_SRIOV_BAR + i * 4;
- u32 bar = pci_conf_read32(seg, bus, slot, func, idx);
- pci_sbdf_t sbdf = {
- .sbdf = PCI_SBDF3(seg, bus, devfn),
- };
+ uint32_t bar = pci_conf_read32(pdev->sbdf, idx);
if ( (bar & PCI_BASE_ADDRESS_SPACE) ==
PCI_BASE_ADDRESS_SPACE_IO )
seg, bus, slot, func, i);
continue;
}
- ret = pci_size_mem_bar(sbdf, idx, NULL, &pdev->vf_rlen[i],
+ ret = pci_size_mem_bar(pdev->sbdf, idx, NULL, &pdev->vf_rlen[i],
PCI_BAR_VF |
((i == PCI_SRIOV_NUM_BARS - 1) ?
PCI_BAR_LAST : 0));
u16 class_device, creg;
u8 d = PCI_SLOT(devfn), f = PCI_FUNC(devfn);
int pos = pci_find_cap_offset(seg, bus, d, f, PCI_CAP_ID_EXP);
+ const pci_sbdf_t sbdf = {
+ .seg = seg,
+ .bus = bus,
+ .dev = d,
+ .func = f,
+ };
- class_device = pci_conf_read16(seg, bus, d, f, PCI_CLASS_DEVICE);
+ class_device = pci_conf_read16(sbdf, PCI_CLASS_DEVICE);
switch ( class_device )
{
case PCI_CLASS_BRIDGE_PCI:
if ( !pos )
return DEV_TYPE_LEGACY_PCI_BRIDGE;
- creg = pci_conf_read16(seg, bus, d, f, pos + PCI_EXP_FLAGS);
+ creg = pci_conf_read16(sbdf, pos + PCI_EXP_FLAGS);
switch ( (creg & PCI_EXP_FLAGS_TYPE) >> 4 )
{
case PCI_EXP_TYPE_PCI_BRIDGE:
{
u32 vendor;
- vendor = pci_conf_read32(seg, bus, dev, func, PCI_VENDOR_ID);
+ vendor = pci_conf_read32(PCI_SBDF_T(seg, bus, dev, func), PCI_VENDOR_ID);
/* some broken boards return 0 or ~0 if a slot is empty: */
if ( (vendor == 0xffffffff) || (vendor == 0x00000000) ||
(vendor == 0x0000ffff) || (vendor == 0xffff0000) )
/* Tell the device to stop DMAing; we can't rely on the guest to
* control it for us. */
- cword = pci_conf_read16(seg, bus, pdev->sbdf.dev, pdev->sbdf.func,
- PCI_COMMAND);
- pci_conf_write16(seg, bus, pdev->sbdf.dev, pdev->sbdf.func,
- PCI_COMMAND, cword & ~PCI_COMMAND_MASTER);
+ cword = pci_conf_read16(pdev->sbdf, PCI_COMMAND);
+ pci_conf_write16(pdev->sbdf, PCI_COMMAND, cword & ~PCI_COMMAND_MASTER);
}
/*
return -ENOMEM;
}
- if ( !func && !(pci_conf_read8(pseg->nr, bus, dev, func,
+ if ( !func && !(pci_conf_read8(pdev->sbdf,
PCI_HEADER_TYPE) & 0x80) )
break;
}
unsigned int pos = pci_find_cap_offset(pdev->sbdf.seg, pdev->sbdf.bus,
pdev->sbdf.dev, pdev->sbdf.func,
PCI_CAP_ID_EXP);
- uint8_t pcie = MASK_EXTR(pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus,
- pdev->sbdf.dev, pdev->sbdf.func,
- pos + PCI_EXP_FLAGS),
+ uint8_t pcie = MASK_EXTR(pci_conf_read16(pdev->sbdf, pos + PCI_EXP_FLAGS),
PCI_EXP_FLAGS_TYPE);
switch ( hest_hdr->type )
case ACPI_HEST_TYPE_AER_ENDPOINT:
return pcie == PCI_EXP_TYPE_ENDPOINT;
case ACPI_HEST_TYPE_AER_BRIDGE:
- return pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, PCI_CLASS_DEVICE) ==
+ return pci_conf_read16(pdev->sbdf, PCI_CLASS_DEVICE) ==
PCI_CLASS_BRIDGE_PCI;
}
while ( --depth > 0 )
{
- bus = pci_conf_read8(seg, bus, path->dev, path->fn,
+ bus = pci_conf_read8(PCI_SBDF_T(seg, bus, path->dev, path->fn),
PCI_SECONDARY_BUS);
path++;
}
switch ( acpi_scope->entry_type )
{
case ACPI_DMAR_SCOPE_TYPE_BRIDGE:
- sec_bus = pci_conf_read8(seg, bus, path->dev, path->fn,
- PCI_SECONDARY_BUS);
- sub_bus = pci_conf_read8(seg, bus, path->dev, path->fn,
- PCI_SUBORDINATE_BUS);
+ {
+ const pci_sbdf_t sbdf = {
+ .seg = seg,
+ .bus = bus,
+ .dev = path->dev,
+ .func = path->fn,
+ };
+
+ sec_bus = pci_conf_read8(sbdf, PCI_SECONDARY_BUS);
+ sub_bus = pci_conf_read8(sbdf, PCI_SUBORDINATE_BUS);
if ( iommu_verbose )
printk(VTDPREFIX
" bridge: %04x:%02x:%02x.%u start=%x sec=%x sub=%x\n",
dmar_scope_add_buses(scope, sec_bus, sub_bus);
break;
-
+ }
case ACPI_DMAR_SCOPE_TYPE_HPET:
if ( iommu_verbose )
printk(VTDPREFIX " MSI HPET: %04x:%02x:%02x.%u\n",
static u8 *__read_mostly igd_reg_va;
static spinlock_t igd_lock;
+static const pci_sbdf_t igd_sbdf = {
+ .dev = IGD_DEV,
+};
+
+static const pci_sbdf_t ioh_sbdf = {
+ .dev = IOH_DEV,
+};
+
/*
* QUIRK to workaround Xen boot issue on Calpella/Ironlake OEM BIOS
* not enabling VT-d properly in IGD. The workaround is to not enabling
return 1;
/* integrated graphics on Intel platforms is located at 0:2.0 */
- ggc = pci_conf_read16(0, 0, IGD_DEV, 0, GGC);
+ ggc = pci_conf_read16(igd_sbdf, GGC);
return ( ggc & GGC_MEMORY_VT_ENABLED ? 1 : 0 );
}
u16 vid;
u8 did_hi, rid;
- vid = pci_conf_read16(0, 0, IGD_DEV, 0, 0);
+ vid = pci_conf_read16(igd_sbdf, 0);
if ( vid != 0x8086 )
return;
- did_hi = pci_conf_read8(0, 0, IGD_DEV, 0, 3);
- rid = pci_conf_read8(0, 0, IGD_DEV, 0, 8);
+ did_hi = pci_conf_read8(igd_sbdf, 3);
+ rid = pci_conf_read8(igd_sbdf, 8);
if ( (did_hi == 0x2A) && (rid == 0x7) )
is_cantiga_b3 = 1;
if ( igd_reg_va )
return;
- igd_mmio = pci_conf_read32(0, 0, IGD_DEV, 0, PCI_BASE_ADDRESS_1);
+ igd_mmio = pci_conf_read32(igd_sbdf, PCI_BASE_ADDRESS_1);
igd_mmio <<= 32;
- igd_mmio += pci_conf_read32(0, 0, IGD_DEV, 0, PCI_BASE_ADDRESS_0);
+ igd_mmio += pci_conf_read32(igd_sbdf, PCI_BASE_ADDRESS_0);
igd_reg_va = ioremap(igd_mmio & IGD_BAR_MASK, 0x3000);
}
for ( bus = 0; bus < 0x100; bus++ )
{
+ const pci_sbdf_t sbdf = {
+ .bus = bus,
+ .dev = 20,
+ };
+
/* Match on System Management Registers on Device 20 Function 0 */
- device = pci_conf_read32(0, bus, 20, 0, PCI_VENDOR_ID);
- rev = pci_conf_read8(0, bus, 20, 0, PCI_REVISION_ID);
+ device = pci_conf_read32(sbdf, PCI_VENDOR_ID);
+ rev = pci_conf_read8(sbdf, PCI_REVISION_ID);
if ( rev == 0x13 && device == 0x342e8086 )
{
/* initialize platform identification flags */
void __init platform_quirks_init(void)
{
- ioh_id = pci_conf_read32(0, 0, IOH_DEV, 0, 0);
- igd_id = pci_conf_read32(0, 0, IGD_DEV, 0, 0);
+ ioh_id = pci_conf_read32(ioh_sbdf, 0);
+ igd_id = pci_conf_read32(igd_sbdf, 0);
/* Mobile 4 Series Chipset neglects to set RWBF capability. */
if ( ioh_id == 0x2a408086 )
u32 id;
int rc = 0;
- id = pci_conf_read32(0, 0, 0, 0, 0);
+ id = pci_conf_read32(PCI_SBDF_T(0, 0, 0, 0), 0);
if ( IS_CTG(id) )
{
/* quit if ME does not exist */
- if ( pci_conf_read32(0, 0, 3, 0, 0) == 0xffffffff )
+ if ( pci_conf_read32(PCI_SBDF_T(0, 0, 3, 0), 0) == 0xffffffff )
return 0;
/* if device is WLAN device, map ME phantom device 0:3.7 */
- id = pci_conf_read32(0, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), 0);
+ id = pci_conf_read32(PCI_SBDF3_T(0, bus, devfn), 0);
switch (id)
{
case 0x42328086:
else if ( IS_ILK(id) || IS_CPT(id) )
{
/* quit if ME does not exist */
- if ( pci_conf_read32(0, 0, 22, 0, 0) == 0xffffffff )
+ if ( pci_conf_read32(PCI_SBDF_T(0, 0, 22, 0), 0) == 0xffffffff )
return 0;
/* if device is WLAN device, map ME phantom device 0:22.7 */
- id = pci_conf_read32(0, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), 0);
+ id = pci_conf_read32(PCI_SBDF3_T(0, bus, devfn), 0);
switch (id)
{
case 0x00878086: /* Kilmer Peak */
paddr_t pa;
const char *action;
- if ( pci_conf_read16(seg, bus, dev, func, PCI_VENDOR_ID) !=
+ if ( pci_conf_read16(pdev->sbdf, PCI_VENDOR_ID) !=
PCI_VENDOR_ID_INTEL )
return;
- switch ( pci_conf_read16(seg, bus, dev, func, PCI_DEVICE_ID) )
+ switch ( pci_conf_read16(pdev->sbdf, PCI_DEVICE_ID) )
{
/*
* Mask reporting Intel VT-d faults to IOH core logic:
case 0x342e: /* Tylersburg chipset (Nehalem / Westmere systems) */
case 0x3728: /* Xeon C5500/C3500 (JasperForest) */
case 0x3c28: /* Sandybridge */
- val = pci_conf_read32(seg, bus, dev, func, 0x1AC);
- pci_conf_write32(seg, bus, dev, func, 0x1AC, val | (1 << 31));
+ val = pci_conf_read32(pdev->sbdf, 0x1AC);
+ pci_conf_write32(pdev->sbdf, 0x1AC, val | (1 << 31));
printk(XENLOG_INFO "Masked VT-d error signaling on %04x:%02x:%02x.%u\n",
seg, bus, dev, func);
break;
PCI_EXT_CAP_ID_VNDR);
while ( pos )
{
- val = pci_conf_read32(seg, bus, dev, func, pos + PCI_VNDR_HEADER);
+ val = pci_conf_read32(pdev->sbdf, pos + PCI_VNDR_HEADER);
if ( PCI_VNDR_HEADER_ID(val) == 4 && PCI_VNDR_HEADER_REV(val) == 1 )
{
pos += PCI_VNDR_HEADER;
break;
}
- val = pci_conf_read32(seg, bus, dev, func, pos + PCI_ERR_UNCOR_MASK);
- val2 = pci_conf_read32(seg, bus, dev, func, pos + PCI_ERR_COR_MASK);
+ val = pci_conf_read32(pdev->sbdf, pos + PCI_ERR_UNCOR_MASK);
+ val2 = pci_conf_read32(pdev->sbdf, pos + PCI_ERR_COR_MASK);
if ( (val & PCI_ERR_UNC_UNSUP) && (val2 & PCI_ERR_COR_ADV_NFAT) )
action = "Found masked";
else if ( !ff )
{
- pci_conf_write32(seg, bus, dev, func, pos + PCI_ERR_UNCOR_MASK,
+ pci_conf_write32(pdev->sbdf, pos + PCI_ERR_UNCOR_MASK,
val | PCI_ERR_UNC_UNSUP);
- pci_conf_write32(seg, bus, dev, func, pos + PCI_ERR_COR_MASK,
+ pci_conf_write32(pdev->sbdf, pos + PCI_ERR_COR_MASK,
val2 | PCI_ERR_COR_ADV_NFAT);
action = "Masked";
}
action = "Must not mask";
/* XPUNCERRMSK Send Completion with Unsupported Request */
- val = pci_conf_read32(seg, bus, dev, func, 0x20c);
- pci_conf_write32(seg, bus, dev, func, 0x20c, val | (1 << 4));
+ val = pci_conf_read32(pdev->sbdf, 0x20c);
+ pci_conf_write32(pdev->sbdf, 0x20c, val | (1 << 4));
printk(XENLOG_INFO "%s UR signaling on %04x:%02x:%02x.%u\n",
action, seg, bus, dev, func);
case 0x1610: case 0x1614: case 0x1618: /* Broadwell */
case 0x1900: case 0x1904: case 0x1908: case 0x190c: case 0x190f: /* Skylake */
case 0x1910: case 0x1918: case 0x191f: /* Skylake */
- bar = pci_conf_read32(seg, bus, dev, func, 0x6c);
- bar = (bar << 32) | pci_conf_read32(seg, bus, dev, func, 0x68);
+ bar = pci_conf_read32(pdev->sbdf, 0x6c);
+ bar = (bar << 32) | pci_conf_read32(pdev->sbdf, 0x68);
pa = bar & 0x7ffffff000UL; /* bits 12...38 */
if ( (bar & 1) && pa &&
page_is_ram_type(paddr_to_pfn(pa), RAM_TYPE_RESERVED) )
dprintk(XENLOG_INFO, "%04x:%02x:%02x.%u: ATS capability found\n",
seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
- value = pci_conf_read16(seg, bus, PCI_SLOT(devfn),
- PCI_FUNC(devfn), pos + ATS_REG_CTL);
+ value = pci_conf_read16(pdev->sbdf, pos + ATS_REG_CTL);
if ( value & ATS_ENABLE )
{
struct pci_dev *other;
if ( !(value & ATS_ENABLE) )
{
value |= ATS_ENABLE;
- pci_conf_write16(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
- pos + ATS_REG_CTL, value);
+ pci_conf_write16(pdev->sbdf, pos + ATS_REG_CTL, value);
}
if ( pos )
{
pdev->ats.cap_pos = pos;
- value = pci_conf_read16(seg, bus, PCI_SLOT(devfn),
- PCI_FUNC(devfn), pos + ATS_REG_CAP);
+ value = pci_conf_read16(pdev->sbdf, pos + ATS_REG_CAP);
pdev->ats.queue_depth = value & ATS_QUEUE_DEPTH_MASK ?:
ATS_QUEUE_DEPTH_MASK + 1;
list_add(&pdev->ats.list, ats_list);
BUG_ON(!pdev->ats.cap_pos);
- value = pci_conf_read16(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
- pdev->ats.cap_pos + ATS_REG_CTL);
+ value = pci_conf_read16(pdev->sbdf, pdev->ats.cap_pos + ATS_REG_CTL);
value &= ~ATS_ENABLE;
- pci_conf_write16(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
- pdev->ats.cap_pos + ATS_REG_CTL, value);
+ pci_conf_write16(pdev->sbdf, pdev->ats.cap_pos + ATS_REG_CTL, value);
list_del(&pdev->ats.list);
int max_cap = 48;
u8 pos = PCI_CAPABILITY_LIST;
u16 status;
-
- status = pci_conf_read16(seg, bus, dev, func, PCI_STATUS);
+ const pci_sbdf_t sbdf = {
+ .seg = seg,
+ .bus = bus,
+ .dev = dev,
+ .func = func,
+ };
+
+ status = pci_conf_read16(sbdf, PCI_STATUS);
if ( (status & PCI_STATUS_CAP_LIST) == 0 )
return 0;
while ( max_cap-- )
{
- pos = pci_conf_read8(seg, bus, dev, func, pos);
+ pos = pci_conf_read8(sbdf, pos);
if ( pos < 0x40 )
break;
pos &= ~3;
- id = pci_conf_read8(seg, bus, dev, func, pos + PCI_CAP_LIST_ID);
+ id = pci_conf_read8(sbdf, pos + PCI_CAP_LIST_ID);
if ( id == 0xff )
break;
{
u8 id;
int ttl = 48;
+ const pci_sbdf_t sbdf = {
+ .seg = seg,
+ .bus = bus,
+ .extfunc = devfn,
+ };
while ( ttl-- )
{
- pos = pci_conf_read8(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos);
+ pos = pci_conf_read8(sbdf, pos);
if ( pos < 0x40 )
break;
pos &= ~3;
- id = pci_conf_read8(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
- pos + PCI_CAP_LIST_ID);
+ id = pci_conf_read8(sbdf, pos + PCI_CAP_LIST_ID);
if ( id == 0xff )
break;
*/
int pci_find_next_ext_capability(int seg, int bus, int devfn, int start, int cap)
{
- u32 header;
int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */
int pos = max(start, 0x100);
-
- header = pci_conf_read32(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos);
+ const pci_sbdf_t sbdf = {
+ .seg = seg,
+ .bus = bus,
+ .extfunc = devfn,
+ };
+ uint32_t header = pci_conf_read32(sbdf, pos);
/*
* If we have no capabilities, this is indicated by cap ID,
pos = PCI_EXT_CAP_NEXT(header);
if ( pos < 0x100 )
break;
- header = pci_conf_read32(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos);
+ header = pci_conf_read32(sbdf, pos);
}
return 0;
}
void pci_intx(const struct pci_dev *pdev, bool enable)
{
- uint16_t seg = pdev->sbdf.seg;
- uint8_t bus = pdev->sbdf.bus;
- uint8_t slot = pdev->sbdf.dev;
- uint8_t func = pdev->sbdf.func;
- uint16_t cmd = pci_conf_read16(seg, bus, slot, func, PCI_COMMAND);
+ uint16_t cmd = pci_conf_read16(pdev->sbdf, PCI_COMMAND);
if ( enable )
cmd &= ~PCI_COMMAND_INTX_DISABLE;
else
cmd |= PCI_COMMAND_INTX_DISABLE;
- pci_conf_write16(seg, bus, slot, func, PCI_COMMAND, cmd);
+ pci_conf_write16(pdev->sbdf, PCI_COMMAND, cmd);
}
const char *__init parse_pci(const char *s, unsigned int *seg_p,
pcidevs_unlock();
if ( !pdev ||
- pci_conf_read16(0, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
- PCI_CLASS_DEVICE) != 0x0300 ||
- !(pci_conf_read16(0, bus, PCI_SLOT(devfn),
- PCI_FUNC(devfn), PCI_COMMAND) &
+ pci_conf_read16(pdev->sbdf, PCI_CLASS_DEVICE) != 0x0300 ||
+ !(pci_conf_read16(pdev->sbdf, PCI_COMMAND) &
(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) )
continue;
b = 0;
break;
case 1:
- switch ( pci_conf_read8(0, b, PCI_SLOT(df),
- PCI_FUNC(df),
- PCI_HEADER_TYPE) )
+ {
+ const pci_sbdf_t sbdf = {
+ .bus = b,
+ .extfunc = df,
+ };
+
+ switch ( pci_conf_read8(sbdf, PCI_HEADER_TYPE) )
{
case PCI_HEADER_TYPE_BRIDGE:
case PCI_HEADER_TYPE_CARDBUS:
- if ( pci_conf_read16(0, b, PCI_SLOT(df),
- PCI_FUNC(df),
- PCI_BRIDGE_CONTROL) &
+ if ( pci_conf_read16(sbdf, PCI_BRIDGE_CONTROL) &
PCI_BRIDGE_CTL_VGA )
continue;
break;
}
break;
}
+ }
break;
}
if ( !b )
(map ? PCI_ROM_ADDRESS_ENABLE : 0);
header->bars[i].enabled = header->rom_enabled = map;
- pci_conf_write32(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, rom_pos, val);
+ pci_conf_write32(pdev->sbdf, rom_pos, val);
return;
}
}
if ( !rom_only )
- pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, PCI_COMMAND, cmd);
+ pci_conf_write16(pdev->sbdf, PCI_COMMAND, cmd);
else
ASSERT_UNREACHABLE();
}
static void cmd_write(const struct pci_dev *pdev, unsigned int reg,
uint32_t cmd, void *data)
{
- uint16_t current_cmd = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus,
- pdev->sbdf.dev, pdev->sbdf.func,
- reg);
+ uint16_t current_cmd = pci_conf_read16(pdev->sbdf, reg);
/*
* Let Dom0 play with all the bits directly except for the memory
*/
modify_bars(pdev, cmd, false);
else
- pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, reg, cmd);
+ pci_conf_write16(pdev->sbdf, reg, cmd);
}
static void bar_write(const struct pci_dev *pdev, unsigned int reg,
else
val &= PCI_BASE_ADDRESS_MEM_MASK;
- if ( pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, PCI_COMMAND) & PCI_COMMAND_MEMORY )
+ if ( pci_conf_read16(pdev->sbdf, PCI_COMMAND) & PCI_COMMAND_MEMORY )
{
/* If the value written is the current one avoid printing a warning. */
if ( val != (uint32_t)(bar->addr >> (hi ? 32 : 0)) )
val |= bar->prefetchable ? PCI_BASE_ADDRESS_MEM_PREFETCH : 0;
}
- pci_conf_write32(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, reg, val);
+ pci_conf_write32(pdev->sbdf, reg, val);
}
static void rom_write(const struct pci_dev *pdev, unsigned int reg,
{
struct vpci_header *header = &pdev->vpci->header;
struct vpci_bar *rom = data;
- uint16_t cmd = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus,
- pdev->sbdf.dev, pdev->sbdf.func,
- PCI_COMMAND);
+ uint16_t cmd = pci_conf_read16(pdev->sbdf, PCI_COMMAND);
bool new_enabled = val & PCI_ROM_ADDRESS_ENABLE;
if ( (cmd & PCI_COMMAND_MEMORY) && header->rom_enabled && new_enabled )
{
/* Just update the ROM BAR field. */
header->rom_enabled = new_enabled;
- pci_conf_write32(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, reg, val);
+ pci_conf_write32(pdev->sbdf, reg, val);
}
/*
* Pass PCI_COMMAND_MEMORY or 0 to signal a map/unmap request, note that
struct vpci_bar *bars = header->bars;
int rc;
- switch ( pci_conf_read8(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, PCI_HEADER_TYPE) & 0x7f )
+ switch ( pci_conf_read8(pdev->sbdf, PCI_HEADER_TYPE) & 0x7f )
{
case PCI_HEADER_TYPE_NORMAL:
num_bars = PCI_HEADER_NORMAL_NR_BARS;
return 0;
/* Disable memory decoding before sizing. */
- cmd = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, PCI_COMMAND);
+ cmd = pci_conf_read16(pdev->sbdf, PCI_COMMAND);
if ( cmd & PCI_COMMAND_MEMORY )
- pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, PCI_COMMAND,
- cmd & ~PCI_COMMAND_MEMORY);
+ pci_conf_write16(pdev->sbdf, PCI_COMMAND, cmd & ~PCI_COMMAND_MEMORY);
for ( i = 0; i < num_bars; i++ )
{
4, &bars[i]);
if ( rc )
{
- pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, PCI_COMMAND, cmd);
+ pci_conf_write16(pdev->sbdf, PCI_COMMAND, cmd);
return rc;
}
continue;
}
- val = pci_conf_read32(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, reg);
+ val = pci_conf_read32(pdev->sbdf, reg);
if ( (val & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO )
{
bars[i].type = VPCI_BAR_IO;
(i == num_bars - 1) ? PCI_BAR_LAST : 0);
if ( rc < 0 )
{
- pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, PCI_COMMAND, cmd);
+ pci_conf_write16(pdev->sbdf, PCI_COMMAND, cmd);
return rc;
}
&bars[i]);
if ( rc )
{
- pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, PCI_COMMAND, cmd);
+ pci_conf_write16(pdev->sbdf, PCI_COMMAND, cmd);
return rc;
}
}
rom->type = VPCI_BAR_ROM;
rom->size = size;
rom->addr = addr;
- header->rom_enabled = pci_conf_read32(pdev->sbdf.seg, pdev->sbdf.bus,
- pdev->sbdf.dev, pdev->sbdf.func,
- rom_reg) & PCI_ROM_ADDRESS_ENABLE;
+ header->rom_enabled = pci_conf_read32(pdev->sbdf, rom_reg) &
+ PCI_ROM_ADDRESS_ENABLE;
rc = vpci_add_register(pdev->vpci, vpci_hw_read32, rom_write, rom_reg,
4, rom);
msi->vectors = vectors;
msi->enabled = new_enabled;
- pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, reg, control_read(pdev, reg, data));
+ pci_conf_write16(pdev->sbdf, reg, control_read(pdev, reg, data));
}
static void update_msi(const struct pci_dev *pdev, struct vpci_msi *msi)
return ret;
/* Get the maximum number of vectors the device supports. */
- control = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, msi_control_reg(pos));
+ control = pci_conf_read16(pdev->sbdf, msi_control_reg(pos));
/*
* FIXME: I've only been able to test this code with devices using a single
val = control_read(pdev, reg, data);
if ( pci_msi_conf_write_intercept(msix->pdev, reg, 2, &val) >= 0 )
- pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, reg, val);
+ pci_conf_write16(pdev->sbdf, reg, val);
}
static struct vpci_msix *msix_find(const struct domain *d, unsigned long addr)
if ( !msix_offset )
return 0;
- control = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, msix_control_reg(msix_offset));
+ control = pci_conf_read16(pdev->sbdf, msix_control_reg(msix_offset));
max_entries = msix_table_size(control);
pdev->vpci->msix->pdev = pdev;
pdev->vpci->msix->tables[VPCI_MSIX_TABLE] =
- pci_conf_read32(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, msix_table_offset_reg(msix_offset));
+ pci_conf_read32(pdev->sbdf, msix_table_offset_reg(msix_offset));
pdev->vpci->msix->tables[VPCI_MSIX_PBA] =
- pci_conf_read32(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, msix_pba_offset_reg(msix_offset));
+ pci_conf_read32(pdev->sbdf, msix_pba_offset_reg(msix_offset));
for ( i = 0; i < pdev->vpci->msix->max_entries; i++)
{
uint32_t vpci_hw_read16(const struct pci_dev *pdev, unsigned int reg,
void *data)
{
- return pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, reg);
+ return pci_conf_read16(pdev->sbdf, reg);
}
uint32_t vpci_hw_read32(const struct pci_dev *pdev, unsigned int reg,
void *data)
{
- return pci_conf_read32(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
- pdev->sbdf.func, reg);
+ return pci_conf_read32(pdev->sbdf, reg);
}
int vpci_add_register(struct vpci *vpci, vpci_read_t *read_handler,
switch ( size )
{
case 4:
- data = pci_conf_read32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg);
+ data = pci_conf_read32(sbdf, reg);
break;
case 3:
*/
if ( reg & 1 )
{
- data = pci_conf_read8(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func,
- reg);
- data |= pci_conf_read16(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func,
- reg + 1) << 8;
+ data = pci_conf_read8(sbdf, reg);
+ data |= pci_conf_read16(sbdf, reg + 1) << 8;
}
else
{
- data = pci_conf_read16(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func,
- reg);
- data |= pci_conf_read8(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func,
- reg + 2) << 16;
+ data = pci_conf_read16(sbdf, reg);
+ data |= pci_conf_read8(sbdf, reg + 2) << 16;
}
break;
case 2:
- data = pci_conf_read16(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg);
+ data = pci_conf_read16(sbdf, reg);
break;
case 1:
- data = pci_conf_read8(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg);
+ data = pci_conf_read8(sbdf, reg);
break;
default:
switch ( size )
{
case 4:
- pci_conf_write32(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg, data);
+ pci_conf_write32(sbdf, reg, data);
break;
case 3:
*/
if ( reg & 1 )
{
- pci_conf_write8(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg,
- data);
- pci_conf_write16(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg + 1,
- data >> 8);
+ pci_conf_write8(sbdf, reg, data);
+ pci_conf_write16(sbdf, reg + 1, data >> 8);
}
else
{
- pci_conf_write16(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg,
- data);
- pci_conf_write8(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg + 2,
- data >> 16);
+ pci_conf_write16(sbdf, reg, data);
+ pci_conf_write8(sbdf, reg + 2, data >> 16);
}
break;
case 2:
- pci_conf_write16(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg, data);
+ pci_conf_write16(sbdf, reg, data);
break;
case 1:
- pci_conf_write8(sbdf.seg, sbdf.bus, sbdf.dev, sbdf.func, reg, data);
+ pci_conf_write8(sbdf, reg, data);
break;
default:
};
} pci_sbdf_t;
+#define PCI_SBDF_T(s, b, d, f) \
+ ((pci_sbdf_t) { .seg = (s), .bus = (b), .dev = (d), .func = (f) })
+#define PCI_SBDF3_T(s, b, e) \
+ ((pci_sbdf_t) { .seg = (s), .bus = (b), .extfunc = (e) })
+
struct pci_dev_info {
/*
* VF's 'is_extfn' field is used to indicate whether its PF is an extended
int bus, int devfn);
void pci_check_disable_device(u16 seg, u8 bus, u8 devfn);
-uint8_t pci_conf_read8(
- unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
- unsigned int reg);
-uint16_t pci_conf_read16(
- unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
- unsigned int reg);
-uint32_t pci_conf_read32(
- unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
- unsigned int reg);
-void pci_conf_write8(
- unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
- unsigned int reg, uint8_t data);
-void pci_conf_write16(
- unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
- unsigned int reg, uint16_t data);
-void pci_conf_write32(
- unsigned int seg, unsigned int bus, unsigned int dev, unsigned int func,
- unsigned int reg, uint32_t data);
+uint8_t pci_conf_read8(pci_sbdf_t sbdf, unsigned int reg);
+uint16_t pci_conf_read16(pci_sbdf_t sbdf, unsigned int reg);
+uint32_t pci_conf_read32(pci_sbdf_t sbdf, unsigned int reg);
+void pci_conf_write8(pci_sbdf_t sbdf, unsigned int reg, uint8_t data);
+void pci_conf_write16(pci_sbdf_t sbdf, unsigned int reg, uint16_t data);
+void pci_conf_write32(pci_sbdf_t sbdf, unsigned int reg, uint32_t data);
uint32_t pci_conf_read(uint32_t cf8, uint8_t offset, uint8_t bytes);
void pci_conf_write(uint32_t cf8, uint8_t offset, uint8_t bytes, uint32_t data);
int pci_mmcfg_read(unsigned int seg, unsigned int bus,