if ( pci_conf_write_intercept(mmio_ctxt->seg, mmio_ctxt->bdf,
offset, bytes, p_data) >= 0 )
pci_mmcfg_write(mmio_ctxt->seg, PCI_BUS(mmio_ctxt->bdf),
- PCI_DEVFN2(mmio_ctxt->bdf), offset, bytes,
+ PCI_DEVFN(mmio_ctxt->bdf), offset, bytes,
*(uint32_t *)p_data);
return X86EMUL_OKAY;
pcidevs_lock();
- pdev = pci_get_pdev(seg, PCI_BUS(bdf), PCI_DEVFN2(bdf));
+ pdev = pci_get_pdev(seg, PCI_BUS(bdf), PCI_DEVFN(bdf));
if ( pdev )
rc = pci_msi_conf_write_intercept(pdev, reg, size, data);
for (i = 0; !name && i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
bus = pci_mmcfg_probes[i].bus;
devfn = pci_mmcfg_probes[i].devfn;
- l = pci_conf_read32(PCI_SBDF3(0, bus, devfn), 0);
+ l = pci_conf_read32(PCI_SBDF(0, bus, devfn), 0);
vendor = l & 0xffff;
device = (l >> 16) & 0xffff;
xen_pfn_t start, xen_ulong_t nr, u32 id, void *ctxt)
{
struct get_reserved_device_memory *grdm = ctxt;
- uint32_t sbdf = PCI_SBDF3(grdm->map.dev.pci.seg, grdm->map.dev.pci.bus,
- grdm->map.dev.pci.devfn).sbdf;
+ uint32_t sbdf = PCI_SBDF(grdm->map.dev.pci.seg, grdm->map.dev.pci.bus,
+ grdm->map.dev.pci.devfn).sbdf;
if ( !(grdm->map.flags & XENMEM_RDM_ALL) && (sbdf != id) )
return 0;
xen_pfn_t start, xen_ulong_t nr, u32 id, void *ctxt)
{
struct get_reserved_device_memory *grdm = ctxt;
- uint32_t sbdf = PCI_SBDF3(grdm->map.dev.pci.seg, grdm->map.dev.pci.bus,
- grdm->map.dev.pci.devfn).sbdf;
+ uint32_t sbdf = PCI_SBDF(grdm->map.dev.pci.seg, grdm->map.dev.pci.bus,
+ grdm->map.dev.pci.devfn).sbdf;
if ( !(grdm->map.flags & XENMEM_RDM_ALL) && (sbdf != id) )
return 0;
if ( !ivrs_mappings[alias_id].intremap_table )
panic("No memory for %pp's IRT\n",
- &PCI_SBDF2(iommu->seg, alias_id));
+ &PCI_SBDF(iommu->seg, alias_id));
}
}
}
AMD_IOMMU_DEBUG("IVHD Special: %pp variety %#x handle %#x\n",
- &PCI_SBDF2(seg, bdf), special->variety, special->handle);
+ &PCI_SBDF(seg, bdf), special->variety, special->handle);
add_ivrs_mapping_entry(bdf, bdf, special->header.data_setting, 0, true,
iommu);
AMD_IOMMU_DEBUG("IVHD: Command line override present for IO-APIC %#x"
"(IVRS: %#x devID %pp)\n",
ioapic_sbdf[idx].id, special->handle,
- &PCI_SBDF2(seg, bdf));
+ &PCI_SBDF(seg, bdf));
break;
}
AMD_IOMMU_DEBUG("IVHD: Command line override present for HPET %#x "
"(IVRS: %#x devID %pp)\n",
hpet_sbdf.id, special->handle,
- &PCI_SBDF2(seg, bdf));
+ &PCI_SBDF(seg, bdf));
break;
case HPET_NONE:
/* set device id of hpet */
IOMMU_RING_BUFFER_PTR_MASK) )
{
printk_once(XENLOG_ERR "AMD IOMMU %pp: no cmd slot available\n",
- &PCI_SBDF2(iommu->seg, iommu->bdf));
+ &PCI_SBDF(iommu->seg, iommu->bdf));
cpu_relax();
}
threshold |= threshold << 1;
printk(XENLOG_WARNING
"AMD IOMMU %pp: %scompletion wait taking too long\n",
- &PCI_SBDF2(iommu->seg, iommu->bdf),
+ &PCI_SBDF(iommu->seg, iommu->bdf),
timeout_base ? "iotlb " : "");
timeout = 0;
}
if ( !timeout )
printk(XENLOG_WARNING
"AMD IOMMU %pp: %scompletion wait took %lums\n",
- &PCI_SBDF2(iommu->seg, iommu->bdf),
+ &PCI_SBDF(iommu->seg, iommu->bdf),
timeout_base ? "iotlb " : "",
(NOW() - start) / 10000000);
}
if ( !iommu )
{
AMD_IOMMU_WARN("can't find IOMMU for %pp\n",
- &PCI_SBDF3(pdev->seg, pdev->bus, devfn));
+ &PCI_SBDF(pdev->seg, pdev->bus, devfn));
return;
}
if ( !iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
return;
- req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(pdev->bus, devfn));
+ req_id = get_dma_requestor_id(iommu->seg, PCI_BDF(pdev->bus, devfn));
queueid = req_id;
maxpend = pdev->ats.queue_depth & 0xff;
rt = pci_ro_device(iommu->seg, bus, PCI_DEVFN(dev, func));
if ( rt )
printk(XENLOG_ERR "Could not mark config space of %pp read-only (%d)\n",
- &PCI_SBDF2(iommu->seg, iommu->bdf), rt);
+ &PCI_SBDF(iommu->seg, iommu->bdf), rt);
list_add_tail(&iommu->list, &amd_iommu_head);
rt = 0;
printk(XENLOG_ERR "AMD-Vi: %s: %pp d%u addr %016"PRIx64
" flags %#x%s%s%s%s%s%s%s%s%s%s\n",
- code_str, &PCI_SBDF2(iommu->seg, device_id),
+ code_str, &PCI_SBDF(iommu->seg, device_id),
domain_id, addr, flags,
(flags & 0xe00) ? " ??" : "",
(flags & 0x100) ? " TR" : "",
for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
if ( get_dma_requestor_id(iommu->seg, bdf) == device_id )
pci_check_disable_device(iommu->seg, PCI_BUS(bdf),
- PCI_DEVFN2(bdf));
+ PCI_DEVFN(bdf));
}
else
printk(XENLOG_ERR "%s %08x %08x %08x %08x\n",
static void cf_check parse_ppr_log_entry(struct amd_iommu *iommu, u32 entry[])
{
-
- u16 device_id;
- u8 bus, devfn;
- struct pci_dev *pdev;
-
/* here device_id is physical value */
- device_id = iommu_get_devid_from_cmd(entry[0]);
- bus = PCI_BUS(device_id);
- devfn = PCI_DEVFN2(device_id);
+ uint16_t device_id = iommu_get_devid_from_cmd(entry[0]);
+ struct pci_dev *pdev;
pcidevs_lock();
- pdev = pci_get_real_pdev(iommu->seg, bus, devfn);
+ pdev = pci_get_real_pdev(iommu->seg, PCI_BUS(device_id),
+ PCI_DEVFN(device_id));
pcidevs_unlock();
if ( pdev )
pcidevs_lock();
iommu->msi.dev = pci_get_pdev(iommu->seg, PCI_BUS(iommu->bdf),
- PCI_DEVFN2(iommu->bdf));
+ PCI_DEVFN(iommu->bdf));
pcidevs_unlock();
if ( !iommu->msi.dev )
{
AMD_IOMMU_WARN("no pdev for %pp\n",
- &PCI_SBDF2(iommu->seg, iommu->bdf));
+ &PCI_SBDF(iommu->seg, iommu->bdf));
return 0;
}
hw_irq_controller *handler;
u16 control;
- control = pci_conf_read16(PCI_SBDF2(iommu->seg, iommu->bdf),
+ control = pci_conf_read16(PCI_SBDF(iommu->seg, iommu->bdf),
iommu->msi.msi_attrib.pos + PCI_MSI_FLAGS);
iommu->msi.msi.nvec = 1;
(boot_cpu_data.x86_model > 0x1f) )
return;
- pci_conf_write32(PCI_SBDF2(iommu->seg, iommu->bdf), 0xf0, 0x90);
- value = pci_conf_read32(PCI_SBDF2(iommu->seg, iommu->bdf), 0xf4);
+ pci_conf_write32(PCI_SBDF(iommu->seg, iommu->bdf), 0xf0, 0x90);
+ value = pci_conf_read32(PCI_SBDF(iommu->seg, iommu->bdf), 0xf4);
if ( value & (1 << 2) )
return;
/* Select NB indirect register 0x90 and enable writing */
- pci_conf_write32(PCI_SBDF2(iommu->seg, iommu->bdf), 0xf0, 0x90 | (1 << 8));
+ pci_conf_write32(PCI_SBDF(iommu->seg, iommu->bdf), 0xf0, 0x90 | (1 << 8));
- pci_conf_write32(PCI_SBDF2(iommu->seg, iommu->bdf), 0xf4, value | (1 << 2));
+ pci_conf_write32(PCI_SBDF(iommu->seg, iommu->bdf), 0xf4, value | (1 << 2));
printk(XENLOG_INFO
"AMD-Vi: Applying erratum 746 workaround for IOMMU at %pp\n",
- &PCI_SBDF2(iommu->seg, iommu->bdf));
+ &PCI_SBDF(iommu->seg, iommu->bdf));
/* Clear the enable writing bit */
- pci_conf_write32(PCI_SBDF2(iommu->seg, iommu->bdf), 0xf0, 0x90);
+ pci_conf_write32(PCI_SBDF(iommu->seg, iommu->bdf), 0xf0, 0x90);
}
static void enable_iommu(struct amd_iommu *iommu)
if ( !pci_init )
continue;
pcidevs_lock();
- pdev = pci_get_pdev(seg, PCI_BUS(bdf), PCI_DEVFN2(bdf));
+ pdev = pci_get_pdev(seg, PCI_BUS(bdf), PCI_DEVFN(bdf));
pcidevs_unlock();
}
if ( iommu )
return iommu;
- AMD_IOMMU_DEBUG("No IOMMU for MSI dev = %pp\n", &PCI_SBDF2(seg, bdf));
+ AMD_IOMMU_DEBUG("No IOMMU for MSI dev = %pp\n", &PCI_SBDF(seg, bdf));
return ERR_PTR(-EINVAL);
}
if ( ivrs_mapping )
{
printk(" %pp:\n",
- &PCI_SBDF2(iommu->seg, ivrs_mapping->dte_requestor_id));
+ &PCI_SBDF(iommu->seg, ivrs_mapping->dte_requestor_id));
ivrs_mapping = NULL;
}
for ( bdf = 0; bdf < ivrs_bdf_entries; ++bdf )
{
- pci_sbdf_t sbdf = PCI_SBDF2(seg, bdf);
+ pci_sbdf_t sbdf = PCI_SBDF(seg, bdf);
const struct ivrs_unity_map *um = ivrs_mappings[bdf].unity_map;
unsigned int req = ivrs_mappings[bdf].dte_requestor_id;
const struct amd_iommu *iommu = ivrs_mappings[bdf].iommu;
* the same alias ID.
*/
if ( bdf != req && ivrs_mappings[req].iommu &&
- func(0, 0, PCI_SBDF2(seg, req).sbdf, ctxt) )
+ func(0, 0, PCI_SBDF(seg, req).sbdf, ctxt) )
continue;
if ( global == pending )
ivrs_mappings[bdf] = tmp;
printk(XENLOG_WARNING "%pp not found in ACPI tables;"
- " using same IOMMU as function 0\n", &PCI_SBDF2(seg, bdf));
+ " using same IOMMU as function 0\n", &PCI_SBDF(seg, bdf));
/* write iommu field last */
ivrs_mappings[bdf].iommu = ivrs_mappings[bd0].iommu;
| (ivrs_dev->unity_map ? SET_ROOT_WITH_UNITY_MAP : 0);
/* get device-table entry */
- req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(bus, devfn));
+ req_id = get_dma_requestor_id(iommu->seg, PCI_BDF(bus, devfn));
table = iommu->dev_table.buffer;
dte = &table[req_id];
ivrs_dev = &get_ivrs_mappings(iommu->seg)[req_id];
* presence. But let's deal with that case only if it is actually
* found in the wild.
*/
- if ( req_id != PCI_BDF2(bus, devfn) &&
+ if ( req_id != PCI_BDF(bus, devfn) &&
(sr_flags & SET_ROOT_WITH_UNITY_MAP) )
rc = -EOPNOTSUPP;
else
(any_pdev_behind_iommu(pdev->domain, pdev, iommu) ||
pdev->phantom_stride) )
AMD_IOMMU_WARN(" %pp: reassignment may cause %pd data corruption\n",
- &PCI_SBDF3(pdev->seg, bus, devfn), pdev->domain);
+ &PCI_SBDF(pdev->seg, bus, devfn), pdev->domain);
/*
* Check remaining settings are still in place from an earlier call
disable_ats_device(pdev);
BUG_ON ( iommu->dev_table.buffer == NULL );
- req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(bus, devfn));
+ req_id = get_dma_requestor_id(iommu->seg, PCI_BDF(bus, devfn));
table = iommu->dev_table.buffer;
dte = &table[req_id];
if ( !iommu )
{
AMD_IOMMU_WARN("failed to find IOMMU: %pp cannot be assigned to %pd\n",
- &PCI_SBDF3(pdev->seg, pdev->bus, devfn), target);
+ &PCI_SBDF(pdev->seg, pdev->bus, devfn), target);
return -ENODEV;
}
if ( !is_hardware_domain(source) )
{
const struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(pdev->seg);
- unsigned int bdf = PCI_BDF2(pdev->bus, devfn);
+ unsigned int bdf = PCI_BDF(pdev->bus, devfn);
rc = amd_iommu_reserve_domain_unity_unmap(
source,
}
AMD_IOMMU_DEBUG("Re-assign %pp from %pd to %pd\n",
- &PCI_SBDF3(pdev->seg, pdev->bus, devfn), source, target);
+ &PCI_SBDF(pdev->seg, pdev->bus, devfn), source, target);
return 0;
}
struct domain *d, u8 devfn, struct pci_dev *pdev, u32 flag)
{
struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(pdev->seg);
- int bdf = PCI_BDF2(pdev->bus, devfn);
+ unsigned int bdf = PCI_BDF(pdev->bus, devfn);
int req_id = get_dma_requestor_id(pdev->seg, bdf);
int rc = amd_iommu_reserve_domain_unity_map(
d, ivrs_mappings[req_id].unity_map, flag);
}
AMD_IOMMU_WARN("no IOMMU for %pp; cannot be handed to %pd\n",
- &PCI_SBDF3(pdev->seg, pdev->bus, devfn), pdev->domain);
+ &PCI_SBDF(pdev->seg, pdev->bus, devfn), pdev->domain);
return -ENODEV;
}
ivrs_mappings = get_ivrs_mappings(pdev->seg);
- bdf = PCI_BDF2(pdev->bus, devfn);
+ bdf = PCI_BDF(pdev->bus, devfn);
if ( !ivrs_mappings ||
!ivrs_mappings[ivrs_mappings[bdf].dte_requestor_id].valid )
return -EPERM;
ivrs_mappings[ivrs_mappings[bdf].dte_requestor_id].unity_map,
0) )
AMD_IOMMU_WARN("%pd: unity mapping failed for %pp\n",
- pdev->domain, &PCI_SBDF2(pdev->seg, bdf));
+ pdev->domain, &PCI_SBDF(pdev->seg, bdf));
if ( iommu_quarantine && pdev->arch.pseudo_domid == DOMID_INVALID )
{
if ( !iommu )
{
AMD_IOMMU_WARN("failed to find IOMMU: %pp cannot be removed from %pd\n",
- &PCI_SBDF3(pdev->seg, pdev->bus, devfn), pdev->domain);
+ &PCI_SBDF(pdev->seg, pdev->bus, devfn), pdev->domain);
return -ENODEV;
}
amd_iommu_disable_domain_device(pdev->domain, iommu, devfn, pdev);
ivrs_mappings = get_ivrs_mappings(pdev->seg);
- bdf = PCI_BDF2(pdev->bus, devfn);
+ bdf = PCI_BDF(pdev->bus, devfn);
if ( amd_iommu_reserve_domain_unity_unmap(
pdev->domain,
ivrs_mappings[ivrs_mappings[bdf].dte_requestor_id].unity_map) )
AMD_IOMMU_WARN("%pd: unity unmapping failed for %pp\n",
- pdev->domain, &PCI_SBDF2(pdev->seg, bdf));
+ pdev->domain, &PCI_SBDF(pdev->seg, bdf));
amd_iommu_quarantine_teardown(pdev);
static int cf_check amd_iommu_group_id(u16 seg, u8 bus, u8 devfn)
{
- int bdf = PCI_BDF2(bus, devfn);
+ unsigned int bdf = PCI_BDF(bus, devfn);
return (bdf < ivrs_bdf_entries) ? get_dma_requestor_id(seg, bdf) : bdf;
}
pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);
BUG_ON(!pos);
- value = pci_conf_read16(PCI_SBDF3(seg, bus, devfn), pos + ATS_REG_CTL);
+ value = pci_conf_read16(PCI_SBDF(seg, bus, devfn), pos + ATS_REG_CTL);
return value & ATS_ENABLE;
}
memset(pseg->ro_map, 0, sz);
}
- __set_bit(PCI_BDF2(bus, devfn), pseg->ro_map);
+ __set_bit(PCI_BDF(bus, devfn), pseg->ro_map);
_pci_hide_device(pdev);
return 0;
out:
if ( ret )
printk(XENLOG_G_ERR "%pd: deassign (%pp) failed (%d)\n",
- d, &PCI_SBDF3(seg, bus, devfn), ret);
+ d, &PCI_SBDF(seg, bus, devfn), ret);
return ret;
}
rc = iommu_call(hd->platform_ops, add_device, devfn, pci_to_dev(pdev));
if ( rc )
printk(XENLOG_WARNING "IOMMU: add %pp failed (%d)\n",
- &PCI_SBDF3(pdev->seg, pdev->bus, devfn), rc);
+ &PCI_SBDF(pdev->seg, pdev->bus, devfn), rc);
}
}
continue;
printk(XENLOG_ERR "IOMMU: remove %pp failed (%d)\n",
- &PCI_SBDF3(pdev->seg, pdev->bus, devfn), rc);
+ &PCI_SBDF(pdev->seg, pdev->bus, devfn), rc);
return rc;
}
done:
if ( rc )
printk(XENLOG_G_WARNING "%pd: assign (%pp) failed (%d)\n",
- d, &PCI_SBDF3(seg, bus, devfn), rc);
+ d, &PCI_SBDF(seg, bus, devfn), rc);
/* The device is assigned to dom_io so mark it as quarantined */
else if ( d == dom_io )
pdev->quarantine = true;
seg = domctl->u.get_device_group.machine_sbdf >> 16;
bus = PCI_BUS(domctl->u.get_device_group.machine_sbdf);
- devfn = PCI_DEVFN2(domctl->u.get_device_group.machine_sbdf);
+ devfn = PCI_DEVFN(domctl->u.get_device_group.machine_sbdf);
max_sdevs = domctl->u.get_device_group.max_sdevs;
sdevs = domctl->u.get_device_group.sdev_array;
seg = machine_sbdf >> 16;
bus = PCI_BUS(machine_sbdf);
- devfn = PCI_DEVFN2(machine_sbdf);
+ devfn = PCI_DEVFN(machine_sbdf);
pcidevs_lock();
ret = device_assigned(seg, bus, devfn);
if ( ret )
{
printk(XENLOG_G_INFO "%pp already assigned, or non-existent\n",
- &PCI_SBDF3(seg, bus, devfn));
+ &PCI_SBDF(seg, bus, devfn));
ret = -EINVAL;
}
}
seg = machine_sbdf >> 16;
bus = PCI_BUS(machine_sbdf);
- devfn = PCI_DEVFN2(machine_sbdf);
+ devfn = PCI_DEVFN(machine_sbdf);
pcidevs_lock();
ret = deassign_device(d, seg, bus, devfn);
continue;
for (i = 0; i < drhd->scope.devices_cnt; i++)
- if ( drhd->scope.devices[i] == PCI_BDF2(bus, devfn) )
+ if ( drhd->scope.devices[i] == PCI_BDF(bus, devfn) )
return drhd;
if ( test_bit(bus, drhd->scope.buses) )
rc = func(PFN_DOWN(rmrr->base_address),
PFN_UP(rmrr->end_address) - PFN_DOWN(rmrr->base_address),
- PCI_SBDF2(rmrr->segment, bdf).sbdf, ctxt);
+ PCI_SBDF(rmrr->segment, bdf).sbdf, ctxt);
if ( unlikely(rc < 0) )
return rc;
case 4: sq = SQ_13_IGNORE_1; break;
default: sq = SQ_ALL_16; break;
}
- set_ire_sid(ire, SVT_VERIFY_SID_SQ, sq, PCI_BDF2(bus, devfn));
+ set_ire_sid(ire, SVT_VERIFY_SID_SQ, sq, PCI_BDF(bus, devfn));
break;
case DEV_TYPE_PCI:
if ( ret == 0 ) /* integrated PCI device */
{
set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16,
- PCI_BDF2(bus, devfn));
+ PCI_BDF(bus, devfn));
}
else if ( ret == 1 ) /* find upstream bridge */
{
(bus << 8) | pdev->bus);
else
set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16,
- PCI_BDF2(bus, devfn));
+ PCI_BDF(bus, devfn));
}
else
dprintk(XENLOG_WARNING VTDPREFIX,
"DMAR:[%s] Request device [%pp] "
"fault addr %"PRIx64"\n",
(type ? "DMA Read" : "DMA Write"),
- &PCI_SBDF2(seg, source_id), addr);
+ &PCI_SBDF(seg, source_id), addr);
kind = "DMAR";
break;
case INTR_REMAP:
printk(XENLOG_G_WARNING VTDPREFIX
"INTR-REMAP: Request device [%pp] "
"fault index %"PRIx64"\n",
- &PCI_SBDF2(seg, source_id), addr >> 48);
+ &PCI_SBDF(seg, source_id), addr >> 48);
kind = "INTR-REMAP";
break;
default:
printk(XENLOG_G_WARNING VTDPREFIX
"UNKNOWN: Request device [%pp] "
"fault addr %"PRIx64"\n",
- &PCI_SBDF2(seg, source_id), addr);
+ &PCI_SBDF(seg, source_id), addr);
kind = "UNKNOWN";
break;
}
kind, fault_reason, reason);
if ( iommu_verbose && fault_type == DMA_REMAP )
- print_vtd_entries(iommu, PCI_BUS(source_id), PCI_DEVFN2(source_id),
+ print_vtd_entries(iommu, PCI_BUS(source_id), PCI_DEVFN(source_id),
addr >> PAGE_SHIFT);
return 0;
source_id, guest_addr);
pci_check_disable_device(iommu->drhd->segment,
- PCI_BUS(source_id), PCI_DEVFN2(source_id));
+ PCI_BUS(source_id), PCI_DEVFN(source_id));
fault_index++;
if ( fault_index > cap_num_fault_regs(iommu->cap) )
check_cleanup_domid_map(domain, pdev, iommu);
printk(XENLOG_ERR
"%pp: unexpected context entry %016lx_%016lx (expected %016lx_%016lx)\n",
- &PCI_SBDF3(seg, bus, devfn),
+ &PCI_SBDF(seg, bus, devfn),
(uint64_t)(res >> 64), (uint64_t)res,
(uint64_t)(old >> 64), (uint64_t)old);
rc = -EILSEQ;
if ( !(mode & (MAP_OWNER_DYING | MAP_SINGLE_DEVICE)) )
printk(XENLOG_WARNING VTDPREFIX
" %pp: reassignment may cause %pd data corruption\n",
- &PCI_SBDF3(seg, bus, devfn), prev_dom);
+ &PCI_SBDF(seg, bus, devfn), prev_dom);
write_atomic(&context->lo, lctxt.lo);
/* No barrier should be needed between these two. */
iommu_sync_cache(context, sizeof(struct context_entry));
spin_unlock(&iommu->lock);
- rc = iommu_flush_context_device(iommu, prev_did, PCI_BDF2(bus, devfn),
+ rc = iommu_flush_context_device(iommu, prev_did, PCI_BDF(bus, devfn),
DMA_CCMD_MASK_NOBIT, !prev_dom);
flush_dev_iotlb = !!find_ats_dev_drhd(iommu);
ret = iommu_flush_iotlb_dsi(iommu, prev_did, !prev_dom, flush_dev_iotlb);
case DEV_TYPE_PCI_HOST_BRIDGE:
if ( iommu_debug )
printk(VTDPREFIX "%pd:Hostbridge: skip %pp map\n",
- domain, &PCI_SBDF3(seg, bus, devfn));
+ domain, &PCI_SBDF(seg, bus, devfn));
if ( !is_hardware_domain(domain) )
return -EPERM;
break;
if ( iommu_debug )
printk(VTDPREFIX "%pd:PCIe: map %pp\n",
- domain, &PCI_SBDF3(seg, bus, devfn));
+ domain, &PCI_SBDF(seg, bus, devfn));
ret = domain_context_mapping_one(domain, drhd->iommu, bus, devfn, pdev,
DEVICE_DOMID(domain, pdev), pgd_maddr,
mode);
if ( iommu_debug )
printk(VTDPREFIX "%pd:PCI: map %pp\n",
- domain, &PCI_SBDF3(seg, bus, devfn));
+ domain, &PCI_SBDF(seg, bus, devfn));
ret = domain_context_mapping_one(domain, drhd->iommu, bus, devfn,
pdev, DEVICE_DOMID(domain, pdev),
default:
dprintk(XENLOG_ERR VTDPREFIX, "%pd:unknown(%u): %pp\n",
- domain, pdev->type, &PCI_SBDF3(seg, bus, devfn));
+ domain, pdev->type, &PCI_SBDF(seg, bus, devfn));
ret = -EINVAL;
break;
}
iommu_sync_cache(context, sizeof(struct context_entry));
rc = iommu_flush_context_device(iommu, iommu_domid,
- PCI_BDF2(bus, devfn),
+ PCI_BDF(bus, devfn),
DMA_CCMD_MASK_NOBIT, 0);
flush_dev_iotlb = !!find_ats_dev_drhd(iommu);
case DEV_TYPE_PCI_HOST_BRIDGE:
if ( iommu_debug )
printk(VTDPREFIX "%pd:Hostbridge: skip %pp unmap\n",
- domain, &PCI_SBDF3(seg, bus, devfn));
+ domain, &PCI_SBDF(seg, bus, devfn));
return ERR_PTR(is_hardware_domain(domain) ? 0 : -EPERM);
case DEV_TYPE_PCIe_BRIDGE:
if ( iommu_debug )
printk(VTDPREFIX "%pd:PCIe: unmap %pp\n",
- domain, &PCI_SBDF3(seg, bus, devfn));
+ domain, &PCI_SBDF(seg, bus, devfn));
ret = domain_context_unmap_one(domain, iommu, bus, devfn);
if ( !ret && devfn == pdev->devfn && ats_device(pdev, drhd) > 0 )
disable_ats_device(pdev);
if ( iommu_debug )
printk(VTDPREFIX "%pd:PCI: unmap %pp\n",
- domain, &PCI_SBDF3(seg, bus, devfn));
+ domain, &PCI_SBDF(seg, bus, devfn));
ret = domain_context_unmap_one(domain, iommu, bus, devfn);
if ( ret )
break;
default:
dprintk(XENLOG_ERR VTDPREFIX, "%pd:unknown(%u): %pp\n",
- domain, pdev->type, &PCI_SBDF3(seg, bus, devfn));
+ domain, pdev->type, &PCI_SBDF(seg, bus, devfn));
return ERR_PTR(-EINVAL);
}
for_each_rmrr_device ( rmrr, bdf, i )
{
- if ( rmrr->segment == pdev->seg &&
- PCI_BUS(bdf) == pdev->bus &&
- PCI_DEVFN2(bdf) == devfn )
+ if ( rmrr->segment == pdev->seg && bdf == PCI_BDF(pdev->bus, devfn) )
{
/*
* iommu_add_device() is only called for the hardware
for_each_rmrr_device ( rmrr, bdf, i )
{
- if ( rmrr->segment != pdev->seg ||
- PCI_BUS(bdf) != pdev->bus ||
- PCI_DEVFN2(bdf) != devfn )
+ if ( rmrr->segment != pdev->seg || bdf != PCI_BDF(pdev->bus, devfn) )
continue;
/*
for_each_rmrr_device( rmrr, bdf, i )
if ( rmrr->segment == pdev->seg &&
- PCI_BUS(bdf) == pdev->bus &&
- PCI_DEVFN2(bdf) == devfn )
+ bdf == PCI_BDF(pdev->bus, devfn) )
{
/*
* Any RMRR flag is always ignored when remove a device,
*/
for_each_rmrr_device( rmrr, bdf, i )
{
- if ( rmrr->segment == seg &&
- PCI_BUS(bdf) == bus &&
- PCI_DEVFN2(bdf) == devfn &&
+ if ( rmrr->segment == seg && bdf == PCI_BDF(bus, devfn) &&
rmrr->scope.devices_cnt > 1 )
{
bool_t relaxed = !!(flag & XEN_DOMCTL_DEV_RDM_RELAXED);
" with shared RMRR at %"PRIx64" for %pd.\n",
relaxed ? XENLOG_WARNING : XENLOG_ERR,
relaxed ? "risky" : "disallowed",
- &PCI_SBDF3(seg, bus, devfn), rmrr->base_address, d);
+ &PCI_SBDF(seg, bus, devfn), rmrr->base_address, d);
if ( !relaxed )
return -EPERM;
}
/* Setup rmrr identity mapping */
for_each_rmrr_device( rmrr, bdf, i )
{
- if ( rmrr->segment == seg &&
- PCI_BUS(bdf) == bus &&
- PCI_DEVFN2(bdf) == devfn )
+ if ( rmrr->segment == seg && bdf == PCI_BDF(bus, devfn) )
{
ret = iommu_identity_mapping(d, p2m_access_rw, rmrr->base_address,
rmrr->end_address, flag);
for_each_rmrr_device( rmrr, bdf, i )
{
- if ( rmrr->segment == seg &&
- PCI_BUS(bdf) == bus &&
- PCI_DEVFN2(bdf) == devfn )
+ if ( rmrr->segment == seg && bdf == PCI_BDF(bus, devfn) )
{
int rc = iommu_identity_mapping(d, p2m_access_x,
rmrr->base_address,
if ( find_upstream_bridge(seg, &bus, &devfn, &secbus) < 0 )
return -ENODEV;
- return PCI_BDF2(bus, devfn);
+ return PCI_BDF(bus, devfn);
}
static int __must_check cf_check vtd_suspend(void)
return true;
/* Check for the specific device. */
- sbdf = PCI_SBDF2(drhd->segment, drhd->scope.devices[0]);
+ sbdf = PCI_SBDF(drhd->segment, drhd->scope.devices[0]);
if ( pci_conf_read16(sbdf, PCI_VENDOR_ID) != PCI_VENDOR_ID_INTEL ||
pci_conf_read16(sbdf, PCI_DEVICE_ID) != 0x3a3e )
return true;
return 0;
/* if device is WLAN device, map ME phantom device 0:3.7 */
- id = pci_conf_read32(PCI_SBDF3(0, bus, devfn), 0);
+ id = pci_conf_read32(PCI_SBDF(0, bus, devfn), 0);
switch (id)
{
case 0x42328086:
return 0;
/* if device is WLAN device, map ME phantom device 0:22.7 */
- id = pci_conf_read32(PCI_SBDF3(0, bus, devfn), 0);
+ id = pci_conf_read32(PCI_SBDF(0, bus, devfn), 0);
switch (id)
{
case 0x00878086: /* Kilmer Peak */
u32 l_index, level;
printk("print_vtd_entries: iommu #%u dev %pp gmfn %"PRI_gfn"\n",
- iommu->index, &PCI_SBDF3(iommu->drhd->segment, bus, devfn),
+ iommu->index, &PCI_SBDF(iommu->drhd->segment, bus, devfn),
gmfn);
if ( iommu->root_maddr == 0 )
while ( ttl-- )
{
- pos = pci_conf_read8(PCI_SBDF3(seg, bus, devfn), pos);
+ pos = pci_conf_read8(PCI_SBDF(seg, bus, devfn), pos);
if ( pos < 0x40 )
break;
pos &= ~3;
- id = pci_conf_read8(PCI_SBDF3(seg, bus, devfn), pos + PCI_CAP_LIST_ID);
+ id = pci_conf_read8(PCI_SBDF(seg, bus, devfn), pos + PCI_CAP_LIST_ID);
if ( id == 0xff )
break;
int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */
int pos = max(start, 0x100);
- header = pci_conf_read32(PCI_SBDF3(seg, bus, devfn), pos);
+ header = pci_conf_read32(PCI_SBDF(seg, bus, devfn), pos);
/*
* If we have no capabilities, this is indicated by cap ID,
pos = PCI_EXT_CAP_NEXT(header);
if ( pos < 0x100 )
break;
- header = pci_conf_read32(PCI_SBDF3(seg, bus, devfn), pos);
+ header = pci_conf_read32(PCI_SBDF(seg, bus, devfn), pos);
}
return 0;
}
else
func = 0;
if ( seg != (seg_p ? (u16)seg : 0) ||
- bus != PCI_BUS(PCI_BDF2(bus, 0)) ||
+ bus != PCI_BUS(PCI_BDF(bus, 0)) ||
dev != PCI_SLOT(PCI_DEVFN(dev, 0)) ||
func != PCI_FUNC(PCI_DEVFN(0, func)) )
return NULL;
pcidevs_unlock();
if ( !pdev ||
- pci_conf_read16(PCI_SBDF3(0, bus, devfn),
+ pci_conf_read16(PCI_SBDF(0, bus, devfn),
PCI_CLASS_DEVICE) != 0x0300 ||
- !(pci_conf_read16(PCI_SBDF3(0, bus, devfn), PCI_COMMAND) &
+ !(pci_conf_read16(PCI_SBDF(0, bus, devfn), PCI_COMMAND) &
(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) )
continue;
b = 0;
break;
case 1:
- switch ( pci_conf_read8(PCI_SBDF3(0, b, df),
+ switch ( pci_conf_read8(PCI_SBDF(0, b, df),
PCI_HEADER_TYPE) )
{
case PCI_HEADER_TYPE_BRIDGE:
case PCI_HEADER_TYPE_CARDBUS:
- if ( pci_conf_read16(PCI_SBDF3(0, b, df),
+ if ( pci_conf_read16(PCI_SBDF(0, b, df),
PCI_BRIDGE_CONTROL) &
PCI_BRIDGE_CTL_VGA )
continue;
#define PCI_BUS(bdf) (((bdf) >> 8) & 0xff)
#define PCI_SLOT(bdf) (((bdf) >> 3) & 0x1f)
#define PCI_FUNC(bdf) ((bdf) & 0x07)
-#define PCI_DEVFN(d,f) ((((d) & 0x1f) << 3) | ((f) & 0x07))
-#define PCI_DEVFN2(bdf) ((bdf) & 0xff)
-#define PCI_BDF(b,d,f) ((((b) & 0xff) << 8) | PCI_DEVFN(d,f))
-#define PCI_BDF2(b,df) ((((b) & 0xff) << 8) | ((df) & 0xff))
-#define PCI_SBDF(s,b,d,f) \
- ((pci_sbdf_t){ .sbdf = (((s) & 0xffff) << 16) | PCI_BDF(b, d, f) })
-#define PCI_SBDF2(s,bdf) \
+
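+/* Arity-suffixed backing macros (note the trailing underscore); call sites
+ * should use the argument-count dispatching wrappers further down instead. */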
+#define PCI_DEVFN1_(df) ((df) & 0xff)
+#define PCI_DEVFN2_(d, f) ((((d) & 0x1f) << 3) | ((f) & 7))
+#define PCI_SBDF4_(s, b, d, f...) \
+ ((pci_sbdf_t){ .sbdf = (((s) & 0xffff) << 16) | PCI_BDF(b, d, ##f) })
+#define PCI_SBDF3_ PCI_SBDF4_
+#define PCI_SBDF2_(s, bdf) \
((pci_sbdf_t){ .sbdf = (((s) & 0xffff) << 16) | ((bdf) & 0xffff) })
-#define PCI_SBDF3(s,b,df) \
- ((pci_sbdf_t){ .sbdf = (((s) & 0xffff) << 16) | PCI_BDF2(b, df) })
+
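+/* Double expansion, so count_args() is evaluated before token pasting. */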
+#define PCI__(what, nr) PCI_##what##nr##_
+#define PCI_(what, nr) PCI__(what, nr)
+
+#define PCI_DEVFN(d, f...) PCI_(DEVFN, count_args(d, ##f))(d, ##f)
+#define PCI_BDF(b, d, f...) ((((b) & 0xff) << 8) | PCI_DEVFN(d, ##f))
+#define PCI_SBDF(s, b, d...) PCI_(SBDF, count_args(s, b, ##d))(s, b, ##d)
#define ECAM_REG_OFFSET(addr) ((addr) & 0x00000fff)
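
For reference, below is a minimal standalone sketch of the argument-count
dispatch the unified macros rely on. The count_args() here is a trimmed,
local stand-in for Xen's helper (four arguments suffice for the demo),
pci_sbdf_t is reduced to a bare wrapper, and the values in main() are
illustrative only; everything else mirrors the header hunk above. Builds
with gcc or clang (GNU variadic-macro extensions):

#include <stdint.h>
#include <stdio.h>

/* Trimmed stand-in for Xen's count_args(): expands to the argument count. */
#define count_args_(dot, a1, a2, a3, a4, nr, rest...) nr
#define count_args(args...) count_args_(., ## args, 4, 3, 2, 1, 0)

typedef struct { uint32_t sbdf; } pci_sbdf_t; /* reduced for the demo */

#define PCI_BUS(bdf)  (((bdf) >> 8) & 0xff)
#define PCI_SLOT(bdf) (((bdf) >> 3) & 0x1f)
#define PCI_FUNC(bdf) ((bdf) & 0x07)

#define PCI_DEVFN1_(df)   ((df) & 0xff)
#define PCI_DEVFN2_(d, f) ((((d) & 0x1f) << 3) | ((f) & 7))
#define PCI_SBDF4_(s, b, d, f...) \
    ((pci_sbdf_t){ .sbdf = (((s) & 0xffff) << 16) | PCI_BDF(b, d, ##f) })
#define PCI_SBDF3_ PCI_SBDF4_
#define PCI_SBDF2_(s, bdf) \
    ((pci_sbdf_t){ .sbdf = (((s) & 0xffff) << 16) | ((bdf) & 0xffff) })

/* Double expansion: count_args() is evaluated before token pasting. */
#define PCI__(what, nr) PCI_##what##nr##_
#define PCI_(what, nr) PCI__(what, nr)

#define PCI_DEVFN(d, f...) PCI_(DEVFN, count_args(d, ##f))(d, ##f)
#define PCI_BDF(b, d, f...) ((((b) & 0xff) << 8) | PCI_DEVFN(d, ##f))
#define PCI_SBDF(s, b, d...) PCI_(SBDF, count_args(s, b, ##d))(s, b, ##d)

int main(void)
{
    /* Old PCI_DEVFN(d, f) and PCI_DEVFN2(df) spellings both resolve: */
    printf("%#x %#x\n", PCI_DEVFN(0x1f, 3), PCI_DEVFN(0xfb)); /* 0xfb 0xfb */

    /* Likewise PCI_BDF(b, d, f) and the old PCI_BDF2(b, df): */
    printf("%#x %#x\n", PCI_BDF(6, 0x1f, 3), PCI_BDF(6, 0xfb)); /* 0x6fb 0x6fb */

    /* All three former PCI_SBDF{,2,3}() call forms now share one name: */
    printf("%#x\n", PCI_SBDF(1, 0x6fb).sbdf);      /* was PCI_SBDF2() */
    printf("%#x\n", PCI_SBDF(1, 6, 0xfb).sbdf);    /* was PCI_SBDF3() */
    printf("%#x\n", PCI_SBDF(1, 6, 0x1f, 3).sbdf); /* was PCI_SBDF()  */

    /* Compound literals are lvalues, so taking their address is legal C: */
    const pci_sbdf_t *p = &PCI_SBDF(1, 6, 0x1f, 3);
    printf("%04x:%02x:%02x.%u\n", p->sbdf >> 16, PCI_BUS(p->sbdf),
           PCI_SLOT(p->sbdf), PCI_FUNC(p->sbdf));

    /* The encode/decode round trip doubles as a range check, echoing the
     * bus != PCI_BUS(PCI_BDF(bus, 0)) test in a hunk above: an out-of-range
     * bus does not survive the masking. */
    printf("%d\n", 0x123 == PCI_BUS(PCI_BDF(0x123, 0))); /* prints 0 */

    return 0;
}

Note the constructors deliberately yield a compound literal rather than a
plain integer: being lvalues is what makes the &PCI_SBDF(...) pattern passed
to the %pp format specifier throughout this patch valid C.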