send_iommu_command(iommu, cmd);
}
-void amd_iommu_flush_iotlb(struct pci_dev *pdev,
+void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
uint64_t gaddr, unsigned int order)
{
unsigned long flags;
struct amd_iommu *iommu;
- unsigned int bdf, req_id, queueid, maxpend;
+ unsigned int req_id, queueid, maxpend;
struct pci_ats_dev *ats_pdev;
if ( !ats_enabled )
@@ ... @@
    if ( !pci_ats_enabled(ats_pdev->seg, ats_pdev->bus, ats_pdev->devfn) )
return;
- bdf = PCI_BDF2(ats_pdev->bus, ats_pdev->devfn);
- iommu = find_iommu_for_device(ats_pdev->seg, bdf);
+ iommu = find_iommu_for_device(ats_pdev->seg,
+ PCI_BDF2(ats_pdev->bus, ats_pdev->devfn));
if ( !iommu )
{
@@ ... @@
    if ( !iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
return;
- req_id = get_dma_requestor_id(iommu->seg, bdf);
+ req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(ats_pdev->bus, devfn));
queueid = req_id;
maxpend = ats_pdev->ats_queue_depth & 0xff;
@@ ... @@
        return;
for_each_pdev( d, pdev )
- amd_iommu_flush_iotlb(pdev, gaddr, order);
+ amd_iommu_flush_iotlb(pdev->devfn, pdev, gaddr, order);
}
/* Flush iommu cache after p2m changes. */
@@ ... @@
    if ( devfn == pdev->devfn )
enable_ats_device(iommu->seg, bus, devfn);
- amd_iommu_flush_iotlb(pdev, INV_IOMMU_ALL_PAGES_ADDRESS, 0);
+ amd_iommu_flush_iotlb(devfn, pdev, INV_IOMMU_ALL_PAGES_ADDRESS, 0);
}
}
@@ ... @@
void amd_iommu_flush_all_pages(struct domain *d);
void amd_iommu_flush_pages(struct domain *d, unsigned long gfn,
unsigned int order);
-void amd_iommu_flush_iotlb(struct pci_dev *pdev, uint64_t gaddr,
- unsigned int order);
+void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
+ uint64_t gaddr, unsigned int order);
void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf);
void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf);
void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
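
Taken together, these hunks make the ATS IOTLB flush take the requestor function explicitly: the request ID is now derived from the devfn argument (PCI_BDF2(ats_pdev->bus, devfn)) rather than from the device's own devfn, so a caller can flush on behalf of a function other than the one struct pci_dev describes (for instance a phantom function of a multi-function device). Below is a minimal caller sketch under that assumption; the phantom_stride field and the helper name are illustrative only and are not part of the patch above.

/* Illustrative sketch, not part of the patch: flush the IOTLB for a
 * device and for any additional function numbers it may emit requests
 * from.  phantom_stride is assumed here; 0 means "no phantom functions",
 * in which case the loop runs exactly once for pdev->devfn. */
static void flush_iotlb_with_phantoms(const struct pci_dev *pdev,
                                      uint64_t gaddr, unsigned int order)
{
    u8 devfn = pdev->devfn;

    do {
        amd_iommu_flush_iotlb(devfn, pdev, gaddr, order);
        devfn += pdev->phantom_stride;
    } while ( devfn != pdev->devfn &&
              PCI_SLOT(devfn) == PCI_SLOT(pdev->devfn) );
}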