     spin_unlock(&hd->arch.mapping_lock);
     amd_iommu_flush_pages(d, dfn_x(dfn), 0);
+    return 0;
+}
+
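+/*
+ * Number of order-sized, naturally aligned chunks needed to cover the
+ * page_count pages starting at dfn. A result of 1 means the whole range
+ * fits within a single flush of the given order.
+ */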
+static unsigned long flush_count(unsigned long dfn, unsigned int page_count,
+                                 unsigned int order)
+{
+    unsigned long start = dfn >> order;
+    unsigned long end = ((dfn + page_count - 1) >> order) + 1;
+
+    ASSERT(end > start);
+    return end - start;
+}
+
+int amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
+                                unsigned int page_count)
+{
+    unsigned long dfn_l = dfn_x(dfn);
+
+    ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
+
+    /* If the range wraps then just flush everything */
+    if ( dfn_l + page_count < dfn_l )
+    {
+        amd_iommu_flush_all_pages(d);
+        return 0;
+    }
+
+    /*
+     * Flushes are expensive so find the minimal single flush that will
+     * cover the page range.
+     *
+     * NOTE: It is unnecessary to round down the DFN value to align with
+     *       the flush order here. This is done by the internals of the
+     *       flush code.
+     */
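+    /*
+     * With 4k base pages, an order 9 flush covers a 2MB-aligned region
+     * (512 pages) and an order 18 flush covers a 1GB-aligned region.
+     */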
+    if ( page_count == 1 ) /* order 0 flush count */
+        amd_iommu_flush_pages(d, dfn_l, 0);
+    else if ( flush_count(dfn_l, page_count, 9) == 1 )
+        amd_iommu_flush_pages(d, dfn_l, 9);
+    else if ( flush_count(dfn_l, page_count, 18) == 1 )
+        amd_iommu_flush_pages(d, dfn_l, 18);
+    else
+        amd_iommu_flush_all_pages(d);
+
+    return 0;
+}
+
+int amd_iommu_flush_iotlb_all(struct domain *d)
+{
+    amd_iommu_flush_all_pages(d);
     return 0;
 }
     .teardown = amd_iommu_domain_destroy,
     .map_page = amd_iommu_map_page,
     .unmap_page = amd_iommu_unmap_page,
+    .iotlb_flush = amd_iommu_flush_iotlb_pages,
+    .iotlb_flush_all = amd_iommu_flush_iotlb_all,
     .free_page_table = deallocate_page_table,
     .reassign_device = reassign_device,
     .get_device_group_id = amd_iommu_group_id,
     const struct domain_iommu *hd = dom_iommu(d);
     int rc;
-    if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
+    if ( !iommu_enabled || !hd->platform_ops ||
+         !hd->platform_ops->iotlb_flush || !page_count )
         return 0;
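+    /* Flushing from INVALID_DFN is a caller error rather than a no-op. */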
+    if ( dfn_eq(dfn, INVALID_DFN) )
+        return -EINVAL;
+
     rc = hd->platform_ops->iotlb_flush(d, dfn, page_count);
     if ( unlikely(rc) )
     {
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
                                        paddr_t phys_addr, unsigned long size,
                                        int iw, int ir);
+int __must_check amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
+                                             unsigned int page_count);
+int __must_check amd_iommu_flush_iotlb_all(struct domain *d);
 /* Share p2m table with iommu */
 void amd_iommu_share_p2m(struct domain *d);