vtd: improve IOMMU TLB flush
author    Jan Beulich <jbeulich@suse.com>
          Tue, 7 Jul 2020 13:40:56 +0000 (15:40 +0200)
committer Jan Beulich <jbeulich@suse.com>
          Tue, 7 Jul 2020 13:40:56 +0000 (15:40 +0200)
Do not limit PSI flushes to order 0 pages, in order to avoid doing a
full TLB flush if the passed-in page has an order greater than 0 and
is suitably aligned. This should improve the performance of IOMMU TLB
flushes when dealing with page orders greater than 0.
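
For illustration, the new eligibility test can be read as a standalone
predicate: a page-selective invalidation (PSI) is only attempted when the
region is a non-zero power-of-two number of pages and the starting gfn is
aligned to that size; anything else falls back to a domain-selective
invalidation (DSI). The following is a minimal sketch of that check, not
the Xen code itself (the helper name can_use_psi is made up for this
example):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Sketch of the flush-selection predicate added by this change.
     * A PSI descriptor encodes the target region as (address, order),
     * so it can only describe a naturally aligned power-of-two range.
     */
    static bool can_use_psi(uint64_t gfn, unsigned long page_count)
    {
        /* page_count must be a non-zero power of two. */
        if ( !page_count || (page_count & (page_count - 1)) )
            return false;

        /* gfn must be aligned to the region size (IS_ALIGNED() in Xen). */
        if ( gfn & (page_count - 1) )
            return false;

        return true;
    }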

This is part of XSA-321.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
master commit: 5fe515a0fede07543f2a3b049167b1fd8b873caf
master date: 2020-07-07 14:37:46 +0200

xen/drivers/passthrough/vtd/iommu.c

index 336b778c81c39f5bc71f8499d186e492be08d21e..2180f0e899ad7a5b01d7873628600062fdc45418 100644
@@ -612,13 +612,14 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
         if ( iommu_domid == -1 )
             continue;
 
-        if ( page_count != 1 || gfn == gfn_x(INVALID_GFN) )
+        if ( !page_count || (page_count & (page_count - 1)) ||
+             gfn == gfn_x(INVALID_GFN) || !IS_ALIGNED(gfn, page_count) )
             rc = iommu_flush_iotlb_dsi(iommu, iommu_domid,
                                        0, flush_dev_iotlb);
         else
             rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
                                        (paddr_t)gfn << PAGE_SHIFT_4K,
-                                       PAGE_ORDER_4K,
+                                       get_order_from_pages(page_count),
                                        !dma_old_pte_present,
                                        flush_dev_iotlb);
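
The order handed to iommu_flush_iotlb_psi() comes from Xen's
get_order_from_pages(); because the branch above has already ensured
page_count is a power of two, that value is simply log2(page_count). A
rough standalone equivalent, given here only as an illustration and not
as the Xen implementation:

    /*
     * Smallest order such that (1ul << order) covers page_count; for a
     * power-of-two count this is exactly log2(page_count).
     */
    static unsigned int order_from_pages(unsigned long page_count)
    {
        unsigned int order = 0;

        while ( (1ul << order) < page_count )
            order++;

        return order;
    }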