Revert "x86/mm/p2m: stop checking for IOMMU shared page tables in mmio_order()"
author    Roger Pau Monne <roger.pau@citrix.com>
          Thu, 24 Jan 2019 10:06:47 +0000 (11:06 +0100)
committer Roger Pau Monne <roger.pau@citrix.com>
          Thu, 24 Jan 2019 10:06:47 +0000 (11:06 +0100)
This reverts commit a5b0eb363694e7e15405f0b3fc5fb6fab79df1db.

Now that the iommu_map() and iommu_unmap() operations take an order
parameter and elide flushing, there's no strong reason why modifying
MMIO ranges in the p2m should be restricted to a 4k granularity simply
because the IOMMU is enabled but shared page tables are not in
operation.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
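
A self-contained toy illustrating the rationale above, assuming nothing
about Xen's actual interfaces: once a map operation takes an order and
defers IOTLB flushing to the caller (via an accumulated flush-flags
value), a 2M-aligned MMIO range can be handled as one large operation
with a single flush, rather than 512 individually flushed 4k mappings.
demo_iommu_map() and demo_iotlb_flush() are hypothetical stand-ins, not
the real iommu_map()/iommu_unmap() signatures.

    #include <stdio.h>

    #define PAGE_ORDER_2M 9

    /* Record that mappings changed; the caller flushes once at the end. */
    static int demo_iommu_map(unsigned long dfn, unsigned long mfn,
                              unsigned int order, unsigned int *flush_flags)
    {
        (void)dfn; (void)mfn; (void)order;
        /* A real implementation would install (1UL << order) PTEs, or a
         * single superpage entry where the hardware supports it. */
        *flush_flags |= 1; /* the IOTLB is now stale */
        return 0;
    }

    static void demo_iotlb_flush(unsigned long dfn, unsigned long count,
                                 unsigned int flush_flags)
    {
        if ( flush_flags )
            printf("one flush covering %lu frames at dfn %#lx\n", count, dfn);
    }

    int main(void)
    {
        unsigned int flush_flags = 0;

        /* One order-9 (2M) operation instead of 512 order-0 operations. */
        demo_iommu_map(0x200, 0x200, PAGE_ORDER_2M, &flush_flags);
        demo_iotlb_flush(0x200, 1UL << PAGE_ORDER_2M, flush_flags);
        return 0;
    }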
xen/arch/x86/mm/p2m.c

index d14ce57dd59300c5006901442f25bb7abcc5225c..41134f437b428bab8f71938f1346b6af92387255 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2216,12 +2216,13 @@ static unsigned int mmio_order(const struct domain *d,
                                unsigned long start_fn, unsigned long nr)
 {
     /*
-     * Note that the !hap_enabled() here has two effects:
+     * Note that the !iommu_use_hap_pt() here has three effects:
+     * - cover iommu_{,un}map_page() not having an "order" input yet,
      * - exclude shadow mode (which doesn't support large MMIO mappings),
      * - exclude PV guests, should execution reach this code for such.
      * So be careful when altering this.
      */
-    if ( !hap_enabled(d) ||
+    if ( !iommu_use_hap_pt(d) ||
          (start_fn & ((1UL << PAGE_ORDER_2M) - 1)) || !(nr >> PAGE_ORDER_2M) )
         return PAGE_ORDER_4K;
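
For reference only (not part of the commit): the restored condition
returns PAGE_ORDER_4K unless the IOMMU shares the HAP page tables, the
start frame is 2M-aligned, and the range covers at least one full 2M
superpage. Below is a minimal standalone sketch of just the logic
visible in this hunk (demo_mmio_order() is a hypothetical name; the
real function goes on to consider larger orders past the lines shown):

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_ORDER_4K 0
    #define PAGE_ORDER_2M 9

    static unsigned int demo_mmio_order(bool use_hap_pt,
                                        unsigned long start_fn,
                                        unsigned long nr)
    {
        /* Fall back to 4k if superpages can't be used, if the start is
         * not 2M-aligned, or if fewer than 512 frames remain. */
        if ( !use_hap_pt ||
             (start_fn & ((1UL << PAGE_ORDER_2M) - 1)) ||
             !(nr >> PAGE_ORDER_2M) )
            return PAGE_ORDER_4K;
        return PAGE_ORDER_2M;
    }

    int main(void)
    {
        printf("%u\n", demo_mmio_order(true, 0x200, 512)); /* 9: 2M ok     */
        printf("%u\n", demo_mmio_order(true, 0x201, 512)); /* 0: unaligned */
        printf("%u\n", demo_mmio_order(true, 0x200, 511)); /* 0: too small */
        return 0;
    }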