xenbits.xensource.com Git - people/royger/xen.git/commitdiff
iommu / p2m: add a page_order parameter to iommu_map/unmap_page()...
author Paul Durrant <paul.durrant@citrix.com>
Wed, 21 Nov 2018 09:50:29 +0000 (10:50 +0100)
committer Jan Beulich <jbeulich@suse.com>
Wed, 21 Nov 2018 09:50:29 +0000 (10:50 +0100)
...and re-name them to iommu_map/unmap() since they no longer necessarily
operate on a single page.

The P2M code currently contains many loops to deal with the fact that,
while it may be required to handle page orders greater than 0, the
IOMMU map and unmap functions do not.
This patch adds a page_order parameter to those functions and implements
the necessary loops within. This allows the P2M code to be substantially
simplified.

This patch also adds emacs boilerplate to xen/iommu.h to avoid tabbing
problem.

NOTE: This patch does not modify the underlying vendor IOMMU
      implementations to deal with more than a single page at once.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
xen/arch/x86/mm.c
xen/arch/x86/mm/p2m-ept.c
xen/arch/x86/mm/p2m-pt.c
xen/arch/x86/mm/p2m.c
xen/arch/x86/x86_64/mm.c
xen/common/grant_table.c
xen/drivers/passthrough/iommu.c
xen/drivers/passthrough/x86/iommu.c
xen/include/xen/iommu.h

index 126a4d2ef5334ea5d7eeb74bde37c4600d9c775c..28a003063e70fb0c1ece499914a6e583f4d6f8db 100644 (file)
@@ -2801,11 +2801,12 @@ static int _get_page_type(struct page_info *page, unsigned long type,
             mfn_t mfn = page_to_mfn(page);
 
             if ( (x & PGT_type_mask) == PGT_writable_page )
-                iommu_ret = iommu_unmap_page(d, _dfn(mfn_x(mfn)));
+                iommu_ret = iommu_unmap(d, _dfn(mfn_x(mfn)),
+                                        PAGE_ORDER_4K);
             else if ( type == PGT_writable_page )
-                iommu_ret = iommu_map_page(d, _dfn(mfn_x(mfn)), mfn,
-                                           IOMMUF_readable |
-                                           IOMMUF_writable);
+                iommu_ret = iommu_map(d, _dfn(mfn_x(mfn)), mfn,
+                                      PAGE_ORDER_4K,
+                                      IOMMUF_readable | IOMMUF_writable);
         }
     }
 
index fabcd06f07f260f8b1911da28c4bfaf340e839f9..6e4e375bad51952b26b4b4adba3f4baad74691a5 100644 (file)
@@ -881,33 +881,9 @@ out:
         if ( iommu_use_hap_pt(d) )
             rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
         else if ( need_iommu_pt_sync(d) )
-        {
-            dfn_t dfn = _dfn(gfn);
-
-            if ( iommu_flags )
-                for ( i = 0; i < (1 << order); i++ )
-                {
-                    rc = iommu_map_page(d, dfn_add(dfn, i),
-                                        mfn_add(mfn, i), iommu_flags);
-                    if ( unlikely(rc) )
-                    {
-                        while ( i-- )
-                            /* If statement to satisfy __must_check. */
-                            if ( iommu_unmap_page(p2m->domain,
-                                                  dfn_add(dfn, i)) )
-                                continue;
-
-                        break;
-                    }
-                }
-            else
-                for ( i = 0; i < (1 << order); i++ )
-                {
-                    ret = iommu_unmap_page(d, dfn_add(dfn, i));
-                    if ( !rc )
-                        rc = ret;
-                }
-        }
+            rc = iommu_flags ?
+                iommu_map(d, _dfn(gfn), mfn, order, iommu_flags) :
+                iommu_unmap(d, _dfn(gfn), order);
     }
 
     unmap_domain_page(table);
index 55df18501e0ae32ee59d8ce05c3fd5a99941b754..17a6b61f12797fec25acb2b9159f39ea7db73a60 100644 (file)
@@ -477,10 +477,11 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
                  unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma,
                  int sve)
 {
+    struct domain *d = p2m->domain;
     /* XXX -- this might be able to be faster iff current->domain == d */
     void *table;
     unsigned long gfn = gfn_x(gfn_);
-    unsigned long i, gfn_remainder = gfn;
+    unsigned long gfn_remainder = gfn;
     l1_pgentry_t *p2m_entry, entry_content;
     /* Intermediate table to free if we're replacing it with a superpage. */
     l1_pgentry_t intermediate_entry = l1e_empty();
@@ -515,7 +516,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         t.gfn = gfn;
         t.mfn = mfn_x(mfn);
         t.p2mt = p2mt;
-        t.d = p2m->domain->domain_id;
+        t.d = d->domain_id;
         t.order = page_order;
 
         __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
@@ -683,41 +684,12 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
     {
         ASSERT(rc == 0);
 
-        if ( iommu_use_hap_pt(p2m->domain) )
-        {
-            if ( iommu_old_flags )
-                amd_iommu_flush_pages(p2m->domain, gfn, page_order);
-        }
-        else if ( need_iommu_pt_sync(p2m->domain) )
-        {
-            dfn_t dfn = _dfn(gfn);
-
-            if ( iommu_pte_flags )
-                for ( i = 0; i < (1UL << page_order); i++ )
-                {
-                    rc = iommu_map_page(p2m->domain, dfn_add(dfn, i),
-                                        mfn_add(mfn, i), iommu_pte_flags);
-                    if ( unlikely(rc) )
-                    {
-                        while ( i-- )
-                            /* If statement to satisfy __must_check. */
-                            if ( iommu_unmap_page(p2m->domain,
-                                                  dfn_add(dfn, i)) )
-                                continue;
-
-                        break;
-                    }
-                }
-            else
-                for ( i = 0; i < (1UL << page_order); i++ )
-                {
-                    int ret = iommu_unmap_page(p2m->domain,
-                                               dfn_add(dfn, i));
-
-                    if ( !rc )
-                        rc = ret;
-                }
-        }
+        if ( need_iommu_pt_sync(p2m->domain) )
+            rc = iommu_pte_flags ?
+                iommu_map(d, _dfn(gfn), mfn, page_order, iommu_pte_flags) :
+                iommu_unmap(d, _dfn(gfn), page_order);
+        else if ( iommu_use_hap_pt(d) && iommu_old_flags )
+            amd_iommu_flush_pages(p2m->domain, gfn, page_order);
     }
 
     /*
index 6a1abb6df5563a9f567b41442baee653304b0b79..b5a59d6919a2e8afe8a332319a8fcbe56b3243cf 100644 (file)
@@ -710,24 +710,8 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
     p2m_access_t a;
 
     if ( !paging_mode_translate(p2m->domain) )
-    {
-        int rc = 0;
-
-        if ( need_iommu_pt_sync(p2m->domain) )
-        {
-            dfn_t dfn = _dfn(mfn);
-
-            for ( i = 0; i < (1 << page_order); i++ )
-            {
-                int ret = iommu_unmap_page(p2m->domain, dfn_add(dfn, i));
-
-                if ( !rc )
-                    rc = ret;
-            }
-        }
-
-        return rc;
-    }
+        return need_iommu_pt_sync(p2m->domain) ?
+            iommu_unmap(p2m->domain, _dfn(mfn), page_order) : 0;
 
     ASSERT(gfn_locked_by_me(p2m, gfn));
     P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn_l, mfn);
@@ -773,28 +757,9 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
     int rc = 0;
 
     if ( !paging_mode_translate(d) )
-    {
-        if ( need_iommu_pt_sync(d) && t == p2m_ram_rw )
-        {
-            dfn_t dfn = _dfn(mfn_x(mfn));
-
-            for ( i = 0; i < (1 << page_order); i++ )
-            {
-                rc = iommu_map_page(d, dfn_add(dfn, i), mfn_add(mfn, i),
-                                    IOMMUF_readable|IOMMUF_writable);
-                if ( rc != 0 )
-                {
-                    while ( i-- > 0 )
-                        /* If statement to satisfy __must_check. */
-                        if ( iommu_unmap_page(d, dfn_add(dfn, i)) )
-                            continue;
-
-                    return rc;
-                }
-            }
-        }
-        return 0;
-    }
+        return (need_iommu_pt_sync(d) && t == p2m_ram_rw) ?
+            iommu_map(d, _dfn(mfn_x(mfn)), mfn, page_order,
+                      IOMMUF_readable | IOMMUF_writable) : 0;
 
     /* foreign pages are added thru p2m_add_foreign */
     if ( p2m_is_foreign(t) )
@@ -1164,8 +1129,8 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
     {
         if ( !need_iommu_pt_sync(d) )
             return 0;
-        return iommu_map_page(d, _dfn(gfn_l), _mfn(gfn_l),
-                              IOMMUF_readable | IOMMUF_writable);
+        return iommu_map(d, _dfn(gfn_l), _mfn(gfn_l), PAGE_ORDER_4K,
+                         IOMMUF_readable | IOMMUF_writable);
     }
 
     gfn_lock(p2m, gfn, 0);
@@ -1255,7 +1220,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
     {
         if ( !need_iommu_pt_sync(d) )
             return 0;
-        return iommu_unmap_page(d, _dfn(gfn_l));
+        return iommu_unmap(d, _dfn(gfn_l), PAGE_ORDER_4K);
     }
 
     gfn_lock(p2m, gfn, 0);
index 543ea030e30efc754d8aac7c201ca92c53406b2b..11977f267146007f18cf225244a72b80643f878a 100644 (file)
@@ -1436,14 +1436,15 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
          !need_iommu_pt_sync(hardware_domain) )
     {
         for ( i = spfn; i < epfn; i++ )
-            if ( iommu_map_page(hardware_domain, _dfn(i), _mfn(i),
-                                IOMMUF_readable | IOMMUF_writable) )
+            if ( iommu_map(hardware_domain, _dfn(i), _mfn(i),
+                           PAGE_ORDER_4K,
+                           IOMMUF_readable | IOMMUF_writable) )
                 break;
         if ( i != epfn )
         {
             while (i-- > old_max)
                 /* If statement to satisfy __must_check. */
-                if ( iommu_unmap_page(hardware_domain, _dfn(i)) )
+                if ( iommu_unmap(hardware_domain, _dfn(i), PAGE_ORDER_4K) )
                     continue;
 
             goto destroy_m2p;
index 0b5894a07026cadb727c4203e063ab5f184bcb30..fc41b655e03fceb17ba3db4b1ef03d63e304683f 100644 (file)
@@ -1134,14 +1134,14 @@ map_grant_ref(
              !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
         {
             if ( !(kind & MAPKIND_WRITE) )
-                err = iommu_map_page(ld, _dfn(mfn_x(mfn)), mfn,
-                                     IOMMUF_readable | IOMMUF_writable);
+                err = iommu_map(ld, _dfn(mfn_x(mfn)), mfn, 0,
+                                IOMMUF_readable | IOMMUF_writable);
         }
         else if ( act_pin && !old_pin )
         {
             if ( !kind )
-                err = iommu_map_page(ld, _dfn(mfn_x(mfn)), mfn,
-                                     IOMMUF_readable);
+                err = iommu_map(ld, _dfn(mfn_x(mfn)), mfn, 0,
+                                IOMMUF_readable);
         }
         if ( err )
         {
@@ -1389,10 +1389,10 @@ unmap_common(
 
         kind = mapkind(lgt, rd, op->mfn);
         if ( !kind )
-            err = iommu_unmap_page(ld, _dfn(mfn_x(op->mfn)));
+            err = iommu_unmap(ld, _dfn(mfn_x(op->mfn)), 0);
         else if ( !(kind & MAPKIND_WRITE) )
-            err = iommu_map_page(ld, _dfn(mfn_x(op->mfn)), op->mfn,
-                                 IOMMUF_readable);
+            err = iommu_map(ld, _dfn(mfn_x(op->mfn)), op->mfn, 0,
+                            IOMMUF_readable);
 
         double_gt_unlock(lgt, rgt);
 
index 8b438ae4bcb2201cadfdea80bb2ae3fbd6f3e7af..ac62d7f52aa91e3da06195fed77930ee7dcc8d3d 100644 (file)
@@ -304,48 +304,78 @@ void iommu_domain_destroy(struct domain *d)
     arch_iommu_domain_destroy(d);
 }
 
-int iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
-                   unsigned int flags)
+int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+              unsigned int page_order, unsigned int flags)
 {
     const struct domain_iommu *hd = dom_iommu(d);
-    int rc;
+    unsigned long i;
+    int rc = 0;
 
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    rc = hd->platform_ops->map_page(d, dfn, mfn, flags);
-    if ( unlikely(rc) )
+    ASSERT(IS_ALIGNED(dfn_x(dfn), (1ul << page_order)));
+    ASSERT(IS_ALIGNED(mfn_x(mfn), (1ul << page_order)));
+
+    for ( i = 0; i < (1ul << page_order); i++ )
     {
+        rc = hd->platform_ops->map_page(d, dfn_add(dfn, i),
+                                        mfn_add(mfn, i), flags);
+
+        if ( likely(!rc) )
+            continue;
+
         if ( !d->is_shutting_down && printk_ratelimit() )
             printk(XENLOG_ERR
                    "d%d: IOMMU mapping dfn %"PRI_dfn" to mfn %"PRI_mfn" failed: %d\n",
-                   d->domain_id, dfn_x(dfn), mfn_x(mfn), rc);
+                   d->domain_id, dfn_x(dfn_add(dfn, i)),
+                   mfn_x(mfn_add(mfn, i)), rc);
+
+        while ( i-- )
+            /* if statement to satisfy __must_check */
+            if ( hd->platform_ops->unmap_page(d, dfn_add(dfn, i)) )
+                continue;
 
         if ( !is_hardware_domain(d) )
             domain_crash(d);
+
+        break;
     }
 
     return rc;
 }
 
-int iommu_unmap_page(struct domain *d, dfn_t dfn)
+int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order)
 {
     const struct domain_iommu *hd = dom_iommu(d);
-    int rc;
+    unsigned long i;
+    int rc = 0;
 
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    rc = hd->platform_ops->unmap_page(d, dfn);
-    if ( unlikely(rc) )
+    ASSERT(IS_ALIGNED(dfn_x(dfn), (1ul << page_order)));
+
+    for ( i = 0; i < (1ul << page_order); i++ )
     {
+        int err = hd->platform_ops->unmap_page(d, dfn_add(dfn, i));
+
+        if ( likely(!err) )
+            continue;
+
         if ( !d->is_shutting_down && printk_ratelimit() )
             printk(XENLOG_ERR
                    "d%d: IOMMU unmapping dfn %"PRI_dfn" failed: %d\n",
-                   d->domain_id, dfn_x(dfn), rc);
+                   d->domain_id, dfn_x(dfn_add(dfn, i)), err);
+
+        if ( !rc )
+            rc = err;
 
         if ( !is_hardware_domain(d) )
+        {
             domain_crash(d);
+            break;
+        }
     }
 
     return rc;
index e488889071d75e0b478e15b64610532d4484173c..c68a72279d4f0d3ab2166e9a8c46a6672a1dbf96 100644 (file)
@@ -241,8 +241,8 @@ void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
         if ( paging_mode_translate(d) )
             rc = set_identity_p2m_entry(d, pfn, p2m_access_rw, 0);
         else
-            rc = iommu_map_page(d, _dfn(pfn), _mfn(pfn),
-                                IOMMUF_readable | IOMMUF_writable);
+            rc = iommu_map(d, _dfn(pfn), _mfn(pfn), PAGE_ORDER_4K,
+                           IOMMUF_readable | IOMMUF_writable);
         if ( rc )
             printk(XENLOG_WARNING " d%d: IOMMU mapping failed: %d\n",
                    d->domain_id, rc);
index 5ba0904b88c61abdba87a807e79798f9119e89b9..3d781268010abb35752012ce0d14145ceb1852cf 100644 (file)
@@ -88,9 +88,10 @@ void iommu_teardown(struct domain *d);
 #define IOMMUF_readable  (1u<<_IOMMUF_readable)
 #define _IOMMUF_writable 1
 #define IOMMUF_writable  (1u<<_IOMMUF_writable)
-int __must_check iommu_map_page(struct domain *d, dfn_t dfn,
-                                mfn_t mfn, unsigned int flags);
-int __must_check iommu_unmap_page(struct domain *d, dfn_t dfn);
+int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+                           unsigned int page_order, unsigned int flags);
+int __must_check iommu_unmap(struct domain *d, dfn_t dfn,
+                             unsigned int page_order);
 int __must_check iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
                                    unsigned int *flags);
 
@@ -268,3 +269,12 @@ extern struct spinlock iommu_pt_cleanup_lock;
 extern struct page_list_head iommu_pt_cleanup_list;
 
 #endif /* _IOMMU_H_ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */