IOMMU/MMU: enhance the call trees of IOMMU unmapping and mapping
author     Quan Xu <quan.xu@intel.com>
           Tue, 14 Jun 2016 13:11:48 +0000 (15:11 +0200)
committer  Jan Beulich <jbeulich@suse.com>
           Tue, 14 Jun 2016 13:11:48 +0000 (15:11 +0200)
When IOMMU mapping fails, we issue a best-effort rollback: stop further
IOMMU mapping, unmap the previously established IOMMU mappings, and
report the error up the call trees. When rollback is not feasible for
the hardware domain (in the early initialization phase, or as a
trade-off against complexity), we act on a best-effort basis, only
logging an error message.

IOMMU unmapping should continue despite an error, in an attempt at
best-effort cleanup.
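
To make the rollback pattern concrete, below is a minimal, self-contained
C sketch of the scheme the ept_set_entry() and p2m_pt_set_entry() hunks
apply. map_page(), unmap_page() and map_range() are hypothetical
stand-ins, not Xen's real iommu_map_page()/iommu_unmap_page() interfaces:

#include <stdio.h>

/* Hypothetical stand-ins; gfn 3 is made to fail so the rollback runs. */
static int map_page(unsigned long gfn)
{
    return gfn == 3 ? -1 : 0;
}

static int unmap_page(unsigned long gfn)
{
    printf("rollback: unmap %lu\n", gfn);
    return 0;
}

static int map_range(unsigned long base, unsigned long count)
{
    unsigned long i;
    int rc = 0;

    for ( i = 0; i < count; i++ )
    {
        rc = map_page(base + i);
        if ( rc )
        {
            /* Best-effort rollback: unmap what was already mapped. */
            while ( i-- )
                /* If statement to satisfy __must_check. */
                if ( unmap_page(base + i) )
                    continue;

            break;
        }
    }

    return rc;
}

int main(void)
{
    printf("map_range -> %d\n", map_range(0, 8)); /* fails at gfn 3 */
    return 0;
}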

Signed-off-by: Quan Xu <quan.xu@intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
xen/arch/x86/mm.c
xen/arch/x86/mm/p2m-ept.c
xen/arch/x86/mm/p2m-pt.c
xen/arch/x86/mm/p2m.c
xen/arch/x86/x86_64/mm.c
xen/drivers/passthrough/amd/pci_amd_iommu.c
xen/drivers/passthrough/iommu.c
xen/drivers/passthrough/vtd/x86/vtd.c
xen/include/xen/iommu.h

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 8d10a3ecb9695d0bc3d8168c5db06d9a37c14ead..ae7c8ab9a8ea9b48051c6b1ced71a8d95aebfad6 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2467,7 +2467,7 @@ static int __get_page_type(struct page_info *page, unsigned long type,
                            int preemptible)
 {
     unsigned long nx, x, y = page->u.inuse.type_info;
-    int rc = 0;
+    int rc = 0, iommu_ret = 0;
 
     ASSERT(!(type & ~(PGT_type_mask | PGT_pae_xen_l2)));
 
@@ -2578,11 +2578,11 @@ static int __get_page_type(struct page_info *page, unsigned long type,
         if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
         {
             if ( (x & PGT_type_mask) == PGT_writable_page )
-                iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
+                iommu_ret = iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
             else if ( type == PGT_writable_page )
-                iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
-                               page_to_mfn(page),
-                               IOMMUF_readable|IOMMUF_writable);
+                iommu_ret = iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
+                                           page_to_mfn(page),
+                                           IOMMUF_readable|IOMMUF_writable);
         }
     }
 
@@ -2599,6 +2599,9 @@ static int __get_page_type(struct page_info *page, unsigned long type,
     if ( (x & PGT_partial) && !(nx & PGT_partial) )
         put_page(page);
 
+    if ( !rc )
+        rc = iommu_ret;
+
     return rc;
 }
 
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 1ed5b47c0d786a9f0e3d3894006f49ddeb43aeb8..b4505f35782e8a1b17b369ac9a9ae31fbdf904ed 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -667,6 +667,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
     unsigned long gfn_remainder = gfn;
     unsigned int i, target = order / EPT_TABLE_ORDER;
     int ret, rc = 0;
+    bool_t entry_written = 0;
     bool_t direct_mmio = (p2mt == p2m_mmio_direct);
     uint8_t ipat = 0;
     bool_t need_modify_vtd_table = 1;
@@ -812,10 +813,15 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
     rc = atomic_write_ept_entry(ept_entry, new_entry, target);
     if ( unlikely(rc) )
         old_entry.epte = 0;
-    else if ( p2mt != p2m_invalid &&
-              (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
-        /* Track the highest gfn for which we have ever had a valid mapping */
-        p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
+    else
+    {
+        entry_written = 1;
+
+        if ( p2mt != p2m_invalid &&
+             (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
+            /* Track the highest gfn for which we have ever had a valid mapping */
+            p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
+    }
 
 out:
     if ( needs_sync )
@@ -831,10 +837,25 @@ out:
         {
             if ( iommu_flags )
                 for ( i = 0; i < (1 << order); i++ )
-                    iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+                {
+                    rc = iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+                    if ( unlikely(rc) )
+                    {
+                        while ( i-- )
+                            /* If statement to satisfy __must_check. */
+                            if ( iommu_unmap_page(p2m->domain, gfn + i) )
+                                continue;
+
+                        break;
+                    }
+                }
             else
                 for ( i = 0; i < (1 << order); i++ )
-                    iommu_unmap_page(d, gfn + i);
+                {
+                    ret = iommu_unmap_page(d, gfn + i);
+                    if ( !rc )
+                        rc = ret;
+                }
         }
     }
 
@@ -847,7 +868,7 @@ out:
     if ( is_epte_present(&old_entry) )
         ept_free_entry(p2m, &old_entry, target);
 
-    if ( rc == 0 && p2m_is_hostp2m(p2m) )
+    if ( entry_written && p2m_is_hostp2m(p2m) )
         p2m_altp2m_propagate_change(d, _gfn(gfn), mfn, order, p2mt, p2ma);
 
     return rc;
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 3d8061251387e21e4f0e1fb9448b31009ab81112..498093496e0f72ea55a09a31a9e2ed47dfe5c002 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -673,6 +673,8 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
     if ( iommu_enabled && need_iommu(p2m->domain) &&
          (iommu_old_flags != iommu_pte_flags || old_mfn != mfn_x(mfn)) )
     {
+        ASSERT(rc == 0);
+
         if ( iommu_use_hap_pt(p2m->domain) )
         {
             if ( iommu_old_flags )
@@ -680,11 +682,27 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
         }
         else if ( iommu_pte_flags )
             for ( i = 0; i < (1UL << page_order); i++ )
-                iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
-                               iommu_pte_flags);
+            {
+                rc = iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
+                                    iommu_pte_flags);
+                if ( unlikely(rc) )
+                {
+                    while ( i-- )
+                        /* If statement to satisfy __must_check. */
+                        if ( iommu_unmap_page(p2m->domain, gfn + i) )
+                            continue;
+
+                    break;
+                }
+            }
         else
             for ( i = 0; i < (1UL << page_order); i++ )
-                iommu_unmap_page(p2m->domain, gfn + i);
+            {
+                int ret = iommu_unmap_page(p2m->domain, gfn + i);
+
+                if ( !rc )
+                    rc = ret;
+            }
     }
 
     /*
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 9b19769abe36e917537336783d8ab044e2df8d2f..89462b2c60bb8f7650e1f3a58f8cb0f7e3ebb124 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -641,10 +641,20 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
 
     if ( !paging_mode_translate(p2m->domain) )
     {
+        int rc = 0;
+
         if ( need_iommu(p2m->domain) )
+        {
             for ( i = 0; i < (1 << page_order); i++ )
-                iommu_unmap_page(p2m->domain, mfn + i);
-        return 0;
+            {
+                int ret = iommu_unmap_page(p2m->domain, mfn + i);
+
+                if ( !rc )
+                    rc = ret;
+            }
+        }
+
+        return rc;
     }
 
     ASSERT(gfn_locked_by_me(p2m, gfn));
@@ -700,7 +710,10 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
                 if ( rc != 0 )
                 {
                     while ( i-- > 0 )
-                        iommu_unmap_page(d, mfn + i);
+                        /* If statement to satisfy __must_check. */
+                        if ( iommu_unmap_page(d, mfn + i) )
+                            continue;
+
                     return rc;
                 }
             }
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index e07e69e3bfc6ad93ae1dbd601761a968d4f8e210..ff9fc434333e64c31ea5b966fe0b9671032dc08e 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1436,7 +1436,10 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
         if ( i != epfn )
         {
             while (i-- > old_max)
-                iommu_unmap_page(hardware_domain, i);
+                /* If statement to satisfy __must_check. */
+                if ( iommu_unmap_page(hardware_domain, i) )
+                    continue;
+
             goto destroy_m2p;
         }
     }
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index fce9827b00c638f955ac9548badb96db2c8499c9..4a860afbaab1021a8d5325fb07c19cb05b14500e 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -282,6 +282,8 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
 
     if ( !iommu_passthrough && !need_iommu(d) )
     {
+        int rc = 0;
+
         /* Set up 1:1 page table for dom0 */
         for ( i = 0; i < max_pdx; i++ )
         {
@@ -292,12 +294,21 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
              * a pfn_valid() check would seem desirable here.
              */
             if ( mfn_valid(pfn) )
-                amd_iommu_map_page(d, pfn, pfn, 
-                                   IOMMUF_readable|IOMMUF_writable);
+            {
+                int ret = amd_iommu_map_page(d, pfn, pfn,
+                                             IOMMUF_readable|IOMMUF_writable);
+
+                if ( !rc )
+                    rc = ret;
+            }
 
             if ( !(i & 0xfffff) )
                 process_pending_softirqs();
         }
+
+        if ( rc )
+            AMD_IOMMU_DEBUG("d%d: IOMMU mapping failed: %d\n",
+                            d->domain_id, rc);
     }
 
     for_each_amd_iommu ( iommu )
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 673e1262058b9d5f60a9bc6696e15a4046560012..ec85352ab2cfd3424dbf72cd5d7cbcdc49862f1d 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -171,20 +171,31 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
     {
         struct page_info *page;
         unsigned int i = 0;
+        int rc = 0;
+
         page_list_for_each ( page, &d->page_list )
         {
             unsigned long mfn = page_to_mfn(page);
             unsigned long gfn = mfn_to_gmfn(d, mfn);
             unsigned int mapping = IOMMUF_readable;
+            int ret;
 
             if ( ((page->u.inuse.type_info & PGT_count_mask) == 0) ||
                  ((page->u.inuse.type_info & PGT_type_mask)
                   == PGT_writable_page) )
                 mapping |= IOMMUF_writable;
-            hd->platform_ops->map_page(d, gfn, mfn, mapping);
+
+            ret = hd->platform_ops->map_page(d, gfn, mfn, mapping);
+            if ( !rc )
+                rc = ret;
+
             if ( !(i++ & 0xfffff) )
                 process_pending_softirqs();
         }
+
+        if ( rc )
+            printk(XENLOG_WARNING "d%d: IOMMU mapping failed: %d\n",
+                   d->domain_id, rc);
     }
 
     return hd->platform_ops->hwdom_init(d);
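
The hardware-domain hunks (AMD and common code above, VT-d below) all
share a first-error-wins accumulation idiom: the loop never stops early,
since rollback is not feasible for the hardware domain, but the first
failure is recorded and reported. A standalone sketch, with op() as a
hypothetical per-page operation:

#include <stdio.h>

/* Hypothetical per-page operation: fails on pages 0, 3, 6, ... */
static int op(unsigned long i)
{
    return (i % 3) ? 0 : -22;
}

static int apply_all(unsigned long n)
{
    unsigned long i;
    int rc = 0;

    for ( i = 0; i < n; i++ )
    {
        int ret = op(i);

        if ( !rc )
            rc = ret;       /* remember the first error, keep going */
    }

    return rc;
}

int main(void)
{
    printf("apply_all -> %d\n", apply_all(8)); /* -22; all 8 were tried */
    return 0;
}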
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index c0d6aabfe12cc56285c75f6b22f36d8dabb3ad79..974b537478d6975fd50f9ae95f20264dd08ae20d 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -118,6 +118,8 @@ void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
 
     for ( i = 0; i < top; i++ )
     {
+        int rc = 0;
+
         /*
          * Set up 1:1 mapping for dom0. Default to use only conventional RAM
          * areas and let RMRRs include needed reserved regions. When set, the
@@ -140,8 +142,17 @@ void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
 
         tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
         for ( j = 0; j < tmp; j++ )
-            iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
-                           IOMMUF_readable|IOMMUF_writable);
+        {
+            int ret = iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
+                                     IOMMUF_readable|IOMMUF_writable);
+
+            if ( !rc )
+               rc = ret;
+        }
+
+        if ( rc )
+           printk(XENLOG_WARNING VTDPREFIX " d%d: IOMMU mapping failed: %d\n",
+                  d->domain_id, rc);
 
         if (!(i & (0xfffff >> (PAGE_SHIFT - PAGE_SHIFT_4K))))
             process_pending_softirqs();
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 19ba976fbe7deab4b36a4f04ce7bf48b36e7bb13..eaa2c77a8e0d4361e2e01b147bb45254f9823998 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -74,9 +74,9 @@ void iommu_teardown(struct domain *d);
 #define IOMMUF_readable  (1u<<_IOMMUF_readable)
 #define _IOMMUF_writable 1
 #define IOMMUF_writable  (1u<<_IOMMUF_writable)
-int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
-                   unsigned int flags);
-int iommu_unmap_page(struct domain *d, unsigned long gfn);
+int __must_check iommu_map_page(struct domain *d, unsigned long gfn,
+                                unsigned long mfn, unsigned int flags);
+int __must_check iommu_unmap_page(struct domain *d, unsigned long gfn);
 
 enum iommu_feature
 {
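
The iommu.h hunk above is what drives the rest of the patch: declaring
iommu_map_page() and iommu_unmap_page() __must_check forces every caller
to consume their return values, which is also why the
"/* If statement to satisfy __must_check. */" idiom appears throughout.
A standalone illustration follows; the attribute shown is the usual
GCC/Clang definition, while Xen defines __must_check in its own compiler
header:

#include <stdio.h>

#define __must_check __attribute__((__warn_unused_result__))

static int __must_check do_unmap(unsigned long gfn)
{
    printf("unmap %lu\n", gfn);
    return 0;
}

int main(void)
{
    unsigned long gfn = 42;

    /* do_unmap(gfn); */    /* would trigger -Wunused-result */

    /* If statement to satisfy __must_check, mirroring the patch. */
    if ( do_unmap(gfn) )
        /* best effort: swallow the error and carry on */;

    return 0;
}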