p2m: Check return value of p2m_set_entry() when decreasing reservation
author     George Dunlap <george.dunlap@citrix.com>
           Tue, 28 Nov 2017 12:13:26 +0000 (13:13 +0100)
committer  Jan Beulich <jbeulich@suse.com>
           Tue, 28 Nov 2017 12:13:26 +0000 (13:13 +0100)
If the entire range specified to p2m_pod_decrease_reservation() is marked
populate-on-demand, then it will make a single p2m_set_entry() call,
reducing its PoD entry count.

Unfortunately, in the right circumstances, this p2m_set_entry() call
may fail.  In that case the PoD entry count is still decremented even
though the entries themselves are left unchanged, so repeated calls to
decrease_reservation() on the same range may cause p2m->pod.entry_count
to fall below zero, potentially tripping over BUG_ON()s to the contrary.
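
For reference, the pre-patch shape of this fast path (abridged from the
first hunk below) decrements the counter without checking whether the
write took effect:

    /* Pre-patch, abridged: the return value of p2m_set_entry() is ignored */
    p2m_set_entry(p2m, gfn, INVALID_MFN, order, p2m_invalid,
                  p2m->default_access);
    p2m->pod.entry_count -= 1UL << order;   /* decremented even on failure */
    BUG_ON(p2m->pod.entry_count < 0);
    ret = 1;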

Instead, check whether the p2m_set_entry() call succeeded, and return
false if not.  The caller will then call guest_remove_page() on the
gfns, which will return -EINVAL upon finding no valid memory there to
return.
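
The fallback path in the caller (decrease_reservation() in
xen/common/memory.c) looks roughly like the sketch below; names and
error handling are paraphrased rather than quoted, and the fragment is
abridged from inside the per-extent loop:

    /* Simplified sketch of the caller's per-extent fallback (paraphrased) */
    if ( is_hvm_domain(d) &&
         p2m_pod_decrease_reservation(d, _gfn(gmfn), extent_order) )
        continue;                        /* PoD handled the whole extent */

    /*
     * Otherwise remove the pages one by one; gfns with no valid memory
     * behind them make guest_remove_page() fail with -EINVAL.
     */
    for ( j = 0; j < (1UL << extent_order); j++ )
        if ( guest_remove_page(d, gmfn + j) )
            goto out;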

Unfortunately, if the order > 0, the entry may have been partially
changed.  A domain_crash() is probably the safest thing in that case.

Other p2m_set_entry() calls in the same function should be fine,
because they are writing the entry at its current order.  Nonetheless,
check the return value and crash if our assumption turns out to be
wrong.

This is part of XSA-247.

Signed-off-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index cc8e3fb845fc4a013aadd96a5832196d3da37496..e8d561b97e6ebd41f24d3afdf0dda49c6b5259c4 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -565,8 +565,18 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
          * All PoD: Mark the whole region invalid and tell caller
          * we're done.
          */
-        p2m_set_entry(p2m, gfn, INVALID_MFN, order, p2m_invalid,
-                      p2m->default_access);
+        if ( p2m_set_entry(p2m, gfn, INVALID_MFN, order, p2m_invalid,
+                           p2m->default_access) )
+        {
+            /*
+             * If this fails, we can't tell how much of the range was changed.
+             * Best to crash the domain unless we're sure a partial change is
+             * impossible.
+             */
+            if ( order != 0 )
+                domain_crash(d);
+            goto out_unlock;
+        }
         p2m->pod.entry_count -= 1UL << order;
         BUG_ON(p2m->pod.entry_count < 0);
         ret = 1;
@@ -609,8 +619,14 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
         n = 1UL << cur_order;
         if ( t == p2m_populate_on_demand )
         {
-            p2m_set_entry(p2m, gfn_add(gfn, i), INVALID_MFN, cur_order,
-                          p2m_invalid, p2m->default_access);
+            /* This shouldn't be able to fail */
+            if ( p2m_set_entry(p2m, gfn_add(gfn, i), INVALID_MFN, cur_order,
+                               p2m_invalid, p2m->default_access) )
+            {
+                ASSERT_UNREACHABLE();
+                domain_crash(d);
+                goto out_unlock;
+            }
             p2m->pod.entry_count -= n;
             BUG_ON(p2m->pod.entry_count < 0);
             pod -= n;
@@ -631,8 +647,14 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
 
             page = mfn_to_page(mfn);
 
-            p2m_set_entry(p2m, gfn_add(gfn, i), INVALID_MFN, cur_order,
-                          p2m_invalid, p2m->default_access);
+            /* This shouldn't be able to fail */
+            if ( p2m_set_entry(p2m, gfn_add(gfn, i), INVALID_MFN, cur_order,
+                               p2m_invalid, p2m->default_access) )
+            {
+                ASSERT_UNREACHABLE();
+                domain_crash(d);
+                goto out_unlock;
+            }
             p2m_tlb_flush_sync(p2m);
             for ( j = 0; j < n; ++j )
                 set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);