x86/p2m: adjust non-PoD accounting in p2m_pod_decrease_reservation()
author Jan Beulich <jbeulich@suse.com>
Fri, 21 Feb 2020 16:15:22 +0000 (17:15 +0100)
committer Jan Beulich <jbeulich@suse.com>
Fri, 21 Feb 2020 16:15:22 +0000 (17:15 +0100)
Throughout the function the equation

pod + nonpod == (1UL << order)

should hold. This has been violated by the final loop of the function:
* changing a range from a type other than p2m_populate_on_demand to
  p2m_invalid doesn't alter the amount of non-PoD pages in the region,
* changing a range from p2m_populate_on_demand to p2m_invalid does
  increase the amount of non-PoD pages in the region along with
  decreasing the amount of PoD pages there.
Fortunately the variable isn't used anymore after that loop. Rather than
correcting the updates to the "nonpod" variable, drop it altogether, so
that a future change cannot silently violate the above equation again.
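
As a standalone illustration (not part of the commit, and not Xen code:
the enum, array and names below are made up), the equivalence the patch
relies on can be sketched as follows: after the scan loop, i has advanced
over the whole region, i.e. i == (1UL << order), so with
pod + nonpod == (1UL << order) the old test "!nonpod" is interchangeable
with the new test "i == pod".

    /* Hypothetical, self-contained sketch; simplified per-entry scan. */
    #include <assert.h>
    #include <stdio.h>

    enum mock_type { MOCK_POD, MOCK_RAM, MOCK_OTHER }; /* stand-ins for p2m types */

    int main(void)
    {
        enum mock_type region[8] = { MOCK_POD, MOCK_RAM, MOCK_POD, MOCK_OTHER,
                                     MOCK_POD, MOCK_RAM, MOCK_RAM, MOCK_POD };
        unsigned int order = 3;                /* region covers 1UL << 3 = 8 entries */
        unsigned long i, pod = 0, nonpod = 0;

        /* Mirrors the accounting done by the function's scan loop. */
        for ( i = 0; i < (1UL << order); i++ )
        {
            if ( region[i] == MOCK_POD )
                pod++;
            else
                nonpod++;
        }

        assert(pod + nonpod == (1UL << order)); /* the invariant from above */
        assert((nonpod == 0) == (i == pod));    /* hence "!nonpod" <=> "i == pod" */
        printf("pod=%lu nonpod=%lu i=%lu\n", pod, nonpod, i);
        return 0;
    }

Expressing the all-PoD check as "i == pod" therefore needs no separately
maintained counter, which is what allows "nonpod" to be dropped below.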

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
xen/arch/x86/mm/p2m-pod.c

index f2c94095681d970fee93eaade7129b64bb33c8f2..2a7b8c117b8a93e766820b0eaed2fbba0c65b960 100644
@@ -509,7 +509,7 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
     unsigned long ret = 0, i, n;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     bool_t steal_for_cache;
-    long pod, nonpod, ram;
+    long pod = 0, ram = 0;
 
     gfn_lock(p2m, gfn, order);
     pod_lock(p2m);
@@ -524,8 +524,6 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
     if ( unlikely(d->is_dying) )
         goto out_unlock;
 
-    pod = nonpod = ram = 0;
-
     /* Figure out if we need to steal some freed memory for our cache */
     steal_for_cache =  ( p2m->pod.entry_count > p2m->pod.count );
 
@@ -539,19 +537,15 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
         n = 1UL << min(order, cur_order);
         if ( t == p2m_populate_on_demand )
             pod += n;
-        else
-        {
-            nonpod += n;
-            if ( p2m_is_ram(t) )
-                ram += n;
-        }
+        else if ( p2m_is_ram(t) )
+            ram += n;
     }
 
     /* No populate-on-demand?  Don't need to steal anything?  Then we're done!*/
     if ( !pod && !steal_for_cache )
         goto out_unlock;
 
-    if ( !nonpod )
+    if ( i == pod )
     {
         /*
          * All PoD: Mark the whole region invalid and tell caller
@@ -587,7 +581,7 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
          p2m_pod_zero_check_superpage(p2m, _gfn(gfn_x(gfn) & ~(SUPERPAGE_PAGES - 1))) )
     {
         pod = 1UL << order;
-        ram = nonpod = 0;
+        ram = 0;
         ASSERT(steal_for_cache == (p2m->pod.entry_count > p2m->pod.count));
     }
 
@@ -655,7 +649,6 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
 
             steal_for_cache =  ( p2m->pod.entry_count > p2m->pod.count );
 
-            nonpod -= n;
             ram -= n;
             ret += n;
         }