xen/x86: p2m-pod: Use typesafe gfn in p2m_pod_decrease_reservation
author    Julien Grall <julien.grall@arm.com>
          Mon, 2 Oct 2017 15:40:04 +0000 (16:40 +0100)
committer George Dunlap <george.dunlap@citrix.com>
          Mon, 2 Oct 2017 15:40:04 +0000 (16:40 +0100)
Signed-off-by: Julien Grall <julien.grall@arm.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
xen/arch/arm/p2m.c
xen/arch/x86/mm/p2m-pod.c
xen/common/memory.c
xen/include/asm-arm/p2m.h
xen/include/asm-x86/p2m.h
xen/include/xen/p2m-common.h

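This commit converts p2m_pod_decrease_reservation() from a raw xen_pfn_t to Xen's typesafe gfn_t. As a rough sketch of the idiom (illustrative member name, not a verbatim copy of xen/include/xen/typesafe.h): wrapping the raw frame number in a single-member struct turns accidental mixing of gfn_t, mfn_t and plain integers into a compile-time error, with _gfn()/gfn_x() as the explicit conversions at the boundaries:

    /* Sketch of the TYPE_SAFE() wrapper idiom; the member name is illustrative. */
    typedef struct { unsigned long g; } gfn_t;

    static inline gfn_t _gfn(unsigned long g)        /* wrap a raw value  */
    {
        return (gfn_t) { g };
    }

    static inline unsigned long gfn_x(gfn_t gfn)     /* unwrap explicitly */
    {
        return gfn.g;
    }

The hunks below follow exactly that pattern: the parameter becomes gfn_t, the call site in common/memory.c wraps with _gfn(), and the not-yet-converted p2m accessors (p2m->get_entry(), p2m_set_entry()) unwrap with gfn_x().
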
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 192a1c329daee34ea1e739e726c1a84c407924fb..0410b1e86b1de799d641e1f618af86097194bb0a 100644
@@ -393,8 +393,7 @@ int guest_physmap_mark_populate_on_demand(struct domain *d,
     return -ENOSYS;
 }
 
-int p2m_pod_decrease_reservation(struct domain *d,
-                                 xen_pfn_t gpfn,
+int p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
                                  unsigned int order)
 {
     return -ENOSYS;
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 34f5239b6d81f557b024bfb3a8e00b6da55b32f8..eb74e5c01fdfe4c8c611cb64ef04e30bdb36ce37 100644
@@ -511,9 +511,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn);
  * allow decrease_reservation() to handle everything else.
  */
 int
-p2m_pod_decrease_reservation(struct domain *d,
-                             xen_pfn_t gpfn,
-                             unsigned int order)
+p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
 {
     int ret = 0;
     unsigned long i, n;
@@ -521,7 +519,7 @@ p2m_pod_decrease_reservation(struct domain *d,
     bool_t steal_for_cache;
     long pod, nonpod, ram;
 
-    gfn_lock(p2m, gpfn, order);
+    gfn_lock(p2m, gfn, order);
     pod_lock(p2m);
 
     /*
@@ -545,7 +543,7 @@ p2m_pod_decrease_reservation(struct domain *d,
         p2m_type_t t;
         unsigned int cur_order;
 
-        p2m->get_entry(p2m, gpfn + i, &t, &a, 0, &cur_order, NULL);
+        p2m->get_entry(p2m, gfn_x(gfn) + i, &t, &a, 0, &cur_order, NULL);
         n = 1UL << min(order, cur_order);
         if ( t == p2m_populate_on_demand )
             pod += n;
@@ -567,7 +565,7 @@ p2m_pod_decrease_reservation(struct domain *d,
          * All PoD: Mark the whole region invalid and tell caller
          * we're done.
          */
-        p2m_set_entry(p2m, gpfn, INVALID_MFN, order, p2m_invalid,
+        p2m_set_entry(p2m, gfn_x(gfn), INVALID_MFN, order, p2m_invalid,
                       p2m->default_access);
         p2m->pod.entry_count -= 1UL << order;
         BUG_ON(p2m->pod.entry_count < 0);
@@ -584,7 +582,7 @@ p2m_pod_decrease_reservation(struct domain *d,
      * - not all of the pages were RAM (now knowing order < SUPERPAGE_ORDER)
      */
     if ( steal_for_cache && order < SUPERPAGE_ORDER && ram == (1UL << order) &&
-         p2m_pod_zero_check_superpage(p2m, gpfn & ~(SUPERPAGE_PAGES - 1)) )
+         p2m_pod_zero_check_superpage(p2m, gfn_x(gfn) & ~(SUPERPAGE_PAGES - 1)) )
     {
         pod = 1UL << order;
         ram = nonpod = 0;
@@ -605,13 +603,13 @@ p2m_pod_decrease_reservation(struct domain *d,
         p2m_access_t a;
         unsigned int cur_order;
 
-        mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, 0, &cur_order, NULL);
+        mfn = p2m->get_entry(p2m, gfn_x(gfn) + i, &t, &a, 0, &cur_order, NULL);
         if ( order < cur_order )
             cur_order = order;
         n = 1UL << cur_order;
         if ( t == p2m_populate_on_demand )
         {
-            p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
+            p2m_set_entry(p2m, gfn_x(gfn) + i, INVALID_MFN, cur_order,
                           p2m_invalid, p2m->default_access);
             p2m->pod.entry_count -= n;
             BUG_ON(p2m->pod.entry_count < 0);
@@ -633,7 +631,7 @@ p2m_pod_decrease_reservation(struct domain *d,
 
             page = mfn_to_page(mfn);
 
-            p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
+            p2m_set_entry(p2m, gfn_x(gfn) + i, INVALID_MFN, cur_order,
                           p2m_invalid, p2m->default_access);
             p2m_tlb_flush_sync(p2m);
             for ( j = 0; j < n; ++j )
@@ -663,7 +661,7 @@ out_entry_check:
 
 out_unlock:
     pod_unlock(p2m);
-    gfn_unlock(p2m, gpfn, order);
+    gfn_unlock(p2m, gfn, order);
     return ret;
 }
 
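The repeated gfn_x(gfn) + i above is the transitional pattern: the wrapper is only opened where p2m->get_entry() and p2m_set_entry() still take a raw unsigned long. Xen also provides a gfn_add() helper so that, once those interfaces are converted too, the arithmetic can stay typesafe end to end; a minimal sketch, consistent with the wrapper layout assumed earlier:

    static inline gfn_t gfn_add(gfn_t gfn, unsigned long i)
    {
        return _gfn(gfn_x(gfn) + i);
    }

    /*
     * The loop body could then read, with no raw unsigned long at all:
     *     p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0, &cur_order, NULL);
     */
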
diff --git a/xen/common/memory.c b/xen/common/memory.c
index a2abf554e369b53f616cd35098995e5da6fc29d2..ad987e0f29d378124e6cc236fb2c84408c03b008 100644
@@ -417,7 +417,8 @@ static void decrease_reservation(struct memop_args *a)
 
         /* See if populate-on-demand wants to handle this */
         if ( is_hvm_domain(a->domain)
-             && p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) )
+             && p2m_pod_decrease_reservation(a->domain, _gfn(gmfn),
+                                             a->extent_order) )
             continue;
 
         for ( j = 0; j < (1 << a->extent_order); j++ )
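In decrease_reservation(), gmfn is still the raw xen_pfn_t read from the XENMEM_decrease_reservation hypercall arguments, so _gfn() is applied exactly at the boundary into the typesafe interface. A minimal caller-side sketch of that pattern (names taken from the hunk above; the surrounding extent loop is elided):

    xen_pfn_t gmfn /* = copied from the guest's extent list */;

    if ( is_hvm_domain(a->domain)
         && p2m_pod_decrease_reservation(a->domain, _gfn(gmfn),
                                         a->extent_order) )
        continue;    /* PoD handled every entry; nothing more need be done */
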
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index bc5bbf0db781d91381bad768f4db155a8e006ff3..faadcfe8fe902c632d3eaf434624cb9f1b2326c4 100644
@@ -266,19 +266,6 @@ static inline int guest_physmap_add_page(struct domain *d,
 
 mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn);
 
-/*
- * Populate-on-demand
- */
-
-/*
- * Call when decreasing memory reservation to handle PoD entries properly.
- * Will return '1' if all entries were handled and nothing more need be done.
- */
-int
-p2m_pod_decrease_reservation(struct domain *d,
-                             xen_pfn_t gpfn,
-                             unsigned int order);
-
 /* Look up a GFN and take a reference count on the backing page. */
 typedef unsigned int p2m_query_t;
 #define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 10cdfc09a9e872bfd66c73330995bb9736adf9b9..8f3409b400457b032e78ac2d1aaa049bc87e67f0 100644
@@ -643,13 +643,6 @@ int p2m_pod_empty_cache(struct domain *d);
  * domain matches target */
 int p2m_pod_set_mem_target(struct domain *d, unsigned long target);
 
-/* Call when decreasing memory reservation to handle PoD entries properly.
- * Will return '1' if all entries were handled and nothing more need be done.*/
-int
-p2m_pod_decrease_reservation(struct domain *d,
-                             xen_pfn_t gpfn,
-                             unsigned int order);
-
 /* Scan pod cache when offline/broken page triggered */
 int
 p2m_pod_offline_or_broken_hit(struct page_info *p);
diff --git a/xen/include/xen/p2m-common.h b/xen/include/xen/p2m-common.h
index 2b5696cf334ef036ffea88fb31bcb533713deb65..27f89208f540d6bed8f383cce0746a7b479f798b 100644
@@ -20,4 +20,17 @@ int unmap_mmio_regions(struct domain *d,
                        unsigned long nr,
                        mfn_t mfn);
 
+/*
+ * Populate-on-Demand
+ */
+
+/*
+ * Call when decreasing memory reservation to handle PoD entries properly.
+ * Will return '1' if all entries were handled and nothing more need be done.
+ */
+int
+p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
+                             unsigned int order);
+
+
 #endif /* _XEN_P2M_COMMON_H */