x86/mem_sharing: drop flags from mem_sharing_unshare_page
author    Tamas K Lengyel <tamas.lengyel@intel.com>
          Fri, 24 Jan 2020 09:19:42 +0000 (10:19 +0100)
committer Jan Beulich <jbeulich@suse.com>
          Fri, 24 Jan 2020 09:19:42 +0000 (10:19 +0100)
All callers pass 0 in, so drop the flags argument from the
mem_sharing_unshare_page() wrapper and have the wrapper forward 0 to
__mem_sharing_unshare_page() itself.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
Reviewed-by: Wei Liu <wl@xen.org>
Acked-by: George Dunlap <george.dunlap@citrix.com>
xen/arch/x86/hvm/hvm.c
xen/arch/x86/mm/p2m.c
xen/common/memory.c
xen/include/asm-x86/mem_sharing.h

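For context, the patch turns mem_sharing_unshare_page() into a two-argument
wrapper that supplies flags = 0 itself. The stand-alone C sketch below is not
Xen code: struct domain and the worker function are stubs assumed purely for
illustration, with names mirroring the Xen helpers, to model the wrapper
pattern and the caller side visible in the hunks that follow.

#include <assert.h>
#include <errno.h>
#include <stdio.h>

struct domain { int domain_id; };  /* stand-in for Xen's struct domain */

/* Stub for the real worker, which keeps its flags argument. */
static int __mem_sharing_unshare_page(struct domain *d, unsigned long gfn,
                                      unsigned short flags)
{
    (void)d; (void)gfn; (void)flags;
    return 0;                      /* pretend the unshare succeeded */
}

/* The simplified wrapper: flags are fixed to 0 inside, as in this patch. */
static inline int mem_sharing_unshare_page(struct domain *d, unsigned long gfn)
{
    int rc = __mem_sharing_unshare_page(d, gfn, 0);

    assert(!rc || rc == -ENOMEM);  /* models the BUG_ON() in the header */
    return rc;
}

int main(void)
{
    struct domain dom = { .domain_id = 1 };

    /* Callers now simply omit the trailing 0. */
    if ( mem_sharing_unshare_page(&dom, 0x1234UL) < 0 )
        printf("unshare failed: would call mem_sharing_notify_enomem() here\n");
    else
        printf("unshare succeeded\n");

    return 0;
}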
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index b96fafed65e66c15a71e289a64d102ab6abcaf78..4d41a16e7500a23903c23d6e3660d6b9be5450b9 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1898,7 +1898,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     if ( npfec.write_access && (p2mt == p2m_ram_shared) )
     {
         ASSERT(p2m_is_hostp2m(p2m));
-        sharing_enomem = mem_sharing_unshare_page(currd, gfn, 0);
+        sharing_enomem = mem_sharing_unshare_page(currd, gfn);
         rc = 1;
         goto out_put_gfn;
     }
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 4599a0bc2498210b590ea85a6e3e7921e85e9843..49cc138362005dde28522b8b78060c7028bf7a16 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -516,7 +516,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l,
          * Try to unshare. If we fail, communicate ENOMEM without
          * sleeping.
          */
-        if ( mem_sharing_unshare_page(p2m->domain, gfn_l, 0) < 0 )
+        if ( mem_sharing_unshare_page(p2m->domain, gfn_l) < 0 )
             mem_sharing_notify_enomem(p2m->domain, gfn_l, false);
         mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL);
     }
@@ -897,8 +897,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
         {
             /* Do an unshare to cleanly take care of all corner cases. */
             int rc;
-            rc = mem_sharing_unshare_page(p2m->domain,
-                                          gfn_x(gfn_add(gfn, i)), 0);
+            rc = mem_sharing_unshare_page(p2m->domain, gfn_x(gfn_add(gfn, i)));
             if ( rc )
             {
                 p2m_unlock(p2m);
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 309e872edfa4a13865a9c33ddaad60255b410f24..c7d2bac45206171745688d19df67001e722ae204 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -352,7 +352,7 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
          * might be the only one using this shared page, and we need to
          * trigger proper cleanup. Once done, this is like any other page.
          */
-        rc = mem_sharing_unshare_page(d, gmfn, 0);
+        rc = mem_sharing_unshare_page(d, gmfn);
         if ( rc )
         {
             mem_sharing_notify_enomem(d, gmfn, false);
diff --git a/xen/include/asm-x86/mem_sharing.h b/xen/include/asm-x86/mem_sharing.h
index af2a1038b54b5aa45ff242dfcbe4e8763f0f96e6..cf7848709fd1c940164ff0bc993ad1021ccf2ee9 100644
--- a/xen/include/asm-x86/mem_sharing.h
+++ b/xen/include/asm-x86/mem_sharing.h
@@ -69,10 +69,9 @@ int __mem_sharing_unshare_page(struct domain *d,
                                uint16_t flags);
 
 static inline int mem_sharing_unshare_page(struct domain *d,
-                                           unsigned long gfn,
-                                           uint16_t flags)
+                                           unsigned long gfn)
 {
-    int rc = __mem_sharing_unshare_page(d, gfn, flags);
+    int rc = __mem_sharing_unshare_page(d, gfn, 0);
     BUG_ON(rc && (rc != -ENOMEM));
     return rc;
 }
@@ -115,8 +114,7 @@ static inline unsigned int mem_sharing_get_nr_shared_mfns(void)
     return 0;
 }
 
-static inline int mem_sharing_unshare_page(struct domain *d, unsigned long gfn,
-                                           uint16_t flags)
+static inline int mem_sharing_unshare_page(struct domain *d, unsigned long gfn)
 {
     ASSERT_UNREACHABLE();
     return -EOPNOTSUPP;