x86: make get_page_from_mfn() return struct page_info *
author     Jan Beulich <jbeulich@suse.com>
           Fri, 15 Dec 2017 10:13:49 +0000 (11:13 +0100)
committer  Jan Beulich <jbeulich@suse.com>
           Fri, 15 Dec 2017 10:13:49 +0000 (11:13 +0100)
Almost all of its users want the struct page_info pointer, and the function calculates it anyway.
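
As an illustration of the resulting calling convention (not part of this patch; the two callers below are hypothetical sketches assuming Xen's internal mm interfaces), the typical pattern changes from a boolean check plus a separate mfn_to_page() lookup to a single call that yields the page pointer:

    /* Hypothetical caller, old pattern: check, then look the page up again. */
    static int old_style_caller(mfn_t mfn, struct domain *d)
    {
        struct page_info *page;

        if ( !get_page_from_mfn(mfn, d) )
            return -EINVAL;
        page = mfn_to_page(mfn);
        /* ... use page ... */
        put_page(page);
        return 0;
    }

    /* Hypothetical caller, new pattern: reference and pointer from one call. */
    static int new_style_caller(mfn_t mfn, struct domain *d)
    {
        struct page_info *page = get_page_from_mfn(mfn, d);

        if ( unlikely(!page) )
            return -EINVAL;
        /* ... use page ... */
        put_page(page);
        return 0;
    }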

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
xen/arch/x86/mm.c
xen/arch/x86/pv/grant_table.c
xen/arch/x86/pv/ro-page-fault.c
xen/include/asm-x86/mm.h

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 0daedec10ddb5b8be9a0fcdbc2a84f419e50c888..3eb338036e717c1464a50b87ed949107293f7fb9 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -711,7 +711,6 @@ get_##level##_linear_pagetable(                                             \
     level##_pgentry_t pde, unsigned long pde_pfn, struct domain *d)         \
 {                                                                           \
     unsigned long x, y;                                                     \
-    struct page_info *page;                                                 \
     unsigned long pfn;                                                      \
                                                                             \
     if ( !opt_pv_linear_pt )                                                \
@@ -730,14 +729,15 @@ get_##level##_linear_pagetable(                                             \
                                                                             \
     if ( (pfn = level##e_get_pfn(pde)) != pde_pfn )                         \
     {                                                                       \
-        struct page_info *ptpg = mfn_to_page(_mfn(pde_pfn));                \
+        struct page_info *page, *ptpg = mfn_to_page(_mfn(pde_pfn));         \
                                                                             \
         /* Make sure the page table belongs to the correct domain. */       \
         if ( unlikely(page_get_owner(ptpg) != d) )                          \
             return 0;                                                       \
                                                                             \
         /* Make sure the mapped frame belongs to the correct domain. */     \
-        if ( unlikely(!get_page_from_mfn(_mfn(pfn), d)) )                   \
+        page = get_page_from_mfn(_mfn(pfn), d);                             \
+        if ( unlikely(!page) )                                              \
             return 0;                                                       \
                                                                             \
         /*                                                                  \
@@ -747,7 +747,6 @@ get_##level##_linear_pagetable(                                             \
          * elsewhere.                                                       \
          * If so, atomically increment the count (checking for overflow).   \
          */                                                                 \
-        page = mfn_to_page(_mfn(pfn));                                      \
         if ( !inc_linear_entries(ptpg) )                                    \
         {                                                                   \
             put_page(page);                                                 \
@@ -3730,7 +3729,8 @@ long do_mmu_update(
                 xsm_checked = xsm_needed;
             }
 
-            if ( unlikely(!get_page_from_mfn(_mfn(mfn), pg_owner)) )
+            page = get_page_from_mfn(_mfn(mfn), pg_owner);
+            if ( unlikely(!page) )
             {
                 gdprintk(XENLOG_WARNING,
                          "Could not get page for mach->phys update\n");
@@ -3742,7 +3742,7 @@ long do_mmu_update(
 
             paging_mark_dirty(pg_owner, _mfn(mfn));
 
-            put_page(mfn_to_page(_mfn(mfn)));
+            put_page(page);
             break;
 
         default:
@@ -3927,10 +3927,10 @@ static int __do_update_va_mapping(
 
     rc = -EINVAL;
     pl1e = map_guest_l1e(va, &gl1mfn);
-    if ( unlikely(!pl1e || !get_page_from_mfn(gl1mfn, d)) )
+    gl1pg = pl1e ? get_page_from_mfn(gl1mfn, d) : NULL;
+    if ( unlikely(!gl1pg) )
         goto out;
 
-    gl1pg = mfn_to_page(gl1mfn);
     if ( !page_lock(gl1pg) )
     {
         put_page(gl1pg);
@@ -4126,10 +4126,10 @@ int xenmem_add_to_physmap_one(
                 put_gfn(d, gfn);
                 return -ENOMEM;
             }
-            if ( !get_page_from_mfn(_mfn(idx), d) )
-                break;
             mfn = _mfn(idx);
-            page = mfn_to_page(mfn);
+            page = get_page_from_mfn(mfn, d);
+            if ( unlikely(!page) )
+                mfn = INVALID_MFN;
             break;
         }
         case XENMAPSPACE_gmfn_foreign:
diff --git a/xen/arch/x86/pv/grant_table.c b/xen/arch/x86/pv/grant_table.c
index aaca228c6b96952ddf68045ecc7fc64f0395f23c..4dbc550366000191c98373b847129a02a03d8f77 100644
--- a/xen/arch/x86/pv/grant_table.c
+++ b/xen/arch/x86/pv/grant_table.c
@@ -80,7 +80,8 @@ int create_grant_pv_mapping(uint64_t addr, unsigned long frame,
 
         gl1mfn = _mfn(addr >> PAGE_SHIFT);
 
-        if ( !get_page_from_mfn(gl1mfn, currd) )
+        page = get_page_from_mfn(gl1mfn, currd);
+        if ( !page )
             goto out;
 
         pl1e = map_domain_page(gl1mfn) + (addr & ~PAGE_MASK);
@@ -101,11 +102,11 @@ int create_grant_pv_mapping(uint64_t addr, unsigned long frame,
             goto out;
         }
 
-        if ( !get_page_from_mfn(gl1mfn, currd) )
+        page = get_page_from_mfn(gl1mfn, currd);
+        if ( !page )
             goto out_unmap;
     }
 
-    page = mfn_to_page(gl1mfn);
     if ( !page_lock(page) )
         goto out_put;
 
@@ -159,10 +160,10 @@ static bool steal_linear_address(unsigned long linear, l1_pgentry_t *out)
         goto out;
     }
 
-    if ( !get_page_from_mfn(gl1mfn, currd) )
+    page = get_page_from_mfn(gl1mfn, currd);
+    if ( !page )
         goto out_unmap;
 
-    page = mfn_to_page(gl1mfn);
     if ( !page_lock(page) )
         goto out_put;
 
@@ -235,7 +236,8 @@ int replace_grant_pv_mapping(uint64_t addr, unsigned long frame,
 
         gl1mfn = _mfn(addr >> PAGE_SHIFT);
 
-        if ( !get_page_from_mfn(gl1mfn, currd) )
+        page = get_page_from_mfn(gl1mfn, currd);
+        if ( !page )
             goto out;
 
         pl1e = map_domain_page(gl1mfn) + (addr & ~PAGE_MASK);
@@ -263,12 +265,11 @@ int replace_grant_pv_mapping(uint64_t addr, unsigned long frame,
         if ( !pl1e )
             goto out;
 
-        if ( !get_page_from_mfn(gl1mfn, currd) )
+        page = get_page_from_mfn(gl1mfn, currd);
+        if ( !page )
             goto out_unmap;
     }
 
-    page = mfn_to_page(gl1mfn);
-
     if ( !page_lock(page) )
         goto out_put;
 
diff --git a/xen/arch/x86/pv/ro-page-fault.c b/xen/arch/x86/pv/ro-page-fault.c
index 6b2976d3df675afff626ea0edc19bce328579580..7e0e7e8dfcfc168cbdbccffdcef0d35efc512786 100644
--- a/xen/arch/x86/pv/ro-page-fault.c
+++ b/xen/arch/x86/pv/ro-page-fault.c
@@ -253,10 +253,10 @@ static int ptwr_do_page_fault(struct x86_emulate_ctxt *ctxt,
     struct page_info *page;
     int rc;
 
-    if ( !get_page_from_mfn(l1e_get_mfn(pte), current->domain) )
+    page = get_page_from_mfn(l1e_get_mfn(pte), current->domain);
+    if ( !page )
         return X86EMUL_UNHANDLEABLE;
 
-    page = l1e_get_page(pte);
     if ( !page_lock(page) )
     {
         put_page(page);
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 7e9f306ff57de4fa003b95ba406725cb6f36d138..4af6b2341ab7c4043e1b65ae12ff42a6e85c7325 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -382,7 +382,7 @@ int  get_page_from_l1e(
     l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner);
 void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner);
 
-static inline bool get_page_from_mfn(mfn_t mfn, struct domain *d)
+static inline struct page_info *get_page_from_mfn(mfn_t mfn, struct domain *d)
 {
     struct page_info *page = __mfn_to_page(mfn_x(mfn));
 
@@ -390,10 +390,10 @@ static inline bool get_page_from_mfn(mfn_t mfn, struct domain *d)
     {
         gdprintk(XENLOG_WARNING,
                  "Could not get page ref for mfn %"PRI_mfn"\n", mfn_x(mfn));
-        return false;
+        return NULL;
     }
 
-    return true;
+    return page;
 }
 
 static inline void put_page_and_type(struct page_info *page)