Replace redundant code with a few new vm_page_grab facilities:
author     jeff <jeff@FreeBSD.org>    Tue, 10 Sep 2019 19:08:01 +0000 (19:08 +0000)
committer  jeff <jeff@FreeBSD.org>    Tue, 10 Sep 2019 19:08:01 +0000 (19:08 +0000)
 - VM_ALLOC_NOCREAT will grab without creating a page.
 - vm_page_grab_valid() will grab and page in if necessary.
 - vm_page_busy_acquire() automates some busy acquire loops.

Discussed with: alc, kib, markj
Tested by: pho (part of larger branch)
Sponsored by: Netflix
Differential Revision: https://reviews.freebsd.org/D21546
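
For illustration, a minimal sketch of how a converted caller might use the two
new facilities, in the style of the vm_glue.c, uipc_shm.c and ttm conversions
below.  The example_* names are hypothetical and the usual sys/ and vm/ headers
are assumed; only vm_page_grab_valid(), vm_page_busy_acquire() and the
VM_ALLOC_* flags come from this change:

        /*
         * Grab a fully valid, wired page at byte offset "off", paging it
         * in from the backing store if necessary.  The page is returned
         * unbusied because VM_ALLOC_NOBUSY is passed (cf. the
         * vm_imgact_hold_page() and uiomove_object_page() hunks below).
         */
        static vm_page_t
        example_hold_valid_page(vm_object_t obj, vm_ooffset_t off)
        {
                vm_page_t m;
                int rv;

                VM_OBJECT_WLOCK(obj);
                rv = vm_page_grab_valid(&m, obj, OFF_TO_IDX(off),
                    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY);
                VM_OBJECT_WUNLOCK(obj);
                return (rv == VM_PAGER_OK ? m : NULL);
        }

        /*
         * Take the exclusive busy lock, replacing an open-coded
         * vm_page_sleep_if_busy() retry loop.  With VM_ALLOC_WAITFAIL a
         * failed acquire means the routine slept (and may have dropped and
         * reacquired the object lock), so the caller must redo its lookup.
         */
        static bool
        example_busy_page(vm_page_t m)
        {
                if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
                        return (false);         /* slept; caller retries */
                /* ... operate on the exclusively busied page ... */
                vm_page_xunbusy(m);
                return (true);
        }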

sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
sys/compat/linuxkpi/common/src/linux_page.c
sys/dev/drm2/ttm/ttm_bo_vm.c
sys/dev/drm2/ttm/ttm_tt.c
sys/dev/xen/gntdev/gntdev.c
sys/dev/xen/privcmd/privcmd.c
sys/kern/uipc_shm.c
sys/vm/vm_glue.c
sys/vm/vm_object.c
sys/vm/vm_page.c
sys/vm/vm_page.h

sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
index aa4992ba2d816938f76beba8c5a686aa02d1f599..d79cd2a2c0caf4a89f06ef79f3a73432edbe2a41 100644 (file)
@@ -412,33 +412,14 @@ page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
        obj = vp->v_object;
        zfs_vmobject_assert_wlocked(obj);
 
-       for (;;) {
-               if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
-                   pp->valid) {
-                       if (vm_page_xbusied(pp)) {
-                               /*
-                                * Reference the page before unlocking and
-                                * sleeping so that the page daemon is less
-                                * likely to reclaim it.
-                                */
-                               vm_page_reference(pp);
-                               vm_page_sleep_if_xbusy(pp, "zfsmwb");
-                               continue;
-                       }
-                       vm_page_sbusy(pp);
-               } else if (pp != NULL) {
-                       ASSERT(!pp->valid);
-                       pp = NULL;
-               }
-
-               if (pp != NULL) {
-                       ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
-                       vm_object_pip_add(obj, 1);
-                       pmap_remove_write(pp);
-                       if (nbytes != 0)
-                               vm_page_clear_dirty(pp, off, nbytes);
-               }
-               break;
+       vm_page_grab_valid(&pp, obj, OFF_TO_IDX(start), VM_ALLOC_NOCREAT |
+           VM_ALLOC_SBUSY | VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
+       if (pp != NULL) {
+               ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
+               vm_object_pip_add(obj, 1);
+               pmap_remove_write(pp);
+               if (nbytes != 0)
+                       vm_page_clear_dirty(pp, off, nbytes);
        }
        return (pp);
 }
@@ -455,32 +436,14 @@ static vm_page_t
 page_wire(vnode_t *vp, int64_t start)
 {
        vm_object_t obj;
-       vm_page_t pp;
+       vm_page_t m;
 
        obj = vp->v_object;
        zfs_vmobject_assert_wlocked(obj);
 
-       for (;;) {
-               if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
-                   pp->valid) {
-                       if (vm_page_xbusied(pp)) {
-                               /*
-                                * Reference the page before unlocking and
-                                * sleeping so that the page daemon is less
-                                * likely to reclaim it.
-                                */
-                               vm_page_reference(pp);
-                               vm_page_sleep_if_xbusy(pp, "zfsmwb");
-                               continue;
-                       }
-
-                       ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
-                       vm_page_wire(pp);
-               } else
-                       pp = NULL;
-               break;
-       }
-       return (pp);
+       vm_page_grab_valid(&m, obj, OFF_TO_IDX(start), VM_ALLOC_NOCREAT |
+           VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOBUSY);
+       return (m);
 }
 
 static void
sys/compat/linuxkpi/common/src/linux_page.c
index 83f87d5f3318702d274b1ad5186fb98a822ce0b2..ac8b1a2781aac6ec5d4d320cdb8a0832ce99d142 100644 (file)
@@ -286,27 +286,11 @@ linux_shmem_read_mapping_page_gfp(vm_object_t obj, int pindex, gfp_t gfp)
                panic("GFP_NOWAIT is unimplemented");
 
        VM_OBJECT_WLOCK(obj);
-       page = vm_page_grab(obj, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
-           VM_ALLOC_WIRED);
-       if (page->valid != VM_PAGE_BITS_ALL) {
-               vm_page_xbusy(page);
-               if (vm_pager_has_page(obj, pindex, NULL, NULL)) {
-                       rv = vm_pager_get_pages(obj, &page, 1, NULL, NULL);
-                       if (rv != VM_PAGER_OK) {
-                               vm_page_unwire_noq(page);
-                               vm_page_free(page);
-                               VM_OBJECT_WUNLOCK(obj);
-                               return (ERR_PTR(-EINVAL));
-                       }
-                       MPASS(page->valid == VM_PAGE_BITS_ALL);
-               } else {
-                       pmap_zero_page(page);
-                       page->valid = VM_PAGE_BITS_ALL;
-                       page->dirty = 0;
-               }
-               vm_page_xunbusy(page);
-       }
+       rv = vm_page_grab_valid(&page, obj, pindex, VM_ALLOC_NORMAL |
+           VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
        VM_OBJECT_WUNLOCK(obj);
+       if (rv != VM_PAGER_OK)
+               return (ERR_PTR(-EINVAL));
        return (page);
 }
 
sys/dev/drm2/ttm/ttm_bo_vm.c
index 6bda57e7c973976797da0c2df481b67502b28926..3538831bbdfdd22f9e03eed05b412a9e59c87003 100644 (file)
@@ -231,8 +231,7 @@ reserve:
        }
 
        VM_OBJECT_WLOCK(vm_obj);
-       if (vm_page_busied(m)) {
-               vm_page_sleep_if_busy(m, "ttmpbs");
+       if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0) {
                ttm_mem_io_unlock(man);
                ttm_bo_unreserve(bo);
                goto retry;
@@ -240,6 +239,7 @@ reserve:
        m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
        if (m1 == NULL) {
                if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) {
+                       vm_page_xunbusy(m);
                        VM_OBJECT_WUNLOCK(vm_obj);
                        vm_wait(vm_obj);
                        VM_OBJECT_WLOCK(vm_obj);
@@ -253,7 +253,6 @@ reserve:
                    bo, m, m1, (uintmax_t)offset));
        }
        m->valid = VM_PAGE_BITS_ALL;
-       vm_page_xbusy(m);
        if (*mres != NULL) {
                KASSERT(*mres != m, ("losing %p %p", *mres, m));
                vm_page_free(*mres);
@@ -375,7 +374,7 @@ retry:
                m = vm_page_lookup(vm_obj, i);
                if (m == NULL)
                        continue;
-               if (vm_page_sleep_if_busy(m, "ttm_unm"))
+               if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
                        goto retry;
                cdev_pager_free_page(vm_obj, m);
        }
sys/dev/drm2/ttm/ttm_tt.c
index 82aaddf4b1d43832542cfc6db3e1c7b536e093a3..ec3aed665e3b40515dd18d6126d54175607ecc12 100644 (file)
@@ -288,20 +288,12 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
        VM_OBJECT_WLOCK(obj);
        vm_object_pip_add(obj, 1);
        for (i = 0; i < ttm->num_pages; ++i) {
-               from_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL);
-               if (from_page->valid != VM_PAGE_BITS_ALL) {
-                       if (vm_pager_has_page(obj, i, NULL, NULL)) {
-                               rv = vm_pager_get_pages(obj, &from_page, 1,
-                                   NULL, NULL);
-                               if (rv != VM_PAGER_OK) {
-                                       vm_page_free(from_page);
-                                       ret = -EIO;
-                                       goto err_ret;
-                               }
-                       } else
-                               vm_page_zero_invalid(from_page, TRUE);
+               rv = vm_page_grab_valid(&from_page, obj, i,
+                   VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
+               if (rv != VM_PAGER_OK) {
+                       ret = -EIO;
+                       goto err_ret;
                }
-               vm_page_xunbusy(from_page);
                to_page = ttm->pages[i];
                if (unlikely(to_page == NULL)) {
                        ret = -ENOMEM;
sys/dev/xen/gntdev/gntdev.c
index 667d46f333b36ef20100c37b73fd39c7c655051f..3ad4072e3ea27967d404ac5ee52615d48216039e 100644 (file)
@@ -606,7 +606,7 @@ retry:
                m = vm_page_lookup(gmap->map->mem, i);
                if (m == NULL)
                        continue;
-               if (vm_page_sleep_if_busy(m, "pcmdum"))
+               if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
                        goto retry;
                cdev_pager_free_page(gmap->map->mem, m);
        }
sys/dev/xen/privcmd/privcmd.c
index 3b6b2033e80f045fb61779e6c698bf43015b9bfc..691229018f2e5869eb0422b42f93a37125881944 100644 (file)
@@ -128,7 +128,7 @@ retry:
                        m = vm_page_lookup(map->mem, i);
                        if (m == NULL)
                                continue;
-                       if (vm_page_sleep_if_busy(m, "pcmdum"))
+                       if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
                                goto retry;
                        cdev_pager_free_page(map->mem, m);
                }
sys/kern/uipc_shm.c
index 4c5e67524a16f478c7b20d5ce1338172bb50bb4f..415104ec8e8dea1794afda79568940b0461358b7 100644 (file)
@@ -188,24 +188,13 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
         * lock to page out tobj's pages because tobj is a OBJT_SWAP
         * type object.
         */
-       m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
-           VM_ALLOC_WIRED);
-       if (m->valid != VM_PAGE_BITS_ALL) {
-               vm_page_xbusy(m);
-               if (vm_pager_has_page(obj, idx, NULL, NULL)) {
-                       rv = vm_pager_get_pages(obj, &m, 1, NULL, NULL);
-                       if (rv != VM_PAGER_OK) {
-                               printf(
-           "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
-                                   obj, idx, m->valid, rv);
-                               vm_page_unwire_noq(m);
-                               vm_page_free(m);
-                               VM_OBJECT_WUNLOCK(obj);
-                               return (EIO);
-                       }
-               } else
-                       vm_page_zero_invalid(m, TRUE);
-               vm_page_xunbusy(m);
+       rv = vm_page_grab_valid(&m, obj, idx,
+           VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY);
+       if (rv != VM_PAGER_OK) {
+               VM_OBJECT_WUNLOCK(obj);
+               printf("uiomove_object: vm_obj %p idx %jd pager error %d\n",
+                   obj, idx, rv);
+               return (EIO);
        }
        VM_OBJECT_WUNLOCK(obj);
        error = uiomove_fromphys(&m, offset, tlen, uio);
sys/vm/vm_glue.c
index e531b13aef2c31af71a149a315224c7768effb40..be4c85c200001d38cc914480396ea2ad518250fc 100644 (file)
@@ -219,24 +219,11 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
 {
        vm_page_t m;
        vm_pindex_t pindex;
-       int rv;
 
-       VM_OBJECT_WLOCK(object);
        pindex = OFF_TO_IDX(offset);
-       m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
-           VM_ALLOC_WIRED);
-       if (m->valid != VM_PAGE_BITS_ALL) {
-               vm_page_xbusy(m);
-               rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
-               if (rv != VM_PAGER_OK) {
-                       vm_page_unwire_noq(m);
-                       vm_page_free(m);
-                       m = NULL;
-                       goto out;
-               }
-               vm_page_xunbusy(m);
-       }
-out:
+       VM_OBJECT_WLOCK(object);
+       (void)vm_page_grab_valid(&m, object, pindex,
+           VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
        VM_OBJECT_WUNLOCK(object);
        return (m);
 }
sys/vm/vm_object.c
index c384c029a4bcace19d2048aba897e1dd5493086d..b65a74d136c652ab51f944dcb67cc8deab8e543b 100644 (file)
@@ -1954,14 +1954,10 @@ vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 
        VM_OBJECT_ASSERT_WLOCKED(object);
        for (pindex = start; pindex < end; pindex++) {
-               m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
-               if (m->valid != VM_PAGE_BITS_ALL) {
-                       rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
-                       if (rv != VM_PAGER_OK) {
-                               vm_page_free(m);
-                               break;
-                       }
-               }
+               rv = vm_page_grab_valid(&m, object, pindex, VM_ALLOC_NORMAL);
+               if (rv != VM_PAGER_OK)
+                       break;
+
                /*
                 * Keep "m" busy because a subsequent iteration may unlock
                 * the object.
sys/vm/vm_page.c
index e26727d2192887b1062338f850fe017ffb03ba8e..99c3abe1f9e72cce25b7df467ebb079a1639d370 100644 (file)
@@ -865,6 +865,66 @@ vm_page_reference(vm_page_t m)
        vm_page_aflag_set(m, PGA_REFERENCED);
 }
 
+/*
+ *     vm_page_busy_acquire:
+ *
+ *     Acquire the busy lock as described by VM_ALLOC_* flags.  Will loop
+ *     and drop the object lock if necessary.
+ */
+int
+vm_page_busy_acquire(vm_page_t m, int allocflags)
+{
+       vm_object_t obj;
+       u_int x;
+       bool locked;
+
+       /*
+        * The page-specific object must be cached because page
+        * identity can change during the sleep, causing the
+        * re-lock of a different object.
+        * It is assumed that a reference to the object is already
+        * held by the callers.
+        */
+       obj = m->object;
+       for (;;) {
+               if ((allocflags & VM_ALLOC_SBUSY) == 0) {
+                       if (vm_page_tryxbusy(m))
+                               return (TRUE);
+               } else {
+                       if (vm_page_trysbusy(m))
+                               return (TRUE);
+               }
+               if ((allocflags & VM_ALLOC_NOWAIT) != 0)
+                       return (FALSE);
+               if (obj != NULL) {
+                       locked = VM_OBJECT_WOWNED(obj);
+               } else {
+                       MPASS(vm_page_wired(m));
+                       locked = FALSE;
+               }
+               sleepq_lock(m);
+               x = m->busy_lock;
+               if (x == VPB_UNBUSIED ||
+                   ((allocflags & VM_ALLOC_SBUSY) != 0 &&
+                   (x & VPB_BIT_SHARED) != 0) ||
+                   ((x & VPB_BIT_WAITERS) == 0 &&
+                   !atomic_cmpset_int(&m->busy_lock, x,
+                   x | VPB_BIT_WAITERS))) {
+                       sleepq_release(m);
+                       continue;
+               }
+               if (locked)
+                       VM_OBJECT_WUNLOCK(obj);
+               sleepq_add(m, NULL, "vmpba", 0, 0);
+               sleepq_wait(m, PVM);
+               if (locked)
+                       VM_OBJECT_WLOCK(obj);
+               MPASS(m->object == obj || m->object == NULL);
+               if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
+                       return (FALSE);
+       }
+}
+
 /*
  *     vm_page_busy_downgrade:
  *
@@ -4046,10 +4106,13 @@ retrylookup:
                         * sleeping so that the page daemon is less
                         * likely to reclaim it.
                         */
-                       vm_page_aflag_set(m, PGA_REFERENCED);
+                       if ((allocflags & VM_ALLOC_NOCREAT) == 0)
+                               vm_page_aflag_set(m, PGA_REFERENCED);
                        vm_page_busy_sleep(m, "pgrbwt", (allocflags &
                            VM_ALLOC_IGN_SBUSY) != 0);
                        VM_OBJECT_WLOCK(object);
+                       if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
+                               return (NULL);
                        goto retrylookup;
                } else {
                        if ((allocflags & VM_ALLOC_WIRED) != 0)
@@ -4057,11 +4120,13 @@ retrylookup:
                        if ((allocflags &
                            (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
                                vm_page_xbusy(m);
-                       if ((allocflags & VM_ALLOC_SBUSY) != 0)
+                       else if ((allocflags & VM_ALLOC_SBUSY) != 0)
                                vm_page_sbusy(m);
                        return (m);
                }
        }
+       if ((allocflags & VM_ALLOC_NOCREAT) != 0)
+               return (NULL);
        m = vm_page_alloc(object, pindex, pflags);
        if (m == NULL) {
                if ((allocflags & VM_ALLOC_NOWAIT) != 0)
@@ -4073,6 +4138,109 @@ retrylookup:
        return (m);
 }
 
+/*
+ * Grab a page and make it valid, paging in if necessary.  Pages missing from
+ * their pager are zero filled and validated.
+ */
+int
+vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex, int allocflags)
+{
+       vm_page_t m;
+       bool sleep, xbusy;
+       int pflags;
+       int rv;
+
+       KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
+           (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
+           ("vm_page_grab_valid: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
+       KASSERT((allocflags &
+           (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
+           ("vm_page_grab_valid: Invalid flags 0x%X", allocflags));
+       VM_OBJECT_ASSERT_WLOCKED(object);
+       pflags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY);
+       pflags |= VM_ALLOC_WAITFAIL;
+
+retrylookup:
+       xbusy = false;
+       if ((m = vm_page_lookup(object, pindex)) != NULL) {
+               /*
+                * If the page is fully valid it can only become invalid
+                * with the object lock held.  If it is not valid it can
+                * become valid with the busy lock held.  Therefore, we
+                * may unnecessarily lock the exclusive busy here if we
+                * race with I/O completion not using the object lock.
+                * However, we will not end up with an invalid page and a
+                * shared lock.
+                */
+               if (m->valid != VM_PAGE_BITS_ALL ||
+                   (allocflags & (VM_ALLOC_IGN_SBUSY | VM_ALLOC_SBUSY)) == 0) {
+                       sleep = !vm_page_tryxbusy(m);
+                       xbusy = true;
+               } else
+                       sleep = !vm_page_trysbusy(m);
+               if (sleep) {
+                       /*
+                        * Reference the page before unlocking and
+                        * sleeping so that the page daemon is less
+                        * likely to reclaim it.
+                        */
+                       if ((allocflags & VM_ALLOC_NOCREAT) == 0)
+                               vm_page_aflag_set(m, PGA_REFERENCED);
+                       vm_page_busy_sleep(m, "pgrbwt", (allocflags &
+                           VM_ALLOC_IGN_SBUSY) != 0);
+                       VM_OBJECT_WLOCK(object);
+                       goto retrylookup;
+               }
+               if ((allocflags & VM_ALLOC_NOCREAT) != 0 &&
+                  m->valid != VM_PAGE_BITS_ALL) {
+                       if (xbusy)
+                               vm_page_xunbusy(m);
+                       else
+                               vm_page_sunbusy(m);
+                       *mp = NULL;
+                       return (VM_PAGER_FAIL);
+               }
+               if ((allocflags & VM_ALLOC_WIRED) != 0)
+                       vm_page_wire(m);
+               if (m->valid == VM_PAGE_BITS_ALL)
+                       goto out;
+       } else if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
+               *mp = NULL;
+               return (VM_PAGER_FAIL);
+       } else if ((m = vm_page_alloc(object, pindex, pflags)) != NULL) {
+               xbusy = true;
+       } else {
+               goto retrylookup;
+       }
+
+       vm_page_assert_xbusied(m);
+       MPASS(xbusy);
+       if (vm_pager_has_page(object, pindex, NULL, NULL)) {
+               rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
+               if (rv != VM_PAGER_OK) {
+                       if (allocflags & VM_ALLOC_WIRED)
+                               vm_page_unwire_noq(m);
+                       vm_page_free(m);
+                       *mp = NULL;
+                       return (rv);
+               }
+               MPASS(m->valid == VM_PAGE_BITS_ALL);
+       } else {
+               vm_page_zero_invalid(m, TRUE);
+       }
+out:
+       if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
+               if (xbusy)
+                       vm_page_xunbusy(m);
+               else
+                       vm_page_sunbusy(m);
+       }
+       if ((allocflags & VM_ALLOC_SBUSY) != 0 && xbusy)
+               vm_page_busy_downgrade(m);
+       *mp = m;
+       return (VM_PAGER_OK);
+}
+
 /*
  * Return the specified range of pages from the given object.  For each
  * page offset within the range, if a page already exists within the object
@@ -4143,7 +4311,8 @@ retrylookup:
                                 * sleeping so that the page daemon is less
                                 * likely to reclaim it.
                                 */
-                               vm_page_aflag_set(m, PGA_REFERENCED);
+                               if ((allocflags & VM_ALLOC_NOCREAT) == 0)
+                                       vm_page_aflag_set(m, PGA_REFERENCED);
                                vm_page_busy_sleep(m, "grbmaw", (allocflags &
                                    VM_ALLOC_IGN_SBUSY) != 0);
                                VM_OBJECT_WLOCK(object);
@@ -4157,6 +4326,8 @@ retrylookup:
                        if ((allocflags & VM_ALLOC_SBUSY) != 0)
                                vm_page_sbusy(m);
                } else {
+                       if ((allocflags & VM_ALLOC_NOCREAT) != 0)
+                               break;
                        m = vm_page_alloc_after(object, pindex + i,
                            pflags | VM_ALLOC_COUNT(count - i), mpred);
                        if (m == NULL) {
sys/vm/vm_page.h
index 84654e20b3e6f147a6fcad3a137acb7dff04f5e2..0c3f3a9bade2ea3a94ceac4553d51f2b1b27f2a3 100644 (file)
@@ -497,6 +497,7 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
 #define        VM_ALLOC_ZERO           0x0040  /* (acfgp) Allocate a prezeroed page */
 #define        VM_ALLOC_NOOBJ          0x0100  /* (acg) No associated object */
 #define        VM_ALLOC_NOBUSY         0x0200  /* (acgp) Do not excl busy the page */
+#define        VM_ALLOC_NOCREAT        0x0400  /* (gp) Don't create a page */
 #define        VM_ALLOC_IGN_SBUSY      0x1000  /* (gp) Ignore shared busy flag */
 #define        VM_ALLOC_NODUMP         0x2000  /* (ag) don't include in dump */
 #define        VM_ALLOC_SBUSY          0x4000  /* (acgp) Shared busy the page */
@@ -539,6 +540,7 @@ malloc2vm_flags(int malloc_flags)
 #define        PS_ALL_VALID    0x2
 #define        PS_NONE_BUSY    0x4
 
+int vm_page_busy_acquire(vm_page_t m, int allocflags);
 void vm_page_busy_downgrade(vm_page_t m);
 void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
 void vm_page_free(vm_page_t m);
@@ -565,6 +567,8 @@ void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
 vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
 int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
     vm_page_t *ma, int count);
+int vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
+    int allocflags);
 void vm_page_deactivate(vm_page_t);
 void vm_page_deactivate_noreuse(vm_page_t);
 void vm_page_dequeue(vm_page_t m);