static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
-static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
static void pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask);
static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
struct rwlock **lockp);
struct spglist *free, struct rwlock **lockp);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
-static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
+static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
struct spglist *free);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
}
/*
- * Looks for a page table page mapping the specified virtual address in the
- * specified pmap's collection of idle page table pages. Returns NULL if there
- * is no page table page corresponding to the specified virtual address.
+ * Removes the page table page mapping the specified virtual address from the
+ * specified pmap's collection of idle page table pages, and returns it if the
+ * mapping exists.  Otherwise, returns NULL.
*/
static __inline vm_page_t
-pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
+pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
{
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
- return (vm_radix_lookup(&pmap->pm_root, pmap_pde_pindex(va)));
-}
-
-/*
- * Removes the specified page table page from the specified pmap's collection
- * of idle page table pages. The specified page table page must be a member of
- * the pmap's collection.
- */
-static __inline void
-pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
-{
-
- PMAP_LOCK_ASSERT(pmap, MA_OWNED);
- vm_radix_remove(&pmap->pm_root, mpte->pindex);
+ return (vm_radix_remove(&pmap->pm_root, pmap_pde_pindex(va)));
}
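/*
 * Illustrative sketch (hypothetical caller fragment, not taken from this
 * change): with the merged interface, the former lookup-then-remove pair
 * collapses into a single trie operation whose return value doubles as the
 * presence check.
 */
	vm_page_t mpte;

	/* Old interface: two radix trie walks under the pmap lock. */
	mpte = pmap_lookup_pt_page(pmap, va);
	if (mpte != NULL)
		pmap_remove_pt_page(pmap, mpte);

	/* New interface: one walk; NULL means no idle page table page. */
	mpte = pmap_remove_pt_page(pmap, va);
	if (mpte != NULL)
		pmap_resident_count_dec(pmap, 1);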
/*
oldpde = *pde;
KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
- if ((oldpde & PG_A) != 0 && (mpte = pmap_lookup_pt_page(pmap, va)) !=
- NULL)
- pmap_remove_pt_page(pmap, mpte);
- else {
+ if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
+ NULL) {
KASSERT((oldpde & PG_W) == 0,
("pmap_demote_pde: page table page for a wired mapping"
" is missing"));
KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
- mpte = pmap_lookup_pt_page(pmap, va);
+ mpte = pmap_remove_pt_page(pmap, va);
if (mpte == NULL)
panic("pmap_remove_kernel_pde: Missing pt page.");
- pmap_remove_pt_page(pmap, mpte);
mptepa = VM_PAGE_TO_PHYS(mpte);
newpde = mptepa | X86_PG_M | X86_PG_A | X86_PG_RW | X86_PG_V;
if (pmap == kernel_pmap) {
pmap_remove_kernel_pde(pmap, pdq, sva);
} else {
- mpte = pmap_lookup_pt_page(pmap, sva);
+ mpte = pmap_remove_pt_page(pmap, sva);
if (mpte != NULL) {
- pmap_remove_pt_page(pmap, mpte);
pmap_resident_count_dec(pmap, 1);
KASSERT(mpte->wire_count == NPTEPG,
("pmap_remove_pde: pte page wire count error"));
TAILQ_EMPTY(&mt->md.pv_list))
vm_page_aflag_clear(mt, PGA_WRITEABLE);
}
- mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
+ mpte = pmap_remove_pt_page(pmap, pv->pv_va);
if (mpte != NULL) {
- pmap_remove_pt_page(pmap, mpte);
pmap_resident_count_dec(pmap, 1);
KASSERT(mpte->wire_count == NPTEPG,
("pmap_remove_pages: pte page wire count error"));
}
/*
- * Looks for a page table page mapping the specified virtual address in the
- * specified pmap's collection of idle page table pages. Returns NULL if there
- * is no page table page corresponding to the specified virtual address.
+ * Removes the page table page mapping the specified virtual address from the
+ * specified pmap's collection of idle page table pages, and returns it if the
+ * mapping exists.  Otherwise, returns NULL.
*/
static __inline vm_page_t
-pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
+pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
{
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
- return (vm_radix_lookup(&pmap->pm_root, pmap_l2_pindex(va)));
-}
-
-/*
- * Removes the specified page table page from the specified pmap's collection
- * of idle page table pages. The specified page table page must be a member of
- * the pmap's collection.
- */
-static __inline void
-pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
-{
-
- PMAP_LOCK_ASSERT(pmap, MA_OWNED);
- vm_radix_remove(&pmap->pm_root, mpte->pindex);
+ return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va)));
}
/*
TAILQ_EMPTY(&mt->md.pv_list))
vm_page_aflag_clear(mt, PGA_WRITEABLE);
}
- ml3 = pmap_lookup_pt_page(pmap,
+ ml3 = pmap_remove_pt_page(pmap,
pv->pv_va);
if (ml3 != NULL) {
- pmap_remove_pt_page(pmap, ml3);
pmap_resident_count_dec(pmap, 1);
KASSERT(ml3->wire_count == NL3PG,
("pmap_remove_pages: l3 page wire count error"));
return (NULL);
}
- if ((ml3 = pmap_lookup_pt_page(pmap, va)) != NULL) {
- pmap_remove_pt_page(pmap, ml3);
- } else {
+ if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) {
ml3 = vm_page_alloc(NULL, pmap_l2_pindex(va),
(VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde);
-static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
struct spglist *free);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
struct spglist *free);
-static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
+static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
struct spglist *free);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
}
/*
- * Looks for a page table page mapping the specified virtual address in the
- * specified pmap's collection of idle page table pages. Returns NULL if there
- * is no page table page corresponding to the specified virtual address.
+ * Removes the page table page mapping the specified virtual address from the
+ * specified pmap's collection of idle page table pages, and returns it if the
+ * mapping exists.  Otherwise, returns NULL.
*/
static __inline vm_page_t
-pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
+pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
{
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
- return (vm_radix_lookup(&pmap->pm_root, va >> PDRSHIFT));
-}
-
-/*
- * Removes the specified page table page from the specified pmap's collection
- * of idle page table pages. The specified page table page must be a member of
- * the pmap's collection.
- */
-static __inline void
-pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
-{
-
- PMAP_LOCK_ASSERT(pmap, MA_OWNED);
- vm_radix_remove(&pmap->pm_root, mpte->pindex);
+ return (vm_radix_remove(&pmap->pm_root, va >> PDRSHIFT));
}
/*
oldpde = *pde;
KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
- if ((oldpde & PG_A) != 0 && (mpte = pmap_lookup_pt_page(pmap, va)) !=
- NULL)
- pmap_remove_pt_page(pmap, mpte);
- else {
+ if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
+ NULL) {
KASSERT((oldpde & PG_W) == 0,
("pmap_demote_pde: page table page for a wired mapping"
" is missing"));
vm_page_t mpte;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
- mpte = pmap_lookup_pt_page(pmap, va);
+ mpte = pmap_remove_pt_page(pmap, va);
if (mpte == NULL)
panic("pmap_remove_kernel_pde: Missing pt page.");
- pmap_remove_pt_page(pmap, mpte);
mptepa = VM_PAGE_TO_PHYS(mpte);
newpde = mptepa | PG_M | PG_A | PG_RW | PG_V;
if (pmap == kernel_pmap) {
pmap_remove_kernel_pde(pmap, pdq, sva);
} else {
- mpte = pmap_lookup_pt_page(pmap, sva);
+ mpte = pmap_remove_pt_page(pmap, sva);
if (mpte != NULL) {
- pmap_remove_pt_page(pmap, mpte);
pmap->pm_stats.resident_count--;
KASSERT(mpte->wire_count == NPTEPG,
("pmap_remove_pde: pte page wire count error"));
if (TAILQ_EMPTY(&mt->md.pv_list))
vm_page_aflag_clear(mt, PGA_WRITEABLE);
}
- mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
+ mpte = pmap_remove_pt_page(pmap, pv->pv_va);
if (mpte != NULL) {
- pmap_remove_pt_page(pmap, mpte);
pmap->pm_stats.resident_count--;
KASSERT(mpte->wire_count == NPTEPG,
("pmap_remove_pages: pte page wire count error"));
/*
* vm_page_remove:
*
- * Removes the given mem entry from the object/offset-page
- * table and the object page list, but do not invalidate/terminate
- * the backing store.
+ * Removes the specified page from its containing object, but does not
+ * invalidate any backing storage.
*
* The object must be locked. The page must be locked if it is managed.
*/
vm_page_remove(vm_page_t m)
{
vm_object_t object;
+ vm_page_t mrem;
if ((m->oflags & VPO_UNMANAGED) == 0)
vm_page_assert_locked(m);
VM_OBJECT_ASSERT_WLOCKED(object);
if (vm_page_xbusied(m))
vm_page_xunbusy_maybelocked(m);
+ mrem = vm_radix_remove(&object->rtree, m->pindex);
+ KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));
/*
* Now remove from the object's list of backed pages.
*/
- vm_radix_remove(&object->rtree, m->pindex);
TAILQ_REMOVE(&object->memq, m, listq);
/*
}
/*
- * Remove the specified index from the tree.
- * Panics if the key is not present.
+ * Remove the specified index from the trie, and return the value stored at
+ * that index. If the index is not present, return NULL.
*/
-void
+vm_page_t
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
struct vm_radix_node *rnode, *parent;
if (vm_radix_isleaf(rnode)) {
m = vm_radix_topage(rnode);
if (m->pindex != index)
- panic("%s: invalid key found", __func__);
+ return (NULL);
vm_radix_setroot(rtree, NULL);
- return;
+ return (m);
}
parent = NULL;
for (;;) {
if (rnode == NULL)
- panic("vm_radix_remove: impossible to locate the key");
+ return (NULL);
slot = vm_radix_slot(index, rnode->rn_clev);
if (vm_radix_isleaf(rnode->rn_child[slot])) {
m = vm_radix_topage(rnode->rn_child[slot]);
if (m->pindex != index)
- panic("%s: invalid key found", __func__);
+ return (NULL);
rnode->rn_child[slot] = NULL;
rnode->rn_count--;
if (rnode->rn_count > 1)
- break;
+ return (m);
for (i = 0; i < VM_RADIX_COUNT; i++)
if (rnode->rn_child[i] != NULL)
break;
rnode->rn_count--;
rnode->rn_child[i] = NULL;
vm_radix_node_put(rnode);
- break;
+ return (m);
}
parent = rnode;
rnode = rnode->rn_child[slot];
vm_page_t vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index);
vm_page_t vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index);
void vm_radix_reclaim_allnodes(struct vm_radix *rtree);
-void vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index);
+vm_page_t vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index);
vm_page_t vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage);
#endif /* _KERNEL */
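/*
 * Usage sketch of the revised vm_radix_remove() contract (hypothetical
 * caller; the object and page names follow the vm_page_remove() hunk above):
 * the removal returns the page that was stored at the index, or NULL when
 * the index is absent, instead of panicking.
 */
	vm_page_t mrem;

	mrem = vm_radix_remove(&object->rtree, m->pindex);
	/* Callers that require the page to be present can assert it. */
	KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));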