* processes. This optimization causes one-time-use metadata to be
* reused more quickly.
*
- * Normally athead is 0 resulting in LRU operation. athead is set
- * to 1 if we want this page to be 'as if it were placed in the cache',
- * except without unmapping it from the process address space.
+ * Normally noreuse is FALSE, resulting in LRU operation. noreuse is set
+ * to TRUE if we want this page to be 'as if it were placed in the cache',
+ * except without unmapping it from the process address space. In
+ * practice this is implemented by inserting the page at the head of the
+ * queue, using a marker page to guide FIFO insertion ordering.
*
* The page must be locked.
*/
static inline void
-_vm_page_deactivate(vm_page_t m, int athead)
+_vm_page_deactivate(vm_page_t m, boolean_t noreuse)
{
	struct vm_pagequeue *pq;
	int queue;
	 * Ignore if the page is already inactive, unless it is unlikely to be
	 * reactivated.
	 */
-	if ((queue = m->queue) == PQ_INACTIVE && !athead)
+	if ((queue = m->queue) == PQ_INACTIVE && !noreuse)
		return;
	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
		pq = &vm_phys_domain(m)->vmd_pagequeues[PQ_INACTIVE];
			vm_pagequeue_lock(pq);
		}
		m->queue = PQ_INACTIVE;
-		if (athead)
-			TAILQ_INSERT_HEAD(&pq->pq_pl, m, plinks.q);
+		if (noreuse)
+			TAILQ_INSERT_BEFORE(&vm_phys_domain(m)->vmd_inacthead,
+			    m, plinks.q);
		else
			TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
		vm_pagequeue_cnt_inc(pq);
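
For reference, the "marker page" ordering described in the comment above can be reproduced with the same queue(3) TAILQ macros in a stand-alone userland sketch. Everything below is illustrative only and not part of the patch: the struct, field, and variable names are invented, and the kernel code uses vmd_inacthead and the inactive queue's pq_pl instead. A marker element is placed at the head of the list once; "noreuse" insertions go immediately before the marker and therefore stack up at the front in FIFO order, while normal insertions still go to the tail.

    /* Illustrative userland sketch, not kernel code. */
    #include <sys/queue.h>
    #include <stdio.h>

    struct page {
        int id;                         /* -1 identifies the marker */
        TAILQ_ENTRY(page) plinks;
    };
    TAILQ_HEAD(pageq, page);

    int
    main(void)
    {
        struct pageq pq = TAILQ_HEAD_INITIALIZER(pq);
        struct page marker = { -1 };
        struct page p1 = { 1 }, p2 = { 2 }, p3 = { 3 }, p4 = { 4 };
        struct page *p;

        /* The marker goes at the head of the queue once, at init time. */
        TAILQ_INSERT_HEAD(&pq, &marker, plinks);

        TAILQ_INSERT_BEFORE(&marker, &p1, plinks);      /* "noreuse" */
        TAILQ_INSERT_TAIL(&pq, &p2, plinks);            /* normal LRU */
        TAILQ_INSERT_BEFORE(&marker, &p3, plinks);      /* "noreuse" */
        TAILQ_INSERT_TAIL(&pq, &p4, plinks);            /* normal LRU */

        /*
         * A head-to-tail walk prints "1 3 -1 2 4": the noreuse entries come
         * first, in insertion (FIFO) order, ahead of the tail insertions.
         */
        TAILQ_FOREACH(p, &pq, plinks)
            printf("%d ", p->id);
        printf("\n");
        return (0);
    }

Since the pageout scan consumes the inactive queue from its head, pages deactivated this way are reclaimed before the LRU-ordered pages at the tail, and among themselves in the order they were deactivated.
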
vm_page_deactivate(vm_page_t m)
{
-	_vm_page_deactivate(m, 0);
+	_vm_page_deactivate(m, FALSE);
}
/*
vm_page_deactivate_noreuse(vm_page_t m)
{
-	_vm_page_deactivate(m, 1);
+	_vm_page_deactivate(m, TRUE);
}
/*
	KASSERT(domain->vmd_segs != 0, ("domain without segments"));
	domain->vmd_last_active_scan = ticks;
	vm_pageout_init_marker(&domain->vmd_marker, PQ_INACTIVE);
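+	/*
+	 * This marker sits in the inactive queue so that
+	 * vm_page_deactivate_noreuse() can insert pages just before it,
+	 * keeping those pages near the head in FIFO order.
+	 */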
+	vm_pageout_init_marker(&domain->vmd_inacthead, PQ_INACTIVE);
+	TAILQ_INSERT_HEAD(&domain->vmd_pagequeues[PQ_INACTIVE].pq_pl,
+	    &domain->vmd_inacthead, plinks.q);
	/*
	 * The pageout daemon worker is never done, so loop forever.