}
#define POD_SWEEP_LIMIT 1024
-
-/* When populating a new superpage, look at recently populated superpages
- * hoping that they've been zeroed. This will snap up zeroed pages as soon as
- * the guest OS is done with them. */
-static void
-p2m_pod_check_last_super(struct p2m_domain *p2m, unsigned long gfn_aligned)
-{
- unsigned long check_gfn;
-
- ASSERT(p2m->pod.last_populated_index < POD_HISTORY_MAX);
-
- check_gfn = p2m->pod.last_populated[p2m->pod.last_populated_index];
-
- p2m->pod.last_populated[p2m->pod.last_populated_index] = gfn_aligned;
-
- p2m->pod.last_populated_index =
- ( p2m->pod.last_populated_index + 1 ) % POD_HISTORY_MAX;
-
- p2m_pod_zero_check_superpage(p2m, check_gfn);
-}
-
-
#define POD_SWEEP_STRIDE 16
static void
p2m_pod_emergency_sweep(struct p2m_domain *p2m)
* NB that this is a zero-sum game; we're increasing our cache size
* by re-increasing our 'debt'. Since we hold the pod lock,
* (entry_count - count) must remain the same. */
- if ( p2m->pod.count > 0 && i < limit )
+ if ( i < limit && (p2m->pod.count > 0 || hypercall_preempt_check()) )
break;
}
}
+static void pod_eager_reclaim(struct p2m_domain *p2m)
+{
+ struct pod_mrp_list *mrp = &p2m->pod.mrp;
+ unsigned int i = 0;
+
+ /*
+     * Always check one page for reclamation.
+     *
+     * If the PoD pool is empty, keep checking until some space is found or
+     * all entries have been exhausted.
+ */
+ do
+ {
+ unsigned int idx = (mrp->idx + i++) % ARRAY_SIZE(mrp->list);
+ unsigned long gfn = mrp->list[idx];
+
+ if ( gfn != INVALID_GFN )
+ {
+ if ( gfn & POD_LAST_SUPERPAGE )
+ {
+ gfn &= ~POD_LAST_SUPERPAGE;
+
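+                /*
+                 * If the 2M zero-check did not reclaim the superpage as a
+                 * whole, fall back to checking each of the SUPERPAGE_PAGES
+                 * constituent gfns individually.
+                 */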
+ if ( p2m_pod_zero_check_superpage(p2m, gfn) == 0 )
+ {
+ unsigned int x;
+
+ for ( x = 0; x < SUPERPAGE_PAGES; ++x, ++gfn )
+ p2m_pod_zero_check(p2m, &gfn, 1);
+ }
+ }
+ else
+ p2m_pod_zero_check(p2m, &gfn, 1);
+
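+            /* The slot has been consumed; free it for pod_eager_record(). */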
+ mrp->list[idx] = INVALID_GFN;
+ }
+
+ } while ( (p2m->pod.count == 0) && (i < ARRAY_SIZE(mrp->list)) );
+}
+
+static void pod_eager_record(struct p2m_domain *p2m,
+ unsigned long gfn, unsigned int order)
+{
+ struct pod_mrp_list *mrp = &p2m->pod.mrp;
+
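+    /*
+     * pod_eager_reclaim(), called earlier in p2m_pod_demand_populate(),
+     * always clears at least the slot at ->idx, so the slot being
+     * overwritten here must already be vacant.
+     */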
+ ASSERT(mrp->list[mrp->idx] == INVALID_GFN);
+ ASSERT(gfn != INVALID_GFN);
+
+ mrp->list[mrp->idx++] =
+ gfn | (order == PAGE_ORDER_2M ? POD_LAST_SUPERPAGE : 0);
+ mrp->idx %= ARRAY_SIZE(mrp->list);
+}
+
int
p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
unsigned int order,
return 0;
}
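+    /* Try to reclaim recently populated (and hopefully now zeroed and
+     * freed) pages before resorting to an emergency sweep. */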
+ pod_eager_reclaim(p2m);
+
/* Only sweep if we're actually out of memory. Doing anything else
* causes unnecessary time and fragmentation of superpages in the p2m. */
if ( p2m->pod.count == 0 )
p2m->pod.entry_count -= (1 << order);
BUG_ON(p2m->pod.entry_count < 0);
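+    /* Record this population so that a later fault can try to reclaim the
+     * page(s) once the guest OS is done with them. */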
+ pod_eager_record(p2m, gfn_aligned, order);
+
if ( tb_init_done )
{
struct {
__trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), &t);
}
- /* Check the last guest demand-populate */
- if ( p2m->pod.entry_count > p2m->pod.count
- && (order == PAGE_ORDER_2M)
- && (q & P2M_ALLOC) )
- p2m_pod_check_last_super(p2m, gfn_aligned);
-
pod_unlock(p2m);
return 0;
out_of_memory:
entry_count; /* # of pages in p2m marked pod */
unsigned long reclaim_single; /* Last gpfn of a scan */
unsigned long max_guest; /* gpfn of max guest demand-populate */
-#define POD_HISTORY_MAX 128
- /* gpfn of last guest superpage demand-populated */
- unsigned long last_populated[POD_HISTORY_MAX];
- unsigned int last_populated_index;
+
+ /*
+ * Tracking of the most recently populated PoD pages, for eager
+ * reclamation.
+ */
+ struct pod_mrp_list {
+#define NR_POD_MRP_ENTRIES 32
+
+/* Encode ORDER_2M superpage in top bit of GFN */
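+/*
+ * INVALID_GFN is an all-ones constant, so this expression isolates its most
+ * significant bit; no valid gfn can have that bit set.
+ */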
+#define POD_LAST_SUPERPAGE (INVALID_GFN & ~(INVALID_GFN >> 1))
+
+ unsigned long list[NR_POD_MRP_ENTRIES];
+ unsigned int idx;
+ } mrp;
mm_lock_t lock; /* Locking of private pod structs, *
* not relying on the p2m lock. */
} pod;