unlock_page_alloc(p2m);
- /* Then add the first one to the appropriate populate-on-demand list */
- switch(order)
+ /* Then add to the appropriate populate-on-demand list. */
+ switch ( order )
{
+ case PAGE_ORDER_1G:
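+ /* The cache lists hold only 2M and 4K chunks, so split 1G into 2M pieces. */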
+ for ( i = 0; i < (1UL << PAGE_ORDER_1G); i += 1UL << PAGE_ORDER_2M )
+ page_list_add_tail(page + i, &p2m->pod.super);
+ break;
case PAGE_ORDER_2M:
- page_list_add_tail(page, &p2m->pod.super); /* lock: page_alloc */
- p2m->pod.count += 1 << order;
+ page_list_add_tail(page, &p2m->pod.super);
break;
case PAGE_ORDER_4K:
- page_list_add_tail(page, &p2m->pod.single); /* lock: page_alloc */
- p2m->pod.count += 1;
+ page_list_add_tail(page, &p2m->pod.single);
break;
default:
BUG();
}
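+ /* Account for the pages just added, in one place for all orders. */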
+ p2m->pod.count += 1L << order;
return 0;
}
unsigned int order)
{
int ret=0;
- int i;
+ unsigned long i, n;
struct p2m_domain *p2m = p2m_get_hostp2m(d);
-
- int steal_for_cache;
- int pod, nonpod, ram;
+ bool_t steal_for_cache;
+ long pod, nonpod, ram;
gfn_lock(p2m, gpfn, order);
pod_lock(p2m);
/* Figure out if we need to steal some freed memory for our cache */
steal_for_cache = ( p2m->pod.entry_count > p2m->pod.count );
- /* FIXME: Add contiguous; query for PSE entries? */
- for ( i=0; i<(1<<order); i++)
+ for ( i = 0; i < (1UL << order); i += n )
{
p2m_access_t a;
p2m_type_t t;
+ unsigned int cur_order;
- (void)p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL, NULL);
-
+ p2m->get_entry(p2m, gpfn + i, &t, &a, 0, &cur_order, NULL);
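+ /* Count a whole present entry at a time, but not beyond the requested range. */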
+ n = 1UL << min(order, cur_order);
if ( t == p2m_populate_on_demand )
- pod++;
+ pod += n;
else
{
- nonpod++;
+ nonpod += n;
if ( p2m_is_ram(t) )
- ram++;
+ ram += n;
}
}
* + There are PoD entries to handle, or
* + There is ram left, and we want to steal it
*/
- for ( i=0;
- i<(1<<order) && (pod>0 || (steal_for_cache && ram > 0));
- i++)
+ for ( i = 0;
+ i < (1UL << order) && (pod > 0 || (steal_for_cache && ram > 0));
+ i += n )
{
mfn_t mfn;
p2m_type_t t;
p2m_access_t a;
+ unsigned int cur_order;
- mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL, NULL);
+ mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, 0, &cur_order, NULL);
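+ /* Clamp to the request's order so only the range being decreased is touched. */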
+ if ( order < cur_order )
+ cur_order = order;
+ n = 1UL << cur_order;
if ( t == p2m_populate_on_demand )
{
- p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid,
- p2m->default_access);
- p2m->pod.entry_count--;
+ p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
+ p2m_invalid, p2m->default_access);
+ p2m->pod.entry_count -= n;
BUG_ON(p2m->pod.entry_count < 0);
- pod--;
+ pod -= n;
}
else if ( steal_for_cache && p2m_is_ram(t) )
{
+ /*
+ * If we need less than 1 << cur_order, we may end up stealing
+ * more memory here than we actually need. This will be rectified
+ * below, however; and stealing too much and then freeing what we
+ * need may allow us to free smaller pages from the cache, and
+ * avoid breaking up superpages.
+ */
struct page_info *page;
+ unsigned int j;
ASSERT(mfn_valid(mfn));
page = mfn_to_page(mfn);
- p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid,
- p2m->default_access);
- set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
-
- p2m_pod_cache_add(p2m, page, 0);
+ p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
+ p2m_invalid, p2m->default_access);
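+ /* Invalidate the M2P entry of every page handed over to the cache. */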
+ for ( j = 0; j < n; ++j )
+ set_gpfn_from_mfn(mfn_x(mfn) + j, INVALID_M2P_ENTRY);
+ p2m_pod_cache_add(p2m, page, cur_order);
steal_for_cache = ( p2m->pod.entry_count > p2m->pod.count );
- nonpod--;
- ram--;
+ nonpod -= n;
+ ram -= n;
}
}
p2m_type_t type, type0 = 0;
unsigned long * map = NULL;
int ret=0, reset = 0;
- int i, j;
+ unsigned long i, n;
+ unsigned int j;
int max_ref = 1;
struct domain *d = p2m->domain;
/* Look up the mfns, checking to make sure they're the same mfn
* and aligned, and mapping them. */
- for ( i=0; i<SUPERPAGE_PAGES; i++ )
+ for ( i = 0; i < SUPERPAGE_PAGES; i += n )
{
p2m_access_t a;
- mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, NULL, NULL);
+ unsigned int cur_order;
+ unsigned long k;
+ const struct page_info *page;
- if ( i == 0 )
- {
- mfn0 = mfn;
- type0 = type;
- }
+ mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, &cur_order, NULL);
/* Conditions that must be met for superpage-superpage:
* + All gfns are ram types
* + None of the mfns are likely to be mapped elsewhere (refcount
* 2 or less for shadow, 1 for hap)
*/
- if ( !p2m_is_ram(type)
- || type != type0
- || ( (mfn_to_page(mfn)->count_info & PGC_allocated) == 0 )
- || ( (mfn_to_page(mfn)->count_info & (PGC_page_table|PGC_xen_heap)) != 0 )
- || ( (mfn_to_page(mfn)->count_info & PGC_xen_heap ) != 0 )
- || ( (mfn_to_page(mfn)->count_info & PGC_count_mask) > max_ref )
- || !( ( i == 0 && superpage_aligned(mfn_x(mfn0)) )
- || ( i != 0 && mfn_x(mfn) == (mfn_x(mfn0) + i) ) ) )
+ if ( !p2m_is_ram(type) )
+ goto out;
+
+ if ( i == 0 )
+ {
+ if ( !superpage_aligned(mfn_x(mfn)) )
+ goto out;
+ mfn0 = mfn;
+ type0 = type;
+ }
+ else if ( type != type0 || mfn_x(mfn) != (mfn_x(mfn0) + i) )
goto out;
+
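+ /*
+ * The "+ 0U" keeps min()'s operands the same (unsigned) type; in any
+ * case don't look beyond the 2M range being checked.
+ */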
+ n = 1UL << min(cur_order, SUPERPAGE_ORDER + 0U);
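+ /*
+ * Type and contiguity were already checked for the whole entry; the
+ * reference counts still need checking page by page.
+ */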
+ for ( k = 0, page = mfn_to_page(mfn); k < n; ++k, ++page )
+ if ( !(page->count_info & PGC_allocated) ||
+ (page->count_info & (PGC_page_table | PGC_xen_heap)) ||
+ (page->count_info & PGC_count_mask) > max_ref )
+ goto out;
}
/* Now, do a quick check to see if it may be zero before unmapping. */
unsigned int order)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
- unsigned long i, pod_count = 0;
+ unsigned long i, n, pod_count = 0;
p2m_type_t ot;
mfn_t omfn;
int rc = 0;
P2M_DEBUG("mark pod gfn=%#lx\n", gfn);
/* Make sure all gpfns are unused */
- for ( i = 0; i < (1UL << order); i++ )
+ for ( i = 0; i < (1UL << order); i += n )
{
p2m_access_t a;
- omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL, NULL);
+ unsigned int cur_order;
+
+ omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, &cur_order, NULL);
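+ /* Inspect a whole present entry at a time, limited to the requested range. */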
+ n = 1UL << min(order, cur_order);
if ( p2m_is_ram(ot) )
{
P2M_DEBUG("gfn_to_mfn returned type %d!\n", ot);
else if ( ot == p2m_populate_on_demand )
{
/* Count how many PoD entries we'll be replacing if successful */
- pod_count++;
+ pod_count += n;
}
}