if ( unlikely(d->is_dying) )
return NULL;
- pg = page_list_remove_head(&d->arch.paging.hap.freelist);
+ pg = page_list_remove_head(&d->arch.paging.freelist);
if ( unlikely(!pg) )
return NULL;
- d->arch.paging.hap.free_pages--;
+ d->arch.paging.free_pages--;
clear_domain_page(page_to_mfn(pg));
if ( unlikely(d->is_dying) )
{
free_domheap_page(pg);
- d->arch.paging.hap.total_pages--;
+ d->arch.paging.total_pages--;
return;
}
- d->arch.paging.hap.free_pages++;
- page_list_add_tail(pg, &d->arch.paging.hap.freelist);
+ d->arch.paging.free_pages++;
+ page_list_add_tail(pg, &d->arch.paging.freelist);
}
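
Every rename in this patch presumes one preparatory change: the pool bookkeeping moves out of the per-mode structs into the common struct paging_domain. A minimal sketch of that hoist, assuming the x86 layout in asm/domain.h; field order and comments here are illustrative, not taken from this diff:

    struct paging_domain {
        /* paging lock */
        mm_lock_t lock;

        /* flags to control paging operation */
        u32 mode;

        /*
         * Unified paging-assistance pool, formerly duplicated as
         * hap.{freelist,total_pages,free_pages,p2m_pages} and the
         * equivalent shadow.* fields.
         */
        struct page_list_head freelist;
        unsigned int total_pages;  /* number of pages allocated */
        unsigned int free_pages;   /* number of pages on freelists */
        unsigned int p2m_pages;    /* number of pages allocated to p2m */

        /* Has that pool ever run out of memory? */
        bool p2m_alloc_failed;

        /* Mode-specific extensions keep their remaining fields. */
        struct shadow_domain shadow;
        struct hap_domain hap;
        /* ... log-dirty state etc. unchanged ... */
    };
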
static struct page_info *cf_check hap_alloc_p2m_page(struct domain *d)
if ( likely(pg != NULL) )
{
- d->arch.paging.hap.total_pages--;
- d->arch.paging.hap.p2m_pages++;
+ d->arch.paging.total_pages--;
+ d->arch.paging.p2m_pages++;
ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask));
}
else if ( !d->arch.paging.p2m_alloc_failed && !d->is_dying )
pg->count_info &= ~PGC_count_mask;
page_set_owner(pg, NULL);
}
- d->arch.paging.hap.p2m_pages--;
- d->arch.paging.hap.total_pages++;
+ d->arch.paging.p2m_pages--;
+ d->arch.paging.total_pages++;
hap_free(d, page_to_mfn(pg));
paging_unlock(d);
/* Return the size of the pool, rounded up to the nearest MB */
unsigned int hap_get_allocation(struct domain *d)
{
- unsigned int pg = d->arch.paging.hap.total_pages
- + d->arch.paging.hap.p2m_pages;
+ unsigned int pg = d->arch.paging.total_pages + d->arch.paging.p2m_pages;
return ((pg >> (20 - PAGE_SHIFT))
+ ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
}
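
The rounding expression above is easy to misread, so here is the same computation as a standalone helper with a worked value. It assumes 4 KiB pages (PAGE_SHIFT == 12) and is purely illustrative:

    #define PAGE_SHIFT 12 /* assumption: 4 KiB pages, as on x86 */

    /* Pages -> MiB, rounding any partial MiB up. 20 - PAGE_SHIFT == 8,
     * so one MiB is 1 << 8 == 256 pages. */
    static unsigned int pages_to_mb_round_up(unsigned int pg)
    {
        return (pg >> (20 - PAGE_SHIFT)) +
               ((pg & ((1u << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0);
    }

    /* e.g. pages_to_mb_round_up(256) == 1, while pages_to_mb_round_up(300)
     * == 2: the 44-page remainder rounds up to a whole extra MiB. */
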
-int hap_get_allocation_bytes(struct domain *d, uint64_t *size)
-{
- unsigned long pages = d->arch.paging.hap.total_pages;
-
- pages += d->arch.paging.hap.p2m_pages;
-
- *size = pages << PAGE_SHIFT;
-
- return 0;
-}
-
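
This hunk deletes hap_get_allocation_bytes outright, and a later hunk deletes the identical shadow_get_allocation_bytes; with the counters unified, a single common helper can serve both callers. A sketch of that replacement, mirroring the deleted bodies; the name paging_get_allocation_bytes and its home (e.g. mm/paging.c) are assumptions, not shown in this diff:

    int paging_get_allocation_bytes(struct domain *d, uint64_t *size)
    {
        unsigned long pages = d->arch.paging.total_pages;

        pages += d->arch.paging.p2m_pages;

        *size = pages << PAGE_SHIFT;

        return 0;
    }
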
/* Set the pool of pages to the required number of pages.
* Returns 0 for success, non-zero for failure. */
int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted)
ASSERT(paging_locked_by_me(d));
- if ( pages < d->arch.paging.hap.p2m_pages )
+ if ( pages < d->arch.paging.p2m_pages )
pages = 0;
else
- pages -= d->arch.paging.hap.p2m_pages;
+ pages -= d->arch.paging.p2m_pages;
for ( ; ; )
{
- if ( d->arch.paging.hap.total_pages < pages )
+ if ( d->arch.paging.total_pages < pages )
{
/* Need to allocate more memory from domheap */
pg = alloc_domheap_page(d, MEMF_no_owner);
HAP_PRINTK("failed to allocate hap pages.\n");
return -ENOMEM;
}
- d->arch.paging.hap.free_pages++;
- d->arch.paging.hap.total_pages++;
- page_list_add_tail(pg, &d->arch.paging.hap.freelist);
+ d->arch.paging.free_pages++;
+ d->arch.paging.total_pages++;
+ page_list_add_tail(pg, &d->arch.paging.freelist);
}
- else if ( d->arch.paging.hap.total_pages > pages )
+ else if ( d->arch.paging.total_pages > pages )
{
/* Need to return memory to domheap */
- if ( page_list_empty(&d->arch.paging.hap.freelist) )
+ if ( page_list_empty(&d->arch.paging.freelist) )
{
HAP_PRINTK("failed to free enough hap pages.\n");
return -ENOMEM;
}
- pg = page_list_remove_head(&d->arch.paging.hap.freelist);
+ pg = page_list_remove_head(&d->arch.paging.freelist);
ASSERT(pg);
- d->arch.paging.hap.free_pages--;
- d->arch.paging.hap.total_pages--;
+ d->arch.paging.free_pages--;
+ d->arch.paging.total_pages--;
free_domheap_page(pg);
}
else
.clean = hap_clean_dirty_bitmap,
};
- INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist);
-
/* Use HAP logdirty mechanism. */
paging_log_dirty_init(d, &hap_ops);
}
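
Dropping INIT_PAGE_LIST_HEAD here (its shadow counterpart goes further down) is only safe if the shared freelist is initialised once in common code. paging_domain_init() is the natural home; the exact placement below is an assumption, not shown in this diff:

    int paging_domain_init(struct domain *d)
    {
        /* ... existing setup: paging lock, log-dirty bookkeeping ... */

        /* Shared pool freelist, now set up once for HAP and shadow alike. */
        INIT_PAGE_LIST_HEAD(&d->arch.paging.freelist);

        /* ... then hand off to hap_domain_init() / shadow_domain_init() ... */
        return 0;
    }
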
domain_pause(d);
- old_pages = d->arch.paging.hap.total_pages;
+ old_pages = d->arch.paging.total_pages;
if ( old_pages == 0 )
{
paging_lock(d);
p2m_teardown(d->arch.nested_p2m[i], true, NULL);
}
- if ( d->arch.paging.hap.total_pages != 0 )
+ if ( d->arch.paging.total_pages != 0 )
hap_teardown(d, NULL);
p2m_teardown(p2m_get_hostp2m(d), true, NULL);
/* Free any memory that the p2m teardown released */
paging_lock(d);
hap_set_allocation(d, 0, NULL);
- ASSERT(d->arch.paging.hap.p2m_pages == 0);
- ASSERT(d->arch.paging.hap.free_pages == 0);
- ASSERT(d->arch.paging.hap.total_pages == 0);
+ ASSERT(d->arch.paging.p2m_pages == 0);
+ ASSERT(d->arch.paging.free_pages == 0);
+ ASSERT(d->arch.paging.total_pages == 0);
paging_unlock(d);
}
paging_lock(d); /* Keep various asserts happy */
- if ( d->arch.paging.hap.total_pages != 0 )
+ if ( d->arch.paging.total_pages != 0 )
{
hap_set_allocation(d, 0, preempted);
if ( preempted && *preempted )
goto out;
- ASSERT(d->arch.paging.hap.total_pages == 0);
+ ASSERT(d->arch.paging.total_pages == 0);
}
d->arch.paging.mode &= ~PG_log_dirty;
.clean = sh_clean_dirty_bitmap,
};
- INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelist);
INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
/* Use shadow pagetables for log-dirty support */
mfn_t smfn;
int i;
- if ( d->arch.paging.shadow.free_pages >= pages )
+ if ( d->arch.paging.free_pages >= pages )
return true;
if ( unlikely(d->is_dying) )
sh_unpin(d, smfn);
/* See if that freed up enough space */
- if ( d->arch.paging.shadow.free_pages >= pages )
+ if ( d->arch.paging.free_pages >= pages )
return true;
}
0);
/* See if that freed up enough space */
- if ( d->arch.paging.shadow.free_pages >= pages )
+ if ( d->arch.paging.free_pages >= pages )
{
guest_flush_tlb_mask(d, d->dirty_cpumask);
return true;
* hold Xen mappings for some vcpu. This can never happen. */
printk(XENLOG_ERR "Can't pre-allocate %u shadow pages!\n"
" shadow pages total = %u, free = %u, p2m=%u\n",
- pages,
- d->arch.paging.shadow.total_pages,
- d->arch.paging.shadow.free_pages,
- d->arch.paging.shadow.p2m_pages);
+ pages, d->arch.paging.total_pages,
+ d->arch.paging.free_pages, d->arch.paging.p2m_pages);
ASSERT_UNREACHABLE();
ASSERT(shadow_type != SH_type_none);
perfc_incr(shadow_alloc);
- if ( d->arch.paging.shadow.free_pages < pages )
+ if ( d->arch.paging.free_pages < pages )
{
/* If we get here, we failed to allocate. This should never
* happen. It means that we didn't call shadow_prealloc()
printk(XENLOG_ERR "Can't allocate %u shadow pages!\n", pages);
BUG();
}
- d->arch.paging.shadow.free_pages -= pages;
+ d->arch.paging.free_pages -= pages;
/* Backpointers that are MFNs need to be packed into PDXs (PFNs don't) */
switch (shadow_type)
/* Init page info fields and clear the pages */
for ( i = 0; i < pages ; i++ )
{
- sp = page_list_remove_head(&d->arch.paging.shadow.freelist);
+ sp = page_list_remove_head(&d->arch.paging.freelist);
/* Before we overwrite the old contents of this page,
* we need to be sure that no TLB holds a pointer to it. */
cpumask_copy(&mask, d->dirty_cpumask);
free_domheap_page(sp);
}
else
- page_list_add_tail(sp, &d->arch.paging.shadow.freelist);
+ page_list_add_tail(sp, &d->arch.paging.freelist);
sp = next;
}
if ( unlikely(dying) )
- d->arch.paging.shadow.total_pages -= pages;
+ d->arch.paging.total_pages -= pages;
else
- d->arch.paging.shadow.free_pages += pages;
+ d->arch.paging.free_pages += pages;
}
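
The asymmetry above preserves the pool accounting: a dying domain's pages went straight back to the domheap, so only the pool size shrinks, while for a live domain they rejoined the freelist. A sanity check one could add, illustrative only and not part of this patch:

    /* Free pages can never outnumber pool pages. */
    ASSERT(d->arch.paging.free_pages <= d->arch.paging.total_pages);
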
/* Divert a page from the pool to be used by the p2m mapping.
* paging lock) and the log-dirty code (which always does). */
paging_lock_recursive(d);
- if ( d->arch.paging.shadow.total_pages
- < shadow_min_acceptable_pages(d) + 1 )
+ if ( d->arch.paging.total_pages < shadow_min_acceptable_pages(d) + 1 )
{
if ( !d->arch.paging.p2m_alloc_failed )
d->arch.paging.p2m_alloc_failed = 1;
dprintk(XENLOG_ERR,
"d%d failed to allocate from shadow pool (tot=%u p2m=%u min=%u)\n",
- d->domain_id, d->arch.paging.shadow.total_pages,
- d->arch.paging.shadow.p2m_pages,
+ d->domain_id, d->arch.paging.total_pages,
+ d->arch.paging.p2m_pages,
shadow_min_acceptable_pages(d));
}
goto out;
goto out;
pg = mfn_to_page(shadow_alloc(d, SH_type_p2m_table, 0));
- d->arch.paging.shadow.p2m_pages++;
- d->arch.paging.shadow.total_pages--;
+ d->arch.paging.p2m_pages++;
+ d->arch.paging.total_pages--;
ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask));
out:
* paging lock) and the log-dirty code (which always does). */
paging_lock_recursive(d);
- d->arch.paging.shadow.p2m_pages--;
- d->arch.paging.shadow.total_pages++;
+ d->arch.paging.p2m_pages--;
+ d->arch.paging.total_pages++;
shadow_free(d, page_to_mfn(pg));
paging_unlock(d);
max(max(domain_tot_pages(d) / 256,
is_hvm_domain(d) ? CONFIG_PAGING_LEVELS + 2 : 0U) +
is_hvm_domain(d),
- d->arch.paging.shadow.p2m_pages);
+ d->arch.paging.p2m_pages);
}
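
The hunk above is the tail of sh_min_allocation(); for orientation, here is the whole helper as it would read after this rename. The lines outside the hunk are reconstructed and should be treated as an assumption:

    static unsigned int sh_min_allocation(const struct domain *d)
    {
        /*
         * One page per 256 pages (i.e. per MiB, with 4 KiB pages) of domain
         * RAM for p2m/shadow overhead, a small fixed amount for HVM guests,
         * and never less than the pages the p2m has already diverted.
         */
        return shadow_min_acceptable_pages(d) +
               max(max(domain_tot_pages(d) / 256,
                       is_hvm_domain(d) ? CONFIG_PAGING_LEVELS + 2 : 0U) +
                   is_hvm_domain(d),
                   d->arch.paging.p2m_pages);
    }
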
int shadow_set_allocation(struct domain *d, unsigned int pages, bool *preempted)
if ( pages < lower_bound )
pages = lower_bound;
- pages -= d->arch.paging.shadow.p2m_pages;
+ pages -= d->arch.paging.p2m_pages;
}
SHADOW_PRINTK("current %i target %i\n",
- d->arch.paging.shadow.total_pages, pages);
+ d->arch.paging.total_pages, pages);
for ( ; ; )
{
- if ( d->arch.paging.shadow.total_pages < pages )
+ if ( d->arch.paging.total_pages < pages )
{
/* Need to allocate more memory from domheap */
sp = (struct page_info *)
SHADOW_PRINTK("failed to allocate shadow pages.\n");
return -ENOMEM;
}
- d->arch.paging.shadow.free_pages++;
- d->arch.paging.shadow.total_pages++;
+ d->arch.paging.free_pages++;
+ d->arch.paging.total_pages++;
sp->u.sh.type = 0;
sp->u.sh.pinned = 0;
sp->u.sh.count = 0;
sp->tlbflush_timestamp = 0; /* Not in any TLB */
- page_list_add_tail(sp, &d->arch.paging.shadow.freelist);
+ page_list_add_tail(sp, &d->arch.paging.freelist);
}
- else if ( d->arch.paging.shadow.total_pages > pages )
+ else if ( d->arch.paging.total_pages > pages )
{
/* Need to return memory to domheap */
if ( !_shadow_prealloc(d, 1) )
return -ENOMEM;
- sp = page_list_remove_head(&d->arch.paging.shadow.freelist);
+ sp = page_list_remove_head(&d->arch.paging.freelist);
ASSERT(sp);
/*
* The pages were allocated anonymously, but the owner field
* gets overwritten normally, so need to clear it here.
*/
page_set_owner(sp, NULL);
- d->arch.paging.shadow.free_pages--;
- d->arch.paging.shadow.total_pages--;
+ d->arch.paging.free_pages--;
+ d->arch.paging.total_pages--;
free_domheap_page(sp);
}
else
/* Return the size of the shadow pool, rounded up to the nearest MB */
static unsigned int shadow_get_allocation(struct domain *d)
{
- unsigned int pg = d->arch.paging.shadow.total_pages
- + d->arch.paging.shadow.p2m_pages;
+ unsigned int pg = d->arch.paging.total_pages + d->arch.paging.p2m_pages;
return ((pg >> (20 - PAGE_SHIFT))
+ ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
}
-int shadow_get_allocation_bytes(struct domain *d, uint64_t *size)
-{
- unsigned long pages = d->arch.paging.shadow.total_pages;
-
- pages += d->arch.paging.shadow.p2m_pages;
-
- *size = pages << PAGE_SHIFT;
-
- return 0;
-}
-
/**************************************************************************/
/* Hash table for storing the guest->shadow mappings.
* The table itself is an array of pointers to shadows; the shadows are then
}
/* Init the shadow memory allocation if the user hasn't done so */
- old_pages = d->arch.paging.shadow.total_pages;
+ old_pages = d->arch.paging.total_pages;
if ( old_pages < sh_min_allocation(d) )
{
paging_lock(d);
}
#endif /* (SHADOW_OPTIMIZATIONS & (SHOPT_VIRTUAL_TLB|SHOPT_OUT_OF_SYNC)) */
- if ( d->arch.paging.shadow.total_pages != 0 )
+ if ( d->arch.paging.total_pages != 0 )
{
/* Destroy all the shadows and release memory to domheap */
shadow_set_allocation(d, 0, preempted);
if (d->arch.paging.shadow.hash_table)
shadow_hash_teardown(d);
- ASSERT(d->arch.paging.shadow.total_pages == 0);
+ ASSERT(d->arch.paging.total_pages == 0);
}
/* Free the non-paged-vcpus pagetable; must happen after we've
{
SHADOW_PRINTK("dom %u final teardown starts."
" Shadow pages total = %u, free = %u, p2m=%u\n",
- d->domain_id,
- d->arch.paging.shadow.total_pages,
- d->arch.paging.shadow.free_pages,
- d->arch.paging.shadow.p2m_pages);
+ d->domain_id, d->arch.paging.total_pages,
+ d->arch.paging.free_pages, d->arch.paging.p2m_pages);
/* Double-check that the domain didn't have any shadow memory.
* It is possible for a domain that never got domain_kill()ed
* to get here with its shadow allocation intact. */
- if ( d->arch.paging.shadow.total_pages != 0 )
+ if ( d->arch.paging.total_pages != 0 )
shadow_teardown(d, NULL);
/* It is now safe to pull down the p2m map. */
shadow_set_allocation(d, 0, NULL);
SHADOW_PRINTK("dom %u final teardown done."
" Shadow pages total = %u, free = %u, p2m=%u\n",
- d->domain_id,
- d->arch.paging.shadow.total_pages,
- d->arch.paging.shadow.free_pages,
- d->arch.paging.shadow.p2m_pages);
- ASSERT(!d->arch.paging.shadow.total_pages);
- ASSERT(!d->arch.paging.shadow.free_pages);
- ASSERT(!d->arch.paging.shadow.p2m_pages);
+ d->domain_id, d->arch.paging.total_pages,
+ d->arch.paging.free_pages, d->arch.paging.p2m_pages);
+ ASSERT(d->arch.paging.p2m_pages == 0);
+ ASSERT(d->arch.paging.free_pages == 0);
+ ASSERT(d->arch.paging.total_pages == 0);
paging_unlock(d);
}
mode |= PG_SH_enable;
- if ( d->arch.paging.shadow.total_pages < sh_min_allocation(d) )
+ if ( d->arch.paging.total_pages < sh_min_allocation(d) )
{
/* Init the shadow memory allocation if the user hasn't done so */
if ( shadow_set_allocation(d, 1, NULL) != 0 )
/* Get this domain off shadows */
SHADOW_PRINTK("un-shadowing of domain %u starts."
" Shadow pages total = %u, free = %u, p2m=%u\n",
- d->domain_id,
- d->arch.paging.shadow.total_pages,
- d->arch.paging.shadow.free_pages,
- d->arch.paging.shadow.p2m_pages);
+ d->domain_id, d->arch.paging.total_pages,
+ d->arch.paging.free_pages, d->arch.paging.p2m_pages);
for_each_vcpu(d, v)
{
if ( v->arch.paging.mode )
shadow_hash_teardown(d);
SHADOW_PRINTK("un-shadowing of domain %u done."
" Shadow pages total = %u, free = %u, p2m=%u\n",
- d->domain_id,
- d->arch.paging.shadow.total_pages,
- d->arch.paging.shadow.free_pages,
- d->arch.paging.shadow.p2m_pages);
+ d->domain_id, d->arch.paging.total_pages,
+ d->arch.paging.free_pages, d->arch.paging.p2m_pages);
}
return 0;