if ( d == NULL )
return -EINVAL;
- if ( d->tot_pages != 0 )
+ if ( domain_tot_pages(d) != 0 )
return -EBUSY;
if ( d->arch.type == type )
return 0;
printk("Memory pages belonging to domain %u:\n", d->domain_id);
- if ( d->tot_pages >= 10 && d->is_dying < DOMDYING_dead )
+ if ( domain_tot_pages(d) >= 10 && d->is_dying < DOMDYING_dead )
{
printk(" DomPage list too long to display\n");
}
else if ( rc >= 0 )
{
p2m = p2m_get_hostp2m(d);
- target.tot_pages = d->tot_pages;
+ target.tot_pages = domain_tot_pages(d);
target.pod_cache_pages = p2m->pod.count;
target.pod_entries = p2m->pod.entry_count;
* The following equations should hold:
* 0 <= P <= T <= B <= M
* d->arch.p2m->pod.entry_count == B - P
- * d->tot_pages == P + d->arch.p2m->pod.count
+ * domain_tot_pages(d) == P + d->arch.p2m->pod.count
*
* Now we have the following potential cases to cover:
 * B < T': Set the PoD cache size equal to the number of outstanding PoD
pod_lock(p2m);
/* P == B: Nothing to do (unless the guest is being created). */
- populated = d->tot_pages - p2m->pod.count;
+ populated = domain_tot_pages(d) - p2m->pod.count;
if ( populated > 0 && p2m->pod.entry_count == 0 )
goto out;
* T' < B: Don't reduce the cache size; let the balloon driver
* take care of it.
*/
- if ( target < d->tot_pages )
+ if ( target < domain_tot_pages(d) )
goto out;
pod_target = target - populated;
pod_unlock(p2m);
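To make the bookkeeping above concrete, here is a small standalone sketch (plain C with hypothetical numbers, not Xen code) that checks the two invariants quoted in the comment, entry_count == B - P and domain_tot_pages(d) == P + pod.count:

    #include <assert.h>

    int main(void)
    {
        /* Illustrative snapshot: P populated pages out of B PoD-backed GFNs. */
        unsigned int P = 100, B = 160;
        unsigned int pod_count = 60;             /* stands in for p2m->pod.count */
        unsigned int pod_entry_count = B - P;    /* p2m->pod.entry_count */
        unsigned int tot_pages = P + pod_count;  /* domain_tot_pages(d) */

        assert(pod_entry_count == 60);
        assert(tot_pages == 160);
        /* "populated" as computed in the hunk above equals P again. */
        assert(tot_pages - pod_count == P);
        return 0;
    }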
printk("%s: Dom%d out of PoD memory! (tot=%"PRIu32" ents=%ld dom%d)\n",
- __func__, d->domain_id, d->tot_pages, p2m->pod.entry_count,
- current->domain->domain_id);
+ __func__, d->domain_id, domain_tot_pages(d),
+ p2m->pod.entry_count, current->domain->domain_id);
domain_crash(d);
return false;
out_fail:
* up of slot zero and an LAPIC page), plus one for HVM's 1-to-1 pagetable.
*/
return shadow_min_acceptable_pages(d) +
- max(max(d->tot_pages / 256,
+ max(max(domain_tot_pages(d) / 256,
is_hvm_domain(d) ? CONFIG_PAGING_LEVELS + 2 : 0U) +
is_hvm_domain(d),
d->arch.paging.shadow.p2m_pages);
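For a feel of the lower bound being returned here, a rough worked instance under stated assumptions (a 64-bit HVM guest, CONFIG_PAGING_LEVELS == 4, and illustrative page counts; shadow_min_acceptable_pages() is left symbolic):

    #include <assert.h>

    int main(void)
    {
        unsigned int tot_pages = 262144;  /* 1 GiB in 4 KiB pages (assumed) */
        unsigned int is_hvm = 1;
        unsigned int levels = 4;          /* CONFIG_PAGING_LEVELS (assumed) */
        unsigned int p2m_pages = 512;     /* illustrative */

        unsigned int ratio = tot_pages / 256;            /* 1024 */
        unsigned int hvm_min = is_hvm ? levels + 2 : 0;  /* 6 */
        unsigned int inner = (ratio > hvm_min ? ratio : hvm_min) + is_hvm;
        unsigned int extra = inner > p2m_pages ? inner : p2m_pages;

        assert(extra == 1025);  /* added to shadow_min_acceptable_pages() */
        return 0;
    }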
seg, bus, slot, func, d->domain_id);
if ( !is_hardware_domain(d) &&
/* Assume a domain without memory has no mappings yet. */
- (!is_hardware_domain(currd) || d->tot_pages) )
+ (!is_hardware_domain(currd) || domain_tot_pages(d)) )
domain_crash(d);
/* XXX How to deal with existing mappings? */
}
{
process_pending_softirqs();
- printk("Domain %u (total: %u):\n", d->domain_id, d->tot_pages);
+ printk("Domain %u (total: %u):\n", d->domain_id, domain_tot_pages(d));
for_each_online_node ( i )
page_num_node[i] = 0;
while ( vphysmap_start < vphysmap_end )
{
- if ( d->tot_pages + ((round_pgup(vphysmap_end) - vphysmap_start)
- >> PAGE_SHIFT) + 3 > nr_pages )
+ if ( domain_tot_pages(d) +
+ ((round_pgup(vphysmap_end) - vphysmap_start) >> PAGE_SHIFT) +
+ 3 > nr_pages )
panic("Dom0 allocation too small for initial P->M table\n");
if ( pl1e )
{
struct page_info *pg2;
- if ( d->tot_pages + (1 << order) > d->max_pages )
+ if ( domain_tot_pages(d) + (1 << order) > d->max_pages )
continue;
pg2 = alloc_domheap_pages(d, order, MEMF_exact_node | MEMF_no_scrub);
if ( pg2 > page )
if ( page == NULL )
panic("Not enough RAM for domain 0 allocation\n");
alloc_spfn = mfn_x(page_to_mfn(page));
- alloc_epfn = alloc_spfn + d->tot_pages;
+ alloc_epfn = alloc_spfn + domain_tot_pages(d);
if ( initrd_len )
{
initrd_pfn = vinitrd_start ?
(vinitrd_start - v_start) >> PAGE_SHIFT :
- d->tot_pages;
+ domain_tot_pages(d);
initrd_mfn = mfn = initrd->mod_start;
count = PFN_UP(initrd_len);
if ( d->arch.physaddr_bitsize &&
printk("PHYSICAL MEMORY ARRANGEMENT:\n"
" Dom0 alloc.: %"PRIpaddr"->%"PRIpaddr,
pfn_to_paddr(alloc_spfn), pfn_to_paddr(alloc_epfn));
- if ( d->tot_pages < nr_pages )
+ if ( domain_tot_pages(d) < nr_pages )
printk(" (%lu pages to be allocated)",
- nr_pages - d->tot_pages);
+ nr_pages - domain_tot_pages(d));
if ( initrd )
{
mpt_alloc = (paddr_t)initrd->mod_start << PAGE_SHIFT;
snprintf(si->magic, sizeof(si->magic), "xen-3.0-x86_%d%s",
elf_64bit(&elf) ? 64 : 32, parms.pae ? "p" : "");
- count = d->tot_pages;
+ count = domain_tot_pages(d);
/* Set up the phys->machine table if not part of the initial mapping. */
if ( parms.p2m_base != UNSET_ADDR )
process_pending_softirqs();
}
si->first_p2m_pfn = pfn;
- si->nr_p2m_frames = d->tot_pages - count;
+ si->nr_p2m_frames = domain_tot_pages(d) - count;
page_list_for_each ( page, &d->page_list )
{
mfn = mfn_x(page_to_mfn(page));
process_pending_softirqs();
}
}
- BUG_ON(pfn != d->tot_pages);
+ BUG_ON(pfn != domain_tot_pages(d));
#ifndef NDEBUG
alloc_epfn += PFN_UP(initrd_len) + si->nr_p2m_frames;
#endif
while ( pfn < nr_pages )
{
- if ( (page = alloc_chunk(d, nr_pages - d->tot_pages)) == NULL )
+ if ( (page = alloc_chunk(d, nr_pages - domain_tot_pages(d))) == NULL )
panic("Not enough RAM for DOM0 reservation\n");
- while ( pfn < d->tot_pages )
+ while ( pfn < domain_tot_pages(d) )
{
mfn = mfn_x(page_to_mfn(page));
#ifndef NDEBUG
BUILD_BUG_ON(offsetof(struct shared_info, vcpu_info) != 0);
- if ( is_hvm_domain(d) || d->tot_pages != 0 )
+ if ( is_hvm_domain(d) || domain_tot_pages(d) != 0 )
return -EACCES;
if ( is_pv_32bit_domain(d) )
return 0;
* Set the max pages to the current number of pages to prevent the
* guest from depleting the shim memory pool.
*/
- d->max_pages = d->tot_pages;
+ d->max_pages = domain_tot_pages(d);
}
static void write_start_info(struct domain *d)
snprintf(si->magic, sizeof(si->magic), "xen-3.0-x86_%s",
is_pv_32bit_domain(d) ? "32p" : "64");
- si->nr_pages = d->tot_pages;
+ si->nr_pages = domain_tot_pages(d);
si->shared_info = virt_to_maddr(d->shared_info);
si->flags = 0;
BUG_ON(xen_hypercall_hvm_get_param(HVM_PARAM_STORE_PFN, &si->store_mfn));
xsm_security_domaininfo(d, info);
- info->tot_pages = d->tot_pages;
+ info->tot_pages = domain_tot_pages(d);
info->max_pages = d->max_pages;
info->outstanding_pages = d->outstanding_pages;
info->shr_pages = atomic_read(&d->shr_pages);
* pages when it is dying.
*/
if ( unlikely(e->is_dying) ||
- unlikely(e->tot_pages >= e->max_pages) )
+ unlikely(domain_tot_pages(e) >= e->max_pages) )
{
spin_unlock(&e->page_alloc_lock);
else
gdprintk(XENLOG_INFO,
"Transferee d%d has no headroom (tot %u, max %u)\n",
- e->domain_id, e->tot_pages, e->max_pages);
+ e->domain_id, domain_tot_pages(e), e->max_pages);
gop.status = GNTST_general_error;
goto unlock_and_copyback;
atomic_read(&d->pause_count));
printk(" nr_pages=%d xenheap_pages=%d shared_pages=%u paged_pages=%u "
"dirty_cpus={%*pbl} max_pages=%u\n",
- d->tot_pages, d->xenheap_pages, atomic_read(&d->shr_pages),
+ domain_tot_pages(d), d->xenheap_pages, atomic_read(&d->shr_pages),
atomic_read(&d->paged_pages), CPUMASK_PR(d->dirty_cpumask),
d->max_pages);
printk(" handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
switch ( op )
{
case XENMEM_current_reservation:
- rc = d->tot_pages;
+ rc = domain_tot_pages(d);
break;
case XENMEM_maximum_reservation:
rc = d->max_pages;
goto out;
}
- /* disallow a claim not exceeding current tot_pages or above max_pages */
- if ( (pages <= d->tot_pages) || (pages > d->max_pages) )
+ /*
+  * Disallow claims that do not exceed the current domain_tot_pages(),
+  * as well as claims above max_pages.
+  */
+ if ( (pages <= domain_tot_pages(d)) || (pages > d->max_pages) )
{
ret = -EINVAL;
goto out;
/*
* Note, if domain has already allocated memory before making a claim
- * then the claim must take tot_pages into account
+ * then the claim must take domain_tot_pages() into account
*/
- claim = pages - d->tot_pages;
+ claim = pages - domain_tot_pages(d);
if ( claim > avail_pages )
goto out;
if ( !(memflags & MEMF_no_refcount) )
{
- if ( unlikely((d->tot_pages + (1 << order)) > d->max_pages) )
+ unsigned int tot_pages = domain_tot_pages(d) + (1 << order);
+
+ if ( unlikely(tot_pages > d->max_pages) )
{
gprintk(XENLOG_INFO, "Over-allocation for domain %u: "
- "%u > %u\n", d->domain_id,
- d->tot_pages + (1 << order), d->max_pages);
+ "%u > %u\n", d->domain_id, tot_pages, d->max_pages);
rc = -E2BIG;
goto out;
}
*
* Note that a valid claim may be staked even after memory has been
* allocated for a domain. In this case, the claim is not incremental,
- * i.e. if the domain's tot_pages is 3, and a claim is staked for 10,
- * only 7 additional pages are claimed.
+ * i.e. if the domain's total page count is 3, and a claim is staked
+ * for 10, only 7 additional pages are claimed.
*
* Caller must be privileged or the hypercall fails.
*/
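A quick numeric check of the non-incremental semantics spelled out above, reusing the 3-page/10-page example from the comment:

    #include <assert.h>

    int main(void)
    {
        unsigned int tot_pages = 3;  /* pages the domain already possesses */
        unsigned int pages = 10;     /* size of the claim being staked */

        /* Mirrors "claim = pages - domain_tot_pages(d)" from the earlier hunk. */
        unsigned int claim = pages - tot_pages;

        assert(claim == 7);  /* only 7 additional pages end up claimed */
        return 0;
    }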
spinlock_t page_alloc_lock; /* protects all the following fields */
struct page_list_head page_list; /* linked list */
struct page_list_head xenpage_list; /* linked list (size xenheap_pages) */
- unsigned int tot_pages; /* number of pages currently possesed */
- unsigned int xenheap_pages; /* # pages allocated from Xen heap */
- unsigned int outstanding_pages; /* pages claimed but not possessed */
- unsigned int max_pages; /* maximum value for tot_pages */
- atomic_t shr_pages; /* number of shared pages */
- atomic_t paged_pages; /* number of paged-out pages */
+
+ /*
+ * This field should only be directly accessed by domain_adjust_tot_pages()
+ * and the domain_tot_pages() helper function defined below.
+ */
+ unsigned int tot_pages;
+
+ unsigned int xenheap_pages; /* pages allocated from Xen heap */
+ unsigned int outstanding_pages; /* pages claimed but not possessed */
+ unsigned int max_pages; /* maximum value for domain_tot_pages() */
+ atomic_t shr_pages; /* shared pages */
+ atomic_t paged_pages; /* paged-out pages */
/* Scheduling. */
void *sched_priv; /* scheduler-specific data */
#endif
};
+/* Return the number of pages currently possessed by the domain */
+static inline unsigned int domain_tot_pages(const struct domain *d)
+{
+ return d->tot_pages;
+}
+
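As an aside on why the helper is worth the churn: with every reader funnelled through one inline function, the definition of a domain's page total can later be changed in exactly one place. A minimal sketch of the same pattern (simplified types and hypothetical names, not the Xen code):

    #include <stdio.h>

    struct counts {
        unsigned int tot_pages;         /* touch only via the helper below */
        unsigned int outstanding_pages; /* claimed but not yet possessed */
    };

    /*
     * Single point of truth: if the accounting ever needs to fold in,
     * say, outstanding claims, only this function changes, not the
     * call sites.
     */
    static inline unsigned int counts_tot_pages(const struct counts *c)
    {
        return c->tot_pages;
    }

    int main(void)
    {
        struct counts c = { .tot_pages = 42, .outstanding_pages = 8 };

        printf("tot=%u\n", counts_tot_pages(&c));
        return 0;
    }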
/* Protect updates/reads (resp.) of domain_list and domain_hash. */
extern spinlock_t domlist_update_lock;
extern rcu_read_lock_t domlist_read_lock;