{
if ( d->tot_pages >= d->max_pages )
goto fail;
- d->tot_pages++;
+ domain_adjust_tot_pages(d, 1);
}
page->count_info = PGC_allocated | 1;
} while ( (y = cmpxchg(&page->count_info, x, x | 1)) != x );
/* Unlink from original owner. */
- if ( !(memflags & MEMF_no_refcount) && !--d->tot_pages )
+ if ( !(memflags & MEMF_no_refcount) && !domain_adjust_tot_pages(d, -1) )
drop_dom_ref = 1;
page_list_del(page, &d->page_list);
}
page_set_owner(page, dom_cow);
- d->tot_pages--;
+ domain_adjust_tot_pages(d, -1);
drop_dom_ref = (d->tot_pages == 0);
page_list_del(page, &d->page_list);
spin_unlock(&d->page_alloc_lock);
ASSERT(page_get_owner(page) == dom_cow);
page_set_owner(page, d);
- if ( d->tot_pages++ == 0 )
+ if ( domain_adjust_tot_pages(d, 1) == 1 )
get_domain(d);
page_list_add_tail(page, &d->page_list);
spin_unlock(&d->page_alloc_lock);
}
/* Okay, add the page to 'e'. */
- if ( unlikely(e->tot_pages++ == 0) )
+ if ( unlikely(domain_adjust_tot_pages(e, 1) == 1) )
get_knownalive_domain(e);
page_list_add_tail(page, &e->page_list);
page_set_owner(page, e);
(j * (1UL << exch.out.extent_order)));
spin_lock(&d->page_alloc_lock);
- d->tot_pages -= dec_count;
+ domain_adjust_tot_pages(d, -dec_count);
drop_dom_ref = (dec_count && !d->tot_pages);
spin_unlock(&d->page_alloc_lock);
static DEFINE_SPINLOCK(heap_lock);
+unsigned long domain_adjust_tot_pages(struct domain *d, long pages)
+{
+ ASSERT(spin_is_locked(&d->page_alloc_lock));
+ return d->tot_pages += pages;
+}
+
static unsigned long init_node_heap(int node, unsigned long mfn,
unsigned long nr, bool_t *use_tail)
{
if ( unlikely(d->tot_pages == 0) )
get_knownalive_domain(d);
- d->tot_pages += 1 << order;
+ domain_adjust_tot_pages(d, 1 << order);
}
for ( i = 0; i < (1 << order); i++ )
page_list_del2(&pg[i], &d->page_list, &d->arch.relmem_list);
}
- d->tot_pages -= 1 << order;
+ domain_adjust_tot_pages(d, -(1 << order));
drop_dom_ref = (d->tot_pages == 0);
spin_unlock_recursive(&d->page_alloc_lock);
#define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
#define free_xenheap_page(v) (free_xenheap_pages(v,0))
+unsigned long domain_adjust_tot_pages(struct domain *d, long pages);
+
/* Domain suballocator. These functions are *not* interrupt-safe.*/
void init_domheap_pages(paddr_t ps, paddr_t pe);
struct page_info *alloc_domheap_pages(