ASSERT(paging_locked_by_me(d));
+ if ( unlikely(d->is_dying) )
+ return NULL;
+
pg = page_list_remove_head(&d->arch.paging.hap.freelist);
if ( unlikely(!pg) )
return NULL;
d->arch.paging.hap.p2m_pages++;
ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask));
}
- else if ( !d->arch.paging.p2m_alloc_failed )
+ else if ( !d->arch.paging.p2m_alloc_failed && !d->is_dying )
{
d->arch.paging.p2m_alloc_failed = 1;
dprintk(XENLOG_ERR, "d%i failed to allocate from HAP pool\n",
if ( d->arch.paging.shadow.free_pages >= pages )
return true;
+ if ( unlikely(d->is_dying) )
+ /* No reclaim when the domain is dying, teardown will take care of it. */
+ return false;
+
/* Shouldn't have enabled shadows if we've no vcpus. */
ASSERT(d->vcpu && d->vcpu[0]);
d->arch.paging.shadow.free_pages,
d->arch.paging.shadow.p2m_pages);
- ASSERT(d->is_dying);
+ ASSERT_UNREACHABLE();
flush_tlb_mask(d->dirty_cpumask);
* to avoid freeing shadows that the caller is currently working on. */
bool shadow_prealloc(struct domain *d, unsigned int type, unsigned int count)
{
- bool ret = _shadow_prealloc(d, shadow_size(type) * count);
+ bool ret;
- if ( !ret && !d->is_dying &&
- (!d->is_shutting_down || d->shutdown_code != SHUTDOWN_crash) )
+ if ( unlikely(d->is_dying) )
+ return false;
+
+ ret = _shadow_prealloc(d, shadow_size(type) * count);
+ if ( !ret && (!d->is_shutting_down || d->shutdown_code != SHUTDOWN_crash) )
/*
 * Failing to allocate memory required for shadow usage can only result in
 * a domain crash, do it here rather than relying on every caller to do it.
{
struct page_info *pg = NULL;
+ if ( unlikely(d->is_dying) )
+ return NULL;
+
/* This is called both from the p2m code (which never holds the
* paging lock) and the log-dirty code (which always does). */
paging_lock_recursive(d);