*/
continue;
- if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
- put_page(page);
-
+ put_page_alloc_ref(page);
put_page(page);
if ( hypercall_preempt_check() )
BUG();
}
- if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
- put_page(page);
+ put_page_alloc_ref(page);
/*
* Forcibly invalidate top-most, still valid page tables at this point
return 0;
fail:
- if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
- put_page(page);
+ put_page_alloc_ref(page);
put_page_and_type(page);
return -ENOMEM;
unmap_domain_page_global(iorp->va);
iorp->va = NULL;
- /*
- * Check whether we need to clear the allocation reference before
- * dropping the explicit references taken by get_page_and_type().
- */
- if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
- put_page(page);
-
+ put_page_alloc_ref(page);
put_page_and_type(page);
}
void free_shared_domheap_page(struct page_info *page)
{
- if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
- put_page(page);
+ put_page_alloc_ref(page);
if ( !test_and_clear_bit(_PGC_xen_heap, &page->count_info) )
ASSERT_UNREACHABLE();
page->u.inuse.type_info = 0;
mem_sharing_page_unlock(firstpg);
/* Free the client page */
- if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
- put_page(cpage);
+ put_page_alloc_ref(cpage);
put_page(cpage);
/* We managed to free a domain page. */
ret = -EOVERFLOW;
goto err_unlock;
}
- if ( test_and_clear_bit(_PGC_allocated, &cpage->count_info) )
- put_page(cpage);
+ put_page_alloc_ref(cpage);
put_page(cpage);
}
}
domain_crash(d);
return -EOVERFLOW;
}
- if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
- put_page(page);
+ put_page_alloc_ref(page);
put_page(page);
}
put_gfn(d, gfn);
if ( test_and_clear_bit(_PGT_pinned, &(page+i)->u.inuse.type_info) )
put_page_and_type(page + i);
- if ( test_and_clear_bit(_PGC_allocated, &(page+i)->count_info) )
- put_page(page + i);
-
+ put_page_alloc_ref(page + i);
put_page(page + i);
if ( preemptible && pod_target != p2m->pod.count &&
goto out_put;
/* Decrement guest domain's ref count of the page */
- if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
- put_page(page);
+ put_page_alloc_ref(page);
/* Remove mapping from p2m table */
ret = p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K,
}
BUG_ON(page_get_owner(pg) != d);
- if ( test_and_clear_bit(_PGC_allocated, &pg->count_info) )
- put_page(pg);
+ put_page_alloc_ref(pg);
if ( pg->count_info & ~PGC_xen_heap )
{
* For this purpose (and to match populate_physmap() behavior), the page
* is kept allocated.
*/
- if ( !rc && !is_domain_direct_mapped(d) &&
- test_and_clear_bit(_PGC_allocated, &page->count_info) )
- put_page(page);
+ if ( !rc && !is_domain_direct_mapped(d) )
+ put_page_alloc_ref(page);
put_page(page);
struct page_info *page = mfn_to_page(mfn_add(mfn, i));
BUG_ON(page_get_owner(page) != current->domain);
- if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
- put_page(page);
+ put_page_alloc_ref(page);
}
}
share_xen_page_with_guest(page, dom_xen, flags);
}
+static inline void put_page_alloc_ref(struct page_info *page)
+{
+ /*
+ * Whenever a page is assigned to a domain, the _PGC_allocated bit is
+ * set and the reference count is set to at least 1. This function
+ * clears that 'allocation reference', but it is unsafe to do so
+ * without the caller holding an additional reference. I.e. the
+ * allocation reference must never be the last reference held.
+ */
+ BUG_ON((page->count_info & PGC_count_mask) <= 1);
+ if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+ put_page(page);
+}
+
#endif /* __XEN_MM_H__ */
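
Usage note: every call site converted above already holds at least one
reference beyond the allocation reference when put_page_alloc_ref() is
invoked (hence the put_page() that typically follows it), so the
BUG_ON() in the helper cannot fire for these callers. The sketch below
illustrates that pattern; it is a minimal, hypothetical example against
the usual Xen MM API (get_page()/put_page()), not code from this patch,
and free_example_page() is an invented name.

/*
 * Hypothetical caller (not part of this patch): drop a page's
 * allocation reference safely by holding our own reference across the
 * call, mirroring the converted call sites above.
 */
static void free_example_page(struct domain *d, struct page_info *page)
{
    /* Take the caller's own reference; fails if the page is not d's. */
    if ( !get_page(page, d) )
        return;

    /*
     * Drop the allocation reference. The reference taken above
     * guarantees it is not the last one, satisfying the BUG_ON() in
     * put_page_alloc_ref().
     */
    put_page_alloc_ref(page);

    /* Drop our reference; the page can now actually be freed. */
    put_page(page);
}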