static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
{
    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+    struct page_info *page;

    if ( iorp->page )
    {
        /*
         * If a guest frame has already been mapped (which may happen
         * on demand if hvm_get_ioreq_server_info() is called), then
         * allocating a page is not permitted.
         */
        if ( !gfn_eq(iorp->gfn, INVALID_GFN) )
            return -EPERM;

        return 0;
    }

    /*
     * Allocated IOREQ server pages are assigned to the emulating
     * domain, not the target domain. This is safe because the emulating
     * domain cannot be destroyed until the ioreq server is destroyed.
     * Also we must use MEMF_no_refcount otherwise page allocation
     * could fail if the emulating domain has already reached its
     * maximum allocation.
     */
-    iorp->page = alloc_domheap_page(s->emulator, MEMF_no_refcount);
+    page = alloc_domheap_page(s->emulator, MEMF_no_refcount);

-    if ( !iorp->page )
+    if ( !page )
        return -ENOMEM;

-    if ( !get_page_type(iorp->page, PGT_writable_page) )
-        goto fail1;
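+    /*
+     * Note: get_page_and_type() takes both a general reference and a
+     * writable type reference on the page; these pair with the
+     * put_page_and_type() calls in the failure path below and in
+     * hvm_free_ioreq_mfn().
+     */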
+    if ( !get_page_and_type(page, s->emulator, PGT_writable_page) )
+    {
+        /*
+         * The domain can't possibly know about this page yet, so failure
+         * here is a clear indication of something fishy going on.
+         */
+        domain_crash(s->emulator);
+        return -ENODATA;
+    }

-    iorp->va = __map_domain_page_global(iorp->page);
+    iorp->va = __map_domain_page_global(page);

    if ( !iorp->va )
-        goto fail2;
+        goto fail;
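
+    /*
+     * Only publish the page via iorp->page once all of its references
+     * (allocation, general and type) are held, so the release paths
+     * below and in hvm_free_ioreq_mfn() see a consistent reference
+     * layout.
+     */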
+    iorp->page = page;
    clear_page(iorp->va);

    return 0;

- fail2:
-    put_page_type(iorp->page);
-
- fail1:
-    put_page(iorp->page);
-    iorp->page = NULL;
+ fail:
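+    /*
+     * At this point the page still holds its allocation reference
+     * (_PGC_allocated is set in count_info) as well as the general and
+     * type references taken by get_page_and_type() above; all of them
+     * must be dropped for the page to actually be freed.
+     */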
+    if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+        put_page(page);
+
+    put_page_and_type(page);

    return -ENOMEM;
}

static void hvm_free_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
{
    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+    struct page_info *page = iorp->page;

-    if ( !iorp->page )
+    if ( !page )
        return;
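
+    /*
+     * Detach the page from the ioreq server state before any of its
+     * references are dropped, so that nothing can find a stale pointer
+     * once the page becomes freeable.
+     */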
+    iorp->page = NULL;
+
    unmap_domain_page_global(iorp->va);
    iorp->va = NULL;

-    put_page_and_type(iorp->page);
-    iorp->page = NULL;
+    /*
+     * Check whether we need to clear the allocation reference before
+     * dropping the explicit references taken by get_page_and_type().
+     */
+    if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+        put_page(page);
+
+    put_page_and_type(page);
}

bool is_ioreq_server_page(struct domain *d, const struct page_info *page)