{
unsigned long gfn = (unsigned long)(addr >> PAGE_SHIFT);
struct domain *d = current->domain;
+ int rc;
if ( new_addr != 0 || (flags & GNTMAP_contains_pte) )
return GNTST_general_error;
- guest_physmap_remove_page(d, gfn, mfn, 0);
+ rc = guest_physmap_remove_page(d, gfn, mfn, 0);
- return GNTST_okay;
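+ /* Map any p2m failure onto the generic grant status code. */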
+ return rc ? GNTST_general_error : GNTST_okay;
}
int is_iomem_page(unsigned long mfn)
pfn_to_paddr(mfn), MATTR_MEM, t);
}
-void guest_physmap_remove_page(struct domain *d,
- unsigned long gpfn,
- unsigned long mfn, unsigned int page_order)
+int guest_physmap_remove_page(struct domain *d, unsigned long gfn,
+ unsigned long mfn, unsigned int page_order)
{
- apply_p2m_changes(d, REMOVE,
- pfn_to_paddr(gpfn),
- pfn_to_paddr(gpfn + (1<<page_order)),
- pfn_to_paddr(mfn), MATTR_MEM, p2m_invalid);
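+ /* Propagate the result of the p2m update to the caller. */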
+ return apply_p2m_changes(d, REMOVE,
+ pfn_to_paddr(gfn),
+ pfn_to_paddr(gfn + (1 << page_order)),
+ pfn_to_paddr(mfn), MATTR_MEM, p2m_invalid);
}
int p2m_alloc_table(struct domain *d)
static void hvm_remove_ioreq_gmfn(
struct domain *d, struct hvm_ioreq_page *iorp)
{
- guest_physmap_remove_page(d, iorp->gmfn,
- page_to_mfn(iorp->page), 0);
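+ /* Unhooking the ioreq gfn is not expected to fail; if it does, the
+ * domain is left in an undefined state and is crashed. */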
+ if ( guest_physmap_remove_page(d, iorp->gmfn,
+ page_to_mfn(iorp->page), 0) )
+ domain_crash(d);
clear_page(iorp->va);
}
type, mfn_x(old_mfn), frame);
return GNTST_general_error;
}
- guest_physmap_remove_page(d, gfn, frame, PAGE_ORDER_4K);
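+ /* A failed unmap is reported as a grant error after releasing the gfn. */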
+ if ( guest_physmap_remove_page(d, gfn, frame, PAGE_ORDER_4K) )
+ {
+ put_gfn(d, gfn);
+ return GNTST_general_error;
+ }
put_gfn(d, gfn);
return GNTST_okay;
struct page_info *page = NULL;
unsigned long gfn = 0; /* gcc ... */
unsigned long prev_mfn, mfn = 0, old_gpfn;
- int rc;
+ int rc = 0;
p2m_type_t p2mt;
switch ( space )
{
if ( is_xen_heap_mfn(prev_mfn) )
/* Xen heap frames are simply unhooked from this phys slot. */
- guest_physmap_remove_page(d, gpfn, prev_mfn, PAGE_ORDER_4K);
+ rc = guest_physmap_remove_page(d, gpfn, prev_mfn, PAGE_ORDER_4K);
else
/* Normal domain memory is freed, to avoid leaking memory. */
- guest_remove_page(d, gpfn);
+ rc = guest_remove_page(d, gpfn);
}
/* In the XENMAPSPACE_gmfn case we still hold a ref on the old page. */
put_gfn(d, gpfn);
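+ /* Bail out if removing/freeing the previous page failed. */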
+ if ( rc )
+ goto put_both;
+
/* Unmap from old location, if any. */
old_gpfn = get_gpfn_from_mfn(mfn);
ASSERT( old_gpfn != SHARED_M2P_ENTRY );
if ( space == XENMAPSPACE_gmfn || space == XENMAPSPACE_gmfn_range )
ASSERT( old_gpfn == gfn );
if ( old_gpfn != INVALID_M2P_ENTRY )
- guest_physmap_remove_page(d, old_gpfn, mfn, PAGE_ORDER_4K);
+ rc = guest_physmap_remove_page(d, old_gpfn, mfn, PAGE_ORDER_4K);
/* Map at new location. */
- rc = guest_physmap_add_page(d, gpfn, mfn, PAGE_ORDER_4K);
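+ /* Establish the new mapping only if the old one was removed cleanly. */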
+ if ( !rc )
+ rc = guest_physmap_add_page(d, gpfn, mfn, PAGE_ORDER_4K);
+ put_both:
/* In the XENMAPSPACE_gmfn, we took a ref of the gfn at the top */
if ( space == XENMAPSPACE_gmfn || space == XENMAPSPACE_gmfn_range )
put_gfn(d, gfn);
p2m->default_access);
}
-void
+int
guest_physmap_remove_page(struct domain *d, unsigned long gfn,
unsigned long mfn, unsigned int page_order)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ int rc;
gfn_lock(p2m, gfn, page_order);
- p2m_remove_page(p2m, gfn, mfn, page_order);
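+ /* Record the result of the underlying p2m update for the caller. */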
+ rc = p2m_remove_page(p2m, gfn, mfn, page_order);
gfn_unlock(p2m, gfn, page_order);
+
+ return rc;
}
int
{
if ( is_xen_heap_mfn(prev_mfn) )
/* Xen heap frames are simply unhooked from this phys slot */
- guest_physmap_remove_page(tdom, gpfn, prev_mfn, 0);
+ rc = guest_physmap_remove_page(tdom, gpfn, prev_mfn, 0);
else
/* Normal domain memory is freed, to avoid leaking memory. */
- guest_remove_page(tdom, gpfn);
+ rc = guest_remove_page(tdom, gpfn);
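+ /* On failure, drop the foreign page reference and return. */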
+ if ( rc )
+ goto put_both;
}
/*
* Create the new mapping. Can't use guest_physmap_add_page() because it
"gpfn:%lx mfn:%lx fgfn:%lx td:%d fd:%d\n",
gpfn, mfn, fgfn, tdom->domain_id, fdom->domain_id);
+ put_both:
put_page(page);
/*
for ( i = 0; i < count; i++ )
{
bool_t okay;
+ int rc;
if ( i && hypercall_preempt_check() )
return i;
goto copyback;
}
- guest_physmap_remove_page(d, gop.mfn, mfn, 0);
+ rc = guest_physmap_remove_page(d, gop.mfn, mfn, 0);
gnttab_flush_tlb(d);
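+ /* Keep the TLB flush unconditional: the p2m may have been partially
+ * updated even when the removal reports failure. */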
+ if ( rc )
+ {
+ gdprintk(XENLOG_INFO,
+ "gnttab_transfer: can't remove GFN %"PRI_xen_pfn" (MFN %lx)\n",
+ gop.mfn, mfn);
+ gop.status = GNTST_general_error;
+ goto put_gfn_and_copyback;
+ }
/* Find the target domain. */
if ( unlikely((e = rcu_lock_domain_by_id(gop.domid)) == NULL) )
{
- put_gfn(d, gop.mfn);
gdprintk(XENLOG_INFO, "gnttab_transfer: can't find domain %d\n",
gop.domid);
- page->count_info &= ~(PGC_count_mask|PGC_allocated);
- free_domheap_page(page);
gop.status = GNTST_bad_domain;
- goto copyback;
+ goto put_gfn_and_copyback;
}
if ( xsm_grant_transfer(XSM_HOOK, d, e) )
{
- put_gfn(d, gop.mfn);
gop.status = GNTST_permission_denied;
unlock_and_copyback:
rcu_unlock_domain(e);
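+ /* Common error exit: release the gfn ref and free the transfer page. */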
+ put_gfn_and_copyback:
+ put_gfn(d, gop.mfn);
page->count_info &= ~(PGC_count_mask|PGC_allocated);
free_domheap_page(page);
goto copyback;
"Transferee (d%d) has no headroom (tot %u, max %u)\n",
e->domain_id, e->tot_pages, e->max_pages);
- rcu_unlock_domain(e);
- put_gfn(d, gop.mfn);
- page->count_info &= ~(PGC_count_mask|PGC_allocated);
- free_domheap_page(page);
gop.status = GNTST_general_error;
- goto copyback;
+ goto unlock_and_copyback;
}
/* Okay, add the page to 'e'. */
if ( drop_dom_ref )
put_domain(e);
- rcu_unlock_domain(e);
-
- put_gfn(d, gop.mfn);
- page->count_info &= ~(PGC_count_mask|PGC_allocated);
- free_domheap_page(page);
gop.status = GNTST_general_error;
- goto copyback;
+ goto unlock_and_copyback;
}
page_list_add_tail(page, &e->page_list);
mfn = mfn_x(get_gfn_query(d, gmfn, &p2mt));
if ( unlikely(p2m_is_paging(p2mt)) )
{
- guest_physmap_remove_page(d, gmfn, mfn, 0);
+ rc = guest_physmap_remove_page(d, gmfn, mfn, 0);
put_gfn(d, gmfn);
+
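+ /* Nothing more can safely be done if the p2m entry wasn't removed. */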
+ if ( rc )
+ return rc;
+
/* If the page hasn't yet been paged out, there is an
* actual page that needs to be released. */
if ( p2mt == p2m_ram_paging_out )
return -ENXIO;
}
- if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
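+ /* Remove the page from the p2m first; drop its type/allocation
+ * references only if that succeeded. */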
+ rc = guest_physmap_remove_page(d, gmfn, mfn, 0);
+
+ if ( !rc && test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
put_page_and_type(page);
- if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+ if ( !rc && test_and_clear_bit(_PGC_allocated, &page->count_info) )
put_page(page);
- guest_physmap_remove_page(d, gmfn, mfn, 0);
-
put_page(page);
put_gfn(d, gmfn);
- return 0;
+ return rc;
}
static void decrease_reservation(struct memop_args *a)
gfn = mfn_to_gmfn(d, mfn);
/* Pages were unshared above */
BUG_ON(SHARED_M2P(gfn));
- guest_physmap_remove_page(d, gfn, mfn, 0);
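+ /* The M2P entry was just validated, so failure to remove the page
+ * is unexpected; crash the domain rather than continue. */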
+ if ( guest_physmap_remove_page(d, gfn, mfn, 0) )
+ domain_crash(d);
put_page(page);
}
page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
if ( page )
{
- guest_physmap_remove_page(d, xrfp.gpfn, page_to_mfn(page), 0);
+ rc = guest_physmap_remove_page(d, xrfp.gpfn, page_to_mfn(page), 0);
put_page(page);
}
else
if ( !is_domain_direct_mapped(d) )
return -EINVAL;
- guest_physmap_remove_page(d, gfn, gfn, 0);
-
- return 0;
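+ /* Direct-mapped domains have gfn == mfn, hence gfn is passed for both. */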
+ return guest_physmap_remove_page(d, gfn, gfn, 0);
}
static const struct iommu_ops arm_smmu_iommu_ops = {
return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
}
-void guest_physmap_remove_page(struct domain *d,
- unsigned long gpfn,
- unsigned long mfn, unsigned int page_order);
-
unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn);
/*
return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
}
-/* Remove a page from a domain's p2m table */
-void guest_physmap_remove_page(struct domain *d,
- unsigned long gfn,
- unsigned long mfn, unsigned int page_order);
-
/* Set a p2m range as populate-on-demand */
int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
unsigned int order);
unsigned long idx, xen_pfn_t gpfn);
/* Returns 0 on success, or negative on error. */
-int guest_remove_page(struct domain *d, unsigned long gmfn);
+int __must_check guest_remove_page(struct domain *d, unsigned long gmfn);
#define RAM_TYPE_CONVENTIONAL 0x00000001
#define RAM_TYPE_RESERVED 0x00000002
#ifndef _XEN_P2M_COMMON_H
#define _XEN_P2M_COMMON_H
+#include <xen/mm.h>
#include <public/mem_event.h>
/*
/* NOTE: Assumed to be only 4 bits right now on x86. */
} p2m_access_t;
+/* Remove a page from a domain's p2m table */
+int __must_check
+guest_physmap_remove_page(struct domain *d, unsigned long gfn,
+ unsigned long mfn, unsigned int page_order);
+
/* Map MMIO regions in the p2m: start_gfn and nr describe the range in
 * the guest physical address space to map, starting from the machine
 * frame number mfn. */