All callers pass 0 in, so drop the now-unused flags parameter from the
mem_sharing_unshare_page() wrapper (the underlying __mem_sharing_unshare_page()
keeps it).
Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
Reviewed-by: Wei Liu <wl@xen.org>
Acked-by: George Dunlap <george.dunlap@citrix.com>
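For reference, a minimal stand-alone sketch of the interface after the change.
Everything outside the wrapper itself (the struct domain stand-in, the stubbed
__mem_sharing_unshare_page() and main()) is an assumption made only so the
snippet builds outside the Xen tree; it is not the in-tree code.

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct domain { int domain_id; };   /* stand-in for Xen's struct domain */

/* Stub of the underlying worker; in Xen this keeps its flags argument. */
static int __mem_sharing_unshare_page(struct domain *d, unsigned long gfn,
                                      uint16_t flags)
{
    (void)d; (void)gfn; (void)flags;
    return 0;                       /* pretend the unshare succeeded */
}

/* After the patch the wrapper hard-codes flags = 0 for every caller. */
static inline int mem_sharing_unshare_page(struct domain *d, unsigned long gfn)
{
    int rc = __mem_sharing_unshare_page(d, gfn, 0);

    assert(!rc || rc == -ENOMEM);   /* mirrors the BUG_ON() in the header */
    return rc;
}

int main(void)
{
    struct domain dom = { .domain_id = 1 };

    /* Callers no longer spell out the always-zero flags argument. */
    printf("unshare rc = %d\n", mem_sharing_unshare_page(&dom, 0x1234));
    return 0;
}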
if ( npfec.write_access && (p2mt == p2m_ram_shared) )
{
ASSERT(p2m_is_hostp2m(p2m));
- sharing_enomem = mem_sharing_unshare_page(currd, gfn, 0);
+ sharing_enomem = mem_sharing_unshare_page(currd, gfn);
rc = 1;
goto out_put_gfn;
}
* Try to unshare. If we fail, communicate ENOMEM without
* sleeping.
*/
- if ( mem_sharing_unshare_page(p2m->domain, gfn_l, 0) < 0 )
+ if ( mem_sharing_unshare_page(p2m->domain, gfn_l) < 0 )
mem_sharing_notify_enomem(p2m->domain, gfn_l, false);
mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL);
}
{
/* Do an unshare to cleanly take care of all corner cases. */
int rc;
- rc = mem_sharing_unshare_page(p2m->domain,
- gfn_x(gfn_add(gfn, i)), 0);
+ rc = mem_sharing_unshare_page(p2m->domain, gfn_x(gfn_add(gfn, i)));
if ( rc )
{
p2m_unlock(p2m);
* might be the only one using this shared page, and we need to
* trigger proper cleanup. Once done, this is like any other page.
*/
- rc = mem_sharing_unshare_page(d, gmfn, 0);
+ rc = mem_sharing_unshare_page(d, gmfn);
if ( rc )
{
mem_sharing_notify_enomem(d, gmfn, false);
uint16_t flags);
static inline int mem_sharing_unshare_page(struct domain *d,
- unsigned long gfn,
- uint16_t flags)
+ unsigned long gfn)
{
- int rc = __mem_sharing_unshare_page(d, gfn, flags);
+ int rc = __mem_sharing_unshare_page(d, gfn, 0);
BUG_ON(rc && (rc != -ENOMEM));
return rc;
}
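With the flags gone, the only failure a caller still has to plan for is
-ENOMEM (the BUG_ON() above rejects anything else), so call sites reduce to
the pattern already visible in the hunks above. A hypothetical caller sketch,
where d/gfn are assumed locals and handle_unshare_enomem() is a made-up
stand-in for whatever recovery the real call site performs:

    int rc = mem_sharing_unshare_page(d, gfn);

    if ( rc )                          /* can only be -ENOMEM per the BUG_ON() */
        handle_unshare_enomem(d, gfn); /* hypothetical recovery helper */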
return 0;
}
-static inline int mem_sharing_unshare_page(struct domain *d, unsigned long gfn,
- uint16_t flags)
+static inline int mem_sharing_unshare_page(struct domain *d, unsigned long gfn)
{
ASSERT_UNREACHABLE();
return -EOPNOTSUPP;