Make the do_memory_op function accessible to tools linking with libxc.
Similar functions are already available for both domctl and sysctl. As part
of this patch we also change the type of the input 'cmd' to unsigned int to accurately
reflect what the hypervisor expects.
Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Acked-by: Anthony PERARD <anthony.perard@citrix.com>
int xc_domctl(xc_interface *xch, struct xen_domctl *domctl);
int xc_sysctl(xc_interface *xch, struct xen_sysctl *sysctl);
+long xc_memory_op(xc_interface *xch, unsigned int cmd, void *arg, size_t len);
int xc_version(xc_interface *xch, int cmd, void *arg);
set_xen_guest_handle(fmap.map.buffer, entries);
- rc = do_memory_op(xch, XENMEM_set_memory_map, &fmap, sizeof(fmap));
+ rc = xc_memory_op(xch, XENMEM_set_memory_map, &fmap, sizeof(fmap));
xc_hypercall_bounce_post(xch, entries);
set_xen_guest_handle(memmap.buffer, entries);
- rc = do_memory_op(xch, XENMEM_machine_memory_map, &memmap, sizeof(memmap));
+ rc = xc_memory_op(xch, XENMEM_machine_memory_map, &memmap, sizeof(memmap));
xc_hypercall_bounce_post(xch, entries);
set_xen_guest_handle(xrdmmap.buffer, entries);
- rc = do_memory_op(xch, XENMEM_reserved_device_memory_map,
+ rc = xc_memory_op(xch, XENMEM_reserved_device_memory_map,
&xrdmmap, sizeof(xrdmmap));
xc_hypercall_bounce_post(xch, entries);
int xc_domain_maximum_gpfn(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns)
{
struct xen_memory_domain dom = { .domid = domid };
- long rc = do_memory_op(xch, XENMEM_maximum_gpfn, &dom, sizeof(dom));
+ long rc = xc_memory_op(xch, XENMEM_maximum_gpfn, &dom, sizeof(dom));
if ( rc >= 0 )
{
set_xen_guest_handle(reservation.extent_start, extent_start);
- err = do_memory_op(xch, XENMEM_increase_reservation, &reservation, sizeof(reservation));
+ err = xc_memory_op(xch, XENMEM_increase_reservation, &reservation, sizeof(reservation));
xc_hypercall_bounce_post(xch, extent_start);
}
set_xen_guest_handle(reservation.extent_start, extent_start);
- err = do_memory_op(xch, XENMEM_decrease_reservation, &reservation, sizeof(reservation));
+ err = xc_memory_op(xch, XENMEM_decrease_reservation, &reservation, sizeof(reservation));
xc_hypercall_bounce_post(xch, extent_start);
.idx = idx,
.gpfn = gpfn,
};
- return do_memory_op(xch, XENMEM_add_to_physmap, &xatp, sizeof(xatp));
+ return xc_memory_op(xch, XENMEM_add_to_physmap, &xatp, sizeof(xatp));
}
int xc_domain_add_to_physmap_batch(xc_interface *xch,
set_xen_guest_handle(xatp_batch.gpfns, gpfns);
set_xen_guest_handle(xatp_batch.errs, errs);
- rc = do_memory_op(xch, XENMEM_add_to_physmap_batch,
+ rc = xc_memory_op(xch, XENMEM_add_to_physmap_batch,
&xatp_batch, sizeof(xatp_batch));
out:
.domid = domid,
.gpfn = gpfn,
};
- return do_memory_op(xch, XENMEM_remove_from_physmap, &xrfp, sizeof(xrfp));
+ return xc_memory_op(xch, XENMEM_remove_from_physmap, &xrfp, sizeof(xrfp));
}
int xc_domain_claim_pages(xc_interface *xch,
set_xen_guest_handle(reservation.extent_start, HYPERCALL_BUFFER_NULL);
- err = do_memory_op(xch, XENMEM_claim_pages, &reservation, sizeof(reservation));
+ err = xc_memory_op(xch, XENMEM_claim_pages, &reservation, sizeof(reservation));
/* Ignore it if the hypervisor does not support the call. */
if (err == -1 && errno == ENOSYS)
err = errno = 0;
}
set_xen_guest_handle(reservation.extent_start, extent_start);
- err = do_memory_op(xch, XENMEM_populate_physmap, &reservation, sizeof(reservation));
+ err = xc_memory_op(xch, XENMEM_populate_physmap, &reservation, sizeof(reservation));
xc_hypercall_bounce_post(xch, extent_start);
return err;
set_xen_guest_handle(exchange.in.extent_start, in_extents);
set_xen_guest_handle(exchange.out.extent_start, out_extents);
- rc = do_memory_op(xch, XENMEM_exchange, &exchange, sizeof(exchange));
+ rc = xc_memory_op(xch, XENMEM_exchange, &exchange, sizeof(exchange));
out:
xc_hypercall_bounce_post(xch, in_extents);
.target_pages = target_pages
};
- err = do_memory_op(xch, op, &pod_target, sizeof(pod_target));
+ err = xc_memory_op(xch, op, &pod_target, sizeof(pod_target));
if ( err < 0 )
DPRINTF("Failed %s_pod_target dom %d\n",
vnuma_topo.domid = domid;
vnuma_topo.pad = 0;
- rc = do_memory_op(xch, XENMEM_get_vnumainfo, &vnuma_topo,
+ rc = xc_memory_op(xch, XENMEM_get_vnumainfo, &vnuma_topo,
sizeof(vnuma_topo));
*nr_vnodes = vnuma_topo.nr_vnodes;
.nr = nr
};
- return do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
+ return xc_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
}
int xc_set_mem_access_multi(xc_interface *xch,
set_xen_guest_handle(mao.pfn_list, pages);
set_xen_guest_handle(mao.access_list, access);
- rc = do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
+ rc = xc_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
xc_hypercall_bounce_post(xch, access);
xc_hypercall_bounce_post(xch, pages);
.pfn = pfn
};
- rc = do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
+ rc = xc_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
if ( rc == 0 )
*access = mao.access;
set_xen_guest_handle(mpo.buffer, buffer);
}
- rc = do_memory_op(xch, XENMEM_paging_op, &mpo, sizeof(mpo));
+ rc = xc_memory_op(xch, XENMEM_paging_op, &mpo, sizeof(mpo));
if ( buffer )
xc_hypercall_bounce_post(xch, buffer);
{
mso->domain = domid;
- return do_memory_op(xch, XENMEM_sharing_op, mso, sizeof(*mso));
+ return xc_memory_op(xch, XENMEM_sharing_op, mso, sizeof(*mso));
}
int xc_memshr_nominate_gfn(xc_interface *xch,
mso.op = XENMEM_sharing_op_audit;
- return do_memory_op(xch, XENMEM_sharing_op, &mso, sizeof(mso));
+ return xc_memory_op(xch, XENMEM_sharing_op, &mso, sizeof(mso));
}
long xc_sharing_freed_pages(xc_interface *xch)
{
- return do_memory_op(xch, XENMEM_get_sharing_freed_pages, NULL, 0);
+ return xc_memory_op(xch, XENMEM_get_sharing_freed_pages, NULL, 0);
}
long xc_sharing_used_frames(xc_interface *xch)
{
- return do_memory_op(xch, XENMEM_get_sharing_shared_pages, NULL, 0);
+ return xc_memory_op(xch, XENMEM_get_sharing_shared_pages, NULL, 0);
}
return flush_mmu_updates(xch, mmu);
}
-long do_memory_op(xc_interface *xch, int cmd, void *arg, size_t len)
+long xc_memory_op(xc_interface *xch, unsigned int cmd, void *arg, size_t len)
{
DECLARE_HYPERCALL_BOUNCE(arg, len, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
long ret = -1;
int xc_maximum_ram_page(xc_interface *xch, unsigned long *max_mfn)
{
- long rc = do_memory_op(xch, XENMEM_maximum_ram_page, NULL, 0);
+ long rc = xc_memory_op(xch, XENMEM_maximum_ram_page, NULL, 0);
if ( rc >= 0 )
{
}
set_xen_guest_handle(xmml.extent_start, extent_start);
- rc = do_memory_op(xch, XENMEM_machphys_mfn_list, &xmml, sizeof(xmml));
+ rc = xc_memory_op(xch, XENMEM_machphys_mfn_list, &xmml, sizeof(xmml));
if (rc || xmml.nr_extents != max_extents)
rc = -1;
else
return ret;
}
-long do_memory_op(xc_interface *xch, int cmd, void *arg, size_t len);
-
void *xc_map_foreign_ranges(xc_interface *xch, uint32_t dom,
size_t size, int prot, size_t chunksize,
privcmd_mmap_entry_t entries[], int nentries);
xc_dom_printf(xch, "%s: d%d: pfn=0x%"PRI_xen_pfn, __func__,
domid, scratch_gfn);
- rc = do_memory_op(xch, XENMEM_add_to_physmap, &xatp, sizeof(xatp));
+ rc = xc_memory_op(xch, XENMEM_add_to_physmap, &xatp, sizeof(xatp));
if ( rc != 0 )
{
xc_dom_panic(xch, XC_INTERNAL_ERROR,
xc_dom_panic(xch, XC_INTERNAL_ERROR,
"%s: failed to seed gnttab entries for d%d\n",
__func__, domid);
- (void) do_memory_op(xch, XENMEM_remove_from_physmap, &xrfp,
+ (void) xc_memory_op(xch, XENMEM_remove_from_physmap, &xrfp,
sizeof(xrfp));
return -1;
}
- rc = do_memory_op(xch, XENMEM_remove_from_physmap, &xrfp, sizeof(xrfp));
+ rc = xc_memory_op(xch, XENMEM_remove_from_physmap, &xrfp, sizeof(xrfp));
if (rc != 0)
{
xc_dom_panic(xch, XC_INTERNAL_ERROR,
.extent_start = { &ctx->x86.pv.compat_m2p_mfn0 },
};
- rc = do_memory_op(xch, XENMEM_machphys_compat_mfn_list,
+ rc = xc_memory_op(xch, XENMEM_machphys_compat_mfn_list,
&xmml, sizeof(xmml));
if ( rc || xmml.nr_extents != 1 )
{