xenbits.xensource.com Git - xen.git/commitdiff
libxc: add xc_domain_add_to_physmap_batch to wrap XENMEM_add_to_physmap_batch
author    Zhongze Liu <blackskygg@gmail.com>
          Thu, 22 Jun 2017 16:35:28 +0000 (00:35 +0800)
committer Wei Liu <wei.liu2@citrix.com>
          Thu, 29 Jun 2017 10:23:15 +0000 (11:23 +0100)
This is preparation for the proposal "allow setting up shared memory areas
between VMs from xl config file". See:
V2: https://lists.xen.org/archives/html/xen-devel/2017-06/msg02256.html
V1: https://lists.xen.org/archives/html/xen-devel/2017-05/msg01288.html

The plan is to use XENMEM_add_to_physmap_batch in xl to map foreign pages from
one DomU to another so that the pages can be shared. But currently there is no
wrapper for XENMEM_add_to_physmap_batch in libxc, so add one.

Signed-off-by: Zhongze Liu <blackskygg@gmail.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
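
For illustration, here is a minimal sketch of how a caller such as xl could
use the new wrapper to map two pages of a source DomU into a target DomU,
along the lines of the plan above. The domain IDs, indexes and gfns are
hypothetical values chosen for the example; only
xc_domain_add_to_physmap_batch() and XENMAPSPACE_gmfn_foreign come from the
tree.

    #include <stdio.h>
    #include <xenctrl.h>

    /* Illustrative sketch only: all frame numbers below are made up. */
    static int share_two_pages(xc_interface *xch,
                               domid_t target_domid, domid_t source_domid)
    {
        xen_ulong_t idxs[2]  = { 0x100, 0x101 }; /* gfns in the source DomU */
        xen_pfn_t   gpfns[2] = { 0x200, 0x201 }; /* gfns in the target DomU */
        int errs[2];
        unsigned int i;
        int rc;

        rc = xc_domain_add_to_physmap_batch(xch, target_domid, source_domid,
                                            XENMAPSPACE_gmfn_foreign,
                                            2 /* size */, idxs, gpfns, errs);
        if ( rc < 0 )
            return rc;                      /* the hypercall itself failed */

        for ( i = 0; i < 2; i++ )
            if ( errs[i] )                  /* per-entry error from Xen */
                fprintf(stderr, "entry %u: error %d\n", i, errs[i]);

        return 0;
    }
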
tools/libxc/include/xenctrl.h
tools/libxc/xc_domain.c

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 8c26cb4141fe22b6037c9a7745bc86b879c64a9b..0ee6331c412e1ed9560a214a6653a639ddde9dde 100644
@@ -1372,6 +1372,15 @@ int xc_domain_add_to_physmap(xc_interface *xch,
                              unsigned long idx,
                              xen_pfn_t gpfn);
 
+int xc_domain_add_to_physmap_batch(xc_interface *xch,
+                                   domid_t domid,
+                                   domid_t foreign_domid,
+                                   unsigned int space,
+                                   unsigned int size,
+                                   xen_ulong_t *idxs,
+                                   xen_pfn_t *gpfns,
+                                   int *errs);
+
 int xc_domain_populate_physmap(xc_interface *xch,
                                uint32_t domid,
                                unsigned long nr_extents,
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 5d192ea0e49ebc0b61023eeee4fb9a8833bba72a..3bab4e8bab4907bf2b282cb4ec7641057a6a30a6 100644
@@ -1032,6 +1032,51 @@ int xc_domain_add_to_physmap(xc_interface *xch,
     return do_memory_op(xch, XENMEM_add_to_physmap, &xatp, sizeof(xatp));
 }
 
+int xc_domain_add_to_physmap_batch(xc_interface *xch,
+                                   domid_t domid,
+                                   domid_t foreign_domid,
+                                   unsigned int space,
+                                   unsigned int size,
+                                   xen_ulong_t *idxs,
+                                   xen_pfn_t *gpfns,
+                                   int *errs)
+{
+    int rc;
+    DECLARE_HYPERCALL_BOUNCE(idxs, size * sizeof(*idxs), XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BOUNCE(gpfns, size * sizeof(*gpfns), XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BOUNCE(errs, size * sizeof(*errs), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    struct xen_add_to_physmap_batch xatp_batch = {
+        .domid = domid,
+        .space = space,
+        .size = size,
+        .u = { .foreign_domid = foreign_domid }
+    };
+
+    if ( xc_hypercall_bounce_pre(xch, idxs)  ||
+         xc_hypercall_bounce_pre(xch, gpfns) ||
+         xc_hypercall_bounce_pre(xch, errs)  )
+    {
+        PERROR("Could not bounce memory for XENMEM_add_to_physmap_batch");
+        rc = -1;
+        goto out;
+    }
+
+    set_xen_guest_handle(xatp_batch.idxs, idxs);
+    set_xen_guest_handle(xatp_batch.gpfns, gpfns);
+    set_xen_guest_handle(xatp_batch.errs, errs);
+
+    rc = do_memory_op(xch, XENMEM_add_to_physmap_batch,
+                      &xatp_batch, sizeof(xatp_batch));
+
+out:
+    xc_hypercall_bounce_post(xch, idxs);
+    xc_hypercall_bounce_post(xch, gpfns);
+    xc_hypercall_bounce_post(xch, errs);
+
+    return rc;
+}
+
 int xc_domain_claim_pages(xc_interface *xch,
                                uint32_t domid,
                                unsigned long nr_pages)
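
The wrapper follows the usual libxc bounce-buffer idiom: idxs and gpfns are
copied into hypercall-safe memory as inputs (XC_HYPERCALL_BUFFER_BOUNCE_IN),
while errs is bounced back out (XC_HYPERCALL_BUFFER_BOUNCE_OUT) so that Xen
can report a per-entry error code in addition to the overall return value.
Calling xc_hypercall_bounce_post() on all three buffers even on the early-exit
path mirrors the surrounding functions in xc_domain.c, as bounce_post ignores
buffers that were never successfully bounced.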