--- /dev/null
+/******************************************************************************
+ * xc_cpupool.c
+ *
+ * API for manipulating and obtaining information on cpupools.
+ *
+ * Copyright (c) 2009, J Gross.
+ */
+
+#include <stdarg.h>
+#include "xc_private.h"
+
+int xc_cpupool_create(int xc_handle,
+ uint32_t *ppoolid,
+ uint32_t sched_id)
+{
+ int err;
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_CREATE;
+ domctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
+ XEN_DOMCTL_CPUPOOL_PAR_ANY : *ppoolid;
+ domctl.u.cpupool_op.sched_id = sched_id;
+ if ( (err = do_domctl_save(xc_handle, &domctl)) != 0 )
+ return err;
+
+ *ppoolid = domctl.u.cpupool_op.cpupool_id;
+ return 0;
+}
+
+int xc_cpupool_destroy(int xc_handle,
+ uint32_t poolid)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_DESTROY;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_getinfo(int xc_handle,
+ uint32_t first_poolid,
+ uint32_t n_max,
+ xc_cpupoolinfo_t *info)
+{
+ int err = 0;
+ int p;
+ uint32_t poolid = first_poolid;
+    uint8_t local[sizeof(info->cpumap)];
+ DECLARE_DOMCTL;
+
+ memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+
+    for ( p = 0; p < n_max; p++ )
+ {
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_INFO;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+ domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+
+ if ( (err = lock_pages(local, sizeof(local))) != 0 )
+ {
+ PERROR("Could not lock memory for Xen hypercall");
+ break;
+ }
+ err = do_domctl_save(xc_handle, &domctl);
+        unlock_pages(local, sizeof(local));
+
+ if ( err < 0 )
+ break;
+
+ info->cpupool_id = domctl.u.cpupool_op.cpupool_id;
+ info->sched_id = domctl.u.cpupool_op.sched_id;
+ info->n_dom = domctl.u.cpupool_op.n_dom;
+ bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
+ poolid = domctl.u.cpupool_op.cpupool_id + 1;
+ info++;
+ }
+
+ if ( p == 0 )
+ return err;
+
+ return p;
+}
+
+int xc_cpupool_addcpu(int xc_handle,
+ uint32_t poolid,
+ int cpu)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_ADDCPU;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+ return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_removecpu(int xc_handle,
+ uint32_t poolid,
+ int cpu)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_RMCPU;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+ return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_movedomain(int xc_handle,
+ uint32_t poolid,
+ uint32_t domid)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ domctl.u.cpupool_op.domid = domid;
+ return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_freeinfo(int xc_handle,
+ uint64_t *cpumap)
+{
+ int err;
+    uint8_t local[sizeof(*cpumap)];
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_FREEINFO;
+ set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+ domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+
+ if ( (err = lock_pages(local, sizeof(local))) != 0 )
+ {
+ PERROR("Could not lock memory for Xen hypercall");
+ return err;
+ }
+
+ err = do_domctl_save(xc_handle, &domctl);
+    unlock_pages(local, sizeof(local));
+
+    if ( err < 0 )
+ return err;
+
+ bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
+
+ return 0;
+}
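
A minimal usage sketch of the API implemented above (an illustration only, not
part of the file): it assumes the existing int-handle libxc interface
(xc_interface_open()/xc_interface_close()) and the XEN_SCHEDULER_CREDIT
constant from the public domctl interface, and abbreviates error reporting.

    #include <stdio.h>
    #include <xenctrl.h>

    /* Hypothetical helper: create a pool using the credit scheduler, assign
     * it one currently unassigned cpu and move an existing domain into it. */
    static int example_pool_setup(uint32_t domid)
    {
        int xc_handle = xc_interface_open();
        uint32_t poolid = 0;            /* 0: let Xen choose a free pool id */

        if ( xc_handle < 0 )
            return -1;

        if ( xc_cpupool_create(xc_handle, &poolid, XEN_SCHEDULER_CREDIT) ||
             xc_cpupool_addcpu(xc_handle, poolid, -1) ||  /* -1: any free cpu */
             xc_cpupool_movedomain(xc_handle, poolid, domid) )
        {
            xc_interface_close(xc_handle);
            return -1;
        }

        printf("domain %u now runs in cpupool %u\n", domid, poolid);
        xc_interface_close(xc_handle);
        return 0;
    }
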
unsigned int nr_online_vcpus;
unsigned int max_vcpu_id;
xen_domain_handle_t handle;
+ unsigned int cpupool;
} xc_dominfo_t;
typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
uint32_t domid,
unsigned int enable);
+/*
+ * CPUPOOL MANAGEMENT FUNCTIONS
+ */
+
+typedef struct xc_cpupoolinfo {
+ uint32_t cpupool_id;
+ uint32_t sched_id;
+ uint32_t n_dom;
+ uint64_t cpumap;
+} xc_cpupoolinfo_t;
+
+/**
+ * Create a new cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm ppoolid pointer to the new cpupool id (in/out); if *ppoolid is 0,
+ *               an unused id is chosen by Xen
+ * @parm sched_id id of scheduler to use for pool
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_create(int xc_handle,
+ uint32_t *ppoolid,
+ uint32_t sched_id);
+
+/**
+ * Destroy a cpupool. The pool must not contain any domain and must have no
+ * cpu assigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool to destroy
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_destroy(int xc_handle,
+ uint32_t poolid);
+
+/**
+ * Get cpupool info. Returns info for up to the specified number of cpupools
+ * starting at the given id.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm first_poolid lowest id for which info is returned
+ * @parm n_max maximum number of cpupools for which to return info
+ * @parm info pointer to xc_cpupoolinfo_t array
+ * return number of cpupool info entries returned, -1 on failure
+ */
+int xc_cpupool_getinfo(int xc_handle,
+ uint32_t first_poolid,
+ uint32_t n_max,
+ xc_cpupoolinfo_t *info);
+
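As an illustration only (a sketch, not part of this header), all pools can be
enumerated by calling xc_cpupool_getinfo() repeatedly; xc_handle is assumed to
be an open handle and the batch size of 16 is arbitrary:

    xc_cpupoolinfo_t info[16];
    uint32_t first = 0;
    int i, n;

    while ( (n = xc_cpupool_getinfo(xc_handle, first, 16, info)) > 0 )
    {
        for ( i = 0; i < n; i++ )
            printf("pool %u: sched %u, %u domains, cpumap %016llx\n",
                   info[i].cpupool_id, info[i].sched_id, info[i].n_dom,
                   (unsigned long long)info[i].cpumap);
        first = info[n - 1].cpupool_id + 1;
    }
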
+/**
+ * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned cpu.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to add
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_addcpu(int xc_handle,
+ uint32_t poolid,
+ int cpu);
+
+/**
+ * Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to remove
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_removecpu(int xc_handle,
+ uint32_t poolid,
+ int cpu);
+
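A possible teardown sequence (a sketch, not part of this header): drain the
pool by repeatedly removing its last cpu with cpu == -1, then destroy it once
no domains and no cpus remain; xc_handle and poolid are assumed to exist:

    while ( xc_cpupool_removecpu(xc_handle, poolid, -1) == 0 )
        ;   /* -1 removes the last cpu; the loop stops once none are left */

    if ( xc_cpupool_destroy(xc_handle, poolid) != 0 )
        fprintf(stderr, "could not destroy cpupool %u\n", poolid);
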
+/**
+ * Move domain to another cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the destination cpupool
+ * @parm domid id of the domain to move
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_movedomain(int xc_handle,
+ uint32_t poolid,
+ uint32_t domid);
+
+/**
+ * Return map of cpus not in any cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm cpumap pointer to where the cpumap is stored
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_freeinfo(int xc_handle,
+ uint64_t *cpumap);
+
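A sketch of how the free map might be consumed (illustration only, xc_handle
assumed open): each set bit in the returned 64-bit map marks a cpu that
currently belongs to no cpupool.

    uint64_t freemap;
    int cpu;

    if ( xc_cpupool_freeinfo(xc_handle, &freemap) == 0 )
        for ( cpu = 0; cpu < 64; cpu++ )
            if ( freemap & (1ULL << cpu) )
                printf("cpu %d is not assigned to any cpupool\n", cpu);
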
+
/*
* EVENT CHANNEL FUNCTIONS
*/