#include <stdarg.h>
#include "xc_private.h"
+/*
+ * Issue a sysctl hypercall, retrying for as long as it fails with
+ * errno == EAGAIN.  Counterpart of do_domctl_save() for the sysctl
+ * interface; returns the raw do_sysctl() result (< 0 on failure).
+ */
+static int do_sysctl_save(int xc_handle, struct xen_sysctl *sysctl)
+{
+ int ret;
+
+ do {
+ ret = do_sysctl(xc_handle, sysctl);
+ } while ( (ret < 0) && (errno == EAGAIN) );
+
+ return ret;
+}
+
int xc_cpupool_create(int xc_handle,
uint32_t *ppoolid,
uint32_t sched_id)
{
int err;
- DECLARE_DOMCTL;
-
- domctl.cmd = XEN_DOMCTL_cpupool_op;
- domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_CREATE;
- domctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
- XEN_DOMCTL_CPUPOOL_PAR_ANY : *ppoolid;
- domctl.u.cpupool_op.sched_id = sched_id;
- if ( (err = do_domctl_save(xc_handle, &domctl)) != 0 )
+ DECLARE_SYSCTL;
+
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_CREATE;
+ /* *ppoolid == 0 maps to PAR_ANY: let Xen pick a pool id.  The id */
+ /* actually used is written back to *ppoolid below. */
+ sysctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
+ XEN_SYSCTL_CPUPOOL_PAR_ANY : *ppoolid;
+ sysctl.u.cpupool_op.sched_id = sched_id;
+ if ( (err = do_sysctl_save(xc_handle, &sysctl)) != 0 )
return err;
- *ppoolid = domctl.u.cpupool_op.cpupool_id;
+ *ppoolid = sysctl.u.cpupool_op.cpupool_id;
return 0;
}
int xc_cpupool_destroy(int xc_handle,
uint32_t poolid)
{
- DECLARE_DOMCTL;
+ DECLARE_SYSCTL;
- domctl.cmd = XEN_DOMCTL_cpupool_op;
- domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_DESTROY;
- domctl.u.cpupool_op.cpupool_id = poolid;
- return do_domctl_save(xc_handle, &domctl);
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_DESTROY;
+ sysctl.u.cpupool_op.cpupool_id = poolid;
+ /* Propagate the raw do_sysctl() result (< 0, errno set, on failure). */
+ return do_sysctl_save(xc_handle, &sysctl);
}
int xc_cpupool_getinfo(int xc_handle,
int p;
uint32_t poolid = first_poolid;
uint8_t local[sizeof (info->cpumap)];
- DECLARE_DOMCTL;
+ DECLARE_SYSCTL;
memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
for (p = 0; p < n_max; p++)
{
- domctl.cmd = XEN_DOMCTL_cpupool_op;
- domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_INFO;
- domctl.u.cpupool_op.cpupool_id = poolid;
- set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
- domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+ /* INFO query; the returned cpupool_id drives the iteration below, */
+ /* so presumably Xen reports the next pool with id >= poolid -- */
+ /* confirm against the hypervisor side. */
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+ sysctl.u.cpupool_op.cpupool_id = poolid;
+ /* Xen writes the pool's cpu bitmap through this handle into 'local'. */
+ set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+ sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
if ( (err = lock_pages(local, sizeof(local))) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
break;
}
- err = do_domctl_save(xc_handle, &domctl);
+ err = do_sysctl_save(xc_handle, &sysctl);
unlock_pages(local, sizeof (local));
if ( err < 0 )
break;
- info->cpupool_id = domctl.u.cpupool_op.cpupool_id;
- info->sched_id = domctl.u.cpupool_op.sched_id;
- info->n_dom = domctl.u.cpupool_op.n_dom;
+ info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+ info->sched_id = sysctl.u.cpupool_op.sched_id;
+ info->n_dom = sysctl.u.cpupool_op.n_dom;
bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
- poolid = domctl.u.cpupool_op.cpupool_id + 1;
+ /* Continue the scan just past the pool we were given. */
+ poolid = sysctl.u.cpupool_op.cpupool_id + 1;
info++;
}
uint32_t poolid,
int cpu)
{
- DECLARE_DOMCTL;
+ DECLARE_SYSCTL;
- domctl.cmd = XEN_DOMCTL_cpupool_op;
- domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_ADDCPU;
- domctl.u.cpupool_op.cpupool_id = poolid;
- domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
- return do_domctl_save(xc_handle, &domctl);
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_ADDCPU;
+ sysctl.u.cpupool_op.cpupool_id = poolid;
+ /* cpu < 0 maps to PAR_ANY: Xen picks the first free (unassigned) cpu. */
+ sysctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_SYSCTL_CPUPOOL_PAR_ANY : cpu;
+ return do_sysctl_save(xc_handle, &sysctl);
}
int xc_cpupool_removecpu(int xc_handle,
uint32_t poolid,
int cpu)
{
- DECLARE_DOMCTL;
+ DECLARE_SYSCTL;
- domctl.cmd = XEN_DOMCTL_cpupool_op;
- domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_RMCPU;
- domctl.u.cpupool_op.cpupool_id = poolid;
- domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
- return do_domctl_save(xc_handle, &domctl);
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_RMCPU;
+ sysctl.u.cpupool_op.cpupool_id = poolid;
+ /* cpu < 0 maps to PAR_ANY: Xen removes the last valid cpu of the pool. */
+ sysctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_SYSCTL_CPUPOOL_PAR_ANY : cpu;
+ return do_sysctl_save(xc_handle, &sysctl);
}
int xc_cpupool_movedomain(int xc_handle,
uint32_t poolid,
uint32_t domid)
{
- DECLARE_DOMCTL;
+ DECLARE_SYSCTL;
- domctl.cmd = XEN_DOMCTL_cpupool_op;
- domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN;
- domctl.u.cpupool_op.cpupool_id = poolid;
- domctl.u.cpupool_op.domid = domid;
- return do_domctl_save(xc_handle, &domctl);
+ /* Ask Xen to move domain 'domid' into cpupool 'poolid'. */
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN;
+ sysctl.u.cpupool_op.cpupool_id = poolid;
+ sysctl.u.cpupool_op.domid = domid;
+ return do_sysctl_save(xc_handle, &sysctl);
}
int xc_cpupool_freeinfo(int xc_handle,
{
int err;
uint8_t local[sizeof (*cpumap)];
- DECLARE_DOMCTL;
+ DECLARE_SYSCTL;
- domctl.cmd = XEN_DOMCTL_cpupool_op;
- domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_FREEINFO;
- set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
- domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+ /* FREEINFO: Xen fills 'local' with the bitmap of cpus that are not */
+ /* assigned to any cpupool (cpupool_free_cpus on the hypervisor side). */
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
+ set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+ sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
if ( (err = lock_pages(local, sizeof(local))) != 0 )
{
return err;
}
- err = do_domctl_save(xc_handle, &domctl);
+ err = do_sysctl_save(xc_handle, &sysctl);
unlock_pages(local, sizeof (local));
if (err < 0)
}
/*
- * do cpupool related domctl operations
+ * do cpupool related sysctl operations
 */
-int cpupool_do_domctl(struct xen_domctl_cpupool_op *op)
+int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
{
int ret;
struct cpupool *c;
switch ( op->op )
{
- case XEN_DOMCTL_CPUPOOL_OP_CREATE:
+ case XEN_SYSCTL_CPUPOOL_OP_CREATE:
{
int poolid;
const struct scheduler *sched;
- poolid = (op->cpupool_id == XEN_DOMCTL_CPUPOOL_PAR_ANY) ?
+ /* PAR_ANY requests that the hypervisor choose the pool id. */
+ poolid = (op->cpupool_id == XEN_SYSCTL_CPUPOOL_PAR_ANY) ?
CPUPOOLID_NONE: op->cpupool_id;
sched = scheduler_get_by_id(op->sched_id);
ret = -ENOENT;
}
break;
- case XEN_DOMCTL_CPUPOOL_OP_DESTROY:
+ case XEN_SYSCTL_CPUPOOL_OP_DESTROY:
{
spin_lock(&cpupool_lock);
c = cpupool_find_by_id(op->cpupool_id, 1);
}
break;
- case XEN_DOMCTL_CPUPOOL_OP_INFO:
+ case XEN_SYSCTL_CPUPOOL_OP_INFO:
{
spin_lock(&cpupool_lock);
c = cpupool_find_by_id(op->cpupool_id, 0);
}
break;
- case XEN_DOMCTL_CPUPOOL_OP_ADDCPU:
+ case XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
{
unsigned cpu;
+ /* NOTE(review): 'cpu' must be set from op->cpu before this printk; */
+ /* verify that assignment is present in the full function body. */
printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d)\n",
op->cpupool_id, cpu);
spin_lock(&cpupool_lock);
- if ( cpu == XEN_DOMCTL_CPUPOOL_PAR_ANY )
+ /* PAR_ANY: assign the first cpu currently in no pool. */
+ if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
cpu = first_cpu(cpupool_free_cpus);
ret = -EINVAL;
if ( cpu >= NR_CPUS )
}
break;
- case XEN_DOMCTL_CPUPOOL_OP_RMCPU:
+ case XEN_SYSCTL_CPUPOOL_OP_RMCPU:
{
unsigned cpu;
if ( c == NULL )
break;
cpu = op->cpu;
- if ( cpu == XEN_DOMCTL_CPUPOOL_PAR_ANY )
+ /* PAR_ANY: remove the highest-numbered cpu still in the pool. */
+ if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
cpu = last_cpu(c->cpu_valid);
ret = -EINVAL;
if ( cpu >= NR_CPUS )
}
break;
- case XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN:
+ case XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
{
struct domain *d;
}
break;
- case XEN_DOMCTL_CPUPOOL_OP_FREEINFO:
+ case XEN_SYSCTL_CPUPOOL_OP_FREEINFO:
{
+ /* Report the bitmap of cpus not assigned to any pool. */
cpumask_to_xenctl_cpumap(&(op->cpumap),
&cpupool_free_cpus);
typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
-/*
- * cpupool operations
- */
-/* XEN_DOMCTL_cpupool_op */
-#define XEN_DOMCTL_CPUPOOL_OP_CREATE 1 /* C */
-#define XEN_DOMCTL_CPUPOOL_OP_DESTROY 2 /* D */
-#define XEN_DOMCTL_CPUPOOL_OP_INFO 3 /* I */
-#define XEN_DOMCTL_CPUPOOL_OP_ADDCPU 4 /* A */
-#define XEN_DOMCTL_CPUPOOL_OP_RMCPU 5 /* R */
-#define XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN 6 /* M */
-#define XEN_DOMCTL_CPUPOOL_OP_FREEINFO 7 /* F */
-#define XEN_DOMCTL_CPUPOOL_PAR_ANY 0xFFFFFFFF
-struct xen_domctl_cpupool_op {
- uint32_t op; /* IN */
- uint32_t cpupool_id; /* IN: CDIARM OUT: CI */
- uint32_t sched_id; /* IN: C OUT: I */
- uint32_t domid; /* IN: M */
- uint32_t cpu; /* IN: AR */
- uint32_t n_dom; /* OUT: I */
- struct xenctl_cpumap cpumap; /* OUT: IF */
-};
-typedef struct xen_domctl_cpupool_op xen_domctl_cpupool_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpupool_op_t);
-
-
struct xen_domctl {
uint32_t cmd;
#define XEN_DOMCTL_createdomain 1
#define XEN_DOMCTL_gettscinfo 59
#define XEN_DOMCTL_settscinfo 60
#define XEN_DOMCTL_getpageframeinfo3 61
-#define XEN_DOMCTL_cpupool_op 62
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
struct xen_domctl_debug_op debug_op;
struct xen_domctl_mem_event_op mem_event_op;
struct xen_domctl_mem_sharing_op mem_sharing_op;
- struct xen_domctl_cpupool_op cpupool_op;
#if defined(__i386__) || defined(__x86_64__)
struct xen_domctl_cpuid cpuid;
#endif
typedef struct xen_sysctl_numainfo xen_sysctl_numainfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_numainfo_t);
+#define XEN_SYSCTL_cpupool_op 18
+/* XEN_SYSCTL_cpupool_op */
+/*
+ * The one-letter tag after each OP_* value below is referenced by the
+ * IN/OUT annotations on the fields of struct xen_sysctl_cpupool_op,
+ * showing which operations read or write each field.
+ */
+#define XEN_SYSCTL_CPUPOOL_OP_CREATE 1 /* C */
+#define XEN_SYSCTL_CPUPOOL_OP_DESTROY 2 /* D */
+#define XEN_SYSCTL_CPUPOOL_OP_INFO 3 /* I */
+#define XEN_SYSCTL_CPUPOOL_OP_ADDCPU 4 /* A */
+#define XEN_SYSCTL_CPUPOOL_OP_RMCPU 5 /* R */
+#define XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN 6 /* M */
+/* Wildcard for cpupool_id (CREATE) or cpu (ADDCPU/RMCPU). */
+#define XEN_SYSCTL_CPUPOOL_OP_FREEINFO 7 /* F */
+#define XEN_SYSCTL_CPUPOOL_PAR_ANY 0xFFFFFFFF
+struct xen_sysctl_cpupool_op {
+ uint32_t op; /* IN */
+ uint32_t cpupool_id; /* IN: CDIARM OUT: CI */
+ uint32_t sched_id; /* IN: C OUT: I */
+ uint32_t domid; /* IN: M */
+ uint32_t cpu; /* IN: AR */
+ uint32_t n_dom; /* OUT: I */
+ struct xenctl_cpumap cpumap; /* OUT: IF */
+};
+typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t);
struct xen_sysctl {
uint32_t cmd;
struct xen_sysctl_pm_op pm_op;
struct xen_sysctl_page_offline_op page_offline;
struct xen_sysctl_lockprof_op lockprof_op;
+ struct xen_sysctl_cpupool_op cpupool_op;
uint8_t pad[128];
} u;
};