xenbits.xensource.com Git - xen.git/commitdiff
cpupool: Control interface should be a sysctl rather than a domctl.
author Keir Fraser <keir.fraser@citrix.com>
Tue, 4 May 2010 21:20:05 +0000 (22:20 +0100)
committer Keir Fraser <keir.fraser@citrix.com>
Tue, 4 May 2010 21:20:05 +0000 (22:20 +0100)
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
tools/libxc/xc_cpupool.c
tools/libxc/xc_private.h
xen/common/cpupool.c
xen/common/domctl.c
xen/common/sysctl.c
xen/include/public/domctl.h
xen/include/public/sysctl.h
xen/include/xen/sched.h

index a7a759fcadc9a239e02daec22b7654198500fc03..12bcc0ee9c128aa152454354c9e0907dab59be2c 100644 (file)
@@ -9,34 +9,45 @@
 #include <stdarg.h>
 #include "xc_private.h"
 
+static int do_sysctl_save(int xc_handle, struct xen_sysctl *sysctl)
+{
+    int ret;
+
+    do {
+        ret = do_sysctl(xc_handle, sysctl);
+    } while ( (ret < 0) && (errno == EAGAIN) );
+
+    return ret;
+}
+
 int xc_cpupool_create(int xc_handle,
                       uint32_t *ppoolid,
                       uint32_t sched_id)
 {
     int err;
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_cpupool_op;
-    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_CREATE;
-    domctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
-        XEN_DOMCTL_CPUPOOL_PAR_ANY : *ppoolid;
-    domctl.u.cpupool_op.sched_id = sched_id;
-    if ( (err = do_domctl_save(xc_handle, &domctl)) != 0 )
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_CREATE;
+    sysctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
+        XEN_SYSCTL_CPUPOOL_PAR_ANY : *ppoolid;
+    sysctl.u.cpupool_op.sched_id = sched_id;
+    if ( (err = do_sysctl_save(xc_handle, &sysctl)) != 0 )
         return err;
 
-    *ppoolid = domctl.u.cpupool_op.cpupool_id;
+    *ppoolid = sysctl.u.cpupool_op.cpupool_id;
     return 0;
 }
 
 int xc_cpupool_destroy(int xc_handle,
                        uint32_t poolid)
 {
-    DECLARE_DOMCTL;
+    DECLARE_SYSCTL;
 
-    domctl.cmd = XEN_DOMCTL_cpupool_op;
-    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_DESTROY;
-    domctl.u.cpupool_op.cpupool_id = poolid;
-    return do_domctl_save(xc_handle, &domctl);
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_DESTROY;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    return do_sysctl_save(xc_handle, &sysctl);
 }
 
 int xc_cpupool_getinfo(int xc_handle, 
@@ -48,34 +59,34 @@ int xc_cpupool_getinfo(int xc_handle,
     int p;
     uint32_t poolid = first_poolid;
     uint8_t local[sizeof (info->cpumap)];
-    DECLARE_DOMCTL;
+    DECLARE_SYSCTL;
 
     memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
 
     for (p = 0; p < n_max; p++)
     {
-        domctl.cmd = XEN_DOMCTL_cpupool_op;
-        domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_INFO;
-        domctl.u.cpupool_op.cpupool_id = poolid;
-        set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
-        domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+        sysctl.cmd = XEN_SYSCTL_cpupool_op;
+        sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+        sysctl.u.cpupool_op.cpupool_id = poolid;
+        set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+        sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
 
         if ( (err = lock_pages(local, sizeof(local))) != 0 )
         {
             PERROR("Could not lock memory for Xen hypercall");
             break;
         }
-        err = do_domctl_save(xc_handle, &domctl);
+        err = do_sysctl_save(xc_handle, &sysctl);
         unlock_pages(local, sizeof (local));
 
         if ( err < 0 )
             break;
 
-        info->cpupool_id = domctl.u.cpupool_op.cpupool_id;
-        info->sched_id = domctl.u.cpupool_op.sched_id;
-        info->n_dom = domctl.u.cpupool_op.n_dom;
+        info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+        info->sched_id = sysctl.u.cpupool_op.sched_id;
+        info->n_dom = sysctl.u.cpupool_op.n_dom;
         bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
-        poolid = domctl.u.cpupool_op.cpupool_id + 1;
+        poolid = sysctl.u.cpupool_op.cpupool_id + 1;
         info++;
     }
 
@@ -89,39 +100,39 @@ int xc_cpupool_addcpu(int xc_handle,
                       uint32_t poolid,
                       int cpu)
 {
-    DECLARE_DOMCTL;
+    DECLARE_SYSCTL;
 
-    domctl.cmd = XEN_DOMCTL_cpupool_op;
-    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_ADDCPU;
-    domctl.u.cpupool_op.cpupool_id = poolid;
-    domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
-    return do_domctl_save(xc_handle, &domctl);
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_ADDCPU;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    sysctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_SYSCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_sysctl_save(xc_handle, &sysctl);
 }
 
 int xc_cpupool_removecpu(int xc_handle,
                          uint32_t poolid,
                          int cpu)
 {
-    DECLARE_DOMCTL;
+    DECLARE_SYSCTL;
 
-    domctl.cmd = XEN_DOMCTL_cpupool_op;
-    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_RMCPU;
-    domctl.u.cpupool_op.cpupool_id = poolid;
-    domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
-    return do_domctl_save(xc_handle, &domctl);
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_RMCPU;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    sysctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_SYSCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_sysctl_save(xc_handle, &sysctl);
 }
 
 int xc_cpupool_movedomain(int xc_handle,
                           uint32_t poolid,
                           uint32_t domid)
 {
-    DECLARE_DOMCTL;
+    DECLARE_SYSCTL;
 
-    domctl.cmd = XEN_DOMCTL_cpupool_op;
-    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN;
-    domctl.u.cpupool_op.cpupool_id = poolid;
-    domctl.u.cpupool_op.domid = domid;
-    return do_domctl_save(xc_handle, &domctl);
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    sysctl.u.cpupool_op.domid = domid;
+    return do_sysctl_save(xc_handle, &sysctl);
 }
 
 int xc_cpupool_freeinfo(int xc_handle,
@@ -129,12 +140,12 @@ int xc_cpupool_freeinfo(int xc_handle,
 {
     int err;
     uint8_t local[sizeof (*cpumap)];
-    DECLARE_DOMCTL;
+    DECLARE_SYSCTL;
 
-    domctl.cmd = XEN_DOMCTL_cpupool_op;
-    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_FREEINFO;
-    set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
-    domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
+    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+    sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
 
     if ( (err = lock_pages(local, sizeof(local))) != 0 )
     {
@@ -142,7 +153,7 @@ int xc_cpupool_freeinfo(int xc_handle,
         return err;
     }
 
-    err = do_domctl_save(xc_handle, &domctl);
+    err = do_sysctl_save(xc_handle, &sysctl);
     unlock_pages(local, sizeof (local));
 
     if (err < 0)
index 004c5938d808f5dc39f2a014bc10875944b15fb7..fba384cc73a2e1ffd4f00bf286b023e69a3b9c33 100644 (file)
@@ -164,19 +164,6 @@ static inline int do_domctl(int xc_handle, struct xen_domctl *domctl)
     return ret;
 }
 
-static inline int do_domctl_save(int xc_handle, struct xen_domctl *domctl)
-{
-    int ret;
-
-    do
-    {
-        ret = do_domctl(xc_handle, domctl);
-    }
-    while ( (ret < 0 ) && (errno == EAGAIN) );
-
-    return ret;
-}
-
 static inline int do_sysctl(int xc_handle, struct xen_sysctl *sysctl)
 {
     int ret = -1;
index 6baedd0e148a2f39818bb55abbb1d74fc2ee0204..6cb99773ba84faf6cd7b65baf2aaa13fc4879209 100644 (file)
@@ -394,9 +394,9 @@ int cpupool_cpu_remove(unsigned int cpu)
 }
 
 /*
- * do cpupool related domctl operations
+ * do cpupool related sysctl operations
  */
-int cpupool_do_domctl(struct xen_domctl_cpupool_op *op)
+int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
 {
     int ret;
     struct cpupool *c;
@@ -404,12 +404,12 @@ int cpupool_do_domctl(struct xen_domctl_cpupool_op *op)
     switch ( op->op )
     {
 
-    case XEN_DOMCTL_CPUPOOL_OP_CREATE:
+    case XEN_SYSCTL_CPUPOOL_OP_CREATE:
     {
         int poolid;
         const struct scheduler *sched;
 
-        poolid = (op->cpupool_id == XEN_DOMCTL_CPUPOOL_PAR_ANY) ?
+        poolid = (op->cpupool_id == XEN_SYSCTL_CPUPOOL_PAR_ANY) ?
             CPUPOOLID_NONE: op->cpupool_id;
         sched = scheduler_get_by_id(op->sched_id);
         ret = -ENOENT;
@@ -424,7 +424,7 @@ int cpupool_do_domctl(struct xen_domctl_cpupool_op *op)
     }
     break;
 
-    case XEN_DOMCTL_CPUPOOL_OP_DESTROY:
+    case XEN_SYSCTL_CPUPOOL_OP_DESTROY:
     {
         spin_lock(&cpupool_lock);
         c = cpupool_find_by_id(op->cpupool_id, 1);
@@ -436,7 +436,7 @@ int cpupool_do_domctl(struct xen_domctl_cpupool_op *op)
     }
     break;
 
-    case XEN_DOMCTL_CPUPOOL_OP_INFO:
+    case XEN_SYSCTL_CPUPOOL_OP_INFO:
     {
         spin_lock(&cpupool_lock);
         c = cpupool_find_by_id(op->cpupool_id, 0);
@@ -452,7 +452,7 @@ int cpupool_do_domctl(struct xen_domctl_cpupool_op *op)
     }
     break;
 
-    case XEN_DOMCTL_CPUPOOL_OP_ADDCPU:
+    case XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
     {
         unsigned cpu;
 
@@ -460,7 +460,7 @@ int cpupool_do_domctl(struct xen_domctl_cpupool_op *op)
         printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d)\n",
             op->cpupool_id, cpu);
         spin_lock(&cpupool_lock);
-        if ( cpu == XEN_DOMCTL_CPUPOOL_PAR_ANY )
+        if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
             cpu = first_cpu(cpupool_free_cpus);
         ret = -EINVAL;
         if ( cpu >= NR_CPUS )
@@ -480,7 +480,7 @@ addcpu_out:
     }
     break;
 
-    case XEN_DOMCTL_CPUPOOL_OP_RMCPU:
+    case XEN_SYSCTL_CPUPOOL_OP_RMCPU:
     {
         unsigned cpu;
 
@@ -491,7 +491,7 @@ addcpu_out:
         if ( c == NULL )
             break;
         cpu = op->cpu;
-        if ( cpu == XEN_DOMCTL_CPUPOOL_PAR_ANY )
+        if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
             cpu = last_cpu(c->cpu_valid);
         ret = -EINVAL;
         if ( cpu >= NR_CPUS )
@@ -503,7 +503,7 @@ addcpu_out:
     }
     break;
 
-    case XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN:
+    case XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
     {
         struct domain *d;
 
@@ -547,7 +547,7 @@ addcpu_out:
     }
     break;
 
-    case XEN_DOMCTL_CPUPOOL_OP_FREEINFO:
+    case XEN_SYSCTL_CPUPOOL_OP_FREEINFO:
     {
         cpumask_to_xenctl_cpumap(&(op->cpumap),
             &cpupool_free_cpus);
index a80d34b8d859d0c41b39ae22803b0b76ef5e902d..2631960aa5e0a97d19fc60194259d4340781fbad 100644 (file)
@@ -966,14 +966,6 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
     }
     break;
 
-    case XEN_DOMCTL_cpupool_op:
-    {
-        ret = cpupool_do_domctl(&op->u.cpupool_op);
-        if ( (ret == 0) && copy_to_guest(u_domctl, op, 1) )
-            ret = -EFAULT;
-    }
-    break;
-
     default:
         ret = arch_do_domctl(op, u_domctl);
         break;
index db27fc233c88aca3585d9d9a8323d35ee3835d83..51b0a8e41482a2059cbba2809876b55e8ab590e1 100644 (file)
@@ -318,6 +318,14 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
     }
     break;
 
+    case XEN_SYSCTL_cpupool_op:
+    {
+        ret = cpupool_do_sysctl(&op->u.cpupool_op);
+        if ( (ret == 0) && copy_to_guest(u_sysctl, op, 1) )
+            ret = -EFAULT;
+    }
+    break;
+
     default:
         ret = arch_do_sysctl(op, u_sysctl);
         break;
index 3ed6468c99e0a84f65786b38586e977d97ceb6ec..5bae1cb406db7603c45a75f091e2d7150e956113 100644 (file)
@@ -786,31 +786,6 @@ struct xen_domctl_mem_sharing_op {
 typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
 
-/*
- * cpupool operations
- */
-/* XEN_DOMCTL_cpupool_op */
-#define XEN_DOMCTL_CPUPOOL_OP_CREATE                1  /* C */
-#define XEN_DOMCTL_CPUPOOL_OP_DESTROY               2  /* D */
-#define XEN_DOMCTL_CPUPOOL_OP_INFO                  3  /* I */
-#define XEN_DOMCTL_CPUPOOL_OP_ADDCPU                4  /* A */
-#define XEN_DOMCTL_CPUPOOL_OP_RMCPU                 5  /* R */
-#define XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN            6  /* M */
-#define XEN_DOMCTL_CPUPOOL_OP_FREEINFO              7  /* F */
-#define XEN_DOMCTL_CPUPOOL_PAR_ANY     0xFFFFFFFF
-struct xen_domctl_cpupool_op {
-    uint32_t op;          /* IN */
-    uint32_t cpupool_id;  /* IN: CDIARM OUT: CI */
-    uint32_t sched_id;    /* IN: C      OUT: I  */
-    uint32_t domid;       /* IN: M              */
-    uint32_t cpu;         /* IN: AR             */
-    uint32_t n_dom;       /*            OUT: I  */
-    struct xenctl_cpumap cpumap; /*     OUT: IF */
-};
-typedef struct xen_domctl_cpupool_op xen_domctl_cpupool_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpupool_op_t);
-
-
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -871,7 +846,6 @@ struct xen_domctl {
 #define XEN_DOMCTL_gettscinfo                    59
 #define XEN_DOMCTL_settscinfo                    60
 #define XEN_DOMCTL_getpageframeinfo3             61
-#define XEN_DOMCTL_cpupool_op                    62
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -920,7 +894,6 @@ struct xen_domctl {
         struct xen_domctl_debug_op          debug_op;
         struct xen_domctl_mem_event_op      mem_event_op;
         struct xen_domctl_mem_sharing_op    mem_sharing_op;
-        struct xen_domctl_cpupool_op        cpupool_op;
 #if defined(__i386__) || defined(__x86_64__)
         struct xen_domctl_cpuid             cpuid;
 #endif
index aa64055ed89214f9636bd6502e7fc97a224e2ba4..7d8d9294b836e9be00b395f234b28c24b4e1f20b 100644 (file)
@@ -514,6 +514,27 @@ struct xen_sysctl_numainfo {
 typedef struct xen_sysctl_numainfo xen_sysctl_numainfo_t;
 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_numainfo_t);
 
+#define XEN_SYSCTL_cpupool_op        18
+/* XEN_SYSCTL_cpupool_op */
+#define XEN_SYSCTL_CPUPOOL_OP_CREATE                1  /* C */
+#define XEN_SYSCTL_CPUPOOL_OP_DESTROY               2  /* D */
+#define XEN_SYSCTL_CPUPOOL_OP_INFO                  3  /* I */
+#define XEN_SYSCTL_CPUPOOL_OP_ADDCPU                4  /* A */
+#define XEN_SYSCTL_CPUPOOL_OP_RMCPU                 5  /* R */
+#define XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN            6  /* M */
+#define XEN_SYSCTL_CPUPOOL_OP_FREEINFO              7  /* F */
+#define XEN_SYSCTL_CPUPOOL_PAR_ANY     0xFFFFFFFF
+struct xen_sysctl_cpupool_op {
+    uint32_t op;          /* IN */
+    uint32_t cpupool_id;  /* IN: CDIARM OUT: CI */
+    uint32_t sched_id;    /* IN: C      OUT: I  */
+    uint32_t domid;       /* IN: M              */
+    uint32_t cpu;         /* IN: AR             */
+    uint32_t n_dom;       /*            OUT: I  */
+    struct xenctl_cpumap cpumap; /*     OUT: IF */
+};
+typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t);
 
 struct xen_sysctl {
     uint32_t cmd;
@@ -535,6 +556,7 @@ struct xen_sysctl {
         struct xen_sysctl_pm_op             pm_op;
         struct xen_sysctl_page_offline_op   page_offline;
         struct xen_sysctl_lockprof_op       lockprof_op;
+        struct xen_sysctl_cpupool_op        cpupool_op;
         uint8_t                             pad[128];
     } u;
 };
index 91368466ff04d962119e2c38f7f72aaa64e6157e..71ead96c8e26f1177d817d8a1bf597b546c4b1f8 100644 (file)
@@ -9,6 +9,7 @@
 #include <xen/shared.h>
 #include <public/xen.h>
 #include <public/domctl.h>
+#include <public/sysctl.h>
 #include <public/vcpu.h>
 #include <public/xsm/acm.h>
 #include <xen/time.h>
@@ -624,7 +625,7 @@ void cpupool_cpu_add(unsigned int cpu);
 int cpupool_cpu_remove(unsigned int cpu);
 int cpupool_add_domain(struct domain *d, int poolid);
 void cpupool_rm_domain(struct domain *d);
-int cpupool_do_domctl(struct xen_domctl_cpupool_op *op);
+int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);
 #define num_cpupool_cpus(c) (cpus_weight((c)->cpu_valid))
 
 #endif /* __SCHED_H__ */