xenbits.xensource.com Git - libvirt.git/commitdiff
qemu driver for virDomainGetCPUStats using cpuacct cgroup.
author      KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
            Fri, 2 Mar 2012 02:54:23 +0000 (10:54 +0800)
committer   Eric Blake <eblake@redhat.com>
            Wed, 7 Mar 2012 04:54:48 +0000 (21:54 -0700)
* For now, only "cpu_time" is supported.
* The cpuacct cgroup is used to provide per-cpu cputime information.

* src/qemu/qemu.conf     - document the new cpuacct cgroup controller.
* src/qemu/qemu_conf.c   - enable the cpuacct cgroup controller by default.
* src/qemu/qemu_driver.c - add the qemuDomainGetCPUStats entry point (caller sketch below).
* src/util/cgroup.c/h    - add an interface for reading per-cpu cputime.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
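
For reference, the new driver code is reached through the public virDomainGetCPUStats() API. The sketch below is not part of this commit; it shows one way a management application might query the domain-total "cpu_time" this patch exposes, using the usual two-pass pattern (first ask how many parameters the driver supports, then fetch them). The helper name print_total_cputime is invented and error handling is abbreviated.

#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>

/* Illustrative caller, not libvirt code: fetch the domain-total CPU stats.
 * start_cpu == -1 with ncpus == 1 selects the total; a first call with
 * nparams == 0 returns the number of supported parameters (1 for now). */
static int
print_total_cputime(virDomainPtr dom)
{
    virTypedParameterPtr params;
    int nparams, i;

    nparams = virDomainGetCPUStats(dom, NULL, 0, -1, 1, 0);
    if (nparams < 0)
        return -1;

    params = calloc(nparams, sizeof(*params));
    if (!params)
        return -1;

    if (virDomainGetCPUStats(dom, params, nparams, -1, 1, 0) < 0) {
        free(params);
        return -1;
    }

    for (i = 0; i < nparams; i++) {
        if (params[i].type == VIR_TYPED_PARAM_ULLONG)
            printf("%s: %llu ns\n", params[i].field, params[i].value.ul);
    }

    free(params);
    return 0;
}
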
src/libvirt_private.syms
src/qemu/qemu.conf
src/qemu/qemu_conf.c
src/qemu/qemu_driver.c
src/util/cgroup.c
src/util/cgroup.h

index 1f58832ebb5a7f9c8e4b0571726304b4d5d78790..c44c61732ccc3d3ef1641b78a8acaf930052665d 100644 (file)
@@ -74,16 +74,17 @@ virCgroupForDriver;
 virCgroupForVcpu;
 virCgroupFree;
 virCgroupGetBlkioWeight;
-virCgroupGetCpuShares;
 virCgroupGetCpuCfsPeriod;
 virCgroupGetCpuCfsQuota;
+virCgroupGetCpuShares;
+virCgroupGetCpuacctPercpuUsage;
 virCgroupGetCpuacctUsage;
 virCgroupGetCpusetMems;
 virCgroupGetFreezerState;
+virCgroupGetMemSwapHardLimit;
 virCgroupGetMemoryHardLimit;
 virCgroupGetMemorySoftLimit;
 virCgroupGetMemoryUsage;
-virCgroupGetMemSwapHardLimit;
 virCgroupKill;
 virCgroupKillPainfully;
 virCgroupKillRecursive;
@@ -92,15 +93,15 @@ virCgroupPathOfController;
 virCgroupRemove;
 virCgroupSetBlkioDeviceWeight;
 virCgroupSetBlkioWeight;
-virCgroupSetCpuShares;
 virCgroupSetCpuCfsPeriod;
 virCgroupSetCpuCfsQuota;
+virCgroupSetCpuShares;
 virCgroupSetCpusetMems;
 virCgroupSetFreezerState;
+virCgroupSetMemSwapHardLimit;
 virCgroupSetMemory;
 virCgroupSetMemoryHardLimit;
 virCgroupSetMemorySoftLimit;
-virCgroupSetMemSwapHardLimit;
 
 
 # command.h
index 95428c1968ce3fc4ac02342e2ccc6d75b077af1f..cb877281920c97448ffbfddf40cab70221ceeed0 100644 (file)
 #  - 'memory' - use for memory tunables
 #  - 'blkio' - use for block devices I/O tunables
 #  - 'cpuset' - use for CPUs and memory nodes
+#  - 'cpuacct' - use for CPU statistics.
 #
 # NB, even if configured here, they won't be used unless
 # the administrator has mounted cgroups, e.g.:
 # can be mounted in different locations. libvirt will detect
 # where they are located.
 #
-# cgroup_controllers = [ "cpu", "devices", "memory", "blkio", "cpuset" ]
+# cgroup_controllers = [ "cpu", "devices", "memory", "blkio", "cpuset", "cpuacct" ]
 
 # This is the basic set of devices allowed / required by
 # all virtual machines.
index e95c7a55e043927c92d4391b5916ce0bde398b92..a709cbf1253a20b3f3d95b86be2d983b461b95a8 100644 (file)
@@ -318,7 +318,8 @@ int qemudLoadDriverConfig(struct qemud_driver *driver,
             (1 << VIR_CGROUP_CONTROLLER_DEVICES) |
             (1 << VIR_CGROUP_CONTROLLER_MEMORY) |
             (1 << VIR_CGROUP_CONTROLLER_BLKIO) |
-            (1 << VIR_CGROUP_CONTROLLER_CPUSET);
+            (1 << VIR_CGROUP_CONTROLLER_CPUSET) |
+            (1 << VIR_CGROUP_CONTROLLER_CPUACCT);
     }
     for (i = 0 ; i < VIR_CGROUP_CONTROLLER_LAST ; i++) {
         if (driver->cgroupControllers & (1 << i)) {
index 733df0a57ccc4516e032f648dd4797bede76505b..538a4190ce5325bbe137e4fb58def36a840fd12c 100644 (file)
@@ -12095,6 +12095,158 @@ cleanup:
     return ret;
 }
 
+/* qemuDomainGetCPUStats() with start_cpu == -1 */
+static int
+qemuDomainGetTotalcpuStats(virCgroupPtr group,
+                           virTypedParameterPtr params,
+                           int nparams)
+{
+    unsigned long long cpu_time;
+    int param_idx = 0;
+    int ret;
+
+    if (nparams == 0) /* return supported number of params */
+        return 1;
+    /* entry 0 is cputime */
+    ret = virCgroupGetCpuacctUsage(group, &cpu_time);
+    if (ret < 0) {
+        virReportSystemError(-ret, "%s", _("unable to get cpu account"));
+        return -1;
+    }
+
+    virTypedParameterAssign(&params[param_idx], VIR_DOMAIN_CPU_STATS_CPUTIME,
+                            VIR_TYPED_PARAM_ULLONG, cpu_time);
+    return 1;
+}
+
+static int
+qemuDomainGetPercpuStats(virDomainPtr domain,
+                         virCgroupPtr group,
+                         virTypedParameterPtr params,
+                         unsigned int nparams,
+                         int start_cpu,
+                         unsigned int ncpus)
+{
+    char *map = NULL;
+    int rv = -1;
+    int i, max_id;
+    char *pos;
+    char *buf = NULL;
+    virTypedParameterPtr ent;
+    int param_idx;
+
+    /* return the number of supported params */
+    if (nparams == 0 && ncpus != 0)
+        return 1; /* only cpu_time is supported */
+
+    /* return percpu cputime in index 0 */
+    param_idx = 0;
+    /* to parse account file, we need "present" cpu map */
+    map = nodeGetCPUmap(domain->conn, &max_id, "present");
+    if (!map)
+        return rv;
+
+    if (ncpus == 0) { /* returns max cpu ID */
+        rv = max_id + 1;
+        goto cleanup;
+    }
+
+    if (start_cpu > max_id) {
+        qemuReportError(VIR_ERR_INVALID_ARG,
+                        _("start_cpu %d larger than maximum of %d"),
+                        start_cpu, max_id);
+        goto cleanup;
+    }
+
+    /* we get percpu cputime accounting info. */
+    if (virCgroupGetCpuacctPercpuUsage(group, &buf))
+        goto cleanup;
+    pos = buf;
+
+    if (max_id - start_cpu > ncpus - 1)
+        max_id = start_cpu + ncpus - 1;
+
+    for (i = 0; i <= max_id; i++) {
+        unsigned long long cpu_time;
+
+        if (!map[i]) {
+            cpu_time = 0;
+        } else if (virStrToLong_ull(pos, &pos, 10, &cpu_time) < 0) {
+            qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                            _("cpuacct parse error"));
+            goto cleanup;
+        }
+        if (i < start_cpu)
+            continue;
+        ent = &params[(i - start_cpu) * nparams + param_idx];
+        virTypedParameterAssign(ent, VIR_DOMAIN_CPU_STATS_CPUTIME,
+                                VIR_TYPED_PARAM_ULLONG, cpu_time);
+    }
+    rv = param_idx + 1;
+cleanup:
+    VIR_FREE(buf);
+    VIR_FREE(map);
+    return rv;
+}
+
+
+static int
+qemuDomainGetCPUStats(virDomainPtr domain,
+                virTypedParameterPtr params,
+                unsigned int nparams,
+                int start_cpu,
+                unsigned int ncpus,
+                unsigned int flags)
+{
+    struct qemud_driver *driver = domain->conn->privateData;
+    virCgroupPtr group = NULL;
+    virDomainObjPtr vm = NULL;
+    int ret = -1;
+    bool isActive;
+
+    virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1);
+
+    qemuDriverLock(driver);
+
+    vm = virDomainFindByUUID(&driver->domains, domain->uuid);
+    if (vm == NULL) {
+        qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                        _("No such domain %s"), domain->uuid);
+        goto cleanup;
+    }
+
+    isActive = virDomainObjIsActive(vm);
+    if (!isActive) {
+        qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                        _("domain is not running"));
+        goto cleanup;
+    }
+
+    if (!qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPUACCT)) {
+        qemuReportError(VIR_ERR_OPERATION_INVALID,
+                        "%s", _("cgroup CPUACCT controller is not mounted"));
+        goto cleanup;
+    }
+
+    if (virCgroupForDomain(driver->cgroup, vm->def->name, &group, 0) != 0) {
+        qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                        _("cannot find cgroup for domain %s"), vm->def->name);
+        goto cleanup;
+    }
+
+    if (start_cpu == -1)
+        ret = qemuDomainGetTotalcpuStats(group, params, nparams);
+    else
+        ret = qemuDomainGetPercpuStats(domain, group, params, nparams,
+                                       start_cpu, ncpus);
+cleanup:
+    virCgroupFree(&group);
+    if (vm)
+        virDomainObjUnlock(vm);
+    qemuDriverUnlock(driver);
+    return ret;
+}
+
 static int
 qemuDomainPMSuspendForDuration(virDomainPtr dom,
                                unsigned int target,
@@ -12395,6 +12547,7 @@ static virDriver qemuDriver = {
     .domainGetMetadata = qemuDomainGetMetadata, /* 0.9.10 */
     .domainPMSuspendForDuration = qemuDomainPMSuspendForDuration, /* 0.9.11 */
     .domainPMWakeup = qemuDomainPMWakeup, /* 0.9.11 */
+    .domainGetCPUStats = qemuDomainGetCPUStats, /* 0.9.11 */
 };
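
The per-cpu path fills the caller's params array as ncpus consecutive blocks of nparams entries, which is what the (i - start_cpu) * nparams + param_idx indexing above implements. The following caller-side sketch (again not part of this commit; print_percpu_cputime is an invented name, error handling abbreviated) shows the intended calling sequence: the CPU count is discovered with ncpus == 0, the per-cpu parameter count with nparams == 0, and then the array is filled in one call. Present-but-offline CPUs are reported with a cpu_time of 0, and unassigned trailing entries keep type 0, so the sketch checks the type field before printing.

#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>

/* Illustrative caller, not libvirt code: dump per-CPU cputime. */
static int
print_percpu_cputime(virDomainPtr dom)
{
    virTypedParameterPtr params, ent;
    int ncpus, nparams, cpu, p;

    ncpus = virDomainGetCPUStats(dom, NULL, 0, 0, 0, 0);    /* max CPU count */
    if (ncpus < 0)
        return -1;
    nparams = virDomainGetCPUStats(dom, NULL, 0, 0, 1, 0);  /* params per CPU */
    if (nparams < 0)
        return -1;

    params = calloc((size_t)ncpus * nparams, sizeof(*params));
    if (!params)
        return -1;

    if (virDomainGetCPUStats(dom, params, nparams, 0, ncpus, 0) < 0) {
        free(params);
        return -1;
    }

    for (cpu = 0; cpu < ncpus; cpu++) {
        for (p = 0; p < nparams; p++) {
            ent = &params[cpu * nparams + p];
            if (ent->type == VIR_TYPED_PARAM_ULLONG)
                printf("cpu%d %s: %llu ns\n", cpu, ent->field, ent->value.ul);
        }
    }

    free(params);
    return 0;
}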
 
 
index 00528c522547ad85cc9a4079709cb0aa60094f09..c150fbb8b2b18aadb0ca35050162673761648968 100644 (file)
@@ -1555,6 +1555,12 @@ int virCgroupGetCpuacctUsage(virCgroupPtr group, unsigned long long *usage)
                                 "cpuacct.usage", usage);
 }
 
+int virCgroupGetCpuacctPercpuUsage(virCgroupPtr group, char **usage)
+{
+    return virCgroupGetValueStr(group, VIR_CGROUP_CONTROLLER_CPUACCT,
+                                "cpuacct.usage_percpu", usage);
+}
+
 int virCgroupSetFreezerState(virCgroupPtr group, const char *state)
 {
     return virCgroupSetValueStr(group,
index 8d757350c3a0c00ec6415ed63e662f38355093d8..b4e0f373d8b4a1b2579f704064fd06a5e49d3e1f 100644 (file)
@@ -115,6 +115,7 @@ int virCgroupSetCpuCfsQuota(virCgroupPtr group, long long cfs_quota);
 int virCgroupGetCpuCfsQuota(virCgroupPtr group, long long *cfs_quota);
 
 int virCgroupGetCpuacctUsage(virCgroupPtr group, unsigned long long *usage);
+int virCgroupGetCpuacctPercpuUsage(virCgroupPtr group, char **usage);
 
 int virCgroupSetFreezerState(virCgroupPtr group, const char *state);
 int virCgroupGetFreezerState(virCgroupPtr group, char **state);
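
virCgroupGetCpuacctPercpuUsage() simply hands back the raw contents of cpuacct.usage_percpu, which the kernel writes as one space-separated usage counter in nanoseconds per present CPU, in CPU-id order; qemuDomainGetPercpuStats() above walks that string with virStrToLong_ull. Below is a standalone sketch of the same parse using plain strtoull (not libvirt code; the sample string is made-up example data).

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a cpuacct.usage_percpu style string: per-CPU nanosecond counters
 * separated by spaces, terminated by a newline. */
static int
parse_usage_percpu(const char *buf)
{
    const char *pos = buf;
    char *end;
    unsigned long long ns;
    int cpu = 0;

    while (*pos != '\0' && *pos != '\n') {
        errno = 0;
        ns = strtoull(pos, &end, 10);
        if (end == pos || errno != 0)
            return -1;                       /* parse error */
        printf("cpu%d: %llu ns\n", cpu++, ns);
        pos = end;
        while (*pos == ' ')
            pos++;
    }
    return 0;
}

int
main(void)
{
    /* Hypothetical sample contents of cpuacct.usage_percpu */
    return parse_usage_percpu("252758976 1004220848 0 839453112\n");
}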