    return ret;
}
-/* This function gets the sums of cpu time consumed by all vcpus.
- * For example, if there are 4 physical cpus, and 2 vcpus in a domain,
- * then for each vcpu, the cpuacct.usage_percpu looks like this:
- *   t0 t1 t2 t3
- * and we have 2 groups of such data:
- *   v\p    0    1    2    3
- *    0   t00  t01  t02  t03
- *    1   t10  t11  t12  t13
- * for each pcpu, the sum is cpu time consumed by all vcpus.
- *   s0 = t00 + t10
- *   s1 = t01 + t11
- *   s2 = t02 + t12
- *   s3 = t03 + t13
- */
-static int
-getSumVcpuPercpuStats(virCgroupPtr group,
-                      unsigned int nvcpupids,
-                      unsigned long long *sum_cpu_time,
-                      unsigned int num)
-{
-    int ret = -1;
-    size_t i;
-    char *buf = NULL;
-    virCgroupPtr group_vcpu = NULL;
-
-    for (i = 0; i < nvcpupids; i++) {
-        char *pos;
-        unsigned long long tmp;
-        size_t j;
-
-        if (virCgroupNewVcpu(group, i, false, &group_vcpu) < 0)
-            goto cleanup;
-
-        if (virCgroupGetCpuacctPercpuUsage(group_vcpu, &buf) < 0)
-            goto cleanup;
-
-        pos = buf;
-        for (j = 0; j < num; j++) {
-            if (virStrToLong_ull(pos, &pos, 10, &tmp) < 0) {
-                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-                               _("cpuacct parse error"));
-                goto cleanup;
-            }
-            sum_cpu_time[j] += tmp;
-        }
-
-        virCgroupFree(&group_vcpu);
-        VIR_FREE(buf);
-    }
-
-    ret = 0;
- cleanup:
-    virCgroupFree(&group_vcpu);
-    VIR_FREE(buf);
-    return ret;
-}
-
-static int
-qemuDomainGetPercpuStats(virCgroupPtr group,
-                         virTypedParameterPtr params,
-                         unsigned int nparams,
-                         int start_cpu,
-                         unsigned int ncpus,
-                         unsigned int nvcpupids)
-{
-    int rv = -1;
-    size_t i;
-    int id, max_id;
-    char *pos;
-    char *buf = NULL;
-    unsigned long long *sum_cpu_time = NULL;
-    unsigned long long *sum_cpu_pos;
-    unsigned int n = 0;
-    virTypedParameterPtr ent;
-    int param_idx;
-    unsigned long long cpu_time;
-
-    /* return the number of supported params */
-    if (nparams == 0 && ncpus != 0)
-        return QEMU_NB_PER_CPU_STAT_PARAM;
-
-    /* To parse account file, we need to know how many cpus are present. */
-    max_id = nodeGetCPUCount();
-    if (max_id < 0)
-        return rv;
-
-    if (ncpus == 0) { /* returns max cpu ID */
-        rv = max_id;
-        goto cleanup;
-    }
-
-    if (start_cpu > max_id) {
-        virReportError(VIR_ERR_INVALID_ARG,
-                       _("start_cpu %d larger than maximum of %d"),
-                       start_cpu, max_id);
-        goto cleanup;
-    }
-
-    /* we get percpu cputime accounting info. */
-    if (virCgroupGetCpuacctPercpuUsage(group, &buf))
-        goto cleanup;
-    pos = buf;
-
-    /* return percpu cputime in index 0 */
-    param_idx = 0;
-
-    /* number of cpus to compute */
-    if (start_cpu >= max_id - ncpus)
-        id = max_id - 1;
-    else
-        id = start_cpu + ncpus - 1;
-
-    for (i = 0; i <= id; i++) {
-        if (virStrToLong_ull(pos, &pos, 10, &cpu_time) < 0) {
-            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-                           _("cpuacct parse error"));
-            goto cleanup;
-        } else {
-            n++;
-        }
-        if (i < start_cpu)
-            continue;
-        ent = &params[(i - start_cpu) * nparams + param_idx];
-        if (virTypedParameterAssign(ent, VIR_DOMAIN_CPU_STATS_CPUTIME,
-                                    VIR_TYPED_PARAM_ULLONG, cpu_time) < 0)
-            goto cleanup;
-    }
-
-    /* return percpu vcputime in index 1 */
-    if (++param_idx >= nparams) {
-        rv = nparams;
-        goto cleanup;
-    }
-
-    if (VIR_ALLOC_N(sum_cpu_time, n) < 0)
-        goto cleanup;
-    if (getSumVcpuPercpuStats(group, nvcpupids, sum_cpu_time, n) < 0)
-        goto cleanup;
-
-    sum_cpu_pos = sum_cpu_time;
-    for (i = 0; i <= id; i++) {
-        cpu_time = *(sum_cpu_pos++);
-        if (i < start_cpu)
-            continue;
-        if (virTypedParameterAssign(&params[(i - start_cpu) * nparams +
-                                            param_idx],
-                                    VIR_DOMAIN_CPU_STATS_VCPUTIME,
-                                    VIR_TYPED_PARAM_ULLONG,
-                                    cpu_time) < 0)
-            goto cleanup;
-    }
-
-    rv = param_idx + 1;
- cleanup:
-    VIR_FREE(sum_cpu_time);
-    VIR_FREE(buf);
-    return rv;
-}
-
static int
qemuDomainGetCPUStats(virDomainPtr domain,
        ret = virCgroupGetDomainTotalCpuStats(priv->cgroup,
                                              params, nparams);
    else
-        ret = qemuDomainGetPercpuStats(priv->cgroup, params, nparams,
-                                       start_cpu, ncpus, priv->nvcpupids);
+        ret = virCgroupGetPercpuStats(priv->cgroup, params, nparams,
+                                      start_cpu, ncpus, priv->nvcpupids);
 cleanup:
    if (vm)
        virObjectUnlock(vm);
}
+/* This function gets the sums of cpu time consumed by all vcpus.
+ * For example, if there are 4 physical cpus, and 2 vcpus in a domain,
+ * then for each vcpu, the cpuacct.usage_percpu looks like this:
+ *   t0 t1 t2 t3
+ * and we have 2 groups of such data:
+ *   v\p    0    1    2    3
+ *    0   t00  t01  t02  t03
+ *    1   t10  t11  t12  t13
+ * for each pcpu, the sum is cpu time consumed by all vcpus.
+ *   s0 = t00 + t10
+ *   s1 = t01 + t11
+ *   s2 = t02 + t12
+ *   s3 = t03 + t13
+ */
+static int
+virCgroupGetPercpuVcpuSum(virCgroupPtr group,
+                          unsigned int nvcpupids,
+                          unsigned long long *sum_cpu_time,
+                          unsigned int num)
+{
+    int ret = -1;
+    size_t i;
+    char *buf = NULL;
+    virCgroupPtr group_vcpu = NULL;
+
+    for (i = 0; i < nvcpupids; i++) {
+        char *pos;
+        unsigned long long tmp;
+        size_t j;
+
+        if (virCgroupNewVcpu(group, i, false, &group_vcpu) < 0)
+            goto cleanup;
+
+        if (virCgroupGetCpuacctPercpuUsage(group_vcpu, &buf) < 0)
+            goto cleanup;
+
+        pos = buf;
+        for (j = 0; j < num; j++) {
+            if (virStrToLong_ull(pos, &pos, 10, &tmp) < 0) {
+                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                               _("cpuacct parse error"));
+                goto cleanup;
+            }
+            sum_cpu_time[j] += tmp;
+        }
+
+        virCgroupFree(&group_vcpu);
+        VIR_FREE(buf);
+    }
+
+    ret = 0;
+ cleanup:
+    virCgroupFree(&group_vcpu);
+    VIR_FREE(buf);
+    return ret;
+}
+
+
int
virCgroupGetPercpuStats(virCgroupPtr group,
                        virTypedParameterPtr params,
                        unsigned int nparams,
                        int start_cpu,
-                        unsigned int ncpus)
+                        unsigned int ncpus,
+                        unsigned int nvcpupids)
{
    int rv = -1;
    size_t i;
    int id, max_id;
    char *pos;
    char *buf = NULL;
+    unsigned long long *sum_cpu_time = NULL;
+    unsigned long long *sum_cpu_pos;
+    unsigned int n = 0;
    virTypedParameterPtr ent;
    int param_idx;
    unsigned long long cpu_time;
    /* return the number of supported params */
-    if (nparams == 0 && ncpus != 0)
-        return CGROUP_NB_PER_CPU_STAT_PARAM;
+    if (nparams == 0 && ncpus != 0) {
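+        /* one extra stat (VIR_DOMAIN_CPU_STATS_VCPUTIME) when vcpu pids are known */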
+        if (nvcpupids == 0)
+            return CGROUP_NB_PER_CPU_STAT_PARAM;
+        else
+            return CGROUP_NB_PER_CPU_STAT_PARAM + 1;
+    }
    /* To parse account file, we need to know how many cpus are present. */
    max_id = nodeGetCPUCount();
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("cpuacct parse error"));
            goto cleanup;
+        } else {
+            n++;
        }
        if (i < start_cpu)
            continue;
            goto cleanup;
    }
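+    /* the second stat per CPU is the summed vcpu time; skip it when no
+     * vcpu cgroups are known or the caller left no room for it */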
+    if (nvcpupids == 0 || param_idx + 1 >= nparams)
+        goto success;
+    /* return percpu vcputime in index 1 */
+    param_idx++;
+
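+    /* sum_cpu_time needs one slot for each of the n percpu values parsed above */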
+    if (VIR_ALLOC_N(sum_cpu_time, n) < 0)
+        goto cleanup;
+    if (virCgroupGetPercpuVcpuSum(group, nvcpupids, sum_cpu_time, n) < 0)
+        goto cleanup;
+
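+    /* report the per-vcpu sums for the CPUs in the requested range */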
+    sum_cpu_pos = sum_cpu_time;
+    for (i = 0; i <= id; i++) {
+        cpu_time = *(sum_cpu_pos++);
+        if (i < start_cpu)
+            continue;
+        if (virTypedParameterAssign(&params[(i - start_cpu) * nparams +
+                                            param_idx],
+                                    VIR_DOMAIN_CPU_STATS_VCPUTIME,
+                                    VIR_TYPED_PARAM_ULLONG,
+                                    cpu_time) < 0)
+            goto cleanup;
+    }
+
+ success:
    rv = param_idx + 1;
 cleanup:
+    VIR_FREE(sum_cpu_time);
    VIR_FREE(buf);
    return rv;
}