]> xenbits.xensource.com Git - people/liuw/libxenctrl-split/libvirt.git/commitdiff
cputune: Support cputune for qemu driver
authorOsier Yang <jyang@redhat.com>
Tue, 29 Mar 2011 13:41:25 +0000 (21:41 +0800)
committerOsier Yang <jyang@redhat.com>
Tue, 29 Mar 2011 14:13:46 +0000 (22:13 +0800)
When a domain starts up, set its CPU affinity and CPU shares according
to the cputune element specified in the domain XML.

Modify "qemudDomainPinVcpu" to update domain config for vcpupin,
and modify "qemuSetSchedulerParameters" to update domain config
for cpu shares.

v1 - v2:
   * Use "VIR_ALLOC_N" instead of "VIR_ALLOC_VAR"
   * But keep raising an error when adding the vcpupin xml
     entry fails, as I still don't have a better idea yet.

src/qemu/qemu_cgroup.c
src/qemu/qemu_driver.c
src/qemu/qemu_process.c

index 935517fb7f2933af4c2add4bbb7f0325e54ef643..8cf45dad4158856bd346ee425bb73ac403a48033 100644 (file)
@@ -343,6 +343,21 @@ int qemuSetupCgroup(struct qemud_driver *driver,
                  vm->def->name);
     }
 
+    if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPU)) {
+        if (vm->def->cputune.shares != 0) {
+            rc = virCgroupSetCpuShares(cgroup, vm->def->cputune.shares);
+            if(rc != 0) {
+                virReportSystemError(-rc,
+                                     _("Unable to set io cpu shares for domain %s"),
+                                     vm->def->name);
+                goto cleanup;
+            }
+        }
+    } else {
+        qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+                        _("CPU tuning is not available on this host"));
+    }
+
 done:
     virCgroupFree(&cgroup);
     return 0;
index 539b5ad9a0aed40059b13d3b1fbb2e6c12cabad8..104e92d524506c82a6cdc27c6e688eb0d640b5fa 100644 (file)
@@ -2679,6 +2679,13 @@ qemudDomainPinVcpu(virDomainPtr dom,
                         "%s", _("cpu affinity is not supported"));
         goto cleanup;
     }
+
+    if (virDomainVcpupinAdd(vm->def, cpumap, maplen, vcpu) < 0) {
+        qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                        "%s", _("failed to update or add vcpupin xml"));
+        goto cleanup;
+    }
+
     ret = 0;
 
 cleanup:
@@ -4644,6 +4651,8 @@ static int qemuSetSchedulerParameters(virDomainPtr dom,
                                      _("unable to set cpu shares tunable"));
                 goto cleanup;
             }
+
+            vm->def->cputune.shares = params[i].value.ul;
         } else {
             qemuReportError(VIR_ERR_INVALID_ARG,
                             _("Invalid parameter `%s'"), param->field);
index 209c8cfb4d2a61982f12e26910e016566c66fbac..e31e1b4bece7e869bab739e59b54fb5238360967 100644 (file)
@@ -1143,6 +1143,77 @@ qemuProcessInitCpuAffinity(virDomainObjPtr vm)
     return 0;
 }
 
+/* Set CPU affinites for vcpus if vcpupin xml provided. */
+static int
+qemuProcessSetVcpuAffinites(virConnectPtr conn,
+                            virDomainObjPtr vm)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    virDomainDefPtr def = vm->def;
+    virNodeInfo nodeinfo;
+    pid_t vcpupid;
+    unsigned char *cpumask;
+    int vcpu, cpumaplen, hostcpus, maxcpu;
+
+    if (virNodeGetInfo(conn, &nodeinfo) != 0) {
+        return  -1;
+    }
+
+    if (!def->cputune.nvcpupin)
+        return 0;
+
+    if (priv->vcpupids == NULL) {
+        qemuReportError(VIR_ERR_NO_SUPPORT,
+                        "%s", _("cpu affinity is not supported"));
+        return -1;
+    }
+
+    hostcpus = VIR_NODEINFO_MAXCPUS(nodeinfo);
+    cpumaplen = VIR_CPU_MAPLEN(hostcpus);
+    maxcpu = cpumaplen * 8;
+
+    if (maxcpu > hostcpus)
+        maxcpu = hostcpus;
+
+    for (vcpu = 0; vcpu < def->cputune.nvcpupin; vcpu++) {
+        if (vcpu != def->cputune.vcpupin[vcpu]->vcpuid)
+            continue;
+
+        int i;
+        unsigned char *cpumap = NULL;
+
+        if (VIR_ALLOC_N(cpumap, cpumaplen) < 0) {
+            virReportOOMError();
+            return -1;
+        }
+
+        cpumask = (unsigned char *)def->cputune.vcpupin[vcpu]->cpumask;
+        vcpupid = priv->vcpupids[vcpu];
+
+        /* Convert cpumask to bitmap here. */
+        for (i = 0; i < VIR_DOMAIN_CPUMASK_LEN; i++) {
+            int cur = 0;
+            int mod = 0;
+
+            if (i) {
+                cur = i / 8;
+                mod = i % 8;
+            }
+
+            if (cpumask[i])
+                cpumap[cur] |= 1 << mod;
+        }
+
+        if (virProcessInfoSetAffinity(vcpupid,
+                                      cpumap,
+                                      cpumaplen,
+                                      maxcpu) < 0) {
+            return -1;
+        }
+    }
+
+    return 0;
+}
 
 static int
 qemuProcessInitPasswords(virConnectPtr conn,
@@ -2217,6 +2288,10 @@ int qemuProcessStart(virConnectPtr conn,
     if (qemuProcessDetectVcpuPIDs(driver, vm) < 0)
         goto cleanup;
 
+    VIR_DEBUG0("Setting VCPU affinities");
+    if (qemuProcessSetVcpuAffinites(conn, vm) < 0)
+        goto cleanup;
+
     VIR_DEBUG0("Setting any required VM passwords");
     if (qemuProcessInitPasswords(conn, driver, vm, qemuCaps) < 0)
         goto cleanup;