qemu: Replace checking for vcpu<->pid mapping availability with a helper
author    Peter Krempa <pkrempa@redhat.com>
          Wed, 11 Nov 2015 13:20:04 +0000 (14:20 +0100)
committer Peter Krempa <pkrempa@redhat.com>
          Wed, 9 Dec 2015 13:57:12 +0000 (14:57 +0100)
Add qemuDomainHasVcpuPids to do the checking and replace the open-coded
checks with it.

We no longer need to check whether the vcpupids array contains fake data
(vcpupids[0] == vm->pid), since that possibility was removed in commits
b07f3d821dfb11a118ee75ea275fd6ab737d9500 and
65686e5a81d654d834d338fceeaf0229b2ca4f0d.
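
As a quick illustration of the pattern this commit applies, here is a
minimal, self-contained sketch: a single predicate helper replaces the
open-coded priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid checks at
every call site. The types and names below (DomainPrivate,
domainHasVcpuPids, setupVcpuCgroups) are simplified stand-ins for
illustration, not libvirt's real API.

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/types.h>

    /* Simplified stand-in for qemuDomainObjPrivate. */
    typedef struct {
        pid_t *vcpupids;   /* one thread id per vCPU, or NULL if unknown */
        int nvcpupids;     /* 0 when no vCPU<->pid mapping was detected */
    } DomainPrivate;

    /* The single predicate every caller consults. */
    static bool
    domainHasVcpuPids(const DomainPrivate *priv)
    {
        return priv->nvcpupids > 0;
    }

    /* Callers bail out early instead of re-deriving the condition. */
    static int
    setupVcpuCgroups(const DomainPrivate *priv)
    {
        if (!domainHasVcpuPids(priv))
            return 0;   /* no per-vCPU threads known; nothing to pin */

        /* ... per-vCPU cgroup setup would follow here ... */
        return 0;
    }

    int
    main(void)
    {
        DomainPrivate priv = { NULL, 0 };
        return setupVcpuCgroups(&priv);
    }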

src/qemu/qemu_cgroup.c
src/qemu/qemu_domain.c
src/qemu/qemu_domain.h
src/qemu/qemu_driver.c
src/qemu/qemu_process.c

diff --git a/src/qemu/qemu_cgroup.c b/src/qemu/qemu_cgroup.c
index 95a8e66e23060c473d5343ecf018fc52f8da7221..56b2bc41da0c88b75d0793de223d10b18c539a56 100644
--- a/src/qemu/qemu_cgroup.c
+++ b/src/qemu/qemu_cgroup.c
@@ -1025,12 +1025,9 @@ qemuSetupCgroupForVcpu(virDomainObjPtr vm)
         !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET))
         return 0;
 
-    if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
-        /* If we don't know VCPU<->PID mapping or all vcpu runs in the same
-         * thread, we cannot control each vcpu.
-         */
+    /* If vCPU<->pid mapping is missing we can't do vCPU pinning */
+    if (!qemuDomainHasVcpuPids(vm))
         return 0;
-    }
 
     if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 &&
         mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index cb1f826fbfe0b2eeb27b5aa3a9301fac1e7c2a72..018f6f6bdbb292956d6e4b9c0514bd085e298427 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -4116,3 +4116,18 @@ qemuDomainRequiresMlock(virDomainDefPtr def)
 
     return false;
 }
+
+
+/**
+ * qemuDomainHasVcpuPids:
+ * @vm: Domain object
+ *
+ * Returns true if vCPU pids were successfully detected for the VM.
+ */
+bool
+qemuDomainHasVcpuPids(virDomainObjPtr vm)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+
+    return priv->nvcpupids > 0;
+}
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 31c7d33d8935a73c26d1902c0aeafcb9cac692a5..5e2b69957bf28b544b7c2a88de473a090a1a236d 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -505,4 +505,6 @@ int qemuDomainDefValidateMemoryHotplug(const virDomainDef *def,
                                        virQEMUCapsPtr qemuCaps,
                                        const virDomainMemoryDef *mem);
 
+bool qemuDomainHasVcpuPids(virDomainObjPtr vm);
+
 #endif /* __QEMU_DOMAIN_H__ */
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 5c3703f3b136e503954ee6224443b0483e9cba3e..3b3761a7a04803b4616553a28689e94f3d6209ca 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1428,7 +1428,7 @@ qemuDomainHelperGetVcpus(virDomainObjPtr vm, virVcpuInfoPtr info, int maxinfo,
     size_t i, v;
     qemuDomainObjPrivatePtr priv = vm->privateData;
 
-    if (priv->vcpupids == NULL) {
+    if (!qemuDomainHasVcpuPids(vm)) {
         virReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("cpu affinity is not supported"));
         return -1;
@@ -5118,7 +5118,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
     }
 
     if (def) {
-        if (priv->vcpupids == NULL) {
+        if (!qemuDomainHasVcpuPids(vm)) {
             virReportError(VIR_ERR_OPERATION_INVALID,
                            "%s", _("cpu affinity is not supported"));
             goto endjob;
@@ -10287,21 +10287,18 @@ qemuSetVcpusBWLive(virDomainObjPtr vm, virCgroupPtr cgroup,
     if (period == 0 && quota == 0)
         return 0;
 
-    /* If we does not know VCPU<->PID mapping or all vcpu runs in the same
-     * thread, we cannot control each vcpu. So we only modify cpu bandwidth
-     * when each vcpu has a separated thread.
-     */
-    if (priv->nvcpupids != 0 && priv->vcpupids[0] != vm->pid) {
-        for (i = 0; i < priv->nvcpupids; i++) {
-            if (virCgroupNewThread(cgroup, VIR_CGROUP_THREAD_VCPU, i,
-                                   false, &cgroup_vcpu) < 0)
-                goto cleanup;
+    if (!qemuDomainHasVcpuPids(vm))
+        return 0;
 
-            if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
-                goto cleanup;
+    for (i = 0; i < priv->nvcpupids; i++) {
+        if (virCgroupNewThread(cgroup, VIR_CGROUP_THREAD_VCPU, i,
+                               false, &cgroup_vcpu) < 0)
+            goto cleanup;
 
-            virCgroupFree(&cgroup_vcpu);
-        }
+        if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
+            goto cleanup;
+
+        virCgroupFree(&cgroup_vcpu);
     }
 
     return 0;
@@ -10604,7 +10601,7 @@ qemuGetVcpusBWLive(virDomainObjPtr vm,
     int ret = -1;
 
     priv = vm->privateData;
-    if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
+    if (!qemuDomainHasVcpuPids(vm)) {
         /* We do not create sub dir for each vcpu */
         rc = qemuGetVcpuBWLive(priv->cgroup, period, quota);
         if (rc < 0)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 97dd53b19883e67ef6629deb8e5ae7697fb1d643..c59e1b4d4b971066690cd70187baca8ee2feb3f6 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -2239,12 +2239,13 @@ qemuProcessSetVcpuAffinities(virDomainObjPtr vm)
     virDomainPinDefPtr pininfo;
     int n;
     int ret = -1;
-    VIR_DEBUG("Setting affinity on CPUs nvcpupin=%zu nvcpus=%d nvcpupids=%d",
-              def->cputune.nvcpupin, virDomainDefGetVcpus(def), priv->nvcpupids);
+    VIR_DEBUG("Setting affinity on CPUs nvcpupin=%zu nvcpus=%d hasVcpupids=%d",
+              def->cputune.nvcpupin, virDomainDefGetVcpus(def),
+              qemuDomainHasVcpuPids(vm));
     if (!def->cputune.nvcpupin)
         return 0;
 
-    if (priv->vcpupids == NULL) {
+    if (!qemuDomainHasVcpuPids(vm)) {
         /* If any CPU has custom affinity that differs from the
          * VM default affinity, we must reject it
          */