From: Osier Yang Date: Mon, 16 Apr 2012 10:04:27 +0000 (+0800) Subject: numad: Convert node list to cpumap before setting affinity X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=ccf80e36301d538505c5c053cf369a61d4671831;p=people%2Fliuw%2Flibxenctrl-split%2Flibvirt.git numad: Convert node list to cpumap before setting affinity Instead of returning a CPU list, numad returns a NUMA node list; this patch converts the node list to a cpumap before setting affinity. Otherwise, the domain processes will be pinned only to CPU[$numa_cell_num], which will cause significant performance losses. Also, because numad balances the affinity dynamically, reflecting the cpuset from numad back doesn't make much sense, and it may just produce confusion for the users. Thus the better way is not to reflect it back to the XML. And in this case, it's better to ignore the cpuset when parsing the XML. The code to update the cpuset is removed in this patch incidentally, and there will be a follow-up patch to ignore the manually specified "cpuset" if "placement" is "auto"; the documentation will be updated too. 
--- diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 692fc3224..b6132d282 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -1820,38 +1820,45 @@ qemuProcessInitCpuAffinity(struct qemud_driver *driver, } if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) { - char *tmp_cpumask = NULL; char *nodeset = NULL; + char *nodemask = NULL; nodeset = qemuGetNumadAdvice(vm->def); if (!nodeset) goto cleanup; - if (VIR_ALLOC_N(tmp_cpumask, VIR_DOMAIN_CPUMASK_LEN) < 0) { + if (VIR_ALLOC_N(nodemask, VIR_DOMAIN_CPUMASK_LEN) < 0) { virReportOOMError(); VIR_FREE(nodeset); goto cleanup; } - if (virDomainCpuSetParse(nodeset, 0, tmp_cpumask, + if (virDomainCpuSetParse(nodeset, 0, nodemask, VIR_DOMAIN_CPUMASK_LEN) < 0) { - VIR_FREE(tmp_cpumask); + VIR_FREE(nodemask); VIR_FREE(nodeset); goto cleanup; } VIR_FREE(nodeset); - for (i = 0; i < maxcpu && i < VIR_DOMAIN_CPUMASK_LEN; i++) { - if (tmp_cpumask[i]) - VIR_USE_CPU(cpumap, i); + /* numad returns the NUMA node list, convert it to cpumap */ + int prev_total_ncpus = 0; + for (i = 0; i < driver->caps->host.nnumaCell; i++) { + int j; + int cur_ncpus = driver->caps->host.numaCell[i]->ncpus; + if (nodemask[i]) { + for (j = prev_total_ncpus; + j < cur_ncpus + prev_total_ncpus && + j < maxcpu && + j < VIR_DOMAIN_CPUMASK_LEN; + j++) { + VIR_USE_CPU(cpumap, j); + } + } + prev_total_ncpus += cur_ncpus; } - VIR_FREE(vm->def->cpumask); - vm->def->cpumask = tmp_cpumask; - if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) { - VIR_WARN("Unable to save status on vm %s after state change", - vm->def->name); - } + VIR_FREE(nodemask); } else { if (vm->def->cpumask) { /* XXX why don't we keep 'cpumask' in the libvirt cpumap