return -1;
}
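+/*
+ * qemuSetupCgroupVcpuPin: look up the vcpupin entry whose vcpuid
+ * matches @vcpuid and write its cpumask to cpuset.cpus of @cgroup.
+ *
+ * Returns 0 on success (also when no matching entry exists),
+ * a negative value on failure.
+ */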
+int qemuSetupCgroupVcpuPin(virCgroupPtr cgroup,
+ virDomainVcpuPinDefPtr *vcpupin,
+ int nvcpupin,
+ int vcpuid)
+{
+ int i, rc = 0;
+ char *new_cpus = NULL;
+
+ for (i = 0; i < nvcpupin; i++) {
+ if (vcpuid == vcpupin[i]->vcpuid) {
+ new_cpus = virDomainCpuSetFormat(vcpupin[i]->cpumask,
+ VIR_DOMAIN_CPUMASK_LEN);
+ if (!new_cpus) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("failed to convert cpu mask"));
+ rc = -1;
+ goto cleanup;
+ }
+ rc = virCgroupSetCpusetCpus(cgroup, new_cpus);
+ if (rc != 0) {
+ virReportSystemError(-rc,
+ "%s",
+ _("Unable to set cpuset.cpus"));
+ goto cleanup;
+ }
+ }
+ }
+
+cleanup:
+ VIR_FREE(new_cpus);
+ return rc;
+}
+
int qemuSetupCgroupForVcpu(struct qemud_driver *driver, virDomainObjPtr vm)
{
virCgroupPtr cgroup = NULL;
virCgroupPtr cgroup_vcpu = NULL;
qemuDomainObjPrivatePtr priv = vm->privateData;
+ virDomainDefPtr def = vm->def;
int rc;
unsigned int i;
unsigned long long period = vm->def->cputune.period;
}
}
+ /* Set vcpupin in cgroup if vcpupin xml is provided */
+ if (def->cputune.nvcpupin &&
+ qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPUSET) &&
+ qemuSetupCgroupVcpuPin(cgroup_vcpu,
+ def->cputune.vcpupin,
+ def->cputune.nvcpupin,
+ i) < 0)
+ goto cleanup;
+
virCgroupFree(&cgroup_vcpu);
}
struct qemud_driver *driver = dom->conn->privateData;
virDomainObjPtr vm;
virDomainDefPtr persistentDef = NULL;
+ virCgroupPtr cgroup_dom = NULL;
+ virCgroupPtr cgroup_vcpu = NULL;
int maxcpu, hostcpus;
virNodeInfo nodeinfo;
int ret = -1;
qemuDomainObjPrivatePtr priv;
bool canResetting = true;
+ int newVcpuPinNum = 0;
+ virDomainVcpuPinDefPtr *newVcpuPin = NULL;
int pcpu;
virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
if (flags & VIR_DOMAIN_AFFECT_LIVE) {
- if (priv->vcpupids != NULL) {
- if (virProcessInfoSetAffinity(priv->vcpupids[vcpu],
- cpumap, maplen, maxcpu) < 0)
- goto cleanup;
- } else {
+ if (priv->vcpupids == NULL) {
virReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("cpu affinity is not supported"));
goto cleanup;
}
- if (canResetting) {
- if (virDomainVcpuPinDel(vm->def, vcpu) < 0) {
- virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("failed to delete vcpupin xml of "
- "a running domain"));
+ if (vm->def->cputune.vcpupin) {
+ newVcpuPin = virDomainVcpuPinDefCopy(vm->def->cputune.vcpupin,
+ vm->def->cputune.nvcpupin);
+ if (!newVcpuPin)
+ goto cleanup;
+
+ newVcpuPinNum = vm->def->cputune.nvcpupin;
+ } else {
+ if (VIR_ALLOC(newVcpuPin) < 0) {
+ virReportOOMError();
+ goto cleanup;
+ }
+ newVcpuPinNum = 0;
+ }
+
+ if (virDomainVcpuPinAdd(newVcpuPin, &newVcpuPinNum, cpumap, maplen, vcpu) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("failed to update vcpupin"));
+ virDomainVcpuPinDefFree(newVcpuPin, newVcpuPinNum);
+ goto cleanup;
+ }
+
+ /* Configure the corresponding cpuset cgroup before setting affinity. */
+ if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPUSET)) {
+ if (virCgroupForDomain(driver->cgroup, vm->def->name, &cgroup_dom, 0) == 0 &&
+ virCgroupForVcpu(cgroup_dom, vcpu, &cgroup_vcpu, 0) == 0 &&
+ qemuSetupCgroupVcpuPin(cgroup_vcpu, newVcpuPin, newVcpuPinNum, vcpu) < 0) {
+ virReportError(VIR_ERR_OPERATION_INVALID,
+ _("failed to set cpuset.cpus in cgroup"
+ " for vcpu %d"), vcpu);
goto cleanup;
}
} else {
- if (!vm->def->cputune.vcpupin) {
- if (VIR_ALLOC(vm->def->cputune.vcpupin) < 0) {
- virReportOOMError();
- goto cleanup;
- }
- vm->def->cputune.nvcpupin = 0;
+ if (virProcessInfoSetAffinity(priv->vcpupids[vcpu],
+ cpumap, maplen, maxcpu) < 0) {
+ virReportError(VIR_ERR_SYSTEM_ERROR,
+ _("failed to set cpu affinity for vcpu %d"),
+ vcpu);
+ goto cleanup;
}
- if (virDomainVcpuPinAdd(vm->def->cputune.vcpupin,
- &vm->def->cputune.nvcpupin,
- cpumap,
- maplen,
- vcpu) < 0) {
+ }
+
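+ /* Pinning to all pcpus is treated as a reset, so the explicit
+ * vcpupin entry is dropped; otherwise the updated copy replaces
+ * the old list in the definition. */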
+ if (canResetting) {
+ if (virDomainVcpuPinDel(vm->def, vcpu) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("failed to update or add vcpupin xml of "
+ _("failed to delete vcpupin xml of "
"a running domain"));
goto cleanup;
}
+ } else {
+ if (vm->def->cputune.vcpupin)
+ virDomainVcpuPinDefFree(vm->def->cputune.vcpupin, vm->def->cputune.nvcpupin);
+
+ vm->def->cputune.vcpupin = newVcpuPin;
+ vm->def->cputune.nvcpupin = newVcpuPinNum;
+ newVcpuPin = NULL;
}
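+ /* Free the copy unless it was adopted by the definition above. */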
+ if (newVcpuPin)
+ virDomainVcpuPinDefFree(newVcpuPin, newVcpuPinNum);
+
if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
goto cleanup;
}
ret = 0;
cleanup:
+ if (cgroup_vcpu)
+ virCgroupFree(&cgroup_vcpu);
+ if (cgroup_dom)
+ virCgroupFree(&cgroup_dom);
if (vm)
virDomainObjUnlock(vm);
return ret;
/* We need to control cpu bandwidth for each vcpu now */
if ((flags & VIR_CGROUP_VCPU) &&
(i != VIR_CGROUP_CONTROLLER_CPU &&
- i != VIR_CGROUP_CONTROLLER_CPUACCT)) {
+ i != VIR_CGROUP_CONTROLLER_CPUACCT &&
+ i != VIR_CGROUP_CONTROLLER_CPUSET)) {
/* treat it as unmounted and we can use virCgroupAddTask */
VIR_FREE(group->controllers[i].mountPoint);
continue;
mems);
}
+/**
+ * virCgroupSetCpusetCpus:
+ *
+ * @group: The cgroup to set cpuset.cpus for
+ * @cpus: the cpus to set
+ *
+ * Returns: 0 on success
+ */
+int virCgroupSetCpusetCpus(virCgroupPtr group, const char *cpus)
+{
+ return virCgroupSetValueStr(group,
+ VIR_CGROUP_CONTROLLER_CPUSET,
+ "cpuset.cpus",
+ cpus);
+}
+
+/**
+ * virCgroupGetCpusetCpus:
+ *
+ * @group: The cgroup to get cpuset.cpus for
+ * @cpus: Pointer to be filled with the current cpuset.cpus value
+ *
+ * Returns: 0 on success
+ */
+int virCgroupGetCpusetCpus(virCgroupPtr group, char **cpus)
+{
+ return virCgroupGetValueStr(group,
+ VIR_CGROUP_CONTROLLER_CPUSET,
+ "cpuset.cpus",
+ cpus);
+}
+
/**
* virCgroupDenyAllDevices:
*