From e8f263e0d006390c3764aaa07093b2d174b61379 Mon Sep 17 00:00:00 2001
From: Jiri Denemark
Date: Tue, 19 May 2015 17:28:25 +0200
Subject: [PATCH] qemu: Cancel disk mirrors after libvirtd restart

When libvirtd is restarted during migration, we properly cancel the
ongoing migration (unless it managed to almost finish before the
restart). But if we were also migrating storage using NBD, we would
completely forget about the running disk mirrors.

Signed-off-by: Jiri Denemark
---
 src/qemu/qemu_domain.c    | 45 ++++++++++++++++++++-
 src/qemu/qemu_migration.c | 85 +++++++++++++++++++++++++++++++++++++++
 src/qemu/qemu_migration.h |  3 ++
 src/qemu/qemu_process.c   |  8 +---
 4 files changed, 133 insertions(+), 8 deletions(-)

diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 9bb3cee935..e459c18680 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -578,7 +578,27 @@ qemuDomainObjPrivateXMLFormat(virBufferPtr buf,
                               qemuDomainAsyncJobPhaseToString(
                                     priv->job.asyncJob, priv->job.phase));
         }
-        virBufferAddLit(buf, "/>\n");
+        if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+            virBufferAddLit(buf, "/>\n");
+        } else {
+            size_t i;
+            virDomainDiskDefPtr disk;
+            qemuDomainDiskPrivatePtr diskPriv;
+
+            virBufferAddLit(buf, ">\n");
+            virBufferAdjustIndent(buf, 2);
+
+            for (i = 0; i < vm->def->ndisks; i++) {
+                disk = vm->def->disks[i];
+                diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
+                virBufferAsprintf(buf, "<disk dev='%s' migrating='%s'/>\n",
+                                  disk->dst,
+                                  diskPriv->migrating ? "yes" : "no");
+            }
+
+            virBufferAdjustIndent(buf, -2);
+            virBufferAddLit(buf, "</job>\n");
+        }
     }
 
     priv->job.active = job;
@@ -736,6 +756,29 @@ qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt,
         }
     }
 
+    if ((n = virXPathNodeSet("./job[1]/disk[@migrating='yes']",
+                             ctxt, &nodes)) < 0) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                       _("failed to parse list of disks marked for migration"));
+        goto error;
+    }
+    if (n > 0) {
+        if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+            VIR_WARN("Found disks marked for migration but we were not "
+                     "migrating");
+            n = 0;
+        }
+        for (i = 0; i < n; i++) {
+            char *dst = virXMLPropString(nodes[i], "dev");
+            virDomainDiskDefPtr disk;
+
+            if (dst && (disk = virDomainDiskByName(vm->def, dst, false)))
+                QEMU_DOMAIN_DISK_PRIVATE(disk)->migrating = true;
+            VIR_FREE(dst);
+        }
+    }
+    VIR_FREE(nodes);
+
     priv->fakeReboot = virXPathBoolean("boolean(./fakereboot)", ctxt) == 1;
 
     if ((n = virXPathNodeSet("./devices/device", ctxt, &nodes)) < 0) {
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index b3223fb628..0f1d2382ee 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -2026,6 +2026,7 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver,
     char *hoststr = NULL;
     unsigned int mirror_flags = VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT;
     int rv;
+    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
 
     VIR_DEBUG("Starting drive mirrors for domain %s", vm->def->name);
 
@@ -2075,6 +2076,11 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver,
             goto cleanup;
         }
         diskPriv->migrating = true;
+
+        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) {
+            VIR_WARN("Failed to save status on vm %s", vm->def->name);
+            goto cleanup;
+        }
     }
 
     while ((rv = qemuMigrationDriveMirrorReady(driver, vm)) != 1) {
@@ -2102,6 +2108,7 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver,
     ret = 0;
 
  cleanup:
+    virObjectUnref(cfg);
     VIR_FREE(diskAlias);
     VIR_FREE(nbd_dest);
     VIR_FREE(hoststr);
@@ -5817,6 +5824,84 @@ qemuMigrationToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
     return ret;
 }
 
+
+int
+qemuMigrationCancel(virQEMUDriverPtr driver,
+                    virDomainObjPtr vm)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    virHashTablePtr blockJobs = NULL;
+    bool storage = false;
+    size_t i;
+    int ret = -1;
+
+    VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
+              vm->def->name);
+
+    for (i = 0; i < vm->def->ndisks; i++) {
+        virDomainDiskDefPtr disk = vm->def->disks[i];
+        if (QEMU_DOMAIN_DISK_PRIVATE(disk)->migrating) {
+            qemuBlockJobSyncBegin(disk);
+            storage = true;
+        }
+    }
+
+    qemuDomainObjEnterMonitor(driver, vm);
+
+    ignore_value(qemuMonitorMigrateCancel(priv->mon));
+    if (storage)
+        blockJobs = qemuMonitorGetAllBlockJobInfo(priv->mon);
+
+    if (qemuDomainObjExitMonitor(driver, vm) < 0 || (storage && !blockJobs))
+        goto endsyncjob;
+
+    if (!storage) {
+        ret = 0;
+        goto cleanup;
+    }
+
+    for (i = 0; i < vm->def->ndisks; i++) {
+        virDomainDiskDefPtr disk = vm->def->disks[i];
+        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
+
+        if (!diskPriv->migrating)
+            continue;
+
+        if (virHashLookup(blockJobs, disk->info.alias)) {
+            VIR_DEBUG("Drive mirror on disk %s is still running", disk->dst);
+        } else {
+            VIR_DEBUG("Drive mirror on disk %s is gone", disk->dst);
+            qemuBlockJobSyncEnd(driver, vm, disk);
+            diskPriv->migrating = false;
+        }
+    }
+
+    if (qemuMigrationCancelDriveMirror(driver, vm, false,
+                                       QEMU_ASYNC_JOB_NONE) < 0)
+        goto endsyncjob;
+
+    ret = 0;
+
+ cleanup:
+    virHashFree(blockJobs);
+    return ret;
+
+ endsyncjob:
+    if (storage) {
+        for (i = 0; i < vm->def->ndisks; i++) {
+            virDomainDiskDefPtr disk = vm->def->disks[i];
+            qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
+
+            if (diskPriv->migrating) {
+                qemuBlockJobSyncEnd(driver, vm, disk);
+                diskPriv->migrating = false;
+            }
+        }
+    }
+    goto cleanup;
+}
+
+
 int
 qemuMigrationJobStart(virQEMUDriverPtr driver,
                       virDomainObjPtr vm,
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index 030b32fdfd..78fb6487b1 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -185,4 +185,7 @@ int qemuMigrationToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
     ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_NONNULL(5)
     ATTRIBUTE_RETURN_CHECK;
 
+int qemuMigrationCancel(virQEMUDriverPtr driver,
+                        virDomainObjPtr vm);
+
 #endif /* __QEMU_MIGRATION_H__ */
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 3c9d4bc40e..5be0002bd7 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -3354,8 +3354,6 @@ qemuProcessRecoverMigration(virQEMUDriverPtr driver,
                             virDomainState state,
                             int reason)
 {
-    qemuDomainObjPrivatePtr priv = vm->privateData;
-
     if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
         switch (phase) {
         case QEMU_MIGRATION_PHASE_NONE:
@@ -3409,11 +3407,7 @@ qemuProcessRecoverMigration(virQEMUDriverPtr driver,
         case QEMU_MIGRATION_PHASE_PERFORM3:
             /* migration is still in progress, let's cancel it and resume the
              * domain */
-            VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
-                      vm->def->name);
-            qemuDomainObjEnterMonitor(driver, vm);
-            ignore_value(qemuMonitorMigrateCancel(priv->mon));
-            if (qemuDomainObjExitMonitor(driver, vm) < 0)
+            if (qemuMigrationCancel(driver, vm) < 0)
                 return -1;
             /* resume the domain but only if it was paused as a result of
              * migration */
-- 
2.39.5
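
For illustration only (not taken from this patch): with the formatter change
above, the per-domain status XML is expected to carry the migrating disks
inside the existing <job> element, roughly like the sketch below. The disk
targets (vda, vdb) and attribute values are made-up examples; only the
<disk dev='...' migrating='...'/> shape is implied by the code:

    <job type='none' async='migration out' phase='perform3'>
      <disk dev='vda' migrating='yes'/>
      <disk dev='vdb' migrating='no'/>
    </job>

After a libvirtd restart, the parser change only marks disks matching the
"./job[1]/disk[@migrating='yes']" XPath, which is what lets
qemuMigrationCancel() find the leftover drive mirrors and tear them down via
qemuMigrationCancelDriveMirror().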