int ret;
virDomainInfo info;
virErrorPtr orig_err = NULL;
- int cancelled;
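+ /* Default to "cancelled" so early failure paths fall through safely. */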
+ int cancelled = 1;
+ unsigned long protection = 0;
+
VIR_DOMAIN_DEBUG(domain, "dconn=%p xmlin=%s, flags=%lx, "
"dname=%s, uri=%s, bandwidth=%lu",
dconn, NULLSTR(xmlin), flags,
return NULL;
}
+ if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+ VIR_DRV_FEATURE_MIGRATE_CHANGE_PROTECTION))
+ protection = VIR_MIGRATE_CHANGE_PROTECTION;
+
VIR_DEBUG("Begin3 %p", domain->conn);
dom_xml = domain->conn->driver->domainMigrateBegin3
(domain, xmlin, &cookieout, &cookieoutlen,
- flags, dname, bandwidth);
+ flags | protection, dname, bandwidth);
if (!dom_xml)
goto done;
(dconn, cookiein, cookieinlen, &cookieout, &cookieoutlen,
uri, &uri_out, flags, dname, bandwidth, dom_xml);
VIR_FREE (dom_xml);
- if (ret == -1)
- goto done;
+ if (ret == -1) {
+ if (protection) {
+ /* Begin already started a migration job, so we need to cancel it by
+ * calling Confirm, while making sure it doesn't overwrite the error
+ */
+ orig_err = virSaveLastError();
+ goto confirm;
+ } else {
+ goto done;
+ }
+ }
if (uri == NULL && uri_out == NULL) {
virLibConnError(VIR_ERR_INTERNAL_ERROR,
_("domainMigratePrepare3 did not set uri"));
virDispatchError(domain->conn);
- cancelled = 1;
goto finish;
}
if (uri_out)
ret = domain->conn->driver->domainMigratePerform3
(domain, NULL, cookiein, cookieinlen,
&cookieout, &cookieoutlen, NULL,
- uri, flags, dname, bandwidth);
+ uri, flags | protection, dname, bandwidth);
/* Perform failed. Make sure Finish doesn't overwrite the error */
if (ret < 0)
if (!orig_err)
orig_err = virSaveLastError();
+confirm:
/*
* If cancelled, then src VM will be restarted, else
* it will be killed
cookieoutlen = 0;
ret = domain->conn->driver->domainMigrateConfirm3
(domain, cookiein, cookieinlen,
- flags, cancelled);
+ flags | protection, cancelled);
/* If Confirm3 returns -1, there's nothing more we can
* do, but fortunately worst case is that there is a
* domain left in 'paused' state on source.
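The Confirm-as-cancel path above relies on libvirt's usual error-preservation
idiom. A minimal sketch of that idiom, using only public virError APIs
(virSaveLastError, virSetError, virFreeError):

    virErrorPtr orig_err = virSaveLastError();  /* snapshot the failure */

    /* ... cleanup calls (such as Confirm) may reset the thread's error ... */

    if (orig_err) {
        virSetError(orig_err);   /* re-install the original error */
        virFreeError(orig_err);
    }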
/*
- * In normal migration, the libvirt client co-ordinates communcation
+ * In normal migration, the libvirt client co-ordinates communication
* between the 2 libvirtd instances on source & dest hosts.
*
* In this peer-2-peer migration alternative, the libvirt client
/*
- * In normal migration, the libvirt client co-ordinates communcation
+ * In normal migration, the libvirt client co-ordinates communication
* between the 2 libvirtd instances on source & dest hosts.
*
* Some hypervisors support an alternative, direct migration where
* VIR_MIGRATE_UNDEFINE_SOURCE If the migration is successful, undefine the
* domain on the source host.
* VIR_MIGRATE_PAUSED Leave the domain suspended on the remote side.
+ * VIR_MIGRATE_CHANGE_PROTECTION Protect against domain configuration
+ * changes during the migration process (set
+ * automatically when supported).
*
* VIR_MIGRATE_TUNNELLED requires that VIR_MIGRATE_PEER2PEER be set.
* Applications using the VIR_MIGRATE_PEER2PEER flag will probably
goto error;
}
} else {
+ /* Change protection requires support only on the source side, and
+ * is only needed in v3 migration, which automatically re-adds the
+ * flag for just the source side. We mask it out for non-peer2peer
+ * migration to allow migrating from a newer source to an older
+ * destination that would reject the flag. */
+ if (flags & VIR_MIGRATE_CHANGE_PROTECTION &&
+ !VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+ VIR_DRV_FEATURE_MIGRATE_CHANGE_PROTECTION)) {
+ virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+ _("cannot enforce change protection"));
+ goto error;
+ }
+ flags &= ~VIR_MIGRATE_CHANGE_PROTECTION;
if (flags & VIR_MIGRATE_TUNNELLED) {
virLibConnError(VIR_ERR_OPERATION_INVALID,
_("cannot perform tunnelled migration without using peer2peer flag"));
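As a usage note for the new flag: applications may also request change
protection explicitly, and with the check above the call now fails up front
with VIR_ERR_ARGUMENT_UNSUPPORTED when the source driver cannot enforce it.
A minimal sketch (the helper migrate_protected is hypothetical):

    #include <libvirt/libvirt.h>

    /* Hypothetical helper: live-migrate with enforced change protection. */
    static virDomainPtr
    migrate_protected(virDomainPtr dom, virConnectPtr dconn)
    {
        unsigned long flags = VIR_MIGRATE_LIVE | VIR_MIGRATE_CHANGE_PROTECTION;

        /* dname = NULL keeps the domain name; uri = NULL lets the
         * hypervisor choose; bandwidth = 0 means no client-imposed limit. */
        return virDomainMigrate(dom, dconn, flags, NULL, NULL, 0);
    }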
* VIR_MIGRATE_UNDEFINE_SOURCE If the migration is successful, undefine the
* domain on the source host.
* VIR_MIGRATE_PAUSED Leave the domain suspended on the remote side.
+ * VIR_MIGRATE_CHANGE_PROTECTION Protect against domain configuration
+ * changes during the migration process (set
+ * automatically when supported).
*
* VIR_MIGRATE_TUNNELLED requires that VIR_MIGRATE_PEER2PEER be set.
* Applications using the VIR_MIGRATE_PEER2PEER flag will probably
goto error;
}
} else {
+ /* Change protection requires support only on the source side, and
+ * is only needed in v3 migration, which automatically re-adds the
+ * flag for just the source side. We mask it out for non-peer2peer
+ * migration to allow migrating from a newer source to an older
+ * destination that would reject the flag. */
+ if (flags & VIR_MIGRATE_CHANGE_PROTECTION &&
+ !VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+ VIR_DRV_FEATURE_MIGRATE_CHANGE_PROTECTION)) {
+ virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+ _("cannot enforce change protection"));
+ goto error;
+ }
+ flags &= ~VIR_MIGRATE_CHANGE_PROTECTION;
if (flags & VIR_MIGRATE_TUNNELLED) {
virLibConnError(VIR_ERR_OPERATION_INVALID,
_("cannot perform tunnelled migration without using peer2peer flag"));
* on the destination host.
* VIR_MIGRATE_UNDEFINE_SOURCE If the migration is successful, undefine the
* domain on the source host.
+ * VIR_MIGRATE_CHANGE_PROTECTION Protect against domain configuration
+ * changes during the migration process (set
+ * automatically when supported).
*
* The operation of this API hinges on the VIR_MIGRATE_PEER2PEER flag.
* If the VIR_MIGRATE_PEER2PEER flag is NOT set, the duri parameter
* on the destination host.
* VIR_MIGRATE_UNDEFINE_SOURCE If the migration is successful, undefine the
* domain on the source host.
+ * VIR_MIGRATE_CHANGE_PROTECTION Protect against domain configuration
+ * changes during the migration process (set
+ * automatically when supported).
*
* The operation of this API hinges on the VIR_MIGRATE_PEER2PEER flag.
*
}
+/* The caller is supposed to lock the vm and start a migration job. */
char *qemuMigrationBegin(struct qemud_driver *driver,
virDomainObjPtr vm,
const char *xmlin,
char *rv = NULL;
qemuMigrationCookiePtr mig = NULL;
virDomainDefPtr def = NULL;
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+
VIR_DEBUG("driver=%p, vm=%p, xmlin=%s, cookieout=%p, cookieoutlen=%p",
driver, vm, NULLSTR(xmlin), cookieout, cookieoutlen);
- if (!virDomainObjIsActive(vm)) {
- qemuReportError(VIR_ERR_OPERATION_INVALID,
- "%s", _("domain is not running"));
- goto cleanup;
- }
+ /* Only set the phase if we are inside QEMU_ASYNC_JOB_MIGRATION_OUT.
+ * Otherwise we will start the async job later, in the perform phase,
+ * losing change protection.
+ */
+ if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
+ qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_BEGIN3);
if (qemuProcessAutoDestroyActive(driver, vm)) {
qemuReportError(VIR_ERR_OPERATION_INVALID,
}
cleanup:
- virDomainObjUnlock(vm);
qemuMigrationCookieFree(mig);
virDomainDefFree(def);
return rv;
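qemuMigrationBegin thus no longer takes the job or drops the lock itself.
A hedged sketch of the calling convention it now assumes in a Begin3 driver
entry point (the surrounding function is abbreviated and hypothetical; the
job helpers are the ones this series introduces):

    if (flags & VIR_MIGRATE_CHANGE_PROTECTION) {
        /* Keep the async job alive until Confirm3 ends it. */
        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
            goto cleanup;
    } else {
        /* No protection requested: an ordinary modify job suffices. */
        if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
            goto cleanup;
    }

    xml = qemuMigrationBegin(driver, vm, xmlin, &cookieout, &cookieoutlen);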
* until the migration is complete.
*/
VIR_DEBUG("Perform %p", sconn);
+ qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
if (flags & VIR_MIGRATE_TUNNELLED)
ret = doTunnelMigrate(driver, vm, st,
NULL, 0, NULL, NULL,
NULLSTR(dconnuri), NULLSTR(uri), flags,
NULLSTR(dname), resource);
+ /* Unlike the virDomainMigrateVersion3 counterpart, we don't need
+ * to worry about auto-setting the VIR_MIGRATE_CHANGE_PROTECTION
+ * bit here, because we are already running inside the context of
+ * a single job. */
+
dom_xml = qemuMigrationBegin(driver, vm, xmlin,
&cookieout, &cookieoutlen);
if (!dom_xml)
* confirm migration completion.
*/
VIR_DEBUG("Perform3 %p uri=%s uri_out=%s", sconn, uri, uri_out);
+ qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
VIR_FREE(cookiein);
cookiein = cookieout;
cookieinlen = cookieoutlen;
flags, dname, resource);
/* Perform failed. Make sure Finish doesn't overwrite the error */
- if (ret < 0)
+ if (ret < 0) {
orig_err = virSaveLastError();
+ } else {
+ qemuMigrationJobSetPhase(driver, vm,
+ QEMU_MIGRATION_PHASE_PERFORM3_DONE);
+ }
/* If Perform returns < 0, then we need to cancel the VM
* startup on the destination
p2p = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
VIR_DRV_FEATURE_MIGRATION_P2P);
/* v3proto reflects whether the caller used Perform3, but with
- * p2p migrate, regardless of whether Perform3 or Perform3
+ * p2p migrate, regardless of whether Perform2 or Perform3
* were used, we decide protocol based on what target supports
*/
*v3proto = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
goto cleanup;
}
+ /* Change protection is only required on the source side (us), and
+ * only for v3 migration, where begin and perform are separate jobs.
+ * But peer-2-peer migration is already a single job, and we still want to
+ * talk to older destinations that would reject the flag.
+ * Therefore it is safe to clear the bit here. */
+ flags &= ~VIR_MIGRATE_CHANGE_PROTECTION;
+
if (*v3proto)
ret = doPeer2PeerMigrate3(driver, sconn, dconn, vm, xmlin,
dconnuri, uri, flags, dname, resource);
}
-int qemuMigrationPerform(struct qemud_driver *driver,
- virConnectPtr conn,
- virDomainObjPtr vm,
- const char *xmlin,
- const char *dconnuri,
- const char *uri,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- const char *dname,
- unsigned long resource,
- bool v3proto)
+/*
+ * This implements the perform part of the migration protocol when the
+ * migration job does not need to be active across several APIs, i.e.,
+ * peer2peer migration or the perform phase of v2 non-peer2peer migration.
+ */
+static int
+qemuMigrationPerformJob(struct qemud_driver *driver,
+ virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *xmlin,
+ const char *dconnuri,
+ const char *uri,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource,
+ bool v3proto)
{
virDomainEventPtr event = NULL;
int ret = -1;
int resume = 0;
- qemuDomainObjPrivatePtr priv = vm->privateData;
- VIR_DEBUG("driver=%p, conn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
- "uri=%s, cookiein=%s, cookieinlen=%d, cookieout=%p, "
- "cookieoutlen=%p, flags=%lx, dname=%s, resource=%lu, v3proto=%d",
- driver, conn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
- NULLSTR(uri), NULLSTR(cookiein), cookieinlen,
- cookieout, cookieoutlen, flags, NULLSTR(dname),
- resource, v3proto);
- if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
goto endjob;
}
- memset(&priv->job.info, 0, sizeof(priv->job.info));
- priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
-
resume = virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING;
if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
- if (cookieinlen) {
- qemuReportError(VIR_ERR_OPERATION_INVALID,
- "%s", _("received unexpected cookie with P2P migration"));
- goto endjob;
- }
-
- if (doPeer2PeerMigrate(driver, conn, vm, xmlin,
- dconnuri, uri, flags, dname,
- resource, &v3proto) < 0)
- /* doPeer2PeerMigrate already set the error, so just get out */
- goto endjob;
+ ret = doPeer2PeerMigrate(driver, conn, vm, xmlin,
+ dconnuri, uri, flags, dname,
+ resource, &v3proto);
} else {
- if (dconnuri) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR,
- "%s", _("Unexpected dconnuri parameter with non-peer2peer migration"));
- goto endjob;
- }
- if (doNativeMigrate(driver, vm, uri, cookiein, cookieinlen,
- cookieout, cookieoutlen,
- flags, dname, resource) < 0)
- goto endjob;
+ qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
+ ret = doNativeMigrate(driver, vm, uri, cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, dname, resource);
}
+ if (ret < 0)
+ goto endjob;
/*
* In v3 protocol, the source VM is not killed off until the
* confirm step.
*/
- if (v3proto) {
- resume = 0;
- } else {
+ if (!v3proto) {
qemuProcessStop(driver, vm, 1, VIR_DOMAIN_SHUTOFF_MIGRATED);
virDomainAuditStop(vm, "migrated");
- resume = 0;
-
event = virDomainEventNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
}
-
- ret = 0;
+ resume = 0;
endjob:
if (resume && virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
VIR_DOMAIN_EVENT_RESUMED,
VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
}
- if (vm) {
- if (qemuDomainObjEndAsyncJob(driver, vm) == 0) {
- vm = NULL;
- } else if (!virDomainObjIsActive(vm) &&
- (!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE))) {
- if (flags & VIR_MIGRATE_UNDEFINE_SOURCE)
- virDomainDeleteConfig(driver->configDir, driver->autostartDir, vm);
- virDomainRemoveInactive(&driver->domains, vm);
- vm = NULL;
+
+ if (qemuMigrationJobFinish(driver, vm) == 0) {
+ vm = NULL;
+ } else if (!virDomainObjIsActive(vm) &&
+ (!vm->persistent ||
+ (ret == 0 && (flags & VIR_MIGRATE_UNDEFINE_SOURCE)))) {
+ if (flags & VIR_MIGRATE_UNDEFINE_SOURCE)
+ virDomainDeleteConfig(driver->configDir, driver->autostartDir, vm);
+ virDomainRemoveInactive(&driver->domains, vm);
+ vm = NULL;
+ }
+
+cleanup:
+ if (vm)
+ virDomainObjUnlock(vm);
+ if (event)
+ qemuDomainEventQueue(driver, event);
+ return ret;
+}
+
+/*
+ * This implements the perform phase of the v3 migration protocol.
+ */
+static int
+qemuMigrationPerformPhase(struct qemud_driver *driver,
+ virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *uri,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource)
+{
+ virDomainEventPtr event = NULL;
+ int ret = -1;
+ bool resume;
+ int refs;
+
+ /* If we didn't start the job in the begin phase, start it now. */
+ if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
+ if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ goto cleanup;
+ } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) {
+ goto cleanup;
+ }
+
+ qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
+
+ resume = virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING;
+ ret = doNativeMigrate(driver, vm, uri, cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, dname, resource);
+
+ if (ret < 0 && resume &&
+ virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
+ /* we got here through some sort of failure; start the domain again */
+ if (qemuProcessStartCPUs(driver, vm, conn,
+ VIR_DOMAIN_RUNNING_MIGRATION_CANCELED) < 0) {
+ /* Hm, we already know we are in error here. We don't want to
+ * overwrite the previous error, though, so we just throw something
+ * to the logs and hope for the best
+ */
+ VIR_ERROR(_("Failed to resume guest %s after failure"),
+ vm->def->name);
}
+
+ event = virDomainEventNewFromObj(vm,
+ VIR_DOMAIN_EVENT_RESUMED,
+ VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
+ }
+
+ if (ret < 0)
+ goto endjob;
+
+ qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3_DONE);
+
+endjob:
+ if (ret < 0)
+ refs = qemuMigrationJobFinish(driver, vm);
+ else
+ refs = qemuMigrationJobContinue(vm);
+ if (refs == 0) {
+ vm = NULL;
+ } else if (!virDomainObjIsActive(vm) && !vm->persistent) {
+ virDomainRemoveInactive(&driver->domains, vm);
+ vm = NULL;
}
cleanup:
return ret;
}
+int
+qemuMigrationPerform(struct qemud_driver *driver,
+ virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *xmlin,
+ const char *dconnuri,
+ const char *uri,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource,
+ bool v3proto)
+{
+ VIR_DEBUG("driver=%p, conn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
+ "uri=%s, cookiein=%s, cookieinlen=%d, cookieout=%p, "
+ "cookieoutlen=%p, flags=%lx, dname=%s, resource=%lu, v3proto=%d",
+ driver, conn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
+ NULLSTR(uri), NULLSTR(cookiein), cookieinlen,
+ cookieout, cookieoutlen, flags, NULLSTR(dname),
+ resource, v3proto);
+
+ if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
+ if (cookieinlen) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("received unexpected cookie with P2P migration"));
+ return -1;
+ }
+
+ return qemuMigrationPerformJob(driver, conn, vm, xmlin, dconnuri, uri,
+ cookiein, cookieinlen, cookieout,
+ cookieoutlen, flags, dname, resource,
+ v3proto);
+ } else {
+ if (dconnuri) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ "%s", _("Unexpected dconnuri parameter with non-peer2peer migration"));
+ return -1;
+ }
+
+ if (v3proto) {
+ return qemuMigrationPerformPhase(driver, conn, vm, uri,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, dname, resource);
+ } else {
+ return qemuMigrationPerformJob(driver, conn, vm, xmlin, dconnuri,
+ uri, cookiein, cookieinlen,
+ cookieout, cookieoutlen, flags,
+ dname, resource, v3proto);
+ }
+ }
+}
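For reference, the contract assumed of the qemuMigrationJob* helpers used
above (a sketch of their semantics, not their exact prototypes):

    /*
     * qemuMigrationJobStart()      - begin QEMU_ASYNC_JOB_MIGRATION_OUT
     * qemuMigrationJobIsActive()   - check that vm owns that async job
     * qemuMigrationJobStartPhase() - adopt the job and record a new phase
     * qemuMigrationJobSetPhase()   - record progress so a restarted
     *                                libvirtd can tell how far we got
     * qemuMigrationJobContinue()   - keep the job active across the API
     *                                boundary; returns remaining refs
     * qemuMigrationJobFinish()     - end the job; returns remaining refs
     *                                (0 means the vm object is gone)
     */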
#if WITH_MACVTAP
static void
virCheckFlags(QEMU_MIGRATION_FLAGS, -1);
+ qemuMigrationJobSetPhase(driver, vm,
+ retcode == 0
+ ? QEMU_MIGRATION_PHASE_CONFIRM3
+ : QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED);
+
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
return -1;
- if (!virDomainObjIsActive(vm)) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("guest unexpectedly quit"));
- goto cleanup;
- }
-
/* Did the migration go as planned? If yes, kill off the
* domain object, but if no, resume CPUs
*/
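The phases recorded above give a restarted daemon enough state to recover or
roll back. A hedged sketch of the enum this series presumably adds to
qemu_migration.h (member order is an assumption; the names all appear above):

    enum qemuMigrationJobPhase {
        QEMU_MIGRATION_PHASE_NONE = 0,
        QEMU_MIGRATION_PHASE_PERFORM2,           /* v2 perform, single job */
        QEMU_MIGRATION_PHASE_BEGIN3,             /* v3 begin */
        QEMU_MIGRATION_PHASE_PERFORM3,           /* v3 perform running */
        QEMU_MIGRATION_PHASE_PERFORM3_DONE,      /* v3 perform finished */
        QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED, /* confirm after failure */
        QEMU_MIGRATION_PHASE_CONFIRM3,           /* confirm after success */
    };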