if (!(qemu_driver->sharedDevices = virHashCreate(30, qemuSharedDeviceEntryFree)))
goto error;
- if (qemuMigrationErrorInit(qemu_driver) < 0)
+ if (qemuMigrationDstErrorInit(qemu_driver) < 0)
goto error;
if (privileged) {
goto cleanup;
/* Perform the migration */
- if (qemuMigrationToFile(driver, vm, fd, compressedpath, asyncJob) < 0)
+ if (qemuMigrationSrcToFile(driver, vm, fd, compressedpath, asyncJob) < 0)
goto cleanup;
/* Touch up file header to mark image complete. */
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
goto cleanup;
- if (!qemuMigrationIsAllowed(driver, vm, false, 0))
+ if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0))
goto cleanup;
if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SAVE,
goto cleanup;
}
- if (!qemuMigrationIsAllowed(driver, vm, false, 0))
+ if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0))
goto cleanup;
- ret = qemuMigrationToFile(driver, vm, fd, compressedpath,
- QEMU_ASYNC_JOB_DUMP);
+ ret = qemuMigrationSrcToFile(driver, vm, fd, compressedpath,
+ QEMU_ASYNC_JOB_DUMP);
}
if (ret < 0)
if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
- qemuMigrationErrorSave(driver, vm->def->name,
- qemuMonitorLastError(priv->mon));
+ qemuMigrationDstErrorSave(driver, vm->def->name,
+ qemuMonitorLastError(priv->mon));
}
event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
goto cleanup;
}
- if (!(def = qemuMigrationPrepareDef(driver, dom_xml, dname, &origname)))
+ if (!(def = qemuMigrationAnyPrepareDef(driver, dom_xml, dname, &origname)))
goto cleanup;
if (virDomainMigratePrepareTunnelEnsureACL(dconn, def) < 0)
goto cleanup;
- ret = qemuMigrationPrepareTunnel(driver,
- NULL, 0, NULL, NULL, /* No cookies in v2 */
- st, &def, origname, flags);
+ ret = qemuMigrationDstPrepareTunnel(driver,
+ NULL, 0, NULL, NULL, /* No cookies in v2 */
+ st, &def, origname, flags);
cleanup:
VIR_FREE(origname);
goto cleanup;
}
- if (!(compression = qemuMigrationCompressionParse(NULL, 0, flags)))
+ if (!(compression = qemuMigrationAnyCompressionParse(NULL, 0, flags)))
goto cleanup;
if (virLockManagerPluginUsesState(driver->lockManager)) {
goto cleanup;
}
- if (!(def = qemuMigrationPrepareDef(driver, dom_xml, dname, &origname)))
+ if (!(def = qemuMigrationAnyPrepareDef(driver, dom_xml, dname, &origname)))
goto cleanup;
if (virDomainMigratePrepare2EnsureACL(dconn, def) < 0)
* length was not sufficiently large, causing failures
* migrating between old & new libvirtd
*/
- ret = qemuMigrationPrepareDirect(driver,
- NULL, 0, NULL, NULL, /* No cookies */
- uri_in, uri_out,
- &def, origname, NULL, 0, NULL, 0,
- compression, flags);
+ ret = qemuMigrationDstPrepareDirect(driver,
+ NULL, 0, NULL, NULL, /* No cookies */
+ uri_in, uri_out,
+ &def, origname, NULL, 0, NULL, 0,
+ compression, flags);
cleanup:
VIR_FREE(compression);
goto cleanup;
}
- if (!(compression = qemuMigrationCompressionParse(NULL, 0, flags)))
+ if (!(compression = qemuMigrationAnyCompressionParse(NULL, 0, flags)))
goto cleanup;
if (!(vm = qemuDomObjFromDomain(dom)))
*
* Consume any cookie we were able to decode though
*/
- ret = qemuMigrationPerform(driver, dom->conn, vm, NULL,
- NULL, dconnuri, uri, NULL, NULL, 0, NULL, 0,
- compression, &migParams, cookie, cookielen,
- NULL, NULL, /* No output cookies in v2 */
- flags, dname, resource, false);
+ ret = qemuMigrationSrcPerform(driver, dom->conn, vm, NULL,
+ NULL, dconnuri, uri, NULL, NULL, 0, NULL, 0,
+ compression, &migParams, cookie, cookielen,
+ NULL, NULL, /* No output cookies in v2 */
+ flags, dname, resource, false);
cleanup:
qemuMigrationParamsClear(&migParams);
if (!vm) {
virReportError(VIR_ERR_NO_DOMAIN,
_("no domain with matching name '%s'"), dname);
- qemuMigrationErrorReport(driver, dname);
+ qemuMigrationDstErrorReport(driver, dname);
goto cleanup;
}
* length was not sufficiently large, causing failures
* migrating between old & new libvirtd
*/
- dom = qemuMigrationFinish(driver, dconn, vm,
- NULL, 0, NULL, NULL, /* No cookies */
- flags, retcode, false);
+ dom = qemuMigrationDstFinish(driver, dconn, vm,
+ NULL, 0, NULL, NULL, /* No cookies */
+ flags, retcode, false);
cleanup:
return dom;
return NULL;
}
- return qemuMigrationBegin(domain->conn, vm, xmlin, dname,
- cookieout, cookieoutlen, 0, NULL, flags);
+ return qemuMigrationSrcBegin(domain->conn, vm, xmlin, dname,
+ cookieout, cookieoutlen, 0, NULL, flags);
}
static char *
goto cleanup;
}
- ret = qemuMigrationBegin(domain->conn, vm, xmlin, dname,
- cookieout, cookieoutlen,
- nmigrate_disks, migrate_disks, flags);
+ ret = qemuMigrationSrcBegin(domain->conn, vm, xmlin, dname,
+ cookieout, cookieoutlen,
+ nmigrate_disks, migrate_disks, flags);
cleanup:
VIR_FREE(migrate_disks);
goto cleanup;
}
- if (!(compression = qemuMigrationCompressionParse(NULL, 0, flags)))
+ if (!(compression = qemuMigrationAnyCompressionParse(NULL, 0, flags)))
goto cleanup;
- if (!(def = qemuMigrationPrepareDef(driver, dom_xml, dname, &origname)))
+ if (!(def = qemuMigrationAnyPrepareDef(driver, dom_xml, dname, &origname)))
goto cleanup;
if (virDomainMigratePrepare3EnsureACL(dconn, def) < 0)
goto cleanup;
- ret = qemuMigrationPrepareDirect(driver,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- uri_in, uri_out,
- &def, origname, NULL, 0, NULL, 0,
- compression, flags);
+ ret = qemuMigrationDstPrepareDirect(driver,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ uri_in, uri_out,
+ &def, origname, NULL, 0, NULL, 0,
+ compression, flags);
cleanup:
VIR_FREE(compression);
if (nmigrate_disks < 0)
goto cleanup;
- if (!(compression = qemuMigrationCompressionParse(params, nparams, flags)))
+ if (!(compression = qemuMigrationAnyCompressionParse(params, nparams, flags)))
goto cleanup;
if (flags & VIR_MIGRATE_TUNNELLED) {
goto cleanup;
}
- if (!(def = qemuMigrationPrepareDef(driver, dom_xml, dname, &origname)))
+ if (!(def = qemuMigrationAnyPrepareDef(driver, dom_xml, dname, &origname)))
goto cleanup;
if (virDomainMigratePrepare3ParamsEnsureACL(dconn, def) < 0)
goto cleanup;
- ret = qemuMigrationPrepareDirect(driver,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- uri_in, uri_out,
- &def, origname, listenAddress,
- nmigrate_disks, migrate_disks, nbdPort,
- compression, flags);
+ ret = qemuMigrationDstPrepareDirect(driver,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ uri_in, uri_out,
+ &def, origname, listenAddress,
+ nmigrate_disks, migrate_disks, nbdPort,
+ compression, flags);
cleanup:
VIR_FREE(compression);
goto cleanup;
}
- if (!(def = qemuMigrationPrepareDef(driver, dom_xml, dname, &origname)))
+ if (!(def = qemuMigrationAnyPrepareDef(driver, dom_xml, dname, &origname)))
goto cleanup;
if (virDomainMigratePrepareTunnel3EnsureACL(dconn, def) < 0)
goto cleanup;
- ret = qemuMigrationPrepareTunnel(driver,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- st, &def, origname, flags);
+ ret = qemuMigrationDstPrepareTunnel(driver,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ st, &def, origname, flags);
cleanup:
VIR_FREE(origname);
goto cleanup;
}
- if (!(def = qemuMigrationPrepareDef(driver, dom_xml, dname, &origname)))
+ if (!(def = qemuMigrationAnyPrepareDef(driver, dom_xml, dname, &origname)))
goto cleanup;
if (virDomainMigratePrepareTunnel3ParamsEnsureACL(dconn, def) < 0)
goto cleanup;
- ret = qemuMigrationPrepareTunnel(driver,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- st, &def, origname, flags);
+ ret = qemuMigrationDstPrepareTunnel(driver,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ st, &def, origname, flags);
cleanup:
VIR_FREE(origname);
virCheckFlags(QEMU_MIGRATION_FLAGS, -1);
- if (!(compression = qemuMigrationCompressionParse(NULL, 0, flags)))
+ if (!(compression = qemuMigrationAnyCompressionParse(NULL, 0, flags)))
return -1;
if (!(vm = qemuDomObjFromDomain(dom)))
goto cleanup;
}
- ret = qemuMigrationPerform(driver, dom->conn, vm, xmlin, NULL,
- dconnuri, uri, NULL, NULL, 0, NULL, 0,
- compression, &migParams,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- flags, dname, resource, true);
+ ret = qemuMigrationSrcPerform(driver, dom->conn, vm, xmlin, NULL,
+ dconnuri, uri, NULL, NULL, 0, NULL, 0,
+ compression, &migParams,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, dname, resource, true);
cleanup:
qemuMigrationParamsClear(&migParams);
if (!(migParams = qemuMigrationParams(params, nparams, flags)))
goto cleanup;
- if (!(compression = qemuMigrationCompressionParse(params, nparams, flags)))
+ if (!(compression = qemuMigrationAnyCompressionParse(params, nparams, flags)))
goto cleanup;
if (!(vm = qemuDomObjFromDomain(dom)))
goto cleanup;
}
- ret = qemuMigrationPerform(driver, dom->conn, vm, dom_xml, persist_xml,
- dconnuri, uri, graphicsuri, listenAddress,
- nmigrate_disks, migrate_disks, nbdPort,
- compression, migParams,
- cookiein, cookieinlen, cookieout, cookieoutlen,
- flags, dname, bandwidth, true);
+ ret = qemuMigrationSrcPerform(driver, dom->conn, vm, dom_xml, persist_xml,
+ dconnuri, uri, graphicsuri, listenAddress,
+ nmigrate_disks, migrate_disks, nbdPort,
+ compression, migParams,
+ cookiein, cookieinlen, cookieout, cookieoutlen,
+ flags, dname, bandwidth, true);
cleanup:
VIR_FREE(compression);
qemuMigrationParamsFree(&migParams);
if (!vm) {
virReportError(VIR_ERR_NO_DOMAIN,
_("no domain with matching name '%s'"), dname);
- qemuMigrationErrorReport(driver, dname);
+ qemuMigrationDstErrorReport(driver, dname);
return NULL;
}
return NULL;
}
- return qemuMigrationFinish(driver, dconn, vm,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- flags, cancelled, true);
+ return qemuMigrationDstFinish(driver, dconn, vm,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, cancelled, true);
}
static virDomainPtr
if (!vm) {
virReportError(VIR_ERR_NO_DOMAIN,
_("no domain with matching name '%s'"), dname);
- qemuMigrationErrorReport(driver, dname);
+ qemuMigrationDstErrorReport(driver, dname);
return NULL;
}
return NULL;
}
- return qemuMigrationFinish(driver, dconn, vm,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- flags, cancelled, true);
+ return qemuMigrationDstFinish(driver, dconn, vm,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, cancelled, true);
}
return -1;
}
- return qemuMigrationConfirm(domain->conn->privateData, vm, cookiein, cookieinlen,
- flags, cancelled);
+ return qemuMigrationSrcConfirm(domain->conn->privateData, vm, cookiein, cookieinlen,
+ flags, cancelled);
}
static int
return -1;
}
- return qemuMigrationConfirm(domain->conn->privateData, vm, cookiein, cookieinlen,
- flags, cancelled);
+ return qemuMigrationSrcConfirm(domain->conn->privateData, vm, cookiein, cookieinlen,
+ flags, cancelled);
}
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) {
if (events &&
jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE &&
- qemuMigrationFetchStats(driver, vm, QEMU_ASYNC_JOB_NONE,
- jobInfo, NULL) < 0)
+ qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_NONE,
+ jobInfo, NULL) < 0)
return -1;
if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE &&
jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION &&
- qemuMigrationFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_NONE,
- jobInfo) < 0)
+ qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_NONE,
+ jobInfo) < 0)
return -1;
if (qemuDomainJobInfoUpdateTime(jobInfo) < 0)
priv = vm->privateData;
- if (!qemuMigrationCapsGet(vm, QEMU_MONITOR_MIGRATION_CAPS_XBZRLE)) {
+ if (!qemuMigrationAnyCapsGet(vm, QEMU_MONITOR_MIGRATION_CAPS_XBZRLE)) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
_("Compressed migration is not supported by "
"QEMU binary"));
priv = vm->privateData;
- if (!qemuMigrationCapsGet(vm, QEMU_MONITOR_MIGRATION_CAPS_XBZRLE)) {
+ if (!qemuMigrationAnyCapsGet(vm, QEMU_MONITOR_MIGRATION_CAPS_XBZRLE)) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
_("Compressed migration is not supported by "
"QEMU binary"));
bool resume = false;
int ret = -1;
- if (!qemuMigrationIsAllowed(driver, vm, false, 0))
+ if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0))
goto cleanup;
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
/* do the memory snapshot if necessary */
if (memory) {
/* check if migration is possible */
- if (!qemuMigrationIsAllowed(driver, vm, false, 0))
+ if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0))
goto cleanup;
/* allow the migration job to be cancelled or the domain to be paused */
virDomainObjPtr obj)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
-/* qemuMigrationCheckTLSCreds
+/* qemuMigrationParamsCheckTLSCreds
* @driver: pointer to qemu driver
* @vm: domain object
* @asyncJob: migration job to join
* private domain structure. Returns -1 on failure.
*/
static int
-qemuMigrationCheckTLSCreds(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationParamsCheckTLSCreds(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob)
{
int ret = -1;
qemuDomainObjPrivatePtr priv = vm->privateData;
}
-/* qemuMigrationCheckSetupTLS
+/* qemuMigrationParamsCheckSetupTLS
* @driver: pointer to qemu driver
* @vm: domain object
* @cfg: configuration pointer
* Returns 0 on success, -1 on error/failure
*/
static int
-qemuMigrationCheckSetupTLS(virQEMUDriverPtr driver,
- virQEMUDriverConfigPtr cfg,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationParamsCheckSetupTLS(virQEMUDriverPtr driver,
+ virQEMUDriverConfigPtr cfg,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
return -1;
}
- if (qemuMigrationCheckTLSCreds(driver, vm, asyncJob) < 0)
+ if (qemuMigrationParamsCheckTLSCreds(driver, vm, asyncJob) < 0)
return -1;
if (!priv->migTLSAlias) {
}
-/* qemuMigrationAddTLSObjects
+/* qemuMigrationParamsAddTLSObjects
* @driver: pointer to qemu driver
* @vm: domain object
* @cfg: configuration pointer
* Returns 0 on success, -1 on failure
*/
static int
-qemuMigrationAddTLSObjects(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- virQEMUDriverConfigPtr cfg,
- bool tlsListen,
- qemuDomainAsyncJob asyncJob,
- char **tlsAlias,
- char **secAlias,
- qemuMonitorMigrationParamsPtr migParams)
+qemuMigrationParamsAddTLSObjects(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ virQEMUDriverConfigPtr cfg,
+ bool tlsListen,
+ qemuDomainAsyncJob asyncJob,
+ char **tlsAlias,
+ char **secAlias,
+ qemuMonitorMigrationParamsPtr migParams)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virJSONValuePtr tlsProps = NULL;
static void
-qemuMigrationStoreDomainState(virDomainObjPtr vm)
+qemuMigrationSrcStoreDomainState(virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
priv->preMigrationState = virDomainObjGetState(vm, NULL);
/* Returns true if the domain was resumed, false otherwise */
static bool
-qemuMigrationRestoreDomainState(virQEMUDriverPtr driver, virDomainObjPtr vm)
+qemuMigrationSrcRestoreDomainState(virQEMUDriverPtr driver, virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int reason;
static int
-qemuMigrationPrecreateDisk(virConnectPtr conn,
- virDomainDiskDefPtr disk,
- unsigned long long capacity)
+qemuMigrationDstPrecreateDisk(virConnectPtr conn,
+ virDomainDiskDefPtr disk,
+ unsigned long long capacity)
{
int ret = -1;
virStoragePoolPtr pool = NULL;
}
static bool
-qemuMigrateDisk(virDomainDiskDef const *disk,
- size_t nmigrate_disks, const char **migrate_disks)
+qemuMigrationAnyCopyDisk(virDomainDiskDef const *disk,
+ size_t nmigrate_disks, const char **migrate_disks)
{
size_t i;
static int
-qemuMigrationPrecreateStorage(virDomainObjPtr vm,
- qemuMigrationCookieNBDPtr nbd,
- size_t nmigrate_disks,
- const char **migrate_disks,
- bool incremental)
+qemuMigrationDstPrecreateStorage(virDomainObjPtr vm,
+ qemuMigrationCookieNBDPtr nbd,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ bool incremental)
{
int ret = -1;
size_t i = 0;
diskSrcPath = virDomainDiskGetSource(disk);
/* Skip disks we don't want to migrate and already existing disks. */
- if (!qemuMigrateDisk(disk, nmigrate_disks, migrate_disks) ||
+ if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks) ||
(diskSrcPath && virFileExists(diskSrcPath))) {
continue;
}
VIR_DEBUG("Proceeding with disk source %s", NULLSTR(diskSrcPath));
- if (qemuMigrationPrecreateDisk(conn, disk, nbd->disks[i].capacity) < 0)
+ if (qemuMigrationDstPrecreateDisk(conn, disk, nbd->disks[i].capacity) < 0)
goto cleanup;
}
/**
- * qemuMigrationStartNBDServer:
+ * qemuMigrationDstStartNBDServer:
* @driver: qemu driver
* @vm: domain
*
* Returns 0 on success, -1 otherwise.
*/
static int
-qemuMigrationStartNBDServer(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- const char *listenAddr,
- size_t nmigrate_disks,
- const char **migrate_disks,
- int nbdPort)
+qemuMigrationDstStartNBDServer(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ const char *listenAddr,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ int nbdPort)
{
int ret = -1;
qemuDomainObjPrivatePtr priv = vm->privateData;
virDomainDiskDefPtr disk = vm->def->disks[i];
/* check whether disk should be migrated */
- if (!qemuMigrateDisk(disk, nmigrate_disks, migrate_disks))
+ if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
continue;
if (disk->src->readonly || virStorageSourceIsEmpty(disk->src)) {
static int
-qemuMigrationStopNBDServer(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuMigrationCookiePtr mig)
+qemuMigrationDstStopNBDServer(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuMigrationCookiePtr mig)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
/**
- * qemuMigrationDriveMirrorReady:
+ * qemuMigrationSrcDriveMirrorReady:
* @driver: qemu driver
* @vm: domain
*
* Check the status of all drive-mirrors started by
- * qemuMigrationDriveMirror. Any pending block job events
+ * qemuMigrationSrcDriveMirror. Any pending block job events
* for the mirrored disks will be processed.
*
* Returns 1 if all mirrors are "ready",
* -1 on error.
*/
static int
-qemuMigrationDriveMirrorReady(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationSrcDriveMirrorReady(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob)
{
size_t i;
size_t notReady = 0;
* -1 on error or when job failed and failNoJob is true.
*/
static int
-qemuMigrationCancelOneDriveMirror(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- virDomainDiskDefPtr disk,
- bool failNoJob,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationSrcCancelOneDriveMirror(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ virDomainDiskDefPtr disk,
+ bool failNoJob,
+ qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
char *diskAlias = NULL;
/**
- * qemuMigrationCancelDriveMirror:
+ * qemuMigrationSrcCancelDriveMirror:
* @driver: qemu driver
* @vm: domain
* @check: if true report an error when some of the mirrors fails
* Returns 0 on success, -1 otherwise.
*/
static int
-qemuMigrationCancelDriveMirror(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- bool check,
- qemuDomainAsyncJob asyncJob,
- virConnectPtr dconn)
+qemuMigrationSrcCancelDriveMirror(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ bool check,
+ qemuDomainAsyncJob asyncJob,
+ virConnectPtr dconn)
{
virErrorPtr err = NULL;
int ret = -1;
if (!diskPriv->migrating)
continue;
- rv = qemuMigrationCancelOneDriveMirror(driver, vm, disk,
- check, asyncJob);
+ rv = qemuMigrationSrcCancelOneDriveMirror(driver, vm, disk,
+ check, asyncJob);
if (rv != 0) {
if (rv < 0) {
if (!err)
* simultaneously to both source and destination. On success,
* update @migrate_flags so we don't tell 'migrate' command
* to do the very same operation. On failure, the caller is
- * expected to call qemuMigrationCancelDriveMirror to stop all
+ * expected to call qemuMigrationSrcCancelDriveMirror to stop all
* running mirrors.
*
* Returns 0 on success (@migrate_flags updated),
* -1 otherwise.
*/
static int
-qemuMigrationDriveMirror(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuMigrationCookiePtr mig,
- const char *host,
- unsigned long speed,
- unsigned int *migrate_flags,
- size_t nmigrate_disks,
- const char **migrate_disks,
- virConnectPtr dconn)
+qemuMigrationSrcDriveMirror(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuMigrationCookiePtr mig,
+ const char *host,
+ unsigned long speed,
+ unsigned int *migrate_flags,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ virConnectPtr dconn)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1;
int mon_ret;
/* check whether disk should be migrated */
- if (!qemuMigrateDisk(disk, nmigrate_disks, migrate_disks))
+ if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
continue;
if (!(diskAlias = qemuAliasFromDisk(disk)) ||
}
}
- while ((rv = qemuMigrationDriveMirrorReady(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT)) != 1) {
+ while ((rv = qemuMigrationSrcDriveMirrorReady(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_OUT)) != 1) {
if (rv < 0)
goto cleanup;
goto cleanup;
}
- qemuMigrationFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
- priv->job.current);
+ qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ priv->job.current);
/* Okay, all disks are ready. Modify migrate_flags */
*migrate_flags &= ~(QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
/**
- * qemuMigrationIsAllowedHostdev:
+ * qemuMigrationSrcIsAllowedHostdev:
* @def: domain definition
*
 * Checks that @def does not contain any host devices unsupported across
* migrations. Returns true if the vm is allowed to migrate.
*/
static bool
-qemuMigrationIsAllowedHostdev(const virDomainDef *def)
+qemuMigrationSrcIsAllowedHostdev(const virDomainDef *def)
{
size_t i;
/**
- * qemuMigrationIsAllowed:
+ * qemuMigrationSrcIsAllowed:
* @driver: qemu driver struct
* @vm: domain object
* @remote: migration is remote
* false otherwise.
*/
bool
-qemuMigrationIsAllowed(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- bool remote,
- unsigned int flags)
+qemuMigrationSrcIsAllowed(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ bool remote,
+ unsigned int flags)
{
int nsnapshots;
int pauseReason;
return false;
}
- if (!qemuMigrationIsAllowedHostdev(vm->def))
+ if (!qemuMigrationSrcIsAllowedHostdev(vm->def))
return false;
if (vm->def->cpu) {
}
static bool
-qemuMigrationIsSafe(virDomainDefPtr def,
- size_t nmigrate_disks,
- const char **migrate_disks,
- unsigned int flags)
+qemuMigrationSrcIsSafe(virDomainDefPtr def,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ unsigned int flags)
{
bool storagemigration = flags & (VIR_MIGRATE_NON_SHARED_DISK |
/* disks which are migrated by qemu are safe too */
if (storagemigration &&
- qemuMigrateDisk(disk, nmigrate_disks, migrate_disks))
+ qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
continue;
if (virDomainDiskGetType(disk) == VIR_STORAGE_TYPE_FILE) {
return true;
}
-/** qemuMigrationSetOffline
+/** qemuMigrationSrcSetOffline
* Pause domain for non-live migration.
*/
int
-qemuMigrationSetOffline(virQEMUDriverPtr driver,
- virDomainObjPtr vm)
+qemuMigrationSrcSetOffline(virQEMUDriverPtr driver,
+ virDomainObjPtr vm)
{
int ret;
VIR_DEBUG("driver=%p vm=%p", driver, vm);
void
-qemuMigrationPostcopyFailed(virQEMUDriverPtr driver,
- virDomainObjPtr vm)
+qemuMigrationAnyPostcopyFailed(virQEMUDriverPtr driver,
+ virDomainObjPtr vm)
{
virDomainState state;
int reason;
static int
-qemuMigrationSetOption(virQEMUDriverPtr driver,
+qemuMigrationOptionSet(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuMonitorMigrationCaps capability,
bool state,
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret;
- if (!qemuMigrationCapsGet(vm, capability)) {
+ if (!qemuMigrationAnyCapsGet(vm, capability)) {
if (!state) {
/* Unsupported but we want it off anyway */
return 0;
static int
-qemuMigrationSetPostCopy(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- bool state,
- qemuDomainAsyncJob job)
+qemuMigrationOptionSetPostCopy(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ bool state,
+ qemuDomainAsyncJob job)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
- if (qemuMigrationSetOption(driver, vm,
+ if (qemuMigrationOptionSet(driver, vm,
QEMU_MONITOR_MIGRATION_CAPS_POSTCOPY,
state, job) < 0)
return -1;
static int
-qemuMigrationWaitForSpice(virDomainObjPtr vm)
+qemuMigrationSrcWaitForSpice(virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int
-qemuMigrationFetchStats(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
- qemuDomainJobInfoPtr jobInfo,
- char **error)
+qemuMigrationAnyFetchStats(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob,
+ qemuDomainJobInfoPtr jobInfo,
+ char **error)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
qemuMonitorMigrationStats stats;
static int
-qemuMigrationCheckJobStatus(virQEMUDriverPtr driver,
+qemuMigrationJobCheckStatus(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuDomainAsyncJob asyncJob)
{
if (!events ||
jobInfo->stats.mig.status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) {
- if (qemuMigrationFetchStats(driver, vm, asyncJob, jobInfo, &error) < 0)
+ if (qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, &error) < 0)
return -1;
}
enum qemuMigrationCompletedFlags {
QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR = (1 << 0),
+ /* This flag should only be set when run on src host */
QEMU_MIGRATION_COMPLETED_CHECK_STORAGE = (1 << 1),
QEMU_MIGRATION_COMPLETED_POSTCOPY = (1 << 2),
QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER = (1 << 3),
* -2 something else failed, we need to cancel migration.
*/
static int
-qemuMigrationCompleted(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
- virConnectPtr dconn,
- unsigned int flags)
+qemuMigrationAnyCompleted(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob,
+ virConnectPtr dconn,
+ unsigned int flags)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
qemuDomainJobInfoPtr jobInfo = priv->job.current;
int pauseReason;
- if (qemuMigrationCheckJobStatus(driver, vm, asyncJob) < 0)
+ if (qemuMigrationJobCheckStatus(driver, vm, asyncJob) < 0)
goto error;
+ /* This flag should only be set when run on src host */
if (flags & QEMU_MIGRATION_COMPLETED_CHECK_STORAGE &&
- qemuMigrationDriveMirrorReady(driver, vm, asyncJob) < 0)
+ qemuMigrationSrcDriveMirrorReady(driver, vm, asyncJob) < 0)
goto error;
if (flags & QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR &&
* QEMU reports failed migration.
*/
static int
-qemuMigrationWaitForCompletion(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
- virConnectPtr dconn,
- unsigned int flags)
+qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob,
+ virConnectPtr dconn,
+ unsigned int flags)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
qemuDomainJobInfoPtr jobInfo = priv->job.current;
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING;
- while ((rv = qemuMigrationCompleted(driver, vm, asyncJob,
- dconn, flags)) != 1) {
+ while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob,
+ dconn, flags)) != 1) {
if (rv < 0)
return rv;
}
if (events)
- ignore_value(qemuMigrationFetchStats(driver, vm, asyncJob, jobInfo, NULL));
+ ignore_value(qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, NULL));
qemuDomainJobInfoUpdateTime(jobInfo);
qemuDomainJobInfoUpdateDowntime(jobInfo);
static int
-qemuMigrationWaitForDestCompletion(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
- bool postcopy)
+qemuMigrationDstWaitForCompletion(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob,
+ bool postcopy)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
unsigned int flags = 0;
if (postcopy)
flags = QEMU_MIGRATION_COMPLETED_POSTCOPY;
- while ((rv = qemuMigrationCompleted(driver, vm, asyncJob,
- NULL, flags)) != 1) {
+ while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob,
+ NULL, flags)) != 1) {
if (rv < 0 || virDomainObjWait(vm) < 0)
return -1;
}
static int
-qemuDomainMigrateGraphicsRelocate(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuMigrationCookiePtr cookie,
- const char *graphicsuri)
+qemuMigrationSrcGraphicsRelocate(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuMigrationCookiePtr cookie,
+ const char *graphicsuri)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1;
static int
-qemuDomainMigrateOPDRelocate(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
- virDomainObjPtr vm,
- qemuMigrationCookiePtr cookie)
+qemuMigrationDstOPDRelocate(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
+ virDomainObjPtr vm,
+ qemuMigrationCookiePtr cookie)
{
virDomainNetDefPtr netptr;
int ret = -1;
int
-qemuMigrationCheckIncoming(virQEMUCapsPtr qemuCaps,
- const char *migrateFrom)
+qemuMigrationDstCheckProtocol(virQEMUCapsPtr qemuCaps,
+ const char *migrateFrom)
{
if (STRPREFIX(migrateFrom, "rdma")) {
if (!virQEMUCapsGet(qemuCaps, QEMU_CAPS_MIGRATE_RDMA)) {
char *
-qemuMigrationIncomingURI(const char *migrateFrom,
- int migrateFd)
+qemuMigrationDstGetURI(const char *migrateFrom,
+ int migrateFd)
{
char *uri = NULL;
int
-qemuMigrationRunIncoming(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- const char *uri,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationDstRun(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ const char *uri,
+ qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1;
goto cleanup;
if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
- /* qemuMigrationWaitForDestCompletion is called from the Finish phase */
+ /* qemuMigrationDstWaitForCompletion is called from the Finish phase */
ret = 0;
goto cleanup;
}
- if (qemuMigrationWaitForDestCompletion(driver, vm, asyncJob, false) < 0)
+ if (qemuMigrationDstWaitForCompletion(driver, vm, asyncJob, false) < 0)
goto cleanup;
ret = 0;
* qemuDomainMigratePerform3 and qemuDomainMigrateConfirm3.
*/
static virDomainObjPtr
-qemuMigrationCleanup(virDomainObjPtr vm,
- virConnectPtr conn,
- void *opaque)
+qemuMigrationSrcCleanup(virDomainObjPtr vm,
+ virConnectPtr conn,
+ void *opaque)
{
virQEMUDriverPtr driver = opaque;
qemuDomainObjPrivatePtr priv = vm->privateData;
/* The caller is supposed to lock the vm and start a migration job. */
static char *
-qemuMigrationBeginPhase(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- const char *xmlin,
- const char *dname,
- char **cookieout,
- int *cookieoutlen,
- size_t nmigrate_disks,
- const char **migrate_disks,
- unsigned long flags)
+qemuMigrationSrcBeginPhase(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ const char *xmlin,
+ const char *dname,
+ char **cookieout,
+ int *cookieoutlen,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ unsigned long flags)
{
char *rv = NULL;
qemuMigrationCookiePtr mig = NULL;
if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_BEGIN3);
- if (!qemuMigrationIsAllowed(driver, vm, true, flags))
+ if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
goto cleanup;
if (!(flags & (VIR_MIGRATE_UNSAFE | VIR_MIGRATE_OFFLINE)) &&
- !qemuMigrationIsSafe(vm->def, nmigrate_disks, migrate_disks, flags))
+ !qemuMigrationSrcIsSafe(vm->def, nmigrate_disks, migrate_disks, flags))
goto cleanup;
if (flags & VIR_MIGRATE_POSTCOPY &&
}
char *
-qemuMigrationBegin(virConnectPtr conn,
- virDomainObjPtr vm,
- const char *xmlin,
- const char *dname,
- char **cookieout,
- int *cookieoutlen,
- size_t nmigrate_disks,
- const char **migrate_disks,
- unsigned long flags)
+qemuMigrationSrcBegin(virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *xmlin,
+ const char *dname,
+ char **cookieout,
+ int *cookieoutlen,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ unsigned long flags)
{
virQEMUDriverPtr driver = conn->privateData;
virQEMUDriverConfigPtr cfg = NULL;
asyncJob = QEMU_ASYNC_JOB_NONE;
}
- qemuMigrationStoreDomainState(vm);
+ qemuMigrationSrcStoreDomainState(vm);
if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
virReportError(VIR_ERR_OPERATION_INVALID,
qemuProcessRefreshDisks(driver, vm, asyncJob) < 0)
goto endjob;
- if (!(xml = qemuMigrationBeginPhase(driver, vm, xmlin, dname,
- cookieout, cookieoutlen,
- nmigrate_disks, migrate_disks, flags)))
+ if (!(xml = qemuMigrationSrcBeginPhase(driver, vm, xmlin, dname,
+ cookieout, cookieoutlen,
+ nmigrate_disks, migrate_disks, flags)))
goto endjob;
if (flags & VIR_MIGRATE_TLS) {
cfg = virQEMUDriverGetConfig(driver);
- if (qemuMigrationCheckSetupTLS(driver, cfg, vm, asyncJob) < 0)
+ if (qemuMigrationParamsCheckSetupTLS(driver, cfg, vm, asyncJob) < 0)
goto endjob;
}
* place.
*/
if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
- qemuMigrationCleanup) < 0) {
+ qemuMigrationSrcCleanup) < 0) {
VIR_FREE(xml);
goto endjob;
}
*/
static void
-qemuMigrationPrepareCleanup(virQEMUDriverPtr driver,
- virDomainObjPtr vm)
+qemuMigrationDstPrepareCleanup(virQEMUDriverPtr driver,
+ virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
}
static qemuProcessIncomingDefPtr
-qemuMigrationPrepareIncoming(virDomainObjPtr vm,
- bool tunnel,
- const char *protocol,
- const char *listenAddress,
- unsigned short port,
- int fd)
+qemuMigrationDstPrepare(virDomainObjPtr vm,
+ bool tunnel,
+ const char *protocol,
+ const char *listenAddress,
+ unsigned short port,
+ int fd)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
qemuProcessIncomingDefPtr inc = NULL;
}
static int
-qemuMigrationSetCompression(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob job,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams)
+qemuMigrationParamsSetCompression(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob job,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams)
{
int ret = -1;
qemuDomainObjPrivatePtr priv = vm->privateData;
- if (qemuMigrationSetOption(driver, vm,
+ if (qemuMigrationOptionSet(driver, vm,
QEMU_MONITOR_MIGRATION_CAPS_XBZRLE,
compression->methods &
- (1ULL << QEMU_MIGRATION_COMPRESS_XBZRLE),
+ (1ULL << QEMU_MIGRATION_COMPRESS_XBZRLE),
job) < 0)
return -1;
- if (qemuMigrationSetOption(driver, vm,
+ if (qemuMigrationOptionSet(driver, vm,
QEMU_MONITOR_MIGRATION_CAPS_COMPRESS,
compression->methods &
- (1ULL << QEMU_MIGRATION_COMPRESS_MT),
+ (1ULL << QEMU_MIGRATION_COMPRESS_MT),
job) < 0)
return -1;
}
-/* qemuMigrationSetEmptyTLSParams
+/* qemuMigrationParamsSetEmptyTLS
* @driver: pointer to qemu driver
* @vm: domain object
* @asyncJob: migration job to join
* Returns 0 on success, -1 on failure
*/
static int
-qemuMigrationSetEmptyTLSParams(virQEMUDriverPtr driver,
+qemuMigrationParamsSetEmptyTLS(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuDomainAsyncJob asyncJob,
qemuMonitorMigrationParamsPtr migParams)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
- if (qemuMigrationCheckTLSCreds(driver, vm, asyncJob) < 0)
+ if (qemuMigrationParamsCheckTLSCreds(driver, vm, asyncJob) < 0)
return -1;
if (!priv->migTLSAlias)
static int
-qemuMigrationSetParams(virQEMUDriverPtr driver,
+qemuMigrationParamsSet(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuDomainAsyncJob job,
qemuMonitorMigrationParamsPtr migParams)
}
-/* qemuMigrationResetTLS
+/* qemuMigrationParamsResetTLS
* @driver: pointer to qemu driver
* @vm: domain object
* @asyncJob: migration job to join
* Returns 0 on success, -1 on failure
*/
static int
-qemuMigrationResetTLS(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationParamsResetTLS(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
char *tlsAlias = NULL;
qemuMonitorMigrationParams migParams = { 0 };
int ret = -1;
- if (qemuMigrationCheckTLSCreds(driver, vm, asyncJob) < 0)
+ if (qemuMigrationParamsCheckTLSCreds(driver, vm, asyncJob) < 0)
return -1;
/* If the tls-creds doesn't exist or if they're set to "" then there's
if (VIR_STRDUP(migParams.tlsCreds, "") < 0 ||
VIR_STRDUP(migParams.tlsHostname, "") < 0 ||
- qemuMigrationSetParams(driver, vm, asyncJob, &migParams) < 0)
+ qemuMigrationParamsSet(driver, vm, asyncJob, &migParams) < 0)
goto cleanup;
ret = 0;
static int
-qemuMigrationPrepareAny(virQEMUDriverPtr driver,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- virDomainDefPtr *def,
- const char *origname,
- virStreamPtr st,
- const char *protocol,
- unsigned short port,
- bool autoPort,
- const char *listenAddress,
- size_t nmigrate_disks,
- const char **migrate_disks,
- int nbdPort,
- qemuMigrationCompressionPtr compression,
- unsigned long flags)
+qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ virDomainDefPtr *def,
+ const char *origname,
+ virStreamPtr st,
+ const char *protocol,
+ unsigned short port,
+ bool autoPort,
+ const char *listenAddress,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ int nbdPort,
+ qemuMigrationCompressionPtr compression,
+ unsigned long flags)
{
virDomainObjPtr vm = NULL;
virObjectEventPtr event = NULL;
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
goto cleanup;
- if (!qemuMigrationIsAllowedHostdev(*def))
+ if (!qemuMigrationSrcIsAllowedHostdev(*def))
goto cleanup;
/* Let migration hook filter domain XML */
goto cleanup;
}
- if (qemuMigrationPrecreateStorage(vm, mig->nbd,
- nmigrate_disks, migrate_disks,
- !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0)
+ if (qemuMigrationDstPrecreateStorage(vm, mig->nbd,
+ nmigrate_disks, migrate_disks,
+ !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0)
goto cleanup;
if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
priv->allowReboot = mig->allowReboot;
- if (!(incoming = qemuMigrationPrepareIncoming(vm, tunnel, protocol,
- listenAddress, port,
- dataFD[0])))
+ if (!(incoming = qemuMigrationDstPrepare(vm, tunnel, protocol,
+ listenAddress, port,
+ dataFD[0])))
goto stopjob;
if (qemuProcessPrepareDomain(driver, vm, startFlags) < 0)
dataFD[1] = -1; /* 'st' owns the FD now & will close it */
}
- if (qemuMigrationSetCompression(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
- compression, &migParams) < 0)
+ if (qemuMigrationParamsSetCompression(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ compression, &migParams) < 0)
goto stopjob;
/* Migrations using TLS need to add the "tls-creds-x509" object and
* set the migration TLS parameters */
if (flags & VIR_MIGRATE_TLS) {
cfg = virQEMUDriverGetConfig(driver);
- if (qemuMigrationCheckSetupTLS(driver, cfg, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ if (qemuMigrationParamsCheckSetupTLS(driver, cfg, vm,
+ QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
goto stopjob;
- if (qemuMigrationAddTLSObjects(driver, vm, cfg, true,
- QEMU_ASYNC_JOB_MIGRATION_IN,
- &tlsAlias, &secAlias, &migParams) < 0)
+ if (qemuMigrationParamsAddTLSObjects(driver, vm, cfg, true,
+ QEMU_ASYNC_JOB_MIGRATION_IN,
+ &tlsAlias, &secAlias, &migParams) < 0)
goto stopjob;
/* Force reset of 'tls-hostname', it's a source only parameter */
goto stopjob;
} else {
- if (qemuMigrationSetEmptyTLSParams(driver, vm,
+ if (qemuMigrationParamsSetEmptyTLS(driver, vm,
QEMU_ASYNC_JOB_MIGRATION_IN,
&migParams) < 0)
goto stopjob;
goto stopjob;
}
- if (qemuMigrationSetOption(driver, vm,
+ if (qemuMigrationOptionSet(driver, vm,
QEMU_MONITOR_MIGRATION_CAPS_RDMA_PIN_ALL,
flags & VIR_MIGRATE_RDMA_PIN_ALL,
QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
goto stopjob;
- if (qemuMigrationSetPostCopy(driver, vm,
- flags & VIR_MIGRATE_POSTCOPY,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ if (qemuMigrationOptionSetPostCopy(driver, vm,
+ flags & VIR_MIGRATE_POSTCOPY,
+ QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
goto stopjob;
- if (qemuMigrationSetParams(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuMigrationParamsSet(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
&migParams) < 0)
goto stopjob;
if (mig->nbd &&
flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC) &&
virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_SERVER)) {
- if (qemuMigrationStartNBDServer(driver, vm, incoming->address,
- nmigrate_disks, migrate_disks,
- nbdPort) < 0) {
+ if (qemuMigrationDstStartNBDServer(driver, vm, incoming->address,
+ nmigrate_disks, migrate_disks,
+ nbdPort) < 0) {
goto stopjob;
}
cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
}
if (incoming->deferredURI &&
- qemuMigrationRunIncoming(driver, vm, incoming->deferredURI,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ qemuMigrationDstRun(driver, vm, incoming->deferredURI,
+ QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
goto stopjob;
if (qemuProcessFinishStartup(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
VIR_WARN("Unable to encode migration cookie");
}
- if (qemuDomainCleanupAdd(vm, qemuMigrationPrepareCleanup) < 0)
+ if (qemuDomainCleanupAdd(vm, qemuMigrationDstPrepareCleanup) < 0)
goto stopjob;
if (!(flags & VIR_MIGRATE_OFFLINE)) {
return ret;
stopjob:
- qemuMigrationReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
+ qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
if (stopProcess) {
unsigned int stopFlags = VIR_QEMU_PROCESS_STOP_MIGRATED;
* sets up the corresponding virStream to handle the incoming data.
*/
int
-qemuMigrationPrepareTunnel(virQEMUDriverPtr driver,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- virStreamPtr st,
- virDomainDefPtr *def,
- const char *origname,
- unsigned long flags)
+qemuMigrationDstPrepareTunnel(virQEMUDriverPtr driver,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ virStreamPtr st,
+ virDomainDefPtr *def,
+ const char *origname,
+ unsigned long flags)
{
qemuMigrationCompressionPtr compression = NULL;
int ret;
return -1;
}
- if (!(compression = qemuMigrationCompressionParse(NULL, 0, flags)))
+ if (!(compression = qemuMigrationAnyCompressionParse(NULL, 0, flags)))
return -1;
- ret = qemuMigrationPrepareAny(driver, cookiein, cookieinlen,
- cookieout, cookieoutlen, def, origname,
- st, NULL, 0, false, NULL, 0, NULL, 0,
- compression, flags);
+ ret = qemuMigrationDstPrepareAny(driver, cookiein, cookieinlen,
+ cookieout, cookieoutlen, def, origname,
+ st, NULL, 0, false, NULL, 0, NULL, 0,
+ compression, flags);
VIR_FREE(compression);
return ret;
}
static virURIPtr
-qemuMigrationParseURI(const char *uri, bool *wellFormed)
+qemuMigrationAnyParseURI(const char *uri, bool *wellFormed)
{
char *tmp = NULL;
virURIPtr parsed;
int
-qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- const char *uri_in,
- char **uri_out,
- virDomainDefPtr *def,
- const char *origname,
- const char *listenAddress,
- size_t nmigrate_disks,
- const char **migrate_disks,
- int nbdPort,
- qemuMigrationCompressionPtr compression,
- unsigned long flags)
+qemuMigrationDstPrepareDirect(virQEMUDriverPtr driver,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ const char *uri_in,
+ char **uri_out,
+ virDomainDefPtr *def,
+ const char *origname,
+ const char *listenAddress,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ int nbdPort,
+ qemuMigrationCompressionPtr compression,
+ unsigned long flags)
{
unsigned short port = 0;
bool autoPort = true;
} else {
bool well_formed_uri;
- if (!(uri = qemuMigrationParseURI(uri_in, &well_formed_uri)))
+ if (!(uri = qemuMigrationAnyParseURI(uri_in, &well_formed_uri)))
goto cleanup;
if (uri->scheme == NULL) {
if (*uri_out)
VIR_DEBUG("Generated uri_out=%s", *uri_out);
- ret = qemuMigrationPrepareAny(driver, cookiein, cookieinlen,
- cookieout, cookieoutlen, def, origname,
- NULL, uri ? uri->scheme : "tcp",
- port, autoPort, listenAddress,
- nmigrate_disks, migrate_disks, nbdPort,
- compression, flags);
+ ret = qemuMigrationDstPrepareAny(driver, cookiein, cookieinlen,
+ cookieout, cookieoutlen, def, origname,
+ NULL, uri ? uri->scheme : "tcp",
+ port, autoPort, listenAddress,
+ nmigrate_disks, migrate_disks, nbdPort,
+ compression, flags);
cleanup:
virURIFree(uri);
VIR_FREE(hostname);
virDomainDefPtr
-qemuMigrationPrepareDef(virQEMUDriverPtr driver,
- const char *dom_xml,
- const char *dname,
- char **origname)
+qemuMigrationAnyPrepareDef(virQEMUDriverPtr driver,
+ const char *dom_xml,
+ const char *dname,
+ char **origname)
{
virCapsPtr caps = NULL;
virDomainDefPtr def;
static int
-qemuMigrationConfirmPhase(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- const char *cookiein,
- int cookieinlen,
- unsigned int flags,
- int retcode)
+qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ const char *cookiein,
+ int cookieinlen,
+ unsigned int flags,
+ int retcode)
{
qemuMigrationCookiePtr mig;
virObjectEventPtr event;
*/
if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_POSTCOPY &&
- qemuMigrationFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
- jobInfo, NULL) < 0)
+ qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ jobInfo, NULL) < 0)
VIR_WARN("Could not refresh migration statistics");
qemuDomainJobInfoUpdateTime(jobInfo);
if (retcode == 0) {
/* If guest uses SPICE and supports seamless migration we have to hold
* up domain shutdown until SPICE server transfers its data */
- qemuMigrationWaitForSpice(vm);
+ qemuMigrationSrcWaitForSpice(vm);
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
QEMU_ASYNC_JOB_MIGRATION_OUT,
int reason;
/* cancel any outstanding NBD jobs */
- qemuMigrationCancelDriveMirror(driver, vm, false,
- QEMU_ASYNC_JOB_MIGRATION_OUT, NULL);
+ qemuMigrationSrcCancelDriveMirror(driver, vm, false,
+ QEMU_ASYNC_JOB_MIGRATION_OUT, NULL);
virSetError(orig_err);
virFreeError(orig_err);
if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_POSTCOPY) {
- qemuMigrationPostcopyFailed(driver, vm);
- } else if (qemuMigrationRestoreDomainState(driver, vm)) {
+ qemuMigrationAnyPostcopyFailed(driver, vm);
+ } else if (qemuMigrationSrcRestoreDomainState(driver, vm)) {
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_RESUMED,
VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
qemuDomainEventQueue(driver, event);
}
- qemuMigrationReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
+ qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
VIR_WARN("Failed to save status on vm %s", vm->def->name);
}
int
-qemuMigrationConfirm(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- const char *cookiein,
- int cookieinlen,
- unsigned int flags,
- int cancelled)
+qemuMigrationSrcConfirm(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ const char *cookiein,
+ int cookieinlen,
+ unsigned int flags,
+ int cancelled)
{
qemuMigrationJobPhase phase;
virQEMUDriverConfigPtr cfg = NULL;
qemuMigrationJobStartPhase(driver, vm, phase);
virCloseCallbacksUnset(driver->closeCallbacks, vm,
- qemuMigrationCleanup);
+ qemuMigrationSrcCleanup);
- ret = qemuMigrationConfirmPhase(driver, vm,
- cookiein, cookieinlen,
- flags, cancelled);
+ ret = qemuMigrationSrcConfirmPhase(driver, vm,
+ cookiein, cookieinlen,
+ flags, cancelled);
qemuMigrationJobFinish(driver, vm);
if (!virDomainObjIsActive(vm)) {
int wakeupSendFD;
};
-static void qemuMigrationIOFunc(void *arg)
+static void qemuMigrationSrcIOFunc(void *arg)
{
qemuMigrationIOThreadPtr data = arg;
char *buffer = NULL;
static qemuMigrationIOThreadPtr
-qemuMigrationStartTunnel(virStreamPtr st,
- int sock)
+qemuMigrationSrcStartTunnel(virStreamPtr st,
+ int sock)
{
qemuMigrationIOThreadPtr io = NULL;
int wakeupFD[2] = { -1, -1 };
io->wakeupSendFD = wakeupFD[1];
if (virThreadCreate(&io->thread, true,
- qemuMigrationIOFunc,
+ qemuMigrationSrcIOFunc,
io) < 0) {
virReportSystemError(errno, "%s",
_("Unable to create migration thread"));
}
static int
-qemuMigrationStopTunnel(qemuMigrationIOThreadPtr io, bool error)
+qemuMigrationSrcStopTunnel(qemuMigrationIOThreadPtr io, bool error)
{
int rv = -1;
char stop = error ? 1 : 0;
}
static int
-qemuMigrationConnect(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuMigrationSpecPtr spec)
+qemuMigrationSrcConnect(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuMigrationSpecPtr spec)
{
virNetSocketPtr sock;
const char *host;
static int
-qemuMigrationContinue(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuMonitorMigrationStatus status,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationSrcContinue(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuMonitorMigrationStatus status,
+ qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret;
static int
-qemuMigrationRun(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- const char *persist_xml,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- unsigned long resource,
- qemuMigrationSpecPtr spec,
- virConnectPtr dconn,
- const char *graphicsuri,
- size_t nmigrate_disks,
- const char **migrate_disks,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams)
+qemuMigrationSrcRun(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ const char *persist_xml,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ unsigned long resource,
+ qemuMigrationSpecPtr spec,
+ virConnectPtr dconn,
+ const char *graphicsuri,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams)
{
int ret = -1;
unsigned int migrate_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
if (flags & VIR_MIGRATE_PERSIST_DEST) {
if (persist_xml) {
- if (!(persistDef = qemuMigrationPrepareDef(driver, persist_xml,
- NULL, NULL)))
+ if (!(persistDef = qemuMigrationAnyPrepareDef(driver, persist_xml,
+ NULL, NULL)))
goto error;
} else {
virDomainDefPtr def = vm->newDef ? vm->newDef : vm->def;
if (!mig)
goto error;
- if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig, graphicsuri) < 0)
+ if (qemuMigrationSrcGraphicsRelocate(driver, vm, mig, graphicsuri) < 0)
VIR_WARN("unable to provide data for graphics client relocation");
if (flags & VIR_MIGRATE_TLS) {
/* Begin/CheckSetupTLS already set up migTLSAlias, the following
* assumes that and adds the TLS objects to the domain. */
- if (qemuMigrationAddTLSObjects(driver, vm, cfg, false,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
- &tlsAlias, &secAlias, migParams) < 0)
+ if (qemuMigrationParamsAddTLSObjects(driver, vm, cfg, false,
+ QEMU_ASYNC_JOB_MIGRATION_OUT,
+ &tlsAlias, &secAlias, migParams) < 0)
goto error;
/* We need to add tls-hostname whenever QEMU itself does not
goto error;
}
} else {
- if (qemuMigrationSetEmptyTLSParams(driver, vm,
+ if (qemuMigrationParamsSetEmptyTLS(driver, vm,
QEMU_ASYNC_JOB_MIGRATION_OUT,
migParams) < 0)
goto error;
QEMU_MONITOR_MIGRATE_NON_SHARED_INC)) {
if (mig->nbd) {
/* This will update migrate_flags on success */
- if (qemuMigrationDriveMirror(driver, vm, mig,
- spec->dest.host.name,
- migrate_speed,
- &migrate_flags,
- nmigrate_disks,
- migrate_disks,
- dconn) < 0) {
+ if (qemuMigrationSrcDriveMirror(driver, vm, mig,
+ spec->dest.host.name,
+ migrate_speed,
+ &migrate_flags,
+ nmigrate_disks,
+ migrate_disks,
+ dconn) < 0) {
goto error;
}
} else {
/* Before EnterMonitor, since qemuMigrationSrcSetOffline already does that */
if (!(flags & VIR_MIGRATE_LIVE) &&
virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
- if (qemuMigrationSetOffline(driver, vm) < 0)
+ if (qemuMigrationSrcSetOffline(driver, vm) < 0)
goto error;
}
- if (qemuMigrationSetCompression(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
- compression, migParams) < 0)
+ if (qemuMigrationParamsSetCompression(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ compression, migParams) < 0)
goto error;
- if (qemuMigrationSetOption(driver, vm,
+ if (qemuMigrationOptionSet(driver, vm,
QEMU_MONITOR_MIGRATION_CAPS_AUTO_CONVERGE,
flags & VIR_MIGRATE_AUTO_CONVERGE,
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
- if (qemuMigrationSetOption(driver, vm,
+ if (qemuMigrationOptionSet(driver, vm,
QEMU_MONITOR_MIGRATION_CAPS_RDMA_PIN_ALL,
flags & VIR_MIGRATE_RDMA_PIN_ALL,
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
- if (qemuMigrationSetPostCopy(driver, vm,
- flags & VIR_MIGRATE_POSTCOPY,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ if (qemuMigrationOptionSetPostCopy(driver, vm,
+ flags & VIR_MIGRATE_POSTCOPY,
+ QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
- if (qemuMigrationCapsGet(vm, QEMU_MONITOR_MIGRATION_CAPS_PAUSE_BEFORE_SWITCHOVER) &&
- qemuMigrationSetOption(driver, vm,
+ if (qemuMigrationAnyCapsGet(vm, QEMU_MONITOR_MIGRATION_CAPS_PAUSE_BEFORE_SWITCHOVER) &&
+ qemuMigrationOptionSet(driver, vm,
QEMU_MONITOR_MIGRATION_CAPS_PAUSE_BEFORE_SWITCHOVER,
true, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
- if (qemuMigrationSetParams(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ if (qemuMigrationParamsSet(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
migParams) < 0)
goto error;
/* connect to the destination qemu if needed */
if (spec->destType == MIGRATION_DEST_CONNECT_HOST &&
- qemuMigrationConnect(driver, vm, spec) < 0) {
+ qemuMigrationSrcConnect(driver, vm, spec) < 0) {
goto exit_monitor;
}
cancel = true;
if (spec->fwdType != MIGRATION_FWD_DIRECT) {
- if (!(iothread = qemuMigrationStartTunnel(spec->fwd.stream, fd)))
+ if (!(iothread = qemuMigrationSrcStartTunnel(spec->fwd.stream, fd)))
goto error;
/* If we've created a tunnel, then the 'fd' will be closed in the
* qemuMigrationSrcIOFunc as data->sock.
if (flags & VIR_MIGRATE_POSTCOPY)
waitFlags |= QEMU_MIGRATION_COMPLETED_POSTCOPY;
- rc = qemuMigrationWaitForCompletion(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
- dconn, waitFlags);
+ rc = qemuMigrationSrcWaitForCompletion(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_OUT,
+ dconn, waitFlags);
if (rc == -2) {
goto error;
} else if (rc == -1) {
goto error;
}
} else if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING &&
- qemuMigrationSetOffline(driver, vm) < 0) {
+ qemuMigrationSrcSetOffline(driver, vm) < 0) {
goto error;
}
if (mig && mig->nbd &&
- qemuMigrationCancelDriveMirror(driver, vm, true,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
- dconn) < 0)
+ qemuMigrationSrcCancelDriveMirror(driver, vm, true,
+ QEMU_ASYNC_JOB_MIGRATION_OUT,
+ dconn) < 0)
goto error;
/* When migration was paused before serializing device state we need to
* end of the migration.
*/
if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) {
- if (qemuMigrationContinue(driver, vm,
- QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ if (qemuMigrationSrcContinue(driver, vm,
+ QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER,
+ QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
waitFlags ^= QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER;
- rc = qemuMigrationWaitForCompletion(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
- dconn, waitFlags);
+ rc = qemuMigrationSrcWaitForCompletion(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_OUT,
+ dconn, waitFlags);
if (rc == -2) {
goto error;
} else if (rc == -1) {
qemuMigrationIOThreadPtr io;
VIR_STEAL_PTR(io, iothread);
- if (qemuMigrationStopTunnel(io, false) < 0)
+ if (qemuMigrationSrcStopTunnel(io, false) < 0)
goto error;
}
/* cancel any outstanding NBD jobs */
if (mig && mig->nbd)
- qemuMigrationCancelDriveMirror(driver, vm, false,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
- dconn);
+ qemuMigrationSrcCancelDriveMirror(driver, vm, false,
+ QEMU_ASYNC_JOB_MIGRATION_OUT,
+ dconn);
if (iothread)
- qemuMigrationStopTunnel(iothread, true);
+ qemuMigrationSrcStopTunnel(iothread, true);
if (priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_CANCELED)
priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
/* Perform migration using QEMU's native migrate support,
* not encrypted obviously
*/
-static int doNativeMigrate(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- const char *persist_xml,
- const char *uri,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- unsigned long resource,
- virConnectPtr dconn,
- const char *graphicsuri,
- size_t nmigrate_disks,
- const char **migrate_disks,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams)
+static int
+qemuMigrationSrcPerformNative(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ const char *persist_xml,
+ const char *uri,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ unsigned long resource,
+ virConnectPtr dconn,
+ const char *graphicsuri,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virURIPtr uribits = NULL;
cookieout, cookieoutlen, flags, resource,
NULLSTR(graphicsuri), nmigrate_disks, migrate_disks);
- if (!(uribits = qemuMigrationParseURI(uri, NULL)))
+ if (!(uribits = qemuMigrationAnyParseURI(uri, NULL)))
return -1;
if (uribits->scheme == NULL) {
spec.dest.host.port = uribits->port;
spec.fwdType = MIGRATION_FWD_DIRECT;
- ret = qemuMigrationRun(driver, vm, persist_xml, cookiein, cookieinlen, cookieout,
- cookieoutlen, flags, resource, &spec, dconn,
- graphicsuri, nmigrate_disks, migrate_disks,
- compression, migParams);
+ ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen, cookieout,
+ cookieoutlen, flags, resource, &spec, dconn,
+ graphicsuri, nmigrate_disks, migrate_disks,
+ compression, migParams);
if (spec.destType == MIGRATION_DEST_FD)
VIR_FORCE_CLOSE(spec.dest.fd.qemu);
}
-static int doTunnelMigrate(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- virStreamPtr st,
- const char *persist_xml,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- unsigned long resource,
- virConnectPtr dconn,
- const char *graphicsuri,
- size_t nmigrate_disks,
- const char **migrate_disks,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams)
+static int
+qemuMigrationSrcPerformTunnel(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ virStreamPtr st,
+ const char *persist_xml,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ unsigned long resource,
+ virConnectPtr dconn,
+ const char *graphicsuri,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams)
{
int ret = -1;
qemuMigrationSpec spec;
goto cleanup;
}
- ret = qemuMigrationRun(driver, vm, persist_xml, cookiein, cookieinlen,
- cookieout, cookieoutlen, flags, resource, &spec,
- dconn, graphicsuri, nmigrate_disks, migrate_disks,
- compression, migParams);
+ ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen,
+ cookieout, cookieoutlen, flags, resource, &spec,
+ dconn, graphicsuri, nmigrate_disks, migrate_disks,
+ compression, migParams);
cleanup:
VIR_FORCE_CLOSE(spec.dest.fd.qemu);
* from libvirt.c, but running in source libvirtd context,
* instead of client app context & also adding in tunnel
* handling */
-static int doPeer2PeerMigrate2(virQEMUDriverPtr driver,
- virConnectPtr sconn,
- virConnectPtr dconn,
- virDomainObjPtr vm,
- const char *dconnuri,
- unsigned long flags,
- const char *dname,
- unsigned long resource)
+static int
+qemuMigrationSrcPerformPeer2Peer2(virQEMUDriverPtr driver,
+ virConnectPtr sconn,
+ virConnectPtr dconn,
+ virDomainObjPtr vm,
+ const char *dconnuri,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource)
{
virDomainPtr ddomain = NULL;
char *uri_out = NULL;
destflags = flags & ~(VIR_MIGRATE_ABORT_ON_ERROR |
VIR_MIGRATE_AUTO_CONVERGE);
- if (!(compression = qemuMigrationCompressionParse(NULL, 0, flags)))
+ if (!(compression = qemuMigrationAnyCompressionParse(NULL, 0, flags)))
goto cleanup;
VIR_DEBUG("Prepare2 %p", dconn);
VIR_DEBUG("Perform %p", sconn);
qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
if (flags & VIR_MIGRATE_TUNNELLED)
- ret = doTunnelMigrate(driver, vm, st, NULL,
- NULL, 0, NULL, NULL,
- flags, resource, dconn,
- NULL, 0, NULL, compression, &migParams);
+ ret = qemuMigrationSrcPerformTunnel(driver, vm, st, NULL,
+ NULL, 0, NULL, NULL,
+ flags, resource, dconn,
+ NULL, 0, NULL, compression, &migParams);
else
- ret = doNativeMigrate(driver, vm, NULL, uri_out,
- cookie, cookielen,
- NULL, NULL, /* No out cookie with v2 migration */
- flags, resource, dconn, NULL, 0, NULL,
- compression, &migParams);
+ ret = qemuMigrationSrcPerformNative(driver, vm, NULL, uri_out,
+ cookie, cookielen,
+ NULL, NULL, /* No out cookie with v2 migration */
+ flags, resource, dconn, NULL, 0, NULL,
+ compression, &migParams);
/* Perform failed. Make sure Finish doesn't overwrite the error */
if (ret < 0)
* instead of client app context & also adding in tunnel
* handling */
static int
-doPeer2PeerMigrate3(virQEMUDriverPtr driver,
- virConnectPtr sconn,
- virConnectPtr dconn,
- const char *dconnuri,
- virDomainObjPtr vm,
- const char *xmlin,
- const char *persist_xml,
- const char *dname,
- const char *uri,
- const char *graphicsuri,
- const char *listenAddress,
- size_t nmigrate_disks,
- const char **migrate_disks,
- int nbdPort,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams,
- unsigned long long bandwidth,
- bool useParams,
- unsigned long flags)
+qemuMigrationSrcPerformPeer2Peer3(virQEMUDriverPtr driver,
+ virConnectPtr sconn,
+ virConnectPtr dconn,
+ const char *dconnuri,
+ virDomainObjPtr vm,
+ const char *xmlin,
+ const char *persist_xml,
+ const char *dname,
+ const char *uri,
+ const char *graphicsuri,
+ const char *listenAddress,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ int nbdPort,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams,
+ unsigned long long bandwidth,
+ bool useParams,
+ unsigned long flags)
{
virDomainPtr ddomain = NULL;
char *uri_out = NULL;
* bit here, because we are already running inside the context of
* a single job. */
- dom_xml = qemuMigrationBeginPhase(driver, vm, xmlin, dname,
- &cookieout, &cookieoutlen,
- nmigrate_disks, migrate_disks, flags);
+ dom_xml = qemuMigrationSrcBeginPhase(driver, vm, xmlin, dname,
+ &cookieout, &cookieoutlen,
+ nmigrate_disks, migrate_disks, flags);
if (!dom_xml)
goto cleanup;
nbdPort) < 0)
goto cleanup;
- if (qemuMigrationCompressionDump(compression, ¶ms, &nparams,
- &maxparams, &flags) < 0)
+ if (qemuMigrationAnyCompressionDump(compression, ¶ms, &nparams,
+ &maxparams, &flags) < 0)
goto cleanup;
}
cookieout = NULL;
cookieoutlen = 0;
if (flags & VIR_MIGRATE_TUNNELLED) {
- ret = doTunnelMigrate(driver, vm, st, persist_xml,
- cookiein, cookieinlen,
- &cookieout, &cookieoutlen,
- flags, bandwidth, dconn, graphicsuri,
- nmigrate_disks, migrate_disks, compression,
- migParams);
+ ret = qemuMigrationSrcPerformTunnel(driver, vm, st, persist_xml,
+ cookiein, cookieinlen,
+ &cookieout, &cookieoutlen,
+ flags, bandwidth, dconn, graphicsuri,
+ nmigrate_disks, migrate_disks, compression,
+ migParams);
} else {
- ret = doNativeMigrate(driver, vm, persist_xml, uri,
- cookiein, cookieinlen,
- &cookieout, &cookieoutlen,
- flags, bandwidth, dconn, graphicsuri,
- nmigrate_disks, migrate_disks, compression,
- migParams);
+ ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri,
+ cookiein, cookieinlen,
+ &cookieout, &cookieoutlen,
+ flags, bandwidth, dconn, graphicsuri,
+ nmigrate_disks, migrate_disks, compression,
+ migParams);
}
/* Perform failed. Make sure Finish doesn't overwrite the error */
cookieinlen = cookieoutlen;
cookieout = NULL;
cookieoutlen = 0;
- ret = qemuMigrationConfirmPhase(driver, vm,
- cookiein, cookieinlen,
- flags, cancelled);
+ ret = qemuMigrationSrcConfirmPhase(driver, vm,
+ cookiein, cookieinlen,
+ flags, cancelled);
/* If Confirm3 returns -1, there's nothing more we can
* do, but fortunately worst case is that there is a
* domain left in 'paused' state on source.
static void
-qemuMigrationConnectionClosed(virConnectPtr conn,
- int reason,
- void *opaque)
+qemuMigrationSrcConnectionClosed(virConnectPtr conn,
+ int reason,
+ void *opaque)
{
virDomainObjPtr vm = opaque;
};
-static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
- virConnectPtr sconn,
- virDomainObjPtr vm,
- const char *xmlin,
- const char *persist_xml,
- const char *dconnuri,
- const char *uri,
- const char *graphicsuri,
- const char *listenAddress,
- size_t nmigrate_disks,
- const char **migrate_disks,
- int nbdPort,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams,
- unsigned long flags,
- const char *dname,
- unsigned long resource,
- bool *v3proto)
+static int
+qemuMigrationSrcPerformPeer2Peer(virQEMUDriverPtr driver,
+ virConnectPtr sconn,
+ virDomainObjPtr vm,
+ const char *xmlin,
+ const char *persist_xml,
+ const char *dconnuri,
+ const char *uri,
+ const char *graphicsuri,
+ const char *listenAddress,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ int nbdPort,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource,
+ bool *v3proto)
{
int ret = -1;
virConnectPtr dconn = NULL;
cfg->keepAliveCount) < 0)
goto cleanup;
- if (virConnectRegisterCloseCallback(dconn, qemuMigrationConnectionClosed,
+ if (virConnectRegisterCloseCallback(dconn, qemuMigrationSrcConnectionClosed,
vm, NULL) < 0) {
goto cleanup;
}
flags &= ~VIR_MIGRATE_CHANGE_PROTECTION;
if (*v3proto) {
- ret = doPeer2PeerMigrate3(driver, sconn, dconn, dconnuri, vm, xmlin,
- persist_xml, dname, uri, graphicsuri,
- listenAddress, nmigrate_disks, migrate_disks,
- nbdPort, compression, migParams, resource,
- useParams, flags);
+ ret = qemuMigrationSrcPerformPeer2Peer3(driver, sconn, dconn, dconnuri, vm, xmlin,
+ persist_xml, dname, uri, graphicsuri,
+ listenAddress, nmigrate_disks, migrate_disks,
+ nbdPort, compression, migParams, resource,
+ useParams, flags);
} else {
- ret = doPeer2PeerMigrate2(driver, sconn, dconn, vm,
- dconnuri, flags, dname, resource);
+ ret = qemuMigrationSrcPerformPeer2Peer2(driver, sconn, dconn, vm,
+ dconnuri, flags, dname, resource);
}
cleanup:
orig_err = virSaveLastError();
qemuDomainObjEnterRemote(vm);
- virConnectUnregisterCloseCallback(dconn, qemuMigrationConnectionClosed);
+ virConnectUnregisterCloseCallback(dconn, qemuMigrationSrcConnectionClosed);
virObjectUnref(dconn);
qemuDomainObjExitRemote(vm);
if (orig_err) {
* perform phase of v2 non-peer2peer migration.
*/
static int
-qemuMigrationPerformJob(virQEMUDriverPtr driver,
- virConnectPtr conn,
- virDomainObjPtr vm,
- const char *xmlin,
- const char *persist_xml,
- const char *dconnuri,
- const char *uri,
- const char *graphicsuri,
- const char *listenAddress,
- size_t nmigrate_disks,
- const char **migrate_disks,
- int nbdPort,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- const char *dname,
- unsigned long resource,
- bool v3proto)
+qemuMigrationSrcPerformJob(virQEMUDriverPtr driver,
+ virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *xmlin,
+ const char *persist_xml,
+ const char *dconnuri,
+ const char *uri,
+ const char *graphicsuri,
+ const char *listenAddress,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ int nbdPort,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource,
+ bool v3proto)
{
virObjectEventPtr event = NULL;
int ret = -1;
goto endjob;
}
- if (!qemuMigrationIsAllowed(driver, vm, true, flags))
+ if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
goto endjob;
if (!(flags & (VIR_MIGRATE_UNSAFE | VIR_MIGRATE_OFFLINE)) &&
- !qemuMigrationIsSafe(vm->def, nmigrate_disks, migrate_disks, flags))
+ !qemuMigrationSrcIsSafe(vm->def, nmigrate_disks, migrate_disks, flags))
goto endjob;
- qemuMigrationStoreDomainState(vm);
+ qemuMigrationSrcStoreDomainState(vm);
if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
- ret = doPeer2PeerMigrate(driver, conn, vm, xmlin, persist_xml,
- dconnuri, uri, graphicsuri, listenAddress,
- nmigrate_disks, migrate_disks, nbdPort,
- compression, migParams, flags, dname, resource,
- &v3proto);
+ ret = qemuMigrationSrcPerformPeer2Peer(driver, conn, vm, xmlin, persist_xml,
+ dconnuri, uri, graphicsuri, listenAddress,
+ nmigrate_disks, migrate_disks, nbdPort,
+ compression, migParams, flags, dname, resource,
+ &v3proto);
} else {
qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
- ret = doNativeMigrate(driver, vm, persist_xml, uri, cookiein, cookieinlen,
- cookieout, cookieoutlen,
- flags, resource, NULL, NULL, 0, NULL,
- compression, migParams);
+ ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, resource, NULL, NULL, 0, NULL,
+ compression, migParams);
}
if (ret < 0)
goto endjob;
* here
*/
if (!v3proto && ret < 0)
- qemuMigrationReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
+ qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
- if (qemuMigrationRestoreDomainState(driver, vm)) {
+ if (qemuMigrationSrcRestoreDomainState(driver, vm)) {
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_RESUMED,
VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
* This implements perform phase of v3 migration protocol.
*/
static int
-qemuMigrationPerformPhase(virQEMUDriverPtr driver,
- virConnectPtr conn,
- virDomainObjPtr vm,
- const char *persist_xml,
- const char *uri,
- const char *graphicsuri,
- size_t nmigrate_disks,
- const char **migrate_disks,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- unsigned long resource)
+qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,
+ virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *persist_xml,
+ const char *uri,
+ const char *graphicsuri,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ unsigned long resource)
{
virObjectEventPtr event = NULL;
int ret = -1;
qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
virCloseCallbacksUnset(driver->closeCallbacks, vm,
- qemuMigrationCleanup);
+ qemuMigrationSrcCleanup);
- ret = doNativeMigrate(driver, vm, persist_xml, uri, cookiein, cookieinlen,
- cookieout, cookieoutlen,
- flags, resource, NULL, graphicsuri,
- nmigrate_disks, migrate_disks, compression, migParams);
+ ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, resource, NULL, graphicsuri,
+ nmigrate_disks, migrate_disks, compression, migParams);
if (ret < 0) {
- if (qemuMigrationRestoreDomainState(driver, vm)) {
+ if (qemuMigrationSrcRestoreDomainState(driver, vm)) {
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_RESUMED,
VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3_DONE);
if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
- qemuMigrationCleanup) < 0)
+ qemuMigrationSrcCleanup) < 0)
goto endjob;
endjob:
if (ret < 0) {
- qemuMigrationReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
+ qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
qemuMigrationJobFinish(driver, vm);
} else {
qemuMigrationJobContinue(vm);
}
int
-qemuMigrationPerform(virQEMUDriverPtr driver,
- virConnectPtr conn,
- virDomainObjPtr vm,
- const char *xmlin,
- const char *persist_xml,
- const char *dconnuri,
- const char *uri,
- const char *graphicsuri,
- const char *listenAddress,
- size_t nmigrate_disks,
- const char **migrate_disks,
- int nbdPort,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- const char *dname,
- unsigned long resource,
- bool v3proto)
+qemuMigrationSrcPerform(virQEMUDriverPtr driver,
+ virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *xmlin,
+ const char *persist_xml,
+ const char *dconnuri,
+ const char *uri,
+ const char *graphicsuri,
+ const char *listenAddress,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ int nbdPort,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource,
+ bool v3proto)
{
VIR_DEBUG("driver=%p, conn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
"uri=%s, graphicsuri=%s, listenAddress=%s, "
return -1;
}
- return qemuMigrationPerformJob(driver, conn, vm, xmlin, persist_xml, dconnuri, uri,
- graphicsuri, listenAddress,
- nmigrate_disks, migrate_disks, nbdPort,
- compression, migParams,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- flags, dname, resource, v3proto);
+ return qemuMigrationSrcPerformJob(driver, conn, vm, xmlin, persist_xml, dconnuri, uri,
+ graphicsuri, listenAddress,
+ nmigrate_disks, migrate_disks, nbdPort,
+ compression, migParams,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, dname, resource, v3proto);
} else {
if (dconnuri) {
virReportError(VIR_ERR_INTERNAL_ERROR,
}
if (v3proto) {
- return qemuMigrationPerformPhase(driver, conn, vm, persist_xml, uri,
- graphicsuri,
- nmigrate_disks, migrate_disks,
- compression, migParams,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- flags, resource);
+ return qemuMigrationSrcPerformPhase(driver, conn, vm, persist_xml, uri,
+ graphicsuri,
+ nmigrate_disks, migrate_disks,
+ compression, migParams,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, resource);
} else {
- return qemuMigrationPerformJob(driver, conn, vm, xmlin, persist_xml, NULL,
- uri, graphicsuri, listenAddress,
- nmigrate_disks, migrate_disks, nbdPort,
- compression, migParams,
- cookiein, cookieinlen,
- cookieout, cookieoutlen, flags,
- dname, resource, v3proto);
+ return qemuMigrationSrcPerformJob(driver, conn, vm, xmlin, persist_xml, NULL,
+ uri, graphicsuri, listenAddress,
+ nmigrate_disks, migrate_disks, nbdPort,
+ compression, migParams,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen, flags,
+ dname, resource, v3proto);
}
}
}
static int
-qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def)
+qemuMigrationDstVPAssociatePortProfiles(virDomainDefPtr def)
{
size_t i;
int last_good_net = -1;
static int
-qemuMigrationPersist(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuMigrationCookiePtr mig,
- bool ignoreSaveError)
+qemuMigrationDstPersist(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuMigrationCookiePtr mig,
+ bool ignoreSaveError)
{
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
virCapsPtr caps = NULL;
virDomainPtr
-qemuMigrationFinish(virQEMUDriverPtr driver,
- virConnectPtr dconn,
- virDomainObjPtr vm,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- int retcode,
- bool v3proto)
+qemuMigrationDstFinish(virQEMUDriverPtr driver,
+ virConnectPtr dconn,
+ virDomainObjPtr vm,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ int retcode,
+ bool v3proto)
{
virDomainPtr dom = NULL;
qemuMigrationCookiePtr mig = NULL;
priv->migrationPort = 0;
if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) {
- qemuMigrationErrorReport(driver, vm->def->name);
+ qemuMigrationDstErrorReport(driver, vm->def->name);
goto cleanup;
}
v3proto ? QEMU_MIGRATION_PHASE_FINISH3
: QEMU_MIGRATION_PHASE_FINISH2);
- qemuDomainCleanupRemove(vm, qemuMigrationPrepareCleanup);
+ qemuDomainCleanupRemove(vm, qemuMigrationDstPrepareCleanup);
VIR_FREE(priv->job.completed);
cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK |
if (flags & VIR_MIGRATE_OFFLINE) {
if (retcode == 0 &&
- qemuMigrationPersist(driver, vm, mig, false) == 0)
+ qemuMigrationDstPersist(driver, vm, mig, false) == 0)
dom = virGetDomain(dconn, vm->def->name, vm->def->uuid, -1);
goto endjob;
}
if (!virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("guest unexpectedly quit"));
- qemuMigrationErrorReport(driver, vm->def->name);
+ qemuMigrationDstErrorReport(driver, vm->def->name);
goto endjob;
}
- if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0)
+ if (qemuMigrationDstVPAssociatePortProfiles(vm->def) < 0)
goto endjob;
- if (mig->network && qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
+ if (mig->network && qemuMigrationDstOPDRelocate(driver, vm, mig) < 0)
VIR_WARN("unable to provide network data for relocation");
- if (qemuMigrationStopNBDServer(driver, vm, mig) < 0)
+ if (qemuMigrationDstStopNBDServer(driver, vm, mig) < 0)
goto endjob;
if (qemuRefreshVirtioChannelState(driver, vm,
goto endjob;
if (flags & VIR_MIGRATE_PERSIST_DEST) {
- if (qemuMigrationPersist(driver, vm, mig, !v3proto) < 0) {
+ if (qemuMigrationDstPersist(driver, vm, mig, !v3proto) < 0) {
/* Hmpf. Migration was successful, but making it persistent
* was not. If we report successful, then when this domain
* shuts down, management tools are in for a surprise. On the
/* We need to wait for QEMU to process all data sent by the source
* before starting guest CPUs.
*/
- if (qemuMigrationWaitForDestCompletion(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN,
- !!(flags & VIR_MIGRATE_POSTCOPY)) < 0) {
+ if (qemuMigrationDstWaitForCompletion(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_IN,
+ !!(flags & VIR_MIGRATE_POSTCOPY)) < 0) {
/* There's not much we can do for v2 protocol since the
* original domain on the source host is already gone.
*/
}
if (inPostCopy) {
- if (qemuMigrationWaitForDestCompletion(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN,
- false) < 0) {
+ if (qemuMigrationDstWaitForCompletion(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_IN,
+ false) < 0) {
goto endjob;
}
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
VIR_DOMAIN_EVENT_STOPPED_FAILED);
qemuDomainEventQueue(driver, event);
} else {
- qemuMigrationPostcopyFailed(driver, vm);
+ qemuMigrationAnyPostcopyFailed(driver, vm);
}
}
VIR_FREE(priv->job.completed);
}
- qemuMigrationReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
+ qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
qemuMigrationJobFinish(driver, vm);
if (!virDomainObjIsActive(vm))
/* Helper function called while vm is active. */
int
-qemuMigrationToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
- int fd,
- const char *compressor,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
+ int fd,
+ const char *compressor,
+ qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int rc;
if (rc < 0)
goto cleanup;
- rc = qemuMigrationWaitForCompletion(driver, vm, asyncJob, NULL, 0);
+ rc = qemuMigrationSrcWaitForCompletion(driver, vm, asyncJob, NULL, 0);
if (rc < 0) {
if (rc == -2) {
int
-qemuMigrationCancel(virQEMUDriverPtr driver,
- virDomainObjPtr vm)
+qemuMigrationSrcCancel(virQEMUDriverPtr driver,
+ virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virHashTablePtr blockJobs = NULL;
}
}
- if (qemuMigrationCancelDriveMirror(driver, vm, false,
- QEMU_ASYNC_JOB_NONE, NULL) < 0)
+ if (qemuMigrationSrcCancelDriveMirror(driver, vm, false,
+ QEMU_ASYNC_JOB_NONE, NULL) < 0)
goto endsyncjob;
ret = 0;
static void
-qemuMigrationErrorFree(void *data,
-                       const void *name ATTRIBUTE_UNUSED)
+qemuMigrationDstErrorFree(void *data,
+                          const void *name ATTRIBUTE_UNUSED)
{
virErrorPtr err = data;
}
int
-qemuMigrationErrorInit(virQEMUDriverPtr driver)
+qemuMigrationDstErrorInit(virQEMUDriverPtr driver)
{
- driver->migrationErrors = virHashAtomicNew(64, qemuMigrationErrorFree);
+ driver->migrationErrors = virHashAtomicNew(64, qemuMigrationDstErrorFree);
if (driver->migrationErrors)
return 0;
else
* invalid after calling this function.
*/
void
-qemuMigrationErrorSave(virQEMUDriverPtr driver,
- const char *name,
- virErrorPtr err)
+qemuMigrationDstErrorSave(virQEMUDriverPtr driver,
+ const char *name,
+ virErrorPtr err)
{
if (!err)
return;
}
void
-qemuMigrationErrorReport(virQEMUDriverPtr driver,
- const char *name)
+qemuMigrationDstErrorReport(virQEMUDriverPtr driver,
+ const char *name)
{
virErrorPtr err;
/* don't ever pass NULL params with non zero nparams */
qemuMigrationCompressionPtr
-qemuMigrationCompressionParse(virTypedParameterPtr params,
- int nparams,
- unsigned long flags)
+qemuMigrationAnyCompressionParse(virTypedParameterPtr params,
+ int nparams,
+ unsigned long flags)
{
size_t i;
qemuMigrationCompressionPtr compression = NULL;
}
int
-qemuMigrationCompressionDump(qemuMigrationCompressionPtr compression,
- virTypedParameterPtr *params,
- int *nparams,
- int *maxparams,
- unsigned long *flags)
+qemuMigrationAnyCompressionDump(qemuMigrationCompressionPtr compression,
+ virTypedParameterPtr *params,
+ int *nparams,
+ int *maxparams,
+ unsigned long *flags)
{
size_t i;
/*
- * qemuMigrationReset:
+ * qemuMigrationParamsReset:
*
* Reset all migration parameters so that the next job which internally uses
* migration (save, managedsave, snapshots, dump) will not try to use them.
*/
void
-qemuMigrationReset(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob job)
+qemuMigrationParamsReset(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob job)
{
qemuMonitorMigrationCaps cap;
virErrorPtr err = virSaveLastError();
if (!virDomainObjIsActive(vm))
goto cleanup;
- if (qemuMigrationResetTLS(driver, vm, job) < 0)
+ if (qemuMigrationParamsResetTLS(driver, vm, job) < 0)
goto cleanup;
for (cap = 0; cap < QEMU_MONITOR_MIGRATION_CAPS_LAST; cap++) {
- if (qemuMigrationCapsGet(vm, cap) &&
- qemuMigrationSetOption(driver, vm, cap, false, job) < 0)
+ if (qemuMigrationAnyCapsGet(vm, cap) &&
+ qemuMigrationOptionSet(driver, vm, cap, false, job) < 0)
goto cleanup;
}
int
-qemuMigrationFetchMirrorStats(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
- qemuDomainJobInfoPtr jobInfo)
+qemuMigrationSrcFetchMirrorStats(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob,
+ qemuDomainJobInfoPtr jobInfo)
{
size_t i;
qemuDomainObjPrivatePtr priv = vm->privateData;
bool
-qemuMigrationCapsGet(virDomainObjPtr vm,
- qemuMonitorMigrationCaps cap)
+qemuMigrationAnyCapsGet(virDomainObjPtr vm,
+ qemuMonitorMigrationCaps cap)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
bool enabled = false;