#include "virterror_internal.h"
#include "logging.h"
#include "datatypes.h"
-#include "libvirt_internal.h"
#include "driver.h"
#include "uuid.h"
return ddomain;
}
+
+ /*
+ * This is sort of a migration v3
+ *
+ * In this version, the client does not talk to the destination
+ * libvirtd. The source libvirtd will still try to talk to the
+ * destination libvirtd though, and will do the prepare/perform/finish
+ * steps.
+ */
+/* uri must be a valid libvirt connection URI by which the source
+ * libvirtd can reach the destination libvirtd.  Returns 0 on
+ * success, -1 on failure, raising the error on the source
+ * connection. */
+static int
+virDomainMigratePeer2Peer (virDomainPtr domain,
+ unsigned long flags,
+ const char *dname,
+ const char *uri,
+ unsigned long bandwidth)
+{
+ /* In peer2peer mode the source driver drives the entire
+ * migration itself via the perform step, so that entry point
+ * is mandatory. */
+ if (!domain->conn->driver->domainMigratePerform) {
+ virLibConnError (domain->conn, VIR_ERR_NO_SUPPORT, __FUNCTION__);
+ return -1;
+ }
+
+ /* Perform the migration. The driver isn't supposed to return
+ * until the migration is complete.
+ */
+ return domain->conn->driver->domainMigratePerform(domain,
+ NULL, /* cookie */
+ 0, /* cookielen */
+ uri,
+ flags,
+ dname,
+ bandwidth);
+}
+
+
/*
- * Tunnelled migration has the following flow:
+ * This is a variation on v1 & 2 migration
+ *
+ * This is for hypervisors which can directly handshake
+ * without any libvirtd involvement on destination either
+ * from client, or source libvirt.
*
- * virDomainMigrate(src, uri)
- * - virDomainMigratePerform(src, uri)
- * - dst = virConnectOpen(uri)
- * - virDomainMigratePrepareTunnel(dst)
- * - while (1)
- * - virStreamSend(dst, data)
- * - virDomainMigrateFinish(dst)
- * - virConnectClose(dst)
+ * eg, XenD can talk direct to XenD, so libvirtd on dest
+ * does not need to be involved at all, or even running
*/
-static virDomainPtr
-virDomainMigrateTunnelled(virDomainPtr domain,
- virConnectPtr dconn,
- unsigned long flags,
- const char *dname,
- const char *uri,
- unsigned long bandwidth)
-{
+/* uri is a hypervisor-specific migration address; no destination
+ * libvirtd is involved at all.  Returns 0 on success, -1 on
+ * failure, raising the error on the source connection. */
+static int
+virDomainMigrateDirect (virDomainPtr domain,
+ unsigned long flags,
+ const char *dname,
+ const char *uri,
+ unsigned long bandwidth)
+{
+ if (!domain->conn->driver->domainMigratePerform) {
+ virLibConnError (domain->conn, VIR_ERR_NO_SUPPORT, __FUNCTION__);
+ return -1;
+ }
+
 /* Perform the migration. The driver isn't supposed to return
 * until the migration is complete.
 */
- if (domain->conn->driver->domainMigratePerform
- (domain, NULL, 0, uri, flags, dname, bandwidth) == -1)
- return NULL;
-
- return virDomainLookupByName(dconn, dname ? dname : domain->name);
+ return domain->conn->driver->domainMigratePerform(domain,
+ NULL, /* cookie */
+ 0, /* cookielen */
+ uri,
+ flags,
+ dname,
+ bandwidth);
}
+
/**
* virDomainMigrate:
* @domain: a domain object
* host given by dconn (a connection to the destination host).
*
 * Flags may be one or more of the following:
- * VIR_MIGRATE_LIVE Attempt a live migration.
- * VIR_MIGRATE_TUNNELLED Attempt to do a migration tunnelled through the
- * libvirt RPC mechanism
+ * VIR_MIGRATE_LIVE Do not pause the VM during migration
+ * VIR_MIGRATE_PEER2PEER Direct connection between source & destination hosts
+ * VIR_MIGRATE_TUNNELLED Tunnel migration data over the libvirt RPC channel
+ *
+ * VIR_MIGRATE_TUNNELLED requires that VIR_MIGRATE_PEER2PEER be set.
+ * Applications using the VIR_MIGRATE_PEER2PEER flag will probably
+ * prefer to invoke virDomainMigrateToURI, avoiding the need to
+ * open a connection to the destination host themselves.
*
* If a hypervisor supports renaming domains during migration,
* then you may set the dname parameter to the new name (otherwise
* it keeps the same name). If this is not supported by the
* hypervisor, dname must be NULL or else you will get an error.
*
- * Since typically the two hypervisors connect directly to each
- * other in order to perform the migration, you may need to specify
- * a path from the source to the destination. This is the purpose
- * of the uri parameter. If uri is NULL, then libvirt will try to
- * find the best method. Uri may specify the hostname or IP address
- * of the destination host as seen from the source. Or uri may be
- * a URI giving transport, hostname, user, port, etc. in the usual
- * form. Refer to driver documentation for the particular URIs
- * supported.
+ * If the VIR_MIGRATE_PEER2PEER flag is set, the uri parameter
+ * must be a valid libvirt connection URI, by which the source
+ * libvirt driver can connect to the destination libvirt. If
+ * omitted, the dconn connection object will be queried for its
+ * current URI.
+ *
+ * If the VIR_MIGRATE_PEER2PEER flag is NOT set, the URI parameter
+ * takes a hypervisor specific format. The hypervisor capabilities
+ * XML includes details of the supported URI schemes. If omitted,
+ * the dconn will be asked for a default URI.
+ *
+ * In either case it is typically only necessary to specify a
+ * URI if the destination host has multiple interfaces and a
+ * specific interface is required to transmit migration data.
*
* The maximum bandwidth (in Mbps) that will be used to do migration
* can be specified with the bandwidth parameter. If set to 0,
goto error;
}
- if (flags & VIR_MIGRATE_TUNNELLED) {
- char *dstURI = NULL;
- if (uri == NULL) {
- dstURI = virConnectGetURI(dconn);
- if (!uri)
- return NULL;
- }
+ if (flags & VIR_MIGRATE_PEER2PEER) {
+ if (VIR_DRV_SUPPORTS_FEATURE (domain->conn->driver, domain->conn,
+ VIR_DRV_FEATURE_MIGRATION_P2P)) {
+ char *dstURI = NULL;
+ if (uri == NULL) {
+ dstURI = virConnectGetURI(dconn);
+ /* BUG FIX: check the virConnectGetURI() result, not
+ * 'uri' (which is known to be NULL inside this branch);
+ * the old test made every peer2peer migration without
+ * an explicit uri return NULL unconditionally. */
+ if (!dstURI)
+ return NULL;
+ }
- ddomain = virDomainMigrateTunnelled(domain, dconn, flags, dname, uri ? uri : dstURI, bandwidth);
+ if (virDomainMigratePeer2Peer(domain, flags, dname, uri ? uri : dstURI, bandwidth) < 0) {
+ VIR_FREE(dstURI);
+ goto error;
+ }
+ VIR_FREE(dstURI);
- VIR_FREE(dstURI);
+ ddomain = virDomainLookupByName (dconn, dname ? dname : domain->name);
+ } else {
+ /* This driver does not support peer to peer migration */
+ virLibConnError (domain->conn, VIR_ERR_NO_SUPPORT, __FUNCTION__);
+ goto error;
+ }
} else {
+ if (flags & VIR_MIGRATE_TUNNELLED) {
+ virLibConnError(domain->conn, VIR_ERR_OPERATION_INVALID,
+ _("cannot perform tunnelled migration without using peer2peer flag"));
+ goto error;
+ }
+
/* Check that migration is supported by both drivers. */
if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
VIR_DRV_FEATURE_MIGRATION_V1) &&
VIR_DRV_FEATURE_MIGRATION_V2))
ddomain = virDomainMigrateVersion2(domain, dconn, flags, dname, uri, bandwidth);
else {
+ /* This driver does not support any migration method */
virLibConnError(domain->conn, VIR_ERR_NO_SUPPORT, __FUNCTION__);
goto error;
}
}
- if (ddomain == NULL)
- goto error;
+ if (ddomain == NULL)
+ goto error;
return ddomain;
return NULL;
}
+
+/**
+ * virDomainMigrateToURI:
+ * @domain: a domain object
+ * @duri: mandatory URI for the destination host
+ * @flags: flags
+ * @dname: (optional) rename domain to this at destination
+ * @bandwidth: (optional) specify migration bandwidth limit in Mbps
+ *
+ * Migrate the domain object from its current host to the destination
+ * host given by duri.
+ *
+ * Flags may be one or more of the following:
+ * VIR_MIGRATE_LIVE Do not pause the VM during migration
+ * VIR_MIGRATE_PEER2PEER Direct connection between source & destination hosts
+ * VIR_MIGRATE_TUNNELLED Tunnel migration data over the libvirt RPC channel
+ *
+ * VIR_MIGRATE_TUNNELLED requires that VIR_MIGRATE_PEER2PEER be set.
+ * Applications using the VIR_MIGRATE_PEER2PEER flag will probably
+ * prefer to invoke virDomainMigrateToURI, avoiding the need to
+ * open a connection to the destination host themselves.
+ *
+ * If a hypervisor supports renaming domains during migration,
+ * then you may set the dname parameter to the new name (otherwise
+ * it keeps the same name). If this is not supported by the
+ * hypervisor, dname must be NULL or else you will get an error.
+ *
+ * If the VIR_MIGRATE_PEER2PEER flag is set, the duri parameter
+ * must be a valid libvirt connection URI, by which the source
+ * libvirt driver can connect to the destination libvirt. The
+ * duri parameter is mandatory for this API; unlike virDomainMigrate
+ * there is no destination connection object to query for a default.
+ *
+ * If the VIR_MIGRATE_PEER2PEER flag is NOT set, the duri parameter
+ * takes a hypervisor specific format. The hypervisor capabilities
+ * XML includes details of the supported URI schemes. Not all
+ * hypervisors will support this mode of migration, so if the
+ * VIR_MIGRATE_PEER2PEER flag is not set, then it may be necessary
+ * to use the alternative virDomainMigrate API providing an
+ * explicit virConnectPtr for the destination host.
+ *
+ * The maximum bandwidth (in Mbps) that will be used to do migration
+ * can be specified with the bandwidth parameter. If set to 0,
+ * libvirt will choose a suitable default. Some hypervisors do
+ * not support this feature and will return an error if bandwidth
+ * is not 0.
+ *
+ * To see which features are supported by the current hypervisor,
+ * see virConnectGetCapabilities, /capabilities/host/migration_features.
+ *
+ * There are many limitations on migration imposed by the underlying
+ * technology - for example it may not be possible to migrate between
+ * different processors even with the same architecture, or between
+ * different types of hypervisor.
+ *
+ * Returns 0 if the migration succeeded, -1 upon error.
+ */
+int
+virDomainMigrateToURI (virDomainPtr domain,
+ const char *duri,
+ unsigned long flags,
+ const char *dname,
+ unsigned long bandwidth)
+{
+ DEBUG("domain=%p, duri=%p, flags=%lu, dname=%s, bandwidth=%lu",
+ domain, NULLSTR(duri), flags, NULLSTR(dname), bandwidth);
+
+ virResetLastError();
+
+ /* First check the source domain object and its connection */
+ if (!VIR_IS_CONNECTED_DOMAIN (domain)) {
+ virLibDomainError(NULL, VIR_ERR_INVALID_DOMAIN, __FUNCTION__);
+ return -1;
+ }
+ if (domain->conn->flags & VIR_CONNECT_RO) {
+ virLibDomainError(domain, VIR_ERR_OPERATION_DENIED, __FUNCTION__);
+ goto error;
+ }
+
+ /* Unlike virDomainMigrate there is no destination connection
+ * to fall back on, so the destination URI is mandatory here. */
+ if (duri == NULL) {
+ virLibConnError (domain->conn, VIR_ERR_INVALID_ARG, __FUNCTION__);
+ goto error;
+ }
+
+ if (flags & VIR_MIGRATE_PEER2PEER) {
+ if (VIR_DRV_SUPPORTS_FEATURE (domain->conn->driver, domain->conn,
+ VIR_DRV_FEATURE_MIGRATION_P2P)) {
+ if (virDomainMigratePeer2Peer (domain, flags, dname, duri, bandwidth) < 0)
+ goto error;
+ } else {
+ /* No peer to peer migration supported */
+ virLibConnError (domain->conn, VIR_ERR_NO_SUPPORT, __FUNCTION__);
+ goto error;
+ }
+ } else {
+ if (VIR_DRV_SUPPORTS_FEATURE (domain->conn->driver, domain->conn,
+ VIR_DRV_FEATURE_MIGRATION_DIRECT)) {
+ if (virDomainMigrateDirect (domain, flags, dname, duri, bandwidth) < 0)
+ goto error;
+ } else {
+ /* Cannot do a migration with only the perform step */
+ virLibConnError (domain->conn, VIR_ERR_NO_SUPPORT, __FUNCTION__);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ /* Copy to connection error object for backward compatibility */
+ virSetConnError(domain->conn);
+ return -1;
+}
+
+
/*
* Not for public use. This function is part of the internal
* implementation of migration in the remote case.
qemudSupportsFeature (virConnectPtr conn ATTRIBUTE_UNUSED, int feature)
{
switch (feature) {
- case VIR_DRV_FEATURE_MIGRATION_V2: return 1;
- default: return 0;
+ case VIR_DRV_FEATURE_MIGRATION_V2:
+ case VIR_DRV_FEATURE_MIGRATION_P2P:
+ return 1;
+ default:
+ return 0;
}
}
unsigned long resource)
{
int ret = -1;
- xmlURIPtr uribits;
+ xmlURIPtr uribits = NULL;
int status;
unsigned long long transferred, remaining, total;
}
+/* This is essentially a simplified re-impl of
+ * virDomainMigrateVersion2 from libvirt.c, but running in source
+ * libvirtd context, instead of client app context.
+ *
+ * Returns 0 on success, -1 on failure; errors are reported by the
+ * prepare2 call or via qemudReportError. */
+static int doNonTunnelMigrate(virDomainPtr dom,
+ virConnectPtr dconn,
+ virDomainObjPtr vm,
+ const char *dom_xml,
+ const char *uri ATTRIBUTE_UNUSED,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource)
+{
+ virDomainPtr ddomain = NULL;
+ int retval = -1;
+ char *uri_out = NULL;
+
+ /* NB we don't pass 'uri' into this, since that's the libvirtd
+ * URI in this context - so we let dest pick it */
+ if (dconn->driver->domainMigratePrepare2(dconn,
+ NULL, /* cookie */
+ 0, /* cookielen */
+ NULL, /* uri */
+ &uri_out,
+ flags, dname,
+ resource, dom_xml) < 0)
+ /* domainMigratePrepare2 sets the error for us */
+ goto cleanup;
+
+ if (uri_out == NULL) {
+ qemudReportError(NULL, NULL, NULL, VIR_ERR_INTERNAL_ERROR, "%s",
+ _("domainMigratePrepare2 did not set uri"));
+ /* BUG FIX: previously this fell through and invoked
+ * doNativeMigrate() with a NULL uri */
+ goto cleanup;
+ }
+
+ if (doNativeMigrate(dom, vm, uri_out, flags, dname, resource) < 0)
+ goto finish;
+
+ retval = 0;
+
+finish:
+ dname = dname ? dname : dom->name;
+ ddomain = dconn->driver->domainMigrateFinish2
+ (dconn, dname, NULL, 0, uri_out, flags, retval);
+
+ if (ddomain)
+ virUnrefDomain(ddomain);
+
+cleanup:
+ VIR_FREE(uri_out); /* allocated by domainMigratePrepare2 */
+ return retval;
+}
+
+
static int doPeer2PeerMigrate(virDomainPtr dom,
virDomainObjPtr vm,
const char *uri,
return -1;
}
if (!VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
- VIR_DRV_FEATURE_MIGRATION_V2)) {
+ VIR_DRV_FEATURE_MIGRATION_P2P)) {
qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED, "%s",
- _("Destination libvirt does not support required migration protocol 2"));
+ _("Destination libvirt does not support peer-to-peer migration protocol"));
goto cleanup;
}
goto cleanup;
}
- ret = doTunnelMigrate(dom, dconn, vm, dom_xml, uri, flags, dname, resource);
+ if (flags & VIR_MIGRATE_TUNNELLED)
+ ret = doTunnelMigrate(dom, dconn, vm, dom_xml, uri, flags, dname, resource);
+ else
+ ret = doNonTunnelMigrate(dom, dconn, vm, dom_xml, uri, flags, dname, resource);
cleanup:
VIR_FREE(dom_xml);
static const vshCmdOptDef opts_migrate[] = {
{"live", VSH_OT_BOOL, 0, gettext_noop("live migration")},
+ {"p2p", VSH_OT_BOOL, 0, gettext_noop("peer-2-peer migration")},
+ {"direct", VSH_OT_BOOL, 0, gettext_noop("direct migration")},
{"tunnelled", VSH_OT_BOOL, 0, gettext_noop("tunnelled migration")},
{"domain", VSH_OT_DATA, VSH_OFLAG_REQ, gettext_noop("domain name, id or uuid")},
{"desturi", VSH_OT_DATA, VSH_OFLAG_REQ, gettext_noop("connection URI of the destination host")},
const char *migrateuri;
const char *dname;
int flags = 0, found, ret = FALSE;
- virConnectPtr dconn = NULL;
- virDomainPtr ddom = NULL;
if (!vshConnectionUsability (ctl, ctl->conn, TRUE))
return FALSE;
if (vshCommandOptBool (cmd, "live"))
flags |= VIR_MIGRATE_LIVE;
-
+ if (vshCommandOptBool (cmd, "p2p"))
+ flags |= VIR_MIGRATE_PEER2PEER;
if (vshCommandOptBool (cmd, "tunnelled"))
flags |= VIR_MIGRATE_TUNNELLED;
- if (!(flags & VIR_MIGRATE_TUNNELLED)) {
- /* For regular live migration, temporarily connect to the destination
- * host. For tunnelled migration, that will be done by the remote
- * libvirtd.
- */
- dconn = virConnectOpenAuth(desturi, virConnectAuthPtrDefault, 0);
- if (!dconn) goto done;
- }
- else {
- /* when doing tunnelled migration, use migrateuri if it's available,
- * but if not, fall back to desturi. This allows both of these
- * to work:
- *
- * virsh migrate guest qemu+tls://dest/system
- * virsh migrate guest qemu+tls://dest/system qemu+tls://dest-alt/system
- */
- if (migrateuri == NULL)
- migrateuri = desturi;
- }
+ if ((flags & VIR_MIGRATE_PEER2PEER) ||
+ vshCommandOptBool (cmd, "direct")) {
+ /* For peer2peer migration or direct migration we expect only one
+ * URI: a libvirt connection URI for peer2peer, or a hypervisor
+ * specific URI for direct migration. */
- /* Migrate. */
- ddom = virDomainMigrate(dom, dconn, flags, dname, migrateuri, 0);
- if (!ddom) goto done;
+ if (migrateuri != NULL) {
+ vshError(ctl, FALSE, "%s", _("migrate: Unexpected migrateuri for peer2peer/direct migration"));
+ goto done;
+ }
- ret = TRUE;
+ if (virDomainMigrateToURI (dom, desturi, flags, dname, 0) == 0)
+ ret = TRUE;
+ } else {
+ /* For traditional live migration, connect to the destination host directly. */
+ virConnectPtr dconn = NULL;
+ virDomainPtr ddom = NULL;
+
+ dconn = virConnectOpenAuth (desturi, virConnectAuthPtrDefault, 0);
+ if (!dconn) goto done;
+
+ ddom = virDomainMigrate (dom, dconn, flags, dname, migrateuri, 0);
+ if (ddom) {
+ virDomainFree(ddom);
+ ret = TRUE;
+ }
+ virConnectClose (dconn);
+ }
done:
if (dom) virDomainFree (dom);
- if (ddom) virDomainFree (ddom);
- if (dconn) virConnectClose (dconn);
return ret;
}