<exports symbol='VIR_MIGRATE_LIVE' type='enum'/>
<exports symbol='VIR_DOMAIN_EVENT_STOPPED_DESTROYED' type='enum'/>
<exports symbol='VIR_DOMAIN_EVENT_DEFINED_ADDED' type='enum'/>
+ <exports symbol='VIR_VCPU_BLOCKED' type='enum'/>
<exports symbol='VIR_SECRET_USAGE_TYPE_NONE' type='enum'/>
<exports symbol='VIR_DOMAIN_EVENT_STARTED_MIGRATED' type='enum'/>
<exports symbol='VIR_STREAM_EVENT_HANGUP' type='enum'/>
<exports symbol='VIR_STREAM_EVENT_WRITABLE' type='enum'/>
<exports symbol='VIR_DOMAIN_SCHED_FIELD_DOUBLE' type='enum'/>
<exports symbol='VIR_DOMAIN_SCHED_FIELD_LLONG' type='enum'/>
- <exports symbol='VIR_VCPU_BLOCKED' type='enum'/>
+ <exports symbol='VIR_MIGRATE_TUNNELLED' type='enum'/>
<exports symbol='VIR_DOMAIN_SCHED_FIELD_BOOLEAN' type='enum'/>
<exports symbol='VIR_DOMAIN_XML_INACTIVE' type='enum'/>
<exports symbol='VIR_STORAGE_VOL_BLOCK' type='enum'/>
<enum name='VIR_FROM_XML' file='virterror' value='5' type='virErrorDomain' info='Error in the XML code'/>
<enum name='VIR_MEMORY_PHYSICAL' file='libvirt' value='2' type='virDomainMemoryFlags' info=' addresses are physical addresses'/>
<enum name='VIR_MEMORY_VIRTUAL' file='libvirt' value='1' type='virDomainMemoryFlags' info='addresses are virtual addresses'/>
- <enum name='VIR_MIGRATE_LIVE' file='libvirt' value='1' type='virDomainMigrateFlags' info=' live migration'/>
+ <enum name='VIR_MIGRATE_LIVE' file='libvirt' value='1' type='virDomainMigrateFlags' info='live migration'/>
+ <enum name='VIR_MIGRATE_TUNNELLED' file='libvirt' value='2' type='virDomainMigrateFlags' info='tunnelled migration'/>
<enum name='VIR_SECRET_USAGE_TYPE_NONE' file='libvirt' value='0' type='virSecretUsageType'/>
<enum name='VIR_SECRET_USAGE_TYPE_VOLUME' file='libvirt' value='1' type='virSecretUsageType' info=' Expect more owner types later...'/>
<enum name='VIR_STORAGE_POOL_BUILDING' file='libvirt' value='1' type='virStoragePoolState' info='Initializing pool, not available'/>
Flags may be one or more of the following:
VIR_MIGRATE_LIVE Attempt a live migration.
+ VIR_MIGRATE_TUNNELLED Attempt a migration tunnelled through the
+ libvirt RPC mechanism (see the sketch below).
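A minimal caller-side sketch of how these flags combine in a virDomainMigrate() call, assuming a hypothetical destination URI and domain name; error handling is reduced to the essentials:

#include <libvirt/libvirt.h>

/* Hypothetical example: live-migrate the domain "demo", tunnelling the
 * migration data through the libvirt RPC mechanism. The URIs and the
 * domain name are placeholders, not taken from this patch. */
static int
migrate_demo(void)
{
    virConnectPtr src = virConnectOpen("qemu:///system");
    virConnectPtr dst = virConnectOpen("qemu+ssh://dsthost/system");
    virDomainPtr dom = NULL, ddom = NULL;
    int ret = -1;

    if (!src || !dst)
        goto cleanup;

    if (!(dom = virDomainLookupByName(src, "demo")))
        goto cleanup;

    /* dname = NULL keeps the current name, uri = NULL lets the driver
     * choose the transport, bandwidth = 0 means unlimited. */
    ddom = virDomainMigrate(dom, dst,
                            VIR_MIGRATE_LIVE | VIR_MIGRATE_TUNNELLED,
                            NULL, NULL, 0);
    if (ddom != NULL)
        ret = 0;

cleanup:
    if (ddom) virDomainFree(ddom);
    if (dom) virDomainFree(dom);
    if (dst) virConnectClose(dst);
    if (src) virConnectClose(src);
    return ret;
}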
If a hypervisor supports renaming domains during migration,
then you may set the dname parameter to the new name (otherwise
<reference name='VIR_MEMORY_PHYSICAL' href='html/libvirt-libvirt.html#VIR_MEMORY_PHYSICAL'/>
<reference name='VIR_MEMORY_VIRTUAL' href='html/libvirt-libvirt.html#VIR_MEMORY_VIRTUAL'/>
<reference name='VIR_MIGRATE_LIVE' href='html/libvirt-libvirt.html#VIR_MIGRATE_LIVE'/>
+ <reference name='VIR_MIGRATE_TUNNELLED' href='html/libvirt-libvirt.html#VIR_MIGRATE_TUNNELLED'/>
<reference name='VIR_NODEINFO_MAXCPUS' href='html/libvirt-libvirt.html#VIR_NODEINFO_MAXCPUS'/>
<reference name='VIR_SECRET_USAGE_TYPE_NONE' href='html/libvirt-libvirt.html#VIR_SECRET_USAGE_TYPE_NONE'/>
<reference name='VIR_SECRET_USAGE_TYPE_VOLUME' href='html/libvirt-libvirt.html#VIR_SECRET_USAGE_TYPE_VOLUME'/>
<ref name='VIR_MEMORY_PHYSICAL'/>
<ref name='VIR_MEMORY_VIRTUAL'/>
<ref name='VIR_MIGRATE_LIVE'/>
+ <ref name='VIR_MIGRATE_TUNNELLED'/>
<ref name='VIR_NODEINFO_MAXCPUS'/>
<ref name='VIR_SECRET_USAGE_TYPE_NONE'/>
<ref name='VIR_SECRET_USAGE_TYPE_VOLUME'/>
<ref name='VIR_MEMORY_PHYSICAL'/>
<ref name='VIR_MEMORY_VIRTUAL'/>
<ref name='VIR_MIGRATE_LIVE'/>
+ <ref name='VIR_MIGRATE_TUNNELLED'/>
<ref name='VIR_NODEINFO_MAXCPUS'/>
<ref name='VIR_SECRET_USAGE_TYPE_NONE'/>
<ref name='VIR_SECRET_USAGE_TYPE_VOLUME'/>
<ref name='virNetworkGetUUIDString'/>
<ref name='virSecretGetUUIDString'/>
</word>
+ <word name='RPC'>
+ <ref name='virDomainMigrate'/>
+ </word>
<word name='Re-attach'>
<ref name='virNodeDeviceReAttach'/>
</word>
<word name='VIR_MIGRATE_LIVE'>
<ref name='virDomainMigrate'/>
</word>
+ <word name='VIR_MIGRATE_TUNNELLED'>
+ <ref name='virDomainMigrate'/>
+ </word>
<word name='VIR_SECRET_USAGE_TYPE_VOLUME'>
<ref name='virSecretGetUsageID'/>
</word>
<word name='means'>
<ref name='virDomainPinVcpu'/>
</word>
+ <word name='mechanism'>
+ <ref name='virDomainMigrate'/>
+ </word>
<word name='mem'>
<ref name='_virNodeInfo'/>
</word>
<ref name='virStoragePoolRef'/>
<ref name='virStorageVolRef'/>
</word>
+ <word name='through'>
+ <ref name='virDomainMigrate'/>
+ </word>
<word name='time'>
<ref name='_virDomainInfo'/>
<ref name='_virVcpuInfo'/>
<word name='trying'>
<ref name='virConnectOpen'/>
</word>
+ <word name='tunnelled'>
+ <ref name='virDomainMigrate'/>
+ </word>
<word name='turn'>
<ref name='virConnectOpen'/>
</word>
static int doTunnelMigrate(virDomainPtr dom,
+ virConnectPtr dconn,
virDomainObjPtr vm,
+ const char *dom_xml,
const char *uri,
unsigned long flags,
const char *dname,
int client_sock, qemu_sock;
struct sockaddr_un sa_qemu, sa_client;
socklen_t addrlen;
- virConnectPtr dconn;
virDomainPtr ddomain;
int retval = -1;
ssize_t bytes;
char buffer[65536];
virStreamPtr st;
- char *dom_xml = NULL;
char *unixfile = NULL;
int internalret;
unsigned int qemuCmdFlags;
* destination side is completely setup before we touch the source
*/
- dconn = virConnectOpen(uri);
- if (dconn == NULL) {
- qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED,
- _("Failed to connect to remote libvirt URI %s"), uri);
- return -1;
- }
- if (!VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
- VIR_DRV_FEATURE_MIGRATION_V2)) {
- qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED, "%s",
- _("Destination libvirt does not support required migration protocol 2"));
- goto close_dconn;
- }
-
st = virStreamNew(dconn, 0);
if (st == NULL)
/* virStreamNew only fails on OOM, and it reports the error itself */
- goto close_dconn;
-
- dom_xml = virDomainDefFormat(dom->conn, vm->def, VIR_DOMAIN_XML_SECURE);
- if (!dom_xml) {
- qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED,
- "%s", _("failed to get domain xml"));
- goto close_stream;
- }
+ goto cleanup;
internalret = dconn->driver->domainMigratePrepareTunnel(dconn, st,
flags, dname,
resource, dom_xml);
- VIR_FREE(dom_xml);
+
if (internalret < 0)
/* domainMigratePrepareTunnel sets the error for us */
goto close_stream;
/* don't call virStreamFree(), because that resets any pending errors */
virUnrefStream(st);
-close_dconn:
+cleanup:
+ return retval;
+}
+
+
+static int doPeer2PeerMigrate(virDomainPtr dom,
+ virDomainObjPtr vm,
+ const char *uri,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource)
+{
+ int ret = -1;
+ virConnectPtr dconn = NULL;
+ char *dom_xml = NULL;
+
+ /* the order of operations is important here; we make sure the
+ * destination side is completely setup before we touch the source
+ */
+
+ dconn = virConnectOpen(uri);
+ if (dconn == NULL) {
+ qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED,
+ _("Failed to connect to remote libvirt URI %s"), uri);
+ return -1;
+ }
+ if (!VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+ VIR_DRV_FEATURE_MIGRATION_V2)) {
+ qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED, "%s",
+ _("Destination libvirt does not support required migration protocol 2"));
+ goto cleanup;
+ }
+
+ dom_xml = virDomainDefFormat(dom->conn, vm->def, VIR_DOMAIN_XML_SECURE);
+ if (!dom_xml) {
+ qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED,
+ "%s", _("failed to get domain xml"));
+ goto cleanup;
+ }
+
+ ret = doTunnelMigrate(dom, dconn, vm, dom_xml, uri, flags, dname, resource);
+
+cleanup:
+ VIR_FREE(dom_xml);
/* don't call virConnectClose(), because that resets any pending errors */
virUnrefConnect(dconn);
- return retval;
+ return ret;
}
+
/* Perform is the second step, and it runs on the source host. */
static int
qemudDomainMigratePerform (virDomainPtr dom,
}
if ((flags & VIR_MIGRATE_TUNNELLED)) {
- if (doTunnelMigrate(dom, vm, uri, flags, dname, resource) < 0)
- /* doTunnelMigrate already set the error, so just get out */
+ if (doPeer2PeerMigrate(dom, vm, uri, flags, dname, resource) < 0)
+ /* doPeer2PeerMigrate already set the error, so just get out */
goto cleanup;
} else {