* object is finally released. This will also happen if the
* client application crashes / loses its connection to the
* libvirtd daemon. Any domains marked for auto destroy will
- * block attempts at migration or save-to-file
+ * block attempts at migration, save-to-file, or snapshots.
*
* Returns a new domain object or NULL in case of failure
*/
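For reference, the behaviour documented above is visible through the public API. The sketch below creates a transient auto-destroy guest and then attempts a save-to-file, which the driver checks added later in this patch refuse; the connection URI, domain XML, and save path are placeholders, not values taken from this patch.

#include <stdio.h>
#include <libvirt/libvirt.h>
#include <libvirt/virterror.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");   /* placeholder URI */
    if (!conn)
        return 1;

    /* Transient guest tied to this connection: it is torn down
     * automatically when the connection closes or the client dies. */
    const char *dom_xml = "<domain type='kvm'>...</domain>"; /* placeholder XML */
    virDomainPtr dom = virDomainCreateXML(conn, dom_xml,
                                          VIR_DOMAIN_START_AUTODESTROY);
    if (!dom) {
        virConnectClose(conn);
        return 1;
    }

    /* With the qemu driver checks below in place, save-to-file (and
     * likewise migration or snapshot creation) is refused for this guest. */
    if (virDomainSave(dom, "/var/lib/libvirt/save/demo.sav") < 0) {
        virErrorPtr err = virGetLastError();
        fprintf(stderr, "save refused: %s\n",
                err && err->message ? err->message : "unknown error");
        /* expected: VIR_ERR_OPERATION_INVALID,
         * "domain is marked for auto destroy" */
    }

    virDomainFree(dom);
    virConnectClose(conn);
    return 0;
}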
* object is finally released. This will also happen if the
* client application crashes / loses its connection to the
* libvirtd daemon. Any domains marked for auto destroy will
- * block attempts at migration or save-to-file
+ * block attempts at migration, save-to-file, or snapshots.
*
* If the VIR_DOMAIN_START_BYPASS_CACHE flag is set, and there is a
* managed save file for this domain (created by virDomainManagedSave()),
goto cleanup;
}
- if (qemuProcessAutoDestroyActive(driver, vm)) {
- qemuReportError(VIR_ERR_OPERATION_INVALID,
- "%s", _("domain is marked for auto destroy"));
- goto cleanup;
- }
-
priv = vm->privateData;
if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
int directFlag = 0;
virFileDirectFdPtr directFd = NULL;
+ if (qemuProcessAutoDestroyActive(driver, vm)) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("domain is marked for auto destroy"));
+ return -1;
+ }
+
memset(&header, 0, sizeof(header));
memcpy(header.magic, QEMUD_SAVE_MAGIC, sizeof(header.magic));
header.version = QEMUD_SAVE_VERSION;
goto cleanup;
}
+ if (qemuProcessAutoDestroyActive(driver, vm)) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("domain is marked for auto destroy"));
+ goto cleanup;
+ }
+
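From the client's perspective, the check added here surfaces through the snapshot API just as it does for save above; a minimal follow-up sketch, reusing the dom handle and includes from the earlier example, with placeholder snapshot XML:

    /* Snapshot creation is likewise refused for an auto-destroy guest. */
    const char *snap_xml =
        "<domainsnapshot><name>demo</name></domainsnapshot>"; /* placeholder */
    virDomainSnapshotPtr snap = virDomainSnapshotCreateXML(dom, snap_xml, 0);
    if (!snap) {
        virErrorPtr err = virGetLastError();
        fprintf(stderr, "snapshot refused: %s\n",
                err && err->message ? err->message : "unknown error");
        /* expected: VIR_ERR_OPERATION_INVALID,
         * "domain is marked for auto destroy" */
    } else {
        virDomainSnapshotFree(snap);
    }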
/* in a perfect world, we would allow qemu to tell us this. The problem
* is that qemu only does this check device-by-device; so if you had a
* domain that booted from a large qcow2 device, but had a secondary raw