}
+static char **
+qemuSnapshotActiveInternalDeleteGetDevices(virDomainObj *vm,
+                                           virDomainSnapshotDef *snapdef)
+{
+    /* In contrast to internal snapshot *creation* we can't always rely on the
+     * metadata to give us the full status of the disk images we'd need to
+     * delete the snapshot from, as users might have edited the VM XML,
+     * unplugged or plugged in disks, or made many other kinds of modification.
+     *
+     * In order for this code to behave the same as it did with the 'delvm' HMP
+     * command, which simply deleted the snapshots where it found them and
+     * ignored any failures, we'll detect the images in qemu first and use
+     * that information as the source of truth for now instead of introducing
+     * other failure scenarios.
+     */
+    g_autoptr(GHashTable) blockNamedNodeData = NULL;
+    const char *snapname = snapdef->parent.name;
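+    /* sized to hold one entry per disk, plus one slot for a possible NVRAM
+     * image and one for the NULL terminator of the string list */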
+    g_auto(GStrv) devices = g_new0(char *, vm->def->ndisks + 2);
+    size_t ndevs = 0;
+    size_t i = 0;
+
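+    /* the named-node data queried from qemu also carries the list of internal
+     * snapshots present in each format node */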
+    if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_SNAPSHOT)))
+        return NULL;
+
+    for (i = 0; i < vm->def->ndisks; i++) {
+        virDomainDiskDef *domdisk = vm->def->disks[i];
+        const char *format_nodename;
+        qemuBlockNamedNodeData *d;
+
+        /* readonly disks will not have permissions to delete the snapshot,
+         * and in fact should not have an internal snapshot */
+        if (domdisk->src->readonly)
+            continue;
+
+        /* This effectively filters out empty and 'raw' disks */
+        if (!(format_nodename = qemuBlockStorageSourceGetFormatNodename(domdisk->src)))
+            continue;
+
+        /* the data should always be present */
+        if (!(d = virHashLookup(blockNamedNodeData, format_nodename)))
+            continue;
+
+        /* there might be no snapshot with the given name for the given disk */
+        if (!d->snapshots ||
+            !g_strv_contains((const char **) d->snapshots, snapname))
+            continue;
+
+        devices[ndevs++] = g_strdup(format_nodename);
+    }
+
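+    /* a qcow2-formatted NVRAM image is included in internal snapshots as
+     * well, so the snapshot needs to be deleted from it too */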
+    if (vm->def->os.loader &&
+        vm->def->os.loader->nvram &&
+        vm->def->os.loader->nvram->format == VIR_STORAGE_FILE_QCOW2) {
+        const char *format_nodename;
+        qemuBlockNamedNodeData *d;
+
+        if ((format_nodename = qemuBlockStorageSourceGetFormatNodename(vm->def->os.loader->nvram)) &&
+            (d = virHashLookup(blockNamedNodeData, format_nodename)) &&
+            d->snapshots &&
+            g_strv_contains((const char **) d->snapshots, snapname)) {
+            devices[ndevs++] = g_strdup(format_nodename);
+        }
+    }
+
+    return g_steal_pointer(&devices);
+}
+
+
+static int
+qemuSnapshotDiscardActiveInternal(virDomainObj *vm,
+                                  virDomainSnapshotDef *snapdef)
+{
+    g_autofree char *jobname = g_strdup_printf("internal-snapshot-delete-%s",
+                                               snapdef->parent.name);
+    qemuBlockJobData *job = NULL;
+    g_auto(GStrv) devices = NULL;
+    int ret = -1;
+    int rc = 0;
+
+    if (!(devices = qemuSnapshotActiveInternalDeleteGetDevices(vm, snapdef)))
+        return -1;
+
+    /* It's possible that no devices were selected, e.g. when no image known
+     * to qemu actually contains the snapshot */
+    if (devices[0] == NULL)
+        return 0;
+
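+    /* the job is not tied to a specific disk, hence the NULL disk argument */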
+    if (!(job = qemuBlockJobDiskNew(vm, NULL, QEMU_BLOCKJOB_TYPE_SNAPSHOT_DELETE,
+                                    jobname)))
+        return -1;
+
+    qemuBlockJobSyncBegin(job);
+
+    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0)
+        goto cleanup;
+
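+    /* this issues the 'snapshot-delete' QMP command; roughly (sketch of the
+     * wire format, which qemuMonitorSnapshotDelete constructs):
+     *   { "execute": "snapshot-delete",
+     *     "arguments": { "job-id": "<jobname>",
+     *                    "tag": "<snapshot name>",
+     *                    "devices": [ "<format node names>" ] } }
+     */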
+    rc = qemuMonitorSnapshotDelete(qemuDomainGetMonitor(vm), job->name,
+                                   snapdef->parent.name, (const char **) devices);
+    qemuDomainObjExitMonitor(vm);
+
+    if (rc < 0)
+        goto cleanup;
+
+    qemuBlockJobStarted(job, vm);
+
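+    /* wait synchronously for the job to finish; qemuDomainObjWait() returns
+     * whenever the domain condition is signalled, e.g. by block job events */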
+    while (true) {
+        qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_SNAPSHOT);
+
+        if (job->state == VIR_DOMAIN_BLOCK_JOB_FAILED) {
+            virReportError(VIR_ERR_OPERATION_FAILED,
+                           _("deletion of internal snapshot '%1$s' job failed: %2$s"),
+                           snapdef->parent.name, NULLSTR(job->errmsg));
+            goto cleanup;
+        }
+
+        if (job->state == VIR_DOMAIN_BLOCK_JOB_COMPLETED)
+            break;
+
+        if (qemuDomainObjWait(vm) < 0)
+            goto cleanup;
+    }
+
+    ret = 0;
+
+ cleanup:
+    qemuBlockJobStartupFinalize(vm, job);
+    return ret;
+}
+
+
/* Discard one snapshot (or its metadata), without reparenting any children. */
static int
qemuSnapshotDiscardImpl(virQEMUDriver *driver,
             if (qemuSnapshotDiscardExternal(vm, snap, externalData) < 0)
                 return -1;
         } else {
+            virDomainSnapshotDef *snapdef = virDomainSnapshotObjGetDef(snap);
+            qemuDomainObjPrivate *priv = vm->privateData;
+
             /* Similarly to internal snapshot creation, we'd use a regular job
              * here, so set a mask to forbid any other job. */
             qemuDomainObjSetAsyncJobMask(vm, VIR_JOB_NONE);
-            if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0)
-                return -1;
-            /* we continue on even in the face of error */
-            qemuMonitorDeleteSnapshot(qemuDomainGetMonitor(vm), snap->def->name);
-            qemuDomainObjExitMonitor(vm);
+
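+            /* QEMU_CAPS_SNAPSHOT_INTERNAL_QMP signals that internal snapshots
+             * can be handled via the QMP commands ('snapshot-delete' here)
+             * instead of the legacy HMP 'delvm' */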
+            if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_SNAPSHOT_INTERNAL_QMP)) {
+                if (qemuSnapshotDiscardActiveInternal(vm, snapdef) < 0)
+                    return -1;
+            } else {
+                if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0)
+                    return -1;
+                /* we continue on even in the face of error */
+                qemuMonitorDeleteSnapshot(qemuDomainGetMonitor(vm), snap->def->name);
+                qemuDomainObjExitMonitor(vm);
+            }
+
             qemuDomainObjSetAsyncJobMask(vm, VIR_JOB_DEFAULT_MASK);
         }
     }