/**
* qemuBlockStorageSourceCreateDetectSize:
- * @vm: domain object
+ * @blockNamedNodeData: hash table filled with qemuBlockNamedNodeData
* @src: storage source to update size/capacity on
* @templ: storage source template
- * @asyncJob: qemu asynchronous job type
*
* When creating a storage source via blockdev-create we need to know the size
* and capacity of the original volume (e.g. when creating a snapshot or copy).
* This function updates @src's 'capacity' and 'physical' attributes according
* to the detected sizes from @templ.
*/
int
-qemuBlockStorageSourceCreateDetectSize(virDomainObjPtr vm,
+qemuBlockStorageSourceCreateDetectSize(virHashTablePtr blockNamedNodeData,
virStorageSourcePtr src,
- virStorageSourcePtr templ,
- qemuDomainAsyncJob asyncJob)
+ virStorageSourcePtr templ)
{
- qemuDomainObjPrivatePtr priv = vm->privateData;
- g_autoptr(virHashTable) stats = NULL;
- qemuBlockStatsPtr entry;
- int rc;
-
- if (!(stats = virHashCreate(10, virHashValueFree)))
- return -1;
-
- if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
- return -1;
-
- rc = qemuMonitorBlockStatsUpdateCapacityBlockdev(priv->mon, stats);
-
- if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0)
- return -1;
+ qemuBlockNamedNodeDataPtr entry;
- if (!(entry = virHashLookup(stats, templ->nodeformat))) {
+ if (!(entry = virHashLookup(blockNamedNodeData, templ->nodeformat))) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("failed to update capacity data for block node '%s'"),
templ->nodeformat);
virDomainDiskDefPtr disk,
virDomainSnapshotDiskDefPtr snapdisk,
qemuDomainSnapshotDiskDataPtr dd,
+ virHashTablePtr blockNamedNodeData,
bool reuse,
bool blockdev,
qemuDomainAsyncJob asyncJob)
if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0)
return -1;
} else {
- if (qemuBlockStorageSourceCreateDetectSize(vm, dd->src, dd->disk->src,
- asyncJob) < 0)
+ if (qemuBlockStorageSourceCreateDetectSize(blockNamedNodeData,
+ dd->src, dd->disk->src) < 0)
return -1;
if (qemuBlockStorageSourceCreate(vm, dd->src, dd->disk->src,
virQEMUDriverConfigPtr cfg,
bool reuse,
bool blockdev,
+ virHashTablePtr blockNamedNodeData,
qemuDomainAsyncJob asyncJob,
qemuDomainSnapshotDiskDataPtr *rdata,
size_t *rndata)
if (qemuDomainSnapshotDiskPrepareOne(driver, vm, cfg, vm->def->disks[i],
snapdef->disks + i,
- data + ndata++, reuse, blockdev,
+ data + ndata++,
+ blockNamedNodeData,
+ reuse, blockdev,
asyncJob) < 0)
goto cleanup;
}
qemuDomainSnapshotDiskDataPtr diskdata = NULL;
size_t ndiskdata = 0;
bool blockdev = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV);
+ g_autoptr(virHashTable) blockNamedNodeData = NULL;
if (virDomainObjCheckActive(vm) < 0)
return -1;
if (!(actions = virJSONValueNewArray()))
return -1;
+ if (blockdev) {
+ if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
+ return -1;
+
+ blockNamedNodeData = qemuMonitorBlockGetNamedNodeData(priv->mon);
+
+ if (qemuDomainObjExitMonitor(driver, vm) < 0 || !blockNamedNodeData)
+ return -1;
+ }
+
/* prepare a list of objects to use in the vm definition so that we don't
* have to roll back later */
if (qemuDomainSnapshotDiskPrepare(driver, vm, snap, cfg, reuse, blockdev,
- asyncJob, &diskdata, &ndiskdata) < 0)
+ blockNamedNodeData, asyncJob,
+ &diskdata, &ndiskdata) < 0)
goto cleanup;
/* check whether there's anything to do */
g_autoptr(qemuBlockStorageSourceChainData) crdata = NULL;
virStorageSourcePtr n;
virStorageSourcePtr mirrorBacking = NULL;
+ g_autoptr(virHashTable) blockNamedNodeData = NULL;
int rc = 0;
/* Preliminaries: find the disk we are editing, sanity checks */
priv->qemuCaps)))
goto endjob;
} else {
- if (qemuBlockStorageSourceCreateDetectSize(vm, mirror, disk->src, QEMU_ASYNC_JOB_NONE) < 0)
+ qemuDomainObjEnterMonitor(driver, vm);
+ blockNamedNodeData = qemuMonitorBlockGetNamedNodeData(priv->mon);
+ if (qemuDomainObjExitMonitor(driver, vm) < 0 || !blockNamedNodeData)
+ goto endjob;
+
+ if (qemuBlockStorageSourceCreateDetectSize(blockNamedNodeData,
+ mirror, disk->src))
goto endjob;
if (mirror_shallow) {