NB: caller must take care to drop the driver lock if necessary
- These functions automatically begin/end nested job if called inside an
- asynchronous job. The caller must then check the return value of
- qemuDomainObjEnterMonitor to detect if domain died while waiting on
- the nested job.
+ These functions must not be used inside an asynchronous job.
To acquire the QEMU monitor lock with the driver lock held
NB: caller must take care to drop the driver lock if necessary
- These functions automatically begin/end nested job if called inside an
- asynchronous job. The caller must then check the return value of
- qemuDomainObjEnterMonitorWithDriver to detect if domain died while
- waiting on the nested job.
+ These functions must not be used inside an asynchronous job.
+
+
+To acquire the QEMU monitor lock with the driver lock held and as part
+of an asynchronous job
+
+ qemuDomainObjEnterMonitorAsync()
+ - Validates that the right async job is still running
+ - Acquires the qemuMonitorObjPtr lock
+ - Releases the virDomainObjPtr lock
+ - Releases the driver lock
+ - Validates that the VM is still active
+
+ qemuDomainObjExitMonitorWithDriver()
+ - Releases the qemuMonitorObjPtr lock
+ - Acquires the driver lock
+ - Acquires the virDomainObjPtr lock
+
+ NB: caller must take care to drop the driver lock if necessary
+
+ These functions are for use inside an asynchronous job; the caller
+ must check for a return of -1 (VM not running, so nothing to exit).
+ Helper functions may also call this with QEMU_ASYNC_JOB_NONE when
+ used from a sync job (such as when first starting a domain).
To keep a domain alive while waiting on a remote command, starting
...do prep work...
if (virDomainObjIsActive(vm)) {
- /* using ignore_value is safe since vm is active */
- ignore_value(qemuDomainObjEnterMonitor(obj));
+ qemuDomainObjEnterMonitor(obj);
qemuMonitorXXXX(priv->mon);
qemuDomainObjExitMonitor(obj);
}
...do prep work...
if (virDomainObjIsActive(vm)) {
- /* using ignore_value is safe since vm is active */
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, obj));
+ qemuDomainObjEnterMonitorWithDriver(driver, obj);
qemuMonitorXXXX(priv->mon);
qemuDomainObjExitMonitorWithDriver(driver, obj);
}
qemuDriverUnlock(driver);
- * Running asynchronous job
+ * Running asynchronous job with driver lock held
virDomainObjPtr obj;
qemuDomainObjPrivatePtr priv;
...do prep work...
- if (qemuDomainObjEnterMonitorWithDriver(driver, obj) < 0) {
+ if (qemuDomainObjEnterMonitorAsync(driver, obj,
+ QEMU_ASYNC_JOB_TYPE) < 0) {
/* domain died in the meantime */
goto error;
}
qemuDomainObjExitMonitorWithDriver(driver, obj);
while (!finished) {
- if (qemuDomainObjEnterMonitorWithDriver(driver, obj) < 0) {
+ if (qemuDomainObjEnterMonitorAsync(driver, obj,
+ QEMU_ASYNC_JOB_TYPE) < 0) {
/* domain died in the meantime */
goto error;
}
return virDomainObjUnref(obj);
}
-static int ATTRIBUTE_NONNULL(1)
+static int
qemuDomainObjEnterMonitorInternal(struct qemud_driver *driver,
bool driver_locked,
- virDomainObjPtr obj)
+ virDomainObjPtr obj,
+ enum qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
- if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
+ if (asyncJob != QEMU_ASYNC_JOB_NONE) {
+ if (asyncJob != priv->job.asyncJob) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("unepxected async job %d"), asyncJob);
+ return -1;
+ }
if (qemuDomainObjBeginJobInternal(driver, driver_locked, obj,
QEMU_JOB_ASYNC_NESTED,
QEMU_ASYNC_JOB_NONE) < 0)
if (!virDomainObjIsActive(obj)) {
qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
_("domain is no longer running"));
+ /* Still referenced by the containing async job. */
+ ignore_value(qemuDomainObjEndJob(driver, obj));
return -1;
}
}
*
* To be called immediately before any QEMU monitor API call
- * Must have already either called qemuDomainObjBeginJob() and checked
- * that the VM is still active or called qemuDomainObjBeginAsyncJob, in which
- * case this will start a nested job.
+ * Must have already called qemuDomainObjBeginJob() and checked that
+ * the VM is still active; may not be used for nested async jobs.
*
* To be followed with qemuDomainObjExitMonitor() once complete
*/
-int qemuDomainObjEnterMonitor(struct qemud_driver *driver,
- virDomainObjPtr obj)
+void qemuDomainObjEnterMonitor(struct qemud_driver *driver,
+ virDomainObjPtr obj)
{
- return qemuDomainObjEnterMonitorInternal(driver, false, obj);
+ ignore_value(qemuDomainObjEnterMonitorInternal(driver, false, obj,
+ QEMU_ASYNC_JOB_NONE));
}
/* obj must NOT be locked before calling, qemud_driver must be unlocked
*
* To be called immediately before any QEMU monitor API call
- * Must have already either called qemuDomainObjBeginJobWithDriver() and
- * checked that the VM is still active or called qemuDomainObjBeginAsyncJob,
- * in which case this will start a nested job.
+ * Must have already called qemuDomainObjBeginJobWithDriver() and
+ * checked that the VM is still active; may not be used for nested async jobs.
*
* To be followed with qemuDomainObjExitMonitorWithDriver() once complete
*/
-int qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver,
- virDomainObjPtr obj)
+void qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver,
+ virDomainObjPtr obj)
+{
+ ignore_value(qemuDomainObjEnterMonitorInternal(driver, true, obj,
+ QEMU_ASYNC_JOB_NONE));
+}
+
+/*
+ * obj and qemud_driver must be locked before calling
+ *
+ * To be called immediately before any QEMU monitor API call.
+ * Must have already either called qemuDomainObjBeginJobWithDriver()
+ * and checked that the VM is still active, with asyncJob of
+ * QEMU_ASYNC_JOB_NONE; or already called qemuDomainObjBeginAsyncJob,
+ * with the same asyncJob.
+ *
+ * Returns 0 if the job was started, in which case this must be followed with
+ * qemuDomainObjExitMonitorWithDriver(); or -1 if the job could not be
+ * started (probably because the VM exited in the meantime).
+ */
+int
+qemuDomainObjEnterMonitorAsync(struct qemud_driver *driver,
+ virDomainObjPtr obj,
+ enum qemuDomainAsyncJob asyncJob)
{
- return qemuDomainObjEnterMonitorInternal(driver, true, obj);
+ return qemuDomainObjEnterMonitorInternal(driver, true, obj, asyncJob);
}
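
The return-value contract above is what the later hunks in this patch
rely on: only a zero return means the monitor was entered and must be
exited. A minimal caller sketch, mirroring the qemuProcessStartCPUs
hunk further down:

    ret = qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob);
    if (ret == 0) {
        ret = qemuMonitorStartCPUs(priv->mon, conn);
        qemuDomainObjExitMonitorWithDriver(driver, vm);
    }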
/* obj must NOT be locked before calling, qemud_driver must be unlocked,
void qemuDomainObjDiscardAsyncJob(struct qemud_driver *driver,
virDomainObjPtr obj);
-int qemuDomainObjEnterMonitor(struct qemud_driver *driver,
- virDomainObjPtr obj)
- ATTRIBUTE_RETURN_CHECK;
+void qemuDomainObjEnterMonitor(struct qemud_driver *driver,
+ virDomainObjPtr obj)
+ ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
void qemuDomainObjExitMonitor(struct qemud_driver *driver,
- virDomainObjPtr obj);
-int qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver,
- virDomainObjPtr obj)
- ATTRIBUTE_RETURN_CHECK;
+ virDomainObjPtr obj)
+ ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
+void qemuDomainObjEnterMonitorWithDriver(struct qemud_driver *driver,
+ virDomainObjPtr obj)
+ ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
+int qemuDomainObjEnterMonitorAsync(struct qemud_driver *driver,
+ virDomainObjPtr obj,
+ enum qemuDomainAsyncJob asyncJob)
+ ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_RETURN_CHECK;
void qemuDomainObjExitMonitorWithDriver(struct qemud_driver *driver,
- virDomainObjPtr obj);
+ virDomainObjPtr obj)
+ ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
void qemuDomainObjEnterRemoteWithDriver(struct qemud_driver *driver,
- virDomainObjPtr obj);
+ virDomainObjPtr obj)
+ ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
void qemuDomainObjExitRemoteWithDriver(struct qemud_driver *driver,
- virDomainObjPtr obj);
+ virDomainObjPtr obj)
+ ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
char *qemuDomainDefFormatXML(struct qemud_driver *driver,
virDomainDefPtr vm,
goto endjob;
}
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_PAUSED) {
- if (qemuProcessStopCPUs(driver, vm, reason) < 0) {
+ if (qemuProcessStopCPUs(driver, vm, reason, QEMU_ASYNC_JOB_NONE) < 0) {
goto endjob;
}
event = virDomainEventNewFromObj(vm,
}
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
if (qemuProcessStartCPUs(driver, vm, dom->conn,
- VIR_DOMAIN_RUNNING_UNPAUSED) < 0) {
+ VIR_DOMAIN_RUNNING_UNPAUSED,
+ QEMU_ASYNC_JOB_NONE) < 0) {
if (virGetLastError() == NULL)
qemuReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("resume operation failed"));
}
priv = vm->privateData;
- ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorSystemPowerdown(priv->mon);
qemuDomainObjExitMonitor(driver, vm);
goto endjob;
}
- ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorSystemPowerdown(priv->mon);
qemuDomainObjExitMonitor(driver, vm);
if (flags & VIR_DOMAIN_AFFECT_LIVE) {
priv = vm->privateData;
- ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ qemuDomainObjEnterMonitor(driver, vm);
r = qemuMonitorSetBalloon(priv->mon, newmem);
qemuDomainObjExitMonitor(driver, vm);
virDomainAuditMemory(vm, vm->def->mem.cur_balloon, newmem, "update",
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorInjectNMI(priv->mon);
qemuDomainObjExitMonitorWithDriver(driver, vm);
if (qemuDomainObjEndJob(driver, vm) == 0) {
goto cleanup;
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorSendKey(priv->mon, holdtime, keycodes, nkeycodes);
qemuDomainObjExitMonitorWithDriver(driver, vm);
if (qemuDomainObjEndJob(driver, vm) == 0) {
if (!virDomainObjIsActive(vm))
err = 0;
else {
- ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ qemuDomainObjEnterMonitor(driver, vm);
err = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
qemuDomainObjExitMonitor(driver, vm);
}
/* Pause */
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
header.was_running = 1;
- if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE) < 0)
+ if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
+ QEMU_ASYNC_JOB_SAVE) < 0)
goto endjob;
if (!virDomainObjIsActive(vm)) {
/* Perform the migration */
if (qemuMigrationToFile(driver, vm, fd, offset, path,
qemuCompressProgramName(compressed),
- is_reg, bypassSecurityDriver) < 0)
+ is_reg, bypassSecurityDriver,
+ QEMU_ASYNC_JOB_SAVE) < 0)
goto endjob;
if (VIR_CLOSE(fd) < 0) {
virReportSystemError(errno, _("unable to close %s"), path);
if (ret != 0) {
if (header.was_running && virDomainObjIsActive(vm)) {
rc = qemuProcessStartCPUs(driver, vm, dom->conn,
- VIR_DOMAIN_RUNNING_SAVE_CANCELED);
+ VIR_DOMAIN_RUNNING_SAVE_CANCELED,
+ QEMU_ASYNC_JOB_SAVE);
if (rc < 0)
VIR_WARN("Unable to resume guest CPUs after save failure");
}
goto cleanup;
if (qemuMigrationToFile(driver, vm, fd, 0, path,
- qemuCompressProgramName(compress), true, false) < 0)
+ qemuCompressProgramName(compress), true, false,
+ QEMU_ASYNC_JOB_DUMP) < 0)
goto cleanup;
if (VIR_CLOSE(fd) < 0) {
/* Pause domain for non-live dump */
if (!(flags & VIR_DUMP_LIVE) &&
virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
- if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_DUMP) < 0)
+ if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_DUMP,
+ QEMU_ASYNC_JOB_DUMP) < 0)
goto endjob;
paused = 1;
the migration is complete. */
else if (resume && paused && virDomainObjIsActive(vm)) {
if (qemuProcessStartCPUs(driver, vm, dom->conn,
- VIR_DOMAIN_RUNNING_UNPAUSED) < 0) {
+ VIR_DOMAIN_RUNNING_UNPAUSED,
+ QEMU_ASYNC_JOB_DUMP) < 0) {
if (virGetLastError() == NULL)
qemuReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("resuming after dump failed"));
virSecurityManagerSetSavedStateLabel(qemu_driver->securityManager, vm, tmp);
- ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ qemuDomainObjEnterMonitor(driver, vm);
if (qemuMonitorScreendump(priv->mon, tmp) < 0) {
qemuDomainObjExitMonitor(driver, vm);
goto endjob;
"%s", _("Dump failed"));
ret = qemuProcessStartCPUs(driver, wdEvent->vm, NULL,
- VIR_DOMAIN_RUNNING_UNPAUSED);
+ VIR_DOMAIN_RUNNING_UNPAUSED,
+ QEMU_ASYNC_JOB_DUMP);
if (ret < 0)
qemuReportError(VIR_ERR_OPERATION_FAILED,
int oldvcpus = vm->def->vcpus;
int vcpus = oldvcpus;
- ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ qemuDomainObjEnterMonitor(driver, vm);
/* We need different branches here, because we want to offline
* in reverse order to onlining, so any partial fail leaves us in a
/* If it was running before, resume it now. */
if (header->was_running) {
if (qemuProcessStartCPUs(driver, vm, conn,
- VIR_DOMAIN_RUNNING_RESTORED) < 0) {
+ VIR_DOMAIN_RUNNING_RESTORED,
+ QEMU_ASYNC_JOB_NONE) < 0) {
if (virGetLastError() == NULL)
qemuReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("failed to resume domain"));
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
err = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
qemuDomainObjExitMonitorWithDriver(driver, vm);
if (qemuDomainObjEndJob(driver, vm) == 0) {
goto endjob;
}
- ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorGetBlockStatsInfo(priv->mon,
disk->info.alias,
&stats->rd_req,
if (virDomainObjIsActive(vm)) {
qemuDomainObjPrivatePtr priv = vm->privateData;
- ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorGetMemoryStats(priv->mon, stats, nr_stats);
qemuDomainObjExitMonitor(driver, vm);
} else {
virSecurityManagerSetSavedStateLabel(qemu_driver->securityManager, vm, tmp);
priv = vm->privateData;
- ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ qemuDomainObjEnterMonitor(driver, vm);
if (flags == VIR_MEMORY_VIRTUAL) {
if (qemuMonitorSaveVirtualMemory(priv->mon, offset, size, tmp) < 0) {
qemuDomainObjExitMonitor(driver, vm);
goto cleanup;
if (virDomainObjIsActive(vm)) {
- ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorGetBlockExtent(priv->mon,
disk->info.alias,
&info->allocation);
}
VIR_DEBUG("Cancelling job at client request");
- ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorMigrateCancel(priv->mon);
qemuDomainObjExitMonitor(driver, vm);
}
VIR_DEBUG("Setting migration downtime to %llums", downtime);
- ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorSetMigrationDowntime(priv->mon, downtime);
qemuDomainObjExitMonitor(driver, vm);
}
VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
- ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
qemuDomainObjExitMonitor(driver, vm);
* confuses libvirt since it's not notified when qemu resumes the
* domain. Thus we stop and start CPUs ourselves.
*/
- if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE) < 0)
+ if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
+ QEMU_ASYNC_JOB_NONE) < 0)
goto cleanup;
resume = true;
}
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
qemuDomainObjExitMonitorWithDriver(driver, vm);
cleanup:
if (resume && virDomainObjIsActive(vm) &&
qemuProcessStartCPUs(driver, vm, conn,
- VIR_DOMAIN_RUNNING_UNPAUSED) < 0 &&
+ VIR_DOMAIN_RUNNING_UNPAUSED,
+ QEMU_ASYNC_JOB_NONE) < 0 &&
virGetLastError() == NULL) {
qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
_("resuming after snapshot failed"));
if (virDomainObjIsActive(vm)) {
priv = vm->privateData;
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
rc = qemuMonitorLoadSnapshot(priv->mon, snap->def->name);
qemuDomainObjExitMonitorWithDriver(driver, vm);
if (rc < 0)
if (snap->def->state == VIR_DOMAIN_PAUSED) {
/* qemu unconditionally starts the domain running again after
* loadvm, so let's pause it to keep consistency
+ * XXX we should have used qemuProcessStart's start_paused instead
*/
rc = qemuProcessStopCPUs(driver, vm,
- VIR_DOMAIN_PAUSED_FROM_SNAPSHOT);
+ VIR_DOMAIN_PAUSED_FROM_SNAPSHOT,
+ QEMU_ASYNC_JOB_NONE);
if (rc < 0)
goto endjob;
} else {
}
else {
priv = vm->privateData;
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
/* we continue on even in the face of error */
qemuMonitorDeleteSnapshot(priv->mon, snap->def->name);
qemuDomainObjExitMonitorWithDriver(driver, vm);
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorArbitraryCommand(priv->mon, cmd, result, hmp);
qemuDomainObjExitMonitorWithDriver(driver, vm);
if (qemuDomainObjEndJob(driver, vm) == 0) {
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
priv = vm->privateData;
ret = qemuMonitorBlockJob(priv->mon, device, bandwidth, info, mode);
qemuDomainObjExitMonitorWithDriver(driver, vm);
if (!(driveAlias = qemuDeviceDriveHostAlias(origdisk, priv->qemuCaps)))
goto error;
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (disk->src) {
const char *format = NULL;
if (disk->type != VIR_DOMAIN_DISK_TYPE_DIR) {
goto error;
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
ret = qemuMonitorAddDrive(priv->mon, drivestr);
if (ret == 0) {
goto cleanup;
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
ret = qemuMonitorAddDevice(priv->mon, devstr);
} else {
goto error;
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
ret = qemuMonitorAddDrive(priv->mon, drivestr);
if (ret == 0) {
goto error;
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
ret = qemuMonitorAddDrive(priv->mon, drivestr);
if (ret == 0) {
goto cleanup;
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_NETDEV) &&
qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorAddNetdev(priv->mon, netstr, tapfd, tapfd_name,
goto try_remove;
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorAddDevice(priv->mon, nicstr) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
char *netdev_name;
if (virAsprintf(&netdev_name, "host%s", net->info.alias) < 0)
goto no_memory;
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuMonitorRemoveNetdev(priv->mon, netdev_name) < 0)
VIR_WARN("Failed to remove network backend for netdev %s",
netdev_name);
char *hostnet_name;
if (virAsprintf(&hostnet_name, "host%s", net->info.alias) < 0)
goto no_memory;
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuMonitorRemoveHostNetwork(priv->mon, vlan, hostnet_name) < 0)
VIR_WARN("Failed to remove network backend for vlan %d, net %s",
vlan, hostnet_name);
priv->qemuCaps)))
goto error;
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorAddDeviceWithFd(priv->mon, devstr,
configfd, configfd_name);
qemuDomainObjExitMonitorWithDriver(driver, vm);
} else {
virDomainDevicePCIAddress guestAddr;
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorAddPCIHostDevice(priv->mon,
&hostdev->source.subsys.u.pci,
&guestAddr);
goto error;
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE))
ret = qemuMonitorAddDevice(priv->mon, devstr);
else
goto cleanup;
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
goto cleanup;
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
virDomainAuditDisk(vm, detach, NULL, "detach", false);
goto cleanup;
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorDelDevice(priv->mon, detach->info.alias)) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
goto cleanup;
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
if (qemuMonitorDelDevice(priv->mon, detach->info.alias) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
return -1;
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
ret = qemuMonitorDelDevice(priv->mon, detach->info.alias);
} else {
return -1;
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorDelDevice(priv->mon, detach->info.alias);
qemuDomainObjExitMonitorWithDriver(driver, vm);
virDomainAuditHostdev(vm, detach, "detach", ret == 0);
if (auth->connected)
connected = virDomainGraphicsAuthConnectedTypeToString(auth->connected);
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorSetPassword(priv->mon,
type,
auth->passwd ? auth->passwd : defaultPasswd,
{
int ret;
VIR_DEBUG("driver=%p vm=%p", driver, vm);
- ret = qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION);
+ ret = qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION,
+ QEMU_ASYNC_JOB_MIGRATION_OUT);
if (ret == 0) {
virDomainEventPtr event;
static int
qemuMigrationUpdateJobStatus(struct qemud_driver *driver,
virDomainObjPtr vm,
- const char *job)
+ const char *job,
+ enum qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1;
unsigned long long memRemaining;
unsigned long long memTotal;
- if (!virDomainObjIsActive(vm)) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
- job, _("guest unexpectedly quit"));
+ ret = qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob);
+ if (ret < 0) {
+ /* Guest already exited; nothing further to update. */
return -1;
}
-
- ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
- if (ret == 0) {
- ret = qemuMonitorGetMigrationStatus(priv->mon,
- &status,
- &memProcessed,
- &memRemaining,
- &memTotal);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
- }
+ ret = qemuMonitorGetMigrationStatus(priv->mon,
+ &status,
+ &memProcessed,
+ &memRemaining,
+ &memTotal);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
if (ret < 0 || virTimeMs(&priv->job.info.timeElapsed) < 0) {
priv->job.info.type = VIR_DOMAIN_JOB_FAILED;
}
-int
-qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
+static int
+qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm,
+ enum qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
const char *job;
/* Poll every 50ms for progress & to allow cancellation */
struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
- if (qemuMigrationUpdateJobStatus(driver, vm, job) < 0)
+ if (qemuMigrationUpdateJobStatus(driver, vm, job, asyncJob) < 0)
goto cleanup;
virDomainObjUnlock(vm);
if (cookie->graphics->type != VIR_DOMAIN_GRAPHICS_TYPE_SPICE)
return 0;
- ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ret = qemuDomainObjEnterMonitorAsync(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_OUT);
if (ret == 0) {
ret = qemuMonitorGraphicsRelocate(priv->mon,
cookie->graphics->type,
goto cleanup;
}
- if (qemuDomainObjEnterMonitorWithDriver(driver, vm) < 0)
+ if (qemuDomainObjEnterMonitorAsync(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto cleanup;
if (resource > 0 &&
}
qemuDomainObjExitMonitorWithDriver(driver, vm);
- if (qemuMigrationWaitForCompletion(driver, vm) < 0)
+ if (qemuMigrationWaitForCompletion(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto cleanup;
/* When migration completed, QEMU will have paused the
goto cleanup;
}
- if (qemuDomainObjEnterMonitorWithDriver(driver, vm) < 0)
+ if (qemuDomainObjEnterMonitorAsync(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto cleanup;
if (resource > 0 &&
/* it is also possible that the migrate didn't fail initially, but
* rather failed later on. Check the output of "info migrate"
*/
- if (qemuDomainObjEnterMonitorWithDriver(driver, vm) < 0)
+ if (qemuDomainObjEnterMonitorAsync(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto cancel;
if (qemuMonitorGetMigrationStatus(priv->mon,
&status,
if (!(iothread = qemuMigrationStartTunnel(st, client_sock)))
goto cancel;
- ret = qemuMigrationWaitForCompletion(driver, vm);
+ ret = qemuMigrationWaitForCompletion(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_OUT);
/* When migration completed, QEMU will have paused the
* CPUs for us, but unless we're using the JSON monitor
if (ret != 0 && virDomainObjIsActive(vm)) {
VIR_FORCE_CLOSE(client_sock);
VIR_FORCE_CLOSE(qemu_sock);
- if (qemuDomainObjEnterMonitorWithDriver(driver, vm) == 0) {
+ if (qemuDomainObjEnterMonitorAsync(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
qemuMonitorMigrateCancel(priv->mon);
qemuDomainObjExitMonitorWithDriver(driver, vm);
}
if (resume && virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
/* we got here through some sort of failure; start the domain again */
if (qemuProcessStartCPUs(driver, vm, conn,
- VIR_DOMAIN_RUNNING_MIGRATION_CANCELED) < 0) {
+ VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
+ QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
/* Hm, we already know we are in error here. We don't want to
* overwrite the previous error, though, so we just throw something
* to the logs and hope for the best
virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
/* we got here through some sort of failure; start the domain again */
if (qemuProcessStartCPUs(driver, vm, conn,
- VIR_DOMAIN_RUNNING_MIGRATION_CANCELED) < 0) {
+ VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
+ QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
/* Hm, we already know we are in error here. We don't want to
* overwrite the previous error, though, so we just throw something
* to the logs and hope for the best
* older qemu's, but it also doesn't hurt anything there
*/
if (qemuProcessStartCPUs(driver, vm, dconn,
- VIR_DOMAIN_RUNNING_MIGRATED) < 0) {
+ VIR_DOMAIN_RUNNING_MIGRATED,
+ QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
if (virGetLastError() == NULL)
qemuReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("resume operation failed"));
* older qemu's, but it also doesn't hurt anything there
*/
if (qemuProcessStartCPUs(driver, vm, conn,
- VIR_DOMAIN_RUNNING_MIGRATED) < 0) {
+ VIR_DOMAIN_RUNNING_MIGRATED,
+ QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
if (virGetLastError() == NULL)
qemuReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("resume operation failed"));
qemuMigrationToFile(struct qemud_driver *driver, virDomainObjPtr vm,
int fd, off_t offset, const char *path,
const char *compressor,
- bool is_reg, bool bypassSecurityDriver)
+ bool is_reg, bool bypassSecurityDriver,
+ enum qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virCgroupPtr cgroup = NULL;
restoreLabel = true;
}
- if (qemuDomainObjEnterMonitorWithDriver(driver, vm) < 0)
+ if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
goto cleanup;
if (!compressor) {
if (rc < 0)
goto cleanup;
- rc = qemuMigrationWaitForCompletion(driver, vm);
+ rc = qemuMigrationWaitForCompletion(driver, vm, asyncJob);
if (rc < 0)
goto cleanup;
int qemuMigrationSetOffline(struct qemud_driver *driver,
virDomainObjPtr vm);
-int qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm);
-
char *qemuMigrationBegin(struct qemud_driver *driver,
virDomainObjPtr vm,
const char *xmlin,
int qemuMigrationToFile(struct qemud_driver *driver, virDomainObjPtr vm,
int fd, off_t offset, const char *path,
const char *compressor,
- bool is_reg, bool bypassSecurityDriver)
+ bool is_reg, bool bypassSecurityDriver,
+ enum qemuDomainAsyncJob asyncJob)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_NONNULL(5)
ATTRIBUTE_RETURN_CHECK;
goto endjob;
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuMonitorSystemReset(priv->mon) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
goto endjob;
}
if (qemuProcessStartCPUs(driver, vm, NULL,
- VIR_DOMAIN_RUNNING_BOOTED) < 0) {
+ VIR_DOMAIN_RUNNING_BOOTED,
+ QEMU_ASYNC_JOB_NONE) < 0) {
if (virGetLastError() == NULL)
qemuReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("resume operation failed"));
}
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorSetCapabilities(priv->mon);
qemuDomainObjExitMonitorWithDriver(driver, vm);
goto cleanup;
priv = vm->privateData;
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorGetPtyPaths(priv->mon, paths);
qemuDomainObjExitMonitorWithDriver(driver, vm);
/* What follows is now all KVM specific */
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if ((ncpupids = qemuMonitorGetCPUInfo(priv->mon, &cpupids)) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
return -1;
goto cleanup;
alias = vm->def->disks[i]->info.alias;
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorSetDrivePassphrase(priv->mon, alias, secret);
VIR_FREE(secret);
qemuDomainObjExitMonitorWithDriver(driver, vm);
int ret;
qemuMonitorPCIAddress *addrs = NULL;
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
naddrs = qemuMonitorGetAllPCIAddresses(priv->mon,
&addrs);
qemuDomainObjExitMonitorWithDriver(driver, vm);
*/
int
qemuProcessStartCPUs(struct qemud_driver *driver, virDomainObjPtr vm,
- virConnectPtr conn, virDomainRunningReason reason)
+ virConnectPtr conn, virDomainRunningReason reason,
+ enum qemuDomainAsyncJob asyncJob)
{
int ret;
qemuDomainObjPrivatePtr priv = vm->privateData;
}
VIR_FREE(priv->lockState);
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
- ret = qemuMonitorStartCPUs(priv->mon, conn);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
+ ret = qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob);
+ if (ret == 0) {
+ ret = qemuMonitorStartCPUs(priv->mon, conn);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ }
if (ret == 0) {
virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, reason);
int qemuProcessStopCPUs(struct qemud_driver *driver, virDomainObjPtr vm,
- virDomainPausedReason reason)
+ virDomainPausedReason reason,
+ enum qemuDomainAsyncJob asyncJob)
{
int ret;
int oldState;
oldState = virDomainObjGetState(vm, &oldReason);
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, reason);
- ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ret = qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob);
if (ret == 0) {
ret = qemuMonitorStopCPUs(priv->mon);
qemuDomainObjExitMonitorWithDriver(driver, vm);
bool running;
int ret;
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorGetStatus(priv->mon, &running);
qemuDomainObjExitMonitorWithDriver(driver, vm);
VIR_DEBUG("Incoming migration finished, resuming domain %s",
vm->def->name);
if (qemuProcessStartCPUs(driver, vm, conn,
- VIR_DOMAIN_RUNNING_UNPAUSED) < 0) {
+ VIR_DOMAIN_RUNNING_UNPAUSED,
+ QEMU_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s", vm->def->name);
}
break;
* domain */
VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
vm->def->name);
- ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ qemuDomainObjEnterMonitor(driver, vm);
ignore_value(qemuMonitorMigrateCancel(priv->mon));
qemuDomainObjExitMonitor(driver, vm);
/* resume the domain but only if it was paused as a result of
(reason == VIR_DOMAIN_PAUSED_MIGRATION ||
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
if (qemuProcessStartCPUs(driver, vm, conn,
- VIR_DOMAIN_RUNNING_UNPAUSED) < 0) {
+ VIR_DOMAIN_RUNNING_UNPAUSED,
+ QEMU_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s", vm->def->name);
}
}
(reason == VIR_DOMAIN_PAUSED_MIGRATION ||
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
if (qemuProcessStartCPUs(driver, vm, conn,
- VIR_DOMAIN_RUNNING_UNPAUSED) < 0) {
+ VIR_DOMAIN_RUNNING_UNPAUSED,
+ QEMU_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s", vm->def->name);
}
}
case QEMU_ASYNC_JOB_SAVE:
case QEMU_ASYNC_JOB_DUMP:
- ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ qemuDomainObjEnterMonitor(driver, vm);
ignore_value(qemuMonitorMigrateCancel(priv->mon));
qemuDomainObjExitMonitor(driver, vm);
/* resume the domain but only if it was paused as a result of
- * running save/dump operation */
+ * a running save/dump operation. Although we are recovering an
+ * async job, this function is run at startup and must resume
+ * things using sync monitor connections. */
if (state == VIR_DOMAIN_PAUSED &&
((job->asyncJob == QEMU_ASYNC_JOB_DUMP &&
reason == VIR_DOMAIN_PAUSED_DUMP) ||
reason == VIR_DOMAIN_PAUSED_SAVE) ||
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
if (qemuProcessStartCPUs(driver, vm, conn,
- VIR_DOMAIN_RUNNING_UNPAUSED) < 0) {
+ VIR_DOMAIN_RUNNING_UNPAUSED,
+ QEMU_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s after", vm->def->name);
}
}
goto cleanup;
}
+ /* Technically, qemuProcessStart can be called from inside
+ * QEMU_ASYNC_JOB_MIGRATION_IN, but we are okay treating this like
+ * a sync job since no other job can call into the domain until
+ * migration completes. */
VIR_DEBUG("Setting initial memory amount");
cur_balloon = vm->def->mem.cur_balloon;
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuMonitorSetBalloon(priv->mon, cur_balloon) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
goto cleanup;
VIR_DEBUG("Starting domain CPUs");
/* Allow the CPUS to start executing */
if (qemuProcessStartCPUs(driver, vm, conn,
- VIR_DOMAIN_RUNNING_BOOTED) < 0) {
+ VIR_DOMAIN_RUNNING_BOOTED,
+ QEMU_ASYNC_JOB_NONE) < 0) {
if (virGetLastError() == NULL)
qemuReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("resume operation failed"));
}
VIR_DEBUG("Getting initial memory amount");
- ignore_value(qemuDomainObjEnterMonitorWithDriver(driver, vm));
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuMonitorGetBalloonInfo(priv->mon, &vm->def->mem.cur_balloon) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
goto cleanup;
# define __QEMU_PROCESS_H__
# include "qemu_conf.h"
+# include "qemu_domain.h"
int qemuProcessPrepareMonitorChr(struct qemud_driver *driver,
virDomainChrSourceDefPtr monConfig,
int qemuProcessStartCPUs(struct qemud_driver *driver,
virDomainObjPtr vm,
virConnectPtr conn,
- virDomainRunningReason reason);
+ virDomainRunningReason reason,
+ enum qemuDomainAsyncJob asyncJob);
int qemuProcessStopCPUs(struct qemud_driver *driver,
virDomainObjPtr vm,
- virDomainPausedReason reason);
+ virDomainPausedReason reason,
+ enum qemuDomainAsyncJob asyncJob);
void qemuProcessAutostartAll(struct qemud_driver *driver);
void qemuProcessReconnectAll(virConnectPtr conn, struct qemud_driver *driver);