#include "domain_job.h"
+/* Human-readable names for virDomainJob values; order must stay in
+ * sync with the virDomainJob enum declared in domain_job.h. */
+VIR_ENUM_IMPL(virDomainJob,
+ VIR_JOB_LAST,
+ "none",
+ "query",
+ "destroy",
+ "suspend",
+ "modify",
+ "abort",
+ "migration operation",
+ "none", /* async job is never stored in job.active */
+ "async nested",
+);
+
+/* Human-readable names for virDomainAgentJob values; order must stay in
+ * sync with the virDomainAgentJob enum declared in domain_job.h. */
+VIR_ENUM_IMPL(virDomainAgentJob,
+ VIR_AGENT_JOB_LAST,
+ "none",
+ "query",
+ "modify",
+);
+
+/* Human-readable names for virDomainAsyncJob values; order must stay in
+ * sync with the virDomainAsyncJob enum declared in domain_job.h. */
+VIR_ENUM_IMPL(virDomainAsyncJob,
+ VIR_ASYNC_JOB_LAST,
+ "none",
+ "migration out",
+ "migration in",
+ "save",
+ "dump",
+ "snapshot",
+ "start",
+ "backup",
+);
+
virDomainJobData *
virDomainJobDataInit(virDomainJobDataPrivateDataCallbacks *cb)
{
#pragma once
#include "internal.h"
+#include "virenum.h"
+
+/* Only 1 job is allowed at any time
+ * A job includes *all* monitor commands, even those just querying
+ * information, not merely actions */
+typedef enum {
+ VIR_JOB_NONE = 0, /* Always set to 0 for easy if (jobActive) conditions */
+ VIR_JOB_QUERY, /* Doesn't change any state */
+ VIR_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */
+ VIR_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
+ VIR_JOB_MODIFY, /* May change state */
+ VIR_JOB_ABORT, /* Abort current async job */
+ VIR_JOB_MIGRATION_OP, /* Operation influencing outgoing migration */
+
+ /* The following two items must always be the last items before JOB_LAST */
+ VIR_JOB_ASYNC, /* Asynchronous job */
+ VIR_JOB_ASYNC_NESTED, /* Normal job within an async job */
+
+ VIR_JOB_LAST /* marker: must remain the last value */
+} virDomainJob;
+VIR_ENUM_DECL(virDomainJob);
+
+
+/* Currently only QEMU driver uses agent jobs */
+typedef enum {
+ VIR_AGENT_JOB_NONE = 0, /* No agent job. */
+ VIR_AGENT_JOB_QUERY, /* Does not change state of domain */
+ VIR_AGENT_JOB_MODIFY, /* May change state of domain */
+
+ VIR_AGENT_JOB_LAST /* marker: must remain the last value */
+} virDomainAgentJob;
+VIR_ENUM_DECL(virDomainAgentJob);
+
+
+/* Async job consists of a series of jobs that may change state. Independent
+ * jobs that do not change state (and possibly others if explicitly allowed by
+ * current async job) are allowed to be run even if async job is active.
+ * Currently supported by QEMU only. */
+typedef enum {
+ VIR_ASYNC_JOB_NONE = 0,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_IN,
+ VIR_ASYNC_JOB_SAVE,
+ VIR_ASYNC_JOB_DUMP,
+ VIR_ASYNC_JOB_SNAPSHOT,
+ VIR_ASYNC_JOB_START,
+ VIR_ASYNC_JOB_BACKUP,
+
+ VIR_ASYNC_JOB_LAST /* marker: must remain the last value */
+} virDomainAsyncJob;
+VIR_ENUM_DECL(virDomainAsyncJob);
+
typedef enum {
VIR_DOMAIN_JOB_STATUS_NONE = 0,
],
include_directories: [
conf_inc_dir,
+ util_inc_dir,
],
)
# hypervisor/domain_job.h
+virDomainAgentJobTypeToString;
+virDomainAsyncJobTypeFromString;
+virDomainAsyncJobTypeToString;
virDomainJobDataCopy;
virDomainJobDataFree;
virDomainJobDataInit;
virDomainJobStatusToType;
+virDomainJobTypeFromString;
+virDomainJobTypeToString;
# hypervisor/virclosecallbacks.h
#include "xen_common.h"
#include "driver.h"
#include "domain_validate.h"
+#include "domain_job.h"
#define VIR_FROM_THIS VIR_FROM_LIBXL
- The first API of a migration protocol (Prepare or Perform/Begin depending on
migration type and version) has to start migration job and keep it active:
- qemuMigrationJobStart(driver, vm, QEMU_JOB_MIGRATION_{IN,OUT});
+ qemuMigrationJobStart(driver, vm, VIR_JOB_MIGRATION_{IN,OUT});
qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_*);
...do work...
qemuMigrationJobContinue(vm);
- All consequent phases except for the last one have to keep the job active:
- if (!qemuMigrationJobIsActive(vm, QEMU_JOB_MIGRATION_{IN,OUT}))
+ if (!qemuMigrationJobIsActive(vm, VIR_JOB_MIGRATION_{IN,OUT}))
return;
qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_*);
...do work...
- The last migration phase finally finishes the migration job:
- if (!qemuMigrationJobIsActive(vm, QEMU_JOB_MIGRATION_{IN,OUT}))
+ if (!qemuMigrationJobIsActive(vm, VIR_JOB_MIGRATION_{IN,OUT}))
return;
qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_*);
...do work...
These functions are for use inside an asynchronous job; the caller
must check for a return of -1 (VM not running, so nothing to exit).
- Helper functions may also call this with QEMU_ASYNC_JOB_NONE when
+ Helper functions may also call this with VIR_ASYNC_JOB_NONE when
used from a sync job (such as when first starting a domain).
obj = qemuDomObjFromDomain(dom);
- qemuDomainObjBeginJob(obj, QEMU_JOB_TYPE);
+ qemuDomainObjBeginJob(obj, VIR_JOB_TYPE);
...do work...
obj = qemuDomObjFromDomain(dom);
- qemuDomainObjBeginJob(obj, QEMU_JOB_TYPE);
+ qemuDomainObjBeginJob(obj, VIR_JOB_TYPE);
...do prep work...
obj = qemuDomObjFromDomain(dom);
- qemuDomainObjBeginAgentJob(obj, QEMU_AGENT_JOB_TYPE);
+ qemuDomainObjBeginAgentJob(obj, VIR_AGENT_JOB_TYPE);
...do prep work...
obj = qemuDomObjFromDomain(dom);
- qemuDomainObjBeginAsyncJob(obj, QEMU_ASYNC_JOB_TYPE);
+ qemuDomainObjBeginAsyncJob(obj, VIR_ASYNC_JOB_TYPE);
qemuDomainObjSetAsyncJobMask(obj, allowedJobs);
...do prep work...
if (qemuDomainObjEnterMonitorAsync(driver, obj,
- QEMU_ASYNC_JOB_TYPE) < 0) {
+ VIR_ASYNC_JOB_TYPE) < 0) {
/* domain died in the meantime */
goto error;
}
while (!finished) {
if (qemuDomainObjEnterMonitorAsync(driver, obj,
- QEMU_ASYNC_JOB_TYPE) < 0) {
+ VIR_ASYNC_JOB_TYPE) < 0) {
/* domain died in the meantime */
goto error;
}
obj = qemuDomObjFromDomain(dom);
- qemuDomainObjBeginAsyncJob(obj, QEMU_ASYNC_JOB_TYPE);
+ qemuDomainObjBeginAsyncJob(obj, VIR_ASYNC_JOB_TYPE);
...do prep work...
if (qemuBlockStorageSourceCreate(vm, dd->store, dd->backingStore, NULL,
dd->crdata->srcdata[0],
- QEMU_ASYNC_JOB_BACKUP) < 0)
+ VIR_ASYNC_JOB_BACKUP) < 0)
return -1;
} else {
- if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0)
+ if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, VIR_ASYNC_JOB_BACKUP) < 0)
return -1;
rc = qemuBlockStorageSourceAttachApply(priv->mon, dd->crdata->srcdata[0]);
g_clear_pointer(&priv->backup, virDomainBackupDefFree);
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_BACKUP)
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_BACKUP)
qemuDomainObjEndAsyncJob(vm);
}
* infrastructure for async jobs. We'll allow standard modify-type jobs
* as the interlocking of conflicting operations is handled on the block
* job level */
- if (qemuDomainObjBeginAsyncJob(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP,
+ if (qemuDomainObjBeginAsyncJob(priv->driver, vm, VIR_ASYNC_JOB_BACKUP,
VIR_DOMAIN_JOB_OPERATION_BACKUP, flags) < 0)
return -1;
qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
- JOB_MASK(QEMU_JOB_SUSPEND) |
- JOB_MASK(QEMU_JOB_MODIFY)));
+ JOB_MASK(VIR_JOB_SUSPEND) |
+ JOB_MASK(VIR_JOB_MODIFY)));
qemuDomainJobSetStatsType(priv->job.current,
QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP);
goto endjob;
}
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_BACKUP)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_BACKUP)))
goto endjob;
if ((ndd = qemuBackupDiskPrepareData(vm, def, blockNamedNodeData, actions,
priv->backup = g_steal_pointer(&def);
- if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0)
+ if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, VIR_ASYNC_JOB_BACKUP) < 0)
goto endjob;
if (pull) {
}
if (pull) {
- if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0)
+ if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, VIR_ASYNC_JOB_BACKUP) < 0)
goto endjob;
/* note that if the export fails we've already created the checkpoint
* and we will not delete it */
qemuDomainObjExitMonitor(vm);
if (rc < 0) {
- qemuBackupJobCancelBlockjobs(vm, priv->backup, false, QEMU_ASYNC_JOB_BACKUP);
+ qemuBackupJobCancelBlockjobs(vm, priv->backup, false, VIR_ASYNC_JOB_BACKUP);
goto endjob;
}
}
qemuCheckpointRollbackMetadata(vm, chk);
if (!job_started && (nbd_running || tlsAlias || tlsSecretAlias) &&
- qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) == 0) {
+ qemuDomainObjEnterMonitorAsync(priv->driver, vm, VIR_ASYNC_JOB_BACKUP) == 0) {
if (nbd_running)
ignore_value(qemuMonitorNBDServerStop(priv->mon));
if (tlsAlias)
int
qemuBlockNodeNamesDetect(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(GHashTable) disktable = NULL;
int
qemuBlockStorageSourceDetachOneBlockdev(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virStorageSource *src)
{
int ret;
virStorageSource *src,
virStorageSource *chain,
bool storageCreate,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(virJSONValue) props = createProps;
qemuDomainObjPrivate *priv = vm->privateData;
qemuBlockStorageSourceCreateStorage(virDomainObj *vm,
virStorageSource *src,
virStorageSource *chain,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
int actualType = virStorageSourceGetActualType(src);
g_autoptr(virJSONValue) createstorageprops = NULL;
virStorageSource *src,
virStorageSource *backingStore,
virStorageSource *chain,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(virJSONValue) createformatprops = NULL;
int ret;
virStorageSource *backingStore,
virStorageSource *chain,
qemuBlockStorageSourceAttachData *data,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int ret = -1;
GHashTable *
qemuBlockGetNamedNodeData(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virQEMUDriver *driver = priv->driver;
static int
qemuBlockReopenFormat(virDomainObj *vm,
virStorageSource *src,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virQEMUDriver *driver = priv->driver;
int
qemuBlockReopenReadWrite(virDomainObj *vm,
virStorageSource *src,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
if (!src->readonly)
return 0;
int
qemuBlockReopenReadOnly(virDomainObj *vm,
virStorageSource *src,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
if (src->readonly)
return 0;
int
qemuBlockNodeNamesDetect(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
GHashTable *
qemuBlockGetNodeData(virJSONValue *data);
int
qemuBlockStorageSourceDetachOneBlockdev(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virStorageSource *src);
struct _qemuBlockStorageSourceChainData {
virStorageSource *backingStore,
virStorageSource *chain,
qemuBlockStorageSourceAttachData *data,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int
qemuBlockStorageSourceCreateDetectSize(GHashTable *blockNamedNodeData,
GHashTable *
qemuBlockGetNamedNodeData(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int
qemuBlockGetBitmapMergeActions(virStorageSource *topsrc,
int
qemuBlockReopenReadWrite(virDomainObj *vm,
virStorageSource *src,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int
qemuBlockReopenReadOnly(virDomainObj *vm,
virStorageSource *src,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
bool
qemuBlockStorageSourceNeedsStorageSliceLayer(const virStorageSource *src);
job->reconnected = true;
if (job->newstate != -1)
- qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
/* 'job' may be invalid after this update */
}
static void
qemuBlockJobEventProcessConcludedRemoveChain(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virStorageSource *chain)
{
g_autoptr(qemuBlockStorageSourceChainData) data = NULL;
static int
qemuBlockJobProcessEventCompletedPullBitmaps(virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(GHashTable) blockNamedNodeData = NULL;
qemuBlockJobProcessEventCompletedPull(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
virStorageSource *base = NULL;
virStorageSource *baseparent = NULL;
static int
qemuBlockJobProcessEventCompletedCommitBitmaps(virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(GHashTable) blockNamedNodeData = NULL;
qemuBlockJobProcessEventCompletedCommit(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
virStorageSource *baseparent = NULL;
virDomainDiskDef *cfgdisk = NULL;
qemuBlockJobProcessEventCompletedActiveCommit(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
virStorageSource *baseparent = NULL;
virDomainDiskDef *cfgdisk = NULL;
static int
qemuBlockJobProcessEventCompletedCopyBitmaps(virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(GHashTable) blockNamedNodeData = NULL;
qemuBlockJobProcessEventConcludedCopyPivot(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
VIR_DEBUG("copy job '%s' on VM '%s' pivoted", job->name, vm->def->name);
qemuBlockJobProcessEventConcludedCopyAbort(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuBlockJobProcessEventFailedActiveCommit(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virDomainDiskDef *disk = job->disk;
qemuBlockJobProcessEventConcludedCreate(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(qemuBlockStorageSourceAttachData) backend = NULL;
qemuBlockJobProcessEventConcludedBackup(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
qemuBlockjobState newstate,
unsigned long long progressCurrent,
unsigned long long progressTotal)
qemuBlockJobEventProcessConcludedTransition(qemuBlockJobData *job,
virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
unsigned long long progressCurrent,
unsigned long long progressTotal)
{
qemuBlockJobEventProcessConcluded(qemuBlockJobData *job,
virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuMonitorJobInfo **jobinfo = NULL;
size_t njobinfo = 0;
qemuBlockJobEventProcess(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
switch ((qemuBlockjobState) job->newstate) {
actions = virJSONValueNewArray();
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
return -1;
for (i = 0; i < chkdef->ndisks; i++) {
goto relabel;
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_REOPEN) &&
- qemuBlockReopenReadWrite(vm, src, QEMU_ASYNC_JOB_NONE) < 0)
+ qemuBlockReopenReadWrite(vm, src, VIR_ASYNC_JOB_NONE) < 0)
goto relabel;
relabelimages = g_slist_prepend(relabelimages, src);
virStorageSource *src = next->data;
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_REOPEN))
- ignore_value(qemuBlockReopenReadOnly(vm, src, QEMU_ASYNC_JOB_NONE));
+ ignore_value(qemuBlockReopenReadOnly(vm, src, VIR_ASYNC_JOB_NONE));
ignore_value(qemuDomainStorageSourceAccessAllow(driver, vm, src,
true, false, false));
if (virDomainObjCheckActive(vm) < 0)
return -1;
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
return -1;
for (i = 0; i < chkdef->ndisks; i++) {
/* Unlike snapshots, the RNG schema already ensured a sane filename. */
/* We are going to modify the domain below. */
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return NULL;
if (redefine) {
size_t i;
int ret = -1;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
goto endjob;
- if (!(nodedataMerge = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+ if (!(nodedataMerge = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
goto endjob;
/* enumerate disks relevant for the checkpoint which are also present in the
goto endjob;
/* now do a final refresh */
- if (!(nodedataStats = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+ if (!(nodedataStats = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
goto endjob;
qemuDomainObjEnterMonitor(driver, vm);
VIR_DOMAIN_CHECKPOINT_DELETE_METADATA_ONLY |
VIR_DOMAIN_CHECKPOINT_DELETE_CHILDREN_ONLY, -1);
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return -1;
if (!metadata_only) {
{
qemuDomainJobPrivate *priv = job->privateData;
- if (job->asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
+ if (job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
if (qemuDomainObjPrivateXMLFormatNBDMigration(buf, vm) < 0)
return -1;
return -1;
if (n > 0) {
- if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+ if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
VIR_WARN("Found disks marked for migration but we were not "
"migrating");
n = 0;
static int
qemuDomainObjEnterMonitorInternal(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = obj->privateData;
- if (asyncJob != QEMU_ASYNC_JOB_NONE) {
+ if (asyncJob != VIR_ASYNC_JOB_NONE) {
int ret;
if ((ret = qemuDomainObjBeginNestedJob(driver, obj, asyncJob)) < 0)
return ret;
} else if (priv->job.owner != virThreadSelfID()) {
VIR_WARN("Entering a monitor without owning a job. "
"Job %s owner %s (%llu)",
- qemuDomainJobTypeToString(priv->job.active),
+ virDomainJobTypeToString(priv->job.active),
priv->job.ownerAPI, priv->job.owner);
}
if (!hasRefs)
priv->mon = NULL;
- if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
+ if (priv->job.active == VIR_JOB_ASYNC_NESTED)
qemuDomainObjEndJob(obj);
}
virDomainObj *obj)
{
ignore_value(qemuDomainObjEnterMonitorInternal(driver, obj,
- QEMU_ASYNC_JOB_NONE));
+ VIR_ASYNC_JOB_NONE));
}
/*
* To be called immediately before any QEMU monitor API call.
* Must have already either called qemuDomainObjBeginJob()
* and checked that the VM is still active, with asyncJob of
- * QEMU_ASYNC_JOB_NONE; or already called qemuDomainObjBeginAsyncJob,
+ * VIR_ASYNC_JOB_NONE; or already called qemuDomainObjBeginAsyncJob,
* with the same asyncJob.
*
* Returns 0 if job was started, in which case this must be followed with
int
qemuDomainObjEnterMonitorAsync(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
return qemuDomainObjEnterMonitorInternal(driver, obj, asyncJob);
}
* qemuDomainRemoveInactiveJob:
*
* Just like qemuDomainRemoveInactive but it tries to grab a
- * QEMU_JOB_MODIFY first. Even though it doesn't succeed in
+ * VIR_JOB_MODIFY first. Even though it doesn't succeed in
* grabbing the job the control carries with
* qemuDomainRemoveInactive call.
*/
{
bool haveJob;
- haveJob = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) >= 0;
+ haveJob = qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) >= 0;
qemuDomainRemoveInactive(driver, vm);
{
bool haveJob;
- haveJob = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) >= 0;
+ haveJob = qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) >= 0;
qemuDomainRemoveInactiveLocked(driver, vm);
int
qemuDomainCheckMonitor(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int ret;
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
int qemuDomainObjEnterMonitorAsync(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;
int qemuDomainCheckMonitor(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
bool qemuDomainSupportsVideoVga(const virDomainVideoDef *video,
virQEMUCaps *qemuCaps);
VIR_LOG_INIT("qemu.qemu_domainjob");
-VIR_ENUM_IMPL(qemuDomainJob,
- QEMU_JOB_LAST,
- "none",
- "query",
- "destroy",
- "suspend",
- "modify",
- "abort",
- "migration operation",
- "none", /* async job is never stored in job.active */
- "async nested",
-);
-
-VIR_ENUM_IMPL(qemuDomainAgentJob,
- QEMU_AGENT_JOB_LAST,
- "none",
- "query",
- "modify",
-);
-
-VIR_ENUM_IMPL(qemuDomainAsyncJob,
- QEMU_ASYNC_JOB_LAST,
- "none",
- "migration out",
- "migration in",
- "save",
- "dump",
- "snapshot",
- "start",
- "backup",
-);
-
static void *
qemuJobDataAllocPrivateData(void)
{
const char *
-qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
+virDomainAsyncJobPhaseToString(virDomainAsyncJob job,
int phase G_GNUC_UNUSED)
{
switch (job) {
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
- case QEMU_ASYNC_JOB_MIGRATION_IN:
+ case VIR_ASYNC_JOB_MIGRATION_OUT:
+ case VIR_ASYNC_JOB_MIGRATION_IN:
return qemuMigrationJobPhaseTypeToString(phase);
- case QEMU_ASYNC_JOB_SAVE:
- case QEMU_ASYNC_JOB_DUMP:
- case QEMU_ASYNC_JOB_SNAPSHOT:
- case QEMU_ASYNC_JOB_START:
- case QEMU_ASYNC_JOB_NONE:
- case QEMU_ASYNC_JOB_BACKUP:
+ case VIR_ASYNC_JOB_SAVE:
+ case VIR_ASYNC_JOB_DUMP:
+ case VIR_ASYNC_JOB_SNAPSHOT:
+ case VIR_ASYNC_JOB_START:
+ case VIR_ASYNC_JOB_NONE:
+ case VIR_ASYNC_JOB_BACKUP:
G_GNUC_FALLTHROUGH;
- case QEMU_ASYNC_JOB_LAST:
+ case VIR_ASYNC_JOB_LAST:
break;
}
}
int
-qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
+virDomainAsyncJobPhaseFromString(virDomainAsyncJob job,
const char *phase)
{
if (!phase)
return 0;
switch (job) {
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
- case QEMU_ASYNC_JOB_MIGRATION_IN:
+ case VIR_ASYNC_JOB_MIGRATION_OUT:
+ case VIR_ASYNC_JOB_MIGRATION_IN:
return qemuMigrationJobPhaseTypeFromString(phase);
- case QEMU_ASYNC_JOB_SAVE:
- case QEMU_ASYNC_JOB_DUMP:
- case QEMU_ASYNC_JOB_SNAPSHOT:
- case QEMU_ASYNC_JOB_START:
- case QEMU_ASYNC_JOB_NONE:
- case QEMU_ASYNC_JOB_BACKUP:
+ case VIR_ASYNC_JOB_SAVE:
+ case VIR_ASYNC_JOB_DUMP:
+ case VIR_ASYNC_JOB_SNAPSHOT:
+ case VIR_ASYNC_JOB_START:
+ case VIR_ASYNC_JOB_NONE:
+ case VIR_ASYNC_JOB_BACKUP:
G_GNUC_FALLTHROUGH;
- case QEMU_ASYNC_JOB_LAST:
+ case VIR_ASYNC_JOB_LAST:
break;
}
static void
qemuDomainObjResetJob(qemuDomainJobObj *job)
{
- job->active = QEMU_JOB_NONE;
+ job->active = VIR_JOB_NONE;
job->owner = 0;
g_clear_pointer(&job->ownerAPI, g_free);
job->started = 0;
static void
qemuDomainObjResetAgentJob(qemuDomainJobObj *job)
{
- job->agentActive = QEMU_AGENT_JOB_NONE;
+ job->agentActive = VIR_AGENT_JOB_NONE;
job->agentOwner = 0;
g_clear_pointer(&job->agentOwnerAPI, g_free);
job->agentStarted = 0;
static void
qemuDomainObjResetAsyncJob(qemuDomainJobObj *job)
{
- job->asyncJob = QEMU_ASYNC_JOB_NONE;
+ job->asyncJob = VIR_ASYNC_JOB_NONE;
job->asyncOwner = 0;
g_clear_pointer(&job->asyncOwnerAPI, g_free);
job->asyncStarted = 0;
}
bool
-qemuDomainTrackJob(qemuDomainJob job)
+qemuDomainTrackJob(virDomainJob job)
{
return (QEMU_DOMAIN_TRACK_JOBS & JOB_MASK(job)) != 0;
}
return;
VIR_DEBUG("Setting '%s' phase to '%s'",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
- qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, phase));
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobPhaseToString(priv->job.asyncJob, phase));
if (priv->job.asyncOwner == 0) {
priv->job.asyncOwnerAPI = g_strdup(virThreadJobGet());
} else if (me != priv->job.asyncOwner) {
VIR_WARN("'%s' async job is owned by thread %llu",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
priv->job.asyncOwner);
}
if (!priv->job.asyncJob)
return;
- priv->job.mask = allowedJobs | JOB_MASK(QEMU_JOB_DESTROY);
+ priv->job.mask = allowedJobs | JOB_MASK(VIR_JOB_DESTROY);
}
void
{
qemuDomainObjPrivate *priv = obj->privateData;
- if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
+ if (priv->job.active == VIR_JOB_ASYNC_NESTED)
qemuDomainObjResetJob(&priv->job);
qemuDomainObjResetAsyncJob(&priv->job);
qemuDomainSaveStatus(obj);
qemuDomainObjPrivate *priv = obj->privateData;
VIR_DEBUG("Releasing ownership of '%s' async job",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+ virDomainAsyncJobTypeToString(priv->job.asyncJob));
if (priv->job.asyncOwner != virThreadSelfID()) {
VIR_WARN("'%s' async job is owned by thread %llu",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
priv->job.asyncOwner);
}
priv->job.asyncOwner = 0;
}
static bool
-qemuDomainNestedJobAllowed(qemuDomainJobObj *jobs, qemuDomainJob newJob)
+qemuDomainNestedJobAllowed(qemuDomainJobObj *jobs, virDomainJob newJob)
{
return !jobs->asyncJob ||
- newJob == QEMU_JOB_NONE ||
+ newJob == VIR_JOB_NONE ||
(jobs->mask & JOB_MASK(newJob)) != 0;
}
static bool
qemuDomainObjCanSetJob(qemuDomainJobObj *job,
- qemuDomainJob newJob,
- qemuDomainAgentJob newAgentJob)
+ virDomainJob newJob,
+ virDomainAgentJob newAgentJob)
{
- return ((newJob == QEMU_JOB_NONE ||
- job->active == QEMU_JOB_NONE) &&
- (newAgentJob == QEMU_AGENT_JOB_NONE ||
- job->agentActive == QEMU_AGENT_JOB_NONE));
+ return ((newJob == VIR_JOB_NONE ||
+ job->active == VIR_JOB_NONE) &&
+ (newAgentJob == VIR_AGENT_JOB_NONE ||
+ job->agentActive == VIR_AGENT_JOB_NONE));
}
/* Give up waiting for mutex after 30 seconds */
* qemuDomainObjBeginJobInternal:
* @driver: qemu driver
* @obj: domain object
- * @job: qemuDomainJob to start
- * @asyncJob: qemuDomainAsyncJob to start
+ * @job: virDomainJob to start
+ * @asyncJob: virDomainAsyncJob to start
* @nowait: don't wait trying to acquire @job
*
* Acquires job for a domain object which must be locked before
static int ATTRIBUTE_NONNULL(1)
qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainJob job,
- qemuDomainAgentJob agentJob,
- qemuDomainAsyncJob asyncJob,
+ virDomainJob job,
+ virDomainAgentJob agentJob,
+ virDomainAsyncJob asyncJob,
bool nowait)
{
qemuDomainObjPrivate *priv = obj->privateData;
unsigned long long now;
unsigned long long then;
- bool nested = job == QEMU_JOB_ASYNC_NESTED;
- bool async = job == QEMU_JOB_ASYNC;
+ bool nested = job == VIR_JOB_ASYNC_NESTED;
+ bool async = job == VIR_JOB_ASYNC;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
const char *blocker = NULL;
const char *agentBlocker = NULL;
VIR_DEBUG("Starting job: API=%s job=%s agentJob=%s asyncJob=%s "
"(vm=%p name=%s, current job=%s agentJob=%s async=%s)",
NULLSTR(currentAPI),
- qemuDomainJobTypeToString(job),
- qemuDomainAgentJobTypeToString(agentJob),
- qemuDomainAsyncJobTypeToString(asyncJob),
+ virDomainJobTypeToString(job),
+ virDomainAgentJobTypeToString(agentJob),
+ virDomainAsyncJobTypeToString(asyncJob),
obj, obj->def->name,
- qemuDomainJobTypeToString(priv->job.active),
- qemuDomainAgentJobTypeToString(priv->job.agentActive),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+ virDomainJobTypeToString(priv->job.active),
+ virDomainAgentJobTypeToString(priv->job.agentActive),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob));
if (virTimeMillisNow(&now) < 0)
return -1;
then = now + QEMU_JOB_WAIT_TIME;
retry:
- if ((!async && job != QEMU_JOB_DESTROY) &&
+ if ((!async && job != VIR_JOB_DESTROY) &&
cfg->maxQueuedJobs &&
priv->job.jobsQueued > cfg->maxQueuedJobs) {
goto error;
if (job) {
qemuDomainObjResetJob(&priv->job);
- if (job != QEMU_JOB_ASYNC) {
+ if (job != VIR_JOB_ASYNC) {
VIR_DEBUG("Started job: %s (async=%s vm=%p name=%s)",
- qemuDomainJobTypeToString(job),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainJobTypeToString(job),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
obj, obj->def->name);
priv->job.active = job;
priv->job.owner = virThreadSelfID();
priv->job.started = now;
} else {
VIR_DEBUG("Started async job: %s (vm=%p name=%s)",
- qemuDomainAsyncJobTypeToString(asyncJob),
+ virDomainAsyncJobTypeToString(asyncJob),
obj, obj->def->name);
qemuDomainObjResetAsyncJob(&priv->job);
priv->job.current = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);
qemuDomainObjResetAgentJob(&priv->job);
VIR_DEBUG("Started agent job: %s (vm=%p name=%s job=%s async=%s)",
- qemuDomainAgentJobTypeToString(agentJob),
+ virDomainAgentJobTypeToString(agentJob),
obj, obj->def->name,
- qemuDomainJobTypeToString(priv->job.active),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+ virDomainJobTypeToString(priv->job.active),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob));
priv->job.agentActive = agentJob;
priv->job.agentOwner = virThreadSelfID();
priv->job.agentOwnerAPI = g_strdup(virThreadJobGet());
"current job is (%s, %s, %s) "
"owned by (%llu %s, %llu %s, %llu %s (flags=0x%lx)) "
"for (%llus, %llus, %llus)",
- qemuDomainJobTypeToString(job),
- qemuDomainAgentJobTypeToString(agentJob),
- qemuDomainAsyncJobTypeToString(asyncJob),
+ virDomainJobTypeToString(job),
+ virDomainAgentJobTypeToString(agentJob),
+ virDomainAsyncJobTypeToString(asyncJob),
NULLSTR(currentAPI),
obj->def->name,
- qemuDomainJobTypeToString(priv->job.active),
- qemuDomainAgentJobTypeToString(priv->job.agentActive),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainJobTypeToString(priv->job.active),
+ virDomainAgentJobTypeToString(priv->job.agentActive),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
priv->job.owner, NULLSTR(priv->job.ownerAPI),
priv->job.agentOwner, NULLSTR(priv->job.agentOwnerAPI),
priv->job.asyncOwner, NULLSTR(priv->job.asyncOwnerAPI),
*/
int qemuDomainObjBeginJob(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainJob job)
+ virDomainJob job)
{
if (qemuDomainObjBeginJobInternal(driver, obj, job,
- QEMU_AGENT_JOB_NONE,
- QEMU_ASYNC_JOB_NONE, false) < 0)
+ VIR_AGENT_JOB_NONE,
+ VIR_ASYNC_JOB_NONE, false) < 0)
return -1;
return 0;
}
int
qemuDomainObjBeginAgentJob(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAgentJob agentJob)
+ virDomainAgentJob agentJob)
{
- return qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_NONE,
+ return qemuDomainObjBeginJobInternal(driver, obj, VIR_JOB_NONE,
agentJob,
- QEMU_ASYNC_JOB_NONE, false);
+ VIR_ASYNC_JOB_NONE, false);
}
int qemuDomainObjBeginAsyncJob(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virDomainJobOperation operation,
unsigned long apiFlags)
{
qemuDomainObjPrivate *priv;
- if (qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_ASYNC,
- QEMU_AGENT_JOB_NONE,
+ if (qemuDomainObjBeginJobInternal(driver, obj, VIR_JOB_ASYNC,
+ VIR_AGENT_JOB_NONE,
asyncJob, false) < 0)
return -1;
int
qemuDomainObjBeginNestedJob(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = obj->privateData;
}
return qemuDomainObjBeginJobInternal(driver, obj,
- QEMU_JOB_ASYNC_NESTED,
- QEMU_AGENT_JOB_NONE,
- QEMU_ASYNC_JOB_NONE,
+ VIR_JOB_ASYNC_NESTED,
+ VIR_AGENT_JOB_NONE,
+ VIR_ASYNC_JOB_NONE,
false);
}
*
* @driver: qemu driver
* @obj: domain object
- * @job: qemuDomainJob to start
+ * @job: virDomainJob to start
*
* Acquires job for a domain object which must be locked before
* calling. If there's already a job running it returns
int
qemuDomainObjBeginJobNowait(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainJob job)
+ virDomainJob job)
{
return qemuDomainObjBeginJobInternal(driver, obj, job,
- QEMU_AGENT_JOB_NONE,
- QEMU_ASYNC_JOB_NONE, true);
+ VIR_AGENT_JOB_NONE,
+ VIR_ASYNC_JOB_NONE, true);
}
/*
qemuDomainObjEndJob(virDomainObj *obj)
{
qemuDomainObjPrivate *priv = obj->privateData;
- qemuDomainJob job = priv->job.active;
+ virDomainJob job = priv->job.active;
priv->job.jobsQueued--;
VIR_DEBUG("Stopping job: %s (async=%s vm=%p name=%s)",
- qemuDomainJobTypeToString(job),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainJobTypeToString(job),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
obj, obj->def->name);
qemuDomainObjResetJob(&priv->job);
qemuDomainObjEndAgentJob(virDomainObj *obj)
{
qemuDomainObjPrivate *priv = obj->privateData;
- qemuDomainAgentJob agentJob = priv->job.agentActive;
+ virDomainAgentJob agentJob = priv->job.agentActive;
priv->job.jobsQueued--;
VIR_DEBUG("Stopping agent job: %s (async=%s vm=%p name=%s)",
- qemuDomainAgentJobTypeToString(agentJob),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAgentJobTypeToString(agentJob),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
obj, obj->def->name);
qemuDomainObjResetAgentJob(&priv->job);
priv->job.jobsQueued--;
VIR_DEBUG("Stopping async job: %s (vm=%p name=%s)",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
obj, obj->def->name);
qemuDomainObjResetAsyncJob(&priv->job);
qemuDomainObjPrivate *priv = obj->privateData;
VIR_DEBUG("Requesting abort of async job: %s (vm=%p name=%s)",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
obj, obj->def->name);
priv->job.abortJob = true;
qemuDomainObjPrivate *priv = vm->privateData;
g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER;
g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf);
- qemuDomainJob job = priv->job.active;
+ virDomainJob job = priv->job.active;
if (!qemuDomainTrackJob(job))
- job = QEMU_JOB_NONE;
+ job = VIR_JOB_NONE;
- if (job == QEMU_JOB_NONE &&
- priv->job.asyncJob == QEMU_ASYNC_JOB_NONE)
+ if (job == VIR_JOB_NONE &&
+ priv->job.asyncJob == VIR_ASYNC_JOB_NONE)
return 0;
virBufferAsprintf(&attrBuf, " type='%s' async='%s'",
- qemuDomainJobTypeToString(job),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+ virDomainJobTypeToString(job),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob));
if (priv->job.phase) {
virBufferAsprintf(&attrBuf, " phase='%s'",
- qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
+ virDomainAsyncJobPhaseToString(priv->job.asyncJob,
priv->job.phase));
}
- if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE)
+ if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE)
virBufferAsprintf(&attrBuf, " flags='0x%lx'", priv->job.apiFlags);
if (priv->job.cb &&
if ((tmp = virXPathString("string(@type)", ctxt))) {
int type;
- if ((type = qemuDomainJobTypeFromString(tmp)) < 0) {
+ if ((type = virDomainJobTypeFromString(tmp)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown job type %s"), tmp);
return -1;
if ((tmp = virXPathString("string(@async)", ctxt))) {
int async;
- if ((async = qemuDomainAsyncJobTypeFromString(tmp)) < 0) {
+ if ((async = virDomainAsyncJobTypeFromString(tmp)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown async job type %s"), tmp);
return -1;
priv->job.asyncJob = async;
if ((tmp = virXPathString("string(@phase)", ctxt))) {
- priv->job.phase = qemuDomainAsyncJobPhaseFromString(async, tmp);
+ priv->job.phase = virDomainAsyncJobPhaseFromString(async, tmp);
if (priv->job.phase < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown job phase %s"), tmp);
#define JOB_MASK(job) (job == 0 ? 0 : 1 << (job - 1))
#define QEMU_JOB_DEFAULT_MASK \
- (JOB_MASK(QEMU_JOB_QUERY) | \
- JOB_MASK(QEMU_JOB_DESTROY) | \
- JOB_MASK(QEMU_JOB_ABORT))
+ (JOB_MASK(VIR_JOB_QUERY) | \
+ JOB_MASK(VIR_JOB_DESTROY) | \
+ JOB_MASK(VIR_JOB_ABORT))
/* Jobs which have to be tracked in domain state XML. */
#define QEMU_DOMAIN_TRACK_JOBS \
- (JOB_MASK(QEMU_JOB_DESTROY) | \
- JOB_MASK(QEMU_JOB_ASYNC))
-
-/* Only 1 job is allowed at any time
- * A job includes *all* monitor commands, even those just querying
- * information, not merely actions */
-typedef enum {
- QEMU_JOB_NONE = 0, /* Always set to 0 for easy if (jobActive) conditions */
- QEMU_JOB_QUERY, /* Doesn't change any state */
- QEMU_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */
- QEMU_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
- QEMU_JOB_MODIFY, /* May change state */
- QEMU_JOB_ABORT, /* Abort current async job */
- QEMU_JOB_MIGRATION_OP, /* Operation influencing outgoing migration */
-
- /* The following two items must always be the last items before JOB_LAST */
- QEMU_JOB_ASYNC, /* Asynchronous job */
- QEMU_JOB_ASYNC_NESTED, /* Normal job within an async job */
-
- QEMU_JOB_LAST
-} qemuDomainJob;
-VIR_ENUM_DECL(qemuDomainJob);
-
-typedef enum {
- QEMU_AGENT_JOB_NONE = 0, /* No agent job. */
- QEMU_AGENT_JOB_QUERY, /* Does not change state of domain */
- QEMU_AGENT_JOB_MODIFY, /* May change state of domain */
-
- QEMU_AGENT_JOB_LAST
-} qemuDomainAgentJob;
-VIR_ENUM_DECL(qemuDomainAgentJob);
-
-/* Async job consists of a series of jobs that may change state. Independent
- * jobs that do not change state (and possibly others if explicitly allowed by
- * current async job) are allowed to be run even if async job is active.
- */
-typedef enum {
- QEMU_ASYNC_JOB_NONE = 0,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
- QEMU_ASYNC_JOB_MIGRATION_IN,
- QEMU_ASYNC_JOB_SAVE,
- QEMU_ASYNC_JOB_DUMP,
- QEMU_ASYNC_JOB_SNAPSHOT,
- QEMU_ASYNC_JOB_START,
- QEMU_ASYNC_JOB_BACKUP,
-
- QEMU_ASYNC_JOB_LAST
-} qemuDomainAsyncJob;
-VIR_ENUM_DECL(qemuDomainAsyncJob);
+ (JOB_MASK(VIR_JOB_DESTROY) | \
+ JOB_MASK(VIR_JOB_ASYNC))
typedef enum {
int jobsQueued;
- /* The following members are for QEMU_JOB_* */
- qemuDomainJob active; /* Currently running job */
+ /* The following members are for VIR_JOB_* */
+ virDomainJob active; /* Currently running job */
unsigned long long owner; /* Thread id which set current job */
char *ownerAPI; /* The API which owns the job */
unsigned long long started; /* When the current job started */
- /* The following members are for QEMU_AGENT_JOB_* */
- qemuDomainAgentJob agentActive; /* Currently running agent job */
+ /* The following members are for VIR_AGENT_JOB_* */
+ virDomainAgentJob agentActive; /* Currently running agent job */
unsigned long long agentOwner; /* Thread id which set current agent job */
char *agentOwnerAPI; /* The API which owns the agent job */
unsigned long long agentStarted; /* When the current agent job started */
- /* The following members are for QEMU_ASYNC_JOB_* */
+ /* The following members are for VIR_ASYNC_JOB_* */
virCond asyncCond; /* Use to coordinate with async jobs */
- qemuDomainAsyncJob asyncJob; /* Currently active async job */
+ virDomainAsyncJob asyncJob; /* Currently active async job */
unsigned long long asyncOwner; /* Thread which set current async job */
char *asyncOwnerAPI; /* The API which owns the async job */
unsigned long long asyncStarted; /* When the current async job started */
void qemuDomainJobSetStatsType(virDomainJobData *jobData,
qemuDomainJobStatsType type);
-const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
+const char *virDomainAsyncJobPhaseToString(virDomainAsyncJob job,
int phase);
-int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
+int virDomainAsyncJobPhaseFromString(virDomainAsyncJob job,
const char *phase);
void qemuDomainEventEmitJobCompleted(virQEMUDriver *driver,
int qemuDomainObjBeginJob(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainJob job)
+ virDomainJob job)
G_GNUC_WARN_UNUSED_RESULT;
int qemuDomainObjBeginAgentJob(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAgentJob agentJob)
+ virDomainAgentJob agentJob)
G_GNUC_WARN_UNUSED_RESULT;
int qemuDomainObjBeginAsyncJob(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virDomainJobOperation operation,
unsigned long apiFlags)
G_GNUC_WARN_UNUSED_RESULT;
int qemuDomainObjBeginNestedJob(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
G_GNUC_WARN_UNUSED_RESULT;
int qemuDomainObjBeginJobNowait(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainJob job)
+ virDomainJob job)
G_GNUC_WARN_UNUSED_RESULT;
void qemuDomainObjEndJob(virDomainObj *obj);
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2)
ATTRIBUTE_NONNULL(3) ATTRIBUTE_NONNULL(4);
-bool qemuDomainTrackJob(qemuDomainJob job);
+bool qemuDomainTrackJob(virDomainJob job);
void qemuDomainObjClearJob(qemuDomainJobObj *job);
G_DEFINE_AUTO_CLEANUP_CLEAR_FUNC(qemuDomainJobObj, qemuDomainObjClearJob);
virQEMUDriver *driver,
virDomainObj *vm,
unsigned int flags,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
static int qemuDomainManagedSaveLoad(virDomainObj *vm,
void *opaque);
}
if (qemuDomainObjStart(NULL, driver, vm, flags,
- QEMU_ASYNC_JOB_START) < 0) {
+ VIR_ASYNC_JOB_START) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Failed to autostart VM '%s': %s"),
vm->def->name, virGetLastErrorMessage());
goto cleanup;
}
- if (qemuProcessStart(conn, driver, vm, NULL, QEMU_ASYNC_JOB_START,
+ if (qemuProcessStart(conn, driver, vm, NULL, VIR_ASYNC_JOB_START,
NULL, -1, NULL, NULL,
VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
start_flags) < 0) {
priv = vm->privateData;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_SUSPEND) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_SUSPEND) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
goto endjob;
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
reason = VIR_DOMAIN_PAUSED_MIGRATION;
- else if (priv->job.asyncJob == QEMU_ASYNC_JOB_SNAPSHOT)
+ else if (priv->job.asyncJob == VIR_ASYNC_JOB_SNAPSHOT)
reason = VIR_DOMAIN_PAUSED_SNAPSHOT;
else
reason = VIR_DOMAIN_PAUSED_USER;
goto endjob;
}
if (state != VIR_DOMAIN_PAUSED) {
- if (qemuProcessStopCPUs(driver, vm, reason, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessStopCPUs(driver, vm, reason, VIR_ASYNC_JOB_NONE) < 0)
goto endjob;
}
qemuDomainSaveStatus(vm);
if (virDomainResumeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
state == VIR_DOMAIN_PAUSED) {
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
- QEMU_ASYNC_JOB_NONE) < 0) {
+ VIR_ASYNC_JOB_NONE) < 0) {
if (virGetLastErrorCode() == VIR_ERR_OK)
virReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("resume operation failed"));
QEMU_AGENT_SHUTDOWN_POWERDOWN;
if (qemuDomainObjBeginAgentJob(driver, vm,
- QEMU_AGENT_JOB_MODIFY) < 0)
+ VIR_AGENT_JOB_MODIFY) < 0)
return -1;
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) {
priv = vm->privateData;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return -1;
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) {
agentFlag = QEMU_AGENT_SHUTDOWN_POWERDOWN;
if (qemuDomainObjBeginAgentJob(driver, vm,
- QEMU_AGENT_JOB_MODIFY) < 0)
+ VIR_AGENT_JOB_MODIFY) < 0)
return -1;
if (!qemuDomainAgentAvailable(vm, agentForced))
int ret = -1;
if (qemuDomainObjBeginJob(driver, vm,
- QEMU_JOB_MODIFY) < 0)
+ VIR_JOB_MODIFY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainResetEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
reason == VIR_DOMAIN_PAUSED_STARTING_UP &&
!priv->beingDestroyed);
- if (qemuProcessBeginStopJob(driver, vm, QEMU_JOB_DESTROY,
+ if (qemuProcessBeginStopJob(driver, vm, VIR_JOB_DESTROY,
!(flags & VIR_DOMAIN_DESTROY_GRACEFUL)) < 0)
goto cleanup;
qemuDomainSetFakeReboot(vm, false);
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_DESTROYED,
- QEMU_ASYNC_JOB_NONE, stopFlags);
+ VIR_ASYNC_JOB_NONE, stopFlags);
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_DESTROYED);
if (virDomainSetMemoryFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
if (virDomainSetMemoryStatsPeriodEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
priv = vm->privateData;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainSendKeyEnsureACL(domain->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0))
goto cleanup;
- if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SAVE,
+ if (qemuDomainObjBeginAsyncJob(driver, vm, VIR_ASYNC_JOB_SAVE,
VIR_DOMAIN_JOB_OPERATION_SAVE, flags) < 0)
goto cleanup;
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
was_running = true;
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
- QEMU_ASYNC_JOB_SAVE) < 0)
+ VIR_ASYNC_JOB_SAVE) < 0)
goto endjob;
if (!virDomainObjIsActive(vm)) {
xml = NULL;
ret = qemuSaveImageCreate(driver, vm, path, data, compressor,
- flags, QEMU_ASYNC_JOB_SAVE);
+ flags, VIR_ASYNC_JOB_SAVE);
if (ret < 0)
goto endjob;
/* Shut it down */
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_SAVED,
- QEMU_ASYNC_JOB_SAVE, 0);
+ VIR_ASYNC_JOB_SAVE, 0);
virDomainAuditStop(vm, "saved");
event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_SAVED);
virErrorPreserveLast(&save_err);
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_SAVE_CANCELED,
- QEMU_ASYNC_JOB_SAVE) < 0) {
+ VIR_ASYNC_JOB_SAVE) < 0) {
VIR_WARN("Unable to resume guest CPUs after save failure");
virObjectEventStateQueue(driver->domainEventState,
virDomainEventLifecycleNewFromObj(vm,
qemuDumpToFd(virQEMUDriver *driver,
virDomainObj *vm,
int fd,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
const char *dumpformat)
{
qemuDomainObjPrivate *priv = vm->privateData;
if (STREQ(memory_dump_format, "elf"))
memory_dump_format = NULL;
- if (qemuDumpToFd(driver, vm, fd, QEMU_ASYNC_JOB_DUMP,
+ if (qemuDumpToFd(driver, vm, fd, VIR_ASYNC_JOB_DUMP,
memory_dump_format) < 0)
goto cleanup;
} else {
goto cleanup;
if (qemuMigrationSrcToFile(driver, vm, fd, compressor,
- QEMU_ASYNC_JOB_DUMP) < 0)
+ VIR_ASYNC_JOB_DUMP) < 0)
goto cleanup;
}
goto cleanup;
if (qemuDomainObjBeginAsyncJob(driver, vm,
- QEMU_ASYNC_JOB_DUMP,
+ VIR_ASYNC_JOB_DUMP,
VIR_DOMAIN_JOB_OPERATION_DUMP,
flags) < 0)
goto cleanup;
if (!(flags & VIR_DUMP_LIVE) &&
virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_DUMP,
- QEMU_ASYNC_JOB_DUMP) < 0)
+ VIR_ASYNC_JOB_DUMP) < 0)
goto endjob;
paused = true;
endjob:
if ((ret == 0) && (flags & VIR_DUMP_CRASH)) {
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_CRASHED,
- QEMU_ASYNC_JOB_DUMP, 0);
+ VIR_ASYNC_JOB_DUMP, 0);
virDomainAuditStop(vm, "crashed");
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
if (resume && virDomainObjIsActive(vm)) {
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
- QEMU_ASYNC_JOB_DUMP) < 0) {
+ VIR_ASYNC_JOB_DUMP) < 0) {
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_SUSPENDED,
VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR);
if (virDomainScreenshotEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
switch (action) {
case VIR_DOMAIN_WATCHDOG_ACTION_DUMP:
if (qemuDomainObjBeginAsyncJob(driver, vm,
- QEMU_ASYNC_JOB_DUMP,
+ VIR_ASYNC_JOB_DUMP,
VIR_DOMAIN_JOB_OPERATION_DUMP,
flags) < 0) {
return;
ret = qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
- QEMU_ASYNC_JOB_DUMP);
+ VIR_ASYNC_JOB_DUMP);
if (ret < 0)
virReportError(VIR_ERR_OPERATION_FAILED,
bool removeInactive = false;
unsigned long flags = VIR_DUMP_MEMORY_ONLY;
- if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_DUMP,
+ if (qemuDomainObjBeginAsyncJob(driver, vm, VIR_ASYNC_JOB_DUMP,
VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0)
return;
case VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY:
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_CRASHED,
- QEMU_ASYNC_JOB_DUMP, 0);
+ VIR_ASYNC_JOB_DUMP, 0);
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_CRASHED);
VIR_DEBUG("Removing device %s from domain %p %s",
devAlias, vm, vm->def->name);
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return;
if (!virDomainObjIsActive(vm)) {
"from domain %p %s",
devAlias, vm, vm->def->name);
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
memset(&dev, 0, sizeof(dev));
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return;
if (!virDomainObjIsActive(vm)) {
virDomainDiskDef *disk;
g_autoptr(qemuBlockJobData) job = NULL;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return;
if (!virDomainObjIsActive(vm)) {
job->newstate = status;
- qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
endjob:
qemuDomainObjEndJob(vm);
virDomainObj *vm,
qemuBlockJobData *job)
{
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return;
if (!virDomainObjIsActive(vm)) {
goto endjob;
}
- qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
endjob:
qemuDomainObjEndJob(vm);
unsigned int stopFlags = 0;
virObjectEvent *event = NULL;
- if (qemuProcessBeginStopJob(driver, vm, QEMU_JOB_DESTROY, true) < 0)
+ if (qemuProcessBeginStopJob(driver, vm, VIR_JOB_DESTROY, true) < 0)
return;
if (!virDomainObjIsActive(vm)) {
auditReason = "failed";
}
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
qemuMigrationDstErrorSave(driver, vm->def->name,
qemuMonitorLastError(priv->mon));
event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
eventReason);
- qemuProcessStop(driver, vm, stopReason, QEMU_ASYNC_JOB_NONE, stopFlags);
+ qemuProcessStop(driver, vm, stopReason, VIR_ASYNC_JOB_NONE, stopFlags);
virDomainAuditStop(vm, auditReason);
virObjectEventStateQueue(driver->domainEventState, event);
virObjectEvent *event = NULL;
unsigned long long balloon;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return;
if (!virDomainObjIsActive(vm)) {
if (useAgent) {
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
} else {
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
}
if (virDomainPinVcpuFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
if (virDomainPinEmulatorEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
goto cleanup;
if (flags & VIR_DOMAIN_VCPU_GUEST) {
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
size_t i;
int ret = -1;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
if (virDomainPinIOThreadEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
priv = vm->privateData;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return -1;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
goto cleanup;
ret = qemuSaveImageStartVM(conn, driver, vm, &fd, data, path,
- false, reset_nvram, QEMU_ASYNC_JOB_START);
+ false, reset_nvram, VIR_ASYNC_JOB_START);
qemuProcessEndJob(vm);
bool start_paused,
bool bypass_cache,
bool reset_nvram,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(virDomainDef) def = NULL;
qemuDomainObjPrivate *priv = vm->privateData;
virQEMUDriver *driver,
virDomainObj *vm,
unsigned int flags,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
int ret = -1;
g_autofree char *managed_save = NULL;
}
if (qemuDomainObjStart(dom->conn, driver, vm, flags,
- QEMU_ASYNC_JOB_START) < 0)
+ VIR_ASYNC_JOB_START) < 0)
goto endjob;
dom->id = vm->def->id;
if (virDomainUndefineFlagsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (!vm->persistent) {
}
if (ret == 0)
- ret = qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE);
+ ret = qemuDomainUpdateDeviceList(driver, vm, VIR_ASYNC_JOB_NONE);
return ret;
}
if (virDomainAttachDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjUpdateModificationImpact(vm, &flags) < 0)
if (virDomainUpdateDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjUpdateModificationImpact(vm, &flags) < 0)
if ((rc = qemuDomainDetachDeviceLive(vm, dev, driver, false)) < 0)
goto cleanup;
- if (rc == 0 && qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (rc == 0 && qemuDomainUpdateDeviceList(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
goto cleanup;
qemuDomainSaveStatus(vm);
if ((rc = qemuDomainDetachDeviceLive(vm, &dev, driver, true)) < 0)
return -1;
- if (rc == 0 && qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (rc == 0 && qemuDomainUpdateDeviceList(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
}
if (virDomainDetachDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjUpdateModificationImpact(vm, &flags) < 0)
if (virDomainDetachDeviceAliasEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjUpdateModificationImpact(vm, &flags) < 0)
autostart = (autostart != 0);
if (vm->autostart != autostart) {
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (!(configFile = virDomainConfigFile(cfg->configDir, vm->def->name)))
goto cleanup;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
goto cleanup;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
/* QEMU and LXC implementation are identical */
}
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
if (virDomainSetPerfEventsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
if (virDomainGetPerfEventsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (!(def = virDomainObjGetOneDefState(vm, flags, &live)))
goto cleanup;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
if (virDomainBlockResizeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainBlockStatsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainBlockStatsFlagsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainSetInterfaceParametersEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
return ret;
}
-/* This functions assumes that job QEMU_JOB_QUERY is started by a caller */
+/* This function assumes that job VIR_JOB_QUERY is started by a caller */
static int
qemuDomainMemoryStatsInternal(virQEMUDriver *driver,
virDomainObj *vm,
if (virDomainMemoryStatsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
ret = qemuDomainMemoryStatsInternal(driver, vm, stats, nr_stats);
goto cleanup;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainGetBlockInfoEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (!(disk = virDomainDiskByName(vm->def, path, false))) {
jobData->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY) {
if (events &&
jobData->status != VIR_DOMAIN_JOB_STATUS_ACTIVE &&
- qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_NONE,
+ qemuMigrationAnyFetchStats(driver, vm, VIR_ASYNC_JOB_NONE,
jobData, NULL) < 0)
return -1;
if (jobData->status == VIR_DOMAIN_JOB_STATUS_ACTIVE &&
privStats->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION &&
- qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_NONE,
+ qemuMigrationSrcFetchMirrorStats(driver, vm, VIR_ASYNC_JOB_NONE,
jobData) < 0)
return -1;
qemuMonitorDumpStats stats = { 0 };
int rc;
- if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuDomainObjEnterMonitorAsync(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
rc = qemuMonitorQueryDump(priv->mon, &stats);
return 0;
}
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
_("migration statistics are available only on "
"the source host"));
return -1;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainAbortJobEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_ABORT) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_ABORT) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
priv = vm->privateData;
switch (priv->job.asyncJob) {
- case QEMU_ASYNC_JOB_NONE:
+ case VIR_ASYNC_JOB_NONE:
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("no job is active on the domain"));
break;
- case QEMU_ASYNC_JOB_MIGRATION_IN:
+ case VIR_ASYNC_JOB_MIGRATION_IN:
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("cannot abort incoming migration;"
" use virDomainDestroy instead"));
break;
- case QEMU_ASYNC_JOB_START:
+ case VIR_ASYNC_JOB_START:
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("cannot abort VM start;"
" use virDomainDestroy instead"));
break;
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
+ case VIR_ASYNC_JOB_MIGRATION_OUT:
if ((priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY ||
(virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_POSTCOPY))) {
ret = qemuDomainAbortJobMigration(vm);
break;
- case QEMU_ASYNC_JOB_SAVE:
+ case VIR_ASYNC_JOB_SAVE:
ret = qemuDomainAbortJobMigration(vm);
break;
- case QEMU_ASYNC_JOB_DUMP:
+ case VIR_ASYNC_JOB_DUMP:
if (priv->job.apiFlags & VIR_DUMP_MEMORY_ONLY) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("cannot abort memory-only dump"));
ret = qemuDomainAbortJobMigration(vm);
break;
- case QEMU_ASYNC_JOB_SNAPSHOT:
+ case VIR_ASYNC_JOB_SNAPSHOT:
ret = qemuDomainAbortJobMigration(vm);
break;
- case QEMU_ASYNC_JOB_BACKUP:
- qemuBackupJobCancelBlockjobs(vm, priv->backup, true, QEMU_ASYNC_JOB_NONE);
+ case VIR_ASYNC_JOB_BACKUP:
+ qemuBackupJobCancelBlockjobs(vm, priv->backup, true, VIR_ASYNC_JOB_NONE);
ret = 0;
break;
- case QEMU_ASYNC_JOB_LAST:
+ case VIR_ASYNC_JOB_LAST:
default:
- virReportEnumRangeError(qemuDomainAsyncJob, priv->job.asyncJob);
+ virReportEnumRangeError(virDomainAsyncJob, priv->job.asyncJob);
break;
}
if (virDomainMigrateSetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MIGRATION_OP) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
downtime) < 0)
goto endjob;
- if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsApply(driver, vm, VIR_ASYNC_JOB_NONE,
migParams) < 0)
goto endjob;
} else {
if (virDomainMigrateGetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
goto endjob;
- if (qemuMigrationParamsFetch(driver, vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsFetch(driver, vm, VIR_ASYNC_JOB_NONE,
&migParams) < 0)
goto endjob;
if (virDomainMigrateGetCompressionCacheEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
}
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_XBZRLE_CACHE_SIZE)) {
- if (qemuMigrationParamsFetch(driver, vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsFetch(driver, vm, VIR_ASYNC_JOB_NONE,
&migParams) < 0)
goto endjob;
if (virDomainMigrateSetCompressionCacheEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MIGRATION_OP) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
cacheSize) < 0)
goto endjob;
- if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsApply(driver, vm, VIR_ASYNC_JOB_NONE,
migParams) < 0)
goto endjob;
} else {
goto cleanup;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MIGRATION_OP) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
bandwidth * 1024 * 1024) < 0)
goto endjob;
- if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsApply(driver, vm, VIR_ASYNC_JOB_NONE,
migParams) < 0)
goto endjob;
} else {
int rc;
int ret = -1;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
goto cleanup;
- if (qemuMigrationParamsFetch(driver, vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsFetch(driver, vm, VIR_ASYNC_JOB_NONE,
&migParams) < 0)
goto cleanup;
if (virDomainMigrateStartPostCopyEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MIGRATION_OP) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
priv = vm->privateData;
- if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+ if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("post-copy can only be started while "
"outgoing migration is in progress"));
if (virDomainQemuMonitorCommandWithFilesEnsureACL(domain->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
goto cleanup;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainBlockJobAbortEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
qemuDomainSaveStatus(vm);
if (!async) {
- qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
while (qemuBlockJobIsRunning(job)) {
if (virDomainObjWait(vm) < 0) {
ret = -1;
goto endjob;
}
- qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
}
if (pivot &&
endjob:
if (job && !async)
- qemuBlockJobSyncEnd(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobSyncEnd(vm, job, VIR_ASYNC_JOB_NONE);
qemuDomainObjEndJob(vm);
cleanup:
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainBlockJobSetSpeedEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
return -1;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
goto endjob;
}
} else {
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
goto endjob;
if (qemuBlockStorageSourceCreateDetectSize(blockNamedNodeData,
if (crdata &&
qemuBlockStorageSourceCreate(vm, mirror, mirrorBacking, mirror->backingStore,
- crdata->srcdata[0], QEMU_ASYNC_JOB_NONE) < 0)
+ crdata->srcdata[0], VIR_ASYNC_JOB_NONE) < 0)
goto endjob;
}
if (virDomainBlockCommitEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainOpenGraphicsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorOpenGraphics(priv->mon, protocol, pair[1], "graphicsfd",
cfg = virQEMUDriverGetConfig(driver);
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
priv = vm->privateData;
if (virDomainGetBlockIoTuneEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
/* the API check guarantees that only one of the definitions will be set */
if (virDomainGetDiskErrorsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainSetMetadataEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
ret = virDomainObjSetMetadata(vm, type, metadata, key, uri,
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CURRENT_MACHINE))
return -1;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return -1;
if ((ret = virDomainObjCheckActive(vm)) < 0)
qemuAgent *agent;
int ret = -1;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainPMWakeupEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainQemuAgentCommandEnsureACL(domain->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainFSTrimEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
qemuAgent *agent;
int ret = -1;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_QUERY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
size_t i, j;
int ret = -1;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainGetTimeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
qemuAgent *agent;
int ret = -1;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
if (qemuDomainSetTimeAgent(driver, vm, seconds, nseconds, rtcSync) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainFSFreezeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainFSThawEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
cpudelay = g_new0(unsigned long long, virDomainDefGetVcpus(dom->def));
if (HAVE_JOB(privflags) && virDomainObjIsActive(dom) &&
- qemuDomainRefreshVcpuHalted(driver, dom, QEMU_ASYNC_JOB_NONE) < 0) {
+ qemuDomainRefreshVcpuHalted(driver, dom, VIR_ASYNC_JOB_NONE) < 0) {
/* it's ok to be silent and go ahead, because halted vcpu info
* wasn't here from the beginning */
virResetLastError();
int rv;
if (flags & VIR_CONNECT_GET_ALL_DOMAINS_STATS_NOWAIT)
- rv = qemuDomainObjBeginJobNowait(driver, vm, QEMU_JOB_QUERY);
+ rv = qemuDomainObjBeginJobNowait(driver, vm, VIR_JOB_QUERY);
else
- rv = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY);
+ rv = qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY);
if (rv == 0)
domflags |= QEMU_DOMAIN_STATS_HAVE_JOB;
qemuAgent *agent;
if (qemuDomainObjBeginAgentJob(driver, vm,
- QEMU_AGENT_JOB_QUERY) < 0)
+ VIR_AGENT_JOB_QUERY) < 0)
return ret;
if (virDomainObjCheckActive(vm) < 0)
if ((nfs = qemuDomainGetFSInfoAgent(driver, vm, &agentinfo)) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
break;
case VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT:
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
if (virDomainSetUserPasswordEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainRenameEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjIsActive(vm)) {
if (virDomainGetGuestVcpusEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
if (virDomainSetGuestVcpusEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
if (virDomainSetVcpuEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
if (virDomainSetBlockThresholdEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
!src->nodestorage &&
- qemuBlockNodeNamesDetect(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ qemuBlockNodeNamesDetect(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
goto endjob;
if (!src->nodestorage) {
if (virDomainSetLifecycleActionEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1);
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0) {
else if (rc == 1)
hasSetaddr = true;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
goto cleanup;
if (qemuDomainObjBeginAgentJob(driver, vm,
- QEMU_AGENT_JOB_QUERY) < 0)
+ VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
qemuDomainObjEndAgentJob(vm);
if (nfs > 0 || ndisks > 0) {
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
if (virDomainAuthorizedSshKeysGetEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
if (virDomainAuthorizedSshKeysSetEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
goto cleanup;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0) {
int
qemuHotplugAttachDBusVMState(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(virJSONValue) props = NULL;
int
qemuHotplugRemoveDBusVMState(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int ret;
qemuHotplugAttachManagedPR(virQEMUDriver *driver,
virDomainObj *vm,
virStorageSource *src,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(virJSONValue) props = NULL;
static int
qemuHotplugRemoveManagedPR(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virErrorPtr orig_err;
if (qemuDomainStorageSourceChainAccessAllow(driver, vm, newsrc) < 0)
goto cleanup;
- if (qemuHotplugAttachManagedPR(driver, vm, newsrc, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuHotplugAttachManagedPR(driver, vm, newsrc, VIR_ASYNC_JOB_NONE) < 0)
goto cleanup;
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV))
/* remove PR manager object if unneeded */
if (managedpr)
- ignore_value(qemuHotplugRemoveManagedPR(driver, vm, QEMU_ASYNC_JOB_NONE));
+ ignore_value(qemuHotplugRemoveManagedPR(driver, vm, VIR_ASYNC_JOB_NONE));
/* revert old image do the disk definition */
if (oldsrc)
qemuDomainAttachDiskGenericTransient(virDomainObj *vm,
virDomainDiskDef *disk,
GHashTable *blockNamedNodeData,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(qemuSnapshotDiskContext) snapctxt = NULL;
g_autoptr(virDomainSnapshotDiskDef) snapdiskdef = NULL;
qemuDomainAttachDiskGeneric(virQEMUDriver *driver,
virDomainObj *vm,
virDomainDiskDef *disk,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(qemuBlockStorageSourceChainData) data = NULL;
qemuDomainObjPrivate *priv = vm->privateData;
if (qemuDomainPrepareDiskSource(disk, priv, cfg) < 0)
goto cleanup;
- if (qemuHotplugAttachManagedPR(driver, vm, disk->src, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuHotplugAttachManagedPR(driver, vm, disk->src, VIR_ASYNC_JOB_NONE) < 0)
goto cleanup;
- ret = qemuDomainAttachDiskGeneric(driver, vm, disk, QEMU_ASYNC_JOB_NONE);
+ ret = qemuDomainAttachDiskGeneric(driver, vm, disk, VIR_ASYNC_JOB_NONE);
virDomainAuditDisk(vm, NULL, disk->src, "attach", ret == 0);
ignore_value(qemuDomainStorageSourceChainAccessRevoke(driver, vm, disk->src));
if (virStorageSourceChainHasManagedPR(disk->src))
- ignore_value(qemuHotplugRemoveManagedPR(driver, vm, QEMU_ASYNC_JOB_NONE));
+ ignore_value(qemuHotplugRemoveManagedPR(driver, vm, VIR_ASYNC_JOB_NONE));
}
qemuDomainSecretDiskDestroy(disk);
void
qemuDomainDelTLSObjects(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
const char *secAlias,
const char *tlsAlias)
{
int
qemuDomainAddTLSObjects(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virJSONValue **secProps,
virJSONValue **tlsProps)
{
dev->data.tcp.tlscreds = true;
- if (qemuDomainAddTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuDomainAddTLSObjects(driver, vm, VIR_ASYNC_JOB_NONE,
&secProps, &tlsProps) < 0)
return -1;
ignore_value(qemuMonitorDetachCharDev(priv->mon, charAlias));
qemuDomainObjExitMonitor(vm);
virErrorRestore(&orig_err);
- qemuDomainDelTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE,
+ qemuDomainDelTLSObjects(driver, vm, VIR_ASYNC_JOB_NONE,
secAlias, tlsAlias);
goto audit;
}
qemuDomainObjExitMonitor(vm);
virErrorRestore(&orig_err);
- qemuDomainDelTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE,
+ qemuDomainDelTLSObjects(driver, vm, VIR_ASYNC_JOB_NONE,
secAlias, tlsAlias);
goto audit;
}
qemuDomainObjExitMonitor(vm);
virErrorRestore(&orig_err);
- qemuDomainDelTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE,
+ qemuDomainDelTLSObjects(driver, vm, VIR_ASYNC_JOB_NONE,
secAlias, tlsAlias);
goto audit;
}
virObjectEventStateQueue(driver->domainEventState, event);
/* fix the balloon size */
- ignore_value(qemuProcessRefreshBalloonState(driver, vm, QEMU_ASYNC_JOB_NONE));
+ ignore_value(qemuProcessRefreshBalloonState(driver, vm, VIR_ASYNC_JOB_NONE));
/* mem is consumed by vm->def */
mem = NULL;
/* this step is best effort, removing the device would be so much trouble */
ignore_value(qemuDomainUpdateMemoryDeviceInfo(driver, vm,
- QEMU_ASYNC_JOB_NONE));
+ VIR_ASYNC_JOB_NONE));
ret = 0;
VIR_DOMAIN_GRAPHICS_TYPE_VNC,
&dev->data.vnc.auth,
cfg->vncPassword,
- QEMU_ASYNC_JOB_NONE) < 0)
+ VIR_ASYNC_JOB_NONE) < 0)
return -1;
/* Steal the new dev's char * reference */
VIR_DOMAIN_GRAPHICS_TYPE_SPICE,
&dev->data.spice.auth,
cfg->spicePassword,
- QEMU_ASYNC_JOB_NONE) < 0)
+ VIR_ASYNC_JOB_NONE) < 0)
return -1;
/* Steal the new dev's char * reference */
qemuDomainStorageSourceChainAccessRevoke(driver, vm, disk->src);
if (virStorageSourceChainHasManagedPR(disk->src) &&
- qemuHotplugRemoveManagedPR(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ qemuHotplugRemoveManagedPR(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
goto cleanup;
if (disk->transient) {
virDomainMemoryDefFree(mem);
/* fix the balloon size */
- ignore_value(qemuProcessRefreshBalloonState(driver, vm, QEMU_ASYNC_JOB_NONE));
+ ignore_value(qemuProcessRefreshBalloonState(driver, vm, VIR_ASYNC_JOB_NONE));
/* decrease the mlock limit after memory unplug if necessary */
ignore_value(qemuDomainAdjustMaxMemLock(vm, false));
virErrorPtr save_error = NULL;
size_t i;
- if (qemuDomainRefreshVcpuInfo(driver, vm, QEMU_ASYNC_JOB_NONE, false) < 0)
+ if (qemuDomainRefreshVcpuInfo(driver, vm, VIR_ASYNC_JOB_NONE, false) < 0)
return -1;
/* validation requires us to set the expected state prior to calling it */
/* start outputting of the new XML element to allow keeping unpluggability */
vm->def->individualvcpus = true;
- if (qemuDomainRefreshVcpuInfo(driver, vm, QEMU_ASYNC_JOB_NONE, false) < 0)
+ if (qemuDomainRefreshVcpuInfo(driver, vm, VIR_ASYNC_JOB_NONE, false) < 0)
return -1;
/* validation requires us to set the expected state prior to calling it */
void qemuDomainDelTLSObjects(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
const char *secAlias,
const char *tlsAlias);
int qemuDomainAddTLSObjects(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virJSONValue **secProps,
virJSONValue **tlsProps);
int qemuDomainAttachDiskGeneric(virQEMUDriver *driver,
virDomainObj *vm,
virDomainDiskDef *disk,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuDomainAttachNetDevice(virQEMUDriver *driver,
virDomainObj *vm,
int qemuHotplugAttachDBusVMState(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuHotplugRemoveDBusVMState(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuDomainChangeMemoryRequestedSize(virQEMUDriver *driver,
virDomainObj *vm,
static int
qemuMigrationJobStart(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob job,
+ virDomainAsyncJob job,
unsigned long apiFlags)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;
static bool
qemuMigrationJobIsActive(virDomainObj *vm,
- qemuDomainAsyncJob job)
+ virDomainAsyncJob job)
ATTRIBUTE_NONNULL(1);
static void
/* we got here through some sort of failure; start the domain again */
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
+ VIR_ASYNC_JOB_MIGRATION_OUT) < 0) {
/* Hm, we already know we are in error here. We don't want to
* overwrite the previous error, though, so we just throw something
* to the logs and hope for the best */
}
if (qemuDomainObjEnterMonitorAsync(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ VIR_ASYNC_JOB_MIGRATION_IN) < 0)
goto cleanup;
if (!server_started) {
return 0;
if (qemuDomainObjEnterMonitorAsync(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ VIR_ASYNC_JOB_MIGRATION_IN) < 0)
return -1;
if (qemuMonitorNBDServerStop(priv->mon) < 0)
*/
static int
qemuMigrationSrcNBDStorageCopyReady(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
size_t i;
size_t notReady = 0;
*/
static int
qemuMigrationSrcNBDCopyCancelled(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool abortMigration)
{
size_t i;
virDomainDiskDef *disk,
qemuBlockJobData *job,
bool abortMigration,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int rv;
qemuMigrationSrcNBDCopyCancel(virQEMUDriver *driver,
virDomainObj *vm,
bool abortMigration,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virConnectPtr dconn)
{
virErrorPtr err = NULL;
static int
qemuMigrationSrcCancelRemoveTempBitmaps(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virQEMUDriver *driver = priv->driver;
return -1;
if (qemuDomainObjEnterMonitorAsync(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
return -1;
mon_ret = qemuBlockStorageSourceAttachApply(qemuDomainGetMonitor(vm), data);
}
if (qemuDomainObjEnterMonitorAsync(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
return -1;
mon_ret = qemuMonitorDriveMirror(qemuDomainGetMonitor(vm),
}
}
- while ((rv = qemuMigrationSrcNBDStorageCopyReady(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) != 1) {
+ while ((rv = qemuMigrationSrcNBDStorageCopyReady(vm, VIR_ASYNC_JOB_MIGRATION_OUT)) != 1) {
if (rv < 0)
return -1;
if (priv->job.abortJob) {
priv->job.current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
_("canceled by client"));
return -1;
}
return -1;
}
- qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationSrcFetchMirrorStats(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
priv->job.current);
return 0;
}
if (state == VIR_DOMAIN_RUNNING) {
if (qemuProcessStopCPUs(driver, vm,
VIR_DOMAIN_PAUSED_POSTCOPY_FAILED,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ VIR_ASYNC_JOB_MIGRATION_IN) < 0)
VIR_WARN("Unable to pause guest CPUs for %s", vm->def->name);
} else {
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
int
qemuMigrationAnyFetchStats(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virDomainJobData *jobData,
char **error)
{
qemuDomainObjPrivate *priv = vm->privateData;
switch (priv->job.asyncJob) {
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
+ case VIR_ASYNC_JOB_MIGRATION_OUT:
return _("migration out job");
- case QEMU_ASYNC_JOB_SAVE:
+ case VIR_ASYNC_JOB_SAVE:
return _("domain save job");
- case QEMU_ASYNC_JOB_DUMP:
+ case VIR_ASYNC_JOB_DUMP:
return _("domain core dump job");
- case QEMU_ASYNC_JOB_NONE:
+ case VIR_ASYNC_JOB_NONE:
return _("undefined");
- case QEMU_ASYNC_JOB_MIGRATION_IN:
+ case VIR_ASYNC_JOB_MIGRATION_IN:
return _("migration in job");
- case QEMU_ASYNC_JOB_SNAPSHOT:
+ case VIR_ASYNC_JOB_SNAPSHOT:
return _("snapshot job");
- case QEMU_ASYNC_JOB_START:
+ case VIR_ASYNC_JOB_START:
return _("start job");
- case QEMU_ASYNC_JOB_BACKUP:
+ case VIR_ASYNC_JOB_BACKUP:
return _("backup job");
- case QEMU_ASYNC_JOB_LAST:
+ case VIR_ASYNC_JOB_LAST:
default:
return _("job");
}
static int
qemuMigrationJobCheckStatus(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virDomainJobData *jobData = priv->job.current;
static int
qemuMigrationAnyCompleted(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virConnectPtr dconn,
unsigned int flags)
{
static int
qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virConnectPtr dconn,
unsigned int flags)
{
priv->job.completed = virDomainJobDataCopy(jobData);
priv->job.completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
- if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT &&
+ if (asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT &&
jobData->status == VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED)
jobData->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
static int
qemuMigrationDstWaitForCompletion(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool postcopy)
{
qemuDomainObjPrivate *priv = vm->privateData;
}
if (qemuDomainObjEnterMonitorAsync(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
+ VIR_ASYNC_JOB_MIGRATION_OUT) == 0) {
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
ret = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress,
qemuMigrationDstRun(virQEMUDriver *driver,
virDomainObj *vm,
const char *uri,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int rv;
if (rv < 0)
return -1;
- if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
+ if (asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
/* qemuMigrationDstWaitForCompletion is called from the Finish phase */
return 0;
}
VIR_DEBUG("vm=%s, conn=%p, asyncJob=%s, phase=%s",
vm->def->name, conn,
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
- qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobPhaseToString(priv->job.asyncJob,
priv->job.phase));
- if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
+ if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT))
return;
VIR_DEBUG("The connection which started outgoing migration of domain %s"
VIR_WARN("Migration of domain %s finished but we don't know if the"
" domain was successfully started on destination or not",
vm->def->name);
- qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobPriv->migParams, priv->job.apiFlags);
/* clear the job and let higher levels decide what to do */
qemuMigrationJobFinish(vm);
cookieout, cookieoutlen, nmigrate_disks,
migrate_disks, flags);
- /* Only set the phase if we are inside QEMU_ASYNC_JOB_MIGRATION_OUT.
+ /* Only set the phase if we are inside VIR_ASYNC_JOB_MIGRATION_OUT.
* Otherwise we will start the async job later in the perform phase losing
* change protection.
*/
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_BEGIN3);
if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
virQEMUDriver *driver = conn->privateData;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
char *xml = NULL;
- qemuDomainAsyncJob asyncJob;
+ virDomainAsyncJob asyncJob;
if (cfg->migrateTLSForce &&
!(flags & VIR_MIGRATE_TUNNELLED) &&
}
if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
- if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ if (qemuMigrationJobStart(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
flags) < 0)
goto cleanup;
- asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT;
+ asyncJob = VIR_ASYNC_JOB_MIGRATION_OUT;
} else {
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
- asyncJob = QEMU_ASYNC_JOB_NONE;
+ asyncJob = VIR_ASYNC_JOB_NONE;
}
qemuMigrationSrcStoreDomainState(vm);
VIR_DEBUG("driver=%p, vm=%s, job=%s, asyncJob=%s",
driver,
vm->def->name,
- qemuDomainJobTypeToString(priv->job.active),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+ virDomainJobTypeToString(priv->job.active),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob));
virPortAllocatorRelease(priv->migrationPort);
priv->migrationPort = 0;
- if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN))
+ if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_IN))
return;
qemuDomainObjDiscardAsyncJob(vm);
}
if (qemuMigrationCookieBlockDirtyBitmapsMatchDisks(vm->def, mig->blockDirtyBitmaps) < 0)
return -1;
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_MIGRATION_IN)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_MIGRATION_IN)))
return -1;
for (nextdisk = mig->blockDirtyBitmaps; nextdisk; nextdisk = nextdisk->next) {
!!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0)
goto cleanup;
- if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuMigrationJobStart(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
flags) < 0)
goto cleanup;
qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_PREPARE);
startFlags = VIR_QEMU_PROCESS_START_AUTODESTROY;
- if (qemuProcessInit(driver, vm, mig->cpu, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuProcessInit(driver, vm, mig->cpu, VIR_ASYNC_JOB_MIGRATION_IN,
true, startFlags) < 0)
goto stopjob;
stopProcess = true;
if (qemuProcessPrepareHost(driver, vm, startFlags) < 0)
goto stopjob;
- rv = qemuProcessLaunch(dconn, driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ rv = qemuProcessLaunch(dconn, driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
incoming, NULL,
VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
startFlags);
if (qemuMigrationDstPrepareAnyBlockDirtyBitmaps(vm, mig, migParams, flags) < 0)
goto stopjob;
- if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuMigrationParamsCheck(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
migParams, mig->caps->automatic) < 0)
goto stopjob;
* set the migration TLS parameters */
if (flags & VIR_MIGRATE_TLS) {
if (qemuMigrationParamsEnableTLS(driver, vm, true,
- QEMU_ASYNC_JOB_MIGRATION_IN,
+ VIR_ASYNC_JOB_MIGRATION_IN,
&tlsAlias, NULL,
migParams) < 0)
goto stopjob;
goto stopjob;
}
- if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuMigrationParamsApply(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
migParams) < 0)
goto stopjob;
if (incoming->deferredURI &&
qemuMigrationDstRun(driver, vm, incoming->deferredURI,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ VIR_ASYNC_JOB_MIGRATION_IN) < 0)
goto stopjob;
- if (qemuProcessFinishStartup(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuProcessFinishStartup(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
false, VIR_DOMAIN_PAUSED_MIGRATION) < 0)
goto stopjob;
return ret;
stopjob:
- qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
jobPriv->migParams, priv->job.apiFlags);
if (stopProcess) {
stopFlags |= VIR_QEMU_PROCESS_STOP_NO_RELABEL;
virDomainAuditStart(vm, "migrated", false);
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
- QEMU_ASYNC_JOB_MIGRATION_IN, stopFlags);
+ VIR_ASYNC_JOB_MIGRATION_IN, stopFlags);
}
qemuMigrationJobFinish(vm);
*/
if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_POSTCOPY &&
- qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationAnyFetchStats(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobData, NULL) < 0)
VIR_WARN("Could not refresh migration statistics");
qemuMigrationSrcWaitForSpice(vm);
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
VIR_QEMU_PROCESS_STOP_MIGRATED);
virDomainAuditStop(vm, "migrated");
/* cancel any outstanding NBD jobs */
qemuMigrationSrcNBDCopyCancel(driver, vm, false,
- QEMU_ASYNC_JOB_MIGRATION_OUT, NULL);
+ VIR_ASYNC_JOB_MIGRATION_OUT, NULL);
virErrorRestore(&orig_err);
else
qemuMigrationSrcRestoreDomainState(driver, vm);
- qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobPriv->migParams, priv->job.apiFlags);
qemuDomainSaveStatus(vm);
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
int ret = -1;
- if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
+ if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT))
goto cleanup;
if (cancelled)
qemuMigrationSrcContinue(virQEMUDriver *driver,
virDomainObj *vm,
qemuMonitorMigrationStatus status,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int ret;
if (priv->dbusVMStateIds) {
int rv;
- if (qemuHotplugAttachDBusVMState(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuHotplugAttachDBusVMState(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
- if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuDomainObjEnterMonitorAsync(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
rv = qemuMonitorSetDBusVMStateIdList(priv->mon, priv->dbusVMStateIds);
return rv;
} else {
- if (qemuHotplugRemoveDBusVMState(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuHotplugRemoveDBusVMState(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
}
GSList *nextdisk;
int rc;
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_MIGRATION_OUT)))
return -1;
for (nextdisk = mig->blockDirtyBitmaps; nextdisk; nextdisk = nextdisk->next) {
}
}
- if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ if (qemuDomainObjEnterMonitorAsync(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
return -1;
rc = qemuMonitorTransaction(priv->mon, &actions);
qemuMigrationSrcRunPrepareBlockDirtyBitmaps(vm, mig, migParams, flags) < 0)
goto error;
- if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ if (qemuMigrationParamsCheck(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
migParams, mig->caps->automatic) < 0)
goto error;
hostname = spec->dest.host.name;
if (qemuMigrationParamsEnableTLS(driver, vm, false,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
&tlsAlias, hostname,
migParams) < 0)
goto error;
priv->migMaxBandwidth * 1024 * 1024) < 0)
goto error;
- if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ if (qemuMigrationParamsApply(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
migParams) < 0)
goto error;
if (!(flags & VIR_MIGRATE_LIVE) &&
virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
}
if (qemuDomainObjEnterMonitorAsync(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
if (priv->job.abortJob) {
* priv->job.abortJob will not change */
priv->job.current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
_("canceled by client"));
goto exit_monitor;
}
waitFlags |= QEMU_MIGRATION_COMPLETED_POSTCOPY;
rc = qemuMigrationSrcWaitForCompletion(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
dconn, waitFlags);
if (rc == -2)
goto error;
if (mig->nbd &&
qemuMigrationSrcNBDCopyCancel(driver, vm, false,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
dconn) < 0)
goto error;
if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_PAUSED) {
if (qemuMigrationSrcContinue(driver, vm,
QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
waitFlags ^= QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER;
rc = qemuMigrationSrcWaitForCompletion(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
dconn, waitFlags);
if (rc == -2)
goto error;
if (cancel &&
priv->job.current->status != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED &&
qemuDomainObjEnterMonitorAsync(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
+ VIR_ASYNC_JOB_MIGRATION_OUT) == 0) {
qemuMonitorMigrateCancel(priv->mon);
qemuDomainObjExitMonitor(vm);
}
/* cancel any outstanding NBD jobs */
if (mig && mig->nbd)
qemuMigrationSrcNBDCopyCancel(driver, vm, true,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
dconn);
- qemuMigrationSrcCancelRemoveTempBitmaps(vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
+ qemuMigrationSrcCancelRemoveTempBitmaps(vm, VIR_ASYNC_JOB_MIGRATION_OUT);
if (priv->job.current->status != VIR_DOMAIN_JOB_STATUS_CANCELED)
priv->job.current->status = VIR_DOMAIN_JOB_STATUS_FAILED;
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
- if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ if (qemuMigrationJobStart(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
flags) < 0)
goto cleanup;
*/
if (!v3proto) {
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
VIR_QEMU_PROCESS_STOP_MIGRATED);
virDomainAuditStop(vm, "migrated");
event = virDomainEventLifecycleNewFromObj(vm,
* here
*/
if (!v3proto && ret < 0)
- qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobPriv->migParams, priv->job.apiFlags);
qemuMigrationSrcRestoreDomainState(driver, vm);
/* If we didn't start the job in the begin phase, start it now. */
if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
- if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ if (qemuMigrationJobStart(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
flags) < 0)
return ret;
- } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) {
+ } else if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT)) {
return ret;
}
endjob:
if (ret < 0) {
- qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobPriv->migParams, priv->job.apiFlags);
qemuMigrationJobFinish(vm);
} else {
port = priv->migrationPort;
priv->migrationPort = 0;
- if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) {
+ if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_IN)) {
qemuMigrationDstErrorReport(driver, vm->def->name);
goto cleanup;
}
/* Check for a possible error on the monitor in case Finish was called
* earlier than monitor EOF handler got a chance to process the error
*/
- qemuDomainCheckMonitor(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
+ qemuDomainCheckMonitor(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN);
goto endjob;
}
goto endjob;
if (qemuRefreshVirtioChannelState(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ VIR_ASYNC_JOB_MIGRATION_IN) < 0)
goto endjob;
if (qemuConnectAgent(driver, vm) < 0)
* before starting guest CPUs.
*/
if (qemuMigrationDstWaitForCompletion(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN,
+ VIR_ASYNC_JOB_MIGRATION_IN,
!!(flags & VIR_MIGRATE_POSTCOPY)) < 0) {
/* There's not much we can do for v2 protocol since the
* original domain on the source host is already gone.
/* Now that the state data was transferred we can refresh the actual state
* of the devices */
- if (qemuProcessRefreshState(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
+ if (qemuProcessRefreshState(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN) < 0) {
/* Similarly to the case above v2 protocol will not be able to recover
* from this. Let's ignore this and perhaps stuff will not break. */
if (v3proto)
if (qemuProcessStartCPUs(driver, vm,
inPostCopy ? VIR_DOMAIN_RUNNING_POSTCOPY
: VIR_DOMAIN_RUNNING_MIGRATED,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
+ VIR_ASYNC_JOB_MIGRATION_IN) < 0) {
if (virGetLastErrorCode() == VIR_ERR_OK)
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("resume operation failed"));
if (inPostCopy) {
if (qemuMigrationDstWaitForCompletion(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN,
+ VIR_ASYNC_JOB_MIGRATION_IN,
false) < 0) {
goto endjob;
}
virDomainObjIsActive(vm)) {
if (doKill) {
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
- QEMU_ASYNC_JOB_MIGRATION_IN,
+ VIR_ASYNC_JOB_MIGRATION_IN,
VIR_QEMU_PROCESS_STOP_MIGRATED);
virDomainAuditStop(vm, "failed");
event = virDomainEventLifecycleNewFromObj(vm,
g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
}
- qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
jobPriv->migParams, priv->job.apiFlags);
qemuMigrationJobFinish(vm);
qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
int fd,
virCommand *compressor,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
bool bwParam = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_BANDWIDTH);
if (storage &&
qemuMigrationSrcNBDCopyCancel(driver, vm, true,
- QEMU_ASYNC_JOB_NONE, NULL) < 0)
+ VIR_ASYNC_JOB_NONE, NULL) < 0)
return -1;
- if (qemuMigrationSrcCancelRemoveTempBitmaps(vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuMigrationSrcCancelRemoveTempBitmaps(vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
return 0;
static int
qemuMigrationJobStart(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob job,
+ virDomainAsyncJob job,
unsigned long apiFlags)
{
qemuDomainObjPrivate *priv = vm->privateData;
virDomainJobOperation op;
unsigned long long mask;
- if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
+ if (job == VIR_ASYNC_JOB_MIGRATION_IN) {
op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_IN;
- mask = QEMU_JOB_NONE;
+ mask = VIR_JOB_NONE;
} else {
op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_OUT;
mask = QEMU_JOB_DEFAULT_MASK |
- JOB_MASK(QEMU_JOB_SUSPEND) |
- JOB_MASK(QEMU_JOB_MIGRATION_OP);
+ JOB_MASK(VIR_JOB_SUSPEND) |
+ JOB_MASK(VIR_JOB_MIGRATION_OP);
}
if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0)
static bool
qemuMigrationJobIsActive(virDomainObj *vm,
- qemuDomainAsyncJob job)
+ virDomainAsyncJob job)
{
qemuDomainObjPrivate *priv = vm->privateData;
if (priv->job.asyncJob != job) {
const char *msg;
- if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
+ if (job == VIR_ASYNC_JOB_MIGRATION_IN)
msg = _("domain '%s' is not processing incoming migration");
else
msg = _("domain '%s' is not being migrated");
int
qemuMigrationSrcFetchMirrorStats(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virDomainJobData *jobData)
{
size_t i;
virDomainObj *vm,
int fd,
virCommand *compressor,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;
int
int
qemuMigrationAnyFetchStats(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virDomainJobData *jobData,
char **error);
qemuMigrationDstRun(virQEMUDriver *driver,
virDomainObj *vm,
const char *uri,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
void
qemuMigrationAnyPostcopyFailed(virQEMUDriver *driver,
int
qemuMigrationSrcFetchMirrorStats(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virDomainJobData *jobData);
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
return -1;
- if (asyncJob == QEMU_ASYNC_JOB_NONE) {
+ if (asyncJob == VIR_ASYNC_JOB_NONE) {
if (!virBitmapIsAllClear(migParams->caps)) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Migration capabilities can only be set by "
qemuMigrationParty party;
size_t i;
- if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
+ if (asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
party = QEMU_MIGRATION_SOURCE;
else
party = QEMU_MIGRATION_DESTINATION;
VIR_DEBUG("vm=%p", vm);
virObjectLock(vm);
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
if (qemuProcessStartCPUs(driver, vm,
reason,
- QEMU_ASYNC_JOB_NONE) < 0) {
+ VIR_ASYNC_JOB_NONE) < 0) {
if (virGetLastErrorCode() == VIR_ERR_OK)
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("resume operation failed"));
* reveal it in domain state nor sent events */
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING &&
!priv->pausedShutdown) {
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
reason = VIR_DOMAIN_PAUSED_POSTCOPY;
else
priv = vm->privateData;
jobPriv = priv->job.privateData;
- if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+ if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
VIR_DEBUG("got SPICE_MIGRATE_COMPLETED event without a migration job");
goto cleanup;
}
qemuMonitorMigrationStatusTypeToString(status));
priv = vm->privateData;
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
VIR_DEBUG("got MIGRATION event without a migration job");
goto cleanup;
}
virDomainObjBroadcast(vm);
if (status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY &&
- priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT &&
+ priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_MIGRATION) {
VIR_DEBUG("Correcting paused state reason for domain %s to %s",
vm, vm->def->name, pass);
priv = vm->privateData;
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
VIR_DEBUG("got MIGRATION_PASS event without a migration job");
goto cleanup;
}
priv = vm->privateData;
jobPriv = priv->job.privateData;
privJobCurrent = priv->job.current->privateData;
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
VIR_DEBUG("got DUMP_COMPLETED event without a dump_completed job");
goto cleanup;
}
static int
qemuProcessInitMonitor(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
int ret;
int
qemuRefreshVirtioChannelState(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(GHashTable) info = NULL;
static int
qemuProcessSetLinkStates(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virDomainDef *def = vm->def;
int
qemuProcessStartCPUs(virQEMUDriver *driver, virDomainObj *vm,
virDomainRunningReason reason,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
int ret = -1;
qemuDomainObjPrivate *priv = vm->privateData;
int qemuProcessStopCPUs(virQEMUDriver *driver,
virDomainObj *vm,
virDomainPausedReason reason,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
int ret = -1;
qemuDomainObjPrivate *priv = vm->privateData;
vm->def->name);
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_MIGRATED,
- QEMU_ASYNC_JOB_NONE) < 0) {
+ VIR_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s", vm->def->name);
}
break;
break;
}
- qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE,
+ qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_NONE,
jobPriv->migParams, job->apiFlags);
return 0;
}
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
- QEMU_ASYNC_JOB_NONE) < 0) {
+ VIR_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s", vm->def->name);
}
}
}
- qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE,
+ qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_NONE,
jobPriv->migParams, job->apiFlags);
return 0;
}
state = virDomainObjGetState(vm, &reason);
switch (job->asyncJob) {
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
+ case VIR_ASYNC_JOB_MIGRATION_OUT:
if (qemuProcessRecoverMigrationOut(driver, vm, job,
state, reason, stopFlags) < 0)
return -1;
break;
- case QEMU_ASYNC_JOB_MIGRATION_IN:
+ case VIR_ASYNC_JOB_MIGRATION_IN:
if (qemuProcessRecoverMigrationIn(driver, vm, job,
state, reason) < 0)
return -1;
break;
- case QEMU_ASYNC_JOB_SAVE:
- case QEMU_ASYNC_JOB_DUMP:
- case QEMU_ASYNC_JOB_SNAPSHOT:
+ case VIR_ASYNC_JOB_SAVE:
+ case VIR_ASYNC_JOB_DUMP:
+ case VIR_ASYNC_JOB_SNAPSHOT:
qemuDomainObjEnterMonitor(driver, vm);
ignore_value(qemuMonitorMigrateCancel(priv->mon));
qemuDomainObjExitMonitor(vm);
* recovering an async job, this function is run at startup
* and must resume things using sync monitor connections. */
if (state == VIR_DOMAIN_PAUSED &&
- ((job->asyncJob == QEMU_ASYNC_JOB_DUMP &&
+ ((job->asyncJob == VIR_ASYNC_JOB_DUMP &&
reason == VIR_DOMAIN_PAUSED_DUMP) ||
- (job->asyncJob == QEMU_ASYNC_JOB_SAVE &&
+ (job->asyncJob == VIR_ASYNC_JOB_SAVE &&
reason == VIR_DOMAIN_PAUSED_SAVE) ||
- (job->asyncJob == QEMU_ASYNC_JOB_SNAPSHOT &&
+ (job->asyncJob == VIR_ASYNC_JOB_SNAPSHOT &&
(reason == VIR_DOMAIN_PAUSED_SNAPSHOT ||
reason == VIR_DOMAIN_PAUSED_MIGRATION)) ||
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_SAVE_CANCELED,
- QEMU_ASYNC_JOB_NONE) < 0) {
+ VIR_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain '%s' after migration to file",
vm->def->name);
}
}
break;
- case QEMU_ASYNC_JOB_START:
+ case VIR_ASYNC_JOB_START:
/* Already handled in VIR_DOMAIN_PAUSED_STARTING_UP check. */
break;
- case QEMU_ASYNC_JOB_BACKUP:
+ case VIR_ASYNC_JOB_BACKUP:
ignore_value(virTimeMillisNow(&now));
/* Restore the config of the async job which is not persisted */
priv->job.jobsQueued++;
- priv->job.asyncJob = QEMU_ASYNC_JOB_BACKUP;
+ priv->job.asyncJob = VIR_ASYNC_JOB_BACKUP;
priv->job.asyncOwnerAPI = g_strdup(virThreadJobGet());
priv->job.asyncStarted = now;
qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
- JOB_MASK(QEMU_JOB_SUSPEND) |
- JOB_MASK(QEMU_JOB_MODIFY)));
+ JOB_MASK(VIR_JOB_SUSPEND) |
+ JOB_MASK(VIR_JOB_MODIFY)));
/* We reset the job parameters for backup so that the job will look
* active. This is possible because we are able to recover the state
priv->job.current->started = now;
break;
- case QEMU_ASYNC_JOB_NONE:
- case QEMU_ASYNC_JOB_LAST:
+ case VIR_ASYNC_JOB_NONE:
+ case VIR_ASYNC_JOB_LAST:
break;
}
* for the job to be properly tracked in domain state XML.
*/
switch (job->active) {
- case QEMU_JOB_QUERY:
+ case VIR_JOB_QUERY:
/* harmless */
break;
- case QEMU_JOB_DESTROY:
+ case VIR_JOB_DESTROY:
VIR_DEBUG("Domain %s should have already been destroyed",
vm->def->name);
return -1;
- case QEMU_JOB_SUSPEND:
+ case VIR_JOB_SUSPEND:
/* mostly harmless */
break;
- case QEMU_JOB_MODIFY:
+ case VIR_JOB_MODIFY:
/* XXX depending on the command we may be in an inconsistent state and
* we should probably fall back to "monitor error" state and refuse to
*/
break;
- case QEMU_JOB_MIGRATION_OP:
- case QEMU_JOB_ABORT:
- case QEMU_JOB_ASYNC:
- case QEMU_JOB_ASYNC_NESTED:
+ case VIR_JOB_MIGRATION_OP:
+ case VIR_JOB_ABORT:
+ case VIR_JOB_ASYNC:
+ case VIR_JOB_ASYNC_NESTED:
/* async job was already handled above */
- case QEMU_JOB_NONE:
- case QEMU_JOB_LAST:
+ case VIR_JOB_NONE:
+ case VIR_JOB_LAST:
break;
}
g_auto(GStrv) old = g_steal_pointer(&priv->qemuDevices);
GStrv tmp;
- if (qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuDomainUpdateDeviceList(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
if (!old)
static int
qemuProcessFetchGuestCPU(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virCPUData **enabled,
virCPUData **disabled)
{
static int
qemuProcessUpdateAndVerifyCPU(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(virCPUData) cpu = NULL;
g_autoptr(virCPUData) disabled = NULL;
static int
qemuProcessFetchCPUDefinitions(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virDomainCapsCPUModels **cpuModels)
{
qemuDomainObjPrivate *priv = vm->privateData;
static int
qemuProcessUpdateCPU(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(virCPUData) cpu = NULL;
g_autoptr(virCPUData) disabled = NULL;
/*
- * This function starts a new QEMU_ASYNC_JOB_START async job. The user is
+ * This function starts a new VIR_ASYNC_JOB_START async job. The user is
* responsible for calling qemuProcessEndJob to stop this job and for passing
- * QEMU_ASYNC_JOB_START as @asyncJob argument to any function requiring this
+ * VIR_ASYNC_JOB_START as @asyncJob argument to any function requiring this
* parameter between qemuProcessBeginJob and qemuProcessEndJob.
*/
int
virDomainJobOperation operation,
unsigned long apiFlags)
{
- if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_START,
+ if (qemuDomainObjBeginAsyncJob(driver, vm, VIR_ASYNC_JOB_START,
operation, apiFlags) < 0)
return -1;
- qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
+ qemuDomainObjSetAsyncJobMask(vm, VIR_JOB_NONE);
return 0;
}
static int
qemuProcessSetupBalloon(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
unsigned long long balloon = vm->def->mem.cur_balloon;
qemuDomainObjPrivate *priv = vm->privateData;
qemuProcessInit(virQEMUDriver *driver,
virDomainObj *vm,
virCPUDef *updatedCPU,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool migration,
unsigned int flags)
{
static int
qemuProcessSetupHotpluggableVcpus(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
unsigned int maxvcpus = virDomainDefGetVcpusMax(vm->def);
qemuDomainObjPrivate *priv = vm->privateData;
static int
qemuProcessSetupDiskThrottlingBlockdev(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
size_t i;
static int
qemuProcessSetupDisksTransientSnapshot(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(qemuSnapshotDiskContext) snapctxt = NULL;
g_autoptr(GHashTable) blockNamedNodeData = NULL;
static int
qemuProcessSetupDisksTransientHotplug(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
bool hasHotpluggedDisk = false;
static int
qemuProcessSetupDisksTransient(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
static int
qemuProcessSetupLifecycleActions(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int rc;
qemuProcessLaunch(virConnectPtr conn,
virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
qemuProcessIncomingDef *incoming,
virDomainMomentObj *snapshot,
virNetDevVPortProfileOp vmop,
int
qemuProcessRefreshState(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int
qemuProcessFinishStartup(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool startCPUs,
virDomainPausedReason pausedReason)
{
virQEMUDriver *driver,
virDomainObj *vm,
virCPUDef *updatedCPU,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
const char *migrateFrom,
int migrateFd,
const char *migratePath,
"migrateFrom=%s migrateFd=%d migratePath=%s "
"snapshot=%p vmop=%d flags=0x%x",
conn, driver, vm, vm->def->name, vm->def->id,
- qemuDomainAsyncJobTypeToString(asyncJob),
+ virDomainAsyncJobTypeToString(asyncJob),
NULLSTR(migrateFrom), migrateFd, NULLSTR(migratePath),
snapshot, vmop, flags);
if (!migrateURI)
flags |= VIR_QEMU_PROCESS_START_NEW;
- if (qemuProcessInit(driver, vm, NULL, QEMU_ASYNC_JOB_NONE,
+ if (qemuProcessInit(driver, vm, NULL, VIR_ASYNC_JOB_NONE,
!!migrateURI, flags) < 0)
return -1;
int
qemuProcessBeginStopJob(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainJob job,
+ virDomainJob job,
bool forceKill)
{
qemuDomainObjPrivate *priv = vm->privateData;
void qemuProcessStop(virQEMUDriver *driver,
virDomainObj *vm,
virDomainShutoffReason reason,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
unsigned int flags)
{
int ret;
vm, vm->def->name, vm->def->id,
(long long)vm->pid,
virDomainShutoffReasonTypeToString(reason),
- qemuDomainAsyncJobTypeToString(asyncJob),
+ virDomainAsyncJobTypeToString(asyncJob),
flags);
/* This method is routinely used in clean up paths. Disable error
* reporting so we don't squash a legit error. */
virErrorPreserveLast(&orig_err);
- if (asyncJob != QEMU_ASYNC_JOB_NONE) {
+ if (asyncJob != VIR_ASYNC_JOB_NONE) {
if (qemuDomainObjBeginNestedJob(driver, vm, asyncJob) < 0)
goto cleanup;
- } else if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE &&
+ } else if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE &&
priv->job.asyncOwner == virThreadSelfID() &&
- priv->job.active != QEMU_JOB_ASYNC_NESTED) {
+ priv->job.active != VIR_JOB_ASYNC_NESTED) {
VIR_WARN("qemuProcessStop called without a nested job (async=%s)",
- qemuDomainAsyncJobTypeToString(asyncJob));
+ virDomainAsyncJobTypeToString(asyncJob));
}
if (!virDomainObjIsActive(vm)) {
virDomainObjRemoveTransientDef(vm);
endjob:
- if (asyncJob != QEMU_ASYNC_JOB_NONE)
+ if (asyncJob != VIR_ASYNC_JOB_NONE)
qemuDomainObjEndJob(vm);
cleanup:
VIR_DEBUG("vm=%s, conn=%p", dom->def->name, conn);
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
if (priv->job.asyncJob) {
VIR_DEBUG("Killing domain");
- if (qemuProcessBeginStopJob(driver, dom, QEMU_JOB_DESTROY, true) < 0)
+ if (qemuProcessBeginStopJob(driver, dom, VIR_JOB_DESTROY, true) < 0)
return;
qemuProcessStop(driver, dom, VIR_DOMAIN_SHUTOFF_DESTROYED,
- QEMU_ASYNC_JOB_NONE, stopFlags);
+ VIR_ASYNC_JOB_NONE, stopFlags);
virDomainAuditStop(dom, "destroyed");
event = virDomainEventLifecycleNewFromObj(dom,
int
qemuProcessRefreshDisks(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
bool blockdev = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV);
static int
qemuProcessRefreshCPUMigratability(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virDomainDef *def = vm->def;
if (!vm->def->cpu)
return 0;
- if (qemuProcessRefreshCPUMigratability(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessRefreshCPUMigratability(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
if (!(host = virQEMUDriverGetHostCPU(driver))) {
if (virCPUUpdate(vm->def->os.arch, vm->def->cpu, cpu) < 0)
return -1;
- if (qemuProcessUpdateCPU(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessUpdateCPU(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
} else if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CPU_MODEL_EXPANSION)) {
/* We only try to fix CPUs when the libvirt/QEMU combo used to start
priv = obj->privateData;
qemuDomainObjRestoreJob(obj, &oldjob);
- if (oldjob.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+ if (oldjob.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
- if (oldjob.asyncJob == QEMU_ASYNC_JOB_BACKUP && priv->backup)
+ if (oldjob.asyncJob == VIR_ASYNC_JOB_BACKUP && priv->backup)
priv->backup->apiFlags = oldjob.apiFlags;
- if (qemuDomainObjBeginJob(driver, obj, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, obj, VIR_JOB_MODIFY) < 0)
goto error;
jobStarted = true;
tryMonReconn = true;
/* XXX check PID liveliness & EXE path */
- if (qemuConnectMonitor(driver, obj, QEMU_ASYNC_JOB_NONE, retry, NULL) < 0)
+ if (qemuConnectMonitor(driver, obj, VIR_ASYNC_JOB_NONE, retry, NULL) < 0)
goto error;
priv->machineName = qemuDomainGetMachineName(obj);
ignore_value(qemuSecurityCheckAllLabel(driver->securityManager,
obj->def));
- if (qemuDomainRefreshVcpuInfo(driver, obj, QEMU_ASYNC_JOB_NONE, true) < 0)
+ if (qemuDomainRefreshVcpuInfo(driver, obj, VIR_ASYNC_JOB_NONE, true) < 0)
goto error;
qemuDomainVcpuPersistOrder(obj->def);
if (qemuProcessRefreshCPU(driver, obj) < 0)
goto error;
- if (qemuDomainUpdateMemoryDeviceInfo(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuDomainUpdateMemoryDeviceInfo(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
- if (qemuProcessDetectIOThreadPIDs(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessDetectIOThreadPIDs(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
if (qemuSecurityReserveLabel(driver->securityManager, obj->def, obj->pid) < 0)
qemuProcessFiltersInstantiate(obj->def);
- if (qemuProcessRefreshDisks(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessRefreshDisks(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
/* At this point we've already checked that the startup of the VM was
}
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
- qemuBlockNodeNamesDetect(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+ qemuBlockNodeNamesDetect(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
- if (qemuRefreshVirtioChannelState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuRefreshVirtioChannelState(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
/* If querying of guest's RTC failed, report error, but do not kill the domain. */
qemuRefreshRTC(driver, obj);
- if (qemuProcessRefreshBalloonState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessRefreshBalloonState(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
if (qemuProcessRecoverJob(driver, obj, &oldjob, &stopFlags) < 0)
* thread didn't have a chance to start playing with the domain yet
* (it's all we can do anyway).
*/
- qemuProcessStop(driver, obj, state, QEMU_ASYNC_JOB_NONE, stopFlags);
+ qemuProcessStop(driver, obj, state, VIR_ASYNC_JOB_NONE, stopFlags);
}
goto cleanup;
}
* object.
*/
qemuProcessStop(src->driver, obj, VIR_DOMAIN_SHUTOFF_FAILED,
- QEMU_ASYNC_JOB_NONE, 0);
+ VIR_ASYNC_JOB_NONE, 0);
qemuDomainRemoveInactiveJobLocked(src->driver, obj);
virDomainObjEndAPI(&obj);
int qemuProcessStartCPUs(virQEMUDriver *driver,
virDomainObj *vm,
virDomainRunningReason reason,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuProcessStopCPUs(virQEMUDriver *driver,
virDomainObj *vm,
virDomainPausedReason reason,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuProcessBuildDestroyMemoryPaths(virQEMUDriver *driver,
virDomainObj *vm,
virQEMUDriver *driver,
virDomainObj *vm,
virCPUDef *updatedCPU,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
const char *migrateFrom,
int stdin_fd,
const char *stdin_path,
int qemuProcessInit(virQEMUDriver *driver,
virDomainObj *vm,
virCPUDef *updatedCPU,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool migration,
unsigned int flags);
int qemuProcessLaunch(virConnectPtr conn,
virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
qemuProcessIncomingDef *incoming,
virDomainMomentObj *snapshot,
virNetDevVPortProfileOp vmop,
int qemuProcessFinishStartup(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool startCPUs,
virDomainPausedReason pausedReason);
int qemuProcessRefreshState(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
typedef enum {
VIR_QEMU_PROCESS_STOP_MIGRATED = 1 << 0,
int qemuProcessBeginStopJob(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainJob job,
+ virDomainJob job,
bool forceKill);
void qemuProcessStop(virQEMUDriver *driver,
virDomainObj *vm,
virDomainShutoffReason reason,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
unsigned int flags);
typedef enum {
int qemuRefreshVirtioChannelState(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuProcessRefreshBalloonState(virQEMUDriver *driver,
virDomainObj *vm,
int qemuProcessRefreshDisks(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuProcessStartManagedPRDaemon(virDomainObj *vm) G_GNUC_NO_INLINE;
virQEMUSaveData *data,
virCommand *compressor,
unsigned int flags,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
bool needUnlink = false;
const char *path,
bool start_paused,
bool reset_nvram,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int ret = -1;
const char *path,
bool start_paused,
bool reset_nvram,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
ATTRIBUTE_NONNULL(4) ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(6);
int
virQEMUSaveData *data,
virCommand *compressor,
unsigned int flags,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int
virQEMUSaveDataWrite(virQEMUSaveData *data,
* domain. Thus we stop and start CPUs ourselves.
*/
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
- QEMU_ASYNC_JOB_SNAPSHOT) < 0)
+ VIR_ASYNC_JOB_SNAPSHOT) < 0)
goto cleanup;
resume = true;
}
if (qemuDomainObjEnterMonitorAsync(driver, vm,
- QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
+ VIR_ASYNC_JOB_SNAPSHOT) < 0) {
resume = false;
goto cleanup;
}
event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT,
- QEMU_ASYNC_JOB_SNAPSHOT, 0);
+ VIR_ASYNC_JOB_SNAPSHOT, 0);
virDomainAuditStop(vm, "from-snapshot");
resume = false;
}
if (resume && virDomainObjIsActive(vm) &&
qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
- QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
+ VIR_ASYNC_JOB_SNAPSHOT) < 0) {
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_SUSPENDED,
VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR);
qemuSnapshotDiskCleanup(qemuSnapshotDiskData *data,
size_t ndata,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virQEMUDriver *driver = priv->driver;
/* needed for automatic cleanup of 'dd' */
virDomainObj *vm;
- qemuDomainAsyncJob asyncJob;
+ virDomainAsyncJob asyncJob;
};
typedef struct _qemuSnapshotDiskContext qemuSnapshotDiskContext;
qemuSnapshotDiskContext *
qemuSnapshotDiskContextNew(size_t ndisks,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virQEMUDriver *driver = priv->driver;
virQEMUDriverConfig *cfg,
bool reuse,
GHashTable *blockNamedNodeData,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(virStorageSource) terminator = NULL;
virDomainMomentObj *snap,
bool reuse,
GHashTable *blockNamedNodeData,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(qemuSnapshotDiskContext) snapctxt = NULL;
size_t i;
virDomainMomentObj *snap,
GHashTable *blockNamedNodeData,
unsigned int flags,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
bool reuse = (flags & VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) != 0;
g_autoptr(qemuSnapshotDiskContext) snapctxt = NULL;
if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) {
int frozen;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0) {
* when the user wants to manually snapshot some disks */
if (((memory || has_manual) && !(flags & VIR_DOMAIN_SNAPSHOT_CREATE_LIVE))) {
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SNAPSHOT,
- QEMU_ASYNC_JOB_SNAPSHOT) < 0)
+ VIR_ASYNC_JOB_SNAPSHOT) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
* migration step as qemu deactivates bitmaps after migration so the result
* would be wrong */
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
- !(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_SNAPSHOT)))
+ !(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_SNAPSHOT)))
goto cleanup;
/* do the memory snapshot if necessary */
/* allow the migration job to be cancelled or the domain to be paused */
qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
- JOB_MASK(QEMU_JOB_SUSPEND) |
- JOB_MASK(QEMU_JOB_MIGRATION_OP)));
+ JOB_MASK(VIR_JOB_SUSPEND) |
+ JOB_MASK(VIR_JOB_MIGRATION_OP)));
if ((compressed = qemuSaveImageGetCompressionProgram(cfg->snapshotImageFormat,
&compressor,
if ((ret = qemuSaveImageCreate(driver, vm, snapdef->memorysnapshotfile,
data, compressor, 0,
- QEMU_ASYNC_JOB_SNAPSHOT)) < 0)
+ VIR_ASYNC_JOB_SNAPSHOT)) < 0)
goto cleanup;
/* the memory image was created, remove it on errors */
if ((ret = qemuSnapshotCreateActiveExternalDisks(vm, snap,
blockNamedNodeData, flags,
- QEMU_ASYNC_JOB_SNAPSHOT)) < 0)
+ VIR_ASYNC_JOB_SNAPSHOT)) < 0)
goto cleanup;
/* the snapshot is complete now */
event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT,
- QEMU_ASYNC_JOB_SNAPSHOT, 0);
+ VIR_ASYNC_JOB_SNAPSHOT, 0);
virDomainAuditStop(vm, "from-snapshot");
resume = false;
thaw = false;
if (resume && virDomainObjIsActive(vm) &&
qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
- QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
+ VIR_ASYNC_JOB_SNAPSHOT) < 0) {
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_SUSPENDED,
VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR);
}
if (thaw &&
- qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) >= 0 &&
+ qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) >= 0 &&
virDomainObjIsActive(vm)) {
/* report error only on an otherwise successful snapshot */
if (qemuSnapshotFSThaw(vm, ret == 0) < 0)
* a regular job, so we need to set the job mask to disallow query as
* 'savevm' blocks the monitor. External snapshot will then modify the
* job mask appropriately. */
- if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SNAPSHOT,
+ if (qemuDomainObjBeginAsyncJob(driver, vm, VIR_ASYNC_JOB_SNAPSHOT,
VIR_DOMAIN_JOB_OPERATION_SNAPSHOT, flags) < 0)
return NULL;
- qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
+ qemuDomainObjSetAsyncJobMask(vm, VIR_JOB_NONE);
if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE) {
snapshot = qemuSnapshotRedefine(vm, domain, def, driver, cfg, flags);
/* Transitions 5, 6, 8, 9 */
qemuProcessStop(driver, vm,
VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT,
- QEMU_ASYNC_JOB_START, 0);
+ VIR_ASYNC_JOB_START, 0);
virDomainAuditStop(vm, "from-snapshot");
detail = VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT;
event = virDomainEventLifecycleNewFromObj(vm,
rc = qemuProcessStart(snapshot->domain->conn, driver, vm,
cookie ? cookie->cpu : NULL,
- QEMU_ASYNC_JOB_START, NULL, -1, NULL, snap,
+ VIR_ASYNC_JOB_START, NULL, -1, NULL, snap,
VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
start_flags);
virDomainAuditStart(vm, "from-snapshot", rc >= 0);
}
rc = qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_FROM_SNAPSHOT,
- QEMU_ASYNC_JOB_START);
+ VIR_ASYNC_JOB_START);
if (rc < 0)
return -1;
}
if (virDomainObjIsActive(vm)) {
/* Transitions 4, 7 */
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT,
- QEMU_ASYNC_JOB_START, 0);
+ VIR_ASYNC_JOB_START, 0);
virDomainAuditStop(vm, "from-snapshot");
detail = VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT;
event = virDomainEventLifecycleNewFromObj(vm,
start_flags |= paused ? VIR_QEMU_PROCESS_START_PAUSED : 0;
rc = qemuProcessStart(snapshot->domain->conn, driver, vm, NULL,
- QEMU_ASYNC_JOB_START, NULL, -1, NULL, NULL,
+ VIR_ASYNC_JOB_START, NULL, -1, NULL, NULL,
VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
start_flags);
virDomainAuditStart(vm, "from-snapshot", rc >= 0);
VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY |
VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN_ONLY, -1);
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return -1;
if (!(snap = qemuSnapObjFromSnapshot(vm, snapshot)))
qemuSnapshotDiskContext *
qemuSnapshotDiskContextNew(size_t ndisks,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
void
qemuSnapshotDiskContextCleanup(qemuSnapshotDiskContext *snapctxt);