int
virCHDomainObjBeginJob(virDomainObj *obj, virDomainJob job)
{
- virCHDomainObjPrivate *priv = obj->privateData;
unsigned long long now;
unsigned long long then;
return -1;
then = now + CH_JOB_WAIT_TIME;
- while (priv->job.active) {
+ while (obj->job->active) {
VIR_DEBUG("Wait normal job condition for starting job: %s",
virDomainJobTypeToString(job));
- if (virCondWaitUntil(&priv->job.cond, &obj->parent.lock, then) < 0) {
+ if (virCondWaitUntil(&obj->job->cond, &obj->parent.lock, then) < 0) {
VIR_WARN("Cannot start job (%s) for domain %s;"
" current job is (%s) owned by (%llu)",
virDomainJobTypeToString(job),
obj->def->name,
- virDomainJobTypeToString(priv->job.active),
- priv->job.owner);
+ virDomainJobTypeToString(obj->job->active),
+ obj->job->owner);
if (errno == ETIMEDOUT)
virReportError(VIR_ERR_OPERATION_TIMEOUT,
}
}
- virDomainObjResetJob(&priv->job);
+ virDomainObjResetJob(obj->job);
VIR_DEBUG("Starting job: %s", virDomainJobTypeToString(job));
- priv->job.active = job;
- priv->job.owner = virThreadSelfID();
+ obj->job->active = job;
+ obj->job->owner = virThreadSelfID();
return 0;
}
void
virCHDomainObjEndJob(virDomainObj *obj)
{
- virCHDomainObjPrivate *priv = obj->privateData;
- virDomainJob job = priv->job.active;
+ virDomainJob job = obj->job->active;
VIR_DEBUG("Stopping job: %s",
virDomainJobTypeToString(job));
- virDomainObjResetJob(&priv->job);
- virCondSignal(&priv->job.cond);
+ virDomainObjResetJob(obj->job);
+ virCondSignal(&obj->job->cond);
}
void
priv = g_new0(virCHDomainObjPrivate, 1);
- if (virDomainObjInitJob(&priv->job, NULL, NULL) < 0) {
- g_free(priv);
- return NULL;
- }
-
if (!(priv->chrdevs = virChrdevAlloc())) {
- virDomainObjClearJob(&priv->job);
g_free(priv);
return NULL;
}
virCHDomainObjPrivate *priv = data;
virChrdevFree(priv->chrdevs);
- virDomainObjClearJob(&priv->job);
g_free(priv->machineName);
g_free(priv);
}
typedef struct _virCHDomainObjPrivate virCHDomainObjPrivate;
struct _virCHDomainObjPrivate {
- virDomainJobObj job;
-
virChrdevs *chrdevs;
virCHDriver *driver;
virCHMonitor *monitor;
#include "virdomaincheckpointobjlist.h"
#include "virutil.h"
#include "virsecureerase.h"
+#include "virdomainjob.h"
#define VIR_FROM_THIS VIR_FROM_DOMAIN
virDomainObj *obj,
virDomainJob job)
{
- libxlDomainObjPrivate *priv = obj->privateData;
unsigned long long now;
unsigned long long then;
return -1;
then = now + LIBXL_JOB_WAIT_TIME;
- while (priv->job.active) {
+ while (obj->job->active) {
VIR_DEBUG("Wait normal job condition for starting job: %s",
virDomainJobTypeToString(job));
- if (virCondWaitUntil(&priv->job.cond, &obj->parent.lock, then) < 0)
+ if (virCondWaitUntil(&obj->job->cond, &obj->parent.lock, then) < 0)
goto error;
}
- virDomainObjResetJob(&priv->job);
+ virDomainObjResetJob(obj->job);
VIR_DEBUG("Starting job: %s", virDomainJobTypeToString(job));
- priv->job.active = job;
- priv->job.owner = virThreadSelfID();
- priv->job.started = now;
+ obj->job->active = job;
+ obj->job->owner = virThreadSelfID();
+ obj->job->started = now;
return 0;
" current job is (%s) owned by (%llu)",
virDomainJobTypeToString(job),
obj->def->name,
- virDomainJobTypeToString(priv->job.active),
- priv->job.owner);
+ virDomainJobTypeToString(obj->job->active),
+ obj->job->owner);
if (errno == ETIMEDOUT)
virReportError(VIR_ERR_OPERATION_TIMEOUT,
libxlDomainObjEndJob(libxlDriverPrivate *driver G_GNUC_UNUSED,
virDomainObj *obj)
{
- libxlDomainObjPrivate *priv = obj->privateData;
- virDomainJob job = priv->job.active;
+ virDomainJob job = obj->job->active;
VIR_DEBUG("Stopping job: %s",
virDomainJobTypeToString(job));
- virDomainObjResetJob(&priv->job);
- virCondSignal(&priv->job.cond);
+ virDomainObjResetJob(obj->job);
+ virCondSignal(&obj->job->cond);
}
int
return NULL;
}
- if (virDomainObjInitJob(&priv->job, NULL, NULL) < 0) {
- virChrdevFree(priv->devs);
- g_free(priv);
- return NULL;
- }
-
return priv;
}
libxlDomainObjPrivate *priv = data;
g_free(priv->lockState);
- virDomainObjClearJob(&priv->job);
virChrdevFree(priv->devs);
g_free(priv);
}
char *lockState;
bool lockProcessRunning;
- virDomainJobObj job;
-
bool hookRun; /* true if there was a hook run over this domain */
};
libxlDomainGetJobInfo(virDomainPtr dom,
virDomainJobInfoPtr info)
{
- libxlDomainObjPrivate *priv;
virDomainObj *vm;
int ret = -1;
unsigned long long timeElapsed = 0;
if (virDomainGetJobInfoEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- priv = vm->privateData;
- if (!priv->job.active) {
+ if (!vm->job->active) {
memset(info, 0, sizeof(*info));
info->type = VIR_DOMAIN_JOB_NONE;
ret = 0;
/* In libxl we don't have an estimated completion time
* thus we always set to unbounded and update time
* for the active job. */
- if (libxlDomainJobGetTimeElapsed(&priv->job, &timeElapsed) < 0)
+ if (libxlDomainJobGetTimeElapsed(vm->job, &timeElapsed) < 0)
goto cleanup;
/* setting only these two attributes is enough because libxl never sets
int *nparams,
unsigned int flags)
{
- libxlDomainObjPrivate *priv;
virDomainObj *vm;
int ret = -1;
int maxparams = 0;
if (virDomainGetJobStatsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- priv = vm->privateData;
- if (!priv->job.active) {
+ if (!vm->job->active) {
*type = VIR_DOMAIN_JOB_NONE;
*params = NULL;
*nparams = 0;
/* In libxl we don't have an estimated completion time
* thus we always set to unbounded and update time
* for the active job. */
- if (libxlDomainJobGetTimeElapsed(&priv->job, &timeElapsed) < 0)
+ if (libxlDomainJobGetTimeElapsed(vm->job, &timeElapsed) < 0)
goto cleanup;
if (virTypedParamsAddULLong(params, nparams, &maxparams,
virDomainObj *obj,
virDomainJob job)
{
- virLXCDomainObjPrivate *priv = obj->privateData;
unsigned long long now;
unsigned long long then;
return -1;
then = now + LXC_JOB_WAIT_TIME;
- while (priv->job.active) {
+ while (obj->job->active) {
VIR_DEBUG("Wait normal job condition for starting job: %s",
virDomainJobTypeToString(job));
- if (virCondWaitUntil(&priv->job.cond, &obj->parent.lock, then) < 0)
+ if (virCondWaitUntil(&obj->job->cond, &obj->parent.lock, then) < 0)
goto error;
}
- virDomainObjResetJob(&priv->job);
+ virDomainObjResetJob(obj->job);
VIR_DEBUG("Starting job: %s", virDomainJobTypeToString(job));
- priv->job.active = job;
- priv->job.owner = virThreadSelfID();
+ obj->job->active = job;
+ obj->job->owner = virThreadSelfID();
return 0;
" current job is (%s) owned by (%llu)",
virDomainJobTypeToString(job),
obj->def->name,
- virDomainJobTypeToString(priv->job.active),
- priv->job.owner);
+ virDomainJobTypeToString(obj->job->active),
+ obj->job->owner);
if (errno == ETIMEDOUT)
virReportError(VIR_ERR_OPERATION_TIMEOUT,
virLXCDomainObjEndJob(virLXCDriver *driver G_GNUC_UNUSED,
virDomainObj *obj)
{
- virLXCDomainObjPrivate *priv = obj->privateData;
- virDomainJob job = priv->job.active;
+ virDomainJob job = obj->job->active;
VIR_DEBUG("Stopping job: %s",
virDomainJobTypeToString(job));
- virDomainObjResetJob(&priv->job);
- virCondSignal(&priv->job.cond);
+ virDomainObjResetJob(obj->job);
+ virCondSignal(&obj->job->cond);
}
{
virLXCDomainObjPrivate *priv = g_new0(virLXCDomainObjPrivate, 1);
- if (virDomainObjInitJob(&priv->job, NULL, NULL) < 0) {
- g_free(priv);
- return NULL;
- }
-
priv->driver = opaque;
return priv;
virLXCDomainObjPrivate *priv = data;
virCgroupFree(priv->cgroup);
- virDomainObjClearJob(&priv->job);
g_free(priv);
}
virCgroup *cgroup;
char *machineName;
-
- virDomainJobObj job;
};
extern virXMLNamespace virLXCDriverDomainXMLNamespace;
}
}
- if (priv->job.current) {
+ if (vm->job->current) {
qemuDomainJobDataPrivate *privData = NULL;
- qemuDomainJobDataUpdateTime(priv->job.current);
+ qemuDomainJobDataUpdateTime(vm->job->current);
- g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
- priv->job.completed = virDomainJobDataCopy(priv->job.current);
+ g_clear_pointer(&vm->job->completed, virDomainJobDataFree);
+ vm->job->completed = virDomainJobDataCopy(vm->job->current);
- privData = priv->job.completed->privateData;
+ privData = vm->job->completed->privateData;
privData->stats.backup.total = priv->backup->push_total;
privData->stats.backup.transferred = priv->backup->push_transferred;
privData->stats.backup.tmp_used = priv->backup->pull_tmp_used;
privData->stats.backup.tmp_total = priv->backup->pull_tmp_total;
- priv->job.completed->status = jobstatus;
- priv->job.completed->errmsg = g_strdup(priv->backup->errmsg);
+ vm->job->completed->status = jobstatus;
+ vm->job->completed->errmsg = g_strdup(priv->backup->errmsg);
qemuDomainEventEmitJobCompleted(priv->driver, vm);
}
g_clear_pointer(&priv->backup, virDomainBackupDefFree);
- if (priv->job.asyncJob == VIR_ASYNC_JOB_BACKUP)
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_BACKUP)
qemuDomainObjEndAsyncJob(vm);
}
qemuDomainObjSetAsyncJobMask(vm, (VIR_JOB_DEFAULT_MASK |
JOB_MASK(VIR_JOB_SUSPEND) |
JOB_MASK(VIR_JOB_MODIFY)));
- qemuDomainJobSetStatsType(priv->job.current,
+ qemuDomainJobSetStatsType(vm->job->current,
QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP);
if (!virDomainObjIsActive(vm)) {
virQEMUDriverCreateXMLConf(virQEMUDriver *driver,
const char *defsecmodel)
{
+ g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
+
virQEMUDriverDomainDefParserConfig.priv = driver;
virQEMUDriverDomainDefParserConfig.defSecModel = defsecmodel;
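+ /* set once here instead of being copied into every domain's private
+ * job object at allocation time (see qemuDomainObjPrivateAlloc) */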
+ virQEMUDriverDomainJobConfig.maxQueuedJobs = cfg->maxQueuedJobs;
+
return virDomainXMLOptionNew(&virQEMUDriverDomainDefParserConfig,
&virQEMUDriverPrivateDataCallbacks,
&virQEMUDriverDomainXMLNamespace,
&virQEMUDriverDomainABIStability,
&virQEMUDriverDomainSaveCookie,
- NULL);
+ &virQEMUDriverDomainJobConfig);
}
return -1;
if (n > 0) {
- if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
+ if (vm->job->asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
VIR_WARN("Found disks marked for migration but we were not "
"migrating");
n = 0;
return 0;
}
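+/* Private-data callbacks for virDomainJobData, moved here from
+ * qemu_domainjob.c so they can be referenced from the driver-wide
+ * virDomainJobObjConfig below. */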
+static void *
+qemuJobDataAllocPrivateData(void)
+{
+ return g_new0(qemuDomainJobDataPrivate, 1);
+}
+
+
+static void *
+qemuJobDataCopyPrivateData(void *data)
+{
+ qemuDomainJobDataPrivate *ret = g_new0(qemuDomainJobDataPrivate, 1);
+
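+ /* the private data holds only plain stats values, so a shallow copy suffices */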
+ memcpy(ret, data, sizeof(qemuDomainJobDataPrivate));
+
+ return ret;
+}
+
-static virDomainObjPrivateJobCallbacks qemuPrivateJobCallbacks = {
- .allocJobPrivate = qemuJobAllocPrivate,
- .freeJobPrivate = qemuJobFreePrivate,
- .resetJobPrivate = qemuJobResetPrivate,
- .formatJobPrivate = qemuDomainFormatJobPrivate,
- .parseJobPrivate = qemuDomainParseJobPrivate,
- .saveStatusPrivate = qemuDomainSaveStatus,
+static void
+qemuJobDataFreePrivateData(void *data)
+{
+ g_free(data);
+}
+
+
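+/* Bundles the per-job private callbacks, the job-data private callbacks
+ * and the job queue limit; handed to virDomainXMLOptionNew() in
+ * virQEMUDriverCreateXMLConf() so the generic job code can set up vm->job. */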
+virDomainJobObjConfig virQEMUDriverDomainJobConfig = {
+ .cb = {
+ .allocJobPrivate = qemuJobAllocPrivate,
+ .freeJobPrivate = qemuJobFreePrivate,
+ .resetJobPrivate = qemuJobResetPrivate,
+ .formatJobPrivate = qemuDomainFormatJobPrivate,
+ .parseJobPrivate = qemuDomainParseJobPrivate,
+ .saveStatusPrivate = qemuDomainSaveStatus,
+ },
+ .jobDataPrivateCb = {
+ .allocPrivateData = qemuJobDataAllocPrivateData,
+ .copyPrivateData = qemuJobDataCopyPrivateData,
+ .freePrivateData = qemuJobDataFreePrivateData,
+ },
+ .maxQueuedJobs = 0,
};
/**
qemuDomainObjPrivateDataClear(priv);
virObjectUnref(priv->monConfig);
- virDomainObjClearJob(&priv->job);
g_free(priv->lockState);
g_free(priv->origname);
qemuDomainObjPrivateAlloc(void *opaque)
{
g_autoptr(qemuDomainObjPrivate) priv = g_new0(qemuDomainObjPrivate, 1);
- g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(opaque);
-
- if (virDomainObjInitJob(&priv->job, &qemuPrivateJobCallbacks,
- &qemuJobDataPrivateDataCallbacks) < 0) {
- virReportSystemError(errno, "%s",
- _("Unable to init qemu driver mutexes"));
- return NULL;
- }
if (!(priv->devs = virChrdevAlloc()))
return NULL;
priv->blockjobs = virHashNew(virObjectUnref);
- priv->job.maxQueuedJobs = cfg->maxQueuedJobs;
-
/* agent commands block by default, user can choose different behavior */
priv->agentTimeout = VIR_DOMAIN_AGENT_RESPONSE_TIMEOUT_BLOCK;
priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
qemuDomainObjEndJob(obj);
return -1;
}
- } else if (priv->job.asyncOwner == virThreadSelfID()) {
+ } else if (obj->job->asyncOwner == virThreadSelfID()) {
VIR_WARN("This thread seems to be the async job owner; entering"
" monitor without asking for a nested job is dangerous");
- } else if (priv->job.owner != virThreadSelfID()) {
+ } else if (obj->job->owner != virThreadSelfID()) {
VIR_WARN("Entering a monitor without owning a job. "
"Job %s owner %s (%llu)",
- virDomainJobTypeToString(priv->job.active),
- priv->job.ownerAPI, priv->job.owner);
+ virDomainJobTypeToString(obj->job->active),
+ obj->job->ownerAPI, obj->job->owner);
}
VIR_DEBUG("Entering monitor (mon=%p vm=%p name=%s)",
if (!hasRefs)
priv->mon = NULL;
- if (priv->job.active == VIR_JOB_ASYNC_NESTED)
+ if (obj->job->active == VIR_JOB_ASYNC_NESTED)
qemuDomainObjEndJob(obj);
}
struct _qemuDomainObjPrivate {
virQEMUDriver *driver;
- virDomainJobObj job;
-
virBitmap *namespaces;
virEventThread *eventThread;
extern virDomainDefParserConfig virQEMUDriverDomainDefParserConfig;
extern virDomainABIStability virQEMUDriverDomainABIStability;
extern virSaveCookieCallbacks virQEMUDriverDomainSaveCookie;
+extern virDomainJobObjConfig virQEMUDriverDomainJobConfig;
int qemuDomainUpdateDeviceList(virDomainObj *vm, int asyncJob);
VIR_LOG_INIT("qemu.qemu_domainjob");
-static void *
-qemuJobDataAllocPrivateData(void)
-{
- return g_new0(qemuDomainJobDataPrivate, 1);
-}
-
-
-static void *
-qemuJobDataCopyPrivateData(void *data)
-{
- qemuDomainJobDataPrivate *ret = g_new0(qemuDomainJobDataPrivate, 1);
-
- memcpy(ret, data, sizeof(qemuDomainJobDataPrivate));
-
- return ret;
-}
-
-
-static void
-qemuJobDataFreePrivateData(void *data)
-{
- g_free(data);
-}
-
-
-virDomainJobDataPrivateDataCallbacks qemuJobDataPrivateDataCallbacks = {
- .allocPrivateData = qemuJobDataAllocPrivateData,
- .copyPrivateData = qemuJobDataCopyPrivateData,
- .freePrivateData = qemuJobDataFreePrivateData,
-};
-
-
void
qemuDomainJobSetStatsType(virDomainJobData *jobData,
qemuDomainJobStatsType type)
qemuDomainEventEmitJobCompleted(virQEMUDriver *driver,
virDomainObj *vm)
{
- qemuDomainObjPrivate *priv = vm->privateData;
virObjectEvent *event;
virTypedParameterPtr params = NULL;
int nparams = 0;
int type;
- if (!priv->job.completed)
+ if (!vm->job->completed)
return;
- if (qemuDomainJobDataToParams(priv->job.completed, &type,
+ if (qemuDomainJobDataToParams(vm->job->completed, &type,
&params, &nparams) < 0) {
VIR_WARN("Could not get stats for completed job; domain %s",
vm->def->name);
virDomainJobStatus status,
unsigned long long allowedJobs)
{
- qemuDomainObjPrivate *priv = vm->privateData;
- virDomainJobObj *job = &priv->job;
+ virDomainJobObj *job = vm->job;
VIR_DEBUG("Restoring %s async job for domain %s",
virDomainAsyncJobTypeToString(asyncJob), vm->def->name);
qemuDomainObjSetAsyncJobMask(vm, allowedJobs);
- job->current = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);
- qemuDomainJobSetStatsType(priv->job.current, statsType);
+ job->current = virDomainJobDataInit(&virQEMUDriverDomainJobConfig.jobDataPrivateCb);
+ qemuDomainJobSetStatsType(vm->job->current, statsType);
job->current->operation = operation;
job->current->status = status;
job->current->started = started;
qemuDomainObjSetJobPhase(virDomainObj *obj,
int phase)
{
- qemuDomainObjPrivate *priv = obj->privateData;
unsigned long long me = virThreadSelfID();
- if (!priv->job.asyncJob)
+ if (!obj->job->asyncJob)
return;
VIR_DEBUG("Setting '%s' phase to '%s'",
- virDomainAsyncJobTypeToString(priv->job.asyncJob),
- qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, phase));
+ virDomainAsyncJobTypeToString(obj->job->asyncJob),
+ qemuDomainAsyncJobPhaseToString(obj->job->asyncJob, phase));
- if (priv->job.asyncOwner != 0 &&
- priv->job.asyncOwner != me) {
+ if (obj->job->asyncOwner != 0 &&
+ obj->job->asyncOwner != me) {
VIR_WARN("'%s' async job is owned by thread %llu, API '%s'",
- virDomainAsyncJobTypeToString(priv->job.asyncJob),
- priv->job.asyncOwner,
- NULLSTR(priv->job.asyncOwnerAPI));
+ virDomainAsyncJobTypeToString(obj->job->asyncJob),
+ obj->job->asyncOwner,
+ NULLSTR(obj->job->asyncOwnerAPI));
}
- priv->job.phase = phase;
+ obj->job->phase = phase;
qemuDomainSaveStatus(obj);
}
qemuDomainObjStartJobPhase(virDomainObj *obj,
int phase)
{
- qemuDomainObjPrivate *priv = obj->privateData;
unsigned long long me = virThreadSelfID();
- if (!priv->job.asyncJob)
+ if (!obj->job->asyncJob)
return;
VIR_DEBUG("Starting phase '%s' of '%s' job",
- qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, phase),
- virDomainAsyncJobTypeToString(priv->job.asyncJob));
+ qemuDomainAsyncJobPhaseToString(obj->job->asyncJob, phase),
+ virDomainAsyncJobTypeToString(obj->job->asyncJob));
- if (priv->job.asyncOwner == 0) {
- priv->job.asyncOwnerAPI = g_strdup(virThreadJobGet());
- } else if (me != priv->job.asyncOwner) {
+ if (obj->job->asyncOwner == 0) {
+ obj->job->asyncOwnerAPI = g_strdup(virThreadJobGet());
+ } else if (me != obj->job->asyncOwner) {
VIR_WARN("'%s' async job is owned by thread %llu, API '%s'",
- virDomainAsyncJobTypeToString(priv->job.asyncJob),
- priv->job.asyncOwner,
- NULLSTR(priv->job.asyncOwnerAPI));
+ virDomainAsyncJobTypeToString(obj->job->asyncJob),
+ obj->job->asyncOwner,
+ NULLSTR(obj->job->asyncOwnerAPI));
}
- priv->job.asyncOwner = me;
+ obj->job->asyncOwner = me;
qemuDomainObjSetJobPhase(obj, phase);
}
qemuDomainObjSetAsyncJobMask(virDomainObj *obj,
unsigned long long allowedJobs)
{
- qemuDomainObjPrivate *priv = obj->privateData;
-
- if (!priv->job.asyncJob)
+ if (!obj->job->asyncJob)
return;
- priv->job.mask = allowedJobs | JOB_MASK(VIR_JOB_DESTROY);
+ obj->job->mask = allowedJobs | JOB_MASK(VIR_JOB_DESTROY);
}
void
qemuDomainObjDiscardAsyncJob(virDomainObj *obj)
{
- qemuDomainObjPrivate *priv = obj->privateData;
-
- if (priv->job.active == VIR_JOB_ASYNC_NESTED)
- virDomainObjResetJob(&priv->job);
- virDomainObjResetAsyncJob(&priv->job);
+ if (obj->job->active == VIR_JOB_ASYNC_NESTED)
+ virDomainObjResetJob(obj->job);
+ virDomainObjResetAsyncJob(obj->job);
qemuDomainSaveStatus(obj);
}
void
qemuDomainObjReleaseAsyncJob(virDomainObj *obj)
{
- qemuDomainObjPrivate *priv = obj->privateData;
-
VIR_DEBUG("Releasing ownership of '%s' async job",
- virDomainAsyncJobTypeToString(priv->job.asyncJob));
+ virDomainAsyncJobTypeToString(obj->job->asyncJob));
- if (priv->job.asyncOwner != virThreadSelfID()) {
+ if (obj->job->asyncOwner != virThreadSelfID()) {
VIR_WARN("'%s' async job is owned by thread %llu",
- virDomainAsyncJobTypeToString(priv->job.asyncJob),
- priv->job.asyncOwner);
+ virDomainAsyncJobTypeToString(obj->job->asyncJob),
+ obj->job->asyncOwner);
}
- priv->job.asyncOwner = 0;
+ obj->job->asyncOwner = 0;
}
/*
int qemuDomainObjBeginJob(virDomainObj *obj,
virDomainJob job)
{
- qemuDomainObjPrivate *priv = obj->privateData;
-
- if (virDomainObjBeginJobInternal(obj, &priv->job, job,
+ if (virDomainObjBeginJobInternal(obj, obj->job, job,
VIR_AGENT_JOB_NONE,
VIR_ASYNC_JOB_NONE, false) < 0)
return -1;
qemuDomainObjBeginAgentJob(virDomainObj *obj,
virDomainAgentJob agentJob)
{
- qemuDomainObjPrivate *priv = obj->privateData;
-
- return virDomainObjBeginJobInternal(obj, &priv->job, VIR_JOB_NONE,
+ return virDomainObjBeginJobInternal(obj, obj->job, VIR_JOB_NONE,
agentJob,
VIR_ASYNC_JOB_NONE, false);
}
virDomainJobOperation operation,
unsigned long apiFlags)
{
- qemuDomainObjPrivate *priv = obj->privateData;
-
- if (virDomainObjBeginJobInternal(obj, &priv->job, VIR_JOB_ASYNC,
+ if (virDomainObjBeginJobInternal(obj, obj->job, VIR_JOB_ASYNC,
VIR_AGENT_JOB_NONE,
asyncJob, false) < 0)
return -1;
- priv = obj->privateData;
- priv->job.current->operation = operation;
- priv->job.apiFlags = apiFlags;
+ obj->job->current->operation = operation;
+ obj->job->apiFlags = apiFlags;
return 0;
}
qemuDomainObjBeginNestedJob(virDomainObj *obj,
virDomainAsyncJob asyncJob)
{
- qemuDomainObjPrivate *priv = obj->privateData;
-
- if (asyncJob != priv->job.asyncJob) {
+ if (asyncJob != obj->job->asyncJob) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("unexpected async job %d type expected %d"),
- asyncJob, priv->job.asyncJob);
+ asyncJob, obj->job->asyncJob);
return -1;
}
- if (priv->job.asyncOwner != virThreadSelfID()) {
+ if (obj->job->asyncOwner != virThreadSelfID()) {
VIR_WARN("This thread doesn't seem to be the async job owner: %llu",
- priv->job.asyncOwner);
+ obj->job->asyncOwner);
}
- return virDomainObjBeginJobInternal(obj, &priv->job,
+ return virDomainObjBeginJobInternal(obj, obj->job,
VIR_JOB_ASYNC_NESTED,
VIR_AGENT_JOB_NONE,
VIR_ASYNC_JOB_NONE,
qemuDomainObjBeginJobNowait(virDomainObj *obj,
virDomainJob job)
{
- qemuDomainObjPrivate *priv = obj->privateData;
-
- return virDomainObjBeginJobInternal(obj, &priv->job, job,
+ return virDomainObjBeginJobInternal(obj, obj->job, job,
VIR_AGENT_JOB_NONE,
VIR_ASYNC_JOB_NONE, true);
}
void
qemuDomainObjEndJob(virDomainObj *obj)
{
- qemuDomainObjPrivate *priv = obj->privateData;
- virDomainJob job = priv->job.active;
+ virDomainJob job = obj->job->active;
- priv->job.jobsQueued--;
+ obj->job->jobsQueued--;
VIR_DEBUG("Stopping job: %s (async=%s vm=%p name=%s)",
virDomainJobTypeToString(job),
- virDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(obj->job->asyncJob),
obj, obj->def->name);
- virDomainObjResetJob(&priv->job);
+ virDomainObjResetJob(obj->job);
if (virDomainTrackJob(job))
qemuDomainSaveStatus(obj);
/* We indeed need to wake up ALL threads waiting because
* grabbing a job requires checking more variables. */
- virCondBroadcast(&priv->job.cond);
+ virCondBroadcast(&obj->job->cond);
}
void
qemuDomainObjEndAgentJob(virDomainObj *obj)
{
- qemuDomainObjPrivate *priv = obj->privateData;
- virDomainAgentJob agentJob = priv->job.agentActive;
+ virDomainAgentJob agentJob = obj->job->agentActive;
- priv->job.jobsQueued--;
+ obj->job->jobsQueued--;
VIR_DEBUG("Stopping agent job: %s (async=%s vm=%p name=%s)",
virDomainAgentJobTypeToString(agentJob),
- virDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(obj->job->asyncJob),
obj, obj->def->name);
- virDomainObjResetAgentJob(&priv->job);
+ virDomainObjResetAgentJob(obj->job);
/* We indeed need to wake up ALL threads waiting because
* grabbing a job requires checking more variables. */
- virCondBroadcast(&priv->job.cond);
+ virCondBroadcast(&obj->job->cond);
}
void
qemuDomainObjEndAsyncJob(virDomainObj *obj)
{
- qemuDomainObjPrivate *priv = obj->privateData;
-
- priv->job.jobsQueued--;
+ obj->job->jobsQueued--;
VIR_DEBUG("Stopping async job: %s (vm=%p name=%s)",
- virDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(obj->job->asyncJob),
obj, obj->def->name);
- virDomainObjResetAsyncJob(&priv->job);
+ virDomainObjResetAsyncJob(obj->job);
qemuDomainSaveStatus(obj);
- virCondBroadcast(&priv->job.asyncCond);
+ virCondBroadcast(&obj->job->asyncCond);
}
void
qemuDomainObjAbortAsyncJob(virDomainObj *obj)
{
- qemuDomainObjPrivate *priv = obj->privateData;
-
VIR_DEBUG("Requesting abort of async job: %s (vm=%p name=%s)",
- virDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(obj->job->asyncJob),
obj, obj->def->name);
- priv->job.abortJob = true;
+ obj->job->abortJob = true;
virDomainObjBroadcast(obj);
}
qemuDomainObjPrivateXMLFormatJob(virBuffer *buf,
virDomainObj *vm)
{
- qemuDomainObjPrivate *priv = vm->privateData;
g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER;
g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf);
- virDomainJob job = priv->job.active;
+ virDomainJob job = vm->job->active;
if (!virDomainTrackJob(job))
job = VIR_JOB_NONE;
if (job == VIR_JOB_NONE &&
- priv->job.asyncJob == VIR_ASYNC_JOB_NONE)
+ vm->job->asyncJob == VIR_ASYNC_JOB_NONE)
return 0;
virBufferAsprintf(&attrBuf, " type='%s' async='%s'",
virDomainJobTypeToString(job),
- virDomainAsyncJobTypeToString(priv->job.asyncJob));
+ virDomainAsyncJobTypeToString(vm->job->asyncJob));
- if (priv->job.phase) {
+ if (vm->job->phase) {
virBufferAsprintf(&attrBuf, " phase='%s'",
- qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
- priv->job.phase));
+ qemuDomainAsyncJobPhaseToString(vm->job->asyncJob,
+ vm->job->phase));
}
- if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE) {
- virBufferAsprintf(&attrBuf, " flags='0x%lx'", priv->job.apiFlags);
- virBufferAsprintf(&attrBuf, " asyncStarted='%llu'", priv->job.asyncStarted);
+ if (vm->job->asyncJob != VIR_ASYNC_JOB_NONE) {
+ virBufferAsprintf(&attrBuf, " flags='0x%lx'", vm->job->apiFlags);
+ virBufferAsprintf(&attrBuf, " asyncStarted='%llu'", vm->job->asyncStarted);
}
- if (priv->job.cb &&
- priv->job.cb->formatJobPrivate(&childBuf, &priv->job, vm) < 0)
+ if (vm->job->cb &&
+ vm->job->cb->formatJobPrivate(&childBuf, vm->job, vm) < 0)
return -1;
virXMLFormatElement(buf, "job", &attrBuf, &childBuf);
qemuDomainObjPrivateXMLParseJob(virDomainObj *vm,
xmlXPathContextPtr ctxt)
{
- qemuDomainObjPrivate *priv = vm->privateData;
- virDomainJobObj *job = &priv->job;
+ virDomainJobObj *job = vm->job;
VIR_XPATH_NODE_AUTORESTORE(ctxt)
g_autofree char *tmp = NULL;
return -1;
}
VIR_FREE(tmp);
- priv->job.active = type;
+ vm->job->active = type;
}
if ((tmp = virXPathString("string(@async)", ctxt))) {
return -1;
}
VIR_FREE(tmp);
- priv->job.asyncJob = async;
+ vm->job->asyncJob = async;
if ((tmp = virXPathString("string(@phase)", ctxt))) {
- priv->job.phase = qemuDomainAsyncJobPhaseFromString(async, tmp);
- if (priv->job.phase < 0) {
+ vm->job->phase = qemuDomainAsyncJobPhaseFromString(async, tmp);
+ if (vm->job->phase < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown job phase %s"), tmp);
return -1;
}
if (virXPathULongLong("string(@asyncStarted)", ctxt,
- &priv->job.asyncStarted) == -2) {
+ &vm->job->asyncStarted) == -2) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Invalid async job start"));
return -1;
}
}
- if (virXPathULongHex("string(@flags)", ctxt, &priv->job.apiFlags) == -2) {
+ if (virXPathULongHex("string(@flags)", ctxt, &vm->job->apiFlags) == -2) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Invalid job flags"));
return -1;
}
- if (priv->job.cb &&
- priv->job.cb->parseJobPrivate(ctxt, job, vm) < 0)
+ if (vm->job->cb &&
+ vm->job->cb->parseJobPrivate(ctxt, job, vm) < 0)
return -1;
return 0;
qemuDomainMirrorStats mirrorStats;
};
-extern virDomainJobDataPrivateDataCallbacks qemuJobDataPrivateDataCallbacks;
-
void qemuDomainJobSetStatsType(virDomainJobData *jobData,
qemuDomainJobStatsType type);
virQEMUDriver *driver = dom->conn->privateData;
virDomainObj *vm;
int ret = -1;
- qemuDomainObjPrivate *priv;
virDomainPausedReason reason;
int state;
if (virDomainSuspendEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- priv = vm->privateData;
-
if (qemuDomainObjBeginJob(vm, VIR_JOB_SUSPEND) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
goto endjob;
- if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
reason = VIR_DOMAIN_PAUSED_MIGRATION;
- else if (priv->job.asyncJob == VIR_ASYNC_JOB_SNAPSHOT)
+ else if (vm->job->asyncJob == VIR_ASYNC_JOB_SNAPSHOT)
reason = VIR_DOMAIN_PAUSED_SNAPSHOT;
else
reason = VIR_DOMAIN_PAUSED_USER;
qemuDomainSetFakeReboot(vm, false);
- if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_DESTROYED,
if (priv->monError) {
info->state = VIR_DOMAIN_CONTROL_ERROR;
info->details = VIR_DOMAIN_CONTROL_ERROR_REASON_MONITOR;
- } else if (priv->job.active) {
+ } else if (vm->job->active) {
if (virTimeMillisNow(&info->stateTime) < 0)
goto cleanup;
- if (priv->job.current) {
+ if (vm->job->current) {
info->state = VIR_DOMAIN_CONTROL_JOB;
- info->stateTime -= priv->job.current->started;
+ info->stateTime -= vm->job->current->started;
} else {
if (priv->monStart > 0) {
info->state = VIR_DOMAIN_CONTROL_OCCUPIED;
goto endjob;
}
- qemuDomainJobSetStatsType(priv->job.current,
+ qemuDomainJobSetStatsType(vm->job->current,
QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP);
/* Pause */
static int
qemuDumpWaitForCompletion(virDomainObj *vm)
{
- qemuDomainObjPrivate *priv = vm->privateData;
- qemuDomainJobPrivate *jobPriv = priv->job.privateData;
- qemuDomainJobDataPrivate *privJobCurrent = priv->job.current->privateData;
+ qemuDomainJobPrivate *jobPriv = vm->job->privateData;
+ qemuDomainJobDataPrivate *privJobCurrent = vm->job->current->privateData;
VIR_DEBUG("Waiting for dump completion");
- while (!jobPriv->dumpCompleted && !priv->job.abortJob) {
+ while (!jobPriv->dumpCompleted && !vm->job->abortJob) {
if (qemuDomainObjWait(vm) < 0)
return -1;
}
if (privJobCurrent->stats.dump.status == QEMU_MONITOR_DUMP_STATUS_FAILED) {
- if (priv->job.error)
+ if (vm->job->error)
virReportError(VIR_ERR_OPERATION_FAILED,
_("memory-only dump failed: %s"),
- priv->job.error);
+ vm->job->error);
else
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
_("memory-only dump failed for unknown reason"));
return -1;
}
- qemuDomainJobDataUpdateTime(priv->job.current);
+ qemuDomainJobDataUpdateTime(vm->job->current);
return 0;
}
return -1;
if (detach) {
- qemuDomainJobSetStatsType(priv->job.current,
+ qemuDomainJobSetStatsType(vm->job->current,
QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP);
} else {
- g_clear_pointer(&priv->job.current, virDomainJobDataFree);
+ g_clear_pointer(&vm->job->current, virDomainJobDataFree);
}
if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0)
goto endjob;
priv = vm->privateData;
- qemuDomainJobSetStatsType(priv->job.current,
+ qemuDomainJobSetStatsType(vm->job->current,
QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP);
/* Migrate will always stop the VM, so the resume condition is
auditReason = "failed";
}
- if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
qemuMigrationDstErrorSave(driver, vm->def->name,
qemuMonitorLastError(priv->mon));
bool force_boot = (flags & VIR_DOMAIN_START_FORCE_BOOT) != 0;
bool reset_nvram = (flags & VIR_DOMAIN_START_RESET_NVRAM) != 0;
unsigned int start_flags = VIR_QEMU_PROCESS_START_COLD;
- qemuDomainObjPrivate *priv = vm->privateData;
start_flags |= start_paused ? VIR_QEMU_PROCESS_START_PAUSED : 0;
start_flags |= autodestroy ? VIR_QEMU_PROCESS_START_AUTODESTROY : 0;
}
vm->hasManagedSave = false;
} else {
- virDomainJobOperation op = priv->job.current->operation;
- priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_RESTORE;
+ virDomainJobOperation op = vm->job->current->operation;
+ vm->job->current->operation = VIR_DOMAIN_JOB_OPERATION_RESTORE;
ret = qemuDomainObjRestore(conn, driver, vm, managed_save,
start_paused, bypass_cache,
return ret;
} else {
VIR_WARN("Ignoring incomplete managed state %s", managed_save);
- priv->job.current->operation = op;
+ vm->job->current->operation = op;
vm->hasManagedSave = false;
}
}
bool completed,
virDomainJobData **jobData)
{
- qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privStats = NULL;
int ret = -1;
*jobData = NULL;
if (completed) {
- if (priv->job.completed && !priv->job.current)
- *jobData = virDomainJobDataCopy(priv->job.completed);
+ if (vm->job->completed && !vm->job->current)
+ *jobData = virDomainJobDataCopy(vm->job->completed);
return 0;
}
- if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
_("migration statistics are available only on "
"the source host"));
if (virDomainObjCheckActive(vm) < 0)
goto cleanup;
- if (!priv->job.current) {
+ if (!vm->job->current) {
ret = 0;
goto cleanup;
}
- *jobData = virDomainJobDataCopy(priv->job.current);
+ *jobData = virDomainJobDataCopy(vm->job->current);
privStats = (*jobData)->privateData;
unsigned int flags)
{
virDomainObj *vm;
- qemuDomainObjPrivate *priv;
g_autoptr(virDomainJobData) jobData = NULL;
bool completed = !!(flags & VIR_DOMAIN_JOB_STATS_COMPLETED);
int ret = -1;
if (virDomainGetJobStatsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- priv = vm->privateData;
if (qemuDomainGetJobStatsInternal(vm, completed, &jobData) < 0)
goto cleanup;
ret = qemuDomainJobDataToParams(jobData, type, params, nparams);
if (completed && ret == 0 && !(flags & VIR_DOMAIN_JOB_STATS_KEEP_COMPLETED))
- g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
+ g_clear_pointer(&vm->job->completed, virDomainJobDataFree);
cleanup:
virDomainObjEndAPI(&vm);
priv = vm->privateData;
if (flags & VIR_DOMAIN_ABORT_JOB_POSTCOPY &&
- (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT ||
+ (vm->job->asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT ||
!virDomainObjIsPostcopy(vm, VIR_DOMAIN_JOB_OPERATION_MIGRATION_OUT))) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("current job is not outgoing migration in post-copy mode"));
goto endjob;
}
- switch (priv->job.asyncJob) {
+ switch (vm->job->asyncJob) {
case VIR_ASYNC_JOB_NONE:
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("no job is active on the domain"));
break;
case VIR_ASYNC_JOB_DUMP:
- if (priv->job.apiFlags & VIR_DUMP_MEMORY_ONLY) {
+ if (vm->job->apiFlags & VIR_DUMP_MEMORY_ONLY) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("cannot abort memory-only dump"));
goto endjob;
case VIR_ASYNC_JOB_LAST:
default:
- virReportEnumRangeError(virDomainAsyncJob, priv->job.asyncJob);
+ virReportEnumRangeError(virDomainAsyncJob, vm->job->asyncJob);
break;
}
priv = vm->privateData;
- if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
+ if (vm->job->asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("post-copy can only be started while "
"outgoing migration is in progress"));
goto endjob;
}
- if (!(priv->job.apiFlags & VIR_MIGRATE_POSTCOPY)) {
+ if (!(vm->job->apiFlags & VIR_MIGRATE_POSTCOPY)) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("switching to post-copy requires migration to be "
"started with VIR_MIGRATE_POSTCOPY flag"));
static bool ATTRIBUTE_NONNULL(1)
qemuMigrationJobIsAllowed(virDomainObj *vm)
{
- qemuDomainObjPrivate *priv = vm->privateData;
-
- if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN ||
- priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_IN ||
+ vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
virReportError(VIR_ERR_OPERATION_INVALID,
_("another migration job is already running for domain '%s'"),
vm->def->name);
virDomainAsyncJob job,
unsigned long apiFlags)
{
- qemuDomainObjPrivate *priv = vm->privateData;
virDomainJobOperation op;
unsigned long long mask;
if (qemuDomainObjBeginAsyncJob(vm, job, op, apiFlags) < 0)
return -1;
- qemuDomainJobSetStatsType(priv->job.current,
+ qemuDomainJobSetStatsType(vm->job->current,
QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION);
qemuDomainObjSetAsyncJobMask(vm, mask);
qemuMigrationCheckPhase(virDomainObj *vm,
qemuMigrationJobPhase phase)
{
- qemuDomainObjPrivate *priv = vm->privateData;
-
if (phase < QEMU_MIGRATION_PHASE_POSTCOPY_FAILED &&
- phase < priv->job.phase) {
+ phase < vm->job->phase) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("migration protocol going backwards %s => %s"),
- qemuMigrationJobPhaseTypeToString(priv->job.phase),
+ qemuMigrationJobPhaseTypeToString(vm->job->phase),
qemuMigrationJobPhaseTypeToString(phase));
return -1;
}
qemuMigrationJobIsActive(virDomainObj *vm,
virDomainAsyncJob job)
{
- qemuDomainObjPrivate *priv = vm->privateData;
-
- if (priv->job.asyncJob != job) {
+ if (vm->job->asyncJob != job) {
const char *msg;
if (job == VIR_ASYNC_JOB_MIGRATION_IN)
virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
- qemuDomainJobPrivate *jobPriv = priv->job.privateData;
+ qemuDomainJobPrivate *jobPriv = vm->job->privateData;
GSList *next;
for (next = jobPriv->migTempBitmaps; next; next = next->next) {
if (rv < 0)
return -1;
- if (priv->job.abortJob) {
- priv->job.current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
+ if (vm->job->abortJob) {
+ vm->job->current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
- virDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(vm->job->asyncJob),
_("canceled by client"));
return -1;
}
}
qemuMigrationSrcFetchMirrorStats(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
- priv->job.current);
+ vm->job->current);
return 0;
}
static void
qemuMigrationSrcWaitForSpice(virDomainObj *vm)
{
- qemuDomainObjPrivate *priv = vm->privateData;
- qemuDomainJobPrivate *jobPriv = priv->job.privateData;
+ qemuDomainJobPrivate *jobPriv = vm->job->privateData;
if (!jobPriv->spiceMigration)
return;
VIR_DEBUG("Waiting for SPICE to finish migration");
- while (!jobPriv->spiceMigrated && !priv->job.abortJob) {
+ while (!jobPriv->spiceMigrated && !vm->job->abortJob) {
if (qemuDomainObjWait(vm) < 0)
return;
}
static const char *
qemuMigrationJobName(virDomainObj *vm)
{
- qemuDomainObjPrivate *priv = vm->privateData;
-
- switch (priv->job.asyncJob) {
+ switch (vm->job->asyncJob) {
case VIR_ASYNC_JOB_MIGRATION_OUT:
return _("migration out");
case VIR_ASYNC_JOB_SAVE:
qemuMigrationJobCheckStatus(virDomainObj *vm,
virDomainAsyncJob asyncJob)
{
- qemuDomainObjPrivate *priv = vm->privateData;
- virDomainJobData *jobData = priv->job.current;
+ virDomainJobData *jobData = vm->job->current;
qemuDomainJobDataPrivate *privJob = jobData->privateData;
g_autofree char *error = NULL;
virConnectPtr dconn,
unsigned int flags)
{
- qemuDomainObjPrivate *priv = vm->privateData;
- virDomainJobData *jobData = priv->job.current;
+ virDomainJobData *jobData = vm->job->current;
int pauseReason;
if (qemuMigrationJobCheckStatus(vm, asyncJob) < 0)
unsigned int flags)
{
qemuDomainObjPrivate *priv = vm->privateData;
- virDomainJobData *jobData = priv->job.current;
+ virDomainJobData *jobData = vm->job->current;
int rv;
jobData->status = VIR_DOMAIN_JOB_STATUS_MIGRATING;
qemuDomainJobDataUpdateTime(jobData);
qemuDomainJobDataUpdateDowntime(jobData);
- g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
- priv->job.completed = virDomainJobDataCopy(jobData);
- priv->job.completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
+ g_clear_pointer(&vm->job->completed, virDomainJobDataFree);
+ vm->job->completed = virDomainJobDataCopy(jobData);
+ vm->job->completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
if (asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT &&
jobData->status == VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED)
return 0;
if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) == 0) {
- qemuDomainJobPrivate *jobPriv = priv->job.privateData;
+ qemuDomainJobPrivate *jobPriv = vm->job->privateData;
rc = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress,
port, tlsPort, tlsSubject);
qemuMigrationAnyConnectionClosed(virDomainObj *vm,
virConnectPtr conn)
{
- qemuDomainObjPrivate *priv = vm->privateData;
- qemuDomainJobPrivate *jobPriv = priv->job.privateData;
+ qemuDomainJobPrivate *jobPriv = vm->job->privateData;
bool postcopy = false;
int phase;
VIR_DEBUG("vm=%s, conn=%p, asyncJob=%s, phase=%s",
vm->def->name, conn,
- virDomainAsyncJobTypeToString(priv->job.asyncJob),
- qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
- priv->job.phase));
+ virDomainAsyncJobTypeToString(vm->job->asyncJob),
+ qemuDomainAsyncJobPhaseToString(vm->job->asyncJob,
+ vm->job->phase));
if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_IN) &&
!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT))
VIR_WARN("The connection which controls migration of domain %s was closed",
vm->def->name);
- switch ((qemuMigrationJobPhase) priv->job.phase) {
+ switch ((qemuMigrationJobPhase) vm->job->phase) {
case QEMU_MIGRATION_PHASE_BEGIN3:
VIR_DEBUG("Aborting outgoing migration after Begin phase");
break;
ignore_value(qemuMigrationJobStartPhase(vm, phase));
if (postcopy) {
- if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
qemuMigrationSrcPostcopyFailed(vm);
else
qemuMigrationDstPostcopyFailed(vm);
qemuMigrationJobContinue(vm, qemuProcessCleanupMigrationJob);
} else {
- qemuMigrationParamsReset(vm, priv->job.asyncJob,
- jobPriv->migParams, priv->job.apiFlags);
+ qemuMigrationParamsReset(vm, vm->job->asyncJob,
+ jobPriv->migParams, vm->job->apiFlags);
qemuMigrationJobFinish(vm);
}
}
{
GSList *disks = NULL;
- qemuDomainObjPrivate *priv = vm->privateData;
size_t i;
g_autoptr(GHashTable) blockNamedNodeData = NULL;
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, priv->job.asyncJob)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, vm->job->asyncJob)))
return -1;
for (i = 0; i < vm->def->ndisks; i++) {
g_autoptr(virDomainJobData) jobData = NULL;
qemuDomainJobDataPrivate *priv;
- jobData = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);
+ jobData = virDomainJobDataInit(&virQEMUDriverDomainJobConfig.jobDataPrivateCb);
priv = jobData->privateData;
if (qemuMigrationAnyFetchStats(vm, asyncJob, jobData, NULL) < 0)
* Otherwise we will start the async job later in the perform phase losing
* change protection.
*/
- if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
qemuMigrationJobStartPhase(vm, QEMU_MIGRATION_PHASE_BEGIN3) < 0)
return NULL;
- if (!qemuMigrationSrcIsAllowed(driver, vm, true, priv->job.asyncJob, flags))
+ if (!qemuMigrationSrcIsAllowed(driver, vm, true, vm->job->asyncJob, flags))
return NULL;
if (!(flags & (VIR_MIGRATE_UNSAFE | VIR_MIGRATE_OFFLINE)) &&
unsigned long flags,
qemuMigrationJobPhase expectedPhase)
{
- qemuDomainObjPrivate *priv = vm->privateData;
-
VIR_DEBUG("vm=%p, job=%s, flags=0x%lx, expectedPhase=%s",
vm, virDomainAsyncJobTypeToString(job), flags,
qemuDomainAsyncJobPhaseToString(VIR_ASYNC_JOB_MIGRATION_OUT,
if (!qemuMigrationJobIsActive(vm, job))
return false;
- if (priv->job.asyncOwner != 0 &&
- priv->job.asyncOwner != virThreadSelfID()) {
+ if (vm->job->asyncOwner != 0 &&
+ vm->job->asyncOwner != virThreadSelfID()) {
virReportError(VIR_ERR_OPERATION_INVALID,
_("migration of domain %s is being actively monitored by another thread"),
vm->def->name);
return false;
}
- if (!virDomainObjIsPostcopy(vm, priv->job.current->operation)) {
+ if (!virDomainObjIsPostcopy(vm, vm->job->current->operation)) {
virReportError(VIR_ERR_OPERATION_INVALID,
_("migration of domain %s is not in post-copy phase"),
vm->def->name);
return false;
}
- if (priv->job.phase < QEMU_MIGRATION_PHASE_POSTCOPY_FAILED &&
+ if (vm->job->phase < QEMU_MIGRATION_PHASE_POSTCOPY_FAILED &&
!virDomainObjIsFailedPostcopy(vm)) {
virReportError(VIR_ERR_OPERATION_INVALID,
_("post-copy migration of domain %s has not failed"),
return false;
}
- if (priv->job.phase > expectedPhase) {
+ if (vm->job->phase > expectedPhase) {
virReportError(VIR_ERR_OPERATION_INVALID,
_("resuming failed post-copy migration of domain %s already in progress"),
vm->def->name);
VIR_DEBUG("driver=%p, vm=%s, job=%s, asyncJob=%s",
driver,
vm->def->name,
- virDomainJobTypeToString(priv->job.active),
- virDomainAsyncJobTypeToString(priv->job.asyncJob));
+ virDomainJobTypeToString(vm->job->active),
+ virDomainAsyncJobTypeToString(vm->job->asyncJob));
virPortAllocatorRelease(priv->migrationPort);
priv->migrationPort = 0;
unsigned long flags)
{
qemuDomainObjPrivate *priv = vm->privateData;
- qemuDomainJobPrivate *jobPriv = priv->job.privateData;
+ qemuDomainJobPrivate *jobPriv = vm->job->privateData;
qemuProcessIncomingDef *incoming = NULL;
g_autofree char *tlsAlias = NULL;
virObjectEvent *event = NULL;
error:
virErrorPreserveLast(&origErr);
qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_IN,
- jobPriv->migParams, priv->job.apiFlags);
+ jobPriv->migParams, vm->job->apiFlags);
if (stopProcess) {
unsigned int stopFlags = VIR_QEMU_PROCESS_STOP_MIGRATED;
QEMU_MIGRATION_COOKIE_CPU_HOTPLUG |
QEMU_MIGRATION_COOKIE_CPU |
QEMU_MIGRATION_COOKIE_CAPS |
- QEMU_MIGRATION_COOKIE_BLOCK_DIRTY_BITMAPS)))
+ QEMU_MIGRATION_COOKIE_BLOCK_DIRTY_BITMAPS,
+ NULL)))
goto cleanup;
if (!(vm = virDomainObjListAdd(driver->domains, def,
if (!(mig = qemuMigrationCookieParse(driver, def, origname, NULL,
cookiein, cookieinlen,
- QEMU_MIGRATION_COOKIE_CAPS)))
+ QEMU_MIGRATION_COOKIE_CAPS, vm)))
goto cleanup;
priv->origname = g_strdup(origname);
virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
- virDomainJobData *jobData = priv->job.completed;
+ virDomainJobData *jobData = vm->job->completed;
virObjectEvent *event;
int reason;
if (!jobData) {
- priv->job.completed = virDomainJobDataCopy(priv->job.current);
- jobData = priv->job.completed;
+ vm->job->completed = virDomainJobDataCopy(vm->job->current);
+ jobData = vm->job->completed;
jobData->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
}
{
g_autoptr(qemuMigrationCookie) mig = NULL;
qemuDomainObjPrivate *priv = vm->privateData;
- qemuDomainJobPrivate *jobPriv = priv->job.privateData;
+ qemuDomainJobPrivate *jobPriv = vm->job->privateData;
virDomainJobData *jobData = NULL;
qemuMigrationJobPhase phase;
* job will stay active even though migration API finishes with an
* error.
*/
- phase = priv->job.phase;
+ phase = vm->job->phase;
} else if (retcode == 0) {
phase = QEMU_MIGRATION_PHASE_CONFIRM3;
} else {
if (!(mig = qemuMigrationCookieParse(driver, vm->def, priv->origname, priv,
cookiein, cookieinlen,
- QEMU_MIGRATION_COOKIE_STATS)))
+ QEMU_MIGRATION_COOKIE_STATS, vm)))
return -1;
if (retcode == 0)
- jobData = priv->job.completed;
+ jobData = vm->job->completed;
else
- g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
+ g_clear_pointer(&vm->job->completed, virDomainJobDataFree);
/* Update times with the values sent by the destination daemon */
if (mig->jobData && jobData) {
qemuMigrationSrcRestoreDomainState(driver, vm);
qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
- jobPriv->migParams, priv->job.apiFlags);
+ jobPriv->migParams, vm->job->apiFlags);
qemuDomainSetMaxMemLock(vm, 0, &priv->preMigrationMemlock);
}
{
qemuMigrationJobPhase phase;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
- qemuDomainObjPrivate *priv = vm->privateData;
int ret = -1;
VIR_DEBUG("vm=%p, flags=0x%x, cancelled=%d", vm, flags, cancelled);
* error.
*/
if (virDomainObjIsFailedPostcopy(vm))
- phase = priv->job.phase;
+ phase = vm->job->phase;
else if (cancelled)
phase = QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED;
else
{
g_autoslist(qemuDomainJobPrivateMigrateTempBitmap) tmpbitmaps = NULL;
qemuDomainObjPrivate *priv = vm->privateData;
- qemuDomainJobPrivate *jobPriv = priv->job.privateData;
+ qemuDomainJobPrivate *jobPriv = vm->job->privateData;
g_autoptr(virJSONValue) actions = virJSONValueNewArray();
g_autoptr(GHashTable) blockNamedNodeData = NULL;
GSList *nextdisk;
static bool
qemuMigrationSrcIsCanceled(virDomainObj *vm)
{
- qemuDomainObjPrivate *priv = vm->privateData;
- virDomainJobData *jobData = priv->job.current;
+ virDomainJobData *jobData = vm->job->current;
qemuMigrationUpdateJobType(jobData);
switch (jobData->status) {
cookieFlags |
QEMU_MIGRATION_COOKIE_GRAPHICS |
QEMU_MIGRATION_COOKIE_CAPS |
- QEMU_MIGRATION_COOKIE_BLOCK_DIRTY_BITMAPS);
+ QEMU_MIGRATION_COOKIE_BLOCK_DIRTY_BITMAPS,
+ NULL);
if (!mig)
goto error;
if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
- if (priv->job.abortJob) {
+ if (vm->job->abortJob) {
/* explicitly do this *after* we entered the monitor,
* as this is a critical section so we are guaranteed
- * priv->job.abortJob will not change */
- priv->job.current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
+ * vm->job->abortJob will not change */
+ vm->job->current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
- virDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(vm->job->asyncJob),
_("canceled by client"));
goto exit_monitor;
}
* resume it now once we finished all block jobs and wait for the real
* end of the migration.
*/
- if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_PAUSED) {
+ if (vm->job->current->status == VIR_DOMAIN_JOB_STATUS_PAUSED) {
if (qemuMigrationSrcContinue(vm,
QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER,
VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
}
- if (priv->job.completed) {
- priv->job.completed->stopped = priv->job.current->stopped;
- qemuDomainJobDataUpdateTime(priv->job.completed);
- qemuDomainJobDataUpdateDowntime(priv->job.completed);
- ignore_value(virTimeMillisNow(&priv->job.completed->sent));
+ if (vm->job->completed) {
+ vm->job->completed->stopped = vm->job->current->stopped;
+ qemuDomainJobDataUpdateTime(vm->job->completed);
+ qemuDomainJobDataUpdateDowntime(vm->job->completed);
+ ignore_value(virTimeMillisNow(&vm->job->completed->sent));
}
cookieFlags |= QEMU_MIGRATION_COOKIE_NETWORK |
}
if (cancel &&
- priv->job.current->status != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED)
+ vm->job->current->status != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED)
qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_MIGRATION_OUT, true);
/* cancel any outstanding NBD jobs */
qemuMigrationSrcCancelRemoveTempBitmaps(vm, VIR_ASYNC_JOB_MIGRATION_OUT);
- if (priv->job.current->status != VIR_DOMAIN_JOB_STATUS_CANCELED)
- priv->job.current->status = VIR_DOMAIN_JOB_STATUS_FAILED;
+ if (vm->job->current->status != VIR_DOMAIN_JOB_STATUS_CANCELED)
+ vm->job->current->status = VIR_DOMAIN_JOB_STATUS_FAILED;
}
if (iothread)
mig = qemuMigrationCookieParse(driver, vm->def, priv->origname, priv,
cookiein, cookieinlen,
- QEMU_MIGRATION_COOKIE_CAPS);
+ QEMU_MIGRATION_COOKIE_CAPS, vm);
if (!mig)
return -1;
virErrorPtr orig_err = NULL;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
qemuDomainObjPrivate *priv = vm->privateData;
- qemuDomainJobPrivate *jobPriv = priv->job.privateData;
+ qemuDomainJobPrivate *jobPriv = vm->job->privateData;
if (flags & VIR_MIGRATE_POSTCOPY_RESUME) {
if (!qemuMigrationAnyCanResume(vm, VIR_ASYNC_JOB_MIGRATION_OUT, flags,
*/
if (!v3proto && ret < 0)
qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
- jobPriv->migParams, priv->job.apiFlags);
+ jobPriv->migParams, vm->job->apiFlags);
qemuMigrationSrcRestoreDomainState(driver, vm);
const char *nbdURI)
{
qemuDomainObjPrivate *priv = vm->privateData;
- qemuDomainJobPrivate *jobPriv = priv->job.privateData;
+ qemuDomainJobPrivate *jobPriv = vm->job->privateData;
int ret = -1;
if (flags & VIR_MIGRATE_POSTCOPY_RESUME) {
if (ret < 0 && !virDomainObjIsFailedPostcopy(vm)) {
qemuMigrationSrcRestoreDomainState(driver, vm);
qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
- jobPriv->migParams, priv->job.apiFlags);
+ jobPriv->migParams, vm->job->apiFlags);
qemuDomainSetMaxMemLock(vm, 0, &priv->preMigrationMemlock);
qemuMigrationJobFinish(vm);
} else {
g_autoptr(qemuMigrationCookie) mig = NULL;
if (!(mig = qemuMigrationCookieParse(driver, vm->def, priv->origname, priv,
- cookiein, cookieinlen, cookie_flags)))
+ cookiein, cookieinlen, cookie_flags, NULL)))
return NULL;
if (qemuMigrationDstPersist(driver, vm, mig, false) < 0)
bool *doKill,
bool *inPostCopy)
{
- qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(virDomainJobData) jobData = NULL;
if (qemuMigrationDstVPAssociatePortProfiles(vm->def) < 0)
return -1;
}
- if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
+ if (vm->job->current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
*inPostCopy = true;
if (!(flags & VIR_MIGRATE_PAUSED)) {
}
if (jobData) {
- priv->job.completed = g_steal_pointer(&jobData);
- priv->job.completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
- qemuDomainJobSetStatsType(priv->job.completed,
+ vm->job->completed = g_steal_pointer(&jobData);
+ vm->job->completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
+ qemuDomainJobSetStatsType(vm->job->completed,
QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION);
}
virDomainPtr dom = NULL;
g_autoptr(qemuMigrationCookie) mig = NULL;
qemuDomainObjPrivate *priv = vm->privateData;
- qemuDomainJobPrivate *jobPriv = priv->job.privateData;
+ qemuDomainJobPrivate *jobPriv = vm->job->privateData;
virObjectEvent *event;
bool inPostCopy = false;
- bool doKill = priv->job.phase != QEMU_MIGRATION_PHASE_FINISH_RESUME;
+ bool doKill = vm->job->phase != QEMU_MIGRATION_PHASE_FINISH_RESUME;
int rc;
VIR_DEBUG("vm=%p, flags=0x%lx, retcode=%d",
vm, flags, retcode);
if (!(mig = qemuMigrationCookieParse(driver, vm->def, priv->origname, priv,
- cookiein, cookieinlen, cookie_flags)))
+ cookiein, cookieinlen, cookie_flags, NULL)))
goto error;
if (retcode != 0) {
VIR_WARN("Unable to encode migration cookie");
qemuMigrationDstComplete(driver, vm, inPostCopy,
- VIR_ASYNC_JOB_MIGRATION_IN, &priv->job);
+ VIR_ASYNC_JOB_MIGRATION_IN, vm->job);
return dom;
*finishJob = false;
} else {
qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_IN,
- jobPriv->migParams, priv->job.apiFlags);
+ jobPriv->migParams, vm->job->apiFlags);
}
if (!virDomainObjIsActive(vm))
} else {
qemuDomainCleanupRemove(vm, qemuMigrationDstPrepareCleanup);
}
- g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
+ g_clear_pointer(&vm->job->completed, virDomainJobDataFree);
cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK |
QEMU_MIGRATION_COOKIE_STATS |
virDomainAsyncJob job,
qemuMonitorMigrationStatus status)
{
- qemuDomainObjPrivate *priv = vm->privateData;
qemuMigrationJobPhase phase;
if (!qemuMigrationJobIsActive(vm, job) ||
return;
if (job == VIR_ASYNC_JOB_MIGRATION_IN)
- qemuMigrationDstComplete(driver, vm, true, job, &priv->job);
+ qemuMigrationDstComplete(driver, vm, true, job, vm->job);
else
qemuMigrationSrcComplete(driver, vm, job);
mig->nbd->disks = g_new0(struct qemuMigrationCookieNBDDisk, vm->def->ndisks);
mig->nbd->ndisks = 0;
- if (qemuDomainObjEnterMonitorAsync(vm, priv->job.asyncJob) < 0)
+ if (qemuDomainObjEnterMonitorAsync(vm, vm->job->asyncJob) < 0)
return -1;
rc = qemuMonitorBlockStatsUpdateCapacityBlockdev(priv->mon, stats);
qemuMigrationCookieAddStatistics(qemuMigrationCookie *mig,
virDomainObj *vm)
{
- qemuDomainObjPrivate *priv = vm->privateData;
-
- if (!priv->job.completed)
+ if (!vm->job->completed)
return 0;
g_clear_pointer(&mig->jobData, virDomainJobDataFree);
- mig->jobData = virDomainJobDataCopy(priv->job.completed);
+ mig->jobData = virDomainJobDataCopy(vm->job->completed);
mig->flags |= QEMU_MIGRATION_COOKIE_STATS;
if (!(ctxt->node = virXPathNode("./statistics", ctxt)))
return NULL;
- jobData = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);
+ jobData = virDomainJobDataInit(&virQEMUDriverDomainJobConfig.jobDataPrivateCb);
priv = jobData->privateData;
stats = &priv->stats.mig;
jobData->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
qemuDomainObjPrivate *priv,
const char *cookiein,
int cookieinlen,
- unsigned int flags)
+ unsigned int flags,
+ virDomainObj *vm)
{
g_autoptr(qemuMigrationCookie) mig = NULL;
}
}
- if (flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobData && priv->job.current)
- mig->jobData->operation = priv->job.current->operation;
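+ /* vm may be NULL for callers that do not need the job statistics */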
+ if (vm && flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobData && vm->job->current)
+ mig->jobData->operation = vm->job->current->operation;
return g_steal_pointer(&mig);
}
qemuDomainObjPrivate *priv,
const char *cookiein,
int cookieinlen,
- unsigned int flags);
+ unsigned int flags,
+ virDomainObj *vm);
void
qemuMigrationCookieFree(qemuMigrationCookie *mig);
qemuMigrationParams *migParams)
{
qemuDomainObjPrivate *priv = vm->privateData;
- qemuDomainJobPrivate *jobPriv = priv->job.privateData;
+ qemuDomainJobPrivate *jobPriv = vm->job->privateData;
g_autoptr(virJSONValue) tlsProps = NULL;
g_autoptr(virJSONValue) secProps = NULL;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
qemuMigrationParamsDisableTLS(virDomainObj *vm,
qemuMigrationParams *migParams)
{
- qemuDomainObjPrivate *priv = vm->privateData;
- qemuDomainJobPrivate *jobPriv = priv->job.privateData;
+ qemuDomainJobPrivate *jobPriv = vm->job->privateData;
if (!jobPriv->migParams->params[QEMU_MIGRATION_PARAM_TLS_CREDS].set)
return 0;
qemuMigrationParams *migParams,
virBitmap *remoteCaps)
{
- qemuDomainObjPrivate *priv = vm->privateData;
- qemuDomainJobPrivate *jobPriv = priv->job.privateData;
+ qemuDomainJobPrivate *jobPriv = vm->job->privateData;
qemuMigrationCapability cap;
qemuMigrationParty party;
size_t i;
* reveal it in domain state nor sent events */
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING &&
!priv->pausedShutdown) {
- if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
- if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
+ if (vm->job->current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
reason = VIR_DOMAIN_PAUSED_POSTCOPY;
else
reason = VIR_DOMAIN_PAUSED_MIGRATION;
vm->def->name, virDomainPausedReasonTypeToString(reason),
detail);
- if (priv->job.current)
- ignore_value(virTimeMillisNow(&priv->job.current->stopped));
+ if (vm->job->current)
+ ignore_value(virTimeMillisNow(&vm->job->current->stopped));
if (priv->signalStop)
virDomainObjBroadcast(vm);
qemuProcessHandleSpiceMigrated(qemuMonitor *mon G_GNUC_UNUSED,
virDomainObj *vm)
{
- qemuDomainObjPrivate *priv;
qemuDomainJobPrivate *jobPriv;
virObjectLock(vm);
VIR_DEBUG("Spice migration completed for domain %p %s",
vm, vm->def->name);
- priv = vm->privateData;
- jobPriv = priv->job.privateData;
- if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
+ jobPriv = vm->job->privateData;
+ if (vm->job->asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
VIR_DEBUG("got SPICE_MIGRATE_COMPLETED event without a migration job");
goto cleanup;
}
priv = vm->privateData;
driver = priv->driver;
- if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_NONE) {
VIR_DEBUG("got MIGRATION event without a migration job");
goto cleanup;
}
- privJob = priv->job.current->privateData;
+ privJob = vm->job->current->privateData;
privJob->stats.mig.status = status;
virDomainObjBroadcast(vm);
switch ((qemuMonitorMigrationStatus) status) {
case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY:
- if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
state == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_MIGRATION) {
VIR_DEBUG("Correcting paused state reason for domain %s to %s",
break;
case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY_PAUSED:
- if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
state == VIR_DOMAIN_PAUSED) {
/* At this point no thread is watching the migration progress on
* the source as it is just waiting for the Finish phase to end.
* watching it in any thread. Let's make sure the migration is properly
* finished in case we get a "completed" event.
*/
- if (virDomainObjIsPostcopy(vm, priv->job.current->operation) &&
- priv->job.phase == QEMU_MIGRATION_PHASE_POSTCOPY_FAILED &&
- priv->job.asyncOwner == 0) {
+ if (virDomainObjIsPostcopy(vm, vm->job->current->operation) &&
+ vm->job->phase == QEMU_MIGRATION_PHASE_POSTCOPY_FAILED &&
+ vm->job->asyncOwner == 0) {
qemuProcessEventSubmit(vm, QEMU_PROCESS_EVENT_UNATTENDED_MIGRATION,
- priv->job.asyncJob, status, NULL);
+ vm->job->asyncJob, status, NULL);
}
break;
vm, vm->def->name, pass);
priv = vm->privateData;
- if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_NONE) {
VIR_DEBUG("got MIGRATION_PASS event without a migration job");
goto cleanup;
}
qemuMonitorDumpStats *stats,
const char *error)
{
- qemuDomainObjPrivate *priv;
qemuDomainJobPrivate *jobPriv;
qemuDomainJobDataPrivate *privJobCurrent = NULL;
VIR_DEBUG("Dump completed for domain %p %s with stats=%p error='%s'",
vm, vm->def->name, stats, NULLSTR(error));
- priv = vm->privateData;
- jobPriv = priv->job.privateData;
- privJobCurrent = priv->job.current->privateData;
- if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
+ jobPriv = vm->job->privateData;
+ privJobCurrent = vm->job->current->privateData;
+ if (vm->job->asyncJob == VIR_ASYNC_JOB_NONE) {
VIR_DEBUG("got DUMP_COMPLETED event without a dump_completed job");
goto cleanup;
}
jobPriv->dumpCompleted = true;
privJobCurrent->stats.dump = *stats;
- priv->job.error = g_strdup(error);
+ vm->job->error = g_strdup(error);
/* Force error if extracting the DUMP_COMPLETED status failed */
if (!error && status < 0) {
- priv->job.error = g_strdup(virGetLastErrorMessage());
+ vm->job->error = g_strdup(virGetLastErrorMessage());
privJobCurrent->stats.dump.status = QEMU_MONITOR_DUMP_STATUS_FAILED;
}
/* de-activate netdevs after stopping CPUs */
ignore_value(qemuInterfaceStopDevices(vm->def));
- if (priv->job.current)
- ignore_value(virTimeMillisNow(&priv->job.current->stopped));
+ if (vm->job->current)
+ ignore_value(virTimeMillisNow(&vm->job->current->stopped));
/* The STOP event handler will change the domain state with the reason
* saved in priv->pausedReason and it will also emit corresponding domain
VIR_DEBUG("driver=%p, vm=%s, asyncJob=%s, state=%s, reason=%s",
driver, vm->def->name,
- virDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(vm->job->asyncJob),
virDomainStateTypeToString(state),
virDomainStateReasonToString(state, reason));
- if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_IN &&
- priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT)
+ if (vm->job->asyncJob != VIR_ASYNC_JOB_MIGRATION_IN &&
+ vm->job->asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT)
return;
virPortAllocatorRelease(priv->migrationPort);
qemuProcessRestoreMigrationJob(virDomainObj *vm,
virDomainJobObj *job)
{
- qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = job->privateData;
virDomainJobOperation op;
unsigned long long allowedJobs;
VIR_DOMAIN_JOB_STATUS_PAUSED,
allowedJobs);
- job->privateData = g_steal_pointer(&priv->job.privateData);
- priv->job.privateData = jobPriv;
- priv->job.apiFlags = job->apiFlags;
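+ /* swap privateData between the restored job and vm->job so the parsed
+ * migration state ends up on the domain's live job object */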
+ job->privateData = g_steal_pointer(&vm->job->privateData);
+ vm->job->privateData = jobPriv;
+ vm->job->apiFlags = job->apiFlags;
qemuDomainCleanupAdd(vm, qemuProcessCleanupMigrationJob);
}
if (asyncJob != VIR_ASYNC_JOB_NONE) {
if (qemuDomainObjBeginNestedJob(vm, asyncJob) < 0)
goto cleanup;
- } else if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE &&
- priv->job.asyncOwner == virThreadSelfID() &&
- priv->job.active != VIR_JOB_ASYNC_NESTED) {
+ } else if (vm->job->asyncJob != VIR_ASYNC_JOB_NONE &&
+ vm->job->asyncOwner == virThreadSelfID() &&
+ vm->job->active != VIR_JOB_ASYNC_NESTED) {
VIR_WARN("qemuProcessStop called without a nested job (async=%s)",
virDomainAsyncJobTypeToString(asyncJob));
}
VIR_DEBUG("vm=%s, conn=%p", dom->def->name, conn);
- if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
+ if (dom->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
- if (priv->job.asyncJob) {
+ if (dom->job->asyncJob) {
VIR_DEBUG("vm=%s has long-term job active, cancelling",
dom->def->name);
qemuDomainObjDiscardAsyncJob(dom);
cfg = virQEMUDriverGetConfig(driver);
priv = obj->privateData;
- virDomainObjPreserveJob(&priv->job, &oldjob);
+ virDomainObjPreserveJob(obj->job, &oldjob);
if (oldjob.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
if (oldjob.asyncJob == VIR_ASYNC_JOB_BACKUP && priv->backup)
if (!qemuMigrationSrcIsAllowed(driver, vm, false, VIR_ASYNC_JOB_SNAPSHOT, 0))
goto cleanup;
- qemuDomainJobSetStatsType(priv->job.current,
+ qemuDomainJobSetStatsType(vm->job->current,
QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP);
/* allow the migration job to be cancelled or the domain to be paused */
priv,
data->xmlstr,
data->xmlstrlen,
- data->cookieParseFlags))) {
+ data->cookieParseFlags,
+ data->vm))) {
VIR_TEST_DEBUG("\nfailed to parse qemu migration cookie:\n%s\n", data->xmlstr);
return -1;
}