This doesn't abort a migration job in any phase yet.
"destroy",
"suspend",
"modify",
+ "abort",
"migration operation",
"none", /* async job is never stored in job.active */
"async nested",
return -1;
}
- if (virCondInit(&priv->job.signalCond) < 0) {
- ignore_value(virCondDestroy(&priv->job.cond));
- ignore_value(virCondDestroy(&priv->job.asyncCond));
- return -1;
- }
-
return 0;
}
job->mask = DEFAULT_JOB_MASK;
job->start = 0;
memset(&job->info, 0, sizeof(job->info));
- job->signals = 0;
}
void
{
ignore_value(virCondDestroy(&priv->job.cond));
ignore_value(virCondDestroy(&priv->job.asyncCond));
- ignore_value(virCondDestroy(&priv->job.signalCond));
}
# define JOB_MASK(job) (1 << (job - 1))
# define DEFAULT_JOB_MASK \
- (JOB_MASK(QEMU_JOB_QUERY) | JOB_MASK(QEMU_JOB_DESTROY))
+ (JOB_MASK(QEMU_JOB_QUERY) | \
+ JOB_MASK(QEMU_JOB_DESTROY) | \
+ JOB_MASK(QEMU_JOB_ABORT))
/* Only 1 job is allowed at any time
* A job includes *all* monitor commands, even those just querying
QEMU_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */
QEMU_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
QEMU_JOB_MODIFY, /* May change state */
+ QEMU_JOB_ABORT, /* Abort current async job */
QEMU_JOB_MIGRATION_OP, /* Operation influencing outgoing migration */
/* The following two items must always be the last items before JOB_LAST */
QEMU_ASYNC_JOB_LAST
};
-enum qemuDomainJobSignals {
- QEMU_JOB_SIGNAL_CANCEL = 1 << 0, /* Request job cancellation */
-};
-
struct qemuDomainJobObj {
virCond cond; /* Use to coordinate jobs */
enum qemuDomainJob active; /* Currently running job */
unsigned long long mask; /* Jobs allowed during async job */
unsigned long long start; /* When the async job started */
virDomainJobInfo info; /* Async job progress data */
-
- virCond signalCond; /* Use to coordinate the safe queries during migration */
- unsigned int signals; /* Signals for running job */
};
typedef struct _qemuDomainPCIAddressSet qemuDomainPCIAddressSet;
goto cleanup;
}
- priv = vm->privateData;
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_ABORT) < 0)
+ goto cleanup;
if (virDomainObjIsActive(vm)) {
- if (priv->job.asyncJob) {
- VIR_DEBUG("Requesting cancellation of job on vm %s", vm->def->name);
- priv->job.signals |= QEMU_JOB_SIGNAL_CANCEL;
- } else {
- qemuReportError(VIR_ERR_OPERATION_INVALID,
- "%s", _("no job is active on the domain"));
- goto cleanup;
- }
- } else {
qemuReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is not running"));
- goto cleanup;
+ goto endjob;
}
- ret = 0;
+ priv = vm->privateData;
+
+ if (!priv->job.asyncJob) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("no job is active on the domain"));
+ goto endjob;
+ } else if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
+ _("cannot abort incoming migration;"
+ " use virDomainDestroy instead"));
+ goto endjob;
+ }
+
+ VIR_DEBUG("Cancelling job at client request");
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ ret = qemuMonitorMigrateCancel(priv->mon);
+ qemuDomainObjExitMonitor(driver, vm);
+
+endjob:
+ if (qemuDomainObjEndJob(driver, vm) == 0)
+ vm = NULL;
cleanup:
if (vm)
}
-static int
-qemuMigrationProcessJobSignals(struct qemud_driver *driver,
- virDomainObjPtr vm,
- const char *job,
- bool cleanup)
-{
- qemuDomainObjPrivatePtr priv = vm->privateData;
- int ret = -1;
-
- if (!virDomainObjIsActive(vm)) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
- job, _("guest unexpectedly quit"));
- if (cleanup)
- priv->job.signals = 0;
- return -1;
- }
-
- if (priv->job.signals & QEMU_JOB_SIGNAL_CANCEL) {
- priv->job.signals ^= QEMU_JOB_SIGNAL_CANCEL;
- VIR_DEBUG("Cancelling job at client request");
- ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
- if (ret == 0) {
- ret = qemuMonitorMigrateCancel(priv->mon);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
- }
- if (ret < 0) {
- VIR_WARN("Unable to cancel job");
- }
- } else {
- ret = 0;
- }
-
- return ret;
-}
-
-
static int
qemuMigrationUpdateJobStatus(struct qemud_driver *driver,
virDomainObjPtr vm,
while (priv->job.info.type == VIR_DOMAIN_JOB_UNBOUNDED) {
/* Poll every 50ms for progress & to allow cancellation */
struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
- while (priv->job.signals) {
- if (qemuMigrationProcessJobSignals(driver, vm, job, false) < 0)
- goto cleanup;
- }
-
- virCondSignal(&priv->job.signalCond);
if (qemuMigrationUpdateJobStatus(driver, vm, job) < 0)
goto cleanup;
-
virDomainObjUnlock(vm);
qemuDriverUnlock(driver);
}
cleanup:
- while (priv->job.signals) {
- qemuMigrationProcessJobSignals(driver, vm, job, true);
- }
- virCondBroadcast(&priv->job.signalCond);
-
if (priv->job.info.type == VIR_DOMAIN_JOB_COMPLETED)
return 0;
else
virDomainState state,
int reason)
{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+
if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
switch (phase) {
case QEMU_MIGRATION_PHASE_NONE:
* domain */
VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
vm->def->name);
- /* TODO cancel possibly running migrate operation */
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ ignore_value(qemuMonitorMigrateCancel(priv->mon));
+ qemuDomainObjExitMonitor(driver, vm);
/* resume the domain but only if it was paused as a result of
* migration */
if (state == VIR_DOMAIN_PAUSED &&
virConnectPtr conn,
const struct qemuDomainJobObj *job)
{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
virDomainState state;
int reason;
case QEMU_ASYNC_JOB_SAVE:
case QEMU_ASYNC_JOB_DUMP:
- /* TODO cancel possibly running migrate operation */
+ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+ ignore_value(qemuMonitorMigrateCancel(priv->mon));
+ qemuDomainObjExitMonitor(driver, vm);
/* resume the domain but only if it was paused as a result of
* running save/dump operation */
if (state == VIR_DOMAIN_PAUSED &&
break;
case QEMU_JOB_MIGRATION_OP:
+ case QEMU_JOB_ABORT:
case QEMU_JOB_ASYNC:
case QEMU_JOB_ASYNC_NESTED:
/* async job was already handled above */