/* qemuBlockJobUpdateDisk:
 * Consume a pending block-job state change for @disk: if job->newstate
 * is set (!= -1), dispatch it via qemuBlockJobEventProcessLegacy() and
 * reset the pending state.  Returns the consumed state, or -1 when no
 * event was pending.
 *
 * NOTE(review): this span is a unified-diff hunk, not plain C.  The
 * change drops the 'error' out-parameter (and with it the
 * VIR_STEAL_PTR/VIR_FREE handling of job->errmsg); callers that need
 * the error message are now expected to read job->errmsg from the
 * per-disk block-job data directly.  Presumably job->errmsg is freed
 * by the block-job data's own cleanup path -- TODO confirm against the
 * full patch.
 */
int
qemuBlockJobUpdateDisk(virDomainObjPtr vm,
int asyncJob,
- virDomainDiskDefPtr disk,
- char **error)
+ virDomainDiskDefPtr disk)
{
qemuBlockJobDataPtr job = QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob;
qemuDomainObjPrivatePtr priv = vm->privateData;
/* Snapshot the pending state; -1 means "no event queued". */
int state = job->newstate;
- if (error)
- *error = NULL;
-
if (state != -1) {
/* Emit the legacy block-job event and mark it consumed. */
qemuBlockJobEventProcessLegacy(priv->driver, vm, job, asyncJob);
job->newstate = -1;
- if (error)
- VIR_STEAL_PTR(*error, job->errmsg);
- else
- VIR_FREE(job->errmsg);
}
return state;
virDomainDiskDefPtr disk)
{
VIR_DEBUG("disk=%s", disk->dst);
- qemuBlockJobUpdateDisk(vm, asyncJob, disk, NULL);
+ qemuBlockJobUpdateDisk(vm, asyncJob, disk);
QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob->synchronous = false;
}
job->newstate = status;
- qemuBlockJobUpdateDisk(vm, QEMU_ASYNC_JOB_NONE, disk, NULL);
+ qemuBlockJobUpdateDisk(vm, QEMU_ASYNC_JOB_NONE, disk);
endjob:
qemuBlockJobStartupFinalize(job);
* do the waiting while still holding the VM job, to prevent newly
* scheduled block jobs from confusing us. */
if (!async) {
- qemuBlockJobUpdateDisk(vm, QEMU_ASYNC_JOB_NONE, disk, NULL);
+ qemuBlockJobUpdateDisk(vm, QEMU_ASYNC_JOB_NONE, disk);
while (qemuBlockJobIsRunning(job)) {
if (virDomainObjWait(vm) < 0) {
ret = -1;
goto endjob;
}
- qemuBlockJobUpdateDisk(vm, QEMU_ASYNC_JOB_NONE, disk, NULL);
+ qemuBlockJobUpdateDisk(vm, QEMU_ASYNC_JOB_NONE, disk);
}
}
/* qemuMigrationNBDReportMirrorError:
 * Report a libvirt error for a failed NBD disk-mirror (storage
 * migration) job on @disk.
 *
 * NOTE(review): this span is a unified-diff hunk, not plain C.  The
 * change removes the 'errmsg' parameter; the message is now fetched
 * from the disk's own block-job data (job->errmsg), matching the
 * removal of the out-parameter from qemuBlockJobUpdateDisk().  When no
 * message is available, a generic failure error is reported instead.
 */
static void
-qemuMigrationNBDReportMirrorError(virDomainDiskDefPtr disk,
- const char *errmsg)
+qemuMigrationNBDReportMirrorError(virDomainDiskDefPtr disk)
{
- if (errmsg) {
+ qemuBlockJobDataPtr job = QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob;
+
+ if (job->errmsg) {
/* qemu supplied a detailed error message -- include it. */
virReportError(VIR_ERR_OPERATION_FAILED,
_("migration of disk %s failed: %s"),
- disk->dst, errmsg);
+ disk->dst, job->errmsg);
} else {
/* No detail available; report a generic per-disk failure. */
virReportError(VIR_ERR_OPERATION_FAILED,
_("migration of disk %s failed"), disk->dst);
for (i = 0; i < vm->def->ndisks; i++) {
virDomainDiskDefPtr disk = vm->def->disks[i];
qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
- char *error = NULL;
if (!diskPriv->migrating)
continue;
- status = qemuBlockJobUpdateDisk(vm, asyncJob, disk, &error);
+ status = qemuBlockJobUpdateDisk(vm, asyncJob, disk);
if (status == VIR_DOMAIN_BLOCK_JOB_FAILED) {
- qemuMigrationNBDReportMirrorError(disk, error);
+ qemuMigrationNBDReportMirrorError(disk);
return -1;
}
- VIR_FREE(error);
if (disk->mirrorState != VIR_DOMAIN_DISK_MIRROR_STATE_READY)
notReady++;
for (i = 0; i < vm->def->ndisks; i++) {
virDomainDiskDefPtr disk = vm->def->disks[i];
qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
- char *error = NULL;
if (!diskPriv->migrating)
continue;
- status = qemuBlockJobUpdateDisk(vm, asyncJob, disk, &error);
+ status = qemuBlockJobUpdateDisk(vm, asyncJob, disk);
switch (status) {
case VIR_DOMAIN_BLOCK_JOB_FAILED:
if (check) {
- qemuMigrationNBDReportMirrorError(disk, error);
+ qemuMigrationNBDReportMirrorError(disk);
failed = true;
}
ATTRIBUTE_FALLTHROUGH;
if (status == VIR_DOMAIN_BLOCK_JOB_COMPLETED)
completed++;
-
- VIR_FREE(error);
}
/* Updating completed block job drops the lock thus we have to recheck
{
qemuDomainObjPrivatePtr priv = vm->privateData;
char *diskAlias = NULL;
- char *error = NULL;
int ret = -1;
int status;
int rv;
- status = qemuBlockJobUpdateDisk(vm, asyncJob, disk, &error);
+ status = qemuBlockJobUpdateDisk(vm, asyncJob, disk);
switch (status) {
case VIR_DOMAIN_BLOCK_JOB_FAILED:
case VIR_DOMAIN_BLOCK_JOB_CANCELED:
if (failNoJob) {
- qemuMigrationNBDReportMirrorError(disk, error);
+ qemuMigrationNBDReportMirrorError(disk);
goto cleanup;
}
ATTRIBUTE_FALLTHROUGH;
cleanup:
VIR_FREE(diskAlias);
- VIR_FREE(error);
return ret;
}