qemu: blockjob: Synchronously update backing chain in XML on ABORT/PIVOT
author     Peter Krempa <pkrempa@redhat.com>
           Mon, 30 Mar 2015 09:26:20 +0000 (11:26 +0200)
committer  Daniel Veillard <veillard@redhat.com>
           Tue, 31 Mar 2015 00:36:17 +0000 (08:36 +0800)
When the synchronous pivot option is selected, libvirt would not update
the backing chain until the job had exited. Some applications then
received invalid data because their own job was serialized before the
backing chain update.
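
To make the failure mode concrete, here is a minimal sketch of the kind of
client that was affected: it pivots a block copy without the _ASYNC flag and
immediately reads the domain XML, expecting the backing chain to already be
updated. Only public libvirt calls are used; the connection URI, the domain
name "demo" and the disk target "vda" are illustrative, not taken from the
patch.

    #include <stdio.h>
    #include <stdlib.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        virConnectPtr conn = virConnectOpen("qemu:///system");
        virDomainPtr dom = conn ? virDomainLookupByName(conn, "demo") : NULL;
        int ret = EXIT_FAILURE;

        if (dom &&
            /* synchronous pivot: no _ASYNC flag, so the call is expected
             * to return only once the job has really finished */
            virDomainBlockJobAbort(dom, "vda",
                                   VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT) == 0) {
            /* the application assumes the backing chain is current here */
            char *xml = virDomainGetXMLDesc(dom, 0);
            if (xml) {
                puts(xml);
                free(xml);
                ret = EXIT_SUCCESS;
            }
        }

        if (dom)
            virDomainFree(dom);
        if (conn)
            virConnectClose(conn);
        return ret;
    }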

This patch removes the polling wait for ABORT/PIVOT job completion
and replaces it with a condition variable. If a synchronous operation
is requested, the XML update is executed in the job of the caller of
the synchronous request. Otherwise the monitor event callback uses a
separate worker to update the backing chain with a new job.
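
The handoff can be pictured with a small standalone sketch. It uses plain
pthreads rather than libvirt's virCond/virMutex wrappers, and all names are
illustrative, not libvirt API: one side arms the state and sleeps on the
condition until the event data arrives, the other records the status and
wakes the waiter (an event with no synchronous waiter would instead be queued
to a worker thread, which is not shown here).

    #include <pthread.h>
    #include <stdbool.h>

    /* Illustrative stand-in for the per-disk state added by this patch
     * (blockJobSyncCond, blockJobStatus, blockJobSync); not libvirt API. */
    struct block_job_state {
        pthread_mutex_t lock;      /* stands in for the domain object lock */
        pthread_cond_t sync_cond;  /* stands in for blockJobSyncCond */
        int status;                /* -1 = event not delivered yet */
        bool sync_waiting;         /* a synchronous caller is waiting */
    };

    static struct block_job_state job = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, -1, false
    };

    /* Caller side: arm the state, then sleep on the condition instead of
     * polling every 50ms.  Returns the delivered status, or -1 on error. */
    static int wait_for_block_job(struct block_job_state *s)
    {
        int status;

        pthread_mutex_lock(&s->lock);
        s->status = -1;
        s->sync_waiting = true;

        while (s->status == -1 && s->sync_waiting) {
            if (pthread_cond_wait(&s->sync_cond, &s->lock) != 0) {
                s->sync_waiting = false;
                pthread_mutex_unlock(&s->lock);
                return -1;
            }
        }

        status = s->status;
        s->sync_waiting = false;   /* the caller consumes the event itself */
        pthread_mutex_unlock(&s->lock);
        return status;
    }

    /* Event side: if a synchronous caller is waiting, hand the status back
     * and wake it; otherwise the event would go to a worker thread. */
    static void deliver_block_job_event(struct block_job_state *s, int status)
    {
        pthread_mutex_lock(&s->lock);
        if (s->sync_waiting) {
            s->status = status;
            pthread_cond_signal(&s->sync_cond);
        }
        pthread_mutex_unlock(&s->lock);
    }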

This is a regression since 1a92c719101e5bfa6fe2b78006ad04c7f075ea28

When the ABORT job is finished synchronously you get the following call
stack:
 #0  qemuBlockJobEventProcess
 #1  qemuDomainBlockJobImpl
 #2  qemuDomainBlockJobAbort
 #3  virDomainBlockJobAbort

Previously, or when using the _ASYNC flag, you'd get:
 #0  qemuBlockJobEventProcess
 #1  processBlockJobEvent
 #2  qemuProcessEventHandler
 #3  virThreadPoolWorker

src/conf/domain_conf.c
src/conf/domain_conf.h
src/qemu/qemu_driver.c
src/qemu/qemu_process.c

diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index 9324de065cb70c72e13882cd1914da447edbc70a..cd6ee22e57a4282bde9fdb326ded3269941a6b08 100644
@@ -1289,9 +1289,22 @@ virDomainDiskDefNew(void)
 
     if (VIR_ALLOC(ret) < 0)
         return NULL;
+
     if (VIR_ALLOC(ret->src) < 0)
-        VIR_FREE(ret);
+        goto error;
+
+    if (virCondInit(&ret->blockJobSyncCond) < 0) {
+        virReportSystemError(errno, "%s", _("Failed to initialize condition"));
+        goto error;
+    }
+
     return ret;
+
+ error:
+    virStorageSourceFree(ret->src);
+    VIR_FREE(ret);
+
+    return NULL;
 }
 
 
@@ -1310,6 +1323,7 @@ virDomainDiskDefFree(virDomainDiskDefPtr def)
     VIR_FREE(def->product);
     VIR_FREE(def->domain_name);
     virDomainDeviceInfoClear(&def->info);
+    virCondDestroy(&def->blockJobSyncCond);
 
     VIR_FREE(def);
 }
diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h
index 608f61f0e3c2d43ba75e777d07b5050046e8601a..84e880a6d7c2c4ceb700ab59893e0e3bfcb18279 100644
@@ -685,6 +685,12 @@ struct _virDomainDiskDef {
     int mirrorState; /* enum virDomainDiskMirrorState */
     int mirrorJob; /* virDomainBlockJobType */
 
+    /* for some synchronous block jobs, we need to notify the owner */
+    virCond blockJobSyncCond;
+    int blockJobType;   /* type of the block job from the event */
+    int blockJobStatus; /* status of the finished block job */
+    bool blockJobSync; /* the block job needs synchronized termination */
+
     struct {
         unsigned int cylinders;
         unsigned int heads;
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 5a90dcde29554f443c1db8f1fbd383675601111c..becf415b8b36666cc84b421ec1a1b5bd81e5a3d6 100644
@@ -16276,6 +16276,12 @@ qemuDomainBlockJobImpl(virDomainObjPtr vm,
         goto endjob;
 
     if (mode == BLOCK_JOB_ABORT) {
+        if (async && !(flags & VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)) {
+            /* prepare state for event delivery */
+            disk->blockJobStatus = -1;
+            disk->blockJobSync = true;
+        }
+
         if ((flags & VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT) &&
             !(async && disk->mirror)) {
             virReportError(VIR_ERR_OPERATION_INVALID,
@@ -16389,37 +16395,24 @@ qemuDomainBlockJobImpl(virDomainObjPtr vm,
                                                      status);
             event2 = virDomainEventBlockJob2NewFromObj(vm, disk->dst, type,
                                                        status);
-        } else if (!(flags & VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)) {
+        } else if (disk->blockJobSync) {
             /* XXX If the event reports failure, we should reflect
              * that back into the return status of this API call.  */
-            while (1) {
-                /* Poll every 50ms */
-                static struct timespec ts = {
-                    .tv_sec = 0,
-                    .tv_nsec = 50 * 1000 * 1000ull };
-                virDomainBlockJobInfo dummy;
-
-                qemuDomainObjEnterMonitor(driver, vm);
-                ret = qemuMonitorBlockJobInfo(priv->mon, device, &dummy, NULL);
-                if (qemuDomainObjExitMonitor(driver, vm) < 0)
-                    ret = -1;
-
-                if (ret <= 0)
-                    break;
-
-                virObjectUnlock(vm);
 
-                nanosleep(&ts, NULL);
-
-                virObjectLock(vm);
-
-                if (!virDomainObjIsActive(vm)) {
-                    virReportError(VIR_ERR_OPERATION_INVALID, "%s",
-                                   _("domain is not running"));
-                    ret = -1;
-                    break;
+            while (disk->blockJobStatus == -1 && disk->blockJobSync) {
+                if (virCondWait(&disk->blockJobSyncCond, &vm->parent.lock) < 0) {
+                    virReportSystemError(errno, "%s",
+                                         _("Unable to wait on block job sync "
+                                           "condition"));
+                    disk->blockJobSync = false;
+                    goto endjob;
                 }
             }
+
+            qemuBlockJobEventProcess(driver, vm, disk,
+                                     disk->blockJobType,
+                                     disk->blockJobStatus);
+            disk->blockJobSync = false;
         }
     }
 
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 79f763e3241821b74e421c361e433ded34602c53..2d86eb841113b38428baca5f132cd517cff9b8a5 100644
@@ -1020,28 +1020,40 @@ qemuProcessHandleBlockJob(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
 {
     virQEMUDriverPtr driver = opaque;
     struct qemuProcessEvent *processEvent = NULL;
-    char *data;
+    virDomainDiskDefPtr disk;
+    char *data = NULL;
 
     virObjectLock(vm);
 
     VIR_DEBUG("Block job for device %s (domain: %p,%s) type %d status %d",
               diskAlias, vm, vm->def->name, type, status);
 
-    if (VIR_ALLOC(processEvent) < 0)
+    if (!(disk = qemuProcessFindDomainDiskByAlias(vm, diskAlias)))
         goto error;
 
-    processEvent->eventType = QEMU_PROCESS_EVENT_BLOCK_JOB;
-    if (VIR_STRDUP(data, diskAlias) < 0)
-        goto error;
-    processEvent->data = data;
-    processEvent->vm = vm;
-    processEvent->action = type;
-    processEvent->status = status;
+    if (disk->blockJobSync) {
+        disk->blockJobType = type;
+        disk->blockJobStatus = status;
+        /* We have a SYNC API waiting for this event, dispatch it back */
+        virCondSignal(&disk->blockJobSyncCond);
+    } else {
+        /* there is no waiting SYNC API, dispatch the update to a thread */
+        if (VIR_ALLOC(processEvent) < 0)
+            goto error;
 
-    virObjectRef(vm);
-    if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
-        ignore_value(virObjectUnref(vm));
-        goto error;
+        processEvent->eventType = QEMU_PROCESS_EVENT_BLOCK_JOB;
+        if (VIR_STRDUP(data, diskAlias) < 0)
+            goto error;
+        processEvent->data = data;
+        processEvent->vm = vm;
+        processEvent->action = type;
+        processEvent->status = status;
+
+        virObjectRef(vm);
+        if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
+            ignore_value(virObjectUnref(vm));
+            goto error;
+        }
     }
 
  cleanup: