qemu: Report more migration statistics
author    Jiri Denemark <jdenemar@redhat.com>
          Fri, 27 Nov 2015 11:30:09 +0000 (12:30 +0100)
committer Jiri Denemark <jdenemar@redhat.com>
          Fri, 8 Jan 2016 17:18:58 +0000 (18:18 +0100)
memory_dirty_rate corresponds to dirty-pages-rate in QEMU and
memory_iteration is what QEMU reports in dirty-sync-count.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
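
For reference, both values are taken verbatim from the "ram" section of QEMU's
query-migrate reply; a trimmed reply (field values invented for illustration)
looks roughly like:

    {
      "return": {
        "status": "active",
        "ram": {
          "transferred": 123456789,
          "remaining": 1234567,
          "total": 1073741824,
          "dirty-pages-rate": 2048,
          "dirty-sync-count": 3
        }
      }
    }

The monitor code below simply copies dirty-pages-rate into ram_dirty_rate and
dirty-sync-count into ram_iteration.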
include/libvirt/libvirt-domain.h
src/qemu/qemu_domain.c
src/qemu/qemu_migration.c
src/qemu/qemu_monitor.h
src/qemu/qemu_monitor_json.c
tools/virsh-domain.c

index a1ea6a5d0786c533feddb2bf3e8faeefef54eb84..d26faa5db689cd98a8bda38050eb8cc14145d0e6 100644 (file)
@@ -2724,6 +2724,25 @@ int virDomainAbortJob(virDomainPtr dom);
  */
 # define VIR_DOMAIN_JOB_MEMORY_BPS               "memory_bps"
 
+/**
+ * VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE:
+ *
+ * virDomainGetJobStats field: number of memory pages dirtied by the guest
+ * per second, as VIR_TYPED_PARAM_ULLONG. This statistic makes sense only
+ * when live migration is running.
+ */
+# define VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE        "memory_dirty_rate"
+
+/**
+ * VIR_DOMAIN_JOB_MEMORY_ITERATION:
+ *
+ * virDomainGetJobStats field: current iteration over the domain's memory
+ * during live migration, as VIR_TYPED_PARAM_ULLONG. This is set to zero
+ * when memory transfer starts and incremented by one every time a new
+ * iteration is started to transfer memory pages dirtied since the last
+ * iteration.
+ */
+# define VIR_DOMAIN_JOB_MEMORY_ITERATION         "memory_iteration"
+
 /**
  * VIR_DOMAIN_JOB_DISK_TOTAL:
  *
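
Once these constants are public, a client can retrieve them through
virDomainGetJobStats(); a minimal sketch (error handling trimmed, the domain
is assumed to be in the middle of a live migration):

    #include <stdio.h>
    #include <libvirt/libvirt.h>

    /* Print the two new migration statistics for a migrating domain. */
    static void
    printDirtyStats(virDomainPtr dom)
    {
        virTypedParameterPtr params = NULL;
        int nparams = 0;
        int type;
        unsigned long long rate, iter;

        if (virDomainGetJobStats(dom, &type, &params, &nparams, 0) < 0)
            return;

        /* Both getters return 1 when the field is present, 0 when not. */
        if (virTypedParamsGetULLong(params, nparams,
                                    VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE,
                                    &rate) == 1)
            printf("dirty rate: %llu pages/s\n", rate);
        if (virTypedParamsGetULLong(params, nparams,
                                    VIR_DOMAIN_JOB_MEMORY_ITERATION,
                                    &iter) == 1)
            printf("iteration:  %llu\n", iter);

        virTypedParamsFree(params, nparams);
    }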
index e76e76b9cb22b73db4c881af5047a2ab166e2a98..080f508eb9f3438d27544d54fbb92ec2904ac744 100644 (file)
@@ -383,6 +383,14 @@ qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
             goto error;
     }
 
+    if (virTypedParamsAddULLong(&par, &npar, &maxpar,
+                                VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE,
+                                stats->ram_dirty_rate) < 0 ||
+        virTypedParamsAddULLong(&par, &npar, &maxpar,
+                                VIR_DOMAIN_JOB_MEMORY_ITERATION,
+                                stats->ram_iteration) < 0)
+        goto error;
+
     if (virTypedParamsAddULLong(&par, &npar, &maxpar,
                                 VIR_DOMAIN_JOB_DISK_TOTAL,
                                 stats->disk_total) < 0 ||
index cd4ed94e6d58d34a3770699bf1ff6e0148a04478..290e6205aeb7fcb58b615e657bfc36a29e22d227 100644 (file)
@@ -750,6 +750,13 @@ qemuMigrationCookieStatisticsXMLFormat(virBufferPtr buf,
                           stats->ram_normal_bytes);
     }
 
+    virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
+                      VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE,
+                      stats->ram_dirty_rate);
+    virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
+                      VIR_DOMAIN_JOB_MEMORY_ITERATION,
+                      stats->ram_iteration);
+
     virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                       VIR_DOMAIN_JOB_DISK_TOTAL,
                       stats->disk_total);
@@ -1100,6 +1107,11 @@ qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt)
     virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_NORMAL_BYTES "[1])",
                       ctxt, &stats->ram_normal_bytes);
 
+    virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE "[1])",
+                      ctxt, &stats->ram_dirty_rate);
+    virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_ITERATION "[1])",
+                      ctxt, &stats->ram_iteration);
+
     virXPathULongLong("string(./" VIR_DOMAIN_JOB_DISK_TOTAL "[1])",
                       ctxt, &stats->disk_total);
     virXPathULongLong("string(./" VIR_DOMAIN_JOB_DISK_PROCESSED "[1])",
index 4f1c8d37e6053f1dc8674e55d241d8aa58b0ac0f..4193ad2fa30c9365ba9817686ddb6500470cccf4 100644 (file)
@@ -493,6 +493,8 @@ struct _qemuMonitorMigrationStats {
     unsigned long long ram_duplicate;
     unsigned long long ram_normal;
     unsigned long long ram_normal_bytes;
+    unsigned long long ram_dirty_rate;
+    unsigned long long ram_iteration;
 
     unsigned long long disk_transferred;
     unsigned long long disk_remaining;
index 50d05b4424bdb0fcc5f48f5768b69cfafed900a4..077be3abfb13046ea278d63fd17cc84095c6b896 100644 (file)
@@ -2520,6 +2520,10 @@ qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply,
                                                       &stats->ram_normal));
         ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
                                                       &stats->ram_normal_bytes));
+        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
+                                                      &stats->ram_dirty_rate));
+        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
+                                                      &stats->ram_iteration));
 
         disk = virJSONValueObjectGetObject(ret, "disk");
         if (disk) {
index 3594c6f2f95ff971a60c7e19b627391929f5f3df..7c65bf48865649cd196803122db763d036384172 100644 (file)
@@ -6045,6 +6045,22 @@ cmdDomjobinfo(vshControl *ctl, const vshCmd *cmd)
             vshPrint(ctl, "%-17s %-.3lf %s/s\n",
                      _("Memory bandwidth:"), val, unit);
         }
+
+        if ((rc = virTypedParamsGetULLong(params, nparams,
+                                          VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE,
+                                          &value)) < 0) {
+            goto save_error;
+        } else if (rc) {
+            vshPrint(ctl, "%-17s %-12llu pages/s\n", _("Dirty rate:"), value);
+        }
+
+        if ((rc = virTypedParamsGetULLong(params, nparams,
+                                          VIR_DOMAIN_JOB_MEMORY_ITERATION,
+                                          &value)) < 0) {
+            goto save_error;
+        } else if (rc) {
+            vshPrint(ctl, "%-17s %-12llu\n", _("Iteration:"), value);
+        }
     }
 
     if (info.fileTotal || info.fileRemaining || info.fileProcessed) {
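
With the virsh hunk in place, domjobinfo gains two rows while a live
migration is running; illustrative output (values made up):

    Dirty rate:       2048         pages/s
    Iteration:        3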