parallels: add block device statistics to driver
author Nikolay Shirokovskiy <nshirokovskiy@parallels.com>
Tue, 9 Jun 2015 07:35:53 +0000 (10:35 +0300)
committer Dmitry Guryanov <dguryanov@parallels.com>
Tue, 9 Jun 2015 13:43:10 +0000 (16:43 +0300)
Statistics are provided through the PCS SDK. Since the SDK only offers an
asynchronous interface, we have to be subscribed to statistics in order to
get them. The trivial solution of subscribing on every stats request,
waiting for the event and then unsubscribing would cause significant
delays for a series of successive requests, because each event is only
delivered on the next PCS server notify cycle. On the other hand we don't
want to keep an unnecessary subscription around. So we take a hybrid
approach: subscribe on the first request and keep the subscription while
requests are active. We populate a statistics cache from the subscription
events and use this cache to serve libvirt's requests.

 * Cache details.
The cache is just a handle to the last arrived event; we call it a cache
because, while the handle is valid, it is used to serve synchronous
statistics requests. We count successive events to detect that the user
has lost interest in statistics, and reset this count to 0 on every
request. If more than PARALLELS_STATISTICS_DROP_COUNT successive events
arrive without a request, we unsubscribe. The special counter value of -1
distinguishes the subscribed and unsubscribed states, which protects
against delayed events.
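
As an illustration only (not part of the patch), the counter transitions
boil down to the sketch below; subscribe()/unsubscribe() stand in for the
PrlVm_SubscribeToPerfStats()/PrlVm_UnsubscribeFromPerfStats() job calls:

    /* count == -1: unsubscribed; count >= 0: subscribed,
     * counting events seen since the last stats request */

    /* on a stats request (prlsdkGetStatsParam) */
    if (cache->count == -1)
        subscribe();              /* first request while unsubscribed */
    cache->count = 0;             /* any request resets the drop counter */

    /* on a perf event (prlsdkHandlePerfEvent) */
    if (cache->count == -1) {
        /* delayed event after unsubscribe: ignore */
    } else if (cache->count > PARALLELS_STATISTICS_DROP_COUNT) {
        unsubscribe();            /* no requests for a while */
        cache->count = -1;
    } else {
        cache->count++;
        cache->stats = event;     /* serve later requests from this event */
    }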

The values of PARALLELS_STATISTICS_DROP_COUNT and PARALLELS_STATISTICS_TIMEOUT
are just placeholders, chosen without special consideration.

 * Thread safety issues
parallelsDomainBlockStats uses parallelsDomObjFromDomainRef because
prlsdkGetStatsParam, further down the stack, may wait on the domain lock;
if we did not hold a reference, the domain object could be freed while we
wait and the pointer would be dangling when we return from the wait.
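
The resulting calling pattern, sketched (the actual code is in
parallelsDomainBlockStats below):

    if (!(dom = parallelsDomObjFromDomainRef(domain)))   /* lock + extra ref */
        return -1;

    /* ... prlsdkGetStatsParam() may drop dom->parent.lock and wait here ... */

    virDomainObjEndAPI(&dom);     /* unlock and release the reference */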

Signed-off-by: Nikolay Shirokovskiy <nshirokovskiy@parallels.com>
src/parallels/parallels_driver.c
src/parallels/parallels_sdk.c
src/parallels/parallels_sdk.h
src/parallels/parallels_utils.c
src/parallels/parallels_utils.h

index e3ac90d3a1a34b0072dc703215c2d247ce21d614..fdba0dbfdc9fff072f245dc0e59e7eeb64d4e56e 100644 (file)
@@ -51,6 +51,7 @@
 #include "nodeinfo.h"
 #include "virstring.h"
 #include "cpu/cpu.h"
+#include "virtypedparam.h"
 
 #include "parallels_driver.h"
 #include "parallels_utils.h"
@@ -1218,6 +1219,109 @@ parallelsDomainGetMaxMemory(virDomainPtr domain)
     return ret;
 }
 
+static int
+parallelsDomainBlockStats(virDomainPtr domain, const char *path,
+                     virDomainBlockStatsPtr stats)
+{
+    virDomainObjPtr dom = NULL;
+    int ret = -1;
+    size_t i;
+    int idx;
+
+    if (!(dom = parallelsDomObjFromDomainRef(domain)))
+        return -1;
+
+    if (*path) {
+        if ((idx = virDomainDiskIndexByName(dom->def, path, false)) < 0) {
+            virReportError(VIR_ERR_INVALID_ARG, _("invalid path: %s"), path);
+            goto cleanup;
+        }
+        if (prlsdkGetBlockStats(dom, dom->def->disks[idx], stats) < 0)
+            goto cleanup;
+    } else {
+        virDomainBlockStatsStruct s;
+
+#define PARALLELS_ZERO_STATS(VAR, TYPE, NAME)           \
+        stats->VAR = 0;
+
+        PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_ZERO_STATS)
+
+#undef PARALLELS_ZERO_STATS
+
+        for (i = 0; i < dom->def->ndisks; i++) {
+            if (prlsdkGetBlockStats(dom, dom->def->disks[i], &s) < 0)
+                goto cleanup;
+
+#define     PARALLELS_SUM_STATS(VAR, TYPE, NAME)        \
+            if (s.VAR != -1)                            \
+                stats->VAR += s.VAR;
+
+            PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_SUM_STATS)
+
+#undef      PARALLELS_SUM_STATS
+        }
+    }
+    stats->errs = -1;
+    ret = 0;
+
+ cleanup:
+    if (dom)
+        virDomainObjEndAPI(&dom);
+
+    return ret;
+}
+
+static int
+parallelsDomainBlockStatsFlags(virDomainPtr domain,
+                          const char *path,
+                          virTypedParameterPtr params,
+                          int *nparams,
+                          unsigned int flags)
+{
+    virDomainBlockStatsStruct stats;
+    int ret = -1;
+    size_t i;
+
+    virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1);
+    /* We don't return strings, and thus trivially support this flag.  */
+    flags &= ~VIR_TYPED_PARAM_STRING_OKAY;
+
+    if (parallelsDomainBlockStats(domain, path, &stats) < 0)
+        goto cleanup;
+
+    if (*nparams == 0) {
+#define PARALLELS_COUNT_STATS(VAR, TYPE, NAME)       \
+        if ((stats.VAR) != -1)                       \
+            ++*nparams;
+
+        PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_COUNT_STATS)
+
+#undef PARALLELS_COUNT_STATS
+        ret = 0;
+        goto cleanup;
+    }
+
+    i = 0;
+#define PARALLELS_BLOCK_STATS_ASSIGN_PARAM(VAR, TYPE, NAME)                    \
+    if (i < *nparams && (stats.VAR) != -1) {                                   \
+        if (virTypedParameterAssign(params + i, TYPE,                          \
+                                    VIR_TYPED_PARAM_LLONG, (stats.VAR)) < 0)   \
+            goto cleanup;                                                      \
+        i++;                                                                   \
+    }
+
+    PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_BLOCK_STATS_ASSIGN_PARAM)
+
+#undef PARALLELS_BLOCK_STATS_ASSIGN_PARAM
+
+    *nparams = i;
+    ret = 0;
+
+ cleanup:
+    return ret;
+}
+
+
 static virHypervisorDriver vzDriver = {
     .name = "vz",
     .connectOpen = parallelsConnectOpen,            /* 0.10.0 */
@@ -1267,6 +1371,8 @@ static virHypervisorDriver vzDriver = {
     .domainManagedSave = parallelsDomainManagedSave, /* 1.2.14 */
     .domainManagedSaveRemove = parallelsDomainManagedSaveRemove, /* 1.2.14 */
     .domainGetMaxMemory = parallelsDomainGetMaxMemory, /* 1.2.15 */
+    .domainBlockStats = parallelsDomainBlockStats, /* 1.2.17 */
+    .domainBlockStatsFlags = parallelsDomainBlockStatsFlags, /* 1.2.17 */
 };
 
 static virConnectDriver vzConnectDriver = {
index 5a565d33b1f6df568affb89d41e8c42439cf3917..3f32838a330014787d4af7bb30678fdbbdddbfcf 100644 (file)
@@ -21,6 +21,7 @@
  */
 
 #include <config.h>
+#include <stdarg.h>
 
 #include "virerror.h"
 #include "viralloc.h"
@@ -29,6 +30,7 @@
 #include "virlog.h"
 #include "datatypes.h"
 #include "domain_conf.h"
+#include "virtime.h"
 
 #include "parallels_sdk.h"
 
@@ -410,6 +412,8 @@ prlsdkDomObjFreePrivate(void *p)
         return;
 
     PrlHandle_Free(pdom->sdkdom);
+    PrlHandle_Free(pdom->cache.stats);
+    virCondDestroy(&pdom->cache.cond);
     VIR_FREE(pdom->uuid);
     VIR_FREE(pdom->home);
     VIR_FREE(p);
@@ -1267,6 +1271,13 @@ prlsdkLoadDomain(parallelsConnPtr privconn,
      * to NULL temporarily */
     pdom->uuid = NULL;
 
+    pdom->cache.stats = PRL_INVALID_HANDLE;
+    pdom->cache.count = -1;
+    if (virCondInit(&pdom->cache.cond) < 0) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("cannot initialize condition"));
+        goto error;
+    }
+
     if (prlsdkGetDomainIds(sdkdom, &def->name, def->uuid) < 0)
         goto error;
 
@@ -1629,6 +1640,51 @@ prlsdkHandleVmRemovedEvent(parallelsConnPtr privconn,
     return;
 }
 
+#define PARALLELS_STATISTICS_DROP_COUNT 3
+
+static PRL_RESULT
+prlsdkHandlePerfEvent(parallelsConnPtr privconn,
+                           PRL_HANDLE event,
+                           unsigned char *uuid)
+{
+    virDomainObjPtr dom = NULL;
+    parallelsDomObjPtr privdom = NULL;
+    PRL_HANDLE job = PRL_INVALID_HANDLE;
+
+    dom = virDomainObjListFindByUUID(privconn->domains, uuid);
+    if (dom == NULL)
+        goto cleanup;
+    privdom = dom->privateData;
+
+    // delayed event after unsubscribe
+    if (privdom->cache.count == -1)
+        goto cleanup;
+
+    PrlHandle_Free(privdom->cache.stats);
+    privdom->cache.stats = PRL_INVALID_HANDLE;
+
+    if (privdom->cache.count > PARALLELS_STATISTICS_DROP_COUNT) {
+        job = PrlVm_UnsubscribeFromPerfStats(privdom->sdkdom);
+        if (PRL_FAILED(waitJob(job)))
+            goto cleanup;
+        // change state to unsubscribed
+        privdom->cache.count = -1;
+    } else {
+        ++privdom->cache.count;
+        privdom->cache.stats = event;
+        // the cache now owns the event handle
+        event = PRL_INVALID_HANDLE;
+        virCondSignal(&privdom->cache.cond);
+    }
+
+ cleanup:
+    PrlHandle_Free(event);
+    if (dom)
+        virObjectUnlock(dom);
+
+    return PRL_ERR_SUCCESS;
+}
+
 static void
 prlsdkHandleVmEvent(parallelsConnPtr privconn, PRL_HANDLE prlEvent)
 {
@@ -1639,13 +1695,13 @@ prlsdkHandleVmEvent(parallelsConnPtr privconn, PRL_HANDLE prlEvent)
     PRL_EVENT_TYPE prlEventType;
 
     pret = PrlEvent_GetType(prlEvent, &prlEventType);
-    prlsdkCheckRetGoto(pret, error);
+    prlsdkCheckRetGoto(pret, cleanup);
 
     pret = PrlEvent_GetIssuerId(prlEvent, uuidstr, &bufsize);
-    prlsdkCheckRetGoto(pret, error);
+    prlsdkCheckRetGoto(pret, cleanup);
 
     if (prlsdkUUIDParse(uuidstr, uuid) < 0)
-        return;
+        goto cleanup;
 
     switch (prlEventType) {
         case PET_DSP_EVT_VM_STATE_CHANGED:
@@ -1662,12 +1718,18 @@ prlsdkHandleVmEvent(parallelsConnPtr privconn, PRL_HANDLE prlEvent)
         case PET_DSP_EVT_VM_UNREGISTERED:
             prlsdkHandleVmRemovedEvent(privconn, uuid);
             break;
+        case PET_DSP_EVT_VM_PERFSTATS:
+            prlsdkHandlePerfEvent(privconn, prlEvent, uuid);
+            // the handler above took ownership of the event
+            prlEvent = PRL_INVALID_HANDLE;
+            break;
         default:
             virReportError(VIR_ERR_INTERNAL_ERROR,
                        _("Can't handle event of type %d"), prlEventType);
     }
 
- error:
+ cleanup:
+    PrlHandle_Free(prlEvent);
     return;
 }
 
@@ -1695,6 +1757,8 @@ prlsdkEventsHandler(PRL_HANDLE prlEvent, PRL_VOID_PTR opaque)
     switch (prlIssuerType) {
         case PIE_VIRTUAL_MACHINE:
             prlsdkHandleVmEvent(privconn, prlEvent);
+            // prlsdkHandleVmEvent() took ownership of the event
+            prlEvent = PRL_INVALID_HANDLE;
             break;
         default:
             VIR_DEBUG("Skipping event of issuer type %d", prlIssuerType);
@@ -1705,6 +1769,7 @@ prlsdkEventsHandler(PRL_HANDLE prlEvent, PRL_VOID_PTR opaque)
     return PRL_ERR_SUCCESS;
 }
 
+
 int prlsdkSubscribeToPCSEvents(parallelsConnPtr privconn)
 {
     PRL_RESULT pret = PRL_ERR_UNINITIALIZED;
@@ -3442,3 +3507,130 @@ prlsdkDomainManagedSaveRemove(virDomainObjPtr dom)
 
     return 0;
 }
+
+static int
+prlsdkExtractStatsParam(PRL_HANDLE sdkstats, const char *name, long long *val)
+{
+    PRL_HANDLE param = PRL_INVALID_HANDLE;
+    PRL_RESULT pret;
+    PRL_INT64 pval = 0;
+    int ret = -1;
+
+    pret = PrlEvent_GetParamByName(sdkstats, name, &param);
+    if (pret == PRL_ERR_NO_DATA) {
+        *val = -1;
+        ret = 0;
+        goto cleanup;
+    } else if (PRL_FAILED(pret)) {
+        logPrlError(pret);
+        goto cleanup;
+    }
+    pret = PrlEvtPrm_ToInt64(param, &pval);
+    prlsdkCheckRetGoto(pret, cleanup);
+
+    *val = pval;
+    ret = 0;
+
+ cleanup:
+    PrlHandle_Free(param);
+    return ret;
+}
+
+#define PARALLELS_STATISTICS_TIMEOUT (60 * 1000)
+
+static int
+prlsdkGetStatsParam(virDomainObjPtr dom, const char *name, long long *val)
+{
+    parallelsDomObjPtr privdom = dom->privateData;
+    PRL_HANDLE job = PRL_INVALID_HANDLE;
+    unsigned long long now;
+
+    if (privdom->cache.stats != PRL_INVALID_HANDLE) {
+        // reset count to keep the subscription
+        privdom->cache.count = 0;
+        return prlsdkExtractStatsParam(privdom->cache.stats, name, val);
+    }
+
+    if (privdom->cache.count == -1) {
+        job = PrlVm_SubscribeToPerfStats(privdom->sdkdom, NULL);
+        if (PRL_FAILED(waitJob(job)))
+            goto error;
+    }
+
+    // either move from the unsubscribed to the subscribed state,
+    // or reset the count so we stop progressing towards unsubscribing
+    privdom->cache.count = 0;
+
+    if (virTimeMillisNow(&now) < 0) {
+        virReportSystemError(errno, "%s", _("Unable to get current time"));
+        goto error;
+    }
+
+    while (privdom->cache.stats == PRL_INVALID_HANDLE) {
+        if (virCondWaitUntil(&privdom->cache.cond, &dom->parent.lock,
+                        now + PARALLELS_STATISTICS_TIMEOUT) < 0) {
+            if (errno == ETIMEDOUT) {
+                virReportError(VIR_ERR_OPERATION_TIMEOUT, "%s",
+                               _("Timeout waiting for statistics event"));
+                goto error;
+            } else {
+                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                               _("Unable to wait on monitor condition"));
+                goto error;
+            }
+        }
+    }
+
+    return prlsdkExtractStatsParam(privdom->cache.stats, name, val);
+ error:
+    return -1;
+}
+
+int
+prlsdkGetBlockStats(virDomainObjPtr dom, virDomainDiskDefPtr disk, virDomainBlockStatsPtr stats)
+{
+    virDomainDeviceDriveAddressPtr address;
+    int idx;
+    const char *prefix;
+    int ret = -1;
+    char *name = NULL;
+
+    address = &disk->info.addr.drive;
+    switch (disk->bus) {
+    case VIR_DOMAIN_DISK_BUS_IDE:
+        prefix = "ide";
+        idx = address->bus * 2 + address->unit;
+        break;
+    case VIR_DOMAIN_DISK_BUS_SATA:
+        prefix = "sata";
+        idx = address->unit;
+        break;
+    case VIR_DOMAIN_DISK_BUS_SCSI:
+        prefix = "scsi";
+        idx = address->unit;
+        break;
+    default:
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                   _("Unknown disk bus: %X"), disk->bus);
+        goto cleanup;
+    }
+
+
+#define PRLSDK_GET_STAT_PARAM(VAL, TYPE, NAME)                          \
+    if (virAsprintf(&name, "devices.%s%d.%s", prefix, idx, NAME) < 0)   \
+        goto cleanup;                                                   \
+    if (prlsdkGetStatsParam(dom, name, &stats->VAL) < 0)                \
+        goto cleanup;                                                   \
+    VIR_FREE(name);
+
+    PARALLELS_BLOCK_STATS_FOREACH(PRLSDK_GET_STAT_PARAM)
+
+#undef PRLSDK_GET_STAT_PARAM
+
+    ret = 0;
+
+ cleanup:
+
+    VIR_FREE(name);
+    return ret;
+}
index 3f17fc8b2fbf6972e117c610f3bc053625a1a31a..afa6745b74547a8275fd5e8819a2b13a5d3b2a64 100644 (file)
@@ -64,3 +64,5 @@ int
 prlsdkAttachVolume(virDomainObjPtr dom, virDomainDiskDefPtr disk);
 int
 prlsdkDetachVolume(virDomainObjPtr dom, virDomainDiskDefPtr disk);
+int
+prlsdkGetBlockStats(virDomainObjPtr dom, virDomainDiskDefPtr disk, virDomainBlockStatsPtr stats);
index ff9d47d9d8bd6b3ee18d5fae439906da37c51203..540986bc316f6f948e7ee1288e66956777d4f97c 100644 (file)
@@ -64,6 +64,34 @@ parallelsDomObjFromDomain(virDomainPtr domain)
 
 }
 
+/**
+ * parallelsDomObjFromDomainRef:
+ * @domain: Domain pointer that has to be looked up
+ *
+ * This function looks up @domain and returns the appropriate virDomainObjPtr
+ * that has to be released by calling virDomainObjEndAPI().
+ *
+ * Returns the domain object with incremented reference counter which is locked
+ * on success, NULL otherwise.
+ */
+virDomainObjPtr
+parallelsDomObjFromDomainRef(virDomainPtr domain)
+{
+    virDomainObjPtr vm;
+    parallelsConnPtr privconn = domain->conn->privateData;
+    char uuidstr[VIR_UUID_STRING_BUFLEN];
+
+    vm = virDomainObjListFindByUUIDRef(privconn->domains, domain->uuid);
+    if (!vm) {
+        virUUIDFormat(domain->uuid, uuidstr);
+        virReportError(VIR_ERR_NO_DOMAIN,
+                       _("no domain with matching uuid '%s' (%s)"),
+                       uuidstr, domain->name);
+        return NULL;
+    }
+
+    return vm;
+}
 
 static int
 parallelsDoCmdRun(char **outbuf, const char *binary, va_list list)
index e8a92bb0246996d78e9e8015108b4b292c41a368..84bef24a39a3ff5acf3f772ac96f7506e7a1163a 100644 (file)
@@ -74,11 +74,22 @@ struct _parallelsConn {
 typedef struct _parallelsConn parallelsConn;
 typedef struct _parallelsConn *parallelsConnPtr;
 
+struct _parallelsCountersCache {
+    PRL_HANDLE stats;
+    virCond cond;
+    // -1 - unsubscribed
+    // > -1 - subscribed
+    int count;
+};
+
+typedef struct _parallelsCountersCache parallelsCountersCache;
+
 struct parallelsDomObj {
     int id;
     char *uuid;
     char *home;
     PRL_HANDLE sdkdom;
+    parallelsCountersCache cache;
 };
 
 typedef struct parallelsDomObj *parallelsDomObjPtr;
@@ -92,6 +103,7 @@ int parallelsNetworkClose(virConnectPtr conn);
 extern virNetworkDriver parallelsNetworkDriver;
 
 virDomainObjPtr parallelsDomObjFromDomain(virDomainPtr domain);
+virDomainObjPtr parallelsDomObjFromDomainRef(virDomainPtr domain);
 
 virJSONValuePtr parallelsParseOutput(const char *binary, ...)
     ATTRIBUTE_NONNULL(1) ATTRIBUTE_SENTINEL;
@@ -107,4 +119,10 @@ virStorageVolPtr parallelsStorageVolLookupByPathLocked(virConnectPtr conn,
 int parallelsStorageVolDefRemove(virStoragePoolObjPtr privpool,
                                  virStorageVolDefPtr privvol);
 
+#define PARALLELS_BLOCK_STATS_FOREACH(OP)                                   \
+            OP(rd_req, VIR_DOMAIN_BLOCK_STATS_READ_REQ, "read_requests")    \
+            OP(rd_bytes, VIR_DOMAIN_BLOCK_STATS_READ_BYTES, "read_total")   \
+            OP(wr_req, VIR_DOMAIN_BLOCK_STATS_WRITE_REQ, "write_requests")  \
+            OP(wr_bytes, VIR_DOMAIN_BLOCK_STATS_WRITE_BYTES, "write_total")
+
 #endif