return rv;
}
+
+static int
+remoteDispatchNodeGetFreePages(virNetServerPtr server ATTRIBUTE_UNUSED,
+ virNetServerClientPtr client,
+ virNetMessagePtr msg ATTRIBUTE_UNUSED,
+ virNetMessageErrorPtr rerr,
+ remote_node_get_free_pages_args *args,
+ remote_node_get_free_pages_ret *ret)
+{
+ int rv = -1;
+ int len;
+ struct daemonClientPrivate *priv =
+ virNetServerClientGetPrivateData(client);
+
+ if (!priv->conn) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("connection not open"));
+ goto cleanup;
+ }
+
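+    /* The reply carries pages_len * cellCount items; refuse requests
+     * that would exceed the REMOTE_NODE_MAX_CELLS bound declared in
+     * the protocol. */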
+ if (args->pages.pages_len * args->cellCount > REMOTE_NODE_MAX_CELLS) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("the result won't fit into REMOTE_NODE_MAX_CELLS"));
+ goto cleanup;
+ }
+
+ /* Allocate return buffer. */
+ if (VIR_ALLOC_N(ret->counts.counts_val,
+ args->pages.pages_len * args->cellCount) < 0)
+ goto cleanup;
+
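+    /* The driver fills in the counts and returns how many items it
+     * actually set, which may be fewer than were allocated. */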
+ if ((len = virNodeGetFreePages(priv->conn,
+ args->pages.pages_len,
+ args->pages.pages_val,
+ args->startCell,
+ args->cellCount,
+ (unsigned long long *) ret->counts.counts_val,
+ args->flags)) <= 0)
+ goto cleanup;
+
+ ret->counts.counts_len = len;
+ rv = 0;
+
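+    /* On error, save the error for the client and release the
+     * partially-built reply. */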
+ cleanup:
+ if (rv < 0) {
+ virNetMessageSaveError(rerr);
+ VIR_FREE(ret->counts.counts_val);
+ }
+    return rv;
+}
+
+
/*----- Helpers. -----*/
/* get_nonnull_domain and get_nonnull_network turn an on-wire
unsigned int nseconds,
unsigned int flags);
+int virNodeGetFreePages(virConnectPtr conn,
+ unsigned int npages,
+ unsigned int *pages,
+ int startcell,
+ unsigned int cellcount,
+ unsigned long long *counts,
+ unsigned int flags);
/**
* virSchedParameterType:
*
unsigned int nmountpoints,
unsigned int flags);
+typedef int
+(*virDrvNodeGetFreePages)(virConnectPtr conn,
+ unsigned int npages,
+ unsigned int *pages,
+ int startCell,
+ unsigned int cellCount,
+ unsigned long long *counts,
+ unsigned int flags);
+
typedef struct _virDriver virDriver;
typedef virDriver *virDriverPtr;
virDrvDomainFSThaw domainFSThaw;
virDrvDomainGetTime domainGetTime;
virDrvDomainSetTime domainSetTime;
+ virDrvNodeGetFreePages nodeGetFreePages;
};
virDispatchError(dom->conn);
return -1;
}
+
+
+/**
+ * virNodeGetFreePages:
+ * @conn: pointer to the hypervisor connection
+ * @npages: number of items in the @pages array
+ * @pages: page sizes to query
+ * @startCell: index of first cell to return free pages info on.
+ * @cellCount: maximum number of cells for which free pages
+ * information can be returned.
+ * @counts: returned counts of free pages
+ * @flags: extra flags; not used yet, so callers should always pass 0
+ *
+ * This call queries the host system for free pages of the
+ * specified sizes. On input, @pages is expected to be filled
+ * with the page sizes the caller is interested in (the size
+ * unit is kibibytes, so e.g. pass 2048 for 2 MiB pages). Then
+ * @startCell refers to the first NUMA node that the info should
+ * be collected from, and @cellCount tells how many consecutive
+ * nodes should be queried. On output, @counts is filled with
+ * the desired information, with items grouped by NUMA node.
+ * So from @counts[0] through @counts[@npages - 1] you will find
+ * the counts for the first node (@startCell), then from
+ * @counts[@npages] through @counts[2 * @npages - 1] the info for
+ * the (@startCell + 1) node, and so on. It is the caller's
+ * responsibility to allocate the @counts array, which must hold
+ * at least @npages * @cellCount items.
+ *
+ * An example of how to use this API:
+ *
+ * unsigned int pages[] = { 4, 2048, 1048576 };
+ * unsigned int npages = ARRAY_CARDINALITY(pages);
+ * int startcell = 0;
+ * unsigned int cellcount = 2;
+ * size_t i, j;
+ *
+ * unsigned long long *counts = malloc(sizeof(*counts) * npages * cellcount);
+ *
+ * virNodeGetFreePages(conn, npages, pages,
+ *                     startcell, cellcount, counts, 0);
+ *
+ * for (i = 0; i < cellcount; i++) {
+ *     fprintf(stdout, "Cell %d\n", startcell + (int) i);
+ *     for (j = 0; j < npages; j++) {
+ *         fprintf(stdout, "  Page size=%uKiB count=%llu bytes=%llu\n",
+ *                 pages[j], counts[(i * npages) + j],
+ *                 pages[j] * 1024ULL * counts[(i * npages) + j]);
+ *     }
+ * }
+ *
+ * This code snippet will produce output similar to:
+ * Cell 0
+ *   Page size=4KiB count=300 bytes=1228800
+ *   Page size=2048KiB count=0 bytes=0
+ *   Page size=1048576KiB count=1 bytes=1073741824
+ * Cell 1
+ *   Page size=4KiB count=0 bytes=0
+ *   Page size=2048KiB count=20 bytes=41943040
+ *   Page size=1048576KiB count=0 bytes=0
+ *
+ * Returns: the number of entries filled in @counts or -1 in case of error.
+ */
+int
+virNodeGetFreePages(virConnectPtr conn,
+ unsigned int npages,
+ unsigned int *pages,
+ int startCell,
+ unsigned int cellCount,
+ unsigned long long *counts,
+ unsigned int flags)
+{
+    VIR_DEBUG("conn=%p, npages=%u, pages=%p, startCell=%d, "
+              "cellCount=%u, counts=%p, flags=%x",
+              conn, npages, pages, startCell, cellCount, counts, flags);
+
+ virResetLastError();
+
+ virCheckConnectReturn(conn, -1);
+ virCheckNonZeroArgGoto(npages, error);
+ virCheckNonNullArgGoto(pages, error);
+ virCheckNonZeroArgGoto(cellCount, error);
+ virCheckNonNullArgGoto(counts, error);
+
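+    /* Forward the call to the hypervisor driver, if it provides an
+     * implementation. */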
+ if (conn->driver->nodeGetFreePages) {
+ int ret;
+ ret = conn->driver->nodeGetFreePages(conn, npages, pages, startCell,
+ cellCount, counts, flags);
+ if (ret < 0)
+ goto error;
+ return ret;
+ }
+
+ virReportUnsupportedError();
+ error:
+ virDispatchError(conn);
+ return -1;
+}
virDomainSetTime;
} LIBVIRT_1.2.3;
+LIBVIRT_1.2.6 {
+ global:
+ virNodeGetFreePages;
+} LIBVIRT_1.2.5;
# .... define new API here using predicted next version number ....
}
+static int
+remoteNodeGetFreePages(virConnectPtr conn,
+ unsigned int npages,
+ unsigned int *pages,
+ int startCell,
+ unsigned int cellCount,
+ unsigned long long *counts,
+ unsigned int flags)
+{
+ int rv = -1;
+ remote_node_get_free_pages_args args;
+ remote_node_get_free_pages_ret ret;
+ struct private_data *priv = conn->privateData;
+
+    remoteDriverLock(priv);
+
+    /* Zero args so the cleanup path can free pages_val safely even on
+     * the early error exit. */
+    memset(&args, 0, sizeof(args));
+
+ if (npages * cellCount > REMOTE_NODE_MAX_CELLS) {
+        virReportError(VIR_ERR_RPC,
+                       _("too many NUMA cells: %u > %d"),
+                       npages * cellCount, REMOTE_NODE_MAX_CELLS);
+ goto done;
+ }
+
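+    /* Marshal the request: copy the caller's page sizes into the XDR
+     * arguments. */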
+ if (VIR_ALLOC_N(args.pages.pages_val, npages) < 0)
+ goto done;
+ memcpy(args.pages.pages_val, pages, npages * sizeof(*pages));
+ args.pages.pages_len = npages;
+ args.startCell = startCell;
+ args.cellCount = cellCount;
+ args.flags = flags;
+
+ memset(&ret, 0, sizeof(ret));
+ if (call(conn, priv, 0, REMOTE_PROC_NODE_GET_FREE_PAGES,
+ (xdrproc_t) xdr_remote_node_get_free_pages_args, (char *)&args,
+ (xdrproc_t) xdr_remote_node_get_free_pages_ret, (char *)&ret) == -1)
+ goto done;
+
+    memcpy(counts, ret.counts.counts_val,
+           ret.counts.counts_len * sizeof(*counts));
+
+    /* Grab the length before xdr_free() tears the reply down. */
+    rv = ret.counts.counts_len;
+
+    xdr_free((xdrproc_t) xdr_remote_node_get_free_pages_ret, (char *) &ret);
+
+ done:
+    VIR_FREE(args.pages.pages_val);
+    remoteDriverUnlock(priv);
+    return rv;
+}
+
+
/* get_nonnull_domain and get_nonnull_network turn an on-wire
* (name, uuid) pair into virDomainPtr or virNetworkPtr object.
* These can return NULL if underlying memory allocations fail,
.domainFSThaw = remoteDomainFSThaw, /* 1.2.5 */
.domainGetTime = remoteDomainGetTime, /* 1.2.5 */
.domainSetTime = remoteDomainSetTime, /* 1.2.5 */
+ .nodeGetFreePages = remoteNodeGetFreePages, /* 1.2.6 */
};
static virNetworkDriver network_driver = {
int filesystems;
};
+struct remote_node_get_free_pages_args {
+ unsigned int pages<REMOTE_NODE_MAX_CELLS>;
+ int startCell;
+ unsigned int cellCount;
+ unsigned int flags;
+};
+
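+/* Free page counts are grouped by cell: the first pages_len items
+ * belong to the first cell queried, the next pages_len items to the
+ * second one, and so on. */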
+struct remote_node_get_free_pages_ret {
+ unsigned hyper counts<REMOTE_NODE_MAX_CELLS>;
+};
+
/*----- Protocol. -----*/
* @generate: none
* @acl: none
*/
- REMOTE_PROC_DOMAIN_EVENT_BLOCK_JOB_2 = 339
+ REMOTE_PROC_DOMAIN_EVENT_BLOCK_JOB_2 = 339,
+
+ /**
+ * @generate: none
+ * @priority: high
+ * @acl: connect:read
+ */
+ REMOTE_PROC_NODE_GET_FREE_PAGES = 340
};
struct remote_domain_fsthaw_ret {
int filesystems;
};
+struct remote_node_get_free_pages_args {
+ struct {
+ u_int pages_len;
+ u_int * pages_val;
+ } pages;
+ int startCell;
+ u_int cellCount;
+ u_int flags;
+};
+struct remote_node_get_free_pages_ret {
+ struct {
+ u_int counts_len;
+ uint64_t * counts_val;
+ } counts;
+};
enum remote_procedure {
REMOTE_PROC_CONNECT_OPEN = 1,
REMOTE_PROC_CONNECT_CLOSE = 2,
REMOTE_PROC_DOMAIN_GET_TIME = 337,
REMOTE_PROC_DOMAIN_SET_TIME = 338,
REMOTE_PROC_DOMAIN_EVENT_BLOCK_JOB_2 = 339,
+ REMOTE_PROC_NODE_GET_FREE_PAGES = 340,
};