}
+static int
+remoteDispatchNodeAllocPages(virNetServerPtr server ATTRIBUTE_UNUSED,
+                             virNetServerClientPtr client,
+                             virNetMessagePtr msg ATTRIBUTE_UNUSED,
+                             virNetMessageErrorPtr rerr,
+                             remote_node_alloc_pages_args *args,
+                             remote_node_alloc_pages_ret *ret)
+{
+    int rv = -1;
+    int len;
+    struct daemonClientPrivate *priv =
+        virNetServerClientGetPrivateData(client);
+
+    if (!priv->conn) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("connection not open"));
+        goto cleanup;
+    }
+
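+    /* Note: the XDR decoder hands us pageCounts as an array of
+     * uint64_t (unsigned hyper on the wire), so the cast to
+     * unsigned long long in the call below is width-safe. */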
+    if ((len = virNodeAllocPages(priv->conn,
+                                 args->pageSizes.pageSizes_len,
+                                 args->pageSizes.pageSizes_val,
+                                 (unsigned long long *) args->pageCounts.pageCounts_val,
+                                 args->startCell,
+                                 args->cellCount,
+                                 args->flags)) < 0)
+        goto cleanup;
+
+    ret->ret = len;
+    rv = 0;
+
+ cleanup:
+    if (rv < 0)
+        virNetMessageSaveError(rerr);
+    return rv;
+}
+
+
/*----- Helpers. -----*/
/* get_nonnull_domain and get_nonnull_network turn an on-wire
                        unsigned int cellcount,
                        unsigned long long *counts,
                        unsigned int flags);
+
+typedef enum {
+    VIR_NODE_ALLOC_PAGES_ADD = 0, /* Add @pageCounts to the pages pool. This
+                                     can only be used to grow the pool. */
+    VIR_NODE_ALLOC_PAGES_SET = (1 << 0), /* Don't add @pageCounts; instead, set
+                                            the pool to the passed number of
+                                            pages. This can be used to free
+                                            allocated pages. */
+} virNodeAllocPagesFlags;
+
+int virNodeAllocPages(virConnectPtr conn,
+                      unsigned int npages,
+                      unsigned int *pageSizes,
+                      unsigned long long *pageCounts,
+                      int startCell,
+                      unsigned int cellCount,
+                      unsigned int flags);
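To make the two flag modes concrete, here is a minimal caller-side sketch (the function name reserveHugepages and the concrete values are hypothetical; it assumes an already-open connection and a host offering 2 MiB huge pages, i.e. a page size of 2048 KiB):

    #include <libvirt/libvirt.h>

    /* Grow, then pin, the 2 MiB page pool on NUMA node 0. */
    static int
    reserveHugepages(virConnectPtr conn)
    {
        unsigned int pageSizes[] = { 2048 };       /* page size in KiB (2 MiB) */
        unsigned long long pageCounts[] = { 16 };  /* pages to reserve */

        /* VIR_NODE_ALLOC_PAGES_ADD (flags == 0): grow the pool by 16 pages. */
        if (virNodeAllocPages(conn, 1, pageSizes, pageCounts,
                              0, 1, VIR_NODE_ALLOC_PAGES_ADD) < 0)
            return -1;

        /* VIR_NODE_ALLOC_PAGES_SET: resize the pool to exactly 16 pages;
         * a count lower than the current allocation frees pages. */
        return virNodeAllocPages(conn, 1, pageSizes, pageCounts,
                                 0, 1, VIR_NODE_ALLOC_PAGES_SET);
    }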
/**
* virSchedParameterType:
*
                                  virDomainStatsRecordPtr **retStats,
                                  unsigned int flags);
+typedef int
+(*virDrvNodeAllocPages)(virConnectPtr conn,
+                        unsigned int npages,
+                        unsigned int *pageSizes,
+                        unsigned long long *pageCounts,
+                        int startCell,
+                        unsigned int cellCount,
+                        unsigned int flags);
+
typedef struct _virDriver virDriver;
typedef virDriver *virDriverPtr;
    virDrvNodeGetFreePages nodeGetFreePages;
    virDrvConnectGetDomainCapabilities connectGetDomainCapabilities;
    virDrvConnectGetAllDomainStats connectGetAllDomainStats;
+    virDrvNodeAllocPages nodeAllocPages;
};
    VIR_FREE(stats);
}
+
+
+/**
+ * virNodeAllocPages:
+ * @conn: pointer to the hypervisor connection
+ * @npages: number of items in the @pageSizes and @pageCounts arrays
+ * @pageSizes: which huge page sizes to allocate
+ * @pageCounts: how many pages should be allocated
+ * @startCell: index of first cell to allocate pages on
+ * @cellCount: number of consecutive cells to allocate pages on
+ * @flags: extra flags; binary-OR of virNodeAllocPagesFlags
+ *
+ * Sometimes, when trying to start a new domain, it may be
+ * necessary to reserve some huge pages in the system pool which
+ * can then be allocated by the domain. This API serves that
+ * purpose. On input, @pageSizes and @pageCounts are arrays of
+ * the same cardinality, @npages. @pageSizes contains the page
+ * sizes to allocate in the system (the size unit is kibibytes),
+ * and @pageCounts contains the corresponding number of pages to
+ * reserve. If @flags is 0 (VIR_NODE_ALLOC_PAGES_ADD), each pool
+ * corresponding to an entry in @pageSizes grows by the number of
+ * pages in the corresponding @pageCounts entry. If @flags
+ * contains VIR_NODE_ALLOC_PAGES_SET, each pool mentioned is
+ * resized to exactly the given number of pages. The pools can be
+ * adjusted over several NUMA nodes at once: set @startCell to
+ * the index of the first NUMA node and @cellCount to the number
+ * of consecutive nodes to cover. As a special case, if
+ * @startCell is -1, the kernel is instructed to allocate the
+ * pages proportionally over all NUMA nodes.
+ *
+ * Returns: the number of nodes successfully adjusted or -1 in
+ * case of an error.
+ */
+int
+virNodeAllocPages(virConnectPtr conn,
+                  unsigned int npages,
+                  unsigned int *pageSizes,
+                  unsigned long long *pageCounts,
+                  int startCell,
+                  unsigned int cellCount,
+                  unsigned int flags)
+{
+    VIR_DEBUG("conn=%p npages=%u pageSizes=%p pageCounts=%p "
+              "startCell=%d cellCount=%u flags=%x",
+              conn, npages, pageSizes, pageCounts, startCell,
+              cellCount, flags);
+
+    virResetLastError();
+
+    virCheckConnectReturn(conn, -1);
+    virCheckNonZeroArgGoto(npages, error);
+    virCheckNonNullArgGoto(pageSizes, error);
+    virCheckNonNullArgGoto(pageCounts, error);
+    virCheckNonZeroArgGoto(cellCount, error);
+
+    if (conn->driver->nodeAllocPages) {
+        int ret;
+        ret = conn->driver->nodeAllocPages(conn, npages, pageSizes,
+                                           pageCounts, startCell,
+                                           cellCount, flags);
+        if (ret < 0)
+            goto error;
+        return ret;
+    }
+
+    virReportUnsupportedError();
+ error:
+    virDispatchError(conn);
+    return -1;
+}
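As a usage sketch of the @startCell special case documented above (the function name addHugepagesAllNodes and the values are hypothetical; it assumes an open connection and 2 MiB huge pages):

    #include <libvirt/libvirt.h>

    /* Spread 1024 x 2 MiB pages proportionally over all NUMA nodes. */
    static int
    addHugepagesAllNodes(virConnectPtr conn)
    {
        unsigned int pageSizes[] = { 2048 };         /* 2 MiB in KiB */
        unsigned long long pageCounts[] = { 1024 };
        int nodes;

        /* startCell == -1 asks the kernel to distribute the pages over
         * all NUMA nodes proportionally; cellCount must still be
         * non-zero to pass the argument checks above. */
        nodes = virNodeAllocPages(conn, 1, pageSizes, pageCounts,
                                  -1, 1, VIR_NODE_ALLOC_PAGES_ADD);

        /* On success, nodes holds the number of nodes adjusted. */
        return nodes;
    }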
        virDomainStatsRecordListFree;
} LIBVIRT_1.2.7;
+LIBVIRT_1.2.9 {
+    global:
+        virNodeAllocPages;
+} LIBVIRT_1.2.8;
+
# .... define new API here using predicted next version number ....
    return rv;
}
+
+static int
+remoteNodeAllocPages(virConnectPtr conn,
+                     unsigned int npages,
+                     unsigned int *pageSizes,
+                     unsigned long long *pageCounts,
+                     int startCell,
+                     unsigned int cellCount,
+                     unsigned int flags)
+{
+    int rv = -1;
+    remote_node_alloc_pages_args args;
+    remote_node_alloc_pages_ret ret;
+    struct private_data *priv = conn->privateData;
+
+    remoteDriverLock(priv);
+
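+    /* Both arrays in remote_node_alloc_pages_args are bounded by
+     * REMOTE_NODE_MAX_CELLS in the protocol definition, so reject
+     * longer inputs here rather than failing during XDR encoding. */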
+    if (npages > REMOTE_NODE_MAX_CELLS) {
+        virReportError(VIR_ERR_RPC,
+                       _("too many page sizes: %u > %d"),
+                       npages, REMOTE_NODE_MAX_CELLS);
+        goto done;
+    }
+
+    args.pageSizes.pageSizes_val = (u_int *) pageSizes;
+    args.pageSizes.pageSizes_len = npages;
+    args.pageCounts.pageCounts_val = (uint64_t *) pageCounts;
+    args.pageCounts.pageCounts_len = npages;
+    args.startCell = startCell;
+    args.cellCount = cellCount;
+    args.flags = flags;
+
+    memset(&ret, 0, sizeof(ret));
+    if (call(conn, priv, 0, REMOTE_PROC_NODE_ALLOC_PAGES,
+             (xdrproc_t) xdr_remote_node_alloc_pages_args, (char *) &args,
+             (xdrproc_t) xdr_remote_node_alloc_pages_ret, (char *) &ret) == -1)
+        goto done;
+
+    rv = ret.ret;
+
+ done:
+    remoteDriverUnlock(priv);
+    return rv;
+}
+
+
/* get_nonnull_domain and get_nonnull_network turn an on-wire
* (name, uuid) pair into virDomainPtr or virNetworkPtr object.
* These can return NULL if underlying memory allocations fail,
    .nodeGetFreePages = remoteNodeGetFreePages, /* 1.2.6 */
    .connectGetDomainCapabilities = remoteConnectGetDomainCapabilities, /* 1.2.7 */
    .connectGetAllDomainStats = remoteConnectGetAllDomainStats, /* 1.2.8 */
+    .nodeAllocPages = remoteNodeAllocPages, /* 1.2.9 */
};
static virNetworkDriver network_driver = {
    unsigned hyper counts<REMOTE_NODE_MAX_CELLS>;
};
+struct remote_node_alloc_pages_args {
+    unsigned int pageSizes<REMOTE_NODE_MAX_CELLS>;
+    unsigned hyper pageCounts<REMOTE_NODE_MAX_CELLS>;
+    int startCell;
+    unsigned int cellCount;
+    unsigned int flags;
+};
+
+struct remote_node_alloc_pages_ret {
+    int ret;
+};
+
struct remote_network_dhcp_lease {
    remote_nonnull_string iface;
    hyper expirytime;
     * @generate: both
     * @acl: none
     */
-    REMOTE_PROC_DOMAIN_EVENT_CALLBACK_TUNABLE = 346
+    REMOTE_PROC_DOMAIN_EVENT_CALLBACK_TUNABLE = 346,
+
+    /**
+     * @generate: none
+     * @acl: connect:write
+     */
+    REMOTE_PROC_NODE_ALLOC_PAGES = 347
};
                uint64_t * counts_val;
        } counts;
};
+struct remote_node_alloc_pages_args {
+        struct {
+                u_int pageSizes_len;
+                u_int * pageSizes_val;
+        } pageSizes;
+        struct {
+                u_int pageCounts_len;
+                uint64_t * pageCounts_val;
+        } pageCounts;
+        int startCell;
+        u_int cellCount;
+        u_int flags;
+};
+struct remote_node_alloc_pages_ret {
+        int ret;
+};
struct remote_network_dhcp_lease {
        remote_nonnull_string iface;
        int64_t expirytime;
        REMOTE_PROC_CONNECT_GET_ALL_DOMAIN_STATS = 344,
        REMOTE_PROC_DOMAIN_BLOCK_COPY = 345,
        REMOTE_PROC_DOMAIN_EVENT_CALLBACK_TUNABLE = 346,
+        REMOTE_PROC_NODE_ALLOC_PAGES = 347,
};