int xc_tmem_control(xc_interface *xch,
int32_t pool_id,
- uint32_t subop,
+ uint32_t cmd,
uint32_t cli_id,
uint32_t arg1,
uint32_t arg2,
void *buf)
{
- tmem_op_t op;
+ DECLARE_SYSCTL;
DECLARE_HYPERCALL_BOUNCE(buf, arg1, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
int rc;
- op.cmd = TMEM_CONTROL;
- op.pool_id = pool_id;
- op.u.ctrl.subop = subop;
- op.u.ctrl.cli_id = cli_id;
- op.u.ctrl.arg1 = arg1;
- op.u.ctrl.arg2 = arg2;
- op.u.ctrl.oid[0] = 0;
- op.u.ctrl.oid[1] = 0;
- op.u.ctrl.oid[2] = 0;
-
- if ( subop == TMEMC_LIST && arg1 != 0 )
+ sysctl.cmd = XEN_SYSCTL_tmem_op;
+ sysctl.u.tmem_op.pool_id = pool_id;
+ sysctl.u.tmem_op.cmd = cmd;
+ sysctl.u.tmem_op.cli_id = cli_id;
+ sysctl.u.tmem_op.arg1 = arg1;
+ sysctl.u.tmem_op.arg2 = arg2;
+ sysctl.u.tmem_op.pad = 0;
+ sysctl.u.tmem_op.oid[0] = 0;
+ sysctl.u.tmem_op.oid[1] = 0;
+ sysctl.u.tmem_op.oid[2] = 0;
+
+ if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 )
{
    if ( buf == NULL )
    {
        errno = EINVAL;
        return -1;
    }
    if ( xc_hypercall_bounce_pre(xch, buf) )
    {
        PERROR("Could not bounce buffer for tmem control hypercall");
        return -1;
    }
}
- set_xen_guest_handle(op.u.ctrl.buf, buf);
+ set_xen_guest_handle(sysctl.u.tmem_op.buf, buf);
- rc = do_tmem_op(xch, &op);
+ rc = do_sysctl(xch, &sysctl);
- if (subop == TMEMC_LIST && arg1 != 0)
+ if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 )
xc_hypercall_bounce_post(xch, buf);
return rc;
}
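/*
 * Usage sketch (illustrative, not part of the patch; `domid` is assumed to
 * be the target domain): listing tmem stats through the new sysctl-backed
 * path, as the libxl caller further below does.
 *
 *     char stats[32768];
 *     int rc = xc_tmem_control(xch, -1, XEN_SYSCTL_TMEM_OP_LIST, domid,
 *                              sizeof(stats), 0, stats);
 */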
int xc_tmem_control_oid(xc_interface *xch,
int32_t pool_id,
- uint32_t subop,
+ uint32_t cmd,
uint32_t cli_id,
uint32_t arg1,
uint32_t arg2,
struct tmem_oid oid,
void *buf)
{
- tmem_op_t op;
+ DECLARE_SYSCTL;
DECLARE_HYPERCALL_BOUNCE(buf, arg1, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
int rc;
- op.cmd = TMEM_CONTROL;
- op.pool_id = pool_id;
- op.u.ctrl.subop = subop;
- op.u.ctrl.cli_id = cli_id;
- op.u.ctrl.arg1 = arg1;
- op.u.ctrl.arg2 = arg2;
- op.u.ctrl.oid[0] = oid.oid[0];
- op.u.ctrl.oid[1] = oid.oid[1];
- op.u.ctrl.oid[2] = oid.oid[2];
-
- if ( subop == TMEMC_LIST && arg1 != 0 )
+ sysctl.cmd = XEN_SYSCTL_tmem_op;
+ sysctl.u.tmem_op.pool_id = pool_id;
+ sysctl.u.tmem_op.cmd = cmd;
+ sysctl.u.tmem_op.cli_id = cli_id;
+ sysctl.u.tmem_op.arg1 = arg1;
+ sysctl.u.tmem_op.arg2 = arg2;
+ sysctl.u.tmem_op.pad = 0;
+ sysctl.u.tmem_op.oid[0] = oid.oid[0];
+ sysctl.u.tmem_op.oid[1] = oid.oid[1];
+ sysctl.u.tmem_op.oid[2] = oid.oid[2];
+
+ if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 )
{
    if ( buf == NULL )
    {
        errno = EINVAL;
        return -1;
    }
    if ( xc_hypercall_bounce_pre(xch, buf) )
    {
        PERROR("Could not bounce buffer for tmem control hypercall");
        return -1;
    }
}
- set_xen_guest_handle(op.u.ctrl.buf, buf);
+ set_xen_guest_handle(sysctl.u.tmem_op.buf, buf);
- rc = do_tmem_op(xch, &op);
+ rc = do_sysctl(xch, &sysctl);
- if (subop == TMEMC_LIST && arg1 != 0)
+ if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 )
xc_hypercall_bounce_post(xch, buf);
return rc;
}
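/*
 * xc_tmem_control_oid() differs from xc_tmem_control() only in taking an
 * explicit object id, used by the restore ops. Sketch (names as in the
 * restore loop below):
 *
 *     rc = xc_tmem_control_oid(xch, pool_id,
 *                              XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE, dom,
 *                              bufsize, index, oid, buf);
 */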
uint32_t minusone = -1;
struct tmem_handle *h;
- if ( xc_tmem_control(xch,0,TMEMC_SAVE_BEGIN,dom,live,0,NULL) <= 0 )
+ if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_BEGIN,dom,live,0,NULL) <= 0 )
return 0;
if ( write_exact(io_fd, &marker, sizeof(marker)) )
return -1;
- version = xc_tmem_control(xch,0,TMEMC_SAVE_GET_VERSION,0,0,0,NULL);
+ version = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION,0,0,0,NULL);
if ( write_exact(io_fd, &version, sizeof(version)) )
return -1;
- max_pools = xc_tmem_control(xch,0,TMEMC_SAVE_GET_MAXPOOLS,0,0,0,NULL);
+ max_pools = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS,0,0,0,NULL);
if ( write_exact(io_fd, &max_pools, sizeof(max_pools)) )
return -1;
if ( version == -1 || max_pools == -1 )
return -1;
if ( write_exact(io_fd, &minusone, sizeof(minusone)) )
return -1;
- flags = xc_tmem_control(xch,0,TMEMC_SAVE_GET_CLIENT_FLAGS,dom,0,0,NULL);
+ flags = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS,dom,0,0,NULL);
if ( write_exact(io_fd, &flags, sizeof(flags)) )
return -1;
- weight = xc_tmem_control(xch,0,TMEMC_SAVE_GET_CLIENT_WEIGHT,dom,0,0,NULL);
+ weight = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT,dom,0,0,NULL);
if ( write_exact(io_fd, &weight, sizeof(weight)) )
return -1;
- cap = xc_tmem_control(xch,0,TMEMC_SAVE_GET_CLIENT_CAP,dom,0,0,NULL);
+ cap = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_CAP,dom,0,0,NULL);
if ( write_exact(io_fd, &cap, sizeof(cap)) )
return -1;
if ( flags == -1 || weight == -1 || cap == -1 )
    return -1;
int checksum = 0;
/* get pool id, flags, pagesize, n_pages, uuid */
- flags = xc_tmem_control(xch,i,TMEMC_SAVE_GET_POOL_FLAGS,dom,0,0,NULL);
+ flags = xc_tmem_control(xch,i,XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS,dom,0,0,NULL);
if ( flags != -1 )
{
pool_id = i;
- n_pages = xc_tmem_control(xch,i,TMEMC_SAVE_GET_POOL_NPAGES,dom,0,0,NULL);
+ n_pages = xc_tmem_control(xch,i,XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES,dom,0,0,NULL);
if ( !(flags & TMEM_POOL_PERSIST) )
n_pages = 0;
- (void)xc_tmem_control(xch,i,TMEMC_SAVE_GET_POOL_UUID,dom,sizeof(uuid),0,&uuid);
+ (void)xc_tmem_control(xch,i,XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID,dom,sizeof(uuid),0,&uuid);
if ( write_exact(io_fd, &pool_id, sizeof(pool_id)) )
return -1;
if ( write_exact(io_fd, &flags, sizeof(flags)) )
{
int ret;
if ( (ret = xc_tmem_control(xch, pool_id,
- TMEMC_SAVE_GET_NEXT_PAGE, dom,
+ XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE, dom,
bufsize, 0, buf)) > 0 )
{
h = (struct tmem_handle *)buf;
if ( write_exact(io_fd, &marker, sizeof(marker)) )
return -1;
- while ( xc_tmem_control(xch, 0, TMEMC_SAVE_GET_NEXT_INV, dom,
+ while ( xc_tmem_control(xch, 0, XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV, dom,
sizeof(handle),0,&handle) > 0 ) {
if ( write_exact(io_fd, &handle.pool_id, sizeof(handle.pool_id)) )
return -1;
/* only called for live migration */
void xc_tmem_save_done(xc_interface *xch, int dom)
{
- xc_tmem_control(xch,0,TMEMC_SAVE_END,dom,0,0,NULL);
+ xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_END,dom,0,0,NULL);
}
/* restore routines */
uint32_t weight, cap, flags;
int checksum = 0;
- save_version = xc_tmem_control(xch,0,TMEMC_SAVE_GET_VERSION,dom,0,0,NULL);
+ save_version = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION,dom,0,0,NULL);
if ( save_version == -1 )
return -1; /* domain doesn't exist */
if ( read_exact(io_fd, &this_version, sizeof(this_version)) )
return -1;
if ( minusone != -1 )
return -1;
- if ( xc_tmem_control(xch,0,TMEMC_RESTORE_BEGIN,dom,0,0,NULL) < 0 )
+ if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN,dom,0,0,NULL) < 0 )
return -1;
if ( read_exact(io_fd, &flags, sizeof(flags)) )
return -1;
if ( flags & TMEM_CLIENT_COMPRESS )
- if ( xc_tmem_control(xch,0,TMEMC_SET_COMPRESS,dom,1,0,NULL) < 0 )
+ if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SET_COMPRESS,dom,1,0,NULL) < 0 )
return -1;
if ( flags & TMEM_CLIENT_FROZEN )
- if ( xc_tmem_control(xch,0,TMEMC_FREEZE,dom,0,0,NULL) < 0 )
+ if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_FREEZE,dom,0,0,NULL) < 0 )
return -1;
if ( read_exact(io_fd, &weight, sizeof(weight)) )
return -1;
- if ( xc_tmem_control(xch,0,TMEMC_SET_WEIGHT,dom,0,0,NULL) < 0 )
+ if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SET_WEIGHT,dom,0,0,NULL) < 0 )
return -1;
if ( read_exact(io_fd, &cap, sizeof(cap)) )
return -1;
- if ( xc_tmem_control(xch,0,TMEMC_SET_CAP,dom,0,0,NULL) < 0 )
+ if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SET_CAP,dom,0,0,NULL) < 0 )
return -1;
if ( read_exact(io_fd, &minusone, sizeof(minusone)) )
return -1;
if ( read_exact(io_fd, buf, bufsize) )
    return -1;
checksum += *buf;
if ( (rc = xc_tmem_control_oid(xch, pool_id,
- TMEMC_RESTORE_PUT_PAGE, dom,
+ XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE, dom,
bufsize, index, oid, buf)) <= 0 )
{
DPRINTF("xc_tmem_restore: putting page failed, rc=%d\n",rc);
return -1;
if ( read_exact(io_fd, &index, sizeof(index)) )
return -1;
- if ( xc_tmem_control_oid(xch, pool_id, TMEMC_RESTORE_FLUSH_PAGE, dom,
+ if ( xc_tmem_control_oid(xch, pool_id, XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE, dom,
0,index,oid,NULL) <= 0 )
return -1;
count++;
int rc;
char _buf[32768];
- rc = xc_tmem_control(ctx->xch, -1, TMEMC_LIST, domid, 32768, use_long,
+ rc = xc_tmem_control(ctx->xch, -1, XEN_SYSCTL_TMEM_OP_LIST, domid, 32768, use_long,
_buf);
if (rc < 0) {
LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
{
int rc;
- rc = xc_tmem_control(ctx->xch, -1, TMEMC_FREEZE, domid, 0, 0,
+ rc = xc_tmem_control(ctx->xch, -1, XEN_SYSCTL_TMEM_OP_FREEZE, domid, 0, 0,
NULL);
if (rc < 0) {
LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
{
int rc;
- rc = xc_tmem_control(ctx->xch, -1, TMEMC_THAW, domid, 0, 0,
+ rc = xc_tmem_control(ctx->xch, -1, XEN_SYSCTL_TMEM_OP_THAW, domid, 0, 0,
NULL);
if (rc < 0) {
LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
static int32_t tmem_setop_from_string(char *set_name)
{
if (!strcmp(set_name, "weight"))
- return TMEMC_SET_WEIGHT;
+ return XEN_SYSCTL_TMEM_OP_SET_WEIGHT;
else if (!strcmp(set_name, "cap"))
- return TMEMC_SET_CAP;
+ return XEN_SYSCTL_TMEM_OP_SET_CAP;
else if (!strcmp(set_name, "compress"))
- return TMEMC_SET_COMPRESS;
+ return XEN_SYSCTL_TMEM_OP_SET_COMPRESS;
else
return -1;
}
{
int rc;
- rc = xc_tmem_control(ctx->xch, -1, TMEMC_QUERY_FREEABLE_MB, -1, 0, 0, 0);
+ rc = xc_tmem_control(ctx->xch, -1, XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB, -1, 0, 0, 0);
if (rc < 0) {
LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
"Can not get tmem freeable memory");
&pool_id, &subop, &cli_id, &arg1, &arg2, &buf) )
return NULL;
- if ( (subop == TMEMC_LIST) && (arg1 > 32768) )
+ if ( (subop == XEN_SYSCTL_TMEM_OP_LIST) && (arg1 > 32768) )
arg1 = 32768;
if ( (rc = xc_tmem_control(self->xc_handle, pool_id, subop, cli_id, arg1, arg2, buffer)) < 0 )
return Py_BuildValue("i", rc);
switch (subop) {
- case TMEMC_LIST:
+ case XEN_SYSCTL_TMEM_OP_LIST:
return Py_BuildValue("s", buffer);
- case TMEMC_FLUSH:
+ case XEN_SYSCTL_TMEM_OP_FLUSH:
return Py_BuildValue("i", rc);
- case TMEMC_QUERY_FREEABLE_MB:
+ case XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB:
return Py_BuildValue("i", rc);
- case TMEMC_THAW:
- case TMEMC_FREEZE:
- case TMEMC_DESTROY:
- case TMEMC_SET_WEIGHT:
- case TMEMC_SET_CAP:
- case TMEMC_SET_COMPRESS:
+ case XEN_SYSCTL_TMEM_OP_THAW:
+ case XEN_SYSCTL_TMEM_OP_FREEZE:
+ case XEN_SYSCTL_TMEM_OP_DESTROY:
+ case XEN_SYSCTL_TMEM_OP_SET_WEIGHT:
+ case XEN_SYSCTL_TMEM_OP_SET_CAP:
+ case XEN_SYSCTL_TMEM_OP_SET_COMPRESS:
default:
break;
}
{
char buffer[4096];
- if (xc_tmem_control(handle->xc_handle,-1,TMEMC_LIST,domain->id,
+ if (xc_tmem_control(handle->xc_handle,-1,XEN_SYSCTL_TMEM_OP_LIST,domain->id,
sizeof(buffer),-1,buffer) < 0)
return;
domain->tmem_stats.curr_eph_pages = parse(buffer,"Ec");
* handle->page_size;
rc = xc_tmem_control(handle->xc_handle, -1,
- TMEMC_QUERY_FREEABLE_MB, -1, 0, 0, NULL);
+ XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB, -1, 0, 0, NULL);
node->freeable_mb = (rc < 0) ? 0 : rc;
/* malloc(0) is not portable, so allocate a single domain. This will
* be resized below. */
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
+#include <xen/tmem.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
case XEN_SYSCTL_tbuf_op:
ret = tb_control(&op->u.tbuf_op);
break;
-
+
case XEN_SYSCTL_sched_id:
op->u.sched_id.sched_id = sched_id();
break;
}
#endif
+ case XEN_SYSCTL_tmem_op:
+ ret = tmem_control(&op->u.tmem_op);
+ break;
+
default:
ret = arch_do_sysctl(op, u_sysctl);
copyback = 0;
#include <xen/tmem_xen.h> /* host-specific (eg Xen) code goes here */
#endif
+#include <public/sysctl.h>
#include <xen/tmem.h>
#include <xen/rbtree.h>
#include <xen/radix-tree.h>
static int tmemc_freeze_pools(domid_t cli_id, int arg)
{
struct client *client;
- bool_t freeze = (arg == TMEMC_FREEZE) ? 1 : 0;
- bool_t destroy = (arg == TMEMC_DESTROY) ? 1 : 0;
+ bool_t freeze = (arg == XEN_SYSCTL_TMEM_OP_FREEZE) ? 1 : 0;
+ bool_t destroy = (arg == XEN_SYSCTL_TMEM_OP_DESTROY) ? 1 : 0;
char *s;
s = destroy ? "destroyed" : ( freeze ? "frozen" : "thawed" );
switch (subop)
{
- case TMEMC_SET_WEIGHT:
+ case XEN_SYSCTL_TMEM_OP_SET_WEIGHT:
old_weight = client->weight;
client->weight = arg1;
tmem_client_info("tmem: weight set to %d for %s=%d\n",
atomic_sub(old_weight,&client_weight_total);
atomic_add(client->weight,&client_weight_total);
break;
- case TMEMC_SET_CAP:
+ case XEN_SYSCTL_TMEM_OP_SET_CAP:
client->cap = arg1;
tmem_client_info("tmem: cap set to %d for %s=%d\n",
arg1, tmem_cli_id_str, cli_id);
break;
- case TMEMC_SET_COMPRESS:
+ case XEN_SYSCTL_TMEM_OP_SET_COMPRESS:
if ( tmem_dedup_enabled() )
{
tmem_client_warn("tmem: compression %s for all %ss, cannot be changed when tmem_dedup is enabled\n",
switch(subop)
{
- case TMEMC_SAVE_BEGIN:
+ case XEN_SYSCTL_TMEM_OP_SAVE_BEGIN:
if ( client == NULL )
return 0;
for (p = 0; p < MAX_POOLS_PER_DOMAIN; p++)
client->live_migrating = 1;
rc = 1;
break;
- case TMEMC_RESTORE_BEGIN:
+ case XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN:
if ( client == NULL && (client = client_create(cli_id)) != NULL )
return 1;
break;
- case TMEMC_SAVE_GET_VERSION:
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION:
rc = TMEM_SPEC_VERSION;
break;
- case TMEMC_SAVE_GET_MAXPOOLS:
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS:
rc = MAX_POOLS_PER_DOMAIN;
break;
- case TMEMC_SAVE_GET_CLIENT_WEIGHT:
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT:
if ( client == NULL )
break;
rc = client->weight == -1 ? -2 : client->weight;
break;
- case TMEMC_SAVE_GET_CLIENT_CAP:
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_CAP:
if ( client == NULL )
break;
rc = client->cap == -1 ? -2 : client->cap;
break;
- case TMEMC_SAVE_GET_CLIENT_FLAGS:
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS:
if ( client == NULL )
break;
rc = (client->compress ? TMEM_CLIENT_COMPRESS : 0 ) |
(client->was_frozen ? TMEM_CLIENT_FROZEN : 0 );
break;
- case TMEMC_SAVE_GET_POOL_FLAGS:
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS:
if ( pool == NULL )
break;
rc = (pool->persistent ? TMEM_POOL_PERSIST : 0) |
(POOL_PAGESHIFT << TMEM_POOL_PAGESIZE_SHIFT) |
(TMEM_SPEC_VERSION << TMEM_POOL_VERSION_SHIFT);
break;
- case TMEMC_SAVE_GET_POOL_NPAGES:
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES:
if ( pool == NULL )
break;
rc = _atomic_read(pool->pgp_count);
break;
- case TMEMC_SAVE_GET_POOL_UUID:
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID:
if ( pool == NULL )
break;
rc = 0;
if ( copy_to_guest(guest_handle_cast(buf, void), pool->uuid, 2) )
rc = -EFAULT;
break;
- case TMEMC_SAVE_END:
+ case XEN_SYSCTL_TMEM_OP_SAVE_END:
if ( client == NULL )
break;
client->live_migrating = 0;
return do_tmem_flush_page(pool,oidp,index);
}
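+/*
+ * tmem_control() is now entered via the sysctl hypercall instead of
+ * do_tmem_op(), so it takes tmem_rwlock itself rather than relying on the
+ * tmem hypercall path's locking.
+ */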
-static int do_tmem_control(struct tmem_op *op)
+int tmem_control(struct xen_sysctl_tmem_op *op)
{
int ret;
uint32_t pool_id = op->pool_id;
- uint32_t subop = op->u.ctrl.subop;
- struct oid *oidp = (struct oid *)(&op->u.ctrl.oid[0]);
+ uint32_t cmd = op->cmd;
+ struct oid *oidp = (struct oid *)(&op->oid[0]);
- if ( xsm_tmem_control(XSM_PRIV) )
- return -EPERM;
+ if ( op->pad != 0 )
+ return -EINVAL;
- switch(subop)
+ write_lock(&tmem_rwlock);
+
+ switch (cmd)
{
- case TMEMC_THAW:
- case TMEMC_FREEZE:
- case TMEMC_DESTROY:
- ret = tmemc_freeze_pools(op->u.ctrl.cli_id,subop);
+ case XEN_SYSCTL_TMEM_OP_THAW:
+ case XEN_SYSCTL_TMEM_OP_FREEZE:
+ case XEN_SYSCTL_TMEM_OP_DESTROY:
+ ret = tmemc_freeze_pools(op->cli_id, cmd);
break;
- case TMEMC_FLUSH:
- ret = tmemc_flush_mem(op->u.ctrl.cli_id,op->u.ctrl.arg1);
+ case XEN_SYSCTL_TMEM_OP_FLUSH:
+ ret = tmemc_flush_mem(op->cli_id,op->arg1);
break;
- case TMEMC_LIST:
- ret = tmemc_list(op->u.ctrl.cli_id,
- guest_handle_cast(op->u.ctrl.buf, char),
- op->u.ctrl.arg1,op->u.ctrl.arg2);
+ case XEN_SYSCTL_TMEM_OP_LIST:
+ ret = tmemc_list(op->cli_id,
+ guest_handle_cast(op->buf, char), op->arg1, op->arg2);
break;
- case TMEMC_SET_WEIGHT:
- case TMEMC_SET_CAP:
- case TMEMC_SET_COMPRESS:
- ret = tmemc_set_var(op->u.ctrl.cli_id,subop,op->u.ctrl.arg1);
+ case XEN_SYSCTL_TMEM_OP_SET_WEIGHT:
+ case XEN_SYSCTL_TMEM_OP_SET_CAP:
+ case XEN_SYSCTL_TMEM_OP_SET_COMPRESS:
+ ret = tmemc_set_var(op->cli_id, cmd, op->arg1);
break;
- case TMEMC_QUERY_FREEABLE_MB:
+ case XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB:
ret = tmem_freeable_pages() >> (20 - PAGE_SHIFT);
break;
- case TMEMC_SAVE_BEGIN:
- case TMEMC_RESTORE_BEGIN:
- case TMEMC_SAVE_GET_VERSION:
- case TMEMC_SAVE_GET_MAXPOOLS:
- case TMEMC_SAVE_GET_CLIENT_WEIGHT:
- case TMEMC_SAVE_GET_CLIENT_CAP:
- case TMEMC_SAVE_GET_CLIENT_FLAGS:
- case TMEMC_SAVE_GET_POOL_FLAGS:
- case TMEMC_SAVE_GET_POOL_NPAGES:
- case TMEMC_SAVE_GET_POOL_UUID:
- case TMEMC_SAVE_END:
- ret = tmemc_save_subop(op->u.ctrl.cli_id,pool_id,subop,
- guest_handle_cast(op->u.ctrl.buf, char),
- op->u.ctrl.arg1);
+ case XEN_SYSCTL_TMEM_OP_SAVE_BEGIN:
+ case XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN:
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION:
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS:
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT:
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_CAP:
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS:
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS:
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES:
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID:
+ case XEN_SYSCTL_TMEM_OP_SAVE_END:
+ ret = tmemc_save_subop(op->cli_id, pool_id, cmd,
+ guest_handle_cast(op->buf, char), op->arg1);
break;
- case TMEMC_SAVE_GET_NEXT_PAGE:
- ret = tmemc_save_get_next_page(op->u.ctrl.cli_id, pool_id,
- guest_handle_cast(op->u.ctrl.buf, char),
- op->u.ctrl.arg1);
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE:
+ ret = tmemc_save_get_next_page(op->cli_id, pool_id,
+ guest_handle_cast(op->buf, char), op->arg1);
break;
- case TMEMC_SAVE_GET_NEXT_INV:
- ret = tmemc_save_get_next_inv(op->u.ctrl.cli_id,
- guest_handle_cast(op->u.ctrl.buf, char),
- op->u.ctrl.arg1);
+ case XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV:
+ ret = tmemc_save_get_next_inv(op->cli_id,
+ guest_handle_cast(op->buf, char), op->arg1);
break;
- case TMEMC_RESTORE_PUT_PAGE:
- ret = tmemc_restore_put_page(op->u.ctrl.cli_id,pool_id,
- oidp, op->u.ctrl.arg2,
- guest_handle_cast(op->u.ctrl.buf, char),
- op->u.ctrl.arg1);
+ case XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE:
+ ret = tmemc_restore_put_page(op->cli_id, pool_id, oidp, op->arg2,
+ guest_handle_cast(op->buf, char), op->arg1);
break;
- case TMEMC_RESTORE_FLUSH_PAGE:
- ret = tmemc_restore_flush_page(op->u.ctrl.cli_id,pool_id,
- oidp, op->u.ctrl.arg2);
+ case XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE:
+ ret = tmemc_restore_flush_page(op->cli_id, pool_id, oidp, op->arg2);
break;
default:
ret = -1;
}
+
+ write_unlock(&tmem_rwlock);
+
return ret;
}
if ( op.cmd == TMEM_CONTROL )
{
- rc = do_tmem_control(&op);
+ rc = -EOPNOTSUPP;
}
else if ( op.cmd == TMEM_AUTH )
{
write_unlock(&tmem_rwlock);
}
-#define MAX_EVICTS 10 /* should be variable or set via TMEMC_ ?? */
+#define MAX_EVICTS 10 /* should be variable or set via XEN_SYSCTL_TMEM_OP_ ?? */
void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
{
struct page_info *pfp;
typedef struct xen_sysctl_psr_cat_op xen_sysctl_psr_cat_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
+#define XEN_SYSCTL_TMEM_OP_ALL_CLIENTS 0xFFFFU
+
+#define XEN_SYSCTL_TMEM_OP_THAW 0
+#define XEN_SYSCTL_TMEM_OP_FREEZE 1
+#define XEN_SYSCTL_TMEM_OP_FLUSH 2
+#define XEN_SYSCTL_TMEM_OP_DESTROY 3
+#define XEN_SYSCTL_TMEM_OP_LIST 4
+#define XEN_SYSCTL_TMEM_OP_SET_WEIGHT 5
+#define XEN_SYSCTL_TMEM_OP_SET_CAP 6
+#define XEN_SYSCTL_TMEM_OP_SET_COMPRESS 7
+#define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB 8
+#define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN 10
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION 11
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS 12
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT 13
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_CAP 14
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS 15
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS 16
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES 17
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID 18
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE 19
+#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV 20
+#define XEN_SYSCTL_TMEM_OP_SAVE_END 21
+#define XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN 30
+#define XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE 32
+#define XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE 33
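+/* Values deliberately match the old TMEMC_* subops, hence the numbering gaps. */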
+
+struct xen_sysctl_tmem_op {
+ uint32_t cmd; /* IN: XEN_SYSCTL_TMEM_OP_*. */
+ int32_t pool_id; /* IN: 0 by default unless _SAVE_* or _RESTORE_*. */
+ uint32_t cli_id; /* IN: client id; 0 for XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB,
+ for all others a domain id or
+ XEN_SYSCTL_TMEM_OP_ALL_CLIENTS for all clients. */
+ uint32_t arg1; /* IN: 0 if not applicable to the command. */
+ uint32_t arg2; /* IN: 0 if not applicable to the command. */
+ uint32_t pad; /* Padding so the layout is identical on 32- and 64-bit. */
+ uint64_t oid[3]; /* IN: 0s if not applicable to the command. */
+ XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: buffer for list/save/restore data. */
+};
+typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tmem_op_t);
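+/*
+ * Sketch of a direct invocation (error handling elided; mirrors what
+ * xc_tmem_control() in libxc does):
+ *
+ *     sysctl.cmd = XEN_SYSCTL_tmem_op;
+ *     sysctl.u.tmem_op.cmd = XEN_SYSCTL_TMEM_OP_FREEZE;
+ *     sysctl.u.tmem_op.cli_id = XEN_SYSCTL_TMEM_OP_ALL_CLIENTS;
+ *     (remaining fields, including pad and oid, zeroed)
+ */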
+
struct xen_sysctl {
uint32_t cmd;
#define XEN_SYSCTL_readconsole 1
#define XEN_SYSCTL_psr_cmt_op 21
#define XEN_SYSCTL_pcitopoinfo 22
#define XEN_SYSCTL_psr_cat_op 23
+#define XEN_SYSCTL_tmem_op 24
uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
union {
struct xen_sysctl_readconsole readconsole;
struct xen_sysctl_coverage_op coverage_op;
struct xen_sysctl_psr_cmt_op psr_cmt_op;
struct xen_sysctl_psr_cat_op psr_cat_op;
+ struct xen_sysctl_tmem_op tmem_op;
uint8_t pad[128];
} u;
};
#define TMEM_SPEC_VERSION 1
/* Commands to HYPERVISOR_tmem_op() */
-#define TMEM_CONTROL 0
+#ifdef __XEN__
+#define TMEM_CONTROL 0 /* Now called XEN_SYSCTL_tmem_op */
+#else
+#undef TMEM_CONTROL
+#endif
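+/*
+ * Out-of-tree consumers lose the definition; the hypervisor keeps it only
+ * so do_tmem_op() can reject the retired op with -EOPNOTSUPP.
+ */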
#define TMEM_NEW_POOL 1
#define TMEM_DESTROY_POOL 2
#define TMEM_PUT_PAGE 4
#endif
/* Privileged commands to HYPERVISOR_tmem_op() */
-#define TMEM_AUTH 101
+#define TMEM_AUTH 101
#define TMEM_RESTORE_NEW 102
-/* Subops for HYPERVISOR_tmem_op(TMEM_CONTROL) */
-#define TMEMC_THAW 0
-#define TMEMC_FREEZE 1
-#define TMEMC_FLUSH 2
-#define TMEMC_DESTROY 3
-#define TMEMC_LIST 4
-#define TMEMC_SET_WEIGHT 5
-#define TMEMC_SET_CAP 6
-#define TMEMC_SET_COMPRESS 7
-#define TMEMC_QUERY_FREEABLE_MB 8
-#define TMEMC_SAVE_BEGIN 10
-#define TMEMC_SAVE_GET_VERSION 11
-#define TMEMC_SAVE_GET_MAXPOOLS 12
-#define TMEMC_SAVE_GET_CLIENT_WEIGHT 13
-#define TMEMC_SAVE_GET_CLIENT_CAP 14
-#define TMEMC_SAVE_GET_CLIENT_FLAGS 15
-#define TMEMC_SAVE_GET_POOL_FLAGS 16
-#define TMEMC_SAVE_GET_POOL_NPAGES 17
-#define TMEMC_SAVE_GET_POOL_UUID 18
-#define TMEMC_SAVE_GET_NEXT_PAGE 19
-#define TMEMC_SAVE_GET_NEXT_INV 20
-#define TMEMC_SAVE_END 21
-#define TMEMC_RESTORE_BEGIN 30
-#define TMEMC_RESTORE_PUT_PAGE 32
-#define TMEMC_RESTORE_FLUSH_PAGE 33
-
/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST 1
#define TMEM_POOL_SHARED 2
uint32_t flags;
uint32_t arg1;
} creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH, TMEM_RESTORE_NEW */
- struct {
- uint32_t subop;
- uint32_t cli_id;
- uint32_t arg1;
- uint32_t arg2;
- uint64_t oid[3];
- tmem_cli_va_t buf;
- } ctrl; /* for cmd == TMEM_CONTROL */
struct {
-
uint64_t oid[3];
uint32_t index;
uint32_t tmem_offset;
#ifndef __XEN_TMEM_H__
#define __XEN_TMEM_H__
+struct xen_sysctl_tmem_op;
+
+extern int tmem_control(struct xen_sysctl_tmem_op *op);
extern void tmem_destroy(void *);
extern void *tmem_relinquish_pages(unsigned int, unsigned int);
extern unsigned long tmem_freeable_pages(void);
switch ( cop.cmd )
{
case TMEM_NEW_POOL: u = XLAT_tmem_op_u_creat; break;
- case TMEM_CONTROL: u = XLAT_tmem_op_u_ctrl; break;
case TMEM_AUTH: u = XLAT_tmem_op_u_creat; break;
case TMEM_RESTORE_NEW:u = XLAT_tmem_op_u_creat; break;
default: u = XLAT_tmem_op_u_gen ; break;
}
-#define XLAT_tmem_op_HNDL_u_ctrl_buf(_d_, _s_) \
- guest_from_compat_handle((_d_)->u.ctrl.buf, (_s_)->u.ctrl.buf)
XLAT_tmem_op(op, &cop);
-#undef XLAT_tmem_op_HNDL_u_ctrl_buf
return 0;
}
#endif
return avc_current_has_perm(SECINITSID_XEN, SECCLASS_XEN2,
XEN2__PSR_CAT_OP, NULL);
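+ /* Replaces the xsm_tmem_control() check removed from tmem.c above. */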
+ case XEN_SYSCTL_tmem_op:
+ return domain_has_xen(current->domain, XEN__TMEM_CONTROL);
+
default:
printk("flask_sysctl: Unknown op %d\n", cmd);
return -EPERM;