*/
#include "xc_private.h"
+#include <assert.h>
#include <xen/tmem.h>
static int do_tmem_op(xc_interface *xch, tmem_op_t *op)
sysctl.u.tmem_op.oid.oid[1] = 0;
sysctl.u.tmem_op.oid.oid[2] = 0;
- if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 )
+ if ( cmd == XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO )
+ HYPERCALL_BOUNCE_SET_DIR(buf, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+ if ( arg1 )
{
if ( buf == NULL )
{
rc = do_sysctl(xch, &sysctl);
- if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 )
- xc_hypercall_bounce_post(xch, buf);
+ if ( arg1 )
+ xc_hypercall_bounce_post(xch, buf);
return rc;
}
{
int marker = field_marker;
int i, j;
- uint32_t max_pools, version;
- uint32_t weight, flags;
- uint32_t pool_id;
+ uint32_t flags;
uint32_t minusone = -1;
+ uint32_t pool_id;
struct tmem_handle *h;
+ xen_tmem_client_t info;
if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_BEGIN,dom,live,0,NULL) <= 0 )
return 0;
if ( write_exact(io_fd, &marker, sizeof(marker)) )
return -1;
- version = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION,0,0,0,NULL);
- if ( write_exact(io_fd, &version, sizeof(version)) )
- return -1;
- max_pools = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS,0,0,0,NULL);
- if ( write_exact(io_fd, &max_pools, sizeof(max_pools)) )
- return -1;
- if ( version == -1 || max_pools == -1 )
- return -1;
- if ( write_exact(io_fd, &minusone, sizeof(minusone)) )
- return -1;
- flags = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS,dom,0,0,NULL);
- if ( write_exact(io_fd, &flags, sizeof(flags)) )
- return -1;
- weight = xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT,dom,0,0,NULL);
- if ( write_exact(io_fd, &weight, sizeof(weight)) )
+
+ if ( xc_tmem_control(xch, 0 /* pool_id */,
+ XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO,
+ dom /* cli_id */, sizeof(info) /* arg1 */, 0 /* arg2 */,
+ &info) < 0 )
return -1;
- if ( flags == -1 || weight == -1 )
+
+ if ( write_exact(io_fd, &info, sizeof(info)) )
return -1;
if ( write_exact(io_fd, &minusone, sizeof(minusone)) )
return -1;
- for ( i = 0; i < max_pools; i++ )
+ for ( i = 0; i < info.maxpools; i++ )
{
uint64_t uuid[2];
uint32_t n_pages;
int xc_tmem_restore(xc_interface *xch, int dom, int io_fd)
{
- uint32_t this_max_pools, this_version;
uint32_t pool_id;
uint32_t minusone;
- uint32_t weight, flags;
+ uint32_t flags;
+ xen_tmem_client_t info;
int checksum = 0;
- if ( read_exact(io_fd, &this_version, sizeof(this_version)) )
- return -1;
- if ( read_exact(io_fd, &this_max_pools, sizeof(this_max_pools)) )
- return -1;
- /* FIXME check here to ensure no version mismatch or maxpools mismatch */
- if ( read_exact(io_fd, &minusone, sizeof(minusone)) )
- return -1;
- if ( minusone != -1 )
+ if ( read_exact(io_fd, &info, sizeof(info)) )
return -1;
if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN,dom,0,0,NULL) < 0 )
return -1;
- if ( read_exact(io_fd, &flags, sizeof(flags)) )
- return -1;
- if ( flags & TMEM_CLIENT_COMPRESS )
- if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SET_COMPRESS,dom,1,0,NULL) < 0 )
- return -1;
- if ( flags & TMEM_CLIENT_FROZEN )
- if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_FREEZE,dom,0,0,NULL) < 0 )
- return -1;
- if ( read_exact(io_fd, &weight, sizeof(weight)) )
- return -1;
- if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SET_WEIGHT,dom,0,0,NULL) < 0 )
+
+ if ( xc_tmem_control(xch, 0 /* pool_id */,
+ XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO,
+ dom /* cli_id */, sizeof(info) /* arg1 */, 0 /* arg2 */,
+ &info) < 0 )
return -1;
+
if ( read_exact(io_fd, &minusone, sizeof(minusone)) )
return -1;
while ( read_exact(io_fd, &pool_id, sizeof(pool_id)) == 0 && pool_id != -1 )
return rc;
}
-static int32_t tmem_setop_from_string(char *set_name)
+static int32_t tmem_setop_from_string(char *set_name, uint32_t val,
+ xen_tmem_client_t *info)
{
if (!strcmp(set_name, "weight"))
- return XEN_SYSCTL_TMEM_OP_SET_WEIGHT;
+ info->weight = val;
else if (!strcmp(set_name, "compress"))
- return XEN_SYSCTL_TMEM_OP_SET_COMPRESS;
+        info->flags.u.compress = !!val;
else
return -1;
+
+ return 0;
}
int libxl_tmem_set(libxl_ctx *ctx, uint32_t domid, char* name, uint32_t set)
{
int r, rc;
- int32_t subop = tmem_setop_from_string(name);
+ xen_tmem_client_t info;
GC_INIT(ctx);
- if (subop == -1) {
+ r = xc_tmem_control(ctx->xch, -1 /* pool_id */,
+ XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO,
+ domid, sizeof(info), 0 /* arg2 */, &info);
+ if (r < 0) {
+        LOGE(ERROR, "Cannot get tmem data!");
+ rc = ERROR_FAIL;
+ goto out;
+ }
+ rc = tmem_setop_from_string(name, set, &info);
+ if (rc == -1) {
LOGEV(ERROR, -1, "Invalid set, valid sets are <weight|compress>");
rc = ERROR_INVAL;
goto out;
}
- r = xc_tmem_control(ctx->xch, -1, subop, domid, set, 0, NULL);
+ r = xc_tmem_control(ctx->xch, -1 /* pool_id */,
+ XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO,
+ domid, sizeof(info), 0 /* arg2 */, &info);
if (r < 0) {
LOGE(ERROR, "Can not set tmem %s", name);
rc = ERROR_FAIL;
case XEN_SYSCTL_TMEM_OP_THAW:
case XEN_SYSCTL_TMEM_OP_FREEZE:
case XEN_SYSCTL_TMEM_OP_DESTROY:
- case XEN_SYSCTL_TMEM_OP_SET_WEIGHT:
- case XEN_SYSCTL_TMEM_OP_SET_COMPRESS:
default:
break;
}
rcu_unlock_domain(d);
client->cli_id = cli_id;
+ client->info.version = TMEM_SPEC_VERSION;
+ client->info.maxpools = MAX_POOLS_PER_DOMAIN;
client->info.flags.u.compress = tmem_compression_enabled();
client->shared_auth_required = tmem_shared_auth();
for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++)
return 0;
}
-static int __tmemc_set_var(struct client *client, uint32_t subop, uint32_t arg1)
+static int __tmemc_set_client_info(struct client *client,
+ XEN_GUEST_HANDLE(xen_tmem_client_t) buf)
{
- domid_t cli_id = client->cli_id;
+ domid_t cli_id;
uint32_t old_weight;
+ xen_tmem_client_t info = { };
- switch (subop)
+ ASSERT(client);
+
+ if ( copy_from_guest(&info, buf, 1) )
+ return -EFAULT;
+
+ if ( info.version != TMEM_SPEC_VERSION )
+ return -EOPNOTSUPP;
+
+ if ( info.maxpools > MAX_POOLS_PER_DOMAIN )
+ return -ERANGE;
+
+ cli_id = client->cli_id;
+ if ( info.weight != client->info.weight )
{
- case XEN_SYSCTL_TMEM_OP_SET_WEIGHT:
old_weight = client->info.weight;
- client->info.weight = arg1;
+ client->info.weight = info.weight;
tmem_client_info("tmem: weight set to %d for %s=%d\n",
- arg1, tmem_cli_id_str, cli_id);
+ info.weight, tmem_cli_id_str, cli_id);
atomic_sub(old_weight,&tmem_global.client_weight_total);
atomic_add(client->info.weight,&tmem_global.client_weight_total);
- break;
- case XEN_SYSCTL_TMEM_OP_SET_COMPRESS:
- client->info.flags.u.compress = arg1 ? 1 : 0;
+ }
+
+ if ( info.flags.u.compress != client->info.flags.u.compress )
+ {
+ client->info.flags.u.compress = info.flags.u.compress;
tmem_client_info("tmem: compression %s for %s=%d\n",
- arg1 ? "enabled" : "disabled",tmem_cli_id_str,cli_id);
- break;
- default:
- tmem_client_warn("tmem: unknown subop %d for tmemc_set_var\n", subop);
- return -1;
+ info.flags.u.compress ? "enabled" : "disabled",
+                         tmem_cli_id_str, cli_id);
}
return 0;
}
-static int tmemc_set_var(domid_t cli_id, uint32_t subop, uint32_t arg1)
+static int tmemc_set_client_info(domid_t cli_id,
+ XEN_GUEST_HANDLE(xen_tmem_client_t) info)
{
struct client *client;
- int ret = -1;
+ int ret = -ENOENT;
if ( cli_id == TMEM_CLI_ID_NULL )
{
list_for_each_entry(client,&tmem_global.client_list,client_list)
{
- ret = __tmemc_set_var(client, subop, arg1);
+ ret = __tmemc_set_client_info(client, info);
if (ret)
break;
}
{
client = tmem_client_from_cli_id(cli_id);
if ( client )
- ret = __tmemc_set_var(client, subop, arg1);
+ ret = __tmemc_set_client_info(client, info);
}
return ret;
}
-static int tmemc_save_subop(int cli_id, uint32_t pool_id,
- uint32_t subop, tmem_cli_va_param_t buf, uint32_t arg1)
+static int tmemc_get_client_info(int cli_id,
+ XEN_GUEST_HANDLE(xen_tmem_client_t) info)
+{
+ struct client *client = tmem_client_from_cli_id(cli_id);
+
+ if ( client )
+ {
+ if ( copy_to_guest(info, &client->info, 1) )
+ return -EFAULT;
+ }
+ else
+ {
+ static const xen_tmem_client_t generic = {
+ .version = TMEM_SPEC_VERSION,
+ .maxpools = MAX_POOLS_PER_DOMAIN
+ };
+
+ if ( copy_to_guest(info, &generic, 1) )
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int tmemc_save_subop(int cli_id, uint32_t pool_id, uint32_t subop,
+ XEN_GUEST_HANDLE_PARAM(char) buf, uint32_t arg1)
{
struct client *client = tmem_client_from_cli_id(cli_id);
struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
switch(subop)
{
- case XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION:
- rc = TMEM_SPEC_VERSION;
- break;
- case XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS:
- rc = MAX_POOLS_PER_DOMAIN;
- break;
- case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT:
- if ( client == NULL )
- break;
- rc = client->info.weight == -1 ? -2 : client->info.weight;
- break;
- case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS:
- if ( client == NULL )
- break;
- rc = (client->info.flags.u.compress ? TMEM_CLIENT_COMPRESS : 0 ) |
- (client->was_frozen ? TMEM_CLIENT_FROZEN : 0 );
- break;
case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS:
if ( pool == NULL )
break;
ret = tmemc_list(op->cli_id,
guest_handle_cast(op->u.buf, char), op->arg1, op->arg2);
break;
- case XEN_SYSCTL_TMEM_OP_SET_WEIGHT:
- case XEN_SYSCTL_TMEM_OP_SET_COMPRESS:
- ret = tmemc_set_var(op->cli_id, cmd, op->arg1);
+ case XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO:
+ ret = tmemc_set_client_info(op->cli_id, op->u.client);
break;
case XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB:
ret = tmem_freeable_pages() >> (20 - PAGE_SHIFT);
break;
- case XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION:
- case XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS:
- case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT:
- case XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS:
+ case XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO:
+ ret = tmemc_get_client_info(op->cli_id, op->u.client);
+ break;
case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS:
case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES:
case XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID:
#include "physdev.h"
#include "tmem.h"
-#define XEN_SYSCTL_INTERFACE_VERSION 0x0000000D
+#define XEN_SYSCTL_INTERFACE_VERSION 0x0000000E
/*
* Read console content from Xen buffer ring.
#define XEN_SYSCTL_TMEM_OP_FLUSH 2
#define XEN_SYSCTL_TMEM_OP_DESTROY 3
#define XEN_SYSCTL_TMEM_OP_LIST 4
-#define XEN_SYSCTL_TMEM_OP_SET_WEIGHT 5
-#define XEN_SYSCTL_TMEM_OP_SET_COMPRESS 7
+#define XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO 5
+#define XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO 6
#define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB 8
#define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN 10
-#define XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION 11
-#define XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS 12
-#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT 13
-#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS 15
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS 16
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES 17
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID 18
xen_tmem_oid_t oid;
};
+/*
+ * XEN_SYSCTL_TMEM_OP_[GET,SET]_CLIENT_INFO uses the 'client' handle in
+ * xen_sysctl_tmem_op with this structure, which is mostly used during
+ * migration.
+ */
struct xen_tmem_client {
+    uint32_t version;   /* If this does not match TMEM_SPEC_VERSION, the set
+                           operation fails with XEN_EOPNOTSUPP. */
+    uint32_t maxpools;  /* If this exceeds what the hypervisor supports, the
+                           set operation fails with XEN_ERANGE. */
union { /* See TMEM_CLIENT_[COMPRESS,FROZEN] */
uint32_t raw;
struct {
} flags;
uint32_t weight;
};
+typedef struct xen_tmem_client xen_tmem_client_t;
+DEFINE_XEN_GUEST_HANDLE(xen_tmem_client_t);
struct xen_sysctl_tmem_op {
uint32_t cmd; /* IN: XEN_SYSCTL_TMEM_OP_* . */
uint32_t pad; /* Padding so structure is the same under 32 and 64. */
xen_tmem_oid_t oid; /* IN: If not applicable to command use 0s. */
union {
- XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save and restore ops. */
+ XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save/restore */
+ XEN_GUEST_HANDLE_64(xen_tmem_client_t) client; /* IN/OUT for */
+                        /* XEN_SYSCTL_TMEM_OP_[GET,SET]_CLIENT_INFO. */
} u;
};
typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
struct list_head ephemeral_page_list;
long eph_count, eph_count_max;
domid_t cli_id;
- struct xen_tmem_client info;
+ xen_tmem_client_t info;
bool_t shared_auth_required;
/* For save/restore/migration. */
bool_t was_frozen;
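
As a quick illustration, here is a minimal sketch (not part of the patch) of how a toolstack caller could use the two consolidated subops through xc_tmem_control(), following the same calling convention as the xc_tmem_save()/libxl_tmem_set() hunks above; the helper name is hypothetical and error handling is reduced to the bare minimum.

/*
 * Hedged sketch: read a client's consolidated tmem info, tweak one
 * field, and write it back.  It reuses only the calling convention
 * visible in the hunks above (pool_id, subop, cli_id, arg1 =
 * sizeof(info), arg2, buf); the xc_interface handle is assumed to
 * have been opened by the caller.
 */
#include <stdio.h>
#include <xenctrl.h>

static int tmem_bump_weight(xc_interface *xch, uint32_t domid,
                            uint32_t new_weight)
{
    xen_tmem_client_t info;

    /* Fetch the whole client record in one call. */
    if ( xc_tmem_control(xch, -1 /* pool_id */,
                         XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO,
                         domid /* cli_id */, sizeof(info) /* arg1 */,
                         0 /* arg2 */, &info) < 0 )
        return -1;

    printf("dom%u: weight=%u compress=%u\n", domid, info.weight,
           (unsigned int)info.flags.u.compress);

    /* Change only the field of interest and push the record back. */
    info.weight = new_weight;

    return xc_tmem_control(xch, -1 /* pool_id */,
                           XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO,
                           domid /* cli_id */, sizeof(info) /* arg1 */,
                           0 /* arg2 */, &info);
}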