sysctl.u.tmem_op.arg1 = arg1;
sysctl.u.tmem_op.arg2 = arg2;
sysctl.u.tmem_op.pad = 0;
- sysctl.u.tmem_op.oid[0] = 0;
- sysctl.u.tmem_op.oid[1] = 0;
- sysctl.u.tmem_op.oid[2] = 0;
+ sysctl.u.tmem_op.oid.oid[0] = 0;
+ sysctl.u.tmem_op.oid.oid[1] = 0;
+ sysctl.u.tmem_op.oid.oid[2] = 0;
if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 )
{
uint32_t cli_id,
uint32_t arg1,
uint32_t arg2,
- struct tmem_oid oid,
+ struct xen_tmem_oid oid,
void *buf)
{
DECLARE_SYSCTL;
sysctl.u.tmem_op.arg1 = arg1;
sysctl.u.tmem_op.arg2 = arg2;
sysctl.u.tmem_op.pad = 0;
- sysctl.u.tmem_op.oid[0] = oid.oid[0];
- sysctl.u.tmem_op.oid[1] = oid.oid[1];
- sysctl.u.tmem_op.oid[2] = oid.oid[2];
+ sysctl.u.tmem_op.oid = oid;
if ( cmd == XEN_SYSCTL_TMEM_OP_LIST && arg1 != 0 )
{
}
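
Note on the hunk above: because the sysctl payload's oid field is now typed as struct xen_tmem_oid rather than a bare uint64_t[3], the three element-wise copies collapse into a single struct assignment, and the zeroing in the earlier hunk addresses the nested array as oid.oid[n]. A minimal sketch of the effect, assuming the new field layout (the tmem_op_like type is an illustrative stand-in, not the real sysctl structure):

    #include <stdint.h>
    #include <string.h>

    struct xen_tmem_oid {
        uint64_t oid[3];
    };

    /* Illustrative stand-in for the sysctl tmem_op payload. */
    struct tmem_op_like {
        struct xen_tmem_oid oid;
    };

    int main(void)
    {
        struct tmem_op_like sysctl_op;
        struct xen_tmem_oid oid = { .oid = { 1, 2, 3 } };

        /* The old code copied the three words one by one; with a
         * struct-typed field a single assignment does the same copy. */
        sysctl_op.oid = oid;

        return memcmp(&sysctl_op.oid, &oid, sizeof(oid)) != 0;
    }
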
for ( j = n_pages; j > 0; j-- )
{
- struct tmem_oid oid;
+ struct xen_tmem_oid oid;
uint32_t index;
int rc;
if ( read_exact(io_fd, &oid, sizeof(oid)) )
int xc_tmem_restore_extra(xc_interface *xch, int dom, int io_fd)
{
uint32_t pool_id;
- struct tmem_oid oid;
+ struct xen_tmem_oid oid;
uint32_t index;
int count = 0;
int checksum = 0;
#define is_persistent(_p) (_p->persistent)
#define is_shared(_p) (_p->shared)
-struct oid {
- uint64_t oid[3];
-};
-
struct tmem_object_root {
- struct oid oid;
+ struct xen_tmem_oid oid;
struct rb_node rb_tree_node; /* protected by pool->pool_rwlock */
unsigned long objnode_count; /* atomicity depends on obj_spinlock */
long pgp_count; /* atomicity depends on obj_spinlock */
};
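
The private struct oid that tmem.c used to define is dropped here in favour of the public type. For context, the definition the patch is assumed to rely on (introduced in the public tmem header by the same series; layout-identical to the old uint64_t oid[3], so no ABI change):

    struct xen_tmem_oid {
        uint64_t oid[3];
    };
    typedef struct xen_tmem_oid xen_tmem_oid_t;
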
struct tmem_object_root *obj;
} us;
- struct oid inv_oid; /* used for invalid list only */
+ struct xen_tmem_oid inv_oid; /* used for invalid list only */
};
pagesize_t size; /* 0 == PAGE_SIZE (pfp), -1 == data invalid,
else compressed data (cdata) */
/************ POOL OBJECT COLLECTION MANIPULATION ROUTINES *******************/
-static int oid_compare(struct oid *left, struct oid *right)
+static int oid_compare(struct xen_tmem_oid *left,
+ struct xen_tmem_oid *right)
{
if ( left->oid[2] == right->oid[2] )
{
return 1;
}
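
Only the head of oid_compare is visible in this hunk; the routine is a three-way lexicographic compare working from oid[2] down to oid[0]. A sketch of the expected shape under the new type (the loop form is illustrative, not the literal Xen body):

    #include <stdint.h>

    struct xen_tmem_oid {
        uint64_t oid[3];
    };

    /* Three-way compare, most-significant word first (rb-tree key order). */
    static int oid_compare_sketch(const struct xen_tmem_oid *left,
                                  const struct xen_tmem_oid *right)
    {
        for ( int i = 2; i >= 0; i-- )
        {
            if ( left->oid[i] < right->oid[i] )
                return -1;
            if ( left->oid[i] > right->oid[i] )
                return 1;
        }
        return 0; /* all three words equal */
    }
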
-static void oid_set_invalid(struct oid *oidp)
+static void oid_set_invalid(struct xen_tmem_oid *oidp)
{
oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
}
-static unsigned oid_hash(struct oid *oidp)
+static unsigned oid_hash(struct xen_tmem_oid *oidp)
{
return (tmem_hash(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
BITS_PER_LONG) & OBJ_HASH_BUCKETS_MASK);
}
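
oid_hash XOR-folds the three 64-bit words before handing the result to tmem_hash, so every word contributes to the bucket choice. A self-contained sketch, assuming tmem_hash behaves like a multiplicative hash over BITS_PER_LONG bits (the hash constant and bucket count here are illustrative, not Xen's literal values):

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_LONG          64
    #define OBJ_HASH_BUCKETS       256
    #define OBJ_HASH_BUCKETS_MASK  (OBJ_HASH_BUCKETS - 1)

    struct xen_tmem_oid {
        uint64_t oid[3];
    };

    /* Illustrative multiplicative hash standing in for Xen's tmem_hash. */
    static unsigned tmem_hash(uint64_t val, unsigned bits)
    {
        return (unsigned)((val * 0x9E3779B97F4A7C15ULL) >> (BITS_PER_LONG - bits));
    }

    static unsigned oid_hash_sketch(const struct xen_tmem_oid *oidp)
    {
        /* XOR-fold all three words so each contributes to the bucket. */
        return tmem_hash(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
                         BITS_PER_LONG) & OBJ_HASH_BUCKETS_MASK;
    }

    int main(void)
    {
        struct xen_tmem_oid o = { .oid = { 1, 2, 3 } };
        printf("bucket %u\n", oid_hash_sketch(&o));
        return 0;
    }
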
/* searches for object==oid in pool, returns locked object if found */
-static struct tmem_object_root * obj_find(struct tmem_pool *pool, struct oid *oidp)
+static struct tmem_object_root * obj_find(struct tmem_pool *pool,
+ struct xen_tmem_oid *oidp)
{
struct rb_node *node;
struct tmem_object_root *obj;
static void obj_free(struct tmem_object_root *obj)
{
struct tmem_pool *pool;
- struct oid old_oid;
+ struct xen_tmem_oid old_oid;
ASSERT_SPINLOCK(&obj->obj_spinlock);
ASSERT(obj != NULL);
* allocate, initialize, and insert a tmem_object_root
* (should be called only if find failed)
*/
-static struct tmem_object_root * obj_alloc(struct tmem_pool *pool, struct oid *oidp)
+static struct tmem_object_root * obj_alloc(struct tmem_pool *pool,
+ struct xen_tmem_oid *oidp)
{
struct tmem_object_root *obj;
}
static int do_tmem_put(struct tmem_pool *pool,
- struct oid *oidp, uint32_t index,
- xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
+ struct xen_tmem_oid *oidp, uint32_t index,
+ xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
{
struct tmem_object_root *obj = NULL;
struct tmem_page_descriptor *pgp = NULL;
return ret;
}
-static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
- xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
+static int do_tmem_get(struct tmem_pool *pool,
+ struct xen_tmem_oid *oidp, uint32_t index,
+ xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
{
struct tmem_object_root *obj;
struct tmem_page_descriptor *pgp;
return rc;
}
-static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t index)
+static int do_tmem_flush_page(struct tmem_pool *pool,
+ struct xen_tmem_oid *oidp, uint32_t index)
{
struct tmem_object_root *obj;
struct tmem_page_descriptor *pgp;
return 1;
}
-static int do_tmem_flush_object(struct tmem_pool *pool, struct oid *oidp)
+static int do_tmem_flush_object(struct tmem_pool *pool,
+ struct xen_tmem_oid *oidp)
{
struct tmem_object_root *obj;
struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
? NULL : client->pools[pool_id];
struct tmem_page_descriptor *pgp;
- struct oid oid;
+ struct xen_tmem_oid oid;
int ret = 0;
struct tmem_handle h;
return ret;
}
-static int tmemc_restore_put_page(int cli_id, uint32_t pool_id, struct oid *oidp,
- uint32_t index, tmem_cli_va_param_t buf, uint32_t bufsize)
+static int tmemc_restore_put_page(int cli_id, uint32_t pool_id,
+ struct xen_tmem_oid *oidp,
+ uint32_t index, tmem_cli_va_param_t buf,
+ uint32_t bufsize)
{
struct client *client = tmem_client_from_cli_id(cli_id);
struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
return do_tmem_put(pool, oidp, index, 0, buf);
}
-static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id, struct oid *oidp,
- uint32_t index)
+static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id,
+ struct xen_tmem_oid *oidp,
+ uint32_t index)
{
struct client *client = tmem_client_from_cli_id(cli_id);
struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
int ret;
uint32_t pool_id = op->pool_id;
uint32_t cmd = op->cmd;
- struct oid *oidp = (struct oid *)(&op->oid[0]);
+ struct xen_tmem_oid *oidp = &op->oid;
if ( op->pad != 0 )
return -EINVAL;
struct tmem_op op;
struct client *client = current->domain->tmem_client;
struct tmem_pool *pool = NULL;
- struct oid *oidp;
+ struct xen_tmem_oid *oidp;
int rc = 0;
bool_t succ_get = 0, succ_put = 0;
bool_t non_succ_get = 0, non_succ_put = 0;
write_unlock(&tmem_rwlock);
read_lock(&tmem_rwlock);
- oidp = (struct oid *)&op.u.gen.oid[0];
+ oidp = container_of(&op.u.gen.oid[0], struct xen_tmem_oid, oid[0]);
switch ( op.cmd )
{
case TMEM_NEW_POOL:
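
A closing note on the last hunk: the guest-visible struct tmem_op still declares its oid as a raw uint64_t[3] (changing it would alter a guest-facing ABI), so the hypercall path cannot simply retype the field. The container_of conversion reinterprets the array as the new struct without a blind pointer cast, and is valid because the array sits at offset zero of struct xen_tmem_oid. A minimal standalone sketch of the idiom, using the usual definition of container_of (the gen_like payload is a hypothetical stand-in for op.u.gen):

    #include <stddef.h>
    #include <stdint.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct xen_tmem_oid {
        uint64_t oid[3];
    };

    /* Hypothetical stand-in for the guest-visible tmem_op generic payload,
     * which keeps uint64_t oid[3] so the ABI stays unchanged. */
    struct gen_like {
        uint64_t oid[3];
        uint32_t index;
    };

    int main(void)
    {
        struct gen_like g = { .oid = { 1, 2, 3 }, .index = 0 };

        /* offsetof(struct xen_tmem_oid, oid[0]) is 0, so this is in effect
         * a cast, but one tied to the named type and member rather than a
         * blind pointer conversion. */
        struct xen_tmem_oid *oidp =
            container_of(&g.oid[0], struct xen_tmem_oid, oid[0]);

        return oidp->oid[2] == 3 ? 0 : 1;
    }
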