*/
#ifdef __XEN__
-#include <xen/tmem_xen.h> /* host-specific (eg Xen) code goes here */
+#include <xen/tmem_xen.h> /* Host-specific (e.g. Xen) code goes here. */
#endif
#include <public/sysctl.h>
#define TMEM_SPEC_VERSION 1
-/* global statistics (none need to be locked) */
+/* Global statistics (none need to be locked). */
static unsigned long total_tmem_ops = 0;
static unsigned long errored_tmem_ops = 0;
static unsigned long total_flush_pool = 0;
bool_t compress;
bool_t frozen;
bool_t shared_auth_required;
- /* for save/restore/migration */
+ /* For save/restore/migration. */
bool_t live_migrating;
bool_t was_frozen;
struct list_head persistent_invalidated_list;
struct tmem_page_descriptor *cur_pgp;
- /* statistics collection */
+ /* Statistics collection. */
unsigned long compress_poor, compress_nomem;
unsigned long compressed_pages;
uint64_t compressed_sum_size;
uint64_t total_cycles;
unsigned long succ_pers_puts, succ_eph_gets, succ_pers_gets;
- /* shared pool authentication */
+ /* Shared pool authentication. */
uint64_t shared_auth_uuid[MAX_GLOBAL_SHARED_POOLS][2];
};
};
#define POOL_PAGESHIFT (PAGE_SHIFT - 12)
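+/* Shift from 4K units to machine pages (0 when PAGE_SIZE is 4K). */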
-#define OBJ_HASH_BUCKETS 256 /* must be power of two */
+#define OBJ_HASH_BUCKETS 256 /* Must be power of two. */
#define OBJ_HASH_BUCKETS_MASK (OBJ_HASH_BUCKETS-1)
struct tmem_pool {
bool_t persistent;
bool_t is_dying;
struct client *client;
- uint64_t uuid[2]; /* 0 for private, non-zero for shared */
+ uint64_t uuid[2]; /* 0 for private, non-zero for shared. */
uint32_t pool_id;
rwlock_t pool_rwlock;
- struct rb_root obj_rb_root[OBJ_HASH_BUCKETS]; /* protected by pool_rwlock */
- struct list_head share_list; /* valid if shared */
- int shared_count; /* valid if shared */
- /* for save/restore/migration */
+ struct rb_root obj_rb_root[OBJ_HASH_BUCKETS]; /* Protected by pool_rwlock. */
+ struct list_head share_list; /* Valid if shared. */
+ int shared_count; /* Valid if shared. */
+ /* For save/restore/migration. */
struct list_head persistent_page_list;
struct tmem_page_descriptor *cur_pgp;
- /* statistics collection */
+ /* Statistics collection. */
atomic_t pgp_count;
int pgp_count_max;
- long obj_count; /* atomicity depends on pool_rwlock held for write */
+ long obj_count; /* Atomicity depends on pool_rwlock held for write. */
long obj_count_max;
unsigned long objnode_count, objnode_count_max;
uint64_t sum_life_cycles;
struct tmem_object_root {
struct xen_tmem_oid oid;
- struct rb_node rb_tree_node; /* protected by pool->pool_rwlock */
- unsigned long objnode_count; /* atomicity depends on obj_spinlock */
- long pgp_count; /* atomicity depends on obj_spinlock */
- struct radix_tree_root tree_root; /* tree of pages within object */
+ struct rb_node rb_tree_node; /* Protected by pool->pool_rwlock. */
+ unsigned long objnode_count; /* Atomicity depends on obj_spinlock. */
+ long pgp_count; /* Atomicity depends on obj_spinlock. */
+ struct radix_tree_root tree_root; /* Tree of pages within object. */
struct tmem_pool *pool;
domid_t last_client;
spinlock_t obj_spinlock;
};
struct tmem_object_root *obj;
} us;
- struct xen_tmem_oid inv_oid; /* used for invalid list only */
+ struct xen_tmem_oid inv_oid; /* Used for invalid list only. */
};
pagesize_t size; /* 0 == PAGE_SIZE (pfp), -1 == data invalid,
- else compressed data (cdata) */
+ else compressed data (cdata). */
uint32_t index;
- /* must hold pcd_tree_rwlocks[firstbyte] to use pcd pointer/siblings */
- uint16_t firstbyte; /* NON_SHAREABLE->pfp otherwise->pcd */
- bool_t eviction_attempted; /* CHANGE TO lifetimes? (settable) */
+ /* Must hold pcd_tree_rwlocks[firstbyte] to use pcd pointer/siblings. */
+ uint16_t firstbyte; /* NOT_SHAREABLE->pfp otherwise->pcd. */
+ bool_t eviction_attempted; /* CHANGE TO lifetimes? (settable). */
struct list_head pcd_siblings;
union {
- struct page_info *pfp; /* page frame pointer */
- char *cdata; /* compressed data */
- struct tmem_page_content_descriptor *pcd; /* page dedup */
+ struct page_info *pfp; /* Page frame pointer. */
+ char *cdata; /* Compressed data. */
+ struct tmem_page_content_descriptor *pcd; /* Page dedup. */
};
union {
uint64_t timestamp;
- uint32_t pool_id; /* used for invalid list only */
+ uint32_t pool_id; /* Used for invalid list only. */
};
};
struct tmem_page_content_descriptor {
union {
- struct page_info *pfp; /* page frame pointer */
- char *cdata; /* if compression_enabled */
- char *tze; /* if !compression_enabled, trailing zeroes eliminated */
+ struct page_info *pfp; /* Page frame pointer. */
+ char *cdata; /* If compression_enabled. */
+ char *tze; /* If !compression_enabled, trailing zeroes eliminated. */
};
struct list_head pgp_list;
struct rb_node pcd_rb_tree_node;
uint32_t pgp_ref_count;
- pagesize_t size; /* if compression_enabled -> 0<size<PAGE_SIZE (*cdata)
+ pagesize_t size; /* If compression_enabled -> 0<size<PAGE_SIZE (*cdata)
* else if tze, 0<=size<PAGE_SIZE, rounded up to mult of 8
- * else PAGE_SIZE -> *pfp */
+ * else PAGE_SIZE -> *pfp. */
};
-struct rb_root pcd_tree_roots[256]; /* choose based on first byte of page */
-rwlock_t pcd_tree_rwlocks[256]; /* poor man's concurrency for now */
+struct rb_root pcd_tree_roots[256]; /* Choose based on first byte of page. */
+rwlock_t pcd_tree_rwlocks[256]; /* Poor man's concurrency for now. */
-static LIST_HEAD(global_ephemeral_page_list); /* all pages in ephemeral pools */
+static LIST_HEAD(global_ephemeral_page_list); /* All pages in ephemeral pools. */
static LIST_HEAD(global_client_list);
unsigned long tmem_page_list_pages = 0;
DEFINE_RWLOCK(tmem_rwlock);
-static DEFINE_SPINLOCK(eph_lists_spinlock); /* protects global AND clients */
+static DEFINE_SPINLOCK(eph_lists_spinlock); /* Protects global AND clients. */
static DEFINE_SPINLOCK(pers_lists_spinlock);
#define ASSERT_SPINLOCK(_l) ASSERT(spin_is_locked(_l))
#define ASSERT_WRITELOCK(_l) ASSERT(rw_is_write_locked(_l))
-/* global counters (should use long_atomic_t access) */
-static long global_eph_count = 0; /* atomicity depends on eph_lists_spinlock */
+/* Global counters (should use long_atomic_t access). */
+static long global_eph_count = 0; /* Atomicity depends on eph_lists_spinlock. */
static atomic_t global_obj_count = ATOMIC_INIT(0);
static atomic_t global_pgp_count = ATOMIC_INIT(0);
static atomic_t global_pcd_count = ATOMIC_INIT(0);
return tmem_mempool != NULL;
}
-/* persistent pools are per-domain */
+/* Persistent pools are per-domain. */
static void *tmem_persistent_pool_page_get(unsigned long size)
{
struct page_info *pi;
}
/*
- * Page content descriptor manipulation routines
+ * Page content descriptor manipulation routines.
*/
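+/* NOT_SHAREABLE in pgp->firstbyte: the page is not deduplicated (pfp used directly). */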
#define NOT_SHAREABLE ((uint16_t)-1UL)
return ret;
}
-/* ensure pgp no longer points to pcd, nor vice-versa */
-/* take pcd rwlock unless have_pcd_rwlock is set, always unlock when done */
+/*
+ * Ensure pgp no longer points to pcd, nor vice-versa.
+ * Take pcd rwlock unless have_pcd_rwlock is set, always unlock when done.
+ */
static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool *pool, bool_t have_pcd_rwlock)
{
struct tmem_page_content_descriptor *pcd = pgp->pcd;
return;
}
- /* no more references to this pcd, recycle it and the physical page */
+ /* No more references to this pcd, recycle it and the physical page. */
ASSERT(list_empty(&pcd->pgp_list));
pcd->pfp = NULL;
- /* remove pcd from rbtree */
+ /* Remove pcd from rbtree. */
rb_erase(&pcd->pcd_rb_tree_node,&pcd_tree_roots[firstbyte]);
- /* reinit the struct for safety for now */
+ /* Reinit the struct for safety for now. */
RB_CLEAR_NODE(&pcd->pcd_rb_tree_node);
- /* now free up the pcd memory */
+ /* Now free up the pcd memory. */
tmem_free(pcd, NULL);
atomic_dec_and_assert(global_pcd_count);
if ( pgp_size != 0 && pcd_size < PAGE_SIZE )
{
- /* compressed data */
+ /* Compressed data. */
tmem_free(pcd_cdata, pool);
pcd_tot_csize -= pcd_csize;
}
else if ( pcd_size != PAGE_SIZE )
{
- /* trailing zero data */
+ /* Trailing zero data. */
pcd_tot_tze_size -= pcd_size;
if ( pcd_size )
tmem_free(pcd_tze, pool);
} else {
- /* real physical page */
+ /* Real physical page. */
if ( tmem_tze_enabled() )
pcd_tot_tze_size -= PAGE_SIZE;
if ( tmem_compression_enabled() )
}
write_lock(&pcd_tree_rwlocks[firstbyte]);
- /* look for page match */
+ /* Look for page match. */
root = &pcd_tree_roots[firstbyte];
new = &(root->rb_node);
while ( *new )
{
pcd = container_of(*new, struct tmem_page_content_descriptor, pcd_rb_tree_node);
parent = *new;
- /* compare new entry and rbtree entry, set cmp accordingly */
+ /* Compare new entry and rbtree entry, set cmp accordingly. */
if ( cdata != NULL )
{
if ( pcd->size < PAGE_SIZE )
- /* both new entry and rbtree entry are compressed */
+ /* Both new entry and rbtree entry are compressed. */
cmp = tmem_pcd_cmp(cdata,csize,pcd->cdata,pcd->size);
else
- /* new entry is compressed, rbtree entry is not */
+ /* New entry is compressed, rbtree entry is not. */
cmp = -1;
} else if ( pcd->size < PAGE_SIZE )
- /* rbtree entry is compressed, rbtree entry is not */
+ /* New entry is not compressed, rbtree entry is. */
cmp = 1;
else if ( tmem_tze_enabled() ) {
if ( pcd->size < PAGE_SIZE )
- /* both new entry and rbtree entry are trailing zero */
+ /* Both new entry and rbtree entry are trailing zero. */
cmp = tmem_tze_pfp_cmp(pgp->pfp,pfp_size,pcd->tze,pcd->size);
else
- /* new entry is trailing zero, rbtree entry is not */
+ /* New entry is trailing zero, rbtree entry is not. */
cmp = tmem_tze_pfp_cmp(pgp->pfp,pfp_size,pcd->pfp,PAGE_SIZE);
} else {
- /* both new entry and rbtree entry are full physical pages */
+ /* Both new entry and rbtree entry are full physical pages. */
ASSERT(pgp->pfp != NULL);
ASSERT(pcd->pfp != NULL);
cmp = tmem_page_cmp(pgp->pfp,pcd->pfp);
}
- /* walk tree or match depending on cmp */
+ /* Walk tree or match depending on cmp. */
if ( cmp < 0 )
new = &((*new)->rb_left);
else if ( cmp > 0 )
new = &((*new)->rb_right);
else
{
- /* match! if not compressed, free the no-longer-needed page */
- /* but if compressed, data is assumed static so don't free! */
+ /*
+ * Match! If not compressed, free the no-longer-needed page,
+ * but if compressed, the data is assumed static, so don't free!
+ */
if ( cdata == NULL )
tmem_free_page(pgp->us.obj->pool,pgp->pfp);
deduped_puts++;
}
}
- /* exited while loop with no match, so alloc a pcd and put it in the tree */
+ /* Exited while loop with no match, so alloc a pcd and put it in the tree. */
if ( (pcd = tmem_malloc(sizeof(struct tmem_page_content_descriptor), NULL)) == NULL )
{
ret = -ENOMEM;
}
}
atomic_inc_and_max(global_pcd_count);
- RB_CLEAR_NODE(&pcd->pcd_rb_tree_node); /* is this necessary */
- INIT_LIST_HEAD(&pcd->pgp_list); /* is this necessary */
+ RB_CLEAR_NODE(&pcd->pcd_rb_tree_node); /* Is this necessary? */
+ INIT_LIST_HEAD(&pcd->pgp_list); /* Is this necessary? */
pcd->pgp_ref_count = 0;
if ( cdata != NULL )
{
/************ PAGE DESCRIPTOR MANIPULATION ROUTINES *******************/
-/* allocate a struct tmem_page_descriptor and associate it with an object */
+/* Allocate a struct tmem_page_descriptor and associate it with an object. */
static struct tmem_page_descriptor *pgp_alloc(struct tmem_object_root *obj)
{
struct tmem_page_descriptor *pgp;
if ( pgp->pfp == NULL )
return;
if ( tmem_dedup_enabled() && pgp->firstbyte != NOT_SHAREABLE )
- pcd_disassociate(pgp,pool,0); /* pgp->size lost */
+ pcd_disassociate(pgp,pool,0); /* pgp->size lost. */
else if ( pgp_size )
tmem_free(pgp->cdata, pool);
else
__pgp_free(pgp, pool);
}
-/* remove pgp from global/pool/client lists and free it */
+/* Remove pgp from global/pool/client lists and free it. */
static void pgp_delist_free(struct tmem_page_descriptor *pgp)
{
struct client *client;
client = pgp->us.obj->pool->client;
ASSERT(client != NULL);
- /* Delist pgp */
+ /* Delist pgp. */
if ( !is_persistent(pgp->us.obj->pool) )
{
spin_lock(&eph_lists_spinlock);
life = get_cycles() - pgp->timestamp;
pgp->us.obj->pool->sum_life_cycles += life;
- /* free pgp */
+ /* Free pgp. */
pgp_free(pgp);
}
-/* called only indirectly by radix_tree_destroy */
+/* Called only indirectly by radix_tree_destroy. */
static void pgp_destroy(void *v)
{
struct tmem_page_descriptor *pgp = (struct tmem_page_descriptor *)v;
/************ RADIX TREE NODE MANIPULATION ROUTINES *******************/
-/* called only indirectly from radix_tree_insert */
+/* Called only indirectly from radix_tree_insert. */
static struct radix_tree_node *rtn_alloc(void *arg)
{
struct tmem_object_node *objnode;
return &objnode->rtn;
}
-/* called only indirectly from radix_tree_delete/destroy */
+/* Called only indirectly from radix_tree_delete/destroy. */
static void rtn_free(struct radix_tree_node *rtn, void *arg)
{
struct tmem_pool *pool;
BITS_PER_LONG) & OBJ_HASH_BUCKETS_MASK);
}
-/* searches for object==oid in pool, returns locked object if found */
+/* Searches for object==oid in pool, returns locked object if found. */
static struct tmem_object_root * obj_find(struct tmem_pool *pool,
struct xen_tmem_oid *oidp)
{
obj = container_of(node, struct tmem_object_root, rb_tree_node);
switch ( oid_compare(&obj->oid, oidp) )
{
- case 0: /* equal */
+ case 0: /* Equal. */
if ( !spin_trylock(&obj->obj_spinlock) )
{
read_unlock(&pool->pool_rwlock);
return NULL;
}
-/* free an object that has no more pgps in it */
+/* Free an object that has no more pgps in it. */
static void obj_free(struct tmem_object_root *obj)
{
struct tmem_pool *pool;
ASSERT(pool != NULL);
ASSERT(pool->client != NULL);
ASSERT_WRITELOCK(&pool->pool_rwlock);
- if ( obj->tree_root.rnode != NULL ) /* may be a "stump" with no leaves */
+ if ( obj->tree_root.rnode != NULL ) /* May be a "stump" with no leaves. */
radix_tree_destroy(&obj->tree_root, pgp_destroy);
ASSERT((long)obj->objnode_count == 0);
ASSERT(obj->tree_root.rnode == NULL);
}
/*
- * allocate, initialize, and insert an tmem_object_root
- * (should be called only if find failed)
+ * Allocate, initialize, and insert a tmem_object_root
+ * (should be called only if find failed).
*/
static struct tmem_object_root * obj_alloc(struct tmem_pool *pool,
struct xen_tmem_oid *oidp)
return obj;
}
-/* free an object after destroying any pgps in it */
+/* Free an object after destroying any pgps in it. */
static void obj_destroy(struct tmem_object_root *obj)
{
ASSERT_WRITELOCK(&obj->pool->pool_rwlock);
obj_free(obj);
}
-/* destroys all objs in a pool, or only if obj->last_client matches cli_id */
+/* Destroys all objs in a pool, or only if obj->last_client matches cli_id. */
static void pool_destroy_objs(struct tmem_pool *pool, domid_t cli_id)
{
struct rb_node *node;
return 0;
}
-/* reassign "ownership" of the pool to another client that shares this pool */
+/* Reassign "ownership" of the pool to another client that shares this pool. */
static void shared_pool_reassign(struct tmem_pool *pool)
{
struct share_list *sl;
pool->pool_id = poolid;
}
-/* destroy all objects with last_client same as passed cli_id,
- remove pool's cli_id from list of sharers of this pool */
+/*
+ * Destroy all objects with last_client same as passed cli_id,
+ * remove pool's cli_id from list of sharers of this pool.
+ */
static int shared_pool_quit(struct tmem_pool *pool, domid_t cli_id)
{
struct share_list *sl;
return -1;
}
-/* flush all data (owned by cli_id) from a pool and, optionally, free it */
+/* Flush all data (owned by cli_id) from a pool and, optionally, free it. */
static void pool_flush(struct tmem_pool *pool, domid_t cli_id)
{
ASSERT(pool != NULL);
xfree(client);
}
-/* flush all data from a client and, optionally, free it */
+/* Flush all data from a client and, optionally, free it. */
static void client_flush(struct client *client)
{
int i;
goto found;
}
}
- /* global_ephemeral_page_list is empty, so we bail out. */
+ /* The global_ephemeral_page_list is empty, so we bail out. */
spin_unlock(&eph_lists_spinlock);
goto out;
found:
- /* Delist */
+ /* Delist. */
list_del_init(&pgp->us.client_eph_pages);
client->eph_count--;
list_del_init(&pgp->global_eph_pages);
pcd_disassociate(pgp,pool,1);
}
- /* pgp already delist, so call pgp_free directly */
+ /* pgp is already delisted, so call pgp_free directly. */
pgp_free(pgp);
if ( obj->pgp_count == 0 )
{
ASSERT(pool != NULL);
client = pool->client;
if ( client->live_migrating )
- goto failed_dup; /* no dups allowed when migrating */
- /* can we successfully manipulate pgp to change out the data? */
+ goto failed_dup; /* No dups allowed when migrating. */
+ /* Can we successfully manipulate pgp to change out the data? */
if ( client->compress && pgp->size != 0 )
{
ret = do_tmem_put_compress(pgp, cmfn, clibuf);
}
done:
- /* successfully replaced data, clean up and return success */
+ /* Successfully replaced data, clean up and return success. */
if ( is_shared(pool) )
obj->last_client = client->cli_id;
spin_unlock(&obj->obj_spinlock);
goto cleanup;
failed_dup:
- /* couldn't change out the data, flush the old data and return
- * -ENOSPC instead of -ENOMEM to differentiate failed _dup_ put */
+ /*
+ * Couldn't change out the data, flush the old data and return
+ * -ENOSPC instead of -ENOMEM to differentiate failed _dup_ put.
+ */
ret = -ENOSPC;
cleanup:
pgpfound = pgp_delete_from_obj(obj, pgp->index);
pool->puts++;
refind:
- /* does page already exist (dup)? if so, handle specially */
+ /* Does page already exist (dup)? If so, handle specially. */
if ( (obj = obj_find(pool, oidp)) != NULL )
{
if ((pgp = pgp_lookup_in_obj(obj, index)) != NULL)
}
else
{
- /* no puts allowed into a frozen pool (except dup puts) */
+ /* No puts allowed into a frozen pool (except dup puts). */
if ( client->frozen )
goto unlock_obj;
}
}
else
{
- /* no puts allowed into a frozen pool (except dup puts) */
+ /* No puts allowed into a frozen pool (except dup puts). */
if ( client->frozen )
return ret;
if ( (obj = obj_alloc(pool, oidp)) == NULL )
write_unlock(&pool->pool_rwlock);
}
- /* When arrive here, we have a spinlocked obj for use */
+ /* When we arrive here, we have a spinlocked obj for use. */
ASSERT_SPINLOCK(&obj->obj_spinlock);
if ( (pgp = pgp_alloc(obj)) == NULL )
goto unlock_obj;
ret = pgp_add_to_obj(obj, index, pgp);
if ( ret == -ENOMEM )
- /* warning, may result in partially built radix tree ("stump") */
+ /* Warning: may result in partially built radix tree ("stump"). */
goto free_pgp;
pgp->index = index;
spin_unlock(&eph_lists_spinlock);
}
else
- { /* is_persistent */
+ { /* is_persistent. */
spin_lock(&pers_lists_spinlock);
list_add_tail(&pgp->us.pool_pers_pages,
&pool->persistent_page_list);
if ( is_shared(pool) )
obj->last_client = client->cli_id;
- /* free the obj spinlock */
+ /* Free the obj spinlock. */
spin_unlock(&obj->obj_spinlock);
pool->good_puts++;
/*
* Authorize okay, match a global shared pool or use the newly allocated
- * one
+ * one.
*/
first_unused_s_poolid = MAX_GLOBAL_SHARED_POOLS;
for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++ )
{
if ( shpool->uuid[0] == uuid_lo && shpool->uuid[1] == uuid_hi )
{
- /* Succ to match a global shared pool */
+ /* Succeeded in matching a global shared pool. */
tmem_client_info("(matches shared pool uuid=%"PRIx64".%"PRIx64") pool_id=%d\n",
uuid_hi, uuid_lo, d_poolid);
client->pools[d_poolid] = shpool;
}
}
- /* Failed to find a global shard pool slot */
+ /* Failed to find a global shared pool slot. */
if ( first_unused_s_poolid == MAX_GLOBAL_SHARED_POOLS )
{
tmem_client_warn("tmem: failed... no global shared pool slots available\n");
goto fail;
}
- /* Add pool to global shard pool */
+ /* Add pool to global shared pool. */
else
{
INIT_LIST_HEAD(&pool->share_list);
/************ TMEM CONTROL OPERATIONS ************************************/
-/* freeze/thaw all pools belonging to client cli_id (all domains if -1) */
+/* Freeze/thaw all pools belonging to client cli_id (all domains if -1). */
static int tmemc_freeze_pools(domid_t cli_id, int arg)
{
struct client *client;
tmem_client_str);
return -1;
}
- /* convert kb to pages, rounding up if necessary */
+ /* Convert kb to pages, rounding up if necessary. */
npages = (kb + ((1 << (PAGE_SHIFT-10))-1)) >> (PAGE_SHIFT-10);
flushed_pages = tmem_flush_npages(npages);
flushed_kb = flushed_pages << (PAGE_SHIFT-10);
int n = 0, sum = 0;
n = scnprintf(info+n,BSIZE-n,"T=");
- n--; /* overwrite trailing comma */
+ n--; /* Overwrite trailing comma. */
n += scnprintf(info+n,BSIZE-n,"\n");
if ( sum + n >= len )
return sum;
ret = -1;
goto out;
}
- /* note: pool->cur_pgp is the pgp last returned by get_next_page */
+ /* Note: pool->cur_pgp is the pgp last returned by get_next_page. */
if ( pool->cur_pgp == NULL )
{
- /* process the first one */
+ /* Process the first one. */
pool->cur_pgp = pgp = list_entry((&pool->persistent_page_list)->next,
struct tmem_page_descriptor,us.pool_pers_pages);
} else if ( list_is_last(&pool->cur_pgp->us.pool_pers_pages,
&pool->persistent_page_list) )
{
- /* already processed the last one in the list */
+ /* Already processed the last one in the list. */
ret = -1;
goto out;
}
return -EFAULT;
}
- /* Acquire wirte lock for all command at first */
+ /* Acquire write lock for all commands at first. */
write_lock(&tmem_rwlock);
if ( op.cmd == TMEM_CONTROL )
rc = -ENODEV;
goto out;
}
- /* Commands only need read lock */
+ /* The remaining commands only need the read lock. */
write_unlock(&tmem_rwlock);
read_lock(&tmem_rwlock);
return rc;
}
-/* this should be called when the host is destroying a client */
+/* This should be called when the host is destroying a client (domain). */
void tmem_destroy(void *v)
{
struct client *client = (struct client *)v;
write_unlock(&tmem_rwlock);
}
-#define MAX_EVICTS 10 /* should be variable or set via XEN_SYSCTL_TMEM_OP_ ?? */
+#define MAX_EVICTS 10 /* Should be variable or set via XEN_SYSCTL_TMEM_OP_ ?? */
void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
{
struct page_info *pfp;
return tmem_page_list_pages + _atomic_read(freeable_page_count);
}
-/* called at hypervisor startup */
+/* Called at hypervisor startup. */
static int __init init_tmem(void)
{
int i;