atomic_t pgp_count;
int pgp_count_max;
long obj_count; /* atomicity depends on pool_rwlock held for write */
- long obj_count_max;
+ long obj_count_max;
unsigned long objnode_count, objnode_count_max;
uint64_t sum_life_cycles;
uint64_t sum_evicted_cycles;
ASSERT(is_shared(pool));
ASSERT(pool->client != NULL);
-
+
ASSERT_WRITELOCK(&tmem_rwlock);
pool_destroy_objs(pool, cli_id);
list_for_each_entry(sl,&pool->share_list, share_list)
}
if ( !d->is_dying ) {
d->tmem_client = client;
- client->domain = d;
+ client->domain = d;
}
rcu_unlock_domain(d);
int total = _atomic_read(client_weight_total);
ASSERT(client != NULL);
- if ( (total == 0) || (client->weight == 0) ||
+ if ( (total == 0) || (client->weight == 0) ||
(client->eph_count == 0) )
return 0;
return ( ((global_eph_count*100L) / client->eph_count ) >
void *dst, *p;
size_t size;
int ret = 0;
-
+
ASSERT(pgp != NULL);
ASSERT(pgp->us.obj != NULL);
ASSERT_SPINLOCK(&pgp->us.obj->obj_spinlock);
{
/* no puts allowed into a frozen pool (except dup puts) */
if ( client->frozen )
- goto unlock_obj;
+ goto unlock_obj;
}
}
else
write_lock(&pool->pool_rwlock);
/*
- * Parallel callers may already allocated obj and inserted to obj_rb_root
- * before us.
- */
- if (!obj_rb_insert(&pool->obj_rb_root[oid_hash(oidp)], obj))
+     * Parallel callers may have already allocated the obj and inserted it
+     * into obj_rb_root before us.
+ */
+ if ( !obj_rb_insert(&pool->obj_rb_root[oid_hash(oidp)], obj) )
{
tmem_free(obj, pool);
write_unlock(&pool->pool_rwlock);
(client->shared_auth_uuid[i][1] == uuid_hi) )
break;
if ( i == MAX_GLOBAL_SHARED_POOLS )
- {
+ {
tmem_client_info("Shared auth failed, create non shared pool instead!\n");
pool->shared = 0;
goto out;
p->obj_count, p->obj_count_max,
p->objnode_count, p->objnode_count_max,
p->good_puts, p->puts,p->dup_puts_flushed, p->dup_puts_replaced,
- p->no_mem_puts,
+ p->no_mem_puts,
p->found_gets, p->gets,
p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs);
if ( sum + n >= len )
p->obj_count, p->obj_count_max,
p->objnode_count, p->objnode_count_max,
p->good_puts, p->puts,p->dup_puts_flushed, p->dup_puts_replaced,
- p->no_mem_puts,
+ p->no_mem_puts,
p->found_gets, p->gets,
p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs);
if ( sum + n >= len )
/* process the first one */
pool->cur_pgp = pgp = list_entry((&pool->persistent_page_list)->next,
struct tmem_page_descriptor,us.pool_pers_pages);
- } else if ( list_is_last(&pool->cur_pgp->us.pool_pers_pages,
+ } else if ( list_is_last(&pool->cur_pgp->us.pool_pers_pages,
&pool->persistent_page_list) )
{
/* already processed the last one in the list */
pgp = list_entry((&client->persistent_invalidated_list)->next,
struct tmem_page_descriptor,client_inv_pages);
client->cur_pgp = pgp;
- } else if ( list_is_last(&client->cur_pgp->client_inv_pages,
+ } else if ( list_is_last(&client->cur_pgp->client_inv_pages,
&client->persistent_invalidated_list) )
{
client->cur_pgp = NULL;
if (bufsize != PAGE_SIZE) {
tmem_client_err("tmem: %s: invalid parameter bufsize(%d) != (%ld)\n",
__func__, bufsize, PAGE_SIZE);
- return -EINVAL;
+ return -EINVAL;
}
return do_tmem_put(pool, oidp, index, 0, buf);
}