xenbits.xensource.com Git - xen.git/commitdiff
tmem: bugfix in obj allocate path
authorBob Liu <lliubbo@gmail.com>
Tue, 28 Jan 2014 04:28:23 +0000 (12:28 +0800)
committerKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Wed, 9 Apr 2014 13:05:20 +0000 (09:05 -0400)
There is a potential bug in the obj allocate path. When parallel
callers allocate an obj and insert it into pool->obj_rb_root, an unexpected
obj might be returned (both callers use the same oid).

Caller A:                            Caller B:

obj_find(oidp) == NULL               obj_find(oidp) == NULL

write_lock(&pool->pool_rwlock)
obj_new():
    objA = tmem_malloc()
    obj_rb_insert(objA)
write_unlock()
                                     write_lock(&pool->pool_rwlock)
                                     obj_new():
                                        objB = tmem_malloc()
                                        obj_rb_insert(objB)
                                     write_unlock()

Continue write data to objA
But in future obj_find() calls, objB
will always be returned.

The root cause is that the allocate path didn't check the return value of
obj_rb_insert(). This patch fixes it and replaces obj_new() with the better
name obj_alloc().

Signed-off-by: Bob Liu <bob.liu@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
xen/common/tmem.c

index 8c788acfba7ea90ac25f787ba2a36cd6a58f95a7..39ffe17828ba286bc41273afd35e18251755726f 100644 (file)
@@ -959,12 +959,11 @@ static int obj_rb_insert(struct rb_root *root, struct tmem_object_root *obj)
  * allocate, initialize, and insert an tmem_object_root
  * (should be called only if find failed)
  */
-static struct tmem_object_root * obj_new(struct tmem_pool *pool, struct oid *oidp)
+static struct tmem_object_root * obj_alloc(struct tmem_pool *pool, struct oid *oidp)
 {
     struct tmem_object_root *obj;
 
     ASSERT(pool != NULL);
-    ASSERT_WRITELOCK(&pool->pool_rwlock);
     if ( (obj = tmem_malloc(sizeof(struct tmem_object_root), pool)) == NULL )
         return NULL;
     pool->obj_count++;
@@ -979,9 +978,6 @@ static struct tmem_object_root * obj_new(struct tmem_pool *pool, struct oid *oid
     obj->objnode_count = 0;
     obj->pgp_count = 0;
     obj->last_client = TMEM_CLI_ID_NULL;
-    spin_lock(&obj->obj_spinlock);
-    obj_rb_insert(&pool->obj_rb_root[oid_hash(oidp)], obj);
-    ASSERT_SPINLOCK(&obj->obj_spinlock);
     return obj;
 }
 
@@ -1552,10 +1548,13 @@ static int do_tmem_put(struct tmem_pool *pool,
 
     ASSERT(pool != NULL);
     client = pool->client;
+    ASSERT(client != NULL);
     ret = client->frozen ? -EFROZEN : -ENOMEM;
     pool->puts++;
+
+refind:
     /* does page already exist (dup)?  if so, handle specially */
-    if ( (obj = obj_find(pool,oidp)) != NULL )
+    if ( (obj = obj_find(pool, oidp)) != NULL )
     {
         if ((pgp = pgp_lookup_in_obj(obj, index)) != NULL)
         {
@@ -1573,12 +1572,22 @@ static int do_tmem_put(struct tmem_pool *pool,
         /* no puts allowed into a frozen pool (except dup puts) */
         if ( client->frozen )
             return ret;
+        if ( (obj = obj_alloc(pool, oidp)) == NULL )
+            return -ENOMEM;
+
         write_lock(&pool->pool_rwlock);
-        if ( (obj = obj_new(pool,oidp)) == NULL )
+        /*
+        * Parallel callers may already allocated obj and inserted to obj_rb_root
+        * before us.
+        */
+        if (!obj_rb_insert(&pool->obj_rb_root[oid_hash(oidp)], obj))
         {
+            tmem_free(obj, pool);
             write_unlock(&pool->pool_rwlock);
-            return -ENOMEM;
+            goto refind;
         }
+
+        spin_lock(&obj->obj_spinlock);
         newobj = 1;
         write_unlock(&pool->pool_rwlock);
     }