ia64/xen-unstable

changeset 18646:bb1a67a7db26

xmalloc: Add pooled allocator interface.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Oct 16 15:46:04 2008 +0100 (2008-10-16)
parents 48fba1dbcfaf
children dc61548aa479
files xen/common/xmalloc_tlsf.c xen/include/xen/xmalloc.h
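
For illustration, a minimal sketch of how a caller might use the pooled
allocator interface this changeset introduces. The xmem_pool_* names and
the page callbacks mirror the diff below; demo_get_mem(), demo_put_mem()
and demo() are hypothetical names invented for this sketch, not part of
the changeset:

    #include <xen/lib.h>
    #include <xen/mm.h>
    #include <xen/xmalloc.h>

    /* Back the pool with single xenheap pages, as the xmalloc glue does. */
    static void *demo_get_mem(unsigned long bytes)
    {
        ASSERT(bytes == PAGE_SIZE);
        return alloc_xenheap_pages(0);
    }

    static void demo_put_mem(void *ptr)
    {
        free_xenheap_pages(ptr, 0);
    }

    static void demo(void)
    {
        struct xmem_pool *pool;
        void *obj;

        /* One page to start, no maximum (0), grow one page at a time. */
        pool = xmem_pool_create("demo", demo_get_mem, demo_put_mem,
                                PAGE_SIZE, 0, PAGE_SIZE);
        if ( pool == NULL )
            return;

        obj = xmem_pool_alloc(64, pool);
        if ( obj != NULL )
            xmem_pool_free(obj, pool);

        /* All allocations must be freed before the pool is destroyed. */
        xmem_pool_destroy(pool);
    }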
     1.1 --- a/xen/common/xmalloc_tlsf.c	Thu Oct 16 11:09:50 2008 +0100
     1.2 +++ b/xen/common/xmalloc_tlsf.c	Thu Oct 16 15:46:04 2008 +0100
     1.3 @@ -30,9 +30,6 @@
     1.4  
     1.5  #define MAX_POOL_NAME_LEN       16
     1.6  
     1.7 -typedef void *(get_memory)(size_t bytes);
     1.8 -typedef void (put_memory)(void *ptr);
     1.9 -
    1.10  /* Some IMPORTANT TLSF parameters */
    1.11  #define MEM_ALIGN       (sizeof(void *) * 2)
    1.12  #define MEM_ALIGN_MASK  (~(MEM_ALIGN - 1))
    1.13 @@ -92,7 +89,7 @@ struct bhdr {
    1.14      } ptr;
    1.15  };
    1.16  
    1.17 -struct pool {
    1.18 +struct xmem_pool {
    1.19      /* First level bitmap (REAL_FLI bits) */
    1.20      u32 fl_bitmap;
    1.21  
    1.22 @@ -104,17 +101,17 @@ struct pool {
    1.23  
    1.24      spinlock_t lock;
    1.25  
    1.26 -    size_t init_size;
    1.27 -    size_t max_size;
    1.28 -    size_t grow_size;
    1.29 +    unsigned long init_size;
    1.30 +    unsigned long max_size;
    1.31 +    unsigned long grow_size;
    1.32  
    1.33      /* Basic stats */
    1.34 -    size_t used_size;
    1.35 -    size_t num_regions;
    1.36 +    unsigned long used_size;
    1.37 +    unsigned long num_regions;
    1.38  
    1.39      /* User provided functions for expanding/shrinking pool */
    1.40 -    get_memory *get_mem;
    1.41 -    put_memory *put_mem;
    1.42 +    xmem_pool_get_memory *get_mem;
    1.43 +    xmem_pool_put_memory *put_mem;
    1.44  
    1.45      struct list_head list;
    1.46  
    1.47 @@ -129,7 +126,7 @@ struct pool {
    1.48  /**
    1.49  * Returns indexes (fl, sl) of the list used to serve a request of size r
    1.50   */
    1.51 -static inline void MAPPING_SEARCH(size_t *r, int *fl, int *sl)
    1.52 +static inline void MAPPING_SEARCH(unsigned long *r, int *fl, int *sl)
    1.53  {
    1.54      int t;
    1.55  
    1.56 @@ -157,7 +154,7 @@ static inline void MAPPING_SEARCH(size_t
    1.57  * for a block of size r. It also rounds up the requested size (r) to the
    1.58   * next list.
    1.59   */
    1.60 -static inline void MAPPING_INSERT(size_t r, int *fl, int *sl)
    1.61 +static inline void MAPPING_INSERT(unsigned long r, int *fl, int *sl)
    1.62  {
    1.63      if ( r < SMALL_BLOCK )
    1.64      {
    1.65 @@ -176,7 +173,7 @@ static inline void MAPPING_INSERT(size_t
    1.66  * Returns the first block from a list that holds blocks larger than or
    1.67  * equal to the one pointed to by the indexes (fl, sl)
    1.68   */
    1.69 -static inline struct bhdr *FIND_SUITABLE_BLOCK(struct pool *p, int *fl,
    1.70 +static inline struct bhdr *FIND_SUITABLE_BLOCK(struct xmem_pool *p, int *fl,
    1.71                                                 int *sl)
    1.72  {
    1.73      u32 tmp = p->sl_bitmap[*fl] & (~0 << *sl);
    1.74 @@ -203,7 +200,7 @@ static inline struct bhdr *FIND_SUITABLE
    1.75  /**
    1.76   * Remove first free block(b) from free list with indexes (fl, sl).
    1.77   */
    1.78 -static inline void EXTRACT_BLOCK_HDR(struct bhdr *b, struct pool *p, int fl,
    1.79 +static inline void EXTRACT_BLOCK_HDR(struct bhdr *b, struct xmem_pool *p, int fl,
    1.80                                       int sl)
    1.81  {
    1.82      p->matrix[fl][sl] = b->ptr.free_ptr.next;
    1.83 @@ -223,7 +220,7 @@ static inline void EXTRACT_BLOCK_HDR(str
    1.84  /**
    1.85   * Removes block(b) from free list with indexes (fl, sl)
    1.86   */
    1.87 -static inline void EXTRACT_BLOCK(struct bhdr *b, struct pool *p, int fl,
    1.88 +static inline void EXTRACT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl,
    1.89                                   int sl)
    1.90  {
    1.91      if ( b->ptr.free_ptr.next )
    1.92 @@ -248,7 +245,7 @@ static inline void EXTRACT_BLOCK(struct 
    1.93  /**
    1.94   * Insert block(b) in free list with indexes (fl, sl)
    1.95   */
    1.96 -static inline void INSERT_BLOCK(struct bhdr *b, struct pool *p, int fl, int sl)
    1.97 +static inline void INSERT_BLOCK(struct bhdr *b, struct xmem_pool *p, int fl, int sl)
    1.98  {
    1.99      b->ptr.free_ptr = (struct free_ptr) {NULL, p->matrix[fl][sl]};
   1.100      if ( p->matrix[fl][sl] )
   1.101 @@ -262,8 +259,8 @@ static inline void INSERT_BLOCK(struct b
   1.102  * A Region is a virtually contiguous memory region and a Pool is a
   1.103  * collection of such regions
   1.104   */
   1.105 -static inline void ADD_REGION(void *region, size_t region_size,
   1.106 -                              struct pool *pool)
   1.107 +static inline void ADD_REGION(void *region, unsigned long region_size,
   1.108 +                              struct xmem_pool *pool)
   1.109  {
   1.110      int fl, sl;
   1.111      struct bhdr *b, *lb;
   1.112 @@ -283,28 +280,18 @@ static inline void ADD_REGION(void *regi
   1.113  }
   1.114  
   1.115  /*
   1.116 - * Allocator code start
   1.117 + * TLSF pool-based allocator start.
   1.118   */
   1.119  
   1.120 -/**
   1.121 - * tlsf_create_memory_pool - create dynamic memory pool
   1.122 - * @name: name of the pool
   1.123 - * @get_mem: callback function used to expand pool
   1.124 - * @put_mem: callback function used to shrink pool
   1.125 - * @init_size: inital pool size (in bytes)
   1.126 - * @max_size: maximum pool size (in bytes) - set this as 0 for no limit
   1.127 - * @grow_size: amount of memory (in bytes) added to pool whenever required
   1.128 - *
   1.129 - * All size values are rounded up to next page boundary.
   1.130 - */
   1.131 -static void *tlsf_create_memory_pool(const char *name,
   1.132 -                                     get_memory get_mem,
   1.133 -                                     put_memory put_mem,
   1.134 -                                     size_t init_size,
   1.135 -                                     size_t max_size,
   1.136 -                                     size_t grow_size)
   1.137 +struct xmem_pool *xmem_pool_create(
   1.138 +    const char *name,
   1.139 +    xmem_pool_get_memory get_mem,
   1.140 +    xmem_pool_put_memory put_mem,
   1.141 +    unsigned long init_size,
   1.142 +    unsigned long max_size,
   1.143 +    unsigned long grow_size)
   1.144  {
   1.145 -    struct pool *pool;
   1.146 +    struct xmem_pool *pool;
   1.147      void *region;
   1.148      int pool_bytes, pool_order;
   1.149  
   1.150 @@ -331,7 +318,7 @@ static void *tlsf_create_memory_pool(con
   1.151      pool->grow_size = grow_size;
   1.152      pool->get_mem = get_mem;
   1.153      pool->put_mem = put_mem;
   1.154 -    pool->name[0] = '\0';  /* ignore name for now */
   1.155 +    strlcpy(pool->name, name, sizeof(pool->name));
   1.156      region = get_mem(init_size);
   1.157      if ( region == NULL )
   1.158          goto out_region;
   1.159 @@ -351,66 +338,37 @@ static void *tlsf_create_memory_pool(con
   1.160      return NULL;
   1.161  }
   1.162  
   1.163 -#if 0
   1.164 -
   1.165 -/**
   1.166 - * tlsf_get_used_size - get memory currently used by given pool
   1.167 - *
   1.168 - * Used memory includes stored data + metadata + internal fragmentation
   1.169 - */
   1.170 -static size_t tlsf_get_used_size(void *mem_pool)
   1.171 +unsigned long xmem_pool_get_used_size(struct xmem_pool *pool)
   1.172  {
   1.173 -    struct pool *pool = (struct pool *)mem_pool;
   1.174      return pool->used_size;
   1.175  }
   1.176  
   1.177 -/**
   1.178 - * tlsf_get_total_size - get total memory currently allocated for given pool
   1.179 - *
   1.180 - * This is the total memory currently allocated for this pool which includes
   1.181 - * used size + free size.
   1.182 - *
   1.183 - * (Total - Used) is good indicator of memory efficiency of allocator.
   1.184 - */
   1.185 -static size_t tlsf_get_total_size(void *mem_pool)
   1.186 +unsigned long xmem_pool_get_total_size(struct xmem_pool *pool)
   1.187  {
   1.188 -    size_t total;
   1.189 -    struct pool *pool = (struct pool *)mem_pool;
   1.190 +    unsigned long total;
   1.191      total = ROUNDUP_SIZE(sizeof(*pool))
   1.192          + pool->init_size
   1.193          + (pool->num_regions - 1) * pool->grow_size;
   1.194      return total;
   1.195  }
   1.196  
   1.197 -/**
   1.198 - * tlsf_destroy_memory_pool - cleanup given pool
   1.199 - * @mem_pool: Pool to be destroyed
   1.200 - *
   1.201 - * Data structures associated with pool are freed.
   1.202 - * All memory allocated from pool must be freed before
   1.203 - * destorying it.
   1.204 - */
   1.205 -static void tlsf_destroy_memory_pool(void *mem_pool) 
   1.206 +void xmem_pool_destroy(struct xmem_pool *pool) 
   1.207  {
   1.208 -    struct pool *pool;
   1.209 -
   1.210 -    if ( mem_pool == NULL )
   1.211 +    if ( pool == NULL )
   1.212          return;
   1.213  
   1.214 -    pool = (struct pool *)mem_pool;
   1.215 -
   1.216      /* User is destroying without ever allocating from this pool */
   1.217 -    if ( tlsf_get_used_size(pool) == BHDR_OVERHEAD )
   1.218 +    if ( xmem_pool_get_used_size(pool) == BHDR_OVERHEAD )
   1.219      {
   1.220          pool->put_mem(pool->init_region);
   1.221          pool->used_size -= BHDR_OVERHEAD;
   1.222      }
   1.223  
   1.224      /* Check for memory leaks in this pool */
   1.225 -    if ( tlsf_get_used_size(pool) )
   1.226 +    if ( xmem_pool_get_used_size(pool) )
   1.227          printk("memory leak in pool: %s (%p). "
   1.228                 "%lu bytes still in use.\n",
   1.229 -               pool->name, pool, (long)tlsf_get_used_size(pool));
   1.230 +               pool->name, pool, xmem_pool_get_used_size(pool));
   1.231  
   1.232      spin_lock(&pool_list_lock);
   1.233      list_del_init(&pool->list);
   1.234 @@ -418,19 +376,11 @@ static void tlsf_destroy_memory_pool(voi
   1.235      pool->put_mem(pool);
   1.236  }
   1.237  
   1.238 -#endif
   1.239 -
   1.240 -/**
   1.241 - * tlsf_malloc - allocate memory from given pool
   1.242 - * @size: no. of bytes
   1.243 - * @mem_pool: pool to allocate from
   1.244 - */
   1.245 -static void *tlsf_malloc(size_t size, void *mem_pool)
   1.246 +void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool)
   1.247  {
   1.248 -    struct pool *pool = (struct pool *)mem_pool;
   1.249      struct bhdr *b, *b2, *next_b, *region;
   1.250      int fl, sl;
   1.251 -    size_t tmp_size;
   1.252 +    unsigned long tmp_size;
   1.253  
   1.254      size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);
   1.255      /* Rounding up the requested size and calculating fl and sl */
   1.256 @@ -496,14 +446,8 @@ static void *tlsf_malloc(size_t size, vo
   1.257      return NULL;
   1.258  }
   1.259  
   1.260 -/**
   1.261 - * tlsf_free - free memory from given pool
   1.262 - * @ptr: address of memory to be freed
   1.263 - * @mem_pool: pool to free from
   1.264 - */
   1.265 -static void tlsf_free(void *ptr, void *mem_pool)
   1.266 +void xmem_pool_free(void *ptr, struct xmem_pool *pool)
   1.267  {
   1.268 -    struct pool *pool = (struct pool *)mem_pool;
   1.269      struct bhdr *b, *tmp_b;
   1.270      int fl = 0, sl = 0;
   1.271  
   1.272 @@ -556,20 +500,20 @@ static void tlsf_free(void *ptr, void *m
   1.273   * Glue for xmalloc().
   1.274   */
   1.275  
   1.276 -static struct pool *xenpool;
   1.277 +static struct xmem_pool *xenpool;
   1.278  
   1.279 -static void *tlsf_get_xenheap_page(size_t size)
   1.280 +static void *xmalloc_pool_get(unsigned long size)
   1.281  {
   1.282      ASSERT(size == PAGE_SIZE);
   1.283      return alloc_xenheap_pages(0);
   1.284  }
   1.285  
   1.286 -static void tlsf_put_xenheap_page(void *p)
   1.287 +static void xmalloc_pool_put(void *p)
   1.288  {
   1.289      free_xenheap_pages(p,0);
   1.290  }
   1.291  
   1.292 -static void *tlsf_xenheap_malloc_whole_pages(size_t size)
   1.293 +static void *xmalloc_whole_pages(unsigned long size)
   1.294  {
   1.295      struct bhdr *b;
   1.296      unsigned int pageorder = get_order_from_bytes(size + BHDR_OVERHEAD);
   1.297 @@ -582,13 +526,13 @@ static void *tlsf_xenheap_malloc_whole_p
   1.298      return (void *)b->ptr.buffer;
   1.299  }
   1.300  
   1.301 -static void tlsf_xenheap_init(void)
   1.302 +static void tlsf_init(void)
   1.303  {
   1.304      INIT_LIST_HEAD(&pool_list_head);
   1.305      spin_lock_init(&pool_list_lock);
   1.306 -    xenpool = tlsf_create_memory_pool(
   1.307 -        "", tlsf_get_xenheap_page,
   1.308 -        tlsf_put_xenheap_page, PAGE_SIZE, 0, PAGE_SIZE);
   1.309 +    xenpool = xmem_pool_create(
   1.310 +        "xmalloc", xmalloc_pool_get, xmalloc_pool_put,
   1.311 +        PAGE_SIZE, 0, PAGE_SIZE);
   1.312      BUG_ON(!xenpool);
   1.313  }
   1.314  
   1.315 @@ -596,7 +540,7 @@ static void tlsf_xenheap_init(void)
   1.316   * xmalloc()
   1.317   */
   1.318  
   1.319 -void *_xmalloc(size_t size, size_t align)
   1.320 +void *_xmalloc(unsigned long size, unsigned long align)
   1.321  {
   1.322      void *p;
   1.323      u32 pad;
   1.324 @@ -609,12 +553,12 @@ void *_xmalloc(size_t size, size_t align
   1.325      size += align - MEM_ALIGN;
   1.326  
   1.327      if ( !xenpool )
   1.328 -        tlsf_xenheap_init();
   1.329 +        tlsf_init();
   1.330  
   1.331      if ( size >= (PAGE_SIZE - (2*BHDR_OVERHEAD)) )
   1.332 -        p = tlsf_xenheap_malloc_whole_pages(size);
   1.333 +        p = xmalloc_whole_pages(size);
   1.334      else
   1.335 -        p = tlsf_malloc(size, xenpool);
   1.336 +        p = xmem_pool_alloc(size, xenpool);
   1.337  
   1.338      /* Add alignment padding. */
   1.339      if ( (pad = -(long)p & (align - 1)) != 0 )
   1.340 @@ -651,5 +595,5 @@ void xfree(void *p)
   1.341      if ( b->size >= (PAGE_SIZE - (2*BHDR_OVERHEAD)) )
   1.342          free_xenheap_pages((void *)b, get_order_from_bytes(b->size));
   1.343      else
   1.344 -        tlsf_free(p, xenpool);
   1.345 +        xmem_pool_free(p, xenpool);
   1.346  }
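
As background for the MAPPING_INSERT() helper whose prototype changes
above, a self-contained sketch of the two-level TLSF size-to-index
mapping. The constants are illustrative assumptions (the real
MAX_LOG2_SLI/MAX_SLI/SMALL_BLOCK values live in xmalloc_tlsf.c), and
fls_sketch() stands in for a find-last-set helper:

    #include <stdio.h>

    #define MAX_LOG2_SLI  5                    /* assumed log2(second-level lists) */
    #define MAX_SLI       (1 << MAX_LOG2_SLI)  /* second-level lists per first level */
    #define SMALL_BLOCK   128                  /* assumed small-block threshold */

    /* Index of the highest set bit (find-last-set, 0-based). */
    static int fls_sketch(unsigned long r)
    {
        int i = -1;

        while ( r )
        {
            i++;
            r >>= 1;
        }
        return i;
    }

    /*
     * First level (fl) is the power-of-two size class (the highest set
     * bit); the next MAX_LOG2_SLI bits below it pick the second level
     * (sl), splitting each class into MAX_SLI linear buckets. Sizes
     * below SMALL_BLOCK share fl == 0 and are split evenly instead.
     */
    static void mapping_insert(unsigned long r, int *fl, int *sl)
    {
        if ( r < SMALL_BLOCK )
        {
            *fl = 0;
            *sl = (int)(r / (SMALL_BLOCK / MAX_SLI));
        }
        else
        {
            *fl = fls_sketch(r);
            *sl = (int)((r >> (*fl - MAX_LOG2_SLI)) - MAX_SLI);
        }
    }

    int main(void)
    {
        int fl, sl;

        mapping_insert(300, &fl, &sl);    /* 300: highest set bit is 8 */
        printf("fl=%d sl=%d\n", fl, sl);  /* fl=8, sl=(300>>3)-32=5 */
        return 0;
    }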
     2.1 --- a/xen/include/xen/xmalloc.h	Thu Oct 16 11:09:50 2008 +0100
     2.2 +++ b/xen/include/xen/xmalloc.h	Thu Oct 16 15:46:04 2008 +0100
     2.3 @@ -2,11 +2,16 @@
     2.4  #ifndef __XMALLOC_H__
     2.5  #define __XMALLOC_H__
     2.6  
     2.7 +/*
     2.8 + * Xen malloc/free-style interface.
     2.9 + */
    2.10 +
    2.11  /* Allocate space for typed object. */
    2.12  #define xmalloc(_type) ((_type *)_xmalloc(sizeof(_type), __alignof__(_type)))
    2.13  
    2.14  /* Allocate space for array of typed objects. */
    2.15 -#define xmalloc_array(_type, _num) ((_type *)_xmalloc_array(sizeof(_type), __alignof__(_type), _num))
    2.16 +#define xmalloc_array(_type, _num) \
    2.17 +    ((_type *)_xmalloc_array(sizeof(_type), __alignof__(_type), _num))
    2.18  
    2.19  /* Allocate untyped storage. */
    2.20  #define xmalloc_bytes(_bytes) (_xmalloc(_bytes, SMP_CACHE_BYTES))
    2.21 @@ -15,8 +20,9 @@
    2.22  extern void xfree(void *);
    2.23  
    2.24  /* Underlying functions */
    2.25 -extern void *_xmalloc(size_t size, size_t align);
    2.26 -static inline void *_xmalloc_array(size_t size, size_t align, size_t num)
    2.27 +extern void *_xmalloc(unsigned long size, unsigned long align);
    2.28 +static inline void *_xmalloc_array(
    2.29 +    unsigned long size, unsigned long align, unsigned long num)
    2.30  {
    2.31  	/* Check for overflow. */
    2.32  	if (size && num > UINT_MAX / size)
    2.33 @@ -24,4 +30,73 @@ static inline void *_xmalloc_array(size_
    2.34   	return _xmalloc(size * num, align);
    2.35  }
    2.36  
    2.37 +/*
    2.38 + * Pooled allocator interface.
    2.39 + */
    2.40 +
    2.41 +struct xmem_pool;
    2.42 +
    2.43 +typedef void *(xmem_pool_get_memory)(unsigned long bytes);
    2.44 +typedef void (xmem_pool_put_memory)(void *ptr);
    2.45 +
    2.46 +/**
    2.47 + * xmem_pool_create - create dynamic memory pool
    2.48 + * @name: name of the pool
    2.49 + * @get_mem: callback function used to expand pool
    2.50 + * @put_mem: callback function used to shrink pool
    2.51 + * @init_size: initial pool size (in bytes)
    2.52 + * @max_size: maximum pool size (in bytes) - set this to 0 for no limit
    2.53 + * @grow_size: amount of memory (in bytes) added to pool whenever required
    2.54 + *
    2.55 + * All size values are rounded up to the next page boundary.
    2.56 + */
    2.57 +struct xmem_pool *xmem_pool_create(
    2.58 +    const char *name,
    2.59 +    xmem_pool_get_memory get_mem,
    2.60 +    xmem_pool_put_memory put_mem,
    2.61 +    unsigned long init_size,
    2.62 +    unsigned long max_size,
    2.63 +    unsigned long grow_size);
    2.64 +
    2.65 +/**
    2.66 + * xmem_pool_destroy - cleanup given pool
    2.67 + * @pool: Pool to be destroyed
    2.68 + *
    2.69 + * Data structures associated with the pool are freed.
    2.70 + * All memory allocated from the pool must be freed before
    2.71 + * destroying it.
    2.72 + */
    2.73 +void xmem_pool_destroy(struct xmem_pool *pool);
    2.74 +
    2.75 +/**
    2.76 + * xmem_pool_alloc - allocate memory from given pool
    2.77 + * @size: number of bytes
    2.78 + * @pool: pool to allocate from
    2.79 + */
    2.80 +void *xmem_pool_alloc(unsigned long size, struct xmem_pool *pool);
    2.81 +
    2.82 +/**
    2.83 + * xmem_pool_free - free memory from given pool
    2.84 + * @ptr: address of memory to be freed
    2.85 + * @pool: pool to free from
    2.86 + */
    2.87 +void xmem_pool_free(void *ptr, struct xmem_pool *pool);
    2.88 +
    2.89 +/**
    2.90 + * xmem_pool_get_used_size - get memory currently used by given pool
    2.91 + *
    2.92 + * Used memory includes stored data + metadata + internal fragmentation
    2.93 + */
    2.94 +unsigned long xmem_pool_get_used_size(struct xmem_pool *pool);
    2.95 +
    2.96 +/**
    2.97 + * xmem_pool_get_total_size - get total memory currently allocated for pool
    2.98 + *
    2.99 + * This is the total memory currently allocated for this pool, which
   2.100 + * includes used size + free size.
   2.101 + *
   2.102 + * (Total - Used) is a good indicator of the allocator's memory efficiency.
   2.103 + */
   2.104 +unsigned long xmem_pool_get_total_size(struct xmem_pool *pool);
   2.105 +
   2.106  #endif /* __XMALLOC_H__ */
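
One detail worth a worked example: the _xmalloc() glue retyped above
keeps computing its alignment padding as pad = -(long)p & (align - 1),
which is the number of bytes needed to round p up to the next multiple
of a power-of-two align. A standalone illustration (the address value
is made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned long align = 16;
        unsigned long p = 0x1004;  /* a misaligned allocation address */
        unsigned long pad = -(long)p & (align - 1);

        /* -(0x1004) & 0xf == 12, and 0x1004 + 12 == 0x1010. */
        printf("pad=%lu aligned=%#lx\n", pad, p + pad);
        return 0;
    }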