ia64/linux-2.6.18-xen.hg

annotate lib/genalloc.c @ 0:831230e53067

Import 2.6.18 from kernel.org tarball.
author Ian Campbell <ian.campbell@xensource.com>
date Wed Apr 11 14:15:44 2007 +0100 (2007-04-11)
/*
 * Basic general purpose allocator for managing special purpose memory
 * not managed by the regular kmalloc/kfree interface.
 * Uses for this include on-device special memory, uncached memory, etc.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/module.h>
#include <linux/genalloc.h>


/*
 * Create a new special memory pool.
 *
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                rwlock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);
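
/*
 * Illustrative sketch, not part of the original file: creating a pool whose
 * smallest allocation unit is 2^8 = 256 bytes, with the pool structure
 * allocated on any node (nid == -1).  The "foo_pool" name and the wrapper
 * function are assumptions made for the example.
 */
static struct gen_pool *foo_pool;

static int foo_create_pool(void)
{
        /* one bitmap bit per 256-byte unit */
        foo_pool = gen_pool_create(8, -1);
        if (foo_pool == NULL)
                return -ENOMEM;
        return 0;
}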


/*
 * Add a new chunk of memory to the specified pool.
 *
 * @pool: pool to add new memory chunk to
 * @addr: starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 */
int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
                 int nid)
{
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        int nbytes = sizeof(struct gen_pool_chunk) +
                     (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;

        chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
        if (unlikely(chunk == NULL))
                return -1;

        memset(chunk, 0, nbytes);
        spin_lock_init(&chunk->lock);
        chunk->start_addr = addr;
        chunk->end_addr = addr + size;

        write_lock(&pool->lock);
        list_add(&chunk->next_chunk, &pool->chunks);
        write_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add);
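
/*
 * Illustrative sketch, not part of the original file: handing a 1 MiB region
 * of special memory to the pool created in the previous sketch.  "foo_pool"
 * and "foo_base" are assumptions made for the example; the address passed in
 * is simply the start of whatever range the caller wants gen_pool_alloc() to
 * hand back addresses from.  Returns 0 on success, -1 if the chunk structure
 * could not be allocated.
 */
static int foo_add_region(unsigned long foo_base)
{
        /* chunk structure and bitmap may live on any node */
        return gen_pool_add(foo_pool, foo_base, 1024 * 1024, -1);
}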


/*
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.
 *
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
        struct list_head *_chunk;
        struct gen_pool_chunk *chunk;
        unsigned long addr, flags;
        int order = pool->min_alloc_order;
        int nbits, bit, start_bit, end_bit;

        if (size == 0)
                return 0;

        nbits = (size + (1UL << order) - 1) >> order;

        read_lock(&pool->lock);
        list_for_each(_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
                end_bit -= nbits + 1;

                spin_lock_irqsave(&chunk->lock, flags);
                bit = -1;
                while (bit + 1 < end_bit) {
                        bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
                        if (bit >= end_bit)
                                break;

                        start_bit = bit;
                        if (nbits > 1) {
                                bit = find_next_bit(chunk->bits, bit + nbits,
                                                    bit + 1);
                                if (bit - start_bit < nbits)
                                        continue;
                        }

                        addr = chunk->start_addr +
                               ((unsigned long)start_bit << order);
                        while (nbits--)
                                __set_bit(start_bit++, &chunk->bits);
                        spin_unlock_irqrestore(&chunk->lock, flags);
                        read_unlock(&pool->lock);
                        return addr;
                }
                spin_unlock_irqrestore(&chunk->lock, flags);
        }
        read_unlock(&pool->lock);
        return 0;
}
EXPORT_SYMBOL(gen_pool_alloc);
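
/*
 * Illustrative sketch, not part of the original file: carving a 512-byte
 * buffer out of the example pool.  With min_alloc_order == 8 the request is
 * rounded up to two 256-byte units (two bitmap bits); a return value of 0
 * means no chunk had a long enough run of free units.
 */
static unsigned long foo_get_buffer(void)
{
        unsigned long addr;

        addr = gen_pool_alloc(foo_pool, 512);
        if (addr == 0)
                return 0;       /* pool exhausted or too fragmented */
        return addr;
}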


/*
 * Free the specified memory back to the specified pool.
 *
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
        struct list_head *_chunk;
        struct gen_pool_chunk *chunk;
        unsigned long flags;
        int order = pool->min_alloc_order;
        int bit, nbits;

        nbits = (size + (1UL << order) - 1) >> order;

        read_lock(&pool->lock);
        list_for_each(_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

                if (addr >= chunk->start_addr && addr < chunk->end_addr) {
                        BUG_ON(addr + size > chunk->end_addr);
                        spin_lock_irqsave(&chunk->lock, flags);
                        bit = (addr - chunk->start_addr) >> order;
                        while (nbits--)
                                __clear_bit(bit++, &chunk->bits);
                        spin_unlock_irqrestore(&chunk->lock, flags);
                        break;
                }
        }
        BUG_ON(nbits > 0);
        read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);
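
/*
 * Illustrative sketch, not part of the original file: returning the buffer
 * from the previous sketch.  The caller must pass the same size it allocated
 * with, since the pool keeps no per-allocation metadata; only the bitmap
 * bits covering [addr, addr + size) are cleared.
 */
static void foo_put_buffer(unsigned long addr)
{
        gen_pool_free(foo_pool, addr, 512);
}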