ia64/linux-2.6.18-xen.hg

view lib/genalloc.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation, and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, creating temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor would you expect
it to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
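The retry behaviour described in the changeset message above can be pictured
with a short sketch. This is not the patch itself: the helper names
(current_target, current_pages, increase_reservation, decrease_reservation,
balloon_timer) merely follow the style of the 2.6.18-xen balloon worker and
are assumptions used only for illustration.

        /* Illustrative sketch only, not the actual drivers/xen/balloon code. */
        static void balloon_process(void *unused)
        {
                int need_sleep = 0;

                do {
                        if (current_target() > current_pages)
                                /* Keep whatever pages were granted, even on a
                                 * partial or failed increase, and retry later. */
                                need_sleep = (increase_reservation(
                                        current_target() - current_pages) != 0);
                        else if (current_target() < current_pages)
                                need_sleep = (decrease_reservation(
                                        current_pages - current_target()) != 0);
                } while (current_target() != current_pages && !need_sleep);

                /* No "hard limit": if the target was not reached, rearm a timer
                 * and try again, rather than waiting for a new target write. */
                if (current_target() != current_pages)
                        mod_timer(&balloon_timer, jiffies + HZ);
        }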
/*
 * Basic general purpose allocator for managing special purpose memory
 * not managed by the regular kmalloc/kfree interface.
 * Uses for this includes on-device special memory, uncached memory
 * etc.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/module.h>
#include <linux/genalloc.h>


/*
 * Create a new special memory pool.
 *
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                rwlock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);
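/*
 * Illustrative usage sketch (editor's note, not part of genalloc.c): a
 * driver typically creates one pool per device region, for example with
 * 256-byte granularity (min_alloc_order = 8) and no NUMA preference
 * (nid = -1). "sram_pool" is a hypothetical name used only in these notes:
 *
 *      struct gen_pool *sram_pool = gen_pool_create(8, -1);
 *      if (sram_pool == NULL)
 *              return -ENOMEM;
 */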
/*
 * Add a new chunk of memory to the specified pool.
 *
 * @pool: pool to add new memory chunk to
 * @addr: starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 */
int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
                 int nid)
{
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        int nbytes = sizeof(struct gen_pool_chunk) +
                     (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;

        chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
        if (unlikely(chunk == NULL))
                return -1;

        memset(chunk, 0, nbytes);
        spin_lock_init(&chunk->lock);
        chunk->start_addr = addr;
        chunk->end_addr = addr + size;

        write_lock(&pool->lock);
        list_add(&chunk->next_chunk, &pool->chunks);
        write_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add);
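/*
 * Illustrative usage sketch (editor's note, not part of genalloc.c): once
 * created, the pool is handed its backing region, e.g. an already-mapped
 * on-chip SRAM window. The names sram_virt and sram_size are hypothetical;
 * gen_pool_add() returns -1 if the chunk structure and bitmap cannot be
 * allocated:
 *
 *      if (gen_pool_add(sram_pool, (unsigned long)sram_virt,
 *                       sram_size, -1) != 0)
 *              return -ENOMEM;
 */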
/*
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.
 *
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
        struct list_head *_chunk;
        struct gen_pool_chunk *chunk;
        unsigned long addr, flags;
        int order = pool->min_alloc_order;
        int nbits, bit, start_bit, end_bit;

        if (size == 0)
                return 0;

        nbits = (size + (1UL << order) - 1) >> order;

        read_lock(&pool->lock);
        list_for_each(_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
                end_bit -= nbits + 1;

                spin_lock_irqsave(&chunk->lock, flags);
                bit = -1;
                while (bit + 1 < end_bit) {
                        bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
                        if (bit >= end_bit)
                                break;

                        start_bit = bit;
                        if (nbits > 1) {
                                bit = find_next_bit(chunk->bits, bit + nbits,
                                                    bit + 1);
                                if (bit - start_bit < nbits)
                                        continue;
                        }

                        addr = chunk->start_addr +
                               ((unsigned long)start_bit << order);
                        while (nbits--)
                                __set_bit(start_bit++, &chunk->bits);
                        spin_unlock_irqrestore(&chunk->lock, flags);
                        read_unlock(&pool->lock);
                        return addr;
                }
                spin_unlock_irqrestore(&chunk->lock, flags);
        }
        read_unlock(&pool->lock);
        return 0;
}
EXPORT_SYMBOL(gen_pool_alloc);
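/*
 * Illustrative usage sketch (editor's note, not part of genalloc.c):
 * requests are rounded up to whole granules, each chunk is scanned
 * first-fit, and a return value of 0 means no sufficiently large free
 * run was found:
 *
 *      unsigned long buf = gen_pool_alloc(sram_pool, 512);
 *      if (buf == 0)
 *              return -ENOMEM;         /* pool exhausted or fragmented */
 */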
/*
 * Free the specified memory back to the specified pool.
 *
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
        struct list_head *_chunk;
        struct gen_pool_chunk *chunk;
        unsigned long flags;
        int order = pool->min_alloc_order;
        int bit, nbits;

        nbits = (size + (1UL << order) - 1) >> order;

        read_lock(&pool->lock);
        list_for_each(_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

                if (addr >= chunk->start_addr && addr < chunk->end_addr) {
                        BUG_ON(addr + size > chunk->end_addr);
                        spin_lock_irqsave(&chunk->lock, flags);
                        bit = (addr - chunk->start_addr) >> order;
                        while (nbits--)
                                __clear_bit(bit++, &chunk->bits);
                        spin_unlock_irqrestore(&chunk->lock, flags);
                        break;
                }
        }
        BUG_ON(nbits > 0);
        read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);
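Note from the listing that gen_pool_free() trusts the caller to pass the same
size that was allocated (the bit range is recomputed from addr and size, and
BUG_ON() fires if it spills past its chunk or matches no chunk), and that this
version of the allocator provides no call to destroy a pool or remove a chunk,
so pools are normally set up once at driver initialisation. A minimal
end-to-end sketch under those assumptions, with hypothetical names
(sram_example, sram_virt, sram_size):

        #include <linux/errno.h>
        #include <linux/genalloc.h>

        static struct gen_pool *sram_pool;

        static int sram_example(void *sram_virt, size_t sram_size)
        {
                unsigned long buf;

                sram_pool = gen_pool_create(8, -1);     /* 256-byte granules, any node */
                if (sram_pool == NULL)
                        return -ENOMEM;

                if (gen_pool_add(sram_pool, (unsigned long)sram_virt, sram_size, -1))
                        return -ENOMEM;

                buf = gen_pool_alloc(sram_pool, 512);   /* first-fit, two granules */
                if (buf == 0)
                        return -ENOMEM;

                /* ... use the 512 bytes at buf ... */

                gen_pool_free(sram_pool, buf, 512);     /* size must match the allocation */
                return 0;
        }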