ia64/linux-2.6.18-xen.hg

view lib/spinlock_debug.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, creating temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor would you expect
it to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
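For illustration, here is a minimal sketch of the retry scheme the
commit message describes. This is not the patch itself: the names
balloon_process(), current_target(), increase_reservation(),
decrease_reservation() and balloon_timer are assumptions modelled on
the Xen balloon driver, and the helpers are taken to update
current_pages for whatever pages were actually transferred.

/*
 * Illustrative sketch only (not the actual patch). Assumes
 * increase_reservation()/decrease_reservation() account any pages
 * that were transferred in current_pages and return nonzero when
 * they fall short of the request.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list balloon_timer;
static unsigned long current_pages;

static unsigned long current_target(void);
static int increase_reservation(unsigned long nr_pages);
static int decrease_reservation(unsigned long nr_pages);

static void balloon_process(void)
{
	long credit = current_target() - current_pages;

	if (credit > 0 && increase_reservation(credit)) {
		/*
		 * Partial or failed allocation: keep any pages Xen did
		 * grant and retry later, instead of recording a "hard
		 * limit" and giving up until the target is rewritten.
		 */
		mod_timer(&balloon_timer, jiffies + HZ);
	}
	if (credit < 0 && decrease_reservation(-credit)) {
		/* Ballooning down retries on the same timer. */
		mod_timer(&balloon_timer, jiffies + HZ);
	}
}

Either direction simply re-arms the timer when it falls short, so the
driver converges on the target as host memory becomes available rather
than latching a one-shot failure. The file source at this changeset
follows.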
/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/module.h>

void __spin_lock_init(spinlock_t *lock, const char *name,
		      struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key);
#endif
	lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	lock->magic = SPINLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__spin_lock_init);

void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key);
#endif
	lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);

/*
 * Report a suspected lock-usage bug; debug_locks_off() returns false
 * once lock debugging has already been disabled, so only the first
 * problem is reported.
 */
static void spin_bug(spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = NULL;

	if (!debug_locks_off())
		return;

	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
		owner = lock->owner;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, current->pid);
	printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d\n",
		lock, lock->magic,
		owner ? owner->comm : "<none>",
		owner ? owner->pid : -1,
		lock->owner_cpu);
	dump_stack();
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

static inline void
debug_spin_lock_before(spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_spin_lock_after(spinlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}

static inline void debug_spin_unlock(spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

/*
 * Spin for roughly one second (loops_per_jiffy * HZ delay iterations)
 * before reporting a suspected lockup; the report is printed once per
 * call and the CPU then keeps spinning until the lock is acquired.
 */
static void __spin_lock_debug(spinlock_t *lock)
{
	int print_once = 1;
	u64 i;

	for (;;) {
		for (i = 0; i < loops_per_jiffy * HZ; i++) {
			if (__raw_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}

void _raw_spin_lock(spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}

int _raw_spin_trylock(spinlock_t *lock)
{
	int ret = __raw_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void _raw_spin_unlock(spinlock_t *lock)
{
	debug_spin_unlock(lock);
	__raw_spin_unlock(&lock->raw_lock);
}

static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		current->pid, lock);
	dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
	int print_once = 1;
	u64 i;

	for (;;) {
		for (i = 0; i < loops_per_jiffy * HZ; i++) {
			if (__raw_read_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif

void _raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	__raw_read_lock(&lock->raw_lock);
}

int _raw_read_trylock(rwlock_t *lock)
{
	int ret = __raw_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void _raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	__raw_read_unlock(&lock->raw_lock);
}

static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}

static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

#if 0		/* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
	int print_once = 1;
	u64 i;

	for (;;) {
		for (i = 0; i < loops_per_jiffy * HZ; i++) {
			if (__raw_write_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif

void _raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	__raw_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}

int _raw_write_trylock(rwlock_t *lock)
{
	int ret = __raw_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void _raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	__raw_write_unlock(&lock->raw_lock);
}