ia64/linux-2.6.18-xen.hg

view arch/alpha/kernel/semaphore.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, ballooning may in fact have failed due to memory pressure in
the host, in which case it is desirable to keep attempting to reach
the target in case memory becomes available. The most likely scenario
is that some guests are ballooning down while others are ballooning
up, causing temporary memory pressure while things stabilise. You
would not expect a well-behaved toolstack to ask a domain to balloon
to more than its allocation, nor would you expect it to deliberately
over-commit memory by setting balloon targets which exceed the total
host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), we may as well keep
those pages rather than returning them to Xen.
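
As a rough illustration of the behaviour this patch produces, here is
a minimal user-space sketch; increase_reservation() below is a stub
standing in for the real hypercall path, and all names and numbers
are illustrative, not taken from the driver:

#include <stdio.h>

static long current_pages = 1000;  /* pages the guest currently holds */
static long target_pages  = 1500;  /* target set by the toolstack */
static long host_free     = 300;   /* simulated host memory pressure */

/* Stub: grant at most what the "host" has free; may be a partial grant. */
static long increase_reservation(long nr)
{
        long granted = nr < host_free ? nr : host_free;
        host_free -= granted;
        host_free += 120;       /* other guests balloon down over time */
        return granted;
}

int main(void)
{
        int tick = 0;

        while (current_pages < target_pages) {
                long want = target_pages - current_pages;
                long got = increase_reservation(want);

                /* Keep partial grants instead of returning them to Xen. */
                current_pages += got;
                printf("tick %d: asked %ld, got %ld, now %ld/%ld\n",
                       ++tick, want, got, current_pages, target_pages);

                /* No hard limit: while still short of the target, the real
                 * driver re-arms a timer and retries; this loop models that. */
        }
        return 0;
}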

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
/*
 * Alpha semaphore implementation.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999, 2000 Richard Henderson
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>

/*
 * This is basically the PPC semaphore scheme ported to use
 * the Alpha ll/sc sequences, so see the PPC code for
 * credits.
 */

/*
 * Atomically update sem->count.
 * This does the equivalent of the following:
 *
 *      old_count = sem->count;
 *      tmp = MAX(old_count, 0) + incr;
 *      sem->count = tmp;
 *      return old_count;
 */
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
        long old_count, tmp = 0;

        __asm__ __volatile__(
        "1:     ldl_l   %0,%2\n"        /* load-locked old_count from sem->count */
        "       cmovgt  %0,%0,%1\n"     /* tmp = old_count if old_count > 0, else 0 */
        "       addl    %1,%3,%1\n"     /* tmp += incr */
        "       stl_c   %1,%2\n"        /* store-conditional tmp to sem->count */
        "       beq     %1,2f\n"        /* store failed: branch out to retry */
        "       mb\n"                   /* memory barrier on success */
        ".subsection 2\n"
        "2:     br      1b\n"           /* retry the ll/sc sequence */
        ".previous"
        : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
        : "Ir" (incr), "1" (tmp), "m" (sem->count));

        return old_count;
}
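
/*
 * Portable C illustration (not part of this kernel file) of what the
 * ll/sc loop above computes, using a GCC atomic builtin.  The function
 * name is hypothetical and 'count' stands in for &sem->count.
 */
static inline int sem_update_count_portable(int *count, int incr)
{
        int old = *count;
        int new;

        do {
                new = (old > 0 ? old : 0) + incr;
                /* On failure, 'old' is refreshed with the current value. */
        } while (!__atomic_compare_exchange_n(count, &old, new, 0,
                                              __ATOMIC_ACQUIRE,
                                              __ATOMIC_RELAXED));
        return old;
}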

/*
 * Perform the "down" function.  Return zero for semaphore acquired,
 * return negative for signalled out of the function.
 *
 * If called from down, the return is ignored and the wait loop is
 * not interruptible.  This means that a task waiting on a semaphore
 * using "down()" cannot be killed until someone does an "up()" on
 * the semaphore.
 *
 * If called from down_interruptible, the return value gets checked
 * upon return.  If the return value is negative then the task continues
 * with the negative value in the return register (it can be tested by
 * the caller).
 *
 * Either form may be used in conjunction with "up()".
 */
void __sched
__down_failed(struct semaphore *sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);

#ifdef CONFIG_DEBUG_SEMAPHORE
        printk("%s(%d): down failed(%p)\n",
               tsk->comm, tsk->pid, sem);
#endif

        tsk->state = TASK_UNINTERRUPTIBLE;
        wmb();
        add_wait_queue_exclusive(&sem->wait, &wait);

        /*
         * Try to get the semaphore.  If the count is > 0, then we've
         * got the semaphore; we decrement count and exit the loop.
         * If the count is 0 or negative, we set it to -1, indicating
         * that we are asleep, and then sleep.
         */
        while (__sem_update_count(sem, -1) <= 0) {
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }
        remove_wait_queue(&sem->wait, &wait);
        tsk->state = TASK_RUNNING;

        /*
         * If there are any more sleepers, wake one of them up so
         * that it can either get the semaphore, or set count to -1
         * indicating that there are still processes sleeping.
         */
        wake_up(&sem->wait);

#ifdef CONFIG_DEBUG_SEMAPHORE
        printk("%s(%d): down acquired(%p)\n",
               tsk->comm, tsk->pid, sem);
#endif
}

int __sched
__down_failed_interruptible(struct semaphore *sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        long ret = 0;

#ifdef CONFIG_DEBUG_SEMAPHORE
        printk("%s(%d): down failed(%p)\n",
               tsk->comm, tsk->pid, sem);
#endif

        tsk->state = TASK_INTERRUPTIBLE;
        wmb();
        add_wait_queue_exclusive(&sem->wait, &wait);

        while (__sem_update_count(sem, -1) <= 0) {
                if (signal_pending(current)) {
                        /*
                         * A signal is pending - give up trying.
                         * Set sem->count to 0 if it is negative,
                         * since we are no longer sleeping.
                         */
                        __sem_update_count(sem, 0);
                        ret = -EINTR;
                        break;
                }
                schedule();
                set_task_state(tsk, TASK_INTERRUPTIBLE);
        }

        remove_wait_queue(&sem->wait, &wait);
        tsk->state = TASK_RUNNING;
        wake_up(&sem->wait);

#ifdef CONFIG_DEBUG_SEMAPHORE
        printk("%s(%d): down %s(%p)\n",
               current->comm, current->pid,
               (ret < 0 ? "interrupted" : "acquired"), sem);
#endif
        return ret;
}

void
__up_wakeup(struct semaphore *sem)
{
        /*
         * Note that we incremented count in up() before we came here,
         * but that was ineffective since the result was <= 0, and
         * any negative value of count is equivalent to 0.
         * This ends up setting count to 1, unless count is now > 0
         * (i.e. because some other cpu has called up() in the meantime),
         * in which case we just increment count.
         */
        __sem_update_count(sem, 1);
        wake_up(&sem->wait);
}

void __sched
down(struct semaphore *sem)
{
#ifdef WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
        printk("%s(%d): down(%p) <count=%d> from %p\n",
               current->comm, current->pid, sem,
               atomic_read(&sem->count), __builtin_return_address(0));
#endif
        __down(sem);
}

int __sched
down_interruptible(struct semaphore *sem)
{
#ifdef WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
        printk("%s(%d): down(%p) <count=%d> from %p\n",
               current->comm, current->pid, sem,
               atomic_read(&sem->count), __builtin_return_address(0));
#endif
        return __down_interruptible(sem);
}

int
down_trylock(struct semaphore *sem)
{
        int ret;

#ifdef WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif

        ret = __down_trylock(sem);

#ifdef CONFIG_DEBUG_SEMAPHORE
        printk("%s(%d): down_trylock %s from %p\n",
               current->comm, current->pid,
               ret ? "failed" : "acquired",
               __builtin_return_address(0));
#endif

        return ret;
}

void
up(struct semaphore *sem)
{
#ifdef WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
        printk("%s(%d): up(%p) <count=%d> from %p\n",
               current->comm, current->pid, sem,
               atomic_read(&sem->count), __builtin_return_address(0));
#endif
        __up(sem);
}
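
For reference, a typical caller pattern for these primitives, sketched
against the 2.6-era semaphore API; the semaphore name, DECLARE_MUTEX
initialisation and error path here are illustrative, not part of this
file:

static DECLARE_MUTEX(my_sem);           /* counting semaphore initialised to 1 */

int do_work(void)
{
        /* Uninterruptible: sleeps until the semaphore is acquired. */
        down(&my_sem);
        /* ... critical section ... */
        up(&my_sem);

        /* Interruptible: returns nonzero if a signal arrived first. */
        if (down_interruptible(&my_sem))
                return -EINTR;
        /* ... critical section ... */
        up(&my_sem);

        return 0;
}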