ia64/linux-2.6.18-xen.hg

view arch/alpha/kernel/irq_srm.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).
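
For reference, the behaviour being replaced looks roughly like the
sketch below. This is not a quote of the driver: the identifiers
(increase_reservation, ask_xen_for_pages, bs.hard_limit,
bs.current_pages) are assumed names used only to illustrate the
"record a hard limit and give up" pattern.

/* Sketch of the old behaviour; all names are illustrative assumptions. */
static int increase_reservation(unsigned long nr_pages)
{
        /* ask_xen_for_pages() stands in for the real hypercall and
         * returns how many pages Xen actually granted. */
        long granted = ask_xen_for_pages(nr_pages);

        if (granted < (long)nr_pages) {
                /* Assume we reached our full allocation: record it as
                 * a hard limit and give up until a new target is set. */
                bs.hard_limit = bs.current_pages + (granted > 0 ? granted : 0);
                return -ENOMEM;
        }

        bs.current_pages += nr_pages;
        return 0;
}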

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, so there is temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor would you expect
it to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we only partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.
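
The retry behaviour described above could look roughly like the sketch
below. Again this is illustrative rather than the actual patch: the
identifiers (balloon_process, current_target, bs.current_pages,
increase_reservation, decrease_reservation, balloon_timer) are
assumptions about the driver's layout.

/* Sketch of the new behaviour; all names are illustrative assumptions. */
static void balloon_process(void *unused)
{
        long credit = current_target() - bs.current_pages;

        if (credit > 0)
                /* Keep whatever pages Xen grants, even on partial
                 * success, instead of handing them back. */
                increase_reservation(credit);
        else if (credit < 0)
                decrease_reservation(-credit);

        /* If the target has still not been reached (for example because
         * the host is under temporary memory pressure), retry on a
         * timer instead of recording a hard limit and giving up. */
        if (current_target() != bs.current_pages)
                mod_timer(&balloon_timer, jiffies + HZ);
}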

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
line source
/*
 * Handle interrupts from the SRM, assuming no additional weirdness.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/irq.h>

#include "proto.h"
#include "irq_impl.h"

/*
 * Is the palcode SMP safe? In other words: can we call cserve_ena/dis
 * at the same time in multiple CPUs? To be safe I added a spinlock
 * but it can be removed trivially if the palcode is robust against smp.
 */
DEFINE_SPINLOCK(srm_irq_lock);

static inline void
srm_enable_irq(unsigned int irq)
{
        spin_lock(&srm_irq_lock);
        cserve_ena(irq - 16);
        spin_unlock(&srm_irq_lock);
}

static void
srm_disable_irq(unsigned int irq)
{
        spin_lock(&srm_irq_lock);
        cserve_dis(irq - 16);
        spin_unlock(&srm_irq_lock);
}

static unsigned int
srm_startup_irq(unsigned int irq)
{
        srm_enable_irq(irq);
        return 0;
}

static void
srm_end_irq(unsigned int irq)
{
        if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                srm_enable_irq(irq);
}

/* Handle interrupts from the SRM, assuming no additional weirdness. */
static struct hw_interrupt_type srm_irq_type = {
        .typename       = "SRM",
        .startup        = srm_startup_irq,
        .shutdown       = srm_disable_irq,
        .enable         = srm_enable_irq,
        .disable        = srm_disable_irq,
        .ack            = srm_disable_irq,
        .end            = srm_end_irq,
};

void __init
init_srm_irqs(long max, unsigned long ignore_mask)
{
        long i;

        for (i = 16; i < max; ++i) {
                if (i < 64 && ((ignore_mask >> i) & 1))
                        continue;
                irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
                irq_desc[i].chip = &srm_irq_type;
        }
}

void
srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
        int irq = (vector - 0x800) >> 4;
        handle_irq(irq, regs);
}