ia64/linux-2.6.18-xen.hg

view arch/alpha/kernel/irq.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, in which case it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, causing temporary memory pressure while
things stabilise. A well-behaved toolstack would not ask a domain to
balloon to more than its allocation, nor would it deliberately
over-commit memory by setting balloon targets which exceed the total
host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), we may as well keep
those pages rather than returning them to Xen.
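
The combined effect can be sketched as a small, self-contained C model:
keep whatever pages the host did grant, and simply try again later
instead of recording a hard limit. This is purely illustrative and is
not the actual drivers/xen/balloon/balloon.c change; try_populate() is
a hypothetical stand-in for the populate-physmap hypercall path, and
the retry is a plain loop rather than the driver's timer-driven worker.

/*
 * Illustrative model only (not the real balloon driver): the host is
 * simulated as granting at most 100 pages per attempt, so the guest
 * reaches its target by keeping partial grants and retrying.
 */
#include <stdio.h>

static long current_pages = 1000;       /* pages currently held by the guest */
static long target_pages  = 1400;       /* target set by the toolstack */

/* Hypothetical stand-in for the populate-physmap hypercall. */
static long try_populate(long nr_requested)
{
        return nr_requested < 100 ? nr_requested : 100;
}

int main(void)
{
        int attempt = 0;

        while (current_pages < target_pages) {
                long wanted  = target_pages - current_pages;
                long granted = try_populate(wanted);

                /* Keep whatever we were given, even on partial success. */
                current_pages += granted;
                attempt++;

                if (granted < wanted)
                        /*
                         * Previously this became the "hard limit" and the
                         * operation was abandoned; now we just retry (the
                         * real driver re-arms a timer, here we loop).
                         */
                        printf("attempt %d: got %ld of %ld pages, will retry\n",
                               attempt, granted, wanted);
        }

        printf("reached target of %ld pages after %d attempts\n",
               target_pages, attempt);
        return 0;
}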

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
/*
 * linux/arch/alpha/kernel/irq.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/uaccess.h>

volatile unsigned long irq_err_count;

void ack_bad_irq(unsigned int irq)
{
        irq_err_count++;
        printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq);
}

#ifdef CONFIG_SMP
static char irq_user_affinity[NR_IRQS];

int
select_smp_affinity(unsigned int irq)
{
        static int last_cpu;
        int cpu = last_cpu + 1;

        if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq])
                return 1;

        while (!cpu_possible(cpu))
                cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
        last_cpu = cpu;

        irq_desc[irq].affinity = cpumask_of_cpu(cpu);
        irq_desc[irq].chip->set_affinity(irq, cpumask_of_cpu(cpu));
        return 0;
}
#endif /* CONFIG_SMP */

int
show_interrupts(struct seq_file *p, void *v)
{
#ifdef CONFIG_SMP
        int j;
#endif
        int irq = *(loff_t *) v;
        struct irqaction * action;
        unsigned long flags;

#ifdef CONFIG_SMP
        if (irq == 0) {
                seq_puts(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d       ", j);
                seq_putc(p, '\n');
        }
#endif

        if (irq < ACTUAL_NR_IRQS) {
                spin_lock_irqsave(&irq_desc[irq].lock, flags);
                action = irq_desc[irq].action;
                if (!action)
                        goto unlock;
                seq_printf(p, "%3d: ", irq);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(irq));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[irq]);
#endif
                seq_printf(p, " %14s", irq_desc[irq].chip->typename);
                seq_printf(p, " %c%s",
                           (action->flags & IRQF_DISABLED)?'+':' ',
                           action->name);

                for (action=action->next; action; action = action->next) {
                        seq_printf(p, ", %c%s",
                                   (action->flags & IRQF_DISABLED)?'+':' ',
                                   action->name);
                }

                seq_putc(p, '\n');
unlock:
                spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        } else if (irq == ACTUAL_NR_IRQS) {
#ifdef CONFIG_SMP
                seq_puts(p, "IPI: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10lu ", cpu_data[j].ipi_count);
                seq_putc(p, '\n');
#endif
                seq_printf(p, "ERR: %10lu\n", irq_err_count);
        }
        return 0;
}

/*
 * handle_irq handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */

#define MAX_ILLEGAL_IRQS 16

void
handle_irq(int irq, struct pt_regs * regs)
{
        /*
         * We ack quickly, we don't want the irq controller
         * thinking we're snobs just because some other CPU has
         * disabled global interrupts (we have already done the
         * INT_ACK cycles, it's too late to try to pretend to the
         * controller that we aren't taking the interrupt).
         *
         * 0 return value means that this irq is already being
         * handled by some other CPU. (or is disabled)
         */
        static unsigned int illegal_count=0;

        if ((unsigned) irq > ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS) {
                irq_err_count++;
                illegal_count++;
                printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n",
                       irq);
                return;
        }

        irq_enter();
        /*
         * __do_IRQ() must be called with IPL_MAX. Note that we do not
         * explicitly enable interrupts afterwards - some MILO PALcode
         * (namely LX164 one) seems to have severe problems with RTI
         * at IPL 0.
         */
        local_irq_disable();
        __do_IRQ(irq, regs);
        irq_exit();
}