ia64/linux-2.6.18-xen.hg

view arch/alpha/kernel/sys_eiger.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, so there is temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to
ask a domain to balloon to more than its allocation, nor would you
expect it to deliberately over-commit memory by setting balloon
targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.
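
As a rough illustration of the behaviour described above (a minimal
sketch only, not the actual drivers/xen/balloon code; the helpers
current_target(), balloon_current_pages(), increase_reservation()
and decrease_reservation() are hypothetical stand-ins), the retry
path keeps whatever a partial allocation returned and simply re-arms
a timer whenever it falls short of the target:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical helpers standing in for the real balloon driver internals. */
extern long current_target(void);          /* pages the toolstack asked for  */
extern long balloon_current_pages(void);   /* pages we currently hold        */
extern int  increase_reservation(long nr); /* nonzero if fewer than nr given */
extern int  decrease_reservation(long nr); /* nonzero on shortfall           */

static struct timer_list balloon_timer;    /* armed with setup_timer() at init */

static void balloon_process(unsigned long unused)
{
	long credit = current_target() - balloon_current_pages();
	int shortfall = 0;

	if (credit > 0)
		/* Partial success is fine: keep whatever pages Xen granted. */
		shortfall = increase_reservation(credit);
	else if (credit < 0)
		shortfall = decrease_reservation(-credit);

	/*
	 * No "hard limit" any more: if we fell short, simply retry later
	 * in the hope that host memory pressure (e.g. other guests still
	 * ballooning down) has eased.
	 */
	if (shortfall)
		mod_timer(&balloon_timer, jiffies + HZ);
}

In the real driver the timer would typically just kick a work item
rather than doing the allocation in the timer callback itself; the
point here is only that a shortfall re-arms the timer instead of
latching a limit.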

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
line source
/*
 * linux/arch/alpha/kernel/sys_eiger.c
 *
 * Copyright (C) 1995 David A Rusling
 * Copyright (C) 1996, 1999 Jay A Estabrook
 * Copyright (C) 1998, 1999 Richard Henderson
 * Copyright (C) 1999 Iain Grant
 *
 * Code supporting the EIGER (EV6+TSUNAMI).
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>

#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pci.h>
#include <asm/pgtable.h>
#include <asm/core_tsunami.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"


/* Note that this interrupt code is identical to TAKARA. */

/* Note mask bit is true for DISABLED irqs. */
static unsigned long cached_irq_mask[2] = { -1, -1 };

static inline void
eiger_update_irq_hw(unsigned long irq, unsigned long mask)
{
	int regaddr;

	mask = (irq >= 64 ? mask << 16 : mask >> ((irq - 16) & 0x30));
	regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c);
	outl(mask & 0xffff0000UL, regaddr);
}

static inline void
eiger_enable_irq(unsigned int irq)
{
	unsigned long mask;
	mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
	eiger_update_irq_hw(irq, mask);
}

static void
eiger_disable_irq(unsigned int irq)
{
	unsigned long mask;
	mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
	eiger_update_irq_hw(irq, mask);
}

static unsigned int
eiger_startup_irq(unsigned int irq)
{
	eiger_enable_irq(irq);
	return 0; /* never anything pending */
}

static void
eiger_end_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		eiger_enable_irq(irq);
}

static struct hw_interrupt_type eiger_irq_type = {
	.typename = "EIGER",
	.startup = eiger_startup_irq,
	.shutdown = eiger_disable_irq,
	.enable = eiger_enable_irq,
	.disable = eiger_disable_irq,
	.ack = eiger_disable_irq,
	.end = eiger_end_irq,
};

static void
eiger_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	unsigned intstatus;

	/*
	 * The PALcode will have passed us vectors 0x800 or 0x810,
	 * which are fairly arbitrary values and serve only to tell
	 * us whether an interrupt has come in on IRQ0 or IRQ1. If
	 * it's IRQ1 it's a PCI interrupt; if it's IRQ0, it's
	 * probably ISA, but PCI interrupts can come through IRQ0
	 * as well if the interrupt controller isn't in accelerated
	 * mode.
	 *
	 * OTOH, the accelerator thing doesn't seem to be working
	 * overly well, so what we'll do instead is try directly
	 * examining the Master Interrupt Register to see if it's a
	 * PCI interrupt, and if _not_ then we'll pass it on to the
	 * ISA handler.
	 */

	intstatus = inw(0x500) & 15;
	if (intstatus) {
		/*
		 * This is a PCI interrupt. Check each bit and
		 * despatch an interrupt if it's set.
		 */

		if (intstatus & 8) handle_irq(16+3, regs);
		if (intstatus & 4) handle_irq(16+2, regs);
		if (intstatus & 2) handle_irq(16+1, regs);
		if (intstatus & 1) handle_irq(16+0, regs);
	} else {
		isa_device_interrupt(vector, regs);
	}
}

static void
eiger_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
	int irq = (vector - 0x800) >> 4;
	handle_irq(irq, regs);
}

static void __init
eiger_init_irq(void)
{
	long i;

	outb(0, DMA1_RESET_REG);
	outb(0, DMA2_RESET_REG);
	outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
	outb(0, DMA2_MASK_REG);

	if (alpha_using_srm)
		alpha_mv.device_interrupt = eiger_srm_device_interrupt;

	for (i = 16; i < 128; i += 16)
		eiger_update_irq_hw(i, -1);

	init_i8259a_irqs();

	for (i = 16; i < 128; ++i) {
		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[i].chip = &eiger_irq_type;
	}
}

static int __init
eiger_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	u8 irq_orig;

	/* The SRM console has already calculated out the IRQ values for
	   option cards. As this works, let's just read in the value already
	   set and change it to a value usable by Linux.

	   All the IRQ values generated by the console are greater than 90,
	   so we subtract 80 because it is (90 - allocated ISA IRQs). */

	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq_orig);

	return irq_orig - 0x80;
}

static u8 __init
eiger_swizzle(struct pci_dev *dev, u8 *pinp)
{
	struct pci_controller *hose = dev->sysdata;
	int slot, pin = *pinp;
	int bridge_count = 0;

	/* Find the number of backplane bridges. */
	int backplane = inw(0x502) & 0x0f;

	switch (backplane)
	{
	case 0x00: bridge_count = 0; break; /* No bridges */
	case 0x01: bridge_count = 1; break; /* 1 */
	case 0x03: bridge_count = 2; break; /* 2 */
	case 0x07: bridge_count = 3; break; /* 3 */
	case 0x0f: bridge_count = 4; break; /* 4 */
	};

	slot = PCI_SLOT(dev->devfn);
	while (dev->bus->self) {
		/* Check for built-in bridges on hose 0. */
		if (hose->index == 0
		    && (PCI_SLOT(dev->bus->self->devfn)
			> 20 - bridge_count)) {
			slot = PCI_SLOT(dev->devfn);
			break;
		}
		/* Must be a card-based bridge. */
		pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));

		/* Move up the chain of bridges. */
		dev = dev->bus->self;
	}
	*pinp = pin;
	return slot;
}

/*
 * The System Vectors
 */

struct alpha_machine_vector eiger_mv __initmv = {
	.vector_name = "Eiger",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_TSUNAMI_IO,
	.machine_check = tsunami_machine_check,
	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address = DEFAULT_IO_BASE,
	.min_mem_address = DEFAULT_MEM_BASE,
	.pci_dac_offset = TSUNAMI_DAC_OFFSET,

	.nr_irqs = 128,
	.device_interrupt = eiger_device_interrupt,

	.init_arch = tsunami_init_arch,
	.init_irq = eiger_init_irq,
	.init_rtc = common_init_rtc,
	.init_pci = common_init_pci,
	.kill_arch = tsunami_kill_arch,
	.pci_map_irq = eiger_map_irq,
	.pci_swizzle = eiger_swizzle,
};
ALIAS_MV(eiger)