ia64/linux-2.6.18-xen.hg

lib/iomap.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, creating temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor to deliberately
over-commit memory by setting balloon targets that exceed the total
host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer, in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.
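
After this change the driver's worker takes roughly the following
shape (a sketch only: the names balloon_process, balloon_mutex,
current_target, current_pages, increase_reservation,
decrease_reservation and balloon_timer follow the 2.6.18 Xen balloon
driver, but the body here is an approximation, not the patch itself):

    static void balloon_process(void *unused)
    {
            int need_sleep = 0;
            long credit;

            down(&balloon_mutex);

            do {
                    credit = current_target() - current_pages;
                    if (credit > 0)
                            /* a partial grant is kept, not handed back */
                            need_sleep = (increase_reservation(credit) != 0);
                    if (credit < 0)
                            need_sleep = (decrease_reservation(-credit) != 0);
            } while (credit && !need_sleep);

            /* Not at target yet (e.g. host memory pressure): retry on
             * a timer rather than record a "hard limit" and give up. */
            if (current_target() != current_pages)
                    mod_timer(&balloon_timer, jiffies + HZ);

            up(&balloon_mutex);
    }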

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100
parents 831230e53067

/*
 * Implement the default iomap interfaces
 *
 * (C) Copyright 2004 Linus Torvalds
 */
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>

/*
 * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
 * access or a MMIO access, these functions don't care. The info is
 * encoded in the hardware mapping set up by the mapping functions
 * (or the cookie itself, depending on implementation and hw).
 *
 * The generic routines don't assume any hardware mappings, and just
 * encode the PIO/MMIO as part of the cookie. They coldly assume that
 * the MMIO IO mappings are not in the low address range.
 *
 * Architectures for which this is not true can't use this generic
 * implementation and should do their own copy.
 */

#ifndef HAVE_ARCH_PIO_SIZE
/*
 * We encode the physical PIO addresses (0-0xffff) into the
 * pointer by offsetting them with a constant (0x10000) and
 * assuming that all the low addresses are always PIO. That means
 * we can do some sanity checks on the low bits, and don't
 * need to just take things for granted.
 */
#define PIO_OFFSET      0x10000UL
#define PIO_MASK        0x0ffffUL
#define PIO_RESERVED    0x40000UL
#endif
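
/*
 * Worked example of the encoding above: physical port 0x3f8 becomes
 * the cookie 0x3f8 + PIO_OFFSET == 0x103f8. That value is below
 * PIO_RESERVED (0x40000), so the accessors below classify it as PIO,
 * check that its upper bits are exactly PIO_OFFSET, and recover the
 * port as 0x103f8 & PIO_MASK == 0x3f8. An ioremap()ed MMIO address is
 * assumed to lie above PIO_RESERVED and is used unchanged.
 */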

/*
 * Ugly macros are a way of life.
 */
#define VERIFY_PIO(port) BUG_ON((port & ~PIO_MASK) != PIO_OFFSET)

#define IO_COND(addr, is_pio, is_mmio) do {                     \
        unsigned long port = (unsigned long __force)addr;       \
        if (port < PIO_RESERVED) {                              \
                VERIFY_PIO(port);                               \
                port &= PIO_MASK;                               \
                is_pio;                                         \
        } else {                                                \
                is_mmio;                                        \
        }                                                       \
} while (0)
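
/*
 * For instance, ioread8() below expands via IO_COND() to roughly the
 * following, with VERIFY_PIO() providing the BUG_ON sanity check:
 *
 *	unsigned long port = (unsigned long __force)addr;
 *	if (port < PIO_RESERVED) {
 *		BUG_ON((port & ~PIO_MASK) != PIO_OFFSET);
 *		return inb(port & PIO_MASK);
 *	} else {
 *		return readb(addr);
 *	}
 */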

unsigned int fastcall ioread8(void __iomem *addr)
{
        IO_COND(addr, return inb(port), return readb(addr));
}
unsigned int fastcall ioread16(void __iomem *addr)
{
        IO_COND(addr, return inw(port), return readw(addr));
}
unsigned int fastcall ioread16be(void __iomem *addr)
{
        IO_COND(addr, return inw(port), return be16_to_cpu(__raw_readw(addr)));
}
unsigned int fastcall ioread32(void __iomem *addr)
{
        IO_COND(addr, return inl(port), return readl(addr));
}
unsigned int fastcall ioread32be(void __iomem *addr)
{
        IO_COND(addr, return inl(port), return be32_to_cpu(__raw_readl(addr)));
}
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);

void fastcall iowrite8(u8 val, void __iomem *addr)
{
        IO_COND(addr, outb(val, port), writeb(val, addr));
}
void fastcall iowrite16(u16 val, void __iomem *addr)
{
        IO_COND(addr, outw(val, port), writew(val, addr));
}
void fastcall iowrite16be(u16 val, void __iomem *addr)
{
        IO_COND(addr, outw(val, port), __raw_writew(cpu_to_be16(val), addr));
}
void fastcall iowrite32(u32 val, void __iomem *addr)
{
        IO_COND(addr, outl(val, port), writel(val, addr));
}
void fastcall iowrite32be(u32 val, void __iomem *addr)
{
        IO_COND(addr, outl(val, port), __raw_writel(cpu_to_be32(val), addr));
}
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);

/*
 * These are the "repeat MMIO read/write" functions.
 * Note the "__raw" accesses, since we don't want to
 * convert to CPU byte order. We write in "IO byte
 * order" (we also don't have IO barriers).
 */
static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
{
        while (--count >= 0) {
                u8 data = __raw_readb(addr);
                *dst = data;
                dst++;
        }
}
static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
{
        while (--count >= 0) {
                u16 data = __raw_readw(addr);
                *dst = data;
                dst++;
        }
}
static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
{
        while (--count >= 0) {
                u32 data = __raw_readl(addr);
                *dst = data;
                dst++;
        }
}

static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
{
        while (--count >= 0) {
                __raw_writeb(*src, addr);
                src++;
        }
}
static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
{
        while (--count >= 0) {
                __raw_writew(*src, addr);
                src++;
        }
}
static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
{
        while (--count >= 0) {
                __raw_writel(*src, addr);
                src++;
        }
}

void fastcall ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
        IO_COND(addr, insb(port, dst, count), mmio_insb(addr, dst, count));
}
void fastcall ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
        IO_COND(addr, insw(port, dst, count), mmio_insw(addr, dst, count));
}
void fastcall ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
        IO_COND(addr, insl(port, dst, count), mmio_insl(addr, dst, count));
}
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(ioread32_rep);

void fastcall iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
        IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count));
}
void fastcall iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
        IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count));
}
void fastcall iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
        IO_COND(addr, outsl(port, src, count), mmio_outsl(addr, src, count));
}
EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
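
/*
 * A typical use of these string variants is draining a device FIFO
 * into a buffer. Sketch for a hypothetical device whose 32-bit data
 * FIFO sits at offset DATA_REG (an illustrative name, not defined in
 * this file):
 *
 *	u32 buf[64];
 *
 *	ioread32_rep(base + DATA_REG, buf, ARRAY_SIZE(buf));
 *
 * On the MMIO side every iteration targets the same register, data
 * lands in "IO byte order" (no byteswapping), and no IO barriers are
 * implied, per the comment above; for a PIO cookie the same call
 * becomes insl().
 */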

/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
        if (port > PIO_MASK)
                return NULL;
        return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
}

void ioport_unmap(void __iomem *addr)
{
        /* Nothing to do */
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
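
/*
 * Round-trip sketch (hypothetical legacy UART at ports 0x3f8-0x3ff):
 *
 *	void __iomem *uart = ioport_map(0x3f8, 8);
 *	u8 lsr = ioread8(uart + 5);
 *	ioport_unmap(uart);
 *
 * ioport_map() returns the cookie 0x103f8, ioread8(uart + 5) decodes
 * to inb(0x3fd), and ioport_unmap() is a no-op for port cookies. The
 * access itself, ioread8(), works unchanged if the cookie instead
 * came from an MMIO mapping, which is the point of this interface.
 */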

/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
        unsigned long start = pci_resource_start(dev, bar);
        unsigned long len = pci_resource_len(dev, bar);
        unsigned long flags = pci_resource_flags(dev, bar);

        if (!len || !start)
                return NULL;
        if (maxlen && len > maxlen)
                len = maxlen;
        if (flags & IORESOURCE_IO)
                return ioport_map(start, len);
        if (flags & IORESOURCE_MEM) {
                if (flags & IORESOURCE_CACHEABLE)
                        return ioremap(start, len);
                return ioremap_nocache(start, len);
        }
        /* What? */
        return NULL;
}

void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
        IO_COND(addr, /* nothing */, iounmap(addr));
}
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
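
/*
 * Putting it together: a sketch of pci_iomap() in a driver probe
 * routine. The driver, device and register offset are hypothetical;
 * BAR 0 is assumed to be the device's register window:
 *
 *	static int mydev_probe(struct pci_dev *pdev,
 *			       const struct pci_device_id *id)
 *	{
 *		void __iomem *regs;
 *
 *		if (pci_enable_device(pdev))
 *			return -ENODEV;
 *
 *		regs = pci_iomap(pdev, 0, 0);
 *		if (!regs)
 *			return -ENOMEM;
 *
 *		iowrite32(0x1, regs + 0x04);
 *
 *		pci_iounmap(pdev, regs);
 *		return 0;
 *	}
 *
 * Passing maxlen == 0 maps the whole BAR, and the iowrite32() works
 * whether BAR 0 decodes as IO ports or as memory, because pci_iomap()
 * hands back either a PIO cookie or an ioremap()ed address.
 */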