ia64/linux-2.6.18-xen.hg

arch/alpha/kernel/pci-noop.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, causing temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to
ask a domain to balloon to more than its allocation, nor would you
expect it to deliberately over-commit memory by setting balloon
targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.
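
As a rough illustration of that behaviour, consider the shape of the
balloon worker after this change: whenever the target is missed, in
either direction, the retry timer is simply re-armed. This is a
minimal sketch, not the actual patch; names such as balloon_timer,
current_target(), increase_reservation() and decrease_reservation()
are assumed here.

static unsigned long current_pages;
static struct timer_list balloon_timer;
static unsigned long current_target(void);
static int increase_reservation(unsigned long nr_pages);
static int decrease_reservation(unsigned long nr_pages);

static void balloon_process(void *unused)
{
	int need_sleep = 0;

	while (!need_sleep) {
		long credit = current_target() - current_pages;

		if (credit > 0)
			need_sleep = (increase_reservation(credit) != 0);
		if (credit < 0)
			need_sleep = (decrease_reservation(-credit) != 0);
		if (credit == 0)
			break;
	}

	/* No hard limit any more: if the target is still unmet, retry
	   on the timer, exactly as when decreasing the reservation. */
	if (current_target() != current_pages)
		mod_timer(&balloon_timer, jiffies + HZ);
}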

Also, if we only partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), then we may as well keep
those pages rather than returning them to Xen.
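
The partial-success path can be sketched the same way: keep whatever
Xen actually granted and report a shortfall so the caller retries.
XENMEM_populate_physmap is the real hypercall command, but frame_list
and balloon_append_page() are assumed names, not the driver's actual
code.

static int increase_reservation(unsigned long nr_pages)
{
	unsigned long i;
	long rc;
	struct xen_memory_reservation reservation = {
		.extent_order = 0,
		.address_bits = 0,
		.domid        = DOMID_SELF
	};

	/* frame_list[] is assumed to already hold nr_pages frames. */
	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;

	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (rc < 0)
		goto out;

	/* Xen may grant fewer extents than requested under memory
	   pressure; keep the rc pages we did receive rather than
	   returning them. */
	for (i = 0; i < rc; i++)
		balloon_append_page(frame_list[i]);

	current_pages += rc;

 out:
	/* A non-zero result makes balloon_process() re-arm the timer. */
	return rc < 0 ? rc : (rc != nr_pages);
}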

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
/*
 * linux/arch/alpha/kernel/pci-noop.c
 *
 * Stub PCI interfaces for Jensen-specific kernels.
 */

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>

#include "proto.h"

/*
 * The PCI controller list.
 */

struct pci_controller *hose_head, **hose_tail = &hose_head;
struct pci_controller *pci_isa_hose;


struct pci_controller * __init
alloc_pci_controller(void)
{
	struct pci_controller *hose;

	hose = alloc_bootmem(sizeof(*hose));

	*hose_tail = hose;
	hose_tail = &hose->next;

	return hose;
}

struct resource * __init
alloc_resource(void)
{
	struct resource *res;

	res = alloc_bootmem(sizeof(*res));

	return res;
}

asmlinkage long
sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
{
	struct pci_controller *hose;

	/* from hose or from bus.devfn */
	if (which & IOBASE_FROM_HOSE) {
		for (hose = hose_head; hose; hose = hose->next)
			if (hose->index == bus)
				break;
		if (!hose)
			return -ENODEV;
	} else {
		/* Special hook for ISA access.  */
		if (bus == 0 && dfn == 0)
			hose = pci_isa_hose;
		else
			return -ENODEV;
	}

	switch (which & ~IOBASE_FROM_HOSE) {
	case IOBASE_HOSE:
		return hose->index;
	case IOBASE_SPARSE_MEM:
		return hose->sparse_mem_base;
	case IOBASE_DENSE_MEM:
		return hose->dense_mem_base;
	case IOBASE_SPARSE_IO:
		return hose->sparse_io_base;
	case IOBASE_DENSE_IO:
		return hose->dense_io_base;
	case IOBASE_ROOT_BUS:
		return hose->bus->number;
	}

	return -EOPNOTSUPP;
}

asmlinkage long
sys_pciconfig_read(unsigned long bus, unsigned long dfn,
		   unsigned long off, unsigned long len, void *buf)
{
	/* No PCI bus here: privileged callers learn there is no
	   device, anyone else is refused outright.  */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	else
		return -ENODEV;
}

asmlinkage long
sys_pciconfig_write(unsigned long bus, unsigned long dfn,
		    unsigned long off, unsigned long len, void *buf)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	else
		return -ENODEV;
}

/* Stubs for the routines in pci_iommu.c: */

void *
pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	return NULL;
}

void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
}

dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size,
	       int direction)
{
	return (dma_addr_t) 0;
}

void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
}

int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	return 0;
}

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
}

int
pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
{
	return 0;
}

/* Generic DMA mapping functions: */

void *
dma_alloc_coherent(struct device *dev, size_t size,
		   dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* Only restrict the allocation to the DMA zone when the
	   device cannot address the full 32-bit range.  */
	if (!dev || *dev->dma_mask >= 0xffffffffUL)
		gfp &= ~GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}
	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	/* There is no IOMMU: the bus address of each entry is just
	   the physical address of its buffer.  */
	for (i = 0; i < nents; i++ ) {
		void *va;

		BUG_ON(!sg[i].page);
		va = page_address(sg[i].page) + sg[i].offset;
		sg_dma_address(sg + i) = (dma_addr_t)virt_to_bus(va);
		sg_dma_len(sg + i) = sg[i].length;
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return NULL;
}

void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
}

EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);