ia64/linux-2.6.18-xen.hg

view arch/ia64/xen/xen_dma.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation, nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. we receive fewer pages than we asked for), then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
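
As a rough illustration of the behaviour described above, the sketch below shows
the shape of the retry logic only. It is not the actual drivers/xen/balloon/balloon.c
change: current_target(), current_pages() and increase_reservation() are assumed
stand-ins for the real driver's helpers, and the timer name is illustrative.

/*
 * Hedged sketch: keep whatever pages Xen did grant and re-arm a timer to
 * retry, instead of latching a "hard limit" and giving up until the
 * target is set again.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

static void balloon_process(unsigned long unused);
static DEFINE_TIMER(balloon_timer, balloon_process, 0, 0);

static void balloon_process(unsigned long unused)
{
	long credit = current_target() - current_pages();

	if (credit > 0) {
		/* A partial grant keeps the pages already received... */
		long got = increase_reservation(credit);

		/* ...and a shortfall just means "try again shortly". */
		if (got < credit)
			mod_timer(&balloon_timer, jiffies + HZ);
	}
	/* Ballooning down already retried on this timer before the patch. */
}

The key change is treating a shortfall as transient, mirroring what the driver
already did when decreasing the reservation.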
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents e410857fd83c
children
line source
/*
 * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
 *	Alex Williamson <alex.williamson@hp.com>
 *
 * Basic DMA mapping services for Xen guests.
 * Based on arch/i386/kernel/pci-dma-xen.c.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <xen/gnttab.h>
#include <asm/gnttab_dma.h>

#define IOMMU_BUG_ON(test)					\
do {								\
	if (unlikely(test)) {					\
		printk(KERN_ALERT "Fatal DMA error!\n");	\
		BUG();						\
	}							\
} while (0)

/*
 * Return 1 if the machine frames backing the pages that cover
 * [offset, offset + length) starting at pfn are contiguous.
 */
static int check_pages_physically_contiguous(unsigned long pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_bus;
	int i;
	int nr_pages;

	next_bus = pfn_to_mfn_for_dma(pfn);
	nr_pages = (offset + length + PAGE_SIZE - 1) >> PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_mfn_for_dma(++pfn) != ++next_bus)
			return 0;
	}
	return 1;
}

/*
 * Return 1 if the buffer at pseudo-physical address p crosses a page
 * boundary and the underlying machine frames are not contiguous.
 */
int range_straddles_page_boundary(paddr_t p, size_t size)
{
	unsigned long pfn = p >> PAGE_SHIFT;
	unsigned int offset = p & ~PAGE_MASK;

	if (!is_running_on_xen())
		return 0;

	if (offset + size <= PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(pfn, offset, size))
		return 0;
	return 1;
}

/*
 * This should be broken out of swiotlb and put in a common place
 * when merged with upstream Linux.
 */
static inline int
address_needs_mapping(struct device *dev, dma_addr_t addr)
{
	dma_addr_t mask = 0xffffffff;

	/* If the device has a mask, use it, otherwise default to 32 bits */
	if (dev && dev->dma_mask)
		mask = *dev->dma_mask;
	return (addr & ~mask) != 0;
}

/*
 * Map a scatterlist for DMA: translate each entry to a bus address via
 * the grant-table DMA helpers and sanity-check the result.
 */
int
xen_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   int direction)
{
	int i;

	for (i = 0; i < nents; i++) {
		sg[i].dma_address = gnttab_dma_map_page(sg[i].page) + sg[i].offset;
		sg[i].dma_length = sg[i].length;

		IOMMU_BUG_ON(address_needs_mapping(dev, sg[i].dma_address));
		IOMMU_BUG_ON(range_straddles_page_boundary(
			page_to_pseudophys(sg[i].page) + sg[i].offset,
			sg[i].length));
	}

	return nents;
}
EXPORT_SYMBOL(xen_map_sg);

void
xen_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
	     int direction)
{
	int i;

	/* Release the grant-table DMA references taken in xen_map_sg(). */
	for (i = 0; i < nents; i++)
		__gnttab_dma_unmap_page(sg[i].page);
}
EXPORT_SYMBOL(xen_unmap_sg);

/* Mappings established above are never reported as failed. */
int
xen_dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}
EXPORT_SYMBOL(xen_dma_mapping_error);

/*
 * Any DMA mask is claimed to be supported; out-of-range addresses are
 * caught by IOMMU_BUG_ON() at map time.
 */
int
xen_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}
EXPORT_SYMBOL(xen_dma_supported);

/*
 * Allocate a coherent buffer: get pages from the kernel allocator and
 * ask Xen to make the region machine-contiguous below the device's
 * coherent DMA mask.
 */
void *
xen_alloc_coherent(struct device *dev, size_t size,
		   dma_addr_t *dma_handle, gfp_t gfp)
{
	unsigned long vaddr;
	unsigned int order = get_order(size);

	vaddr = __get_free_pages(gfp, order);
	if (!vaddr)
		return NULL;

	if (xen_create_contiguous_region(vaddr, order,
					 fls64(dev->coherent_dma_mask))) {
		free_pages(vaddr, order);
		return NULL;
	}

	memset((void *)vaddr, 0, size);
	*dma_handle = virt_to_bus((void *)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(xen_alloc_coherent);

void
xen_free_coherent(struct device *dev, size_t size,
		  void *vaddr, dma_addr_t dma_handle)
{
	unsigned int order = get_order(size);

	/* Undo xen_create_contiguous_region() before freeing the pages. */
	xen_destroy_contiguous_region((unsigned long)vaddr, order);
	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(xen_free_coherent);

/* Map a single kernel-virtual buffer for DMA via the grant-table helpers. */
dma_addr_t
xen_map_single(struct device *dev, void *ptr, size_t size,
	       int direction)
{
	dma_addr_t dma_addr = gnttab_dma_map_virt(ptr);

	IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
	IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));

	return dma_addr;
}
EXPORT_SYMBOL(xen_map_single);

void
xen_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
	/* Drop the grant-table DMA reference taken in xen_map_single(). */
	gnttab_dma_unmap_page(dma_addr);
}
EXPORT_SYMBOL(xen_unmap_single);
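
For context, the usual driver-side path into the single-buffer helpers above is
the generic DMA API. The short sketch below is illustrative only: the device,
buffer and length are made up, and it assumes this tree's dma-mapping glue
routes dma_map_single() and dma_unmap_single() to xen_map_single() and
xen_unmap_single().

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>

/* Illustrative only: map a buffer the device will read (DMA_TO_DEVICE), then unmap. */
static int example_dma_roundtrip(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus;

	bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(bus))
		return -EIO;

	/* ... program the device with 'bus' and wait for it to finish ... */

	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
	return 0;
}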