ia64/xen-unstable

view linux-2.6-xen-sparse/arch/i386/kernel/pci-dma-xen.c @ 8861:c62a5327e6a8

Fix prototype mismatch.
Should fix e1000/tg3 hang and cciss problems.

From: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
author cl349@firebug.cl.cam.ac.uk
date Thu Feb 16 11:25:36 2006 +0000 (2006-02-16)
parents 2494b4e00cbb
children b765c96f8127
line source
/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/version.h>
#include <asm/io.h>
#include <xen/balloon.h>
#include <asm/tlbflush.h>
#include <asm-i386/mach-xen/asm/swiotlb.h>
#include <asm/bug.h>

#ifdef __x86_64__
int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge);

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

__init int iommu_setup(char *p)
{
	return 1;
}
#endif

struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

#define IOMMU_BUG_ON(test)				\
do {							\
	if (unlikely(test)) {				\
		printk(KERN_ALERT "Fatal DMA error! "	\
		       "Please use 'swiotlb=force'\n");	\
		BUG();					\
	}						\
} while (0)

int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i, rc;

	if (direction == DMA_NONE)
		BUG();
	WARN_ON(nents == 0 || sg[0].length == 0);

	if (swiotlb) {
		rc = swiotlb_map_sg(hwdev, sg, nents, direction);
	} else {
		for (i = 0; i < nents; i++) {
			sg[i].dma_address =
				page_to_phys(sg[i].page) + sg[i].offset;
			sg[i].dma_length = sg[i].length;
			BUG_ON(!sg[i].page);
			IOMMU_BUG_ON(address_needs_mapping(
				hwdev, sg[i].dma_address));
		}
		rc = nents;
	}

	flush_write_buffers();
	return rc;
}
EXPORT_SYMBOL(dma_map_sg);
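
/*
 * Illustrative caller of dma_map_sg()/dma_unmap_sg(), added here only as a
 * sketch of how a driver typically drives this interface; "mydev",
 * "MAX_FRAGS", "nr_frags", "frag_page[]" and program_hw_desc() are
 * hypothetical names, not part of this file or of any real driver.
 *
 *	struct scatterlist sg[MAX_FRAGS];
 *	int i, count;
 *
 *	for (i = 0; i < nr_frags; i++) {
 *		sg[i].page   = frag_page[i];
 *		sg[i].offset = 0;
 *		sg[i].length = PAGE_SIZE;
 *	}
 *	count = dma_map_sg(&mydev->pdev->dev, sg, nr_frags, DMA_TO_DEVICE);
 *	for (i = 0; i < count; i++)
 *		program_hw_desc(mydev, sg[i].dma_address, sg[i].dma_length);
 *	(... device performs the transfer ...)
 *	dma_unmap_sg(&mydev->pdev->dev, sg, nr_frags, DMA_TO_DEVICE);
 */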

void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	if (swiotlb)
		swiotlb_unmap_sg(hwdev, sg, nents, direction);
}
EXPORT_SYMBOL(dma_unmap_sg);

dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	dma_addr_t dma_addr;

	BUG_ON(direction == DMA_NONE);

	if (swiotlb) {
		dma_addr = swiotlb_map_page(
			dev, page, offset, size, direction);
	} else {
		dma_addr = page_to_phys(page) + offset;
		IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
	}

	return dma_addr;
}
EXPORT_SYMBOL(dma_map_page);

void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	if (swiotlb)
		swiotlb_unmap_page(dev, dma_address, size, direction);
}
EXPORT_SYMBOL(dma_unmap_page);

int
dma_mapping_error(dma_addr_t dma_addr)
{
	if (swiotlb)
		return swiotlb_dma_mapping_error(dma_addr);
	return 0;
}
EXPORT_SYMBOL(dma_mapping_error);

int
dma_supported(struct device *dev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(dev, mask);
	/*
	 * By default we'll BUG when an infeasible DMA is requested, and
	 * request swiotlb=force (see IOMMU_BUG_ON).
	 */
	return 1;
}
EXPORT_SYMBOL(dma_supported);

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	unsigned int order = get_order(size);
	unsigned long vstart;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	vstart = __get_free_pages(gfp, order);
	ret = (void *)vstart;

	if (ret != NULL) {
		/* NB. Hardcode 31 address bits for now: aacraid limitation. */
		if (xen_create_contiguous_region(vstart, order, 31) != 0) {
			free_pages(vstart, order);
			return NULL;
		}
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}
	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
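
/*
 * Illustrative caller of dma_alloc_coherent()/dma_free_coherent(), added
 * only as a sketch of typical use for a descriptor ring; "pdev", "ioaddr",
 * "RING_BYTES" and "RING_BASE_REG" are hypothetical names, not part of
 * this file.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, RING_BYTES, &ring_dma,
 *				  GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	writel(ring_dma, ioaddr + RING_BASE_REG);  (hand bus address to hw)
 *	(... ring is coherent: no further sync calls are needed ...)
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 */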

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem && vaddr >= mem->virt_base &&
	    vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
	} else {
		xen_destroy_contiguous_region((unsigned long)vaddr, order);
		free_pages((unsigned long)vaddr, order);
	}
}
EXPORT_SYMBOL(dma_free_coherent);

int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = (pages + 31)/32;

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
	dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;
	memset(dev->dma_mem->bitmap, 0, bitmap_size);

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem->bitmap);
 out:
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int pos, err;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
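
/*
 * Illustrative caller of the declared-memory interface above, added only
 * as a sketch of how a driver with a dedicated on-card memory window might
 * use it; "pdev", "buf", "bus_addr" and the window address/size constants
 * are hypothetical values, not taken from this file or any real hardware.
 *
 *	if (!dma_declare_coherent_memory(&pdev->dev, 0xf8000000, 0xf8000000,
 *					 0x100000,
 *					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
 *		return -ENOMEM;
 *	buf = dma_alloc_coherent(&pdev->dev, 4096, &bus_addr, GFP_KERNEL);
 *	(... buf now comes from the declared window ...)
 *	dma_free_coherent(&pdev->dev, 4096, buf, bus_addr);
 *	dma_release_declared_memory(&pdev->dev);
 */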

dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	dma_addr_t dma;

	if (direction == DMA_NONE)
		BUG();
	WARN_ON(size == 0);

	if (swiotlb) {
		dma = swiotlb_map_single(dev, ptr, size, direction);
	} else {
		dma = virt_to_bus(ptr);
		IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
		IOMMU_BUG_ON(address_needs_mapping(dev, dma));
	}

	flush_write_buffers();
	return dma;
}
EXPORT_SYMBOL(dma_map_single);
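
/*
 * Illustrative caller of dma_map_single() for a streaming (per-I/O)
 * mapping, added only as a sketch; "mydev", "buf", "len" and
 * start_hw_transfer() are hypothetical names, not part of this file.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(&mydev->pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	start_hw_transfer(mydev, handle, len);
 *	(... wait for the device to finish with the buffer ...)
 *	dma_unmap_single(&mydev->pdev->dev, handle, len, DMA_TO_DEVICE);
 */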

void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		BUG();
	if (swiotlb)
		swiotlb_unmap_single(dev, dma_addr, size, direction);
}
EXPORT_SYMBOL(dma_unmap_single);

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	if (swiotlb)
		swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	if (swiotlb)
		swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

/*
 * Local variables:
 *  c-file-style: "linux"
 *  indent-tabs-mode: t
 *  c-indent-level: 8
 *  c-basic-offset: 8
 *  tab-width: 8
 * End:
 */