ia64/linux-2.6.18-xen.hg

arch/i386/kernel/pci-dma-xen.c @ 708:e410857fd83c

Remove contiguous_bitmap[] as it's no longer needed.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Author:  Keir Fraser <keir.fraser@citrix.com>
Date:    Wed Oct 22 14:55:29 2008 +0100 (2008-10-22)
Parents: 5486a234923d

/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/version.h>
#include <asm/io.h>
#include <xen/balloon.h>
#include <xen/gnttab.h>
#include <asm/swiotlb.h>
#include <asm/tlbflush.h>
#include <asm-i386/mach-xen/asm/swiotlb.h>
#include <asm-i386/mach-xen/asm/gnttab_dma.h>
#include <asm/bug.h>
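
/*
 * When this file is built for x86_64, it also supplies the minimal
 * no-IOMMU glue (a stub iommu_setup(), swiotlb initialisation and the
 * pci_iommu_init() initcall) that the rest of the kernel expects.
 */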
#ifdef __x86_64__
#include <asm/proto.h>

int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge);

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

int force_iommu __read_mostly = 0;

__init int iommu_setup(char *p)
{
	return 1;
}

void __init pci_iommu_alloc(void)
{
#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
}

static int __init pci_iommu_init(void)
{
	no_iommu_init();
	return 0;
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
#endif
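
/*
 * Per-device coherent memory pool, as set up by
 * dma_declare_coherent_memory() below: an ioremap()ed window of bus
 * address space with a bitmap tracking page-sized allocations.
 */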
struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

#define IOMMU_BUG_ON(test)				\
do {							\
	if (unlikely(test)) {				\
		printk(KERN_ALERT "Fatal DMA error! "	\
		       "Please use 'swiotlb=force'\n");	\
		BUG();					\
	}						\
} while (0)
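
/*
 * Under Xen, consecutive pseudo-physical pages are not guaranteed to be
 * backed by consecutive machine frames, so a buffer spanning several
 * pages is only safe to hand to a device as one unit if its machine
 * frame numbers (MFNs) happen to be contiguous.  The helpers below
 * check exactly that.
 */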
static int check_pages_physically_contiguous(unsigned long pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_mfn;
	int i;
	int nr_pages;

	next_mfn = pfn_to_mfn(pfn);
	nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_mfn(++pfn) != ++next_mfn)
			return 0;
	}
	return 1;
}

int range_straddles_page_boundary(paddr_t p, size_t size)
{
	unsigned long pfn = p >> PAGE_SHIFT;
	unsigned int offset = p & ~PAGE_MASK;

	return ((offset + size > PAGE_SIZE) &&
		!check_pages_physically_contiguous(pfn, offset, size));
}
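
/*
 * Scatter-gather mapping.  Without swiotlb, each segment is translated
 * with the grant-table aware gnttab_dma_map_page(), and must neither
 * exceed the device's addressing limits nor straddle non-contiguous
 * machine frames (both enforced via IOMMU_BUG_ON).
 */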
int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i, rc;

	if (direction == DMA_NONE)
		BUG();
	WARN_ON(nents == 0 || sg[0].length == 0);

	if (swiotlb) {
		rc = swiotlb_map_sg(hwdev, sg, nents, direction);
	} else {
		for (i = 0; i < nents; i++) {
			BUG_ON(!sg[i].page);
			sg[i].dma_address =
				gnttab_dma_map_page(sg[i].page) + sg[i].offset;
			sg[i].dma_length = sg[i].length;
			IOMMU_BUG_ON(address_needs_mapping(
				hwdev, sg[i].dma_address));
			IOMMU_BUG_ON(range_straddles_page_boundary(
				page_to_pseudophys(sg[i].page) + sg[i].offset,
				sg[i].length));
		}
		rc = nents;
	}

	flush_write_buffers();
	return rc;
}
EXPORT_SYMBOL(dma_map_sg);

void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);
	if (swiotlb)
		swiotlb_unmap_sg(hwdev, sg, nents, direction);
	else {
		for (i = 0; i < nents; i++)
			gnttab_dma_unmap_page(sg[i].dma_address);
	}
}
EXPORT_SYMBOL(dma_unmap_sg);
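
/*
 * Highmem pages have no permanent kernel virtual mapping, so they get
 * dedicated map/unmap entry points here; the lowmem case presumably
 * goes through dma_map_single() via the inline wrappers in the headers.
 */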
#ifdef CONFIG_HIGHMEM
dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	dma_addr_t dma_addr;

	BUG_ON(direction == DMA_NONE);

	if (swiotlb) {
		dma_addr = swiotlb_map_page(
			dev, page, offset, size, direction);
	} else {
		dma_addr = gnttab_dma_map_page(page) + offset;
		IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
	}

	return dma_addr;
}
EXPORT_SYMBOL(dma_map_page);

void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	if (swiotlb)
		swiotlb_unmap_page(dev, dma_address, size, direction);
	else
		gnttab_dma_unmap_page(dma_address);
}
EXPORT_SYMBOL(dma_unmap_page);
#endif /* CONFIG_HIGHMEM */

int
dma_mapping_error(dma_addr_t dma_addr)
{
	if (swiotlb)
		return swiotlb_dma_mapping_error(dma_addr);
	return 0;
}
EXPORT_SYMBOL(dma_mapping_error);

int
dma_supported(struct device *dev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(dev, mask);
	/*
	 * By default we'll BUG when an infeasible DMA is requested, and
	 * request swiotlb=force (see IOMMU_BUG_ON).
	 */
	return 1;
}
EXPORT_SYMBOL(dma_supported);
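
/*
 * Coherent allocations must be machine-contiguous and reachable within
 * the device's coherent DMA mask, so after the normal page allocation
 * the underlying frames are exchanged with the hypervisor via
 * xen_create_contiguous_region().
 */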
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	unsigned int order = get_order(size);
	unsigned long vstart;
	u64 mask;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	vstart = __get_free_pages(gfp, order);
	ret = (void *)vstart;

	if (dev != NULL && dev->coherent_dma_mask)
		mask = dev->coherent_dma_mask;
	else
		mask = 0xffffffff;

	if (ret != NULL) {
		if (xen_create_contiguous_region(vstart, order,
						 fls64(mask)) != 0) {
			free_pages(vstart, order);
			return NULL;
		}
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}
	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
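
/*
 * For allocations that came from the page allocator (rather than a
 * per-device pool), the machine-contiguous region is handed back via
 * xen_destroy_contiguous_region() before the pages are freed.
 */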
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem && vaddr >= mem->virt_base &&
	    vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
	} else {
		xen_destroy_contiguous_region((unsigned long)vaddr, order);
		free_pages((unsigned long)vaddr, order);
	}
}
EXPORT_SYMBOL(dma_free_coherent);
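
/*
 * Device-local coherent memory support, e.g. memory behind a PCI BAR.
 * A driver would typically wire it up along these lines (illustrative
 * sketch, with placeholder names):
 *
 *	if (dma_declare_coherent_memory(dev, bar_bus_addr, bar_device_addr,
 *					bar_len, DMA_MEMORY_MAP)
 *	    == DMA_MEMORY_MAP)
 *		buf = dma_alloc_coherent(dev, len, &handle, GFP_KERNEL);
 *
 * after which dma_alloc_coherent() carves allocations out of the
 * declared region instead of exchanging pages with the hypervisor.
 */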
#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = (pages + 31)/32;

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
	dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;
	memset(dev->dma_mem->bitmap, 0, bitmap_size);

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem->bitmap);
 out:
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1)
		    >> PAGE_SHIFT;
	int pos, err;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
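
/*
 * Streaming mappings for lowmem kernel virtual addresses.  An
 * illustrative caller pairing (sketch, with placeholder names):
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(bus))
 *		goto fail;
 *	...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */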
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	dma_addr_t dma;

	if (direction == DMA_NONE)
		BUG();
	WARN_ON(size == 0);

	if (swiotlb) {
		dma = swiotlb_map_single(dev, ptr, size, direction);
	} else {
		dma = gnttab_dma_map_page(virt_to_page(ptr)) +
		      offset_in_page(ptr);
		IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
		IOMMU_BUG_ON(address_needs_mapping(dev, dma));
	}

	flush_write_buffers();
	return dma;
}
EXPORT_SYMBOL(dma_map_single);

void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		BUG();
	if (swiotlb)
		swiotlb_unmap_single(dev, dma_addr, size, direction);
	else
		gnttab_dma_unmap_page(dma_addr);
}
EXPORT_SYMBOL(dma_unmap_single);
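
/*
 * The sync operations only matter when swiotlb bounce buffers are in
 * use; otherwise DMA on x86 is cache-coherent and there is nothing to
 * do here.
 */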
void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	if (swiotlb)
		swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction direction)
{
	if (swiotlb)
		swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_device);