ia64/xen-unstable

view linux-2.6-xen-sparse/arch/xen/i386/kernel/pci-dma.c @ 6233:7be2d754647d

have a useful line reported when IOMMU_BUG_ON triggers

Signed-off-by: Vincent Hanquez <vincent@xensource.com>
author vh249@arcadians.cl.cam.ac.uk
date Wed Aug 17 15:09:31 2005 +0000 (2005-08-17)
parents 3d187585c141
children d4f6247b2a1b
/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <asm/io.h>
#include <asm-xen/balloon.h>
#include <asm/tlbflush.h>

struct dma_coherent_mem {
        void            *virt_base;
        u32             device_base;
        int             size;
        int             flags;
        unsigned long   *bitmap;
};
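/*
 * Without an IOMMU (or swiotlb bounce buffers) there is no way to make an
 * unreachable buffer visible to the device, so the only helpful thing left
 * to do is point the user at the 'swiotlb=force' workaround and crash.
 */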
#define IOMMU_BUG_ON(test)                                              \
do {                                                                    \
        if (unlikely(test)) {                                           \
                printk(KERN_ALERT "Fatal DMA error! Please use 'swiotlb=force'\n"); \
                BUG();                                                  \
        }                                                               \
} while (0)
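/*
 * Map a scatterlist for DMA.  When swiotlb is active the segments may be
 * bounced; otherwise the bus address of each segment is simply its
 * physical address, which the device must be able to reach directly.
 */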
int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i, rc;

        BUG_ON(direction == DMA_NONE);

        if (swiotlb) {
                rc = swiotlb_map_sg(hwdev, sg, nents, direction);
        } else {
                for (i = 0; i < nents; i++) {
                        sg[i].dma_address =
                                page_to_phys(sg[i].page) + sg[i].offset;
                        sg[i].dma_length = sg[i].length;
                        BUG_ON(!sg[i].page);
                        IOMMU_BUG_ON(address_needs_mapping(
                                hwdev, sg[i].dma_address));
                }
                rc = nents;
        }

        flush_write_buffers();
        return rc;
}
EXPORT_SYMBOL(dma_map_sg);

void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        if (swiotlb)
                swiotlb_unmap_sg(hwdev, sg, nents, direction);
}
EXPORT_SYMBOL(dma_unmap_sg);
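/*
 * Map a single page for DMA.  Without swiotlb this is a plain
 * physical-address calculation; the address is only checked against the
 * device's reachable range, never remapped.
 */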
dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        dma_addr_t dma_addr;

        BUG_ON(direction == DMA_NONE);

        if (swiotlb) {
                dma_addr = swiotlb_map_page(
                        dev, page, offset, size, direction);
        } else {
                dma_addr = page_to_phys(page) + offset;
                IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
        }

        return dma_addr;
}
EXPORT_SYMBOL(dma_map_page);

void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        if (swiotlb)
                swiotlb_unmap_page(dev, dma_address, size, direction);
}
EXPORT_SYMBOL(dma_unmap_page);
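/*
 * Mapping-error and mask-support queries are delegated to swiotlb when it
 * is in use; otherwise mappings never fail here, because an infeasible
 * address hits IOMMU_BUG_ON at map time instead.
 */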
int
dma_mapping_error(dma_addr_t dma_addr)
{
        if (swiotlb)
                return swiotlb_dma_mapping_error(dma_addr);
        return 0;
}
EXPORT_SYMBOL(dma_mapping_error);
int
dma_supported(struct device *dev, u64 mask)
{
        if (swiotlb)
                return swiotlb_dma_supported(dev, mask);
        /*
         * By default we BUG when an infeasible DMA address is requested
         * and ask the user to boot with 'swiotlb=force' (see IOMMU_BUG_ON).
         */
        return 1;
}
EXPORT_SYMBOL(dma_supported);
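/*
 * Allocate consistent (coherent) memory.  A per-device coherent pool is
 * used if one was declared; otherwise pages come from the normal allocator
 * and are made machine-contiguous via xen_create_contiguous_region() so
 * the resulting bus address is usable for DMA.
 */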
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, unsigned int __nocast gfp)
{
        void *ret;
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        unsigned int order = get_order(size);
        unsigned long vstart;
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (mem) {
                int page = bitmap_find_free_region(mem->bitmap, mem->size,
                                                   order);
                if (page >= 0) {
                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
                        ret = mem->virt_base + (page << PAGE_SHIFT);
                        memset(ret, 0, size);
                        return ret;
                }
                if (mem->flags & DMA_MEMORY_EXCLUSIVE)
                        return NULL;
        }

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;

        vstart = __get_free_pages(gfp, order);
        ret = (void *)vstart;

        if (ret != NULL) {
                xen_create_contiguous_region(vstart, order);

                memset(ret, 0, size);
                *dma_handle = virt_to_bus(ret);
        }
        return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        if (mem && vaddr >= mem->virt_base &&
            vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
        } else {
                xen_destroy_contiguous_region((unsigned long)vaddr, order);
                free_pages((unsigned long)vaddr, order);
        }
}
EXPORT_SYMBOL(dma_free_coherent);
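/*
 * Declare a region of bus-addressable memory as the device's private
 * coherent pool.  The region is ioremapped and tracked with a
 * one-bit-per-page allocation bitmap; dma_alloc_coherent() then carves
 * allocations out of it before falling back to the general allocator.
 */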
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base;
        int pages = size >> PAGE_SHIFT;
        /* one bit per page, rounded up to whole longs */
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
        dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;
        memset(dev->dma_mem->bitmap, 0, bitmap_size);

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        /* bitmap allocation failed: release the descriptor itself */
        kfree(dev->dma_mem);
        dev->dma_mem = NULL;
 out:
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
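/*
 * Tear down a previously declared coherent pool: drop the device's
 * reference, unmap the region and free the bookkeeping.
 */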
void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);
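/*
 * Reserve a range of the declared pool so that dma_alloc_coherent() will
 * not hand it out, and return its kernel virtual address.
 */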
void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int pos, err;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        /* bitmap_allocate_region() wants an order covering 'pages' pages */
        err = bitmap_allocate_region(mem->bitmap, pos,
                                     get_order(pages << PAGE_SHIFT));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
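/*
 * Map a single virtually contiguous buffer.  Without swiotlb the buffer
 * must not straddle a page boundary, since adjacent pseudo-physical pages
 * are not guaranteed to be machine-contiguous under Xen.
 */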
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        dma_addr_t dma;

        BUG_ON(direction == DMA_NONE);

        if (swiotlb) {
                dma = swiotlb_map_single(dev, ptr, size, direction);
        } else {
                dma = virt_to_bus(ptr);
                IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
                IOMMU_BUG_ON(address_needs_mapping(dev, dma));
        }

        flush_write_buffers();
        return dma;
}
EXPORT_SYMBOL(dma_map_single);

void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        if (swiotlb)
                swiotlb_unmap_single(dev, dma_addr, size, direction);
}
EXPORT_SYMBOL(dma_unmap_single);
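/*
 * The sync operations only have work to do when swiotlb is bouncing
 * buffers; direct mappings need no extra synchronisation on x86.
 */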
void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
        if (swiotlb)
                swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction)
{
        if (swiotlb)
                swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
/*
 * Local variables:
 * c-file-style: "linux"
 * indent-tabs-mode: t
 * c-indent-level: 8
 * c-basic-offset: 8
 * tab-width: 8
 * End:
 */