ia64/xen-unstable

view linux-2.6-xen-sparse/arch/xen/i386/kernel/pci-dma.c @ 6234:d4f6247b2a1b

Formatting cleanup.
author kaf24@firebug.cl.cam.ac.uk
date Wed Aug 17 15:34:58 2005 +0000 (2005-08-17)
parents 7be2d754647d
children 11f556cac45b 23979fb12c49 84ee014ebd41 99914b54f7bf
line source
1 /*
2 * Dynamic DMA mapping support.
3 *
4 * On i386 there is no hardware dynamic DMA address translation,
5 * so consistent alloc/free are merely page allocation/freeing.
6 * The rest of the dynamic DMA mapping interface is implemented
7 * in asm/pci.h.
8 */
10 #include <linux/types.h>
11 #include <linux/mm.h>
12 #include <linux/string.h>
13 #include <linux/pci.h>
14 #include <linux/version.h>
15 #include <asm/io.h>
16 #include <asm-xen/balloon.h>
17 #include <asm/tlbflush.h>
/*
 * Per-device coherent DMA pool, set up by dma_declare_coherent_memory()
 * and consumed by dma_alloc_coherent()/dma_free_coherent().
 */
struct dma_coherent_mem {
	void *virt_base;	/* kernel virtual mapping of the pool (from ioremap) */
	u32 device_base;	/* bus/device-side base address of the pool */
	int size;		/* pool size in pages */
	int flags;		/* DMA_MEMORY_* flags passed at declaration */
	unsigned long *bitmap;	/* one bit per page; set = page allocated */
};
/*
 * Hard-stop when a DMA address cannot be used directly by the device and
 * no swiotlb bounce buffering is active.  There is no IOMMU to remap the
 * access, so the only remedy is booting with 'swiotlb=force' — hence the
 * printed hint before BUG().
 */
#define IOMMU_BUG_ON(test) \
do { \
	if (unlikely(test)) { \
		printk(KERN_ALERT "Fatal DMA error! " \
		       "Please use 'swiotlb=force'\n"); \
		BUG(); \
	} \
} while (0)
36 int
37 dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
38 enum dma_data_direction direction)
39 {
40 int i, rc;
42 BUG_ON(direction == DMA_NONE);
44 if (swiotlb) {
45 rc = swiotlb_map_sg(hwdev, sg, nents, direction);
46 } else {
47 for (i = 0; i < nents; i++ ) {
48 sg[i].dma_address =
49 page_to_phys(sg[i].page) + sg[i].offset;
50 sg[i].dma_length = sg[i].length;
51 BUG_ON(!sg[i].page);
52 IOMMU_BUG_ON(address_needs_mapping(
53 hwdev, sg[i].dma_address));
54 }
55 rc = nents;
56 }
58 flush_write_buffers();
59 return rc;
60 }
61 EXPORT_SYMBOL(dma_map_sg);
63 void
64 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
65 enum dma_data_direction direction)
66 {
67 BUG_ON(direction == DMA_NONE);
68 if (swiotlb)
69 swiotlb_unmap_sg(hwdev, sg, nents, direction);
70 }
71 EXPORT_SYMBOL(dma_unmap_sg);
73 dma_addr_t
74 dma_map_page(struct device *dev, struct page *page, unsigned long offset,
75 size_t size, enum dma_data_direction direction)
76 {
77 dma_addr_t dma_addr;
79 BUG_ON(direction == DMA_NONE);
81 if (swiotlb) {
82 dma_addr = swiotlb_map_page(
83 dev, page, offset, size, direction);
84 } else {
85 dma_addr = page_to_phys(page) + offset;
86 IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
87 }
89 return dma_addr;
90 }
91 EXPORT_SYMBOL(dma_map_page);
93 void
94 dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
95 enum dma_data_direction direction)
96 {
97 BUG_ON(direction == DMA_NONE);
98 if (swiotlb)
99 swiotlb_unmap_page(dev, dma_address, size, direction);
100 }
101 EXPORT_SYMBOL(dma_unmap_page);
103 int
104 dma_mapping_error(dma_addr_t dma_addr)
105 {
106 if (swiotlb)
107 return swiotlb_dma_mapping_error(dma_addr);
108 return 0;
109 }
110 EXPORT_SYMBOL(dma_mapping_error);
112 int
113 dma_supported(struct device *dev, u64 mask)
114 {
115 if (swiotlb)
116 return swiotlb_dma_supported(dev, mask);
117 /*
118 * By default we'll BUG when an infeasible DMA is requested, and
119 * request swiotlb=force (see IOMMU_BUG_ON).
120 */
121 return 1;
122 }
123 EXPORT_SYMBOL(dma_supported);
125 void *dma_alloc_coherent(struct device *dev, size_t size,
126 dma_addr_t *dma_handle, unsigned int __nocast gfp)
127 {
128 void *ret;
129 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
130 unsigned int order = get_order(size);
131 unsigned long vstart;
132 /* ignore region specifiers */
133 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
135 if (mem) {
136 int page = bitmap_find_free_region(mem->bitmap, mem->size,
137 order);
138 if (page >= 0) {
139 *dma_handle = mem->device_base + (page << PAGE_SHIFT);
140 ret = mem->virt_base + (page << PAGE_SHIFT);
141 memset(ret, 0, size);
142 return ret;
143 }
144 if (mem->flags & DMA_MEMORY_EXCLUSIVE)
145 return NULL;
146 }
148 if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
149 gfp |= GFP_DMA;
151 vstart = __get_free_pages(gfp, order);
152 ret = (void *)vstart;
154 if (ret != NULL) {
155 xen_create_contiguous_region(vstart, order);
157 memset(ret, 0, size);
158 *dma_handle = virt_to_bus(ret);
159 }
160 return ret;
161 }
162 EXPORT_SYMBOL(dma_alloc_coherent);
164 void dma_free_coherent(struct device *dev, size_t size,
165 void *vaddr, dma_addr_t dma_handle)
166 {
167 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
168 int order = get_order(size);
170 if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
171 int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
173 bitmap_release_region(mem->bitmap, page, order);
174 } else {
175 xen_destroy_contiguous_region((unsigned long)vaddr, order);
176 free_pages((unsigned long)vaddr, order);
177 }
178 }
179 EXPORT_SYMBOL(dma_free_coherent);
181 int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
182 dma_addr_t device_addr, size_t size, int flags)
183 {
184 void __iomem *mem_base;
185 int pages = size >> PAGE_SHIFT;
186 int bitmap_size = (pages + 31)/32;
188 if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
189 goto out;
190 if (!size)
191 goto out;
192 if (dev->dma_mem)
193 goto out;
195 /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
197 mem_base = ioremap(bus_addr, size);
198 if (!mem_base)
199 goto out;
201 dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
202 if (!dev->dma_mem)
203 goto out;
204 memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
205 dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
206 if (!dev->dma_mem->bitmap)
207 goto free1_out;
208 memset(dev->dma_mem->bitmap, 0, bitmap_size);
210 dev->dma_mem->virt_base = mem_base;
211 dev->dma_mem->device_base = device_addr;
212 dev->dma_mem->size = pages;
213 dev->dma_mem->flags = flags;
215 if (flags & DMA_MEMORY_MAP)
216 return DMA_MEMORY_MAP;
218 return DMA_MEMORY_IO;
220 free1_out:
221 kfree(dev->dma_mem->bitmap);
222 out:
223 return 0;
224 }
225 EXPORT_SYMBOL(dma_declare_coherent_memory);
227 void dma_release_declared_memory(struct device *dev)
228 {
229 struct dma_coherent_mem *mem = dev->dma_mem;
231 if(!mem)
232 return;
233 dev->dma_mem = NULL;
234 iounmap(mem->virt_base);
235 kfree(mem->bitmap);
236 kfree(mem);
237 }
238 EXPORT_SYMBOL(dma_release_declared_memory);
240 void *dma_mark_declared_memory_occupied(struct device *dev,
241 dma_addr_t device_addr, size_t size)
242 {
243 struct dma_coherent_mem *mem = dev->dma_mem;
244 int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
245 int pos, err;
247 if (!mem)
248 return ERR_PTR(-EINVAL);
250 pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
251 err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
252 if (err != 0)
253 return ERR_PTR(err);
254 return mem->virt_base + (pos << PAGE_SHIFT);
255 }
256 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
258 dma_addr_t
259 dma_map_single(struct device *dev, void *ptr, size_t size,
260 enum dma_data_direction direction)
261 {
262 dma_addr_t dma;
264 BUG_ON(direction == DMA_NONE);
266 if (swiotlb) {
267 dma = swiotlb_map_single(dev, ptr, size, direction);
268 } else {
269 dma = virt_to_bus(ptr);
270 IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
271 IOMMU_BUG_ON(address_needs_mapping(dev, dma));
272 }
274 flush_write_buffers();
275 return dma;
276 }
277 EXPORT_SYMBOL(dma_map_single);
279 void
280 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
281 enum dma_data_direction direction)
282 {
283 BUG_ON(direction == DMA_NONE);
284 if (swiotlb)
285 swiotlb_unmap_single(dev, dma_addr, size, direction);
286 }
287 EXPORT_SYMBOL(dma_unmap_single);
289 void
290 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
291 enum dma_data_direction direction)
292 {
293 if (swiotlb)
294 swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
295 }
296 EXPORT_SYMBOL(dma_sync_single_for_cpu);
298 void
299 dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
300 enum dma_data_direction direction)
301 {
302 if (swiotlb)
303 swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
304 }
305 EXPORT_SYMBOL(dma_sync_single_for_device);
307 /*
308 * Local variables:
309 * c-file-style: "linux"
310 * indent-tabs-mode: t
311 * c-indent-level: 8
312 * c-basic-offset: 8
313 * tab-width: 8
314 * End:
315 */