ia64/xen-unstable: linux-2.6-xen-sparse/arch/xen/i386/kernel/pci-dma.c @ 7850:54a05cd0d060

Add definitions of some swiotlb symbols to pci-dma.c.

Signed-off-by: Vincent Hanquez <vincent@xensource.com>
Author: kaf24@firebug.cl.cam.ac.uk
Date:   Wed Nov 16 11:34:54 2005 +0100
Parent: 356c175366a1
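For orientation before the listing: this file provides the i386/Xen side of the generic Linux DMA mapping API, deferring to swiotlb whenever it is enabled and otherwise using machine addresses directly. A minimal sketch of how a driver typically consumes the streaming half of this API follows; the function and buffer names are hypothetical and only illustrate the calling convention visible in the listing below.

	/* Hypothetical driver code; not part of the file below. */
	static int example_tx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		/* Streaming mapping for a CPU -> device transfer. */
		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(handle))
			return -EIO;	/* e.g. swiotlb could not bounce it */

		/* ... hand 'handle' to the device, wait for completion ... */

		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}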

/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <asm/io.h>
#include <asm-xen/balloon.h>
#include <asm/tlbflush.h>
#include <asm/swiotlb.h>

struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

#define IOMMU_BUG_ON(test)				\
do {							\
	if (unlikely(test)) {				\
		printk(KERN_ALERT "Fatal DMA error! "	\
		       "Please use 'swiotlb=force'\n");	\
		BUG();					\
	}						\
} while (0)

int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i, rc;

	BUG_ON(direction == DMA_NONE);

	if (swiotlb) {
		rc = swiotlb_map_sg(hwdev, sg, nents, direction);
	} else {
		for (i = 0; i < nents; i++) {
			sg[i].dma_address =
				page_to_phys(sg[i].page) + sg[i].offset;
			sg[i].dma_length = sg[i].length;
			BUG_ON(!sg[i].page);
			IOMMU_BUG_ON(address_needs_mapping(
				hwdev, sg[i].dma_address));
		}
		rc = nents;
	}

	flush_write_buffers();
	return rc;
}
EXPORT_SYMBOL(dma_map_sg);

void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	if (swiotlb)
		swiotlb_unmap_sg(hwdev, sg, nents, direction);
}
EXPORT_SYMBOL(dma_unmap_sg);

dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	dma_addr_t dma_addr;

	BUG_ON(direction == DMA_NONE);

	if (swiotlb) {
		dma_addr = swiotlb_map_page(
			dev, page, offset, size, direction);
	} else {
		dma_addr = page_to_phys(page) + offset;
		IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
	}

	return dma_addr;
}
EXPORT_SYMBOL(dma_map_page);

void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	if (swiotlb)
		swiotlb_unmap_page(dev, dma_address, size, direction);
}
EXPORT_SYMBOL(dma_unmap_page);

int
dma_mapping_error(dma_addr_t dma_addr)
{
	if (swiotlb)
		return swiotlb_dma_mapping_error(dma_addr);
	return 0;
}
EXPORT_SYMBOL(dma_mapping_error);

int
dma_supported(struct device *dev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(dev, mask);
	/*
	 * By default we'll BUG when an infeasible DMA is requested, and
	 * request swiotlb=force (see IOMMU_BUG_ON).
	 */
	return 1;
}
EXPORT_SYMBOL(dma_supported);

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, unsigned int __nocast gfp)
{
	void *ret;
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	unsigned int order = get_order(size);
	unsigned long vstart;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	vstart = __get_free_pages(gfp, order);
	ret = (void *)vstart;

	if (ret != NULL) {
		/* NB. Hardcode 31 address bits for now: aacraid limitation. */
		if (xen_create_contiguous_region(vstart, order, 31) != 0) {
			free_pages(vstart, order);
			return NULL;
		}
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}
	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
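
/*
 * Illustrative sketch (hypothetical names) of how a driver would use the
 * coherent API above, e.g. for a small descriptor ring:
 *
 *	dma_addr_t ring_bus;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_bus, GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	... give 'ring_bus' to the device, access 'ring' from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_bus);
 *
 * Under Xen the backing pages are additionally made machine-contiguous
 * within 31 address bits by xen_create_contiguous_region() above.
 */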

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem && vaddr >= mem->virt_base &&
	    vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
	} else {
		xen_destroy_contiguous_region((unsigned long)vaddr, order);
		free_pages((unsigned long)vaddr, order);
	}
}
EXPORT_SYMBOL(dma_free_coherent);

int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = (pages + 31)/32;

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
	dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;
	memset(dev->dma_mem->bitmap, 0, bitmap_size);

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem->bitmap);
 out:
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int pos, err;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
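
/*
 * Sketch (hypothetical addresses and sizes) of how a device with a
 * dedicated memory window might use the declaration interface above:
 *
 *	if (dma_declare_coherent_memory(dev, bus_addr, device_addr,
 *					0x10000, DMA_MEMORY_MAP) == 0)
 *		return -ENXIO;
 *	...
 *	dma_release_declared_memory(dev);
 *
 * dma_alloc_coherent() then carves allocations out of the declared region
 * before falling back to the page allocator, unless DMA_MEMORY_EXCLUSIVE
 * was set in the flags.
 */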

dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	dma_addr_t dma;

	BUG_ON(direction == DMA_NONE);

	if (swiotlb) {
		dma = swiotlb_map_single(dev, ptr, size, direction);
	} else {
		dma = virt_to_bus(ptr);
		IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
		IOMMU_BUG_ON(address_needs_mapping(dev, dma));
	}

	flush_write_buffers();
	return dma;
}
EXPORT_SYMBOL(dma_map_single);

void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	if (swiotlb)
		swiotlb_unmap_single(dev, dma_addr, size, direction);
}
EXPORT_SYMBOL(dma_unmap_single);

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	if (swiotlb)
		swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	if (swiotlb)
		swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

/*
 * Local variables:
 *  c-file-style: "linux"
 *  indent-tabs-mode: t
 *  c-indent-level: 8
 *  c-basic-offset: 8
 *  tab-width: 8
 * End:
 */