ia64/xen-unstable

view linux-2.6-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c @ 6088:e87001315978

dma_map_single can't sleep since it may be called with locks held, or
interrupts off, etc. Thus it shouldn't make sleeping allocations.

Signed-off-by: Chris Wright <chrisw@osdl.org>
author kaf24@firebug.cl.cam.ac.uk
date Wed Aug 10 09:50:05 2005 +0000 (2005-08-10)
parents 35b74976f598
children d74e320900fd
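
A minimal sketch (not part of the changeset) of the kind of caller the message above describes: dma_map_single() invoked with a spinlock held and interrupts disabled, which is why the bounce-buffer path in this file must allocate with GFP_ATOMIC rather than GFP_KERNEL. The mydev structure and mydev_kick_tx() are hypothetical.

#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

struct mydev {
	spinlock_t	lock;
	struct device	*dev;
	void		*buf;
	size_t		buf_len;
	dma_addr_t	buf_dma;
};

static void mydev_kick_tx(struct mydev *md)
{
	unsigned long flags;

	spin_lock_irqsave(&md->lock, flags);
	/* Lock held, interrupts off: dma_map_single() must not sleep here. */
	md->buf_dma = dma_map_single(md->dev, md->buf, md->buf_len,
				     DMA_TO_DEVICE);
	/* ... hand md->buf_dma to the hardware ... */
	spin_unlock_irqrestore(&md->lock, flags);
}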
line source
/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm-xen/balloon.h>

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	       int nents, int direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);
	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		BUG_ON(!s->page);
		s->dma_address = virt_to_bus(page_address(s->page) + s->offset);
		s->dma_length = s->length;
	}
	return nents;
}

EXPORT_SYMBOL(dma_map_sg);
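
/*
 * Illustrative sketch only -- not part of the original file. It shows typical
 * streaming use of the scatter-gather interface above: fill in
 * page/offset/length for each element, map the whole list, then read the
 * device-visible addresses back via sg_dma_address()/sg_dma_length().
 * NENTS, buf[], len[] and program_descriptor() are hypothetical.
 */
#if 0	/* example only */
	struct scatterlist sg[NENTS];
	int i, nmapped;

	for (i = 0; i < NENTS; i++) {
		sg[i].page   = virt_to_page(buf[i]);
		sg[i].offset = offset_in_page(buf[i]);
		sg[i].length = len[i];
	}

	nmapped = dma_map_sg(dev, sg, NENTS, DMA_TO_DEVICE);
	for (i = 0; i < nmapped; i++)
		program_descriptor(dev, i, sg_dma_address(&sg[i]),
				   sg_dma_length(&sg[i]));

	/* ... after the device has finished with the buffers ... */
	dma_unmap_sg(dev, sg, NENTS, DMA_TO_DEVICE);
#endif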
/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		  int nents, int dir)
{
	int i;

	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		BUG_ON(s->page == NULL);
		BUG_ON(s->dma_address == 0);
		dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, unsigned gfp)
{
	void *ret;
	unsigned int order = get_order(size);
	unsigned long vstart;
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	vstart = __get_free_pages(gfp, order);
	ret = (void *)vstart;
	if (ret == NULL)
		return ret;

	xen_contig_memory(vstart, order);

	memset(ret, 0, size);
	*dma_handle = virt_to_bus(ret);

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem && vaddr >= mem->virt_base &&
	    vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
	} else
		free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
#if 0
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = (pages + 31)/32;

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
	dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;
	memset(dev->dma_mem->bitmap, 0, bitmap_size);

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem->bitmap);
 out:
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int pos, err;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
#endif
/*
 * Bookkeeping for streaming mappings of buffers that straddle a page
 * boundary: such buffers are bounced through a machine-contiguous region
 * obtained from dma_alloc_coherent(), and each active bounce is tracked
 * on dma_map_head.
 */
static LIST_HEAD(dma_map_head);
static DEFINE_SPINLOCK(dma_map_lock);
struct dma_map_entry {
	struct list_head list;
	dma_addr_t dma;
	char *bounce, *host;
	size_t size;
};
/* True if dma address 'd' lies within the bounce mapping tracked by 'e'. */
#define DMA_MAP_MATCHES(e,d) (((e)->dma<=(d)) && (((e)->dma+(e)->size)>(d)))
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	struct dma_map_entry *ent;
	void *bnc;
	dma_addr_t dma;
	unsigned long flags;

	if (direction == DMA_NONE)
		out_of_line_bug();

	/*
	 * Even if size is sub-page, the buffer may still straddle a page
	 * boundary. Take into account buffer start offset. All other calls are
	 * conservative and always search the dma_map list if it's non-empty.
	 */
	if (((((unsigned long)ptr) & ~PAGE_MASK) + size) <= PAGE_SIZE) {
		dma = virt_to_bus(ptr);
	} else {
		BUG_ON((bnc = dma_alloc_coherent(dev, size, &dma, GFP_ATOMIC)) == NULL);
		BUG_ON((ent = kmalloc(sizeof(*ent), GFP_ATOMIC)) == NULL);
		if (direction != DMA_FROM_DEVICE)
			memcpy(bnc, ptr, size);
		ent->dma    = dma;
		ent->bounce = bnc;
		ent->host   = ptr;
		ent->size   = size;
		spin_lock_irqsave(&dma_map_lock, flags);
		list_add(&ent->list, &dma_map_head);
		spin_unlock_irqrestore(&dma_map_lock, flags);
	}

	if ((dma + size) & ~*dev->dma_mask)
		out_of_line_bug();
	return dma;
}
EXPORT_SYMBOL(dma_map_single);

void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	struct dma_map_entry *ent;
	unsigned long flags;

	if (direction == DMA_NONE)
		out_of_line_bug();

	/* Fast-path check: are there any multi-page DMA mappings? */
	if (!list_empty(&dma_map_head)) {
		spin_lock_irqsave(&dma_map_lock, flags);
		list_for_each_entry ( ent, &dma_map_head, list ) {
			if (DMA_MAP_MATCHES(ent, dma_addr)) {
				list_del(&ent->list);
				break;
			}
		}
		spin_unlock_irqrestore(&dma_map_lock, flags);
		if (&ent->list != &dma_map_head) {
			BUG_ON(dma_addr != ent->dma);
			BUG_ON(size != ent->size);
			if (direction != DMA_TO_DEVICE)
				memcpy(ent->host, ent->bounce, size);
			dma_free_coherent(dev, size, ent->bounce, ent->dma);
			kfree(ent);
		}
	}
}
EXPORT_SYMBOL(dma_unmap_single);

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	struct dma_map_entry *ent;
	unsigned long flags, off;

	/* Fast-path check: are there any multi-page DMA mappings? */
	if (!list_empty(&dma_map_head)) {
		spin_lock_irqsave(&dma_map_lock, flags);
		list_for_each_entry ( ent, &dma_map_head, list )
			if (DMA_MAP_MATCHES(ent, dma_handle))
				break;
		spin_unlock_irqrestore(&dma_map_lock, flags);
		if (&ent->list != &dma_map_head) {
			off = dma_handle - ent->dma;
			BUG_ON((off + size) > ent->size);
			/*if (direction != DMA_TO_DEVICE)*/
				memcpy(ent->host + off, ent->bounce + off, size);
		}
	}
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction direction)
{
	struct dma_map_entry *ent;
	unsigned long flags, off;

	/* Fast-path check: are there any multi-page DMA mappings? */
	if (!list_empty(&dma_map_head)) {
		spin_lock_irqsave(&dma_map_lock, flags);
		list_for_each_entry ( ent, &dma_map_head, list )
			if (DMA_MAP_MATCHES(ent, dma_handle))
				break;
		spin_unlock_irqrestore(&dma_map_lock, flags);
		if (&ent->list != &dma_map_head) {
			off = dma_handle - ent->dma;
			BUG_ON((off + size) > ent->size);
			/*if (direction != DMA_FROM_DEVICE)*/
				memcpy(ent->bounce + off, ent->host + off, size);
		}
	}

	flush_write_buffers();
}
EXPORT_SYMBOL(dma_sync_single_for_device);
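
For reference, a hedged usage sketch of the streaming single-buffer path implemented above: when a buffer crosses a page boundary, dma_map_single() bounces it through a contiguous coherent allocation, dma_sync_single_for_cpu() copies the bounce data back to the host buffer, and dma_unmap_single() performs a final copy-back and frees the bounce buffer. example_rx(), rx_buf and rx_len are hypothetical names.

static void example_rx(struct device *dev, void *rx_buf, size_t rx_len)
{
	dma_addr_t dma;

	dma = dma_map_single(dev, rx_buf, rx_len, DMA_FROM_DEVICE);

	/* ... point the device at 'dma' and let it fill the buffer ... */

	/* Make the device's writes visible to the CPU (copies from the
	 * bounce buffer when the mapping straddled a page boundary). */
	dma_sync_single_for_cpu(dev, dma, rx_len, DMA_FROM_DEVICE);

	/* For a bounced mapping this copies back once more, then frees the
	 * bounce buffer and its tracking entry. */
	dma_unmap_single(dev, dma, rx_len, DMA_FROM_DEVICE);
}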