ia64/xen-unstable

linux-2.6-xen-sparse/arch/xen/i386/kernel/pci-dma.c @ 6088:e87001315978

dma_map_single() cannot sleep, since it may be called with locks held,
interrupts disabled, and so on. It therefore must not make sleeping
allocations.

Signed-off-by: Chris Wright <chrisw@osdl.org>
author kaf24@firebug.cl.cam.ac.uk
date Wed Aug 10 09:50:05 2005 +0000 (2005-08-10)
parents 35b74976f598
children d74e320900fd
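
For context, a minimal sketch of the kind of caller that motivates the change
(illustrative only, not part of the changeset; the example_desc, queue_lock and
example_queue_buffer names are hypothetical): a driver may call dma_map_single()
while holding a spinlock with interrupts disabled, so any allocation made on the
mapping path must use GFP_ATOMIC rather than GFP_KERNEL.

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

struct example_desc {
	dma_addr_t addr;	/* bus address handed to the device */
	size_t     len;
};

static DEFINE_SPINLOCK(queue_lock);

static void example_queue_buffer(struct device *dev, struct example_desc *d,
				 void *buf, size_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&queue_lock, flags);	/* sleeping is now illegal */
	d->addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	d->len  = len;
	spin_unlock_irqrestore(&queue_lock, flags);
}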
line source
/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <asm/io.h>
#include <asm-xen/balloon.h>
#include <asm/tlbflush.h>

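/*
 * Per-device coherent memory pool, set up by dma_declare_coherent_memory().
 * Allocations from the pool are tracked one bit per page in 'bitmap'.
 */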
struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

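/*
 * Allocate consistent memory: use the device's private pool if it has one,
 * otherwise grab pages from the page allocator and have Xen make them
 * machine-contiguous (xen_contig_memory) so virt_to_bus() is valid for the
 * whole allocation.
 */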
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, unsigned int __nocast gfp)
{
	void *ret;
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	unsigned int order = get_order(size);
	unsigned long vstart;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	vstart = __get_free_pages(gfp, order);
	ret = (void *)vstart;

	if (ret != NULL) {
		xen_contig_memory(vstart, order);

		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}
	return ret;
}

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
	} else
		free_pages((unsigned long)vaddr, order);
}

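/*
 * Declare a region of bus address space as the device's private coherent
 * pool.  Returns DMA_MEMORY_MAP or DMA_MEMORY_IO on success, 0 on failure.
 */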
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = (pages + 31)/32;

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
	dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;
	memset(dev->dma_mem->bitmap, 0, bitmap_size);

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem->bitmap);
 out:
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if(!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int pos, err;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

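/*
 * Streaming mappings that cross a page boundary cannot rely on virt_to_bus()
 * alone, because under Xen adjacent pseudo-physical pages need not be
 * adjacent in machine memory.  Such mappings are redirected through a
 * machine-contiguous bounce buffer; each active bounce buffer is tracked on
 * dma_map_head, protected by dma_map_lock.
 */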
static LIST_HEAD(dma_map_head);
static DEFINE_SPINLOCK(dma_map_lock);
struct dma_map_entry {
	struct list_head list;
	dma_addr_t dma;
	char *bounce, *host;
	size_t size;
};
#define DMA_MAP_MATCHES(e,d) (((e)->dma<=(d)) && (((e)->dma+(e)->size)>(d)))

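/*
 * Map a single buffer for streaming DMA.  Buffers contained within one page
 * are mapped in place; larger (page-crossing) buffers are copied into a
 * bounce buffer allocated with GFP_ATOMIC, since callers may hold locks or
 * have interrupts disabled.
 */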
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	struct dma_map_entry *ent;
	void *bnc;
	dma_addr_t dma;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	/*
	 * Even if size is sub-page, the buffer may still straddle a page
	 * boundary. Take into account buffer start offset. All other calls are
	 * conservative and always search the dma_map list if it's non-empty.
	 */
	if ((((unsigned int)ptr & ~PAGE_MASK) + size) <= PAGE_SIZE) {
		dma = virt_to_bus(ptr);
	} else {
		BUG_ON((bnc = dma_alloc_coherent(dev, size, &dma, GFP_ATOMIC)) == NULL);
		BUG_ON((ent = kmalloc(sizeof(*ent), GFP_ATOMIC)) == NULL);
		if (direction != DMA_FROM_DEVICE)
			memcpy(bnc, ptr, size);
		ent->dma    = dma;
		ent->bounce = bnc;
		ent->host   = ptr;
		ent->size   = size;
		spin_lock_irqsave(&dma_map_lock, flags);
		list_add(&ent->list, &dma_map_head);
		spin_unlock_irqrestore(&dma_map_lock, flags);
	}

	flush_write_buffers();
	return dma;
}
EXPORT_SYMBOL(dma_map_single);

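/*
 * Tear down a streaming mapping.  If it used a bounce buffer, copy the data
 * back to the caller's buffer (unless the mapping was DMA_TO_DEVICE) and
 * release the bounce buffer.
 */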
void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	struct dma_map_entry *ent;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	/* Fast-path check: are there any multi-page DMA mappings? */
	if (!list_empty(&dma_map_head)) {
		spin_lock_irqsave(&dma_map_lock, flags);
		list_for_each_entry ( ent, &dma_map_head, list ) {
			if (DMA_MAP_MATCHES(ent, dma_addr)) {
				list_del(&ent->list);
				break;
			}
		}
		spin_unlock_irqrestore(&dma_map_lock, flags);
		if (&ent->list != &dma_map_head) {
			BUG_ON(dma_addr != ent->dma);
			BUG_ON(size != ent->size);
			if (direction != DMA_TO_DEVICE)
				memcpy(ent->host, ent->bounce, size);
			dma_free_coherent(dev, size, ent->bounce, ent->dma);
			kfree(ent);
		}
	}
}
EXPORT_SYMBOL(dma_unmap_single);

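/*
 * Make device writes visible to the CPU: for a bounced mapping, copy from
 * the bounce buffer back into the caller's buffer.
 */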
void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	struct dma_map_entry *ent;
	unsigned long flags, off;

	/* Fast-path check: are there any multi-page DMA mappings? */
	if (!list_empty(&dma_map_head)) {
		spin_lock_irqsave(&dma_map_lock, flags);
		list_for_each_entry ( ent, &dma_map_head, list )
			if (DMA_MAP_MATCHES(ent, dma_handle))
				break;
		spin_unlock_irqrestore(&dma_map_lock, flags);
		if (&ent->list != &dma_map_head) {
			off = dma_handle - ent->dma;
			BUG_ON((off + size) > ent->size);
			/*if (direction != DMA_TO_DEVICE)*/
				memcpy(ent->host+off, ent->bounce+off, size);
		}
	}
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

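/*
 * Push CPU writes towards the device: for a bounced mapping, copy from the
 * caller's buffer into the bounce buffer.
 */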
void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	struct dma_map_entry *ent;
	unsigned long flags, off;

	/* Fast-path check: are there any multi-page DMA mappings? */
	if (!list_empty(&dma_map_head)) {
		spin_lock_irqsave(&dma_map_lock, flags);
		list_for_each_entry ( ent, &dma_map_head, list )
			if (DMA_MAP_MATCHES(ent, dma_handle))
				break;
		spin_unlock_irqrestore(&dma_map_lock, flags);
		if (&ent->list != &dma_map_head) {
			off = dma_handle - ent->dma;
			BUG_ON((off + size) > ent->size);
			/*if (direction != DMA_FROM_DEVICE)*/
				memcpy(ent->bounce+off, ent->host+off, size);
		}
	}

	flush_write_buffers();
}
EXPORT_SYMBOL(dma_sync_single_for_device);