ia64/xen-unstable

linux-2.6-xen-sparse/arch/xen/i386/kernel/swiotlb.c @ 7959:ff18a169e866
Update the memory_op() hypercall. Add two new subcommands, to
query a domain's current and maximum memory reservation. Also,
XENMEM_maximum_ram_page now returns the max_page directly,
rather than writing through a passed-in pointer.

Also, disable PAE in the default config (accidentally checked
in two changesets ago).

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Nov 21 16:56:39 2005 +0100 (2005-11-21)
parents 112a769787d2
children fad187cad5bd
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2005 Keir Fraser <keir@xensource.com>
 */

#include <linux/cache.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <asm/io.h>
#include <asm/pci.h>
#include <asm/dma.h>
#include <asm-xen/xen-public/memory.h>

#define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))

#define SG_ENT_PHYS_ADDRESS(sg) (page_to_phys((sg)->page) + (sg)->offset)

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2. What is the appropriate value ?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE 128

/*
 * log of the size of each IO TLB slab. The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
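
/*
 * For illustration: with IO_TLB_SHIFT = 11 each slab is 2KB, so one
 * IO_TLB_SEGSIZE segment (128 slabs) bounds a single mapping at
 * 128 * 2KB = 256KB.
 */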

int swiotlb_force;
static char *iotlb_virt_start;
static unsigned long iotlb_nslabs;

/*
 * Used to do a quick range check in swiotlb_unmap_single and
 * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static dma_addr_t iotlb_bus_start, iotlb_bus_end, iotlb_bus_mask;

/* Does the given dma address reside within the swiotlb aperture? */
#define in_swiotlb_aperture(a) (!(((a) ^ iotlb_bus_start) & iotlb_bus_mask))
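
/*
 * Note: the aperture size is rounded up to a power of two, and the
 * machine-contiguous region backing it is expected to be aligned to that
 * size, so with iotlb_bus_mask = ~(size - 1) an address lies in the
 * aperture iff it differs from iotlb_bus_start only in the low bits
 * cleared by the mask.  For example (illustrative numbers), with a 64MB
 * aperture at bus address 0x04000000, mask = ~0x03ffffff: 0x04123456
 * passes the test while 0x08000000 does not.
 */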

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
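
/*
 * Free-list encoding, for reference: io_tlb_list[i] holds the number of
 * consecutive free slabs starting at index i, counted only up to the end
 * of the current IO_TLB_SEGSIZE segment; allocated slabs hold 0.
 * io_tlb_index is where the next search begins.  For example, when slabs
 * 0..127 are all free, io_tlb_list[0] == 128, io_tlb_list[1] == 127, and
 * so on down to io_tlb_list[127] == 1.
 */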

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static struct phys_addr {
        struct page *page;
        unsigned int offset;
} *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
        /* Unlike ia64, the size is the aperture in megabytes, not 'slabs'! */
        if (isdigit(*str)) {
                iotlb_nslabs = simple_strtoul(str, &str, 0) <<
                        (20 - IO_TLB_SHIFT);
                iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
                /* Round up to power of two (xen_create_contiguous_region). */
                while (iotlb_nslabs & (iotlb_nslabs-1))
                        iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
        }
        if (*str == ',')
                ++str;
        /*
         * NB. 'force' enables the swiotlb, but doesn't force its use for
         * every DMA like it does on native Linux.
         */
        if (!strcmp(str, "force"))
                swiotlb_force = 1;
        return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
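/*
 * Example: booting with "swiotlb=64,force" requests a 64MB bounce aperture
 * and enables the swiotlb even when it would not be enabled automatically;
 * "swiotlb=force" does the same with the default aperture size.
 */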
/* make io_tlb_overflow tunable too? */

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the PCI DMA API.
 */
void
swiotlb_init_with_default_size (size_t default_size)
{
        unsigned long i, bytes;
        int rc;

        if (!iotlb_nslabs) {
                iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
                iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
                /* Round up to power of two (xen_create_contiguous_region). */
                while (iotlb_nslabs & (iotlb_nslabs-1))
                        iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
        }
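        /*
         * For example, the default 64MB aperture gives 64MB >> IO_TLB_SHIFT =
         * 32768 slabs, already a multiple of IO_TLB_SEGSIZE and a power of
         * two, so the rounding above leaves it unchanged.
         */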

        bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);

        /*
         * Get IO TLB memory from the low pages
         */
        iotlb_virt_start = alloc_bootmem_low_pages(bytes);
        if (!iotlb_virt_start)
                panic("Cannot allocate SWIOTLB buffer!\n"
                      "Use dom0_mem Xen boot parameter to reserve\n"
                      "some DMA memory (e.g., dom0_mem=-128M).\n");

        /* Hardcode 31 address bits for now: aacraid limitation. */
        rc = xen_create_contiguous_region(
                (unsigned long)iotlb_virt_start, get_order(bytes), 31);
        BUG_ON(rc);

        /*
         * Allocate and initialize the free list array. This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
         */
        io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
        for (i = 0; i < iotlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;
        io_tlb_orig_addr = alloc_bootmem(
                iotlb_nslabs * sizeof(*io_tlb_orig_addr));

        /*
         * Get the overflow emergency buffer
         */
        io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);

        iotlb_bus_start = virt_to_bus(iotlb_virt_start);
        iotlb_bus_end   = iotlb_bus_start + bytes;
        iotlb_bus_mask  = ~(dma_addr_t)(bytes - 1);

        printk(KERN_INFO "Software IO TLB enabled:\n"
               " Aperture: %lu megabytes\n"
               " Bus range: 0x%016lx - 0x%016lx\n"
               " Kernel range: 0x%016lx - 0x%016lx\n",
               bytes >> 20,
               (unsigned long)iotlb_bus_start,
               (unsigned long)iotlb_bus_end,
               (unsigned long)iotlb_virt_start,
               (unsigned long)iotlb_virt_start + bytes);
}

void
swiotlb_init(void)
{
        long ram_end;

        /* The user can forcibly enable swiotlb. */
        if (swiotlb_force)
                swiotlb = 1;

        /*
         * Otherwise, enable for domain 0 if the machine has 'lots of memory',
         * which we take to mean more than 2GB.
         */
        if (xen_start_info->flags & SIF_INITDOMAIN) {
                ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
                if (ram_end > 0x7ffff)
                        swiotlb = 1;
        }

        if (swiotlb)
                swiotlb_init_with_default_size(64 * (1<<20));
        else
                printk(KERN_INFO "Software IO TLB disabled\n");
}
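
/*
 * Copy data between the bounce buffer and the original buffer described by
 * a struct phys_addr.  When the original page is in highmem it is mapped a
 * page at a time with kmap_atomic(); otherwise a direct memcpy through the
 * kernel linear mapping suffices.
 */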
static void
__sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
{
        if (PageHighMem(buffer.page)) {
                size_t len, bytes;
                char *dev, *host, *kmp;
                len = size;
                while (len != 0) {
                        if (((bytes = len) + buffer.offset) > PAGE_SIZE)
                                bytes = PAGE_SIZE - buffer.offset;
                        kmp = kmap_atomic(buffer.page, KM_SWIOTLB);
                        dev = dma_addr + size - len;
                        host = kmp + buffer.offset;
                        memcpy((dir == DMA_FROM_DEVICE) ? host : dev,
                               (dir == DMA_FROM_DEVICE) ? dev : host,
                               bytes);
                        kunmap_atomic(kmp, KM_SWIOTLB);
                        len -= bytes;
                        buffer.page++;
                        buffer.offset = 0;
                }
        } else {
                char *host = (char *)phys_to_virt(
                        page_to_pseudophys(buffer.page)) + buffer.offset;
                if (dir == DMA_FROM_DEVICE)
                        memcpy(host, dma_addr, size);
                else if (dir == DMA_TO_DEVICE)
                        memcpy(dma_addr, host, size);
        }
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
{
        unsigned long flags;
        char *dma_addr;
        unsigned int nslots, stride, index, wrap;
        int i;

        /*
         * For mappings greater than a page, we limit the stride (and
         * hence alignment) to a page size.
         */
        nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        if (size > PAGE_SIZE)
                stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
        else
                stride = 1;

        BUG_ON(!nslots);

        /*
         * Find a suitable number of IO TLB entries that will fit this
         * request and allocate a buffer from that IO TLB pool.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        {
                wrap = index = ALIGN(io_tlb_index, stride);

                if (index >= iotlb_nslabs)
                        wrap = index = 0;

                do {
                        /*
                         * If we find a slot that indicates we have 'nslots'
                         * number of contiguous buffers, we allocate the
                         * buffers from that slot and mark the entries as '0'
                         * indicating unavailable.
                         */
                        if (io_tlb_list[index] >= nslots) {
                                int count = 0;

                                for (i = index; i < (int)(index + nslots); i++)
                                        io_tlb_list[i] = 0;
                                for (i = index - 1;
                                     (OFFSET(i, IO_TLB_SEGSIZE) !=
                                      IO_TLB_SEGSIZE - 1) && io_tlb_list[i];
                                     i--)
                                        io_tlb_list[i] = ++count;
                                dma_addr = iotlb_virt_start +
                                        (index << IO_TLB_SHIFT);

                                /*
                                 * Update the indices to avoid searching in
                                 * the next round.
                                 */
                                io_tlb_index =
                                        ((index + nslots) < iotlb_nslabs
                                         ? (index + nslots) : 0);

                                goto found;
                        }
                        index += stride;
                        if (index >= iotlb_nslabs)
                                index = 0;
                } while (index != wrap);

                spin_unlock_irqrestore(&io_tlb_lock, flags);
                return NULL;
        }
found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);

        /*
         * Save away the mapping from the original address to the DMA address.
         * This is needed when we sync the memory. Then we sync the buffer if
         * needed.
         */
        io_tlb_orig_addr[index] = buffer;
        if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
                __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);

        return dma_addr;
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
        unsigned long flags;
        int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
        struct phys_addr buffer = io_tlb_orig_addr[index];

        /*
         * First, sync the memory before unmapping the entry
         */
        if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
                __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);

        /*
         * Return the buffer to the free list by setting the corresponding
         * entries to indicate the number of contiguous entries available.
         * While returning the entries to the free list, we merge the entries
         * with slots below and above the pool being returned.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        {
                count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
                         io_tlb_list[index + nslots] : 0);
                /*
                 * Step 1: return the slots to the free list, merging the
                 * slots with succeeding slots
                 */
                for (i = index + nslots - 1; i >= index; i--)
                        io_tlb_list[i] = ++count;
                /*
                 * Step 2: merge the returned slots with the preceding slots,
                 * if available (non zero)
                 */
                for (i = index - 1;
                     (OFFSET(i, IO_TLB_SEGSIZE) !=
                      IO_TLB_SEGSIZE - 1) && io_tlb_list[i];
                     i--)
                        io_tlb_list[i] = ++count;
        }
        spin_unlock_irqrestore(&io_tlb_lock, flags);
}

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
        int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
        struct phys_addr buffer = io_tlb_orig_addr[index];
        BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
        __sync_single(buffer, dma_addr, size, dir);
}

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly
         * unless they check for pci_dma_mapping_error (most don't).
         * When the mapping is small enough return a static buffer to limit
         * the damage, or panic when the transfer is too big.
         */
        printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
               "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");

        if (size > io_tlb_overflow && do_panic) {
                if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Memory would be corrupted\n");
                if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Random memory would be DMAed\n");
        }
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * PCI address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
 */
dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
        dma_addr_t dev_addr = virt_to_bus(ptr);
        void *map;
        struct phys_addr buffer;

        BUG_ON(dir == DMA_NONE);

        /*
         * If the pointer passed in happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (!range_straddles_page_boundary(ptr, size) &&
            !address_needs_mapping(hwdev, dev_addr))
                return dev_addr;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        buffer.page = virt_to_page(ptr);
        buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
        map = map_single(hwdev, buffer, size, dir);
        if (!map) {
                swiotlb_full(hwdev, size, dir, 1);
                map = io_tlb_overflow_buffer;
        }

        dev_addr = virt_to_bus(map);
        return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_single call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void
swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
                     int dir)
{
        BUG_ON(dir == DMA_NONE);
        if (in_swiotlb_aperture(dev_addr))
                unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
}

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_single() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
 * call this function before doing so. At the next point you give the PCI dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, int dir)
{
        BUG_ON(dir == DMA_NONE);
        if (in_swiotlb_aperture(dev_addr))
                sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
}

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                               size_t size, int dir)
{
        BUG_ON(dir == DMA_NONE);
        if (in_swiotlb_aperture(dev_addr))
                sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
}

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_single
 * interface. Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_single are the
 * same here.
 */
int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
               int dir)
{
        struct phys_addr buffer;
        dma_addr_t dev_addr;
        char *map;
        int i;

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nelems; i++, sg++) {
                dev_addr = SG_ENT_PHYS_ADDRESS(sg);
                if (address_needs_mapping(hwdev, dev_addr)) {
                        buffer.page = sg->page;
                        buffer.offset = sg->offset;
                        map = map_single(hwdev, buffer, sg->length, dir);
                        if (!map) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                swiotlb_full(hwdev, sg->length, dir, 0);
                                swiotlb_unmap_sg(hwdev, sg - i, i, dir);
                                sg[0].dma_length = 0;
                                return 0;
                        }
                        sg->dma_address = (dma_addr_t)virt_to_bus(map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
        }
        return nelems;
}

/*
 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_single() above.
 */
void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
                 int dir)
{
        int i;

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nelems; i++, sg++)
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                        unmap_single(hwdev,
                                     (void *)bus_to_virt(sg->dma_address),
                                     sg->dma_length, dir);
}

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                        int nelems, int dir)
{
        int i;

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nelems; i++, sg++)
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                        sync_single(hwdev,
                                    (void *)bus_to_virt(sg->dma_address),
                                    sg->dma_length, dir);
}

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                           int nelems, int dir)
{
        int i;

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nelems; i++, sg++)
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                        sync_single(hwdev,
                                    (void *)bus_to_virt(sg->dma_address),
                                    sg->dma_length, dir);
}

dma_addr_t
swiotlb_map_page(struct device *hwdev, struct page *page,
                 unsigned long offset, size_t size,
                 enum dma_data_direction direction)
{
        struct phys_addr buffer;
        dma_addr_t dev_addr;
        char *map;

        dev_addr = page_to_phys(page) + offset;
        if (address_needs_mapping(hwdev, dev_addr)) {
                buffer.page = page;
                buffer.offset = offset;
                map = map_single(hwdev, buffer, size, direction);
                if (!map) {
                        swiotlb_full(hwdev, size, direction, 1);
                        map = io_tlb_overflow_buffer;
                }
                dev_addr = (dma_addr_t)virt_to_bus(map);
        }

        return dev_addr;
}

void
swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
                   size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        if (in_swiotlb_aperture(dma_address))
                unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
}

int
swiotlb_dma_mapping_error(dma_addr_t dma_addr)
{
        return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
}
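
/*
 * Illustrative (hypothetical) caller: a driver that maps a buffer and
 * checks for bounce-pool exhaustion before handing the address to the
 * hardware might do something like:
 *
 *	dma_addr_t handle = swiotlb_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (swiotlb_dma_mapping_error(handle))
 *		return -ENOMEM;
 *
 * Failed single mappings are redirected to io_tlb_overflow_buffer, which
 * is what the comparison above detects.
 */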

/*
 * Return whether the given PCI device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported (struct device *hwdev, u64 mask)
{
        return (mask >= (iotlb_bus_end - 1));
}
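
/*
 * Note: the bounce aperture occupies bus addresses below iotlb_bus_end,
 * so a device whose DMA mask covers (iotlb_bus_end - 1) can reach every
 * bounce buffer, which is all this implementation requires.
 */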

EXPORT_SYMBOL(swiotlb_init);
EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_map_page);
EXPORT_SYMBOL(swiotlb_unmap_page);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
EXPORT_SYMBOL(swiotlb_dma_supported);

/*
 * Local variables:
 * c-file-style: "linux"
 * indent-tabs-mode: t
 * c-indent-level: 8
 * c-basic-offset: 8
 * tab-width: 8
 * End:
 */