ia64/linux-2.6.18-xen.hg

annotate lib/swiotlb-xen.c @ 871:9cbcc9008446

xen/x86: don't initialize cpu_data[]'s apicid field in generic code

Afaict, this is not only redundant with the initialization done in
drivers/xen/core/smpboot.c, but - at least for secondary CPUs - it
actually results in the Xen-specific value that was written being
overwritten later with whatever the generic code determines (with no
guarantee that the two values are identical).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu May 14 10:09:15 2009 +0100 (2009-05-14)
parents 87c84f7dd850
children
rev   line source
kfraser@140 1 /*
kfraser@140 2 * Dynamic DMA mapping support.
kfraser@140 3 *
kfraser@140 4 * This implementation is a fallback for platforms that do not support
kfraser@140 5 * I/O TLBs (aka DMA address translation hardware).
kfraser@140 6 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
kfraser@140 7 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
kfraser@140 8 * Copyright (C) 2000, 2003 Hewlett-Packard Co
kfraser@140 9 * David Mosberger-Tang <davidm@hpl.hp.com>
kfraser@140 10 * Copyright (C) 2005 Keir Fraser <keir@xensource.com>
kfraser@140 11 */
kfraser@140 12
kfraser@140 13 #include <linux/cache.h>
kfraser@140 14 #include <linux/mm.h>
kfraser@140 15 #include <linux/module.h>
kfraser@140 16 #include <linux/pci.h>
kfraser@140 17 #include <linux/spinlock.h>
kfraser@140 18 #include <linux/string.h>
kfraser@140 19 #include <linux/types.h>
kfraser@140 20 #include <linux/ctype.h>
kfraser@140 21 #include <linux/init.h>
kfraser@140 22 #include <linux/bootmem.h>
kfraser@140 23 #include <linux/highmem.h>
kfraser@140 24 #include <asm/io.h>
kfraser@140 25 #include <asm/pci.h>
kfraser@140 26 #include <asm/dma.h>
kfraser@140 27 #include <asm/uaccess.h>
kfraser@140 28 #include <xen/gnttab.h>
kfraser@140 29 #include <xen/interface/memory.h>
kfraser@140 30 #include <asm-i386/mach-xen/asm/gnttab_dma.h>
kfraser@140 31
kfraser@140 32 int swiotlb;
kfraser@140 33 EXPORT_SYMBOL(swiotlb);
kfraser@140 34
kfraser@140 35 #define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))
kfraser@140 36
kfraser@140 37 /*
kfraser@140 38 * Maximum allowable number of contiguous slabs to map,
kfraser@140 39 * must be a power of 2. What is the appropriate value ?
kfraser@140 40 * The complexity of {map,unmap}_single is linearly dependent on this value.
kfraser@140 41 */
kfraser@140 42 #define IO_TLB_SEGSIZE 128
kfraser@140 43
kfraser@140 44 /*
kfraser@140 45 * log of the size of each IO TLB slab. The number of slabs is command line
kfraser@140 46 * controllable.
kfraser@140 47 */
kfraser@140 48 #define IO_TLB_SHIFT 11
kfraser@140 49
kfraser@140 50 int swiotlb_force;
kfraser@140 51
kfraser@140 52 static char *iotlb_virt_start;
kfraser@140 53 static unsigned long iotlb_nslabs;
kfraser@140 54
kfraser@140 55 /*
kfraser@140 56 * Used to do a quick range check in swiotlb_unmap_single and
kfraser@140 57 * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
kfraser@140 58 * API.
kfraser@140 59 */
kfraser@140 60 static unsigned long iotlb_pfn_start, iotlb_pfn_end;
kfraser@140 61
kfraser@140 62 /* Does the given dma address reside within the swiotlb aperture? */
kfraser@140 63 static inline int in_swiotlb_aperture(dma_addr_t dev_addr)
kfraser@140 64 {
kfraser@140 65 unsigned long pfn = mfn_to_local_pfn(dev_addr >> PAGE_SHIFT);
kfraser@140 66 return (pfn_valid(pfn)
kfraser@140 67 && (pfn >= iotlb_pfn_start)
kfraser@140 68 && (pfn < iotlb_pfn_end));
kfraser@140 69 }
kfraser@140 70
kfraser@140 71 /*
kfraser@140 72 * When the IOMMU overflows we return a fallback buffer. This sets the size.
kfraser@140 73 */
kfraser@140 74 static unsigned long io_tlb_overflow = 32*1024;
kfraser@140 75
kfraser@140 76 void *io_tlb_overflow_buffer;
kfraser@140 77
kfraser@140 78 /*
kfraser@140 79 * This is a free list describing the number of free entries available from
kfraser@140 80 * each index
kfraser@140 81 */
kfraser@140 82 static unsigned int *io_tlb_list;
kfraser@140 83 static unsigned int io_tlb_index;
kfraser@140 84
kfraser@140 85 /*
kfraser@140 86 * We need to save away the original address corresponding to a mapped entry
kfraser@140 87 * for the sync operations.
kfraser@140 88 */
kfraser@140 89 static struct phys_addr {
kfraser@140 90 struct page *page;
kfraser@140 91 unsigned int offset;
kfraser@140 92 } *io_tlb_orig_addr;
kfraser@140 93
kfraser@140 94 /*
kfraser@140 95 * Protect the above data structures in the map and unmap calls
kfraser@140 96 */
kfraser@140 97 static DEFINE_SPINLOCK(io_tlb_lock);
kfraser@140 98
kfraser@140 99 static unsigned int dma_bits;
kfraser@140 100 static unsigned int __initdata max_dma_bits = 32;
kfraser@140 101 static int __init
kfraser@140 102 setup_dma_bits(char *str)
kfraser@140 103 {
kfraser@140 104 max_dma_bits = simple_strtoul(str, NULL, 0);
kfraser@140 105 return 0;
kfraser@140 106 }
kfraser@140 107 __setup("dma_bits=", setup_dma_bits);
kfraser@140 108
kfraser@140 109 static int __init
kfraser@140 110 setup_io_tlb_npages(char *str)
kfraser@140 111 {
kfraser@140 112 /* Unlike ia64, the argument is the aperture size in megabytes, not 'slabs'! */
kfraser@140 113 if (isdigit(*str)) {
kfraser@140 114 iotlb_nslabs = simple_strtoul(str, &str, 0) <<
kfraser@140 115 (20 - IO_TLB_SHIFT);
kfraser@140 116 iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
kfraser@140 117 }
kfraser@140 118 if (*str == ',')
kfraser@140 119 ++str;
kfraser@140 120 /*
kfraser@140 121 * NB. 'force' enables the swiotlb, but doesn't force its use for
kfraser@140 122 * every DMA like it does on native Linux. 'off' forcibly disables
kfraser@140 123 * use of the swiotlb.
kfraser@140 124 */
kfraser@140 125 if (!strcmp(str, "force"))
kfraser@140 126 swiotlb_force = 1;
kfraser@140 127 else if (!strcmp(str, "off"))
kfraser@140 128 swiotlb_force = -1;
kfraser@140 129 return 1;
kfraser@140 130 }
kfraser@140 131 __setup("swiotlb=", setup_io_tlb_npages);
kfraser@140 132 /* make io_tlb_overflow tunable too? */
kfraser@140 133
kfraser@140 134 /*
kfraser@140 135 * Statically reserve bounce buffer space and initialize bounce buffer data
kfraser@140 136 * structures for the software IO TLB used to implement the PCI DMA API.
kfraser@140 137 */
kfraser@140 138 void
kfraser@140 139 swiotlb_init_with_default_size (size_t default_size)
kfraser@140 140 {
kfraser@140 141 unsigned long i, bytes;
kfraser@140 142 int rc;
kfraser@140 143
kfraser@140 144 if (!iotlb_nslabs) {
kfraser@140 145 iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
kfraser@140 146 iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
kfraser@140 147 }
kfraser@140 148
kfraser@140 149 bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
kfraser@140 150
kfraser@140 151 /*
kfraser@140 152 * Get IO TLB memory from the low pages
kfraser@140 153 */
keir@844 154 iotlb_virt_start = alloc_bootmem_pages(bytes);
kfraser@140 155 if (!iotlb_virt_start)
kfraser@140 156 panic("Cannot allocate SWIOTLB buffer!\n");
kfraser@140 157
kfraser@140 158 dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
kfraser@140 159 for (i = 0; i < iotlb_nslabs; i += IO_TLB_SEGSIZE) {
kfraser@140 160 do {
kfraser@140 161 rc = xen_create_contiguous_region(
kfraser@140 162 (unsigned long)iotlb_virt_start + (i << IO_TLB_SHIFT),
kfraser@140 163 get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
kfraser@140 164 dma_bits);
kfraser@140 165 } while (rc && dma_bits++ < max_dma_bits);
kfraser@140 166 if (rc) {
kfraser@140 167 if (i == 0)
kfraser@140 168 panic("No suitable physical memory available for SWIOTLB buffer!\n"
kfraser@140 169 "Use dom0_mem Xen boot parameter to reserve\n"
kfraser@140 170 "some DMA memory (e.g., dom0_mem=-128M).\n");
kfraser@140 171 iotlb_nslabs = i;
kfraser@140 172 i <<= IO_TLB_SHIFT;
kfraser@140 173 free_bootmem(__pa(iotlb_virt_start + i), bytes - i);
kfraser@140 174 bytes = i;
kfraser@140 175 for (dma_bits = 0; i > 0; i -= IO_TLB_SEGSIZE << IO_TLB_SHIFT) {
kfraser@140 176 unsigned int bits = fls64(virt_to_bus(iotlb_virt_start + i - 1));
kfraser@140 177
kfraser@140 178 if (bits > dma_bits)
kfraser@140 179 dma_bits = bits;
kfraser@140 180 }
kfraser@140 181 break;
kfraser@140 182 }
kfraser@140 183 }
kfraser@140 184
kfraser@140 185 /*
kfraser@140 186 * Allocate and initialize the free list array. This array is used
kfraser@140 187 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
kfraser@140 188 */
kfraser@140 189 io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
kfraser@140 190 for (i = 0; i < iotlb_nslabs; i++)
kfraser@140 191 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
kfraser@140 192 io_tlb_index = 0;
kfraser@140 193 io_tlb_orig_addr = alloc_bootmem(
kfraser@140 194 iotlb_nslabs * sizeof(*io_tlb_orig_addr));
kfraser@140 195
kfraser@140 196 /*
kfraser@140 197 * Get the overflow emergency buffer
kfraser@140 198 */
keir@844 199 io_tlb_overflow_buffer = alloc_bootmem(io_tlb_overflow);
kfraser@140 200 if (!io_tlb_overflow_buffer)
kfraser@140 201 panic("Cannot allocate SWIOTLB overflow buffer!\n");
kfraser@140 202
kfraser@140 203 do {
kfraser@140 204 rc = xen_create_contiguous_region(
kfraser@140 205 (unsigned long)io_tlb_overflow_buffer,
kfraser@140 206 get_order(io_tlb_overflow),
kfraser@140 207 dma_bits);
kfraser@140 208 } while (rc && dma_bits++ < max_dma_bits);
kfraser@140 209 if (rc)
kfraser@140 210 panic("No suitable physical memory available for SWIOTLB overflow buffer!\n");
kfraser@140 211
kfraser@140 212 iotlb_pfn_start = __pa(iotlb_virt_start) >> PAGE_SHIFT;
kfraser@140 213 iotlb_pfn_end = iotlb_pfn_start + (bytes >> PAGE_SHIFT);
kfraser@140 214
kfraser@140 215 printk(KERN_INFO "Software IO TLB enabled:\n"
kfraser@140 216 " Aperture: %lu megabytes\n"
kfraser@140 217 " Kernel range: %p - %p\n"
kfraser@140 218 " Address size: %u bits\n",
kfraser@140 219 bytes >> 20,
kfraser@140 220 iotlb_virt_start, iotlb_virt_start + bytes,
kfraser@140 221 dma_bits);
kfraser@140 222 }
kfraser@140 223
kfraser@140 224 void
kfraser@140 225 swiotlb_init(void)
kfraser@140 226 {
kfraser@140 227 long ram_end;
kfraser@140 228 size_t defsz = 64 * (1 << 20); /* 64MB default size */
kfraser@140 229
kfraser@140 230 if (swiotlb_force == 1) {
kfraser@140 231 swiotlb = 1;
kfraser@140 232 } else if ((swiotlb_force != -1) &&
kfraser@140 233 is_running_on_xen() &&
kfraser@140 234 is_initial_xendomain()) {
kfraser@140 235 /* Domain 0 always has a swiotlb. */
kfraser@140 236 ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
kfraser@140 237 if (ram_end <= 0x7ffff)
kfraser@140 238 defsz = 2 * (1 << 20); /* 2MB on systems with <2GB of RAM. */
kfraser@140 239 swiotlb = 1;
kfraser@140 240 }
kfraser@140 241
kfraser@140 242 if (swiotlb)
kfraser@140 243 swiotlb_init_with_default_size(defsz);
kfraser@140 244 else
kfraser@140 245 printk(KERN_INFO "Software IO TLB disabled\n");
kfraser@140 246 }
kfraser@140 247
kfraser@140 248 /*
kfraser@140 249 * We use __copy_to_user_inatomic to transfer to the host buffer because the
kfraser@140 250 * buffer may be mapped read-only (e.g., in the blkback driver) but lower-level
kfraser@140 251 * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an
kfraser@140 252 * unnecessary copy from the aperture to the host buffer, and a page fault.
kfraser@140 253 */
kfraser@140 254 static void
kfraser@140 255 __sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
kfraser@140 256 {
kfraser@140 257 if (PageHighMem(buffer.page)) {
kfraser@140 258 size_t len, bytes;
kfraser@140 259 char *dev, *host, *kmp;
kfraser@140 260 len = size;
kfraser@140 261 while (len != 0) {
kfraser@140 262 unsigned long flags;
kfraser@140 263
kfraser@140 264 if (((bytes = len) + buffer.offset) > PAGE_SIZE)
kfraser@140 265 bytes = PAGE_SIZE - buffer.offset;
kfraser@140 266 local_irq_save(flags); /* protects KM_BOUNCE_READ */
kfraser@140 267 kmp = kmap_atomic(buffer.page, KM_BOUNCE_READ);
kfraser@140 268 dev = dma_addr + size - len;
kfraser@140 269 host = kmp + buffer.offset;
kfraser@140 270 if (dir == DMA_FROM_DEVICE) {
kfraser@140 271 if (__copy_to_user_inatomic(host, dev, bytes))
kfraser@140 272 /* inaccessible */;
kfraser@140 273 } else
kfraser@140 274 memcpy(dev, host, bytes);
kfraser@140 275 kunmap_atomic(kmp, KM_BOUNCE_READ);
kfraser@140 276 local_irq_restore(flags);
kfraser@140 277 len -= bytes;
kfraser@140 278 buffer.page++;
kfraser@140 279 buffer.offset = 0;
kfraser@140 280 }
kfraser@140 281 } else {
kfraser@140 282 char *host = (char *)phys_to_virt(
kfraser@140 283 page_to_pseudophys(buffer.page)) + buffer.offset;
kfraser@140 284 if (dir == DMA_FROM_DEVICE) {
kfraser@140 285 if (__copy_to_user_inatomic(host, dma_addr, size))
kfraser@140 286 /* inaccessible */;
kfraser@140 287 } else if (dir == DMA_TO_DEVICE)
kfraser@140 288 memcpy(dma_addr, host, size);
kfraser@140 289 }
kfraser@140 290 }
kfraser@140 291
kfraser@140 292 /*
kfraser@140 293 * Allocates bounce buffer and returns its kernel virtual address.
kfraser@140 294 */
kfraser@140 295 static void *
kfraser@140 296 map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
kfraser@140 297 {
kfraser@140 298 unsigned long flags;
kfraser@140 299 char *dma_addr;
kfraser@140 300 unsigned int nslots, stride, index, wrap;
kfraser@140 301 struct phys_addr slot_buf;
kfraser@140 302 int i;
kfraser@140 303
kfraser@140 304 /*
kfraser@140 305 * For mappings greater than a page, we limit the stride (and
kfraser@140 306 * hence alignment) to a page size.
kfraser@140 307 */
kfraser@140 308 nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
kfraser@140 309 if (size > PAGE_SIZE)
kfraser@140 310 stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
kfraser@140 311 else
kfraser@140 312 stride = 1;
kfraser@140 313
kfraser@140 314 BUG_ON(!nslots);
kfraser@140 315
kfraser@140 316 /*
kfraser@140 317 * Find a suitable number of IO TLB entries that will fit this
kfraser@140 318 * request and allocate a buffer from that IO TLB pool.
kfraser@140 319 */
kfraser@140 320 spin_lock_irqsave(&io_tlb_lock, flags);
kfraser@140 321 {
kfraser@140 322 wrap = index = ALIGN(io_tlb_index, stride);
kfraser@140 323
kfraser@140 324 if (index >= iotlb_nslabs)
kfraser@140 325 wrap = index = 0;
kfraser@140 326
kfraser@140 327 do {
kfraser@140 328 /*
kfraser@140 329 * If we find a slot that indicates we have 'nslots'
kfraser@140 330 * number of contiguous buffers, we allocate the
kfraser@140 331 * buffers from that slot and mark the entries as '0'
kfraser@140 332 * indicating unavailable.
kfraser@140 333 */
kfraser@140 334 if (io_tlb_list[index] >= nslots) {
kfraser@140 335 int count = 0;
kfraser@140 336
kfraser@140 337 for (i = index; i < (int)(index + nslots); i++)
kfraser@140 338 io_tlb_list[i] = 0;
kfraser@140 339 for (i = index - 1;
kfraser@140 340 (OFFSET(i, IO_TLB_SEGSIZE) !=
kfraser@140 341 IO_TLB_SEGSIZE -1) && io_tlb_list[i];
kfraser@140 342 i--)
kfraser@140 343 io_tlb_list[i] = ++count;
kfraser@140 344 dma_addr = iotlb_virt_start +
kfraser@140 345 (index << IO_TLB_SHIFT);
kfraser@140 346
kfraser@140 347 /*
kfraser@140 348 * Update the indices to avoid searching in
kfraser@140 349 * the next round.
kfraser@140 350 */
kfraser@140 351 io_tlb_index =
kfraser@140 352 ((index + nslots) < iotlb_nslabs
kfraser@140 353 ? (index + nslots) : 0);
kfraser@140 354
kfraser@140 355 goto found;
kfraser@140 356 }
kfraser@140 357 index += stride;
kfraser@140 358 if (index >= iotlb_nslabs)
kfraser@140 359 index = 0;
kfraser@140 360 } while (index != wrap);
kfraser@140 361
kfraser@140 362 spin_unlock_irqrestore(&io_tlb_lock, flags);
kfraser@140 363 return NULL;
kfraser@140 364 }
kfraser@140 365 found:
kfraser@140 366 spin_unlock_irqrestore(&io_tlb_lock, flags);
kfraser@140 367
kfraser@140 368 /*
kfraser@140 369 * Save away the mapping from the original address to the DMA address.
kfraser@140 370 * This is needed when we sync the memory. Then we sync the buffer if
kfraser@140 371 * needed.
kfraser@140 372 */
kfraser@140 373 slot_buf = buffer;
kfraser@140 374 for (i = 0; i < nslots; i++) {
kfraser@140 375 slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
kfraser@140 376 slot_buf.offset &= PAGE_SIZE - 1;
kfraser@140 377 io_tlb_orig_addr[index+i] = slot_buf;
kfraser@140 378 slot_buf.offset += 1 << IO_TLB_SHIFT;
kfraser@140 379 }
kfraser@140 380 if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
kfraser@140 381 __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
kfraser@140 382
kfraser@140 383 return dma_addr;
kfraser@140 384 }
kfraser@140 385
kfraser@140 386 static struct phys_addr dma_addr_to_phys_addr(char *dma_addr)
kfraser@140 387 {
kfraser@140 388 int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
kfraser@140 389 struct phys_addr buffer = io_tlb_orig_addr[index];
kfraser@140 390 buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
kfraser@140 391 buffer.page += buffer.offset >> PAGE_SHIFT;
kfraser@140 392 buffer.offset &= PAGE_SIZE - 1;
kfraser@140 393 return buffer;
kfraser@140 394 }
kfraser@140 395
kfraser@140 396 /*
kfraser@140 397 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
kfraser@140 398 */
kfraser@140 399 static void
kfraser@140 400 unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
kfraser@140 401 {
kfraser@140 402 unsigned long flags;
kfraser@140 403 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
kfraser@140 404 int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
kfraser@140 405 struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
kfraser@140 406
kfraser@140 407 /*
kfraser@140 408 * First, sync the memory before unmapping the entry
kfraser@140 409 */
kfraser@140 410 if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
kfraser@140 411 __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
kfraser@140 412
kfraser@140 413 /*
kfraser@140 414 * Return the buffer to the free list by setting the corresponding
kfraser@140 415 * entries to indicate the number of contiguous entries available.
kfraser@140 416 * While returning the entries to the free list, we merge the entries
kfraser@140 417 * with slots below and above the pool being returned.
kfraser@140 418 */
kfraser@140 419 spin_lock_irqsave(&io_tlb_lock, flags);
kfraser@140 420 {
kfraser@140 421 count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
kfraser@140 422 io_tlb_list[index + nslots] : 0);
kfraser@140 423 /*
kfraser@140 424 * Step 1: return the slots to the free list, merging the
kfraser@140 425 * slots with succeeding slots.
kfraser@140 426 */
kfraser@140 427 for (i = index + nslots - 1; i >= index; i--)
kfraser@140 428 io_tlb_list[i] = ++count;
kfraser@140 429 /*
kfraser@140 430 * Step 2: merge the returned slots with the preceding slots,
kfraser@140 431 * if available (non-zero).
kfraser@140 432 */
kfraser@140 433 for (i = index - 1;
kfraser@140 434 (OFFSET(i, IO_TLB_SEGSIZE) !=
kfraser@140 435 IO_TLB_SEGSIZE -1) && io_tlb_list[i];
kfraser@140 436 i--)
kfraser@140 437 io_tlb_list[i] = ++count;
kfraser@140 438 }
kfraser@140 439 spin_unlock_irqrestore(&io_tlb_lock, flags);
kfraser@140 440 }
kfraser@140 441
kfraser@140 442 static void
kfraser@140 443 sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
kfraser@140 444 {
kfraser@140 445 struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
kfraser@140 446 BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
kfraser@140 447 __sync_single(buffer, dma_addr, size, dir);
kfraser@140 448 }
kfraser@140 449
kfraser@140 450 static void
kfraser@140 451 swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
kfraser@140 452 {
kfraser@140 453 /*
kfraser@140 454 * Ran out of IOMMU space for this operation. This is very bad.
kfraser@140 455 * Unfortunately the drivers cannot handle this operation properly
kfraser@140 456 * unless they check for pci_dma_mapping_error (most don't).
kfraser@140 457 * When the mapping is small enough we return a static buffer to limit
kfraser@140 458 * the damage, or panic when the transfer is too big.
kfraser@140 459 */
kfraser@140 460 printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
kfraser@140 461 "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");
kfraser@140 462
kfraser@140 463 if (size > io_tlb_overflow && do_panic) {
kfraser@140 464 if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
kfraser@140 465 panic("PCI-DMA: Memory would be corrupted\n");
kfraser@140 466 if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
kfraser@140 467 panic("PCI-DMA: Random memory would be DMAed\n");
kfraser@140 468 }
kfraser@140 469 }
kfraser@140 470
kfraser@140 471 /*
kfraser@140 472 * Map a single buffer of the indicated size for DMA in streaming mode. The
kfraser@140 473 * PCI address to use is returned.
kfraser@140 474 *
kfraser@140 475 * Once the device is given the dma address, the device owns this memory until
kfraser@140 476 * either swiotlb_unmap_single or swiotlb_sync_single_for_cpu is performed.
kfraser@140 477 */
kfraser@140 478 dma_addr_t
kfraser@140 479 swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
kfraser@140 480 {
kfraser@140 481 dma_addr_t dev_addr = gnttab_dma_map_page(virt_to_page(ptr)) +
kfraser@140 482 offset_in_page(ptr);
kfraser@140 483 void *map;
kfraser@140 484 struct phys_addr buffer;
kfraser@140 485
kfraser@140 486 BUG_ON(dir == DMA_NONE);
kfraser@140 487
kfraser@140 488 /*
kfraser@140 489 * If the pointer passed in happens to be in the device's DMA window,
kfraser@140 490 * we can safely return the device addr and not worry about bounce
kfraser@140 491 * buffering it.
kfraser@140 492 */
kfraser@140 493 if (!range_straddles_page_boundary(__pa(ptr), size) &&
kfraser@140 494 !address_needs_mapping(hwdev, dev_addr))
kfraser@140 495 return dev_addr;
kfraser@140 496
kfraser@140 497 /*
kfraser@140 498 * Oh well, have to allocate and map a bounce buffer.
kfraser@140 499 */
kfraser@140 500 gnttab_dma_unmap_page(dev_addr);
kfraser@140 501 buffer.page = virt_to_page(ptr);
kfraser@140 502 buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
kfraser@140 503 map = map_single(hwdev, buffer, size, dir);
kfraser@140 504 if (!map) {
kfraser@140 505 swiotlb_full(hwdev, size, dir, 1);
kfraser@140 506 map = io_tlb_overflow_buffer;
kfraser@140 507 }
kfraser@140 508
kfraser@140 509 dev_addr = virt_to_bus(map);
kfraser@140 510 return dev_addr;
kfraser@140 511 }
kfraser@140 512
kfraser@140 513 /*
kfraser@140 514 * Unmap a single streaming mode DMA translation. The dma_addr and size must
kfraser@140 515 * match what was provided in a previous swiotlb_map_single call. All
kfraser@140 516 * other usages are undefined.
kfraser@140 517 *
kfraser@140 518 * After this call, reads by the CPU from the buffer are guaranteed to see
kfraser@140 519 * whatever the device wrote there.
kfraser@140 520 */
kfraser@140 521 void
kfraser@140 522 swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
kfraser@140 523 int dir)
kfraser@140 524 {
kfraser@140 525 BUG_ON(dir == DMA_NONE);
kfraser@140 526 if (in_swiotlb_aperture(dev_addr))
kfraser@140 527 unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
kfraser@140 528 else
kfraser@140 529 gnttab_dma_unmap_page(dev_addr);
kfraser@140 530 }
kfraser@140 531
kfraser@140 532 /*
kfraser@140 533 * Make physical memory consistent for a single streaming mode DMA translation
kfraser@140 534 * after a transfer.
kfraser@140 535 *
kfraser@140 536 * If you perform a swiotlb_map_single() but wish to interrogate the buffer
kfraser@140 537 * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
kfraser@140 538 * call this function before doing so. At the next point you give the PCI dma
kfraser@140 539 * address back to the card, you must first perform a
kfraser@140 540 * swiotlb_sync_single_for_device, and then the device again owns the buffer.
kfraser@140 541 */
kfraser@140 542 void
kfraser@140 543 swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
kfraser@140 544 size_t size, int dir)
kfraser@140 545 {
kfraser@140 546 BUG_ON(dir == DMA_NONE);
kfraser@140 547 if (in_swiotlb_aperture(dev_addr))
kfraser@140 548 sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
kfraser@140 549 }
kfraser@140 550
kfraser@140 551 void
kfraser@140 552 swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
kfraser@140 553 size_t size, int dir)
kfraser@140 554 {
kfraser@140 555 BUG_ON(dir == DMA_NONE);
kfraser@140 556 if (in_swiotlb_aperture(dev_addr))
kfraser@140 557 sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
kfraser@140 558 }
kfraser@140 559
kfraser@140 560 /*
kfraser@140 561 * Map a set of buffers described by scatterlist in streaming mode for DMA.
kfraser@140 562 * This is the scatter-gather version of the above swiotlb_map_single
kfraser@140 563 * interface. Here the scatter gather list elements are each tagged with the
kfraser@140 564 * appropriate dma address and length. They are obtained via
kfraser@140 565 * sg_dma_{address,length}(SG).
kfraser@140 566 *
kfraser@140 567 * NOTE: An implementation may be able to use a smaller number of
kfraser@140 568 * DMA address/length pairs than there are SG table elements
kfraser@140 569 * (for example via virtual mapping capabilities).
kfraser@140 570 * The routine returns the number of addr/length pairs actually
kfraser@140 571 * used, at most nents.
kfraser@140 572 *
kfraser@140 573 * Device ownership issues as mentioned above for swiotlb_map_single are the
kfraser@140 574 * same here.
kfraser@140 575 */
kfraser@140 576 int
kfraser@140 577 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
kfraser@140 578 int dir)
kfraser@140 579 {
kfraser@140 580 struct phys_addr buffer;
kfraser@140 581 dma_addr_t dev_addr;
kfraser@140 582 char *map;
kfraser@140 583 int i;
kfraser@140 584
kfraser@140 585 BUG_ON(dir == DMA_NONE);
kfraser@140 586
kfraser@140 587 for (i = 0; i < nelems; i++, sg++) {
kfraser@140 588 dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
kfraser@140 589
kfraser@140 590 if (range_straddles_page_boundary(page_to_pseudophys(sg->page)
kfraser@140 591 + sg->offset, sg->length)
kfraser@140 592 || address_needs_mapping(hwdev, dev_addr)) {
kfraser@140 593 gnttab_dma_unmap_page(dev_addr);
kfraser@140 594 buffer.page = sg->page;
kfraser@140 595 buffer.offset = sg->offset;
kfraser@140 596 map = map_single(hwdev, buffer, sg->length, dir);
kfraser@140 597 if (!map) {
kfraser@140 598 /* Don't panic here, we expect map_sg users
kfraser@140 599 to do proper error handling. */
kfraser@140 600 swiotlb_full(hwdev, sg->length, dir, 0);
kfraser@140 601 swiotlb_unmap_sg(hwdev, sg - i, i, dir);
kfraser@140 602 sg[0].dma_length = 0;
kfraser@140 603 return 0;
kfraser@140 604 }
kfraser@140 605 sg->dma_address = (dma_addr_t)virt_to_bus(map);
kfraser@140 606 } else
kfraser@140 607 sg->dma_address = dev_addr;
kfraser@140 608 sg->dma_length = sg->length;
kfraser@140 609 }
kfraser@140 610 return nelems;
kfraser@140 611 }
kfraser@140 612
kfraser@140 613 /*
kfraser@140 614 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
kfraser@140 615 * concerning calls here are the same as for swiotlb_unmap_single() above.
kfraser@140 616 */
kfraser@140 617 void
kfraser@140 618 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
kfraser@140 619 int dir)
kfraser@140 620 {
kfraser@140 621 int i;
kfraser@140 622
kfraser@140 623 BUG_ON(dir == DMA_NONE);
kfraser@140 624
kfraser@140 625 for (i = 0; i < nelems; i++, sg++)
kfraser@140 626 if (in_swiotlb_aperture(sg->dma_address))
kfraser@140 627 unmap_single(hwdev,
kfraser@140 628 (void *)bus_to_virt(sg->dma_address),
kfraser@140 629 sg->dma_length, dir);
kfraser@140 630 else
kfraser@140 631 gnttab_dma_unmap_page(sg->dma_address);
kfraser@140 632 }
kfraser@140 633
kfraser@140 634 /*
kfraser@140 635 * Make physical memory consistent for a set of streaming mode DMA translations
kfraser@140 636 * after a transfer.
kfraser@140 637 *
kfraser@140 638 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
kfraser@140 639 * and usage.
kfraser@140 640 */
kfraser@140 641 void
kfraser@140 642 swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
kfraser@140 643 int nelems, int dir)
kfraser@140 644 {
kfraser@140 645 int i;
kfraser@140 646
kfraser@140 647 BUG_ON(dir == DMA_NONE);
kfraser@140 648
kfraser@140 649 for (i = 0; i < nelems; i++, sg++)
kfraser@140 650 if (in_swiotlb_aperture(sg->dma_address))
kfraser@140 651 sync_single(hwdev,
kfraser@140 652 (void *)bus_to_virt(sg->dma_address),
kfraser@140 653 sg->dma_length, dir);
kfraser@140 654 }
kfraser@140 655
kfraser@140 656 void
kfraser@140 657 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
kfraser@140 658 int nelems, int dir)
kfraser@140 659 {
kfraser@140 660 int i;
kfraser@140 661
kfraser@140 662 BUG_ON(dir == DMA_NONE);
kfraser@140 663
kfraser@140 664 for (i = 0; i < nelems; i++, sg++)
kfraser@140 665 if (in_swiotlb_aperture(sg->dma_address))
kfraser@140 666 sync_single(hwdev,
kfraser@140 667 (void *)bus_to_virt(sg->dma_address),
kfraser@140 668 sg->dma_length, dir);
kfraser@140 669 }
kfraser@140 670
kfraser@140 671 #ifdef CONFIG_HIGHMEM
kfraser@140 672
kfraser@140 673 dma_addr_t
kfraser@140 674 swiotlb_map_page(struct device *hwdev, struct page *page,
kfraser@140 675 unsigned long offset, size_t size,
kfraser@140 676 enum dma_data_direction direction)
kfraser@140 677 {
kfraser@140 678 struct phys_addr buffer;
kfraser@140 679 dma_addr_t dev_addr;
kfraser@140 680 char *map;
kfraser@140 681
kfraser@140 682 dev_addr = gnttab_dma_map_page(page) + offset;
kfraser@140 683 if (address_needs_mapping(hwdev, dev_addr)) {
kfraser@140 684 gnttab_dma_unmap_page(dev_addr);
kfraser@140 685 buffer.page = page;
kfraser@140 686 buffer.offset = offset;
kfraser@140 687 map = map_single(hwdev, buffer, size, direction);
kfraser@140 688 if (!map) {
kfraser@140 689 swiotlb_full(hwdev, size, direction, 1);
kfraser@140 690 map = io_tlb_overflow_buffer;
kfraser@140 691 }
kfraser@140 692 dev_addr = (dma_addr_t)virt_to_bus(map);
kfraser@140 693 }
kfraser@140 694
kfraser@140 695 return dev_addr;
kfraser@140 696 }
kfraser@140 697
kfraser@140 698 void
kfraser@140 699 swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
kfraser@140 700 size_t size, enum dma_data_direction direction)
kfraser@140 701 {
kfraser@140 702 BUG_ON(direction == DMA_NONE);
kfraser@140 703 if (in_swiotlb_aperture(dma_address))
kfraser@140 704 unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
kfraser@140 705 else
kfraser@140 706 gnttab_dma_unmap_page(dma_address);
kfraser@140 707 }
kfraser@140 708
kfraser@140 709 #endif
kfraser@140 710
kfraser@140 711 int
kfraser@140 712 swiotlb_dma_mapping_error(dma_addr_t dma_addr)
kfraser@140 713 {
kfraser@140 714 return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
kfraser@140 715 }
kfraser@140 716
kfraser@140 717 /*
kfraser@140 718 * Return whether the given PCI device DMA address mask can be supported
kfraser@140 719 * properly. For example, if your device can only drive the low 24-bits
kfraser@140 720 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
kfraser@140 721 * this function.
kfraser@140 722 */
kfraser@140 723 int
kfraser@140 724 swiotlb_dma_supported (struct device *hwdev, u64 mask)
kfraser@140 725 {
kfraser@140 726 return (mask >= ((1UL << dma_bits) - 1));
kfraser@140 727 }
kfraser@140 728
kfraser@140 729 EXPORT_SYMBOL(swiotlb_init);
kfraser@140 730 EXPORT_SYMBOL(swiotlb_map_single);
kfraser@140 731 EXPORT_SYMBOL(swiotlb_unmap_single);
kfraser@140 732 EXPORT_SYMBOL(swiotlb_map_sg);
kfraser@140 733 EXPORT_SYMBOL(swiotlb_unmap_sg);
kfraser@140 734 EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
kfraser@140 735 EXPORT_SYMBOL(swiotlb_sync_single_for_device);
kfraser@140 736 EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
kfraser@140 737 EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
kfraser@140 738 EXPORT_SYMBOL(swiotlb_dma_mapping_error);
kfraser@140 739 EXPORT_SYMBOL(swiotlb_dma_supported);
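
For reference, a minimal usage sketch of the exported single-buffer mapping API above (not part of swiotlb-xen.c). In this tree, drivers normally reach these routines indirectly through the generic DMA-mapping wrappers rather than calling them directly; the helper name, device pointer, buffer and length below are illustrative assumptions, and the header providing the swiotlb_* prototypes is assumed.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <asm/swiotlb.h>	/* swiotlb_* prototypes; header location assumed for this tree */

/* Hypothetical helper, for illustration only. */
static int example_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus;

	/* May bounce 'buf' through the software IO TLB aperture. */
	bus = swiotlb_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (swiotlb_dma_mapping_error(bus))
		return -ENOMEM;	/* aperture exhausted; only the overflow buffer came back */

	/* ... hand 'bus' to the device and wait for the transfer to complete ... */

	/* For DMA_FROM_DEVICE this would also copy the bounce buffer back. */
	swiotlb_unmap_single(dev, bus, len, DMA_TO_DEVICE);
	return 0;
}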