xenbits.xensource.com Git - legacy/linux-2.6.18-xen.git/commitdiff
x86: dma_map_sg() must handle multi-page segments.
author kfraser <kfraser@localhost.localdomain>
Mon, 9 Jul 2007 12:45:10 +0000 (13:45 +0100)
committer kfraser <kfraser@localhost.localdomain>
Mon, 9 Jul 2007 12:45:10 +0000 (13:45 +0100)
Signed-off-by: Keir Fraser <keir@xensource.com>
arch/i386/kernel/pci-dma-xen.c
arch/i386/kernel/swiotlb.c
include/asm-i386/mach-xen/asm/dma-mapping.h

index 5ba15fa3a7a10727336d1264e9f59c9a90f3f8f3..7fe2e6dd7e0e0eae3830ccd4f9437f3db1235da5 100644 (file)
@@ -97,6 +97,9 @@ dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
                        BUG_ON(!sg[i].page);
                        IOMMU_BUG_ON(address_needs_mapping(
                                hwdev, sg[i].dma_address));
+                       IOMMU_BUG_ON(range_straddles_page_boundary(
+                               page_to_pseudophys(sg[i].page) + sg[i].offset,
+                               sg[i].length));
                }
                rc = nents;
        }
@@ -338,7 +341,7 @@ dma_map_single(struct device *dev, void *ptr, size_t size,
        } else {
                dma = gnttab_dma_map_page(virt_to_page(ptr)) +
                      offset_in_page(ptr);
-               IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
+               IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
                IOMMU_BUG_ON(address_needs_mapping(dev, dma));
        }
 
index 702f48c2ea27b69df1c85a9aaf226c7a1be8f5dc..2834a2cebc51828e490df4eb9605989701f01fc6 100644 (file)
@@ -480,7 +480,7 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
-       if (!range_straddles_page_boundary(ptr, size) &&
+       if (!range_straddles_page_boundary(__pa(ptr), size) &&
            !address_needs_mapping(hwdev, dev_addr))
                return dev_addr;
 
@@ -577,7 +577,9 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
        for (i = 0; i < nelems; i++, sg++) {
                dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
 
-               if (address_needs_mapping(hwdev, dev_addr)) {
+               if (range_straddles_page_boundary(page_to_pseudophys(sg->page)
+                                                 + sg->offset, sg->length)
+                   || address_needs_mapping(hwdev, dev_addr)) {
                        gnttab_dma_unmap_page(dev_addr);
                        buffer.page   = sg->page;
                        buffer.offset = sg->offset;
index 39a9ce0ad04ace00edca2cfacc79020bc0a31e1e..14fffa0acae49cdfddba5df0e081fa7e7de4a06d 100644 (file)
@@ -23,11 +23,11 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 }
 
 static inline int
-range_straddles_page_boundary(void *p, size_t size)
+range_straddles_page_boundary(paddr_t p, size_t size)
 {
        extern unsigned long *contiguous_bitmap;
-       return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
-               !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
+       return ((((p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
+               !test_bit(p >> PAGE_SHIFT, contiguous_bitmap));
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)