BUG_ON(!sg[i].page);
IOMMU_BUG_ON(address_needs_mapping(
hwdev, sg[i].dma_address));
+ IOMMU_BUG_ON(range_straddles_page_boundary(
+ page_to_pseudophys(sg[i].page) + sg[i].offset,
+ sg[i].length));
}
rc = nents;
}
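The assertion added here validates each scatterlist entry by its pseudo-physical start address rather than a kernel virtual one. Factored out, the condition amounts to the following (a minimal sketch; sg_entry_straddles() is a hypothetical name, the rest comes from the surrounding tree):

	/* Sketch: the condition asserted above, factored out.  An sg
	 * entry starts at page_to_pseudophys(page) + offset in
	 * pseudo-physical space; the assertion fires if
	 * [start, start + length) crosses a page boundary without the
	 * underlying frames being machine-contiguous. */
	static inline int sg_entry_straddles(struct scatterlist *sg)
	{
		return range_straddles_page_boundary(
			page_to_pseudophys(sg->page) + sg->offset,
			sg->length);
	}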
} else {
dma = gnttab_dma_map_page(virt_to_page(ptr)) +
offset_in_page(ptr);
- IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
+ IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
IOMMU_BUG_ON(address_needs_mapping(dev, dma));
}
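Taking a paddr_t lets one predicate serve both call styles: lowmem virtual addresses are converted with __pa() at the call site, while the scatterlist path builds the pseudo-physical address straight from the struct page, which also works for highmem pages that have no kernel virtual mapping and for which __pa() would be invalid. A hypothetical illustration of the two patterns (page, offset and len are stand-in parameters; ptr and size mirror the locals in the hunk above):

	static void straddle_checks(void *ptr, size_t size,
				    struct page *page,
				    unsigned int offset, size_t len)
	{
		/* Lowmem virtual address: convert at the call site. */
		IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));

		/* struct page: build the pseudo-physical address directly,
		 * with no need for a kernel virtual alias. */
		IOMMU_BUG_ON(range_straddles_page_boundary(
			page_to_pseudophys(page) + offset, len));
	}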
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (!range_straddles_page_boundary(ptr, size) &&
+ if (!range_straddles_page_boundary(__pa(ptr), size) &&
!address_needs_mapping(hwdev, dev_addr))
return dev_addr;
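Both halves must now hold before the fast path may hand back dev_addr without bounce buffering. Condensed into a single predicate (swiotlb_needs_bounce() is a hypothetical name, not part of the patch):

	/* Hypothetical condensation of the fast-path test above:
	 * bounce unless the buffer is machine-contiguous across its
	 * pages AND already addressable by the device. */
	static int swiotlb_needs_bounce(struct device *hwdev, void *ptr,
					size_t size, dma_addr_t dev_addr)
	{
		return range_straddles_page_boundary(__pa(ptr), size) ||
		       address_needs_mapping(hwdev, dev_addr);
	}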
for (i = 0; i < nelems; i++, sg++) {
dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
- if (address_needs_mapping(hwdev, dev_addr)) {
+ if (range_straddles_page_boundary(
+     page_to_pseudophys(sg->page) + sg->offset,
+     sg->length)
+     || address_needs_mapping(hwdev, dev_addr)) {
gnttab_dma_unmap_page(dev_addr);
buffer.page = sg->page;
buffer.offset = sg->offset;
}
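One detail worth noting in this hunk: the grant-table mapping taken on the previous line is revoked before the entry is redirected at a bounce buffer, so no grant reference leaks on the slow path. Annotated condensation (the bounce fallback itself is elided here, as it is in the hunk):

	/* Per-entry slow path, condensed from the loop body above:
	 * map optimistically through the grant table, then revoke the
	 * grant if the entry has to be bounced instead. */
	dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
	if (range_straddles_page_boundary(
		    page_to_pseudophys(sg->page) + sg->offset,
		    sg->length)
	    || address_needs_mapping(hwdev, dev_addr)) {
		gnttab_dma_unmap_page(dev_addr); /* drop the grant first */
		/* ... then substitute a bounce buffer for this entry ... */
	}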
static inline int
-range_straddles_page_boundary(void *p, size_t size)
+range_straddles_page_boundary(paddr_t p, size_t size)
{
extern unsigned long *contiguous_bitmap;
- return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
- !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
+ return ((((p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
+ !test_bit(p >> PAGE_SHIFT, contiguous_bitmap));
}
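For concreteness, the arithmetic in the first half of the predicate can be exercised in isolation (a stand-alone userspace sketch with a hard-coded 4 KiB page; the contiguous_bitmap half, which lets machine-contiguous multi-frame ranges pass, is omitted):

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define PAGE_MASK  (~(PAGE_SIZE - 1))

	/* First half of the predicate only: does [p, p + size) cross a
	 * page boundary?  In the kernel version a crossing range is
	 * still acceptable if contiguous_bitmap marks its frames as
	 * machine-contiguous. */
	static int straddles(uint64_t p, size_t size)
	{
		return ((p & ~PAGE_MASK) + size) > PAGE_SIZE;
	}

	int main(void)
	{
		printf("%d\n", straddles(0x1ff0, 0x20));   /* 1: 0xff0 + 0x20 crosses */
		printf("%d\n", straddles(0x1000, 0x1000)); /* 0: exactly one page */
		printf("%d\n", straddles(0x1800, 0x900));  /* 1: 0x800 + 0x900 > 0x1000 */
		return 0;
	}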
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)