ia64/linux-2.6.18-xen.hg

changeset 503:5486a234923d

Xen dma: avoid unnecessarily SWIOTLB bounce buffering.

On Xen kernels, BIOVEC_PHYS_MERGEABLE permits merging of disk IOs that
span multiple pages, provided that the pages are both
pseudophysically- AND machine-contiguous ---

(((bvec_to_phys((vec1)) + (vec1)->bv_len) ==
bvec_to_phys((vec2))) && \
((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
bvec_to_pseudophys((vec2))))

However, this best-effort merging of adjacent pages can occur in
regions of dom0 memory which just happen, by virtue of having been
initially set up that way, to be machine-contiguous. Such pages
which occur outside of a range created by xen_create_contiguous_
region won't be seen as contiguous by range_straddles_page_boundary(),
so the pci-dma-xen.c code for dma_map_sg() will send these regions
to the swiotlb for bounce buffering.

This patch adds a new check, check_pages_physically_contiguous(),
to the test for pages straddling page boundaries both in
swiotlb_map_sg() and dma_map_sg(), to capture these ranges and map
them directly via virt_to_bus() mapping rather than through the
swiotlb.

Signed-off-by: Stephen Tweedie <sct@redhat.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Mar 31 10:32:25 2008 +0100 (2008-03-31)
parents 171ffa6bf3a5
children 4018c0da3360
files arch/i386/kernel/pci-dma-xen.c include/asm-i386/mach-xen/asm/dma-mapping.h
line diff
     1.1 --- a/arch/i386/kernel/pci-dma-xen.c	Fri Mar 28 14:27:38 2008 +0000
     1.2 +++ b/arch/i386/kernel/pci-dma-xen.c	Mon Mar 31 10:32:25 2008 +0100
     1.3 @@ -77,6 +77,39 @@ do {							\
     1.4  	}						\
     1.5  } while (0)
     1.6  
     1.7 +static int check_pages_physically_contiguous(unsigned long pfn, 
     1.8 +					     unsigned int offset,
     1.9 +					     size_t length)
    1.10 +{
    1.11 +	unsigned long next_mfn;
    1.12 +	int i;
    1.13 +	int nr_pages;
    1.14 +	
    1.15 +	next_mfn = pfn_to_mfn(pfn);
    1.16 +	nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;
    1.17 +	
    1.18 +	for (i = 1; i < nr_pages; i++) {
    1.19 +		if (pfn_to_mfn(++pfn) != ++next_mfn) 
    1.20 +			return 0;
    1.21 +	}
    1.22 +	return 1;
    1.23 +}
    1.24 +
    1.25 +int range_straddles_page_boundary(paddr_t p, size_t size)
    1.26 +{
    1.27 +	extern unsigned long *contiguous_bitmap;
    1.28 +	unsigned long pfn = p >> PAGE_SHIFT;
    1.29 +	unsigned int offset = p & ~PAGE_MASK;
    1.30 +
    1.31 +	if (offset + size <= PAGE_SIZE)
    1.32 +		return 0;
    1.33 +	if (test_bit(pfn, contiguous_bitmap))
    1.34 +		return 0;
    1.35 +	if (check_pages_physically_contiguous(pfn, offset, size))
    1.36 +		return 0;
    1.37 +	return 1;
    1.38 +}
    1.39 +
    1.40  int
    1.41  dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
    1.42  	   enum dma_data_direction direction)
     2.1 --- a/include/asm-i386/mach-xen/asm/dma-mapping.h	Fri Mar 28 14:27:38 2008 +0000
     2.2 +++ b/include/asm-i386/mach-xen/asm/dma-mapping.h	Mon Mar 31 10:32:25 2008 +0100
     2.3 @@ -22,13 +22,7 @@ address_needs_mapping(struct device *hwd
     2.4  	return (addr & ~mask) != 0;
     2.5  }
     2.6  
     2.7 -static inline int
     2.8 -range_straddles_page_boundary(paddr_t p, size_t size)
     2.9 -{
    2.10 -	extern unsigned long *contiguous_bitmap;
    2.11 -	return ((((p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
    2.12 -		!test_bit(p >> PAGE_SHIFT, contiguous_bitmap));
    2.13 -}
    2.14 +extern int range_straddles_page_boundary(paddr_t p, size_t size);
    2.15  
    2.16  #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
    2.17  #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)