ia64/xen-unstable

changeset 6241:2bab84a5d122

Fix the SWIOTLB to correctly match on bus addresses.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Aug 18 09:47:04 2005 +0000 (2005-08-18)
parents a06430752462
children aa8abf007260
files linux-2.6-xen-sparse/arch/xen/i386/kernel/swiotlb.c
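The substance of the fix: SWIOTLB's unmap and sync entry points must decide whether a dma_addr_t handed in by a driver points into the bounce-buffer aperture. The old code first converted the device address with bus_to_virt() and compared the result against the aperture's virtual bounds; under Xen a bus (machine) address need not map back to a meaningful kernel virtual address, so that comparison could mis-classify addresses. The patch records the aperture's bus range at init time (iotlb_bus_start/iotlb_bus_end) and matches on the device address directly. A condensed before/after sketch using the names from the diff (illustrative kernel-context code, not the patched file verbatim):

	/* Before: convert first, then compare against virtual bounds. */
	char *dma_addr = bus_to_virt(dev_addr);
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		unmap_single(hwdev, dma_addr, size, dir);

	/* After: match on the bus address itself; convert to a virtual
	 * address only on a hit, when dev_addr is known to lie inside
	 * the machine-contiguous aperture. */
	if ((dev_addr >= iotlb_bus_start) && (dev_addr < iotlb_bus_end))
		unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);

The full diff follows.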
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/swiotlb.c	Thu Aug 18 01:13:49 2005 +0000
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/swiotlb.c	Thu Aug 18 09:47:04 2005 +0000
@@ -49,13 +49,14 @@ int swiotlb_force;
  * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
-static char *io_tlb_start, *io_tlb_end;
+static char *iotlb_virt_start, *iotlb_virt_end;
+static dma_addr_t iotlb_bus_start, iotlb_bus_end;
 
 /*
- * The number of IO TLB blocks (in groups of 64) betweeen io_tlb_start and
- * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
+ * The number of IO TLB blocks (in groups of 64) betweeen iotlb_virt_start and
+ * iotlb_virt_end.  This is command line adjustable via setup_io_tlb_npages.
  */
-static unsigned long io_tlb_nslabs;
+static unsigned long iotlb_nslabs;
 
 /*
  * When the IOMMU overflows we return a fallback buffer. This sets the size.
@@ -88,11 +89,14 @@ static DEFINE_SPINLOCK(io_tlb_lock);
 static int __init
 setup_io_tlb_npages(char *str)
 {
+	/* Unlike ia64, the size is aperture in megabytes, not 'slabs'! */
 	if (isdigit(*str)) {
-		io_tlb_nslabs = simple_strtoul(str, &str, 0) <<
-			(PAGE_SHIFT - IO_TLB_SHIFT);
-		/* avoid tail segment of size < IO_TLB_SEGSIZE */
-		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+		iotlb_nslabs = simple_strtoul(str, &str, 0) <<
+			(20 - IO_TLB_SHIFT);
+		iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
+		/* Round up to power of two (xen_create_contiguous_region). */
+		while (iotlb_nslabs & (iotlb_nslabs-1))
+			iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
 	}
 	if (*str == ',')
 		++str;
@@ -114,45 +118,55 @@ setup_io_tlb_npages(char *str)
 void
 swiotlb_init_with_default_size (size_t default_size)
 {
-	unsigned long i;
+	unsigned long i, bytes;
 
-	if (!io_tlb_nslabs) {
-		io_tlb_nslabs = (default_size >> PAGE_SHIFT);
-		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+	if (!iotlb_nslabs) {
+		iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
+		iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
+		/* Round up to power of two (xen_create_contiguous_region). */
+		while (iotlb_nslabs & (iotlb_nslabs-1))
+			iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
 	}
 
+	bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
+
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
-					       (1 << IO_TLB_SHIFT));
-	if (!io_tlb_start)
+	iotlb_virt_start = alloc_bootmem_low_pages(bytes);
+	if (!iotlb_virt_start)
 		panic("Cannot allocate SWIOTLB buffer");
 
 	xen_create_contiguous_region(
-		(unsigned long)io_tlb_start, 
-		get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)));
+		(unsigned long)iotlb_virt_start, get_order(bytes));
 
-	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+	iotlb_virt_end = iotlb_virt_start + bytes;
 
 	/*
 	 * Allocate and initialize the free list array.  This array is used
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
-	 * between io_tlb_start and io_tlb_end.
+	 * between iotlb_virt_start and iotlb_virt_end.
 	 */
-	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
-	for (i = 0; i < io_tlb_nslabs; i++)
+	io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
+	for (i = 0; i < iotlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
 	io_tlb_orig_addr = alloc_bootmem(
-		io_tlb_nslabs * sizeof(*io_tlb_orig_addr));
+		iotlb_nslabs * sizeof(*io_tlb_orig_addr));
 
 	/*
 	 * Get the overflow emergency buffer
 	 */
 	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
-	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
-	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end-1));
+	iotlb_bus_start = virt_to_bus(iotlb_virt_start);
+	iotlb_bus_end   = iotlb_bus_start + bytes;
+	printk(KERN_INFO "Software IO TLB enabled: \n"
+	       " Aperture:     %lu megabytes\n"
+	       " Bus range:    0x%016lx - 0x%016lx\n"
+	       " Kernel range: 0x%016lx - 0x%016lx\n",
+	       bytes >> 20,
+	       (unsigned long)iotlb_bus_start, (unsigned long)iotlb_bus_end,
+	       (unsigned long)iotlb_virt_start, (unsigned long)iotlb_virt_end);
 }
 
 void
@@ -240,7 +254,7 @@ map_single(struct device *hwdev, struct 
 	{
 		wrap = index = ALIGN(io_tlb_index, stride);
 
-		if (index >= io_tlb_nslabs)
+		if (index >= iotlb_nslabs)
 			wrap = index = 0;
 
 		do {
@@ -260,7 +274,7 @@ map_single(struct device *hwdev, struct 
 				      IO_TLB_SEGSIZE -1) && io_tlb_list[i];
 				     i--)
 					io_tlb_list[i] = ++count;
-				dma_addr = io_tlb_start +
+				dma_addr = iotlb_virt_start +
 					(index << IO_TLB_SHIFT);
 
 				/*
@@ -268,13 +282,13 @@ map_single(struct device *hwdev, struct 
 				 * the next round.
 				 */
 				io_tlb_index = 
-					((index + nslots) < io_tlb_nslabs
+					((index + nslots) < iotlb_nslabs
 					 ? (index + nslots) : 0);
 
 				goto found;
 			}
 			index += stride;
-			if (index >= io_tlb_nslabs)
+			if (index >= iotlb_nslabs)
 				index = 0;
 		} while (index != wrap);
 
@@ -304,7 +318,7 @@ unmap_single(struct device *hwdev, char 
 {
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
 	struct phys_addr buffer = io_tlb_orig_addr[index];
 
 	/*
@@ -345,7 +359,7 @@ unmap_single(struct device *hwdev, char 
 static void
 sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 {
-	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
 	struct phys_addr buffer = io_tlb_orig_addr[index];
 	BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
 	__sync_single(buffer, dma_addr, size, dir);
@@ -431,11 +445,9 @@ void
 swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 		     int dir)
 {
-	char *dma_addr = bus_to_virt(dev_addr);
-
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-		unmap_single(hwdev, dma_addr, size, dir);
+	if ((dev_addr >= iotlb_bus_start) && (dev_addr < iotlb_bus_end))
+		unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
 }
 
 /*
@@ -452,22 +464,18 @@ void
 swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
 			    size_t size, int dir)
 {
-	char *dma_addr = bus_to_virt(dev_addr);
-
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-		sync_single(hwdev, dma_addr, size, dir);
+	if ((dev_addr >= iotlb_bus_start) && (dev_addr < iotlb_bus_end))
+		sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
 }
 
 void
 swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
 			       size_t size, int dir)
 {
-	char *dma_addr = bus_to_virt(dev_addr);
-
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-		sync_single(hwdev, dma_addr, size, dir);
+	if ((dev_addr >= iotlb_bus_start) && (dev_addr < iotlb_bus_end))
+		sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
 }
 
 /*
@@ -603,11 +611,9 @@ void
 swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
 		   size_t size, enum dma_data_direction direction)
 {
-	char *dma_addr = bus_to_virt(dma_address);
-
 	BUG_ON(direction == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-		unmap_single(hwdev, dma_addr, size, direction);
+	if ((dma_address >= iotlb_bus_start) && (dma_address < iotlb_bus_end))
+		unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
 }
 
 int
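Two notes on the sizing logic above. First, the boot parameter is now read as an aperture size in megabytes, so the parsed value is shifted by (20 - IO_TLB_SHIFT) to convert megabytes into 2^IO_TLB_SHIFT-byte slabs (the in-diff comment calls out that this differs from ia64). Second, both sizing paths round iotlb_nslabs up to a power of two because xen_create_contiguous_region() takes an order, i.e. a power-of-two number of pages; the loop achieves this by repeatedly adding the lowest set bit (n & ~(n-1)) until only one bit remains. A standalone sketch of that arithmetic (hypothetical helper name, not part of the patch):

	#include <stdio.h>

	/* Round n up to the next power of two the way the patch does:
	 * while more than one bit is set, add the lowest set bit;
	 * the carries propagate upward until a single bit remains. */
	static unsigned long roundup_pow2(unsigned long n)
	{
		while (n & (n - 1))
			n += n & ~(n - 1);
		return n;
	}

	int main(void)
	{
		unsigned long samples[] = { 3, 64, 1000, 1UL << 20 };
		unsigned int i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			printf("%lu -> %lu\n", samples[i], roundup_pow2(samples[i]));
		return 0;
	}

For example, 1000 slabs goes 1000 -> 1008 -> 1024, and a value that is already a power of two is left unchanged.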