ia64/xen-unstable

changeset 5752:c5db6fd54e36

Fix dma_map_single to work correctly with multi-page buffers.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Jul 12 10:29:46 2005 +0000 (2005-07-12)
parents 57a5441b323b
children 9f6057761c8f
files linux-2.6-xen-sparse/arch/xen/i386/kernel/pci-dma.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/dma-mapping.h
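The patch keeps sub-page mappings on the old fast path (virt_to_bus) and only bounces buffers that cross a page boundary, recording each bounce in a small list so that the unmap and sync paths can find it again. As a minimal, standalone sketch of the page-straddle test the patch introduces (user-space C, not part of the changeset; the 4 KiB PAGE_SIZE and the helper name are assumptions):

/* Illustrative, standalone user-space sketch of the page-straddle test
 * added by this patch; 4 KiB pages are assumed and straddles_page() is
 * a hypothetical name, not a kernel helper. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static int straddles_page(uintptr_t ptr, size_t size)
{
	/* Offset of the buffer within its first page, plus its length,
	 * must not exceed one page for the fast path to be safe. */
	return ((ptr & ~PAGE_MASK) + size) > PAGE_SIZE;
}

int main(void)
{
	printf("%d\n", straddles_page(0x1000, 100));  /* 0: fits in one page */
	printf("%d\n", straddles_page(0x1fce, 100));  /* 1: crosses a boundary */
	printf("%d\n", straddles_page(0x2000, 8192)); /* 1: spans two pages */
	return 0;
}

Only buffers that fail this test pay for a dma_alloc_coherent() bounce allocation and the extra copies; everything else behaves as before.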
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/pci-dma.c	Tue Jul 12 10:16:33 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/pci-dma.c	Tue Jul 12 10:29:46 2005 +0000
     1.3 @@ -152,3 +152,131 @@ void *dma_mark_declared_memory_occupied(
     1.4  	return mem->virt_base + (pos << PAGE_SHIFT);
     1.5  }
     1.6  EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
     1.7 +
     1.8 +static LIST_HEAD(dma_map_head);
     1.9 +static DEFINE_SPINLOCK(dma_map_lock);
    1.10 +struct dma_map_entry {
    1.11 +	struct list_head list;
    1.12 +	dma_addr_t dma;
    1.13 +	char *bounce, *host;
    1.14 +	size_t size;
    1.15 +};
    1.16 +#define DMA_MAP_MATCHES(e,d) (((e)->dma<=(d)) && (((e)->dma+(e)->size)>(d)))
    1.17 +
    1.18 +dma_addr_t
    1.19 +dma_map_single(struct device *dev, void *ptr, size_t size,
    1.20 +	       enum dma_data_direction direction)
    1.21 +{
    1.22 +	struct dma_map_entry *ent;
    1.23 +	void *bnc;
    1.24 +	dma_addr_t dma;
    1.25 +	unsigned long flags;
    1.26 +
    1.27 +	BUG_ON(direction == DMA_NONE);
    1.28 +
    1.29 +	/*
    1.30 +	 * Even if size is sub-page, the buffer may still straddle a page
    1.31 +	 * boundary. Take into account buffer start offset. All other calls are
    1.32 +	 * conservative and always search the dma_map list if it's non-empty.
    1.33 +	 */
    1.34 +	if ((((unsigned int)ptr & ~PAGE_MASK) + size) <= PAGE_SIZE) {
    1.35 +		dma = virt_to_bus(ptr);
    1.36 +	} else {
    1.37 +		BUG_ON((bnc = dma_alloc_coherent(dev, size, &dma, 0)) == NULL);
    1.38 +		BUG_ON((ent = kmalloc(sizeof(*ent), GFP_KERNEL)) == NULL);
    1.39 +		if (direction != DMA_FROM_DEVICE)
    1.40 +			memcpy(bnc, ptr, size);
    1.41 +		ent->dma    = dma;
    1.42 +		ent->bounce = bnc;
    1.43 +		ent->host   = ptr;
    1.44 +		ent->size   = size;
    1.45 +		spin_lock_irqsave(&dma_map_lock, flags);
    1.46 +		list_add(&ent->list, &dma_map_head);
    1.47 +		spin_unlock_irqrestore(&dma_map_lock, flags);
    1.48 +	}
    1.49 +
    1.50 +	flush_write_buffers();
    1.51 +	return dma;
    1.52 +}
    1.53 +EXPORT_SYMBOL(dma_map_single);
    1.54 +
    1.55 +void
    1.56 +dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
    1.57 +		 enum dma_data_direction direction)
    1.58 +{
    1.59 +	struct dma_map_entry *ent;
    1.60 +	unsigned long flags;
    1.61 +
    1.62 +	BUG_ON(direction == DMA_NONE);
    1.63 +
    1.64 +	/* Fast-path check: are there any multi-page DMA mappings? */
    1.65 +	if (!list_empty(&dma_map_head)) {
    1.66 +		spin_lock_irqsave(&dma_map_lock, flags);
    1.67 +		list_for_each_entry ( ent, &dma_map_head, list ) {
    1.68 +			if (DMA_MAP_MATCHES(ent, dma_addr)) {
    1.69 +				list_del(&ent->list);
    1.70 +				break;
    1.71 +			}
    1.72 +		}
    1.73 +		spin_unlock_irqrestore(&dma_map_lock, flags);
    1.74 +		if (&ent->list != &dma_map_head) {
    1.75 +			BUG_ON(dma_addr != ent->dma);
    1.76 +			BUG_ON(size != ent->size);
    1.77 +			if (direction != DMA_TO_DEVICE)
    1.78 +				memcpy(ent->host, ent->bounce, size);
    1.79 +			dma_free_coherent(dev, size, ent->bounce, ent->dma);
    1.80 +			kfree(ent);
    1.81 +		}
    1.82 +	}
    1.83 +}
    1.84 +EXPORT_SYMBOL(dma_unmap_single);
    1.85 +
    1.86 +void
    1.87 +dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
    1.88 +			enum dma_data_direction direction)
    1.89 +{
    1.90 +	struct dma_map_entry *ent;
    1.91 +	unsigned long flags, off;
    1.92 +
    1.93 +	/* Fast-path check: are there any multi-page DMA mappings? */
    1.94 +	if (!list_empty(&dma_map_head)) {
    1.95 +		spin_lock_irqsave(&dma_map_lock, flags);
    1.96 +		list_for_each_entry ( ent, &dma_map_head, list )
    1.97 +			if (DMA_MAP_MATCHES(ent, dma_handle))
    1.98 +				break;
    1.99 +		spin_unlock_irqrestore(&dma_map_lock, flags);
   1.100 +		if (&ent->list != &dma_map_head) {
   1.101 +			off = dma_handle - ent->dma;
   1.102 +			BUG_ON((off + size) > ent->size);
   1.103 +			if (direction != DMA_TO_DEVICE)
   1.104 +				memcpy(ent->host+off, ent->bounce+off, size);
   1.105 +		}
   1.106 +	}
   1.107 +}
   1.108 +EXPORT_SYMBOL(dma_sync_single_for_cpu);
   1.109 +
   1.110 +void
   1.111 +dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
   1.112 +                           enum dma_data_direction direction)
   1.113 +{
   1.114 +	struct dma_map_entry *ent;
   1.115 +	unsigned long flags, off;
   1.116 +
   1.117 +	/* Fast-path check: are there any multi-page DMA mappings? */
   1.118 +	if (!list_empty(&dma_map_head)) {
   1.119 +		spin_lock_irqsave(&dma_map_lock, flags);
   1.120 +		list_for_each_entry ( ent, &dma_map_head, list )
   1.121 +			if (DMA_MAP_MATCHES(ent, dma_handle))
   1.122 +				break;
   1.123 +		spin_unlock_irqrestore(&dma_map_lock, flags);
   1.124 +		if (&ent->list != &dma_map_head) {
   1.125 +			off = dma_handle - ent->dma;
   1.126 +			BUG_ON((off + size) > ent->size);
   1.127 +			if (direction != DMA_FROM_DEVICE)
   1.128 +				memcpy(ent->bounce+off, ent->host+off, size);
   1.129 +		}
   1.130 +	}
   1.131 +
   1.132 +	flush_write_buffers();
   1.133 +}
   1.134 +EXPORT_SYMBOL(dma_sync_single_for_device);
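For reference, the unmap and sync paths above locate a mapping by scanning dma_map_head with DMA_MAP_MATCHES(): an entry matches a handle when the handle lies in [dma, dma+size), and the offset into the bounce and host buffers is handle - dma. A standalone user-space sketch of that interval test (types and names here are illustrative, not the kernel's):

/* Minimal sketch of the lookup performed by dma_unmap_single() and the
 * dma_sync_single_* helpers above. */
#include <stdint.h>
#include <stdio.h>

struct entry {
	uint64_t dma;   /* bus address of the bounce buffer */
	uint64_t size;  /* length of the original mapping */
};

static int matches(const struct entry *e, uint64_t handle)
{
	return e->dma <= handle && handle < e->dma + e->size;
}

int main(void)
{
	struct entry e = { .dma = 0x10000, .size = 3 * 4096 };

	printf("%d\n", matches(&e, 0x10000));            /* 1: first byte */
	printf("%d\n", matches(&e, 0x10000 + 8192));     /* 1: third page */
	printf("%d\n", matches(&e, 0x10000 + 3 * 4096)); /* 0: one past the end */
	if (matches(&e, 0x12000))
		printf("offset = %llu\n",
		       (unsigned long long)(0x12000 - e.dma)); /* offset = 8192 */
	return 0;
}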
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c	Tue Jul 12 10:16:33 2005 +0000
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c	Tue Jul 12 10:29:46 2005 +0000
     2.3 @@ -203,3 +203,134 @@ void *dma_mark_declared_memory_occupied(
     2.4  }
     2.5  EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
     2.6  #endif
     2.7 +
     2.8 +static LIST_HEAD(dma_map_head);
     2.9 +static DEFINE_SPINLOCK(dma_map_lock);
    2.10 +struct dma_map_entry {
    2.11 +	struct list_head list;
    2.12 +	dma_addr_t dma;
    2.13 +	char *bounce, *host;
    2.14 +	size_t size;
    2.15 +};
    2.16 +#define DMA_MAP_MATCHES(e,d) (((e)->dma<=(d)) && (((e)->dma+(e)->size)>(d)))
    2.17 +
    2.18 +dma_addr_t
    2.19 +dma_map_single(struct device *dev, void *ptr, size_t size,
    2.20 +	       enum dma_data_direction direction)
    2.21 +{
    2.22 +	struct dma_map_entry *ent;
    2.23 +	void *bnc;
    2.24 +	dma_addr_t dma;
    2.25 +	unsigned long flags;
    2.26 +
    2.27 +	if (direction == DMA_NONE)
    2.28 +		out_of_line_bug();
    2.29 +
    2.30 +	/*
    2.31 +	 * Even if size is sub-page, the buffer may still straddle a page
    2.32 +	 * boundary. Take into account buffer start offset. All other calls are
    2.33 +	 * conservative and always search the dma_map list if it's non-empty.
    2.34 +	 */
     2.35 +	if ((((unsigned long)ptr & ~PAGE_MASK) + size) <= PAGE_SIZE) {
    2.36 +		dma = virt_to_bus(ptr);
    2.37 +	} else {
    2.38 +		BUG_ON((bnc = dma_alloc_coherent(dev, size, &dma, 0)) == NULL);
    2.39 +		BUG_ON((ent = kmalloc(sizeof(*ent), GFP_KERNEL)) == NULL);
    2.40 +		if (direction != DMA_FROM_DEVICE)
    2.41 +			memcpy(bnc, ptr, size);
    2.42 +		ent->dma    = dma;
    2.43 +		ent->bounce = bnc;
    2.44 +		ent->host   = ptr;
    2.45 +		ent->size   = size;
    2.46 +		spin_lock_irqsave(&dma_map_lock, flags);
    2.47 +		list_add(&ent->list, &dma_map_head);
    2.48 +		spin_unlock_irqrestore(&dma_map_lock, flags);
    2.49 +	}
    2.50 +
     2.51 +	if ((dma+size) & ~*dev->dma_mask)
    2.52 +		out_of_line_bug();
    2.53 +	return dma;
    2.54 +}
    2.55 +EXPORT_SYMBOL(dma_map_single);
    2.56 +
    2.57 +void
    2.58 +dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
    2.59 +		 enum dma_data_direction direction)
    2.60 +{
    2.61 +	struct dma_map_entry *ent;
    2.62 +	unsigned long flags;
    2.63 +
    2.64 +	if (direction == DMA_NONE)
    2.65 +		out_of_line_bug();
    2.66 +
    2.67 +	/* Fast-path check: are there any multi-page DMA mappings? */
    2.68 +	if (!list_empty(&dma_map_head)) {
    2.69 +		spin_lock_irqsave(&dma_map_lock, flags);
    2.70 +		list_for_each_entry ( ent, &dma_map_head, list ) {
    2.71 +			if (DMA_MAP_MATCHES(ent, dma_addr)) {
    2.72 +				list_del(&ent->list);
    2.73 +				break;
    2.74 +			}
    2.75 +		}
    2.76 +		spin_unlock_irqrestore(&dma_map_lock, flags);
    2.77 +		if (&ent->list != &dma_map_head) {
    2.78 +			BUG_ON(dma_addr != ent->dma);
    2.79 +			BUG_ON(size != ent->size);
    2.80 +			if (direction != DMA_TO_DEVICE)
    2.81 +				memcpy(ent->host, ent->bounce, size);
    2.82 +			dma_free_coherent(dev, size, ent->bounce, ent->dma);
    2.83 +			kfree(ent);
    2.84 +		}
    2.85 +	}
    2.86 +}
    2.87 +EXPORT_SYMBOL(dma_unmap_single);
    2.88 +
    2.89 +void
    2.90 +dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
    2.91 +			enum dma_data_direction direction)
    2.92 +{
    2.93 +	struct dma_map_entry *ent;
    2.94 +	unsigned long flags, off;
    2.95 +
    2.96 +	/* Fast-path check: are there any multi-page DMA mappings? */
    2.97 +	if (!list_empty(&dma_map_head)) {
    2.98 +		spin_lock_irqsave(&dma_map_lock, flags);
    2.99 +		list_for_each_entry ( ent, &dma_map_head, list )
   2.100 +			if (DMA_MAP_MATCHES(ent, dma_handle))
   2.101 +				break;
   2.102 +		spin_unlock_irqrestore(&dma_map_lock, flags);
   2.103 +		if (&ent->list != &dma_map_head) {
   2.104 +			off = dma_handle - ent->dma;
   2.105 +			BUG_ON((off + size) > ent->size);
   2.106 +			if (direction != DMA_TO_DEVICE)
   2.107 +				memcpy(ent->host+off, ent->bounce+off, size);
   2.108 +		}
   2.109 +	}
   2.110 +}
   2.111 +EXPORT_SYMBOL(dma_sync_single_for_cpu);
   2.112 +
   2.113 +void
   2.114 +dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
   2.115 +                           enum dma_data_direction direction)
   2.116 +{
   2.117 +	struct dma_map_entry *ent;
   2.118 +	unsigned long flags, off;
   2.119 +
   2.120 +	/* Fast-path check: are there any multi-page DMA mappings? */
   2.121 +	if (!list_empty(&dma_map_head)) {
   2.122 +		spin_lock_irqsave(&dma_map_lock, flags);
   2.123 +		list_for_each_entry ( ent, &dma_map_head, list )
   2.124 +			if (DMA_MAP_MATCHES(ent, dma_handle))
   2.125 +				break;
   2.126 +		spin_unlock_irqrestore(&dma_map_lock, flags);
   2.127 +		if (&ent->list != &dma_map_head) {
   2.128 +			off = dma_handle - ent->dma;
   2.129 +			BUG_ON((off + size) > ent->size);
   2.130 +			if (direction != DMA_FROM_DEVICE)
   2.131 +				memcpy(ent->bounce+off, ent->host+off, size);
   2.132 +		}
   2.133 +	}
   2.134 +
   2.135 +	flush_write_buffers();
   2.136 +}
   2.137 +EXPORT_SYMBOL(dma_sync_single_for_device);
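The x86_64 variant differs from the i386 one only in using out_of_line_bug() and in re-checking the device's DMA mask on the map path, the same check the inline code removed from the x86_64 header further down used to perform. A user-space sketch of that mask test, assuming a 32-bit-capable device and a hypothetical helper name:

/* Sketch of the DMA-mask range check performed above. */
#include <stdint.h>
#include <stdio.h>

static int beyond_mask(uint64_t dma, uint64_t size, uint64_t dma_mask)
{
	/* Non-zero when the end of the mapping has bits above the mask. */
	return ((dma + size) & ~dma_mask) != 0;
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL; /* device can address 4 GiB */

	printf("%d\n", beyond_mask(0xffffe000ULL, 0x1000, mask32)); /* 0: stays below 4 GiB */
	printf("%d\n", beyond_mask(0xfffff000ULL, 0x2000, mask32)); /* 1: crosses 4 GiB */
	return 0;
}

As in the inline code being removed, the test uses dma+size rather than dma+size-1, so a mapping whose last byte lands exactly on the mask limit is conservatively rejected.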
     3.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h	Tue Jul 12 10:16:33 2005 +0000
     3.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h	Tue Jul 12 10:29:46 2005 +0000
     3.3 @@ -16,21 +16,13 @@ void *dma_alloc_coherent(struct device *
     3.4  void dma_free_coherent(struct device *dev, size_t size,
     3.5  			 void *vaddr, dma_addr_t dma_handle);
     3.6  
     3.7 -static inline dma_addr_t
     3.8 +extern dma_addr_t
     3.9  dma_map_single(struct device *dev, void *ptr, size_t size,
    3.10 -	       enum dma_data_direction direction)
    3.11 -{
    3.12 -	BUG_ON(direction == DMA_NONE);
    3.13 -	flush_write_buffers();
    3.14 -	return virt_to_bus(ptr);
    3.15 -}
    3.16 +	       enum dma_data_direction direction);
    3.17  
    3.18 -static inline void
    3.19 +extern void
    3.20  dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
    3.21 -		 enum dma_data_direction direction)
    3.22 -{
    3.23 -	BUG_ON(direction == DMA_NONE);
    3.24 -}
    3.25 +		 enum dma_data_direction direction);
    3.26  
    3.27  static inline int
    3.28  dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
    3.29 @@ -73,24 +65,20 @@ dma_unmap_sg(struct device *dev, struct 
    3.30  	BUG_ON(direction == DMA_NONE);
    3.31  }
    3.32  
    3.33 -static inline void
    3.34 +extern void
    3.35  dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
    3.36 -			enum dma_data_direction direction)
    3.37 -{
    3.38 -}
    3.39 +			enum dma_data_direction direction);
    3.40  
    3.41 -static inline void
    3.42 +extern void
    3.43  dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
    3.44 -			enum dma_data_direction direction)
    3.45 -{
    3.46 -	flush_write_buffers();
    3.47 -}
    3.48 +                           enum dma_data_direction direction);
    3.49  
    3.50  static inline void
    3.51  dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
    3.52  			      unsigned long offset, size_t size,
    3.53  			      enum dma_data_direction direction)
    3.54  {
    3.55 +	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
    3.56  }
    3.57  
    3.58  static inline void
    3.59 @@ -98,7 +86,7 @@ dma_sync_single_range_for_device(struct 
    3.60  				 unsigned long offset, size_t size,
    3.61  				 enum dma_data_direction direction)
    3.62  {
    3.63 -	flush_write_buffers();
    3.64 +	dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
    3.65  }
    3.66  
    3.67  static inline void
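With the header changes above, the streaming-DMA entry points are now real functions exported from pci-dma.c rather than inlines. A hedged sketch of the call sequence a driver would use (the device pointer, buffer, and length are hypothetical and error handling is omitted; this is not part of the changeset):

/* Illustration of a driver using the now out-of-line entry points. */
#include <linux/dma-mapping.h>

static void example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* May transparently allocate a bounce buffer if buf crosses a page. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... hand 'handle' to the device and wait for the transfer ... */

	/* Releases the bounce buffer (if one was used); for DMA_FROM_DEVICE
	 * or DMA_BIDIRECTIONAL mappings this also copies data back to buf. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}

Note also that dma_sync_single_range_for_cpu()/..._for_device() now forward to the single-handle variants with the offset added, so a ranged sync of a bounced buffer copies only the requested window.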
     4.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/dma-mapping.h	Tue Jul 12 10:16:33 2005 +0000
     4.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/dma-mapping.h	Tue Jul 12 10:29:46 2005 +0000
     4.3 @@ -21,68 +21,21 @@ void *dma_alloc_coherent(struct device *
     4.4  void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
     4.5  			 dma_addr_t dma_handle);
     4.6  
     4.7 -#ifdef CONFIG_GART_IOMMU
     4.8 -
     4.9  extern dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size,
    4.10  				 int direction);
    4.11  extern void dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
    4.12  			     int direction);
    4.13  
    4.14 -#else
    4.15 -
    4.16 -/* No IOMMU */
    4.17 -
    4.18 -static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr,
    4.19 -					size_t size, int direction)
    4.20 -{
    4.21 -	dma_addr_t addr;
    4.22 -
    4.23 -	if (direction == DMA_NONE)
    4.24 -		out_of_line_bug();
    4.25 -	addr = virt_to_machine(ptr);
    4.26 -
    4.27 -	if ((addr+size) & ~*hwdev->dma_mask)
    4.28 -		out_of_line_bug();
    4.29 -	return addr;
    4.30 -}
    4.31 -
    4.32 -static inline void dma_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
    4.33 -				    size_t size, int direction)
    4.34 -{
    4.35 -	if (direction == DMA_NONE)
    4.36 -		out_of_line_bug();
    4.37 -	/* Nothing to do */
    4.38 -}
    4.39 -#endif
    4.40 -
    4.41  #define dma_map_page(dev,page,offset,size,dir) \
    4.42  	dma_map_single((dev), page_address(page)+(offset), (size), (dir))
    4.43  
    4.44 -static inline void dma_sync_single_for_cpu(struct device *hwdev,
    4.45 -					       dma_addr_t dma_handle,
    4.46 -					       size_t size, int direction)
    4.47 -{
    4.48 -	if (direction == DMA_NONE)
    4.49 -		out_of_line_bug();
    4.50 -
    4.51 -	if (swiotlb)
    4.52 -		return swiotlb_sync_single_for_cpu(hwdev,dma_handle,size,direction);
    4.53 -
    4.54 -	flush_write_buffers();
    4.55 -}
    4.56 +extern void
    4.57 +dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
    4.58 +			int direction);
    4.59  
    4.60 -static inline void dma_sync_single_for_device(struct device *hwdev,
    4.61 -						  dma_addr_t dma_handle,
    4.62 -						  size_t size, int direction)
    4.63 -{
    4.64 -        if (direction == DMA_NONE)
    4.65 -		out_of_line_bug();
    4.66 -
    4.67 -	if (swiotlb)
    4.68 -		return swiotlb_sync_single_for_device(hwdev,dma_handle,size,direction);
    4.69 -
    4.70 -	flush_write_buffers();
    4.71 -}
    4.72 +extern void
    4.73 +dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
    4.74 +                           int direction);
    4.75  
    4.76  static inline void dma_sync_sg_for_cpu(struct device *hwdev,
    4.77  				       struct scatterlist *sg,