ia64/xen-unstable

view linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h @ 6538:84ee014ebd41

Merge xen-vtx-unstable.hg
author adsharma@los-vmm.sc.intel.com
date Wed Aug 17 12:34:38 2005 -0800 (2005-08-17)
parents 23979fb12c49 3d187585c141
children 99914b54f7bf
line source
1 #ifndef _ASM_I386_DMA_MAPPING_H
2 #define _ASM_I386_DMA_MAPPING_H
4 /*
5 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
6 * documentation.
7 */
9 #include <linux/config.h>
10 #include <linux/mm.h>
11 #include <asm/cache.h>
12 #include <asm/io.h>
13 #include <asm/scatterlist.h>
14 #include <asm-i386/swiotlb.h>
16 static inline int
17 address_needs_mapping(struct device *hwdev, dma_addr_t addr)
18 {
19 dma_addr_t mask = 0xffffffff;
20 /* If the device has a mask, use it, otherwise default to 32 bits */
21 if (hwdev && hwdev->dma_mask)
22 mask = *hwdev->dma_mask;
23 return (addr & ~mask) != 0;
24 }
26 static inline int
27 range_straddles_page_boundary(void *p, size_t size)
28 {
29 return ((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE);
30 }
/*
 * On this platform non-coherent allocations are identical to coherent
 * ones, so the noncoherent entry points simply alias the coherent API.
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
/*
 * Coherent allocation: returns a kernel virtual address and fills in
 * *dma_handle with the matching bus address.
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, unsigned int __nocast flag);

void dma_free_coherent(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle);

/* Map a single contiguous buffer for streaming DMA; returns its bus address. */
extern dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction);

extern void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction);

/* Map/unmap a scatter-gather list; dma_map_sg returns the number of entries used. */
extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
		      int nents, enum dma_data_direction direction);

extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
			 int nents, enum dma_data_direction direction);

/* Page-based variants of the single-buffer mapping calls. */
extern dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction);

extern void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction);

/*
 * Ownership-transfer syncs for a streaming mapping: _for_cpu before the
 * CPU reads the buffer, _for_device before handing it back to the device.
 * These are out-of-line here (unlike stock i386) — presumably to route
 * through swiotlb bounce buffering; see the corresponding .c file.
 */
extern void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction);

extern void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction);
70 static inline void
71 dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
72 unsigned long offset, size_t size,
73 enum dma_data_direction direction)
74 {
75 dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
76 }
78 static inline void
79 dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
80 unsigned long offset, size_t size,
81 enum dma_data_direction direction)
82 {
83 dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
84 }
86 static inline void
87 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
88 enum dma_data_direction direction)
89 {
90 if (swiotlb)
91 swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
92 flush_write_buffers();
93 }
95 static inline void
96 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
97 enum dma_data_direction direction)
98 {
99 if (swiotlb)
100 swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
101 flush_write_buffers();
102 }
/* Nonzero when a dma_map_* call returned a failure cookie. */
extern int
dma_mapping_error(dma_addr_t dma_addr);

/* Nonzero when the platform can do DMA within the given address mask. */
extern int
dma_supported(struct device *dev, u64 mask);
110 static inline int
111 dma_set_mask(struct device *dev, u64 mask)
112 {
113 if(!dev->dma_mask || !dma_supported(dev, mask))
114 return -EIO;
116 *dev->dma_mask = mask;
118 return 0;
119 }
#ifdef __i386__
/*
 * Alignment that DMA buffers must have to avoid cache-line sharing.
 *
 * There is no easy way to read the cache line size on all x86 models,
 * so conservatively return the maximum possible line size.
 */
static inline int
dma_get_cache_alignment(void)
{
	return (1 << L1_CACHE_SHIFT_MAX);
}
#else
/* Non-i386 builds provide an out-of-line implementation. */
extern int dma_get_cache_alignment(void);
#endif
/* All DMA-able memory is cache-consistent on this platform. */
#define dma_is_consistent(d) (1)

/*
 * Sync a consistent buffer: memory is already coherent, so flushing the
 * CPU write buffers is all that is required.
 */
static inline void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_write_buffers();
}
/* This architecture supports per-device declared coherent memory regions. */
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
/*
 * Register a device-local memory region (at bus address @bus_addr, seen
 * by the device at @device_addr) for coherent allocations on @dev.
 */
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

/* Tear down a region previously set up by dma_declare_coherent_memory(). */
extern void
dma_release_declared_memory(struct device *dev);

/* Reserve a sub-range of the declared region; returns its kernel address. */
extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);
154 #endif