ia64/xen-unstable

view linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/dma-mapping.h @ 14100:e47738923a05

[LINUX] Purge include <linux/config.h>. It has been obsolete for some time now.

Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
author	Ian Campbell <ian.campbell@xensource.com>
date	Fri Feb 23 16:56:45 2007 +0000
parents c149ffa2fd1a
#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/mm.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/swiotlb.h>

static inline int
address_needs_mapping(struct device *hwdev, dma_addr_t addr)
{
	dma_addr_t mask = 0xffffffff;
	/* If the device has a mask, use it, otherwise default to 32 bits */
	if (hwdev && hwdev->dma_mask)
		mask = *hwdev->dma_mask;
	return (addr & ~mask) != 0;
}
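
/*
 * Illustrative note (not in the original source): for a hypothetical
 * ISA-style device with a 24-bit DMA mask, an address above 16MB fails
 * the mask test and would need bouncing:
 *
 *	dma_addr_t mask24 = 0x00ffffff;
 *	(0x02000000 & ~mask24) != 0	-- needs mapping
 *	(0x00100000 & ~mask24) == 0	-- directly addressable
 */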

static inline int
range_straddles_page_boundary(void *p, size_t size)
{
	extern unsigned long *contiguous_bitmap;
	return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
		!test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);
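
/*
 * Usage sketch (an illustrative addition, not part of the original file):
 * a driver might allocate a small coherent buffer at probe time. `pdev`
 * and the PAGE_SIZE length are hypothetical.
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *	if (cpu == NULL)
 *		return -ENOMEM;
 *	... hand `bus` to the device, access the buffer through `cpu` ...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, cpu, bus);
 */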

extern dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction);

extern void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction);

extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
		      int nents, enum dma_data_direction direction);
extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
			 int nents, enum dma_data_direction direction);
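
/*
 * Usage sketch (illustrative addition): a streaming mapping gives the
 * device ownership of the buffer for the duration of one transfer.
 * `dev`, `buf` and `len` are hypothetical.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	... start the transfer and wait for it to complete ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */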

#ifdef CONFIG_HIGHMEM
extern dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction);

extern void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction);
#else
#define dma_map_page(dev, page, offset, size, dir) \
	dma_map_single(dev, page_address(page) + (offset), (size), (dir))
#define dma_unmap_page dma_unmap_single
#endif
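
/*
 * Usage sketch (illustrative): mapping a struct page directly, which is
 * the only option for highmem pages that have no permanent kernel
 * mapping. `dev` and `page` are hypothetical.
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 0, PAGE_SIZE,
 *					 DMA_FROM_DEVICE);
 *	... let the device DMA into the page ...
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
 */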

extern void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction);

extern void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction);

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
}
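
/*
 * Usage sketch (illustrative): handing part of a long-lived streaming
 * mapping back to the CPU to inspect completed data, then returning it
 * to the device. `dev`, `handle`, `off` and `n` are hypothetical.
 *
 *	dma_sync_single_range_for_cpu(dev, handle, off, n, DMA_FROM_DEVICE);
 *	... the CPU may now read bytes [off, off+n) ...
 *	dma_sync_single_range_for_device(dev, handle, off, n, DMA_FROM_DEVICE);
 */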

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	if (swiotlb)
		swiotlb_sync_sg_for_cpu(dev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	if (swiotlb)
		swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
	flush_write_buffers();
}

extern int
dma_mapping_error(dma_addr_t dma_addr);

extern int
dma_supported(struct device *dev, u64 mask);

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
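
/*
 * Usage sketch (illustrative): a driver usually negotiates its mask once
 * at probe time. `dev` is hypothetical; DMA_32BIT_MASK is the constant
 * 2.6-era kernels defined for a full 32-bit mask.
 *
 *	if (dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -EIO;	-- no DMA addressing we can support
 */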

static inline int
dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
}

#define dma_is_consistent(d) (1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_write_buffers();
}

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);
extern void
dma_release_declared_memory(struct device *dev);
extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);
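
/*
 * Usage sketch (illustrative): a device with a dedicated memory window
 * can have that region declared so coherent allocations for it are
 * satisfied from the window. The addresses, size and `dev` here are
 * hypothetical; in 2.6-era kernels the call returned nonzero on success.
 *
 *	if (!dma_declare_coherent_memory(dev, 0xf8000000, 0x0, 0x10000,
 *					 DMA_MEMORY_MAP))
 *		return -ENOMEM;
 *	...
 *	dma_release_declared_memory(dev);
 */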

#endif