ia64/xen-unstable: linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/dma-mapping.h @ 13341:3040ba0f2d3d

#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/swiotlb.h>
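
/*
 * Xen-specific helpers: pseudo-physical memory that looks contiguous to
 * the kernel is not necessarily machine-contiguous, so these are used to
 * decide whether an address is reachable by the device's DMA mask and
 * whether a buffer crosses a page boundary without being backed by a
 * machine-contiguous region (tracked in contiguous_bitmap).
 */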
static inline int
address_needs_mapping(struct device *hwdev, dma_addr_t addr)
{
        dma_addr_t mask = 0xffffffff;
        /* If the device has a mask, use it, otherwise default to 32 bits */
        if (hwdev && hwdev->dma_mask)
                mask = *hwdev->dma_mask;
        return (addr & ~mask) != 0;
}

static inline int
range_straddles_page_boundary(void *p, size_t size)
{
        extern unsigned long *contiguous_bitmap;
        return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
                !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);

extern dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction);

extern void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction);

extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
                      int nents, enum dma_data_direction direction);
extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
                         int nents, enum dma_data_direction direction);

extern dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction);

extern void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction);

extern void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction);

extern void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction);

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
        dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
        if (swiotlb)
                swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        if (swiotlb)
                swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
        flush_write_buffers();
}

extern int
dma_mapping_error(dma_addr_t dma_addr);

extern int
dma_supported(struct device *dev, u64 mask);

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if(!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}

static inline int
dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return (1 << INTERNODE_CACHE_SHIFT);
}

#define dma_is_consistent(d) (1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        flush_write_buffers();
}

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                            dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size);

#endif
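
For reference, the Xen variant keeps the standard i386 DMA API, so existing driver
code drives it unchanged. The sketch below is a hypothetical illustration of a typical
call sequence (dma_set_mask, dma_alloc_coherent, dma_map_single, dma_mapping_error);
the function name my_dma_example, the buffer size MY_BUF_SIZE and the use of
DMA_32BIT_MASK from <linux/dma-mapping.h> of the same kernel generation are
illustrative assumptions, not part of this header.

/* Hypothetical usage sketch; not part of the original header. */
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define MY_BUF_SIZE 4096        /* illustrative buffer size */

static int my_dma_example(struct pci_dev *pdev)
{
        void *desc, *buf;
        dma_addr_t desc_bus, buf_bus;

        /* Restrict the device to 32-bit DMA; fails if unsupported. */
        if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
                return -EIO;

        /* Coherent allocation, e.g. for a descriptor ring. */
        desc = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &desc_bus, GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        /* Streaming mapping of an ordinary kernel buffer for one transfer. */
        buf = kmalloc(MY_BUF_SIZE, GFP_KERNEL);
        if (!buf) {
                dma_free_coherent(&pdev->dev, PAGE_SIZE, desc, desc_bus);
                return -ENOMEM;
        }
        buf_bus = dma_map_single(&pdev->dev, buf, MY_BUF_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(buf_bus)) {
                kfree(buf);
                dma_free_coherent(&pdev->dev, PAGE_SIZE, desc, desc_bus);
                return -EIO;
        }

        /* ... hand desc_bus/buf_bus to the device and run the transfer ... */

        dma_unmap_single(&pdev->dev, buf_bus, MY_BUF_SIZE, DMA_TO_DEVICE);
        kfree(buf);
        dma_free_coherent(&pdev->dev, PAGE_SIZE, desc, desc_bus);
        return 0;
}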