ia64/linux-2.6.18-xen.hg

view include/asm-sh64/dma-mapping.h @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain, as they are initialised in
machine_kexec_setup_resources() using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 10:55:18 2008 +0000 (2008-02-28)
parents 831230e53067
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
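/*
 * Low-level allocator for coherent ("consistent") memory, provided by the
 * sh64 mm code.  The callers below pass a NULL pci_dev.
 */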
struct pci_dev;
extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
			      dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
			    void *vaddr, dma_addr_t dma_handle);

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
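/*
 * Coherent allocations ignore the device and gfp arguments and defer to
 * the consistent_* helpers declared above.
 */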
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return consistent_alloc(NULL, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	consistent_free(NULL, size, vaddr, dma_handle);
}
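/* Write back and invalidate the CPU cache over the given virtual range. */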
static inline void dma_cache_sync(void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	dma_cache_wback_inv((unsigned long)vaddr, size);
}
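/*
 * Streaming mappings: when PCI DMA is coherent the mapping is a plain
 * virt_to_bus() translation; otherwise the buffer is flushed from the
 * cache first.  Unmapping is a no-op.
 */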
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_bus(ptr);
#endif
	dma_cache_sync(ptr, size, dir);

	return virt_to_bus(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)
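/* Page mappings reuse the single-buffer path via the page's kernel address. */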
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}
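/*
 * Sync operations: a no-op for coherent PCI devices, otherwise a cache
 * flush of the range backing the bus address.
 */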
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}
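/* The _for_cpu and _for_device variants perform the same cache maintenance. */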
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle, size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

#endif /* __ASM_SH_DMA_MAPPING_H */
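For illustration only (not part of the header): a minimal sketch of how a driver might use this API. The device pointer "dev", the buffer "buf" and the sizes are hypothetical, and error handling is abbreviated.

/*
 * Illustration only: hypothetical driver code using the sh64 DMA API above.
 * "dev", "buf" and "len" are placeholders supplied by the caller.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_dma_usage(struct device *dev, void *buf, size_t len)
{
	dma_addr_t ring_bus, buf_bus;
	void *ring;

	/* Coherent memory, e.g. for a descriptor ring shared with the device. */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_bus, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* Streaming mapping for a buffer about to be handed to the device. */
	buf_bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(buf_bus)) {
		dma_free_coherent(dev, PAGE_SIZE, ring, ring_bus);
		return -EIO;
	}

	/* ... hardware would be programmed with ring_bus and buf_bus here ... */

	dma_unmap_single(dev, buf_bus, len, DMA_TO_DEVICE);
	dma_free_coherent(dev, PAGE_SIZE, ring, ring_bus);
	return 0;
}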