ia64/xen-unstable

view linux-2.6-xen-sparse/include/asm-ia64/dma-mapping.h @ 9762:a3cc276f2e87

[IA64] dma paravirtualization

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@localhost
date Tue Apr 25 16:53:27 2006 -0600 (2006-04-25)
parents ebec4edfa8e4
children d8d2b5c08245
line source
1 #ifndef _ASM_IA64_DMA_MAPPING_H
2 #define _ASM_IA64_DMA_MAPPING_H
4 /*
5 * Copyright (C) 2003-2004 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8 #include <linux/config.h>
9 #include <asm/machvec.h>
10 #ifdef CONFIG_XEN_IA64_DOM0_VP
11 #include <asm/hypervisor.h> //XXX to compile arch/i386/kernel/swiotlb.c
12 // and arch/i386/kernel/pci-dma-xen.c
13 #include <asm-i386/mach-xen/asm/swiotlb.h> //XXX to compile arch/i386/kernel/swiotlb.c
14 #endif
/*
 * Native (non-Xen) build: the generic DMA API maps 1:1 onto the ia64
 * machine-vector hooks, so every dma_* entry point is a plain alias for
 * the corresponding platform_dma_* operation.
 */
16 #ifndef CONFIG_XEN_IA64_DOM0_VP
17 #define dma_alloc_coherent platform_dma_alloc_coherent
18 #define dma_alloc_noncoherent platform_dma_alloc_coherent /* coherent mem. is cheap */
19 #define dma_free_coherent platform_dma_free_coherent
20 #define dma_free_noncoherent platform_dma_free_coherent
21 #define dma_map_single platform_dma_map_single
22 #define dma_map_sg platform_dma_map_sg
23 #define dma_unmap_single platform_dma_unmap_single
24 #define dma_unmap_sg platform_dma_unmap_sg
25 #define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu
26 #define dma_sync_sg_for_cpu platform_dma_sync_sg_for_cpu
27 #define dma_sync_single_for_device platform_dma_sync_single_for_device
28 #define dma_sync_sg_for_device platform_dma_sync_sg_for_device
29 #define dma_mapping_error platform_dma_mapping_error
30 #else
/*
 * Xen paravirtualized dom0 build: the entry points are real functions
 * (presumably provided by the shared arch/i386/kernel/pci-dma-xen.c and
 * swiotlb.c mentioned in the includes above -- see the //XXX notes there),
 * since machine<->pseudo-physical address translation is required.
 */
31 int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
32 enum dma_data_direction direction);
33 void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
34 enum dma_data_direction direction);
35 int dma_supported(struct device *dev, u64 mask);
36 void *dma_alloc_coherent(struct device *dev, size_t size,
37 dma_addr_t *dma_handle, gfp_t gfp);
38 void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
39 dma_addr_t dma_handle);
40 dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
41 enum dma_data_direction direction);
42 void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
43 enum dma_data_direction direction);
44 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
45 size_t size, enum dma_data_direction direction);
46 void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
47 size_t size,
48 enum dma_data_direction direction);
49 int dma_mapping_error(dma_addr_t dma_addr);
/*
 * flush_write_buffers() is an x86-ism used by the shared swiotlb code;
 * it is defined to nothing here (ia64 needs no such flush).
 */
51 #define flush_write_buffers() do { } while (0)
52 static inline void
53 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
54 enum dma_data_direction direction)
55 {
56 if (swiotlb)
57 swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
58 flush_write_buffers();
59 }
61 static inline void
62 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
63 enum dma_data_direction direction)
64 {
65 if (swiotlb)
66 swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
67 flush_write_buffers();
68 }
69 #endif
/*
 * Page-based map/unmap are expressed via the single-buffer primitives;
 * page_address() is usable unconditionally because ia64 has no highmem.
 */
71 #define dma_map_page(dev, pg, off, size, dir) \
72 dma_map_single(dev, page_address(pg) + (off), (size), (dir))
73 #define dma_unmap_page(dev, dma_addr, size, dir) \
74 dma_unmap_single(dev, dma_addr, size, dir)
76 /*
77 * Rest of this file is part of the "Advanced DMA API". Use at your own risk.
78 * See Documentation/DMA-API.txt for details.
79 */
/*
 * NOTE(review): the range variants below drop the 'offset' argument and
 * sync from the start of the mapping -- presumably acceptable because the
 * sync hooks cover the whole buffer on this platform; confirm against the
 * machvec implementations.
 */
81 #define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
82 dma_sync_single_for_cpu(dev, dma_handle, size, dir)
83 #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
84 dma_sync_single_for_device(dev, dma_handle, size, dir)
/*
 * dma_supported: alias for the machine-vector hook on native builds only.
 * The CONFIG_XEN_IA64_DOM0_VP build declares a real dma_supported()
 * function earlier in this header, and an unconditional alias here would
 * silently rewrite that declaration (and every later use, e.g. in
 * dma_set_mask()) into platform_dma_supported -- so guard it like the
 * other platform_dma_* aliases above.
 */
#ifndef CONFIG_XEN_IA64_DOM0_VP
#define dma_supported platform_dma_supported
#endif
88 static inline int
89 dma_set_mask (struct device *dev, u64 mask)
90 {
91 if (!dev->dma_mask || !dma_supported(dev, mask))
92 return -EIO;
93 *dev->dma_mask = mask;
94 return 0;
95 }
/* Implemented in arch code; reports the alignment DMA buffers must honor. */
97 extern int dma_get_cache_alignment(void);
/*
 * dma_cache_sync - flush CPU view of a streaming buffer before/after DMA.
 * On this cache-coherent architecture only ordering is needed (see below).
 */
99 static inline void
100 dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
101 {
102 /*
103 * IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to
104 * ensure that dma_cache_sync() enforces order, hence the mb().
105 */
106 mb();
107 }
109 #define dma_is_consistent(dma_handle) (1) /* all we do is coherent memory... */
111 #ifdef CONFIG_XEN_IA64_DOM0_VP
/* Needed by the shared arch/i386/kernel/swiotlb.o; builds the bitmap of
 * machine-contiguous pseudo-physical ranges used below. */
112 // arch/i386/kernel/swiotlb.o requires
113 void contiguous_bitmap_init(unsigned long end_pfn);
115 static inline int
116 address_needs_mapping(struct device *hwdev, dma_addr_t addr)
117 {
118 dma_addr_t mask = DMA_64BIT_MASK;
119 /* If the device has a mask, use it, otherwise default to 64 bits */
120 if (hwdev && hwdev->dma_mask)
121 mask = *hwdev->dma_mask;
122 return (addr & ~mask) != 0;
123 }
125 static inline int
126 range_straddles_page_boundary(void *p, size_t size)
127 {
128 extern unsigned long *contiguous_bitmap;
129 return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
130 !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
131 }
132 #else
/* Native build: no bitmap needed; evaluate the argument, do nothing. */
133 #define contiguous_bitmap_init(end_pfn) ((void)end_pfn)
134 #endif
136 #endif /* _ASM_IA64_DMA_MAPPING_H */