ia64/xen-unstable

view linux-2.6-xen-sparse/include/asm-ia64/machvec.h @ 9762:a3cc276f2e87

[IA64] dma paravirtualization

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@localhost
date Tue Apr 25 16:53:27 2006 -0600 (2006-04-25)
parents 70467f5491d8
children c3e20511c745
line source
1 /*
2 * Machine vector for IA-64.
3 *
4 * Copyright (C) 1999 Silicon Graphics, Inc.
5 * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
6 * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
7 * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co.
8 * David Mosberger-Tang <davidm@hpl.hp.com>
9 */
#ifndef _ASM_IA64_MACHVEC_H
#define _ASM_IA64_MACHVEC_H

#include <linux/config.h>
#include <linux/types.h>

/*
 * Forward declarations: these structs are only ever used through
 * pointers in the machine-vector typedefs below, so no full
 * definitions are required here.
 */
struct device;
struct pt_regs;
struct scatterlist;
struct page;
struct mm_struct;
struct pci_bus;
/*
 * Function-pointer signatures for the per-platform machine-vector
 * operations: boot-time setup, per-CPU init, interrupt plumbing, TLB
 * maintenance, and legacy PCI I/O access.
 */
typedef void ia64_mv_setup_t (char **);
typedef void ia64_mv_cpu_init_t (void);
typedef void ia64_mv_irq_init_t (void);
typedef void ia64_mv_send_ipi_t (int, int, int, int);
typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *);
typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
typedef unsigned int ia64_mv_local_vector_to_irq (u8);
typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
				       u8 size);
typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
					u8 size);
/* DMA-mapping interface: */
/*
 * NOTE(review): the trailing int parameter of the map/unmap/sync ops is
 * presumably the DMA data direction, matching the generic dma_* API --
 * confirm against the callers/implementations.
 */
typedef void ia64_mv_dma_init (void);
typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
typedef int ia64_mv_dma_supported (struct device *, u64);
/*
 * WARNING: The legacy I/O space is _architected_.  Platforms are
 * expected to follow this architected model (see Section 10.7 in the
 * IA-64 Architecture Software Developer's Manual).  Unfortunately,
 * some broken machines do not follow that model, which is why we have
 * to make the inX/outX operations part of the machine vector.
 * Platform designers should follow the architected model whenever
 * possible.
 */
typedef unsigned int ia64_mv_inb_t (unsigned long);
typedef unsigned int ia64_mv_inw_t (unsigned long);
typedef unsigned int ia64_mv_inl_t (unsigned long);
typedef void ia64_mv_outb_t (unsigned char, unsigned long);
typedef void ia64_mv_outw_t (unsigned short, unsigned long);
typedef void ia64_mv_outl_t (unsigned int, unsigned long);
typedef void ia64_mv_mmiowb_t (void);
/* MMIO read accessors, in normal and _relaxed-ordering variants. */
typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
/* Do-nothing default for machine-vector hooks that take no arguments. */
static inline void
machvec_noop (void)
{
}
/* Do-nothing default for machine-vector hooks taking an mm_struct. */
static inline void
machvec_noop_mm (struct mm_struct *mm)
{
}
/*
 * Common fallback implementations; some are wired up as platform_*
 * defaults at the bottom of this file.
 */
extern void machvec_setup (char **);
extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
extern void machvec_tlb_migrate_finish (struct mm_struct *);
/*
 * Select the machine-vector header for the configured platform.  For
 * the CONFIG_IA64_GENERIC kernel the vector is instead resolved at
 * runtime through the global ia64_mv structure defined below.
 */
# if defined (CONFIG_IA64_HP_SIM)
# include <asm/machvec_hpsim.h>
# elif defined (CONFIG_IA64_DIG)
# include <asm/machvec_dig.h>
# elif defined (CONFIG_IA64_HP_ZX1)
# include <asm/machvec_hpzx1.h>
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
# include <asm/machvec_hpzx1_swiotlb.h>
# elif defined (CONFIG_IA64_SGI_SN2)
# include <asm/machvec_sn2.h>
# elif defined (CONFIG_IA64_GENERIC)
# ifdef MACHVEC_PLATFORM_HEADER
# include MACHVEC_PLATFORM_HEADER
# else
/*
 * Generic kernel: every platform_* operation dispatches indirectly
 * through the runtime-selected machine vector ia64_mv.
 */
# define platform_name			ia64_mv.name
# define platform_setup			ia64_mv.setup
# define platform_cpu_init		ia64_mv.cpu_init
# define platform_irq_init		ia64_mv.irq_init
# define platform_send_ipi		ia64_mv.send_ipi
# define platform_timer_interrupt	ia64_mv.timer_interrupt
# define platform_global_tlb_purge	ia64_mv.global_tlb_purge
# define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
# define platform_dma_init		ia64_mv.dma_init
# define platform_dma_alloc_coherent	ia64_mv.dma_alloc_coherent
# define platform_dma_free_coherent	ia64_mv.dma_free_coherent
# define platform_dma_map_single	ia64_mv.dma_map_single
# define platform_dma_unmap_single	ia64_mv.dma_unmap_single
# define platform_dma_map_sg		ia64_mv.dma_map_sg
# define platform_dma_unmap_sg		ia64_mv.dma_unmap_sg
# define platform_dma_sync_single_for_cpu	ia64_mv.dma_sync_single_for_cpu
# define platform_dma_sync_sg_for_cpu	ia64_mv.dma_sync_sg_for_cpu
# define platform_dma_sync_single_for_device	ia64_mv.dma_sync_single_for_device
# define platform_dma_sync_sg_for_device	ia64_mv.dma_sync_sg_for_device
# define platform_dma_mapping_error	ia64_mv.dma_mapping_error
# define platform_dma_supported		ia64_mv.dma_supported
# define platform_local_vector_to_irq	ia64_mv.local_vector_to_irq
# define platform_pci_get_legacy_mem	ia64_mv.pci_get_legacy_mem
# define platform_pci_legacy_read	ia64_mv.pci_legacy_read
# define platform_pci_legacy_write	ia64_mv.pci_legacy_write
# define platform_inb			ia64_mv.inb
# define platform_inw			ia64_mv.inw
# define platform_inl			ia64_mv.inl
# define platform_outb			ia64_mv.outb
# define platform_outw			ia64_mv.outw
# define platform_outl			ia64_mv.outl
# define platform_mmiowb		ia64_mv.mmiowb
# define platform_readb			ia64_mv.readb
# define platform_readw			ia64_mv.readw
# define platform_readl			ia64_mv.readl
# define platform_readq			ia64_mv.readq
# define platform_readb_relaxed		ia64_mv.readb_relaxed
# define platform_readw_relaxed		ia64_mv.readw_relaxed
# define platform_readl_relaxed		ia64_mv.readl_relaxed
# define platform_readq_relaxed		ia64_mv.readq_relaxed
# endif
/* __attribute__((__aligned__(16))) is required to make size of the
 * structure multiple of 16 bytes.
 * This will fill up the holes created because of section 3.3.1 in
 * Software Conventions guide.
 */
/*
 * Runtime machine vector: one pointer per platform-overridable
 * operation.  Field order is load-bearing -- it must match the
 * positional initializer list in MACHVEC_INIT() below; do not reorder.
 */
struct ia64_machine_vector {
	const char *name;
	ia64_mv_setup_t *setup;
	ia64_mv_cpu_init_t *cpu_init;
	ia64_mv_irq_init_t *irq_init;
	ia64_mv_send_ipi_t *send_ipi;
	ia64_mv_timer_interrupt_t *timer_interrupt;
	ia64_mv_global_tlb_purge_t *global_tlb_purge;
	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
	ia64_mv_dma_init *dma_init;
	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
	ia64_mv_dma_free_coherent *dma_free_coherent;
	ia64_mv_dma_map_single *dma_map_single;
	ia64_mv_dma_unmap_single *dma_unmap_single;
	ia64_mv_dma_map_sg *dma_map_sg;
	ia64_mv_dma_unmap_sg *dma_unmap_sg;
	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
	ia64_mv_dma_mapping_error *dma_mapping_error;
	ia64_mv_dma_supported *dma_supported;
	ia64_mv_local_vector_to_irq *local_vector_to_irq;
	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
	ia64_mv_pci_legacy_read_t *pci_legacy_read;
	ia64_mv_pci_legacy_write_t *pci_legacy_write;
	ia64_mv_inb_t *inb;
	ia64_mv_inw_t *inw;
	ia64_mv_inl_t *inl;
	ia64_mv_outb_t *outb;
	ia64_mv_outw_t *outw;
	ia64_mv_outl_t *outl;
	ia64_mv_mmiowb_t *mmiowb;
	ia64_mv_readb_t *readb;
	ia64_mv_readw_t *readw;
	ia64_mv_readl_t *readl;
	ia64_mv_readq_t *readq;
	ia64_mv_readb_relaxed_t *readb_relaxed;
	ia64_mv_readw_relaxed_t *readw_relaxed;
	ia64_mv_readl_relaxed_t *readl_relaxed;
	ia64_mv_readq_relaxed_t *readq_relaxed;
} __attribute__((__aligned__(16))); /* align attrib?  see above comment */
/*
 * Static initializer for a struct ia64_machine_vector.  The entries
 * are positional and must stay in exactly the same order as the
 * struct's fields above.  #name stringizes the platform identifier;
 * each platform_* macro resolves to that platform's implementation
 * (or to the defaults defined at the bottom of this file).
 */
#define MACHVEC_INIT(name)			\
{						\
	#name,					\
	platform_setup,				\
	platform_cpu_init,			\
	platform_irq_init,			\
	platform_send_ipi,			\
	platform_timer_interrupt,		\
	platform_global_tlb_purge,		\
	platform_tlb_migrate_finish,		\
	platform_dma_init,			\
	platform_dma_alloc_coherent,		\
	platform_dma_free_coherent,		\
	platform_dma_map_single,		\
	platform_dma_unmap_single,		\
	platform_dma_map_sg,			\
	platform_dma_unmap_sg,			\
	platform_dma_sync_single_for_cpu,	\
	platform_dma_sync_sg_for_cpu,		\
	platform_dma_sync_single_for_device,	\
	platform_dma_sync_sg_for_device,	\
	platform_dma_mapping_error,		\
	platform_dma_supported,			\
	platform_local_vector_to_irq,		\
	platform_pci_get_legacy_mem,		\
	platform_pci_legacy_read,		\
	platform_pci_legacy_write,		\
	platform_inb,				\
	platform_inw,				\
	platform_inl,				\
	platform_outb,				\
	platform_outw,				\
	platform_outl,				\
	platform_mmiowb,			\
	platform_readb,				\
	platform_readw,				\
	platform_readl,				\
	platform_readq,				\
	platform_readb_relaxed,			\
	platform_readw_relaxed,			\
	platform_readl_relaxed,			\
	platform_readq_relaxed,			\
}
/* The one global machine vector; machvec_init() selects it by name at boot. */
extern struct ia64_machine_vector ia64_mv;
extern void machvec_init (const char *name);

# else
# error Unknown configuration. Update asm-ia64/machvec.h.
# endif /* CONFIG_IA64_GENERIC */
#ifdef CONFIG_XEN_IA64_DOM0_VP
/*
 * Xen/ia64 dom0 DMA paravirtualization: route the platform_dma_*
 * operations to the (Xen-aware) dma_* entry points instead of the
 * platform selections above.
 *
 * NOTE(review): this redefines macros that a platform header or the
 * generic dispatch block may already have defined, with no #undef
 * first -- confirm this does not trigger redefinition warnings.
 * NOTE(review): the scatterlist sync ops (dma_sync_sg_for_cpu/device)
 * are not overridden here, unlike their single-buffer counterparts --
 * verify that is intentional.
 */
# define platform_dma_map_sg		dma_map_sg
# define platform_dma_unmap_sg		dma_unmap_sg
# define platform_dma_mapping_error	dma_mapping_error
# define platform_dma_supported		dma_supported
# define platform_dma_alloc_coherent	dma_alloc_coherent
# define platform_dma_free_coherent	dma_free_coherent
# define platform_dma_map_single	dma_map_single
# define platform_dma_unmap_single	dma_unmap_single
# define platform_dma_sync_single_for_cpu	\
	dma_sync_single_for_cpu
# define platform_dma_sync_single_for_device	\
	dma_sync_single_for_device
#endif
/*
 * Declare default routines which aren't declared anywhere else:
 * the swiotlb (software I/O TLB bounce-buffer) implementations used
 * as the platform_dma_* defaults below.
 */
extern ia64_mv_dma_init			swiotlb_init;
extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
extern ia64_mv_dma_map_single		swiotlb_map_single;
extern ia64_mv_dma_unmap_single		swiotlb_unmap_single;
extern ia64_mv_dma_map_sg		swiotlb_map_sg;
extern ia64_mv_dma_unmap_sg		swiotlb_unmap_sg;
extern ia64_mv_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu;
extern ia64_mv_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu;
extern ia64_mv_dma_sync_single_for_device	swiotlb_sync_single_for_device;
extern ia64_mv_dma_sync_sg_for_device	swiotlb_sync_sg_for_device;
extern ia64_mv_dma_mapping_error	swiotlb_dma_mapping_error;
extern ia64_mv_dma_supported		swiotlb_dma_supported;
/*
 * Define default versions so we can extend machvec for new platforms without having
 * to update the machvec files for all existing platforms.
 * Each #ifndef takes effect only if neither the platform header nor an
 * earlier block in this file already defined the macro.
 */
#ifndef platform_setup
# define platform_setup			machvec_setup
#endif
#ifndef platform_cpu_init
# define platform_cpu_init		machvec_noop
#endif
#ifndef platform_irq_init
# define platform_irq_init		machvec_noop
#endif

#ifndef platform_send_ipi
# define platform_send_ipi		ia64_send_ipi	/* default to architected version */
#endif
#ifndef platform_timer_interrupt
# define platform_timer_interrupt	machvec_timer_interrupt
#endif
#ifndef platform_global_tlb_purge
# define platform_global_tlb_purge	ia64_global_tlb_purge	/* default to architected version */
#endif
#ifndef platform_tlb_migrate_finish
# define platform_tlb_migrate_finish	machvec_noop_mm
#endif
/* DMA defaults fall back to the swiotlb bounce-buffer implementation. */
#ifndef platform_dma_init
# define platform_dma_init		swiotlb_init
#endif
#ifndef platform_dma_alloc_coherent
# define platform_dma_alloc_coherent	swiotlb_alloc_coherent
#endif
#ifndef platform_dma_free_coherent
# define platform_dma_free_coherent	swiotlb_free_coherent
#endif
#ifndef platform_dma_map_single
# define platform_dma_map_single	swiotlb_map_single
#endif
#ifndef platform_dma_unmap_single
# define platform_dma_unmap_single	swiotlb_unmap_single
#endif
#ifndef platform_dma_map_sg
# define platform_dma_map_sg		swiotlb_map_sg
#endif
#ifndef platform_dma_unmap_sg
# define platform_dma_unmap_sg		swiotlb_unmap_sg
#endif
#ifndef platform_dma_sync_single_for_cpu
# define platform_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu
#endif
#ifndef platform_dma_sync_sg_for_cpu
# define platform_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu
#endif
#ifndef platform_dma_sync_single_for_device
# define platform_dma_sync_single_for_device	swiotlb_sync_single_for_device
#endif
#ifndef platform_dma_sync_sg_for_device
# define platform_dma_sync_sg_for_device	swiotlb_sync_sg_for_device
#endif
#ifndef platform_dma_mapping_error
# define platform_dma_mapping_error	swiotlb_dma_mapping_error
#endif
#ifndef platform_dma_supported
# define platform_dma_supported		swiotlb_dma_supported
#endif
#ifndef platform_local_vector_to_irq
# define platform_local_vector_to_irq	__ia64_local_vector_to_irq
#endif
#ifndef platform_pci_get_legacy_mem
# define platform_pci_get_legacy_mem	ia64_pci_get_legacy_mem
#endif
#ifndef platform_pci_legacy_read
# define platform_pci_legacy_read	ia64_pci_legacy_read
#endif
#ifndef platform_pci_legacy_write
# define platform_pci_legacy_write	ia64_pci_legacy_write
#endif
/* Port and MMIO accessor defaults use the architected __ia64_* versions. */
#ifndef platform_inb
# define platform_inb			__ia64_inb
#endif
#ifndef platform_inw
# define platform_inw			__ia64_inw
#endif
#ifndef platform_inl
# define platform_inl			__ia64_inl
#endif
#ifndef platform_outb
# define platform_outb			__ia64_outb
#endif
#ifndef platform_outw
# define platform_outw			__ia64_outw
#endif
#ifndef platform_outl
# define platform_outl			__ia64_outl
#endif
#ifndef platform_mmiowb
# define platform_mmiowb		__ia64_mmiowb
#endif
#ifndef platform_readb
# define platform_readb			__ia64_readb
#endif
#ifndef platform_readw
# define platform_readw			__ia64_readw
#endif
#ifndef platform_readl
# define platform_readl			__ia64_readl
#endif
#ifndef platform_readq
# define platform_readq			__ia64_readq
#endif
#ifndef platform_readb_relaxed
# define platform_readb_relaxed		__ia64_readb_relaxed
#endif
#ifndef platform_readw_relaxed
# define platform_readw_relaxed		__ia64_readw_relaxed
#endif
#ifndef platform_readl_relaxed
# define platform_readl_relaxed		__ia64_readl_relaxed
#endif
#ifndef platform_readq_relaxed
# define platform_readq_relaxed		__ia64_readq_relaxed
#endif

#endif /* _ASM_IA64_MACHVEC_H */