ia64/linux-2.6.18-xen.hg

view include/asm-m68knommu/io.h @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain as it is done in
machine_kexec_setup_resources() using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 10:55:18 2008 +0000 (2008-02-28)
parents 831230e53067
children
line source
1 #ifndef _M68KNOMMU_IO_H
2 #define _M68KNOMMU_IO_H
4 #ifdef __KERNEL__
7 /*
8 * These are for ISA/PCI shared memory _only_ and should never be used
9 * on any other type of memory, including Zorro memory. They are meant to
10 * access the bus in the bus byte order which is little-endian.
11 *
12 * readX/writeX() are used to access memory mapped devices. On some
13 * architectures the memory mapped IO stuff needs to be accessed
14 * differently. On the m68k architecture, we just read/write the
15 * memory location directly.
16 */
17 /* ++roman: The assignments to temp. vars avoid that gcc sometimes generates
18 * two accesses to memory, which may be undesirable for some devices.
19 */
21 /*
22 * swap functions are sometimes needed to interface little-endian hardware
23 */
/*
 * Byte-swap a 16-bit value, converting between the little-endian bus
 * byte order and the big-endian m68k CPU byte order.
 */
static inline unsigned short _swapw(volatile unsigned short v)
{
	unsigned short lo = v << 8;	/* low byte moves to the top */
	unsigned short hi = v >> 8;	/* high byte moves to the bottom */

	return lo | hi;
}
/*
 * Byte-swap a 32-bit value, converting between the little-endian bus
 * byte order and the big-endian m68k CPU byte order.
 *
 * Historic signature quirk: the argument is "unsigned long" while the
 * result is "unsigned int"; only the low 32 bits are meaningful.  The
 * original code masked only the two middle bytes, so "v >> 24" would
 * drag in bits above bit 31 on a platform where long is wider than 32
 * bits.  Every byte is now masked explicitly -- identical behavior on
 * 32-bit m68k, and correct for 32-bit values even if long is 64-bit.
 */
static inline unsigned int _swapl(volatile unsigned long v)
{
	return ((v & 0x000000ffUL) << 24) |
	       ((v & 0x0000ff00UL) <<  8) |
	       ((v & 0x00ff0000UL) >>  8) |
	       ((v & 0xff000000UL) >> 24);
}
/*
 * Memory-mapped accessors: a single volatile load/store in bus byte
 * order, implemented with GCC statement expressions.  The temporary
 * __v guarantees exactly one memory access per invocation.
 */
34 #define readb(addr) \
35 ({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; })
36 #define readw(addr) \
37 ({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; })
38 #define readl(addr) \
39 ({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; })
/* The _relaxed variants simply alias the plain accessors here. */
41 #define readb_relaxed(addr) readb(addr)
42 #define readw_relaxed(addr) readw(addr)
43 #define readl_relaxed(addr) readl(addr)
45 #define writeb(b,addr) (void)((*(volatile unsigned char *) (addr)) = (b))
46 #define writew(b,addr) (void)((*(volatile unsigned short *) (addr)) = (b))
47 #define writel(b,addr) (void)((*(volatile unsigned int *) (addr)) = (b))
/* __raw_* variants alias the normal ones: no swapping happens either way. */
49 #define __raw_readb readb
50 #define __raw_readw readw
51 #define __raw_readl readl
52 #define __raw_writeb writeb
53 #define __raw_writew writew
54 #define __raw_writel writel
/*
 * Write 'len' bytes from 'buf' to the single I/O location 'addr',
 * one byte at a time (string-output, byte-wide).
 */
static inline void io_outsb(unsigned int addr, void *buf, int len)
{
	volatile unsigned char *port = (volatile unsigned char *) addr;
	unsigned char *src = (unsigned char *) buf;
	int i;

	for (i = 0; i < len; i++)
		*port = src[i];
}
/*
 * Write 'len' 16-bit words from 'buf' to the single I/O location
 * 'addr', byte-swapping each word into bus (little-endian) order.
 */
static inline void io_outsw(unsigned int addr, void *buf, int len)
{
	volatile unsigned short *port = (volatile unsigned short *) addr;
	unsigned short *src = (unsigned short *) buf;
	int i;

	for (i = 0; i < len; i++)
		*port = _swapw(src[i]);
}
/*
 * Write 'len' 32-bit words from 'buf' to the single I/O location
 * 'addr', byte-swapping each word into bus (little-endian) order.
 */
static inline void io_outsl(unsigned int addr, void *buf, int len)
{
	volatile unsigned int *port = (volatile unsigned int *) addr;
	unsigned int *src = (unsigned int *) buf;
	int i;

	for (i = 0; i < len; i++)
		*port = _swapl(src[i]);
}
/*
 * Read 'len' bytes from the single I/O location 'addr' into 'buf',
 * one byte at a time (string-input, byte-wide).
 */
static inline void io_insb(unsigned int addr, void *buf, int len)
{
	volatile unsigned char *port = (volatile unsigned char *) addr;
	unsigned char *dst = (unsigned char *) buf;
	int i;

	for (i = 0; i < len; i++)
		dst[i] = *port;
}
/*
 * Read 'len' 16-bit words from the single I/O location 'addr' into
 * 'buf', byte-swapping each word from bus (little-endian) order.
 */
static inline void io_insw(unsigned int addr, void *buf, int len)
{
	volatile unsigned short *port = (volatile unsigned short *) addr;
	unsigned short *dst = (unsigned short *) buf;
	int i;

	for (i = 0; i < len; i++)
		dst[i] = _swapw(*port);
}
/*
 * Read 'len' 32-bit words from the single I/O location 'addr' into
 * 'buf', byte-swapping each word from bus (little-endian) order.
 */
static inline void io_insl(unsigned int addr, void *buf, int len)
{
	volatile unsigned int *port = (volatile unsigned int *) addr;
	unsigned int *dst = (unsigned int *) buf;
	int i;

	for (i = 0; i < len; i++)
		dst[i] = _swapl(*port);
}
/* mmiowb(): MMIO write-ordering barrier -- expands to nothing here. */
104 #define mmiowb()
106 /*
107 * make the short names macros so specific devices
108 * can override them as required
109 */
/* I/O memory is directly addressable, so plain memset/memcpy work. */
111 #define memset_io(a,b,c) memset((void *)(a),(b),(c))
112 #define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
113 #define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
/* Port I/O is just memory-mapped I/O: in/out map onto read/write. */
115 #define inb(addr) readb(addr)
116 #define inw(addr) readw(addr)
117 #define inl(addr) readl(addr)
118 #define outb(x,addr) ((void) writeb(x,addr))
119 #define outw(x,addr) ((void) writew(x,addr))
120 #define outl(x,addr) ((void) writel(x,addr))
/* "Pausing" (_p) variants alias the plain forms -- no delay is added. */
122 #define inb_p(addr) inb(addr)
123 #define inw_p(addr) inw(addr)
124 #define inl_p(addr) inl(addr)
125 #define outb_p(x,addr) outb(x,addr)
126 #define outw_p(x,addr) outw(x,addr)
127 #define outl_p(x,addr) outl(x,addr)
/* String I/O maps onto the io_* helpers defined above. */
129 #define outsb(a,b,l) io_outsb(a,b,l)
130 #define outsw(a,b,l) io_outsw(a,b,l)
131 #define outsl(a,b,l) io_outsl(a,b,l)
133 #define insb(a,b,l) io_insb(a,b,l)
134 #define insw(a,b,l) io_insw(a,b,l)
135 #define insl(a,b,l) io_insl(a,b,l)
137 #define IO_SPACE_LIMIT 0xffff
140 /* Values for nocacheflag and cmode */
141 #define IOMAP_FULL_CACHING 0
142 #define IOMAP_NOCACHE_SER 1
143 #define IOMAP_NOCACHE_NONSER 2
144 #define IOMAP_WRITETHROUGH 3
/*
 * Map a physical address range; cacheflag is one of the IOMAP_* values
 * above.  Defined out of line elsewhere in the arch code.
 */
146 extern void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag);
147 extern void __iounmap(void *addr, unsigned long size);
/* Default ioremap() uses serialized non-cached mappings. */
149 static inline void *ioremap(unsigned long physaddr, unsigned long size)
150 {
151 return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
152 }
/* ioremap_nocache() is identical to ioremap() on this architecture. */
153 static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
154 {
155 return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
156 }
/* Write-through cached mapping. */
157 static inline void *ioremap_writethrough(unsigned long physaddr, unsigned long size)
158 {
159 return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
160 }
/* Fully cached mapping. */
161 static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size)
162 {
163 return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
164 }
/* Tear down a mapping created by ioremap(); defined out of line. */
166 extern void iounmap(void *addr);
168 /* Nothing to do */
/* DMA cache maintenance hooks expand to nothing on this architecture. */
170 #define dma_cache_inv(_start,_size) do { } while (0)
171 #define dma_cache_wback(_start,_size) do { } while (0)
172 #define dma_cache_wback_inv(_start,_size) do { } while (0)
174 /* Pages to physical address... */
/* Page index within mem_map, shifted by PAGE_SHIFT, gives the
 * physical (and bus) address -- memory is linearly mapped. */
175 #define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
176 #define page_to_bus(page) ((page - mem_map) << PAGE_SHIFT)
178 /*
179 * Macros used for converting between virtual and physical mappings.
180 */
/* With no MMU, virtual and physical addresses are identical; these
 * conversions are pure casts. */
181 #define mm_ptov(vaddr) ((void *) (vaddr))
182 #define mm_vtop(vaddr) ((unsigned long) (vaddr))
183 #define phys_to_virt(vaddr) ((void *) (vaddr))
184 #define virt_to_phys(vaddr) ((unsigned long) (vaddr))
186 #define virt_to_bus virt_to_phys
187 #define bus_to_virt phys_to_virt
189 /*
190 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
191 * access
192 */
193 #define xlate_dev_mem_ptr(p) __va(p)
195 /*
196 * Convert a virtual cached pointer to an uncached pointer
197 */
/* Identity: cached and uncached views coincide here. */
198 #define xlate_dev_kmem_ptr(p) p
200 #endif /* __KERNEL__ */
202 #endif /* _M68KNOMMU_IO_H */