
xen/include/xen/byteorder/swab.h @ 19800:78962f85c562

IOMMU: Add two generic functions to vendor neutral interface

Add two generic functions to the vendor-neutral iommu interface. The
reason is that, as of changeset 19732, a single global flag
"iommu_enabled" controls iommu enablement for both vtd and amd
systems, so we need different code paths for vtd and amd iommu systems
once this flag is turned on. Also, the early check of "iommu_enabled"
in iommu_setup() is removed to prevent iommu functionality from being
disabled on amd systems.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 19 08:41:50 2009 +0100 (2009-06-19)
parents fe76b80d081a
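
For illustration only, a minimal sketch of the vendor-dispatch pattern the
message describes; every name below except iommu_enabled is hypothetical and
not taken from the changeset itself:

    /* Hypothetical sketch -- not the changeset's actual code. */
    struct iommu_vendor_ops {
        int (*setup)(void);              /* vendor-specific bring-up */
    };

    extern int iommu_enabled;            /* the single global flag from c/s 19732 */

    /* Vendor-neutral entry point: one flag, separate vtd/amd code paths. */
    static int generic_iommu_setup(const struct iommu_vendor_ops *ops)
    {
        if ( !iommu_enabled )
            return 0;
        return ops->setup();             /* vtd or amd implementation */
    }

The swab.h source at this revision follows.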
#ifndef __XEN_BYTEORDER_SWAB_H__
#define __XEN_BYTEORDER_SWAB_H__

/*
 * Byte-swapping, independently of CPU endianness
 *     swabXX[ps]?(foo)
 *
 * Francois-Rene Rideau <fare@tunes.org> 19971205
 *    separated swab functions from cpu_to_XX,
 *    to clean up support for bizarre-endian architectures.
 */
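
/*
 * Naming scheme: swabXX(val) returns the swapped value, swabXXp(ptr)
 * returns the swapped value of *ptr, and swabXXs(ptr) swaps *ptr in place.
 */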
#include <xen/compiler.h>

/* casts are necessary for constants, because we never know for sure
 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
 */
#define ___swab16(x) \
({ \
    __u16 __x = (x); \
    ((__u16)( \
        (((__u16)(__x) & (__u16)0x00ffU) << 8) | \
        (((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
})

#define ___swab32(x) \
({ \
    __u32 __x = (x); \
    ((__u32)( \
        (((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
        (((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \
        (((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \
        (((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
})

#define ___swab64(x) \
({ \
    __u64 __x = (x); \
    ((__u64)( \
        (__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
        (__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
        (__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
        (__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) << 8) | \
        (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >> 8) | \
        (__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
        (__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
        (__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
})
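
/*
 * Examples (values derived from the masks above):
 *   ___swab16(0x1234)                == 0x3412
 *   ___swab32(0x12345678)            == 0x78563412
 *   ___swab64(0x0123456789abcdefULL) == 0xefcdab8967452301
 */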

#define ___constant_swab16(x) \
    ((__u16)( \
        (((__u16)(x) & (__u16)0x00ffU) << 8) | \
        (((__u16)(x) & (__u16)0xff00U) >> 8) ))
#define ___constant_swab32(x) \
    ((__u32)( \
        (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
        (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
        (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
        (((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
#define ___constant_swab64(x) \
    ((__u64)( \
        (__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
        (__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
        (__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
        (__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
        (__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
        (__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
        (__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
        (__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
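
/*
 * The ___constant_* variants avoid the GCC statement expression ({ ... })
 * so they remain valid where a constant expression is required, e.g. in
 * static initializers or case labels.
 */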

/*
 * provide defaults when no architecture-specific optimization is detected
 */
#ifndef __arch__swab16
# define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
#endif
#ifndef __arch__swab32
# define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
#endif
#ifndef __arch__swab64
# define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
#endif

#ifndef __arch__swab16p
# define __arch__swab16p(x) __arch__swab16(*(x))
#endif
#ifndef __arch__swab32p
# define __arch__swab32p(x) __arch__swab32(*(x))
#endif
#ifndef __arch__swab64p
# define __arch__swab64p(x) __arch__swab64(*(x))
#endif

#ifndef __arch__swab16s
# define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
#endif
#ifndef __arch__swab32s
# define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
#endif
#ifndef __arch__swab64s
# define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
#endif
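
/*
 * The do { ... } while (0) wrapper keeps the in-place s-variants safe to
 * use as a single statement, e.g. in an unbraced if/else branch.
 */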

/*
 * Allow constant folding
 */
#if defined(__GNUC__) && defined(__OPTIMIZE__)
# define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
 ___swab16((x)) : \
 __fswab16((x)))
# define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
 ___swab32((x)) : \
 __fswab32((x)))
# define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
 ___swab64((x)) : \
 __fswab64((x)))
#else
# define __swab16(x) __fswab16(x)
# define __swab32(x) __fswab32(x)
# define __swab64(x) __fswab64(x)
#endif /* OPTIMIZE */
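
/*
 * When GCC is optimizing (__OPTIMIZE__ is defined at -O1 and above),
 * __builtin_constant_p() lets e.g. __swab32(0x12345678) fold to the
 * constant 0x78563412 at compile time, while runtime values go through
 * the out-of-line-capable __fswabXX() helpers below.
 */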

static inline __attribute_const__ __u16 __fswab16(__u16 x)
{
    return __arch__swab16(x);
}
static inline __u16 __swab16p(const __u16 *x)
{
    return __arch__swab16p(x);
}
static inline void __swab16s(__u16 *addr)
{
    __arch__swab16s(addr);
}

static inline __attribute_const__ __u32 __fswab32(__u32 x)
{
    return __arch__swab32(x);
}
static inline __u32 __swab32p(const __u32 *x)
{
    return __arch__swab32p(x);
}
static inline void __swab32s(__u32 *addr)
{
    __arch__swab32s(addr);
}

#ifdef __BYTEORDER_HAS_U64__
static inline __attribute_const__ __u64 __fswab64(__u64 x)
{
# ifdef __SWAB_64_THRU_32__
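    /* No 64-bit arch swap available: swap each 32-bit half, then
     * exchange the halves. */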
    __u32 h = x >> 32;
    __u32 l = x & ((1ULL<<32)-1);
    return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
# else
    return __arch__swab64(x);
# endif
}
static inline __u64 __swab64p(const __u64 *x)
{
    return __arch__swab64p(x);
}
static inline void __swab64s(__u64 *addr)
{
    __arch__swab64s(addr);
}
#endif /* __BYTEORDER_HAS_U64__ */

#define swab16 __swab16
#define swab32 __swab32
#define swab64 __swab64
#define swab16p __swab16p
#define swab32p __swab32p
#define swab64p __swab64p
#define swab16s __swab16s
#define swab32s __swab32s
#define swab64s __swab64s

#endif /* __XEN_BYTEORDER_SWAB_H__ */
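
As a usage note, a minimal self-test sketch of the three call forms; it
assumes a Xen build environment that provides this header (and the __uNN
types via xen/compiler.h), and the harness function itself is hypothetical:

    #include <xen/lib.h>                      /* BUG_ON() */
    #include <xen/byteorder/swab.h>

    /* Hypothetical self-test exercising the three call forms. */
    static void swab_self_test(void)
    {
        __u32 v = 0x12345678UL;

        BUG_ON(swab16(0x1234) != 0x3412);     /* value form */
        BUG_ON(swab32p(&v) != 0x78563412UL);  /* pointer form; v is unchanged */
        swab32s(&v);                          /* in-place form */
        BUG_ON(v != 0x78563412UL);
    }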