direct-io.hg: view xen/include/asm-x86/system.h @ 15516:fa9fa5f98c91

Simplify Unisys ES7000 code in light of the fact we do not support
legacy boxes with very non-standard APIC setup.
From: Raj Subrahmanian <raj.subrahmanian@unisys.com>
Signed-off-by: Keir Fraser <keir@xensource.com>

author    kfraser@localhost.localdomain
date      Tue Jul 10 10:07:00 2007 +0100
parents   3e2d3d737624
children  d7e3224b661a
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <xen/config.h>
#include <xen/types.h>
#include <asm/bitops.h>
#define read_segment_register(name)                                     \
({  u16 __sel;                                                          \
    __asm__ __volatile__ ( "movw %%" STR(name) ",%0" : "=r" (__sel) );  \
    __sel;                                                              \
})
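
/*
 * Illustrative usage sketch (not part of the original header): the macro
 * takes a bare segment register name, which STR() turns into assembler
 * text, so a caller can snapshot the currently loaded selector:
 *
 *     u16 ds_sel = read_segment_register(ds);
 */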
#define wbinvd() \
    __asm__ __volatile__ ("wbinvd": : :"memory");

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has side effects, so the volatile attribute is necessary,
 *   though strictly the constraints are incomplete: *ptr is also an
 *   output operand. --ANK
 */
static always_inline unsigned long __xchg(
    unsigned long x, volatile void *ptr, int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
#if defined(__i386__)
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
#elif defined(__x86_64__)
    case 4:
        __asm__ __volatile__("xchgl %k0,%1"
                             :"=r" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
    case 8:
        __asm__ __volatile__("xchgq %0,%1"
                             :"=r" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
#endif
    }
    return x;
}
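
/*
 * Illustrative usage sketch (not part of the original header): xchg()
 * atomically stores a new value and hands back the old one, e.g. to take
 * over a pointer published by another CPU:
 *
 *     struct work *prev = xchg(&pending_work, new_work);
 *     if ( prev != NULL )
 *         process(prev);
 *
 * 'struct work', 'pending_work', 'new_work' and process() are hypothetical
 * names used only for this example.
 */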
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */
static always_inline unsigned long __cmpxchg(
    volatile void *ptr, unsigned long old, unsigned long new, int size)
{
    unsigned long prev;
    switch (size) {
    case 1:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
    case 2:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
#if defined(__i386__)
    case 4:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
#elif defined(__x86_64__)
    case 4:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
    case 8:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
#endif
    }
    return old;
}
#define __HAVE_ARCH_CMPXCHG

#if BITS_PER_LONG == 64

#define cmpxchg(ptr,o,n)                                        \
    ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),    \
                                   (unsigned long)(n),sizeof(*(ptr))))

#else

static always_inline unsigned long long __cmpxchg8b(
    volatile void *ptr, unsigned long long old, unsigned long long new)
{
    unsigned long long prev;
    __asm__ __volatile__ (
        LOCK_PREFIX "cmpxchg8b %3"
        : "=A" (prev)
        : "c" ((u32)(new>>32)), "b" ((u32)new),
          "m" (*__xg((volatile void *)ptr)), "0" (old)
        : "memory" );
    return prev;
}

#define cmpxchg(ptr,o,n)                                \
({                                                      \
    __typeof__(*(ptr)) __prev;                          \
    switch ( sizeof(*(ptr)) ) {                         \
    case 8:                                             \
        __prev = ((__typeof__(*(ptr)))__cmpxchg8b(      \
            (ptr),                                      \
            (unsigned long long)(o),                    \
            (unsigned long long)(n)));                  \
        break;                                          \
    default:                                            \
        __prev = ((__typeof__(*(ptr)))__cmpxchg(        \
            (ptr),                                      \
            (unsigned long)(o),                         \
            (unsigned long)(n),                         \
            sizeof(*(ptr))));                           \
        break;                                          \
    }                                                   \
    __prev;                                             \
})

#endif
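
/*
 * Illustrative usage sketch (not part of the original header): cmpxchg()
 * is normally wrapped in a read-modify-write retry loop, e.g. to set a bit
 * in a shared word without losing concurrent updates:
 *
 *     unsigned long old, new;
 *     do {
 *         old = shared_word;
 *         new = old | 0x1;
 *     } while ( cmpxchg(&shared_word, old, new) != old );
 *
 * 'shared_word' is a hypothetical variable used only for this example.
 */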
/*
 * This function causes value _o to be changed to _n at location _p.
 * If this access causes a fault then we return 1, otherwise we return 0.
 * If no fault occurs then _o is updated to the value we saw at _p. If this
 * is the same as the initial value of _o then _n is written to location _p.
 */
#ifdef __i386__
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype)                 \
    __asm__ __volatile__ (                                              \
        "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n"             \
        "2:\n"                                                          \
        ".section .fixup,\"ax\"\n"                                      \
        "3:     movl $1,%1\n"                                           \
        "       jmp 2b\n"                                               \
        ".previous\n"                                                   \
        ".section __ex_table,\"a\"\n"                                   \
        "       .align 4\n"                                             \
        "       .long 1b,3b\n"                                          \
        ".previous"                                                     \
        : "=a" (_o), "=r" (_rc)                                         \
        : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
        : "memory");
#define cmpxchg_user(_p,_o,_n)                                          \
({                                                                      \
    int _rc;                                                            \
    switch ( sizeof(*(_p)) ) {                                          \
    case 1:                                                             \
        __cmpxchg_user(_p,_o,_n,"b","b","q");                           \
        break;                                                          \
    case 2:                                                             \
        __cmpxchg_user(_p,_o,_n,"w","w","r");                           \
        break;                                                          \
    case 4:                                                             \
        __cmpxchg_user(_p,_o,_n,"l","","r");                            \
        break;                                                          \
    case 8:                                                             \
        __asm__ __volatile__ (                                          \
            "1: " LOCK_PREFIX "cmpxchg8b %4\n"                          \
            "2:\n"                                                      \
            ".section .fixup,\"ax\"\n"                                  \
            "3:     movl $1,%1\n"                                       \
            "       jmp 2b\n"                                           \
            ".previous\n"                                               \
            ".section __ex_table,\"a\"\n"                               \
            "       .align 4\n"                                         \
            "       .long 1b,3b\n"                                      \
            ".previous"                                                 \
            : "=A" (_o), "=r" (_rc)                                     \
            : "c" ((u32)((u64)(_n)>>32)), "b" ((u32)(_n)),              \
              "m" (*__xg((volatile void *)(_p))), "0" (_o), "1" (0)     \
            : "memory");                                                \
        break;                                                          \
    }                                                                   \
    _rc;                                                                \
})
#else
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype)                 \
    __asm__ __volatile__ (                                              \
        "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n"             \
        "2:\n"                                                          \
        ".section .fixup,\"ax\"\n"                                      \
        "3:     movl $1,%1\n"                                           \
        "       jmp 2b\n"                                               \
        ".previous\n"                                                   \
        ".section __ex_table,\"a\"\n"                                   \
        "       .align 8\n"                                             \
        "       .quad 1b,3b\n"                                          \
        ".previous"                                                     \
        : "=a" (_o), "=r" (_rc)                                         \
        : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
        : "memory");
#define cmpxchg_user(_p,_o,_n)                                          \
({                                                                      \
    int _rc;                                                            \
    switch ( sizeof(*(_p)) ) {                                          \
    case 1:                                                             \
        __cmpxchg_user(_p,_o,_n,"b","b","q");                           \
        break;                                                          \
    case 2:                                                             \
        __cmpxchg_user(_p,_o,_n,"w","w","r");                           \
        break;                                                          \
    case 4:                                                             \
        __cmpxchg_user(_p,_o,_n,"l","k","r");                           \
        break;                                                          \
    case 8:                                                             \
        __cmpxchg_user(_p,_o,_n,"q","","r");                            \
        break;                                                          \
    }                                                                   \
    _rc;                                                                \
})
#endif
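
/*
 * Illustrative usage sketch (not part of the original header): unlike
 * cmpxchg(), cmpxchg_user() tolerates a faulting access.  A non-zero
 * return means the access faulted; otherwise 'seen' is updated to the
 * value observed at the location, and comparing it with the expected
 * value tells the caller whether the new value was actually written:
 *
 *     unsigned long seen = expected;
 *     if ( cmpxchg_user(guest_ptr, seen, new_val) )
 *         return -EFAULT;
 *     if ( seen != expected )
 *         goto retry;
 *
 * 'guest_ptr', 'expected', 'new_val' and the retry label are hypothetical
 * names used only for this example.
 */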
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */
#if defined(__i386__)
#define mb()  __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#ifdef CONFIG_X86_OOSTORE
#define wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#endif
#elif defined(__x86_64__)
#define mb()  __asm__ __volatile__ ("mfence":::"memory")
#define rmb() __asm__ __volatile__ ("lfence":::"memory")
#ifdef CONFIG_X86_OOSTORE
#define wmb() __asm__ __volatile__ ("sfence":::"memory")
#endif
#endif

#ifndef CONFIG_X86_OOSTORE
#define wmb() __asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()  mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#else
#define smp_mb()  barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#endif

#define set_mb(var, value)  do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
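
/*
 * Illustrative usage sketch (not part of the original header): the classic
 * producer/consumer pairing of smp_wmb() and smp_rmb().  The producer must
 * order the payload before the ready flag; the consumer must order the flag
 * check before reading the payload.
 *
 *     Producer:
 *         payload = compute();
 *         smp_wmb();
 *         ready = 1;
 *
 *     Consumer:
 *         if ( ready )
 *         {
 *             smp_rmb();
 *             consume(payload);
 *         }
 *
 * 'payload', 'ready', compute() and consume() are hypothetical names used
 * only for this example.
 */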
/* Interrupt control. */
#if defined(__i386__)
#define __save_flags(x) __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */)
#define __restore_flags(x) __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc")
#elif defined(__x86_64__)
#define __save_flags(x) do { __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
#define __restore_flags(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
#endif

#define __cli() __asm__ __volatile__("cli": : :"memory")
#define __sti() __asm__ __volatile__("sti": : :"memory")
/* Used in the idle loop; sti only takes effect after the following
 * instruction, so no interrupt can slip in between the sti and the hlt. */
#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
/* For spinlocks etc. */
#if defined(__i386__)
#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
#define local_irq_restore(x) __restore_flags(x)
#elif defined(__x86_64__)
#define local_irq_save(x) do { __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
#define local_irq_restore(x) __asm__ __volatile__("# local_irq_restore \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory")
#endif

#define local_irq_disable() __cli()
#define local_irq_enable()  __sti()
static inline int local_irq_is_enabled(void)
{
    unsigned long flags;
    __save_flags(flags);
    return !!(flags & (1<<9)); /* EFLAGS_IF */
}
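
/*
 * Illustrative usage sketch (not part of the original header): the usual
 * pattern for a short critical section that may be entered with interrupts
 * either enabled or disabled.  local_irq_save() stashes EFLAGS in 'flags'
 * (which must be an unsigned long lvalue) and disables interrupts;
 * local_irq_restore() puts the previous interrupt state back.
 *
 *     unsigned long flags;
 *     local_irq_save(flags);
 *     ... touch per-CPU or device state ...
 *     local_irq_restore(flags);
 */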
#define BROKEN_ACPI_Sx          0x0001
#define BROKEN_INIT_AFTER_S1    0x0002

#endif