direct-io.hg

view xen/include/asm-x86/system.h @ 15534:da4c76340184

NativeDom 1:1 support for x86_64, 32bitbios reloc bug fix
author Guy Zana <guy@neocleus.com>
date Wed Sep 19 10:51:46 2007 +0200 (2007-09-19)
parents d7e3224b661a

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <xen/config.h>
#include <xen/types.h>
#include <asm/bitops.h>

#define read_segment_register(name)                                    \
({  u16 __sel;                                                         \
    __asm__ __volatile__ ( "movw %%" STR(name) ",%0" : "=r" (__sel) ); \
    __sel;                                                             \
})

#define wbinvd() \
    __asm__ __volatile__ ("wbinvd": : :"memory");

#define clflush(a) \
    __asm__ __volatile__ ("clflush (%0)": :"r"(a));

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *         but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static always_inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
#if defined(__i386__)
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
#elif defined(__x86_64__)
    case 4:
        __asm__ __volatile__("xchgl %k0,%1"
                             :"=r" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
    case 8:
        __asm__ __volatile__("xchgq %0,%1"
                             :"=r" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
#endif
    }
    return x;
}
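
/*
 * Illustrative usage sketch (not part of the original header): xchg()
 * atomically stores a new value into *ptr and returns the previous
 * contents, with locked-bus semantics even without an explicit "lock"
 * prefix.  The function and variable names below are hypothetical.
 */
static inline unsigned long example_fetch_and_clear(volatile unsigned long *flag)
{
    /* Atomically write 0 and observe whatever value was there before. */
    return xchg(flag, 0UL);
}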

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

static always_inline unsigned long __cmpxchg(
    volatile void *ptr, unsigned long old, unsigned long new, int size)
{
    unsigned long prev;
    switch (size) {
    case 1:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
    case 2:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
#if defined(__i386__)
    case 4:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
#elif defined(__x86_64__)
    case 4:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
    case 8:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
#endif
    }
    return old;
}

#define __HAVE_ARCH_CMPXCHG

#if BITS_PER_LONG == 64

#define cmpxchg(ptr,o,n)                                        \
    ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),    \
                                   (unsigned long)(n),sizeof(*(ptr))))
#else

static always_inline unsigned long long __cmpxchg8b(
    volatile void *ptr, unsigned long long old, unsigned long long new)
{
    unsigned long long prev;
    __asm__ __volatile__ (
        LOCK_PREFIX "cmpxchg8b %3"
        : "=A" (prev)
        : "c" ((u32)(new>>32)), "b" ((u32)new),
          "m" (*__xg((volatile void *)ptr)), "0" (old)
        : "memory" );
    return prev;
}

#define cmpxchg(ptr,o,n)                                \
({                                                      \
    __typeof__(*(ptr)) __prev;                          \
    switch ( sizeof(*(ptr)) ) {                         \
    case 8:                                             \
        __prev = ((__typeof__(*(ptr)))__cmpxchg8b(      \
            (ptr),                                      \
            (unsigned long long)(o),                    \
            (unsigned long long)(n)));                  \
        break;                                          \
    default:                                            \
        __prev = ((__typeof__(*(ptr)))__cmpxchg(        \
            (ptr),                                      \
            (unsigned long)(o),                         \
            (unsigned long)(n),                         \
            sizeof(*(ptr))));                           \
        break;                                          \
    }                                                   \
    __prev;                                             \
})

#endif
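
/*
 * Illustrative usage sketch (not part of the original header): cmpxchg()
 * returns the value that was in memory before the operation, so a caller
 * detects success by comparing that return value with the expected old
 * value.  The function and variable names below are hypothetical.
 */
static inline void example_atomic_increment(volatile unsigned long *counter)
{
    unsigned long old, seen;

    do {
        old  = *counter;
        seen = cmpxchg(counter, old, old + 1);
    } while ( seen != old );   /* another CPU updated *counter; retry */
}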

/*
 * This function causes value _o to be changed to _n at location _p.
 * If this access causes a fault then we return 1, otherwise we return 0.
 * If no fault occurs then _o is updated to the value we saw at _p. If this
 * is the same as the initial value of _o then _n is written to location _p.
 */
#ifdef __i386__
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype)                 \
    __asm__ __volatile__ (                                              \
        "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n"             \
        "2:\n"                                                          \
        ".section .fixup,\"ax\"\n"                                      \
        "3: movl $1,%1\n"                                               \
        " jmp 2b\n"                                                     \
        ".previous\n"                                                   \
        ".section __ex_table,\"a\"\n"                                   \
        " .align 4\n"                                                   \
        " .long 1b,3b\n"                                                \
        ".previous"                                                     \
        : "=a" (_o), "=r" (_rc)                                         \
        : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
        : "memory");
#define cmpxchg_user(_p,_o,_n)                                          \
({                                                                      \
    int _rc;                                                            \
    switch ( sizeof(*(_p)) ) {                                          \
    case 1:                                                             \
        __cmpxchg_user(_p,_o,_n,"b","b","q");                           \
        break;                                                          \
    case 2:                                                             \
        __cmpxchg_user(_p,_o,_n,"w","w","r");                           \
        break;                                                          \
    case 4:                                                             \
        __cmpxchg_user(_p,_o,_n,"l","","r");                            \
        break;                                                          \
    case 8:                                                             \
        __asm__ __volatile__ (                                          \
            "1: " LOCK_PREFIX "cmpxchg8b %4\n"                          \
            "2:\n"                                                      \
            ".section .fixup,\"ax\"\n"                                  \
            "3: movl $1,%1\n"                                           \
            " jmp 2b\n"                                                 \
            ".previous\n"                                               \
            ".section __ex_table,\"a\"\n"                               \
            " .align 4\n"                                               \
            " .long 1b,3b\n"                                            \
            ".previous"                                                 \
            : "=A" (_o), "=r" (_rc)                                     \
            : "c" ((u32)((u64)(_n)>>32)), "b" ((u32)(_n)),              \
              "m" (*__xg((volatile void *)(_p))), "0" (_o), "1" (0)     \
            : "memory");                                                \
        break;                                                          \
    }                                                                   \
    _rc;                                                                \
})
#else
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype)                 \
    __asm__ __volatile__ (                                              \
        "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n"             \
        "2:\n"                                                          \
        ".section .fixup,\"ax\"\n"                                      \
        "3: movl $1,%1\n"                                               \
        " jmp 2b\n"                                                     \
        ".previous\n"                                                   \
        ".section __ex_table,\"a\"\n"                                   \
        " .align 8\n"                                                   \
        " .quad 1b,3b\n"                                                \
        ".previous"                                                     \
        : "=a" (_o), "=r" (_rc)                                         \
        : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
        : "memory");
#define cmpxchg_user(_p,_o,_n)                                          \
({                                                                      \
    int _rc;                                                            \
    switch ( sizeof(*(_p)) ) {                                          \
    case 1:                                                             \
        __cmpxchg_user(_p,_o,_n,"b","b","q");                           \
        break;                                                          \
    case 2:                                                             \
        __cmpxchg_user(_p,_o,_n,"w","w","r");                           \
        break;                                                          \
    case 4:                                                             \
        __cmpxchg_user(_p,_o,_n,"l","k","r");                           \
        break;                                                          \
    case 8:                                                             \
        __cmpxchg_user(_p,_o,_n,"q","","r");                            \
        break;                                                          \
    }                                                                   \
    _rc;                                                                \
})
#endif
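
/*
 * Illustrative usage sketch (not part of the original header): cmpxchg_user()
 * evaluates to 1 if accessing location _p faulted and to 0 otherwise; when
 * no fault occurs, _o is updated to the value seen at _p, so comparing it
 * with the expected value tells whether the new value was stored.  The
 * function and variable names below are hypothetical.
 */
static inline int example_cmpxchg_guest_word(unsigned long *addr,
                                             unsigned long expected,
                                             unsigned long desired)
{
    unsigned long old = expected;

    if ( cmpxchg_user(addr, old, desired) )
        return -1;                       /* the access faulted */
    return (old == expected) ? 0 : 1;    /* 1: another writer got there first */
}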

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPU's follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPU's to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#if defined(__i386__)
#define mb()    __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb()   __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#ifdef CONFIG_X86_OOSTORE
#define wmb()   __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#endif
#elif defined(__x86_64__)
#define mb()    __asm__ __volatile__ ("mfence":::"memory")
#define rmb()   __asm__ __volatile__ ("lfence":::"memory")
#ifdef CONFIG_X86_OOSTORE
#define wmb()   __asm__ __volatile__ ("sfence":::"memory")
#endif
#endif

#ifndef CONFIG_X86_OOSTORE
#define wmb()   __asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()    mb()
#define smp_rmb()   rmb()
#define smp_wmb()   wmb()
#else
#define smp_mb()    barrier()
#define smp_rmb()   barrier()
#define smp_wmb()   barrier()
#endif

#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
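
/*
 * Illustrative usage sketch (not part of the original header): a producer
 * publishes data and only then sets a ready flag, with smp_wmb() keeping
 * the two stores in that order as seen by other CPUs.  The function and
 * variable names below are hypothetical.
 */
static inline void example_publish(volatile int *data, volatile int *ready, int value)
{
    *data = value;
    smp_wmb();          /* make the data visible before the flag */
    *ready = 1;
}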

/* interrupt control.. */
#if defined(__i386__)
#define __save_flags(x)     __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */)
#define __restore_flags(x)  __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc")
#elif defined(__x86_64__)
#define __save_flags(x)     do { __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
#define __restore_flags(x)  __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
#endif
#define __cli()             __asm__ __volatile__("cli": : :"memory")
#define __sti()             __asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt()         __asm__ __volatile__("sti; hlt": : :"memory")

/* For spinlocks etc */
#if defined(__i386__)
#define local_irq_save(x)    __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
#define local_irq_restore(x) __restore_flags(x)
#elif defined(__x86_64__)
#define local_irq_save(x)    do { __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
#define local_irq_restore(x) __asm__ __volatile__("# local_irq_restore \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory")
#endif
#define local_irq_disable() __cli()
#define local_irq_enable()  __sti()

static inline int local_irq_is_enabled(void)
{
    unsigned long flags;
    __save_flags(flags);
    return !!(flags & (1<<9)); /* EFLAGS_IF */
}
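
/*
 * Illustrative usage sketch (not part of the original header):
 * local_irq_save() records the current EFLAGS and disables interrupts on
 * this CPU, and local_irq_restore() puts the saved state back, so
 * interrupts are re-enabled only if they were enabled on entry.  The
 * function and variable names below are hypothetical.
 */
static inline void example_irq_protected_update(volatile unsigned long *shared)
{
    unsigned long flags;

    local_irq_save(flags);
    (*shared)++;                /* not interrupted on this CPU */
    local_irq_restore(flags);
}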

#define BROKEN_ACPI_Sx          0x0001
#define BROKEN_INIT_AFTER_S1    0x0002

#endif