ia64/xen-unstable

xen/include/asm-x86/x86_32/system.h @ 16416:05cbf512b82b

x86: rmb() can be weakened according to new Intel spec.

Both Intel and AMD agree that, from a programmer's viewpoint:
Loads cannot be reordered relative to other loads.
Stores cannot be reordered relative to other stores.

Intel64 Architecture Memory Ordering White Paper
<http://developer.intel.com/products/processor/manuals/318147.pdf>

AMD64 Architecture Programmer's Manual, Volume 2: System Programming
<http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/24593.pdf>

Signed-off-by: Keir Fraser <keir.fraser@eu.citrix.com>
author    Keir Fraser <keir.fraser@citrix.com>
date      Wed Nov 21 14:36:07 2007 +0000
parents   7ccf7d373d0e
children  f4c1a347311b
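
The rmb() definition the message refers to lives outside the file shown below (only mb() appears here), so the actual hunk is not visible in this view. As a minimal sketch of what the quoted ordering rules permit, assuming the usual barrier() compiler-only barrier macro:

#define barrier()  asm volatile ( "" : : : "memory" )  /* compiler barrier only   */
#define rmb()      barrier()  /* loads are never reordered with other loads on x86  */
#define wmb()      barrier()  /* stores are never reordered with other stores on x86 */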

#ifndef __X86_32_SYSTEM_H__
#define __X86_32_SYSTEM_H__

/*
 * Atomically compare-and-exchange the 8-byte value at ptr: if it still
 * equals old, replace it with new; either way return the value previously
 * found there.
 */
static always_inline unsigned long long __cmpxchg8b(
    volatile void *ptr, unsigned long long old, unsigned long long new)
{
    unsigned long long prev;
    asm volatile (
        LOCK_PREFIX "cmpxchg8b %3"
        : "=A" (prev)
        : "c" ((u32)(new>>32)), "b" ((u32)new),
          "m" (*__xg((volatile void *)ptr)), "0" (old)
        : "memory" );
    return prev;
}

#define cmpxchg(ptr,o,n)                                \
({                                                      \
    __typeof__(*(ptr)) __prev;                          \
    switch ( sizeof(*(ptr)) ) {                         \
    case 8:                                             \
        __prev = ((__typeof__(*(ptr)))__cmpxchg8b(      \
            (ptr),                                      \
            (unsigned long long)(o),                    \
            (unsigned long long)(n)));                  \
        break;                                          \
    default:                                            \
        __prev = ((__typeof__(*(ptr)))__cmpxchg(        \
            (ptr),                                      \
            (unsigned long)(o),                         \
            (unsigned long)(n),                         \
            sizeof(*(ptr))));                           \
        break;                                          \
    }                                                   \
    __prev;                                             \
})
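
/*
 * Illustrative sketch, not part of this header: a lock-free increment built
 * on cmpxchg().  The helper name is hypothetical; the loop simply retries
 * with the freshly observed value whenever another CPU won the race.
 */
static inline unsigned long example_atomic_inc(volatile unsigned long *ctr)
{
    unsigned long old, seen;

    do {
        old  = *ctr;
        seen = cmpxchg(ctr, old, old + 1);
    } while ( seen != old );        /* seen != old => someone beat us; retry */

    return old + 1;
}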

/*
 * This function causes value _o to be changed to _n at location _p.
 * If this access causes a fault then we return 1, otherwise we return 0.
 * If no fault occurs then _o is updated to the value we saw at _p. If this
 * is the same as the initial value of _o then _n is written to location _p.
 */
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype)                 \
    asm volatile (                                                      \
        "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n"             \
        "2:\n"                                                          \
        ".section .fixup,\"ax\"\n"                                      \
        "3: movl $1,%1\n"                                               \
        "   jmp 2b\n"                                                   \
        ".previous\n"                                                   \
        ".section __ex_table,\"a\"\n"                                   \
        "   .align 4\n"                                                 \
        "   .long 1b,3b\n"                                              \
        ".previous"                                                     \
        : "=a" (_o), "=r" (_rc)                                         \
        : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
        : "memory");

#define cmpxchg_user(_p,_o,_n)                                          \
({                                                                      \
    int _rc;                                                            \
    switch ( sizeof(*(_p)) ) {                                          \
    case 1:                                                             \
        __cmpxchg_user(_p,_o,_n,"b","b","q");                           \
        break;                                                          \
    case 2:                                                             \
        __cmpxchg_user(_p,_o,_n,"w","w","r");                           \
        break;                                                          \
    case 4:                                                             \
        __cmpxchg_user(_p,_o,_n,"l","","r");                            \
        break;                                                          \
    case 8:                                                             \
        asm volatile (                                                  \
            "1: " LOCK_PREFIX "cmpxchg8b %4\n"                          \
            "2:\n"                                                      \
            ".section .fixup,\"ax\"\n"                                  \
            "3: movl $1,%1\n"                                           \
            "   jmp 2b\n"                                               \
            ".previous\n"                                               \
            ".section __ex_table,\"a\"\n"                               \
            "   .align 4\n"                                             \
            "   .long 1b,3b\n"                                          \
            ".previous"                                                 \
            : "=A" (_o), "=r" (_rc)                                     \
            : "c" ((u32)((u64)(_n)>>32)), "b" ((u32)(_n)),              \
              "m" (*__xg((volatile void *)(_p))), "0" (_o), "1" (0)     \
            : "memory");                                                \
        break;                                                          \
    }                                                                   \
    _rc;                                                                \
})
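
/*
 * Illustrative sketch, not part of this header: updating a word in guest
 * memory with cmpxchg_user().  The helper name is hypothetical.  A non-zero
 * return from cmpxchg_user() means the access faulted; otherwise 'seen' has
 * been updated to the value actually found at the location, and the new
 * value was stored only if that matched what we expected.
 */
static inline int example_update_guest_word(
    unsigned long *guest_ptr, unsigned long expect, unsigned long new)
{
    unsigned long seen = expect;

    if ( cmpxchg_user(guest_ptr, seen, new) )
        return -1;                      /* the access faulted */

    return (seen == expect) ? 0 : 1;    /* 1 => the word changed under us */
}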

/*
 * Install a 64-bit value with a single 8-byte cmpxchg8b, retrying until the
 * value we observed is the one we replace, so the store is never seen
 * half-done by other CPUs.
 */
static inline void atomic_write64(uint64_t *p, uint64_t v)
{
    uint64_t w = *p, x;
    while ( (x = __cmpxchg8b(p, w, v)) != w )
        w = x;
}
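
/*
 * Illustrative sketch, not part of this header: on 32-bit x86 a plain 64-bit
 * assignment compiles to two 32-bit stores, so a reader on another CPU can
 * observe one half of the old value and one half of the new one.
 * atomic_write64() installs the whole 8 bytes in one go, so readers only
 * ever see the old value or the new value.  The helper name is hypothetical.
 */
static inline void example_publish_stamp(uint64_t *shared_stamp, uint64_t now)
{
    /* "*shared_stamp = now;" could be observed half-written. */
    atomic_write64(shared_stamp, now);
}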

#define mb()                                            \
    asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
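
/*
 * Illustrative sketch, not part of this header: the one reordering x86 does
 * permit is a store being delayed past a later load from a different
 * address, which is why mb() above keeps a LOCK'd instruction rather than
 * being weakened to a compiler barrier like rmb().  In this Dekker-style
 * handshake (hypothetical helper), dropping the mb() would let both CPUs
 * read 0 from the other's flag and enter the critical region together.
 */
static inline int example_try_enter(volatile int *my_flag, volatile int *peer_flag)
{
    *my_flag = 1;                   /* announce our intent ...               */
    mb();                           /* ... and make the store visible before */
    return (*peer_flag == 0);       /*     we inspect the peer's flag        */
}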

#define __save_flags(x)                                                 \
    asm volatile ( "pushfl ; popl %0" : "=g" (x) : )
#define __restore_flags(x)                                              \
    asm volatile ( "pushl %0 ; popfl" : : "g" (x) : "memory", "cc" )

#define local_irq_save(x)                                               \
    asm volatile ( "pushfl ; popl %0 ; cli" : "=g" (x) : : "memory" )
#define local_irq_restore(x)                                            \
    __restore_flags(x)
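
/*
 * Illustrative sketch, not part of this header: the usual pairing for a
 * short critical section on the local CPU (hypothetical helper).
 * local_irq_save() stashes EFLAGS and executes cli; local_irq_restore()
 * pops EFLAGS back, re-enabling interrupts only if they were enabled on
 * entry.
 */
static inline void example_bump_percpu_counter(unsigned long *counter)
{
    unsigned long flags;

    local_irq_save(flags);          /* no interrupt can run on this CPU now */
    (*counter)++;
    local_irq_restore(flags);       /* back to the caller's interrupt state */
}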

#endif /* __X86_32_SYSTEM_H__ */