ia64/xen-unstable

view xen/include/asm-x86/x86_64/system.h @ 16416:05cbf512b82b

x86: rmb() can be weakened according to new Intel spec.

Both Intel and AMD agree that, from a programmer's viewpoint:
Loads cannot be reordered relative to other loads.
Stores cannot be reordered relative to other stores.

Intel64 Architecture Memory Ordering White Paper
<http://developer.intel.com/products/processor/manuals/318147.pdf>

AMD64 Architecture Programmer's Manual, Volume 2: System Programming
<http://www.amd.com/us-en/assets/content_type/\
white_papers_and_tech_docs/24593.pdf>
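
The diff itself is not shown on this page; as a hedged sketch, the weakened read barrier in the common asm-x86 header can become a pure compiler barrier, since the guarantee quoted above means no lfence is needed to order loads against loads. The barrier() definition below is the usual GCC idiom and is shown for illustration only, not as the literal hunk from this changeset:

/* Illustrative only -- not the literal change from changeset 16416. */
#define barrier()   asm volatile ( "" : : : "memory" )  /* compiler fence only */
#define rmb()       barrier()   /* loads are never reordered w.r.t. other loads */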

Signed-off-by: Keir Fraser <keir.fraser@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Nov 21 14:36:07 2007 +0000 (2007-11-21)
parents 7ccf7d373d0e
children f4c1a347311b
line source
#ifndef __X86_64_SYSTEM_H__
#define __X86_64_SYSTEM_H__

#define cmpxchg(ptr,o,n)                                                \
    ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),            \
                                   (unsigned long)(n),sizeof(*(ptr))))
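
For illustration, a hedged usage sketch (try_transition() is a hypothetical helper, not part of this header; __cmpxchg itself is provided by the common asm-x86 header). cmpxchg() evaluates to the value previously held at the location, so a caller can tell whether its update won:

/* Hypothetical example: move a state word from 'from' to 'to' only if no
 * other CPU changed it first.  Returns 1 on success, 0 if we lost the race. */
static inline int try_transition(unsigned int *state,
                                 unsigned int from, unsigned int to)
{
    return cmpxchg(state, from, to) == from;
}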

/*
 * This function causes value _o to be changed to _n at location _p.
 * If this access causes a fault then we return 1, otherwise we return 0.
 * If no fault occurs then _o is updated to the value we saw at _p. If this
 * is the same as the initial value of _o then _n is written to location _p.
 */
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype)                 \
    asm volatile (                                                      \
        "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n"             \
        "2:\n"                                                          \
        ".section .fixup,\"ax\"\n"                                      \
        "3: movl $1,%1\n"                                               \
        "   jmp 2b\n"                                                   \
        ".previous\n"                                                   \
        ".section __ex_table,\"a\"\n"                                   \
        "   .align 8\n"                                                 \
        "   .quad 1b,3b\n"                                              \
        ".previous"                                                     \
        : "=a" (_o), "=r" (_rc)                                         \
        : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
        : "memory");

#define cmpxchg_user(_p,_o,_n)                                          \
({                                                                      \
    int _rc;                                                            \
    switch ( sizeof(*(_p)) ) {                                          \
    case 1:                                                             \
        __cmpxchg_user(_p,_o,_n,"b","b","q");                           \
        break;                                                          \
    case 2:                                                             \
        __cmpxchg_user(_p,_o,_n,"w","w","r");                           \
        break;                                                          \
    case 4:                                                             \
        __cmpxchg_user(_p,_o,_n,"l","k","r");                           \
        break;                                                          \
    case 8:                                                             \
        __cmpxchg_user(_p,_o,_n,"q","","r");                            \
        break;                                                          \
    }                                                                   \
    _rc;                                                                \
})
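
For illustration, a hedged usage sketch (guest_counter_inc() is hypothetical and not part of Xen). The comment above __cmpxchg_user gives the contract, so a caller can loop until its compare-and-swap succeeds, bailing out if the access faults:

/* Hypothetical example: increment a counter living in guest-writable
 * memory.  Returns nonzero iff the access faulted. */
static inline int guest_counter_inc(uint64_t *counter)
{
    uint64_t seen = 0, expected;

    do {
        expected = seen;
        /* Store 'expected + 1' iff *counter still holds 'expected'.
         * On return 'seen' is the value actually found at *counter. */
        if ( cmpxchg_user(counter, seen, expected + 1) )
            return 1;                   /* access faulted */
    } while ( seen != expected );       /* lost a race: retry */

    return 0;
}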

/* A naturally aligned 64-bit store is atomic on x86-64, so a plain
 * assignment is sufficient here. */
static inline void atomic_write64(uint64_t *p, uint64_t v)
{
    *p = v;
}

/* Loads may still be reordered with older stores to different locations,
 * so a full barrier still requires mfence. */
#define mb()                    \
    asm volatile ( "mfence" : : : "memory" )

#define __save_flags(x)                                                 \
    asm volatile ( "pushfq ; popq %q0" : "=g" (x) : :"memory" )
#define __restore_flags(x)                                              \
    asm volatile ( "pushq %0 ; popfq" : : "g" (x) : "memory", "cc" )

#define local_irq_save(x)                                               \
    asm volatile ( "pushfq ; popq %0 ; cli" : "=g" (x) : : "memory" )
#define local_irq_restore(x)                                            \
    __restore_flags(x)
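
For illustration, a hedged usage sketch (the function and the 'shared' pointer are hypothetical). local_irq_save() stashes RFLAGS and disables interrupts on the local CPU, and local_irq_restore() puts the saved IF bit back, so the pair behaves correctly whether or not interrupts were already off:

/* Hypothetical example: a short critical section that must not be
 * interrupted on this CPU. */
static inline void bump_locally(unsigned int *shared)
{
    unsigned long flags;

    local_irq_save(flags);      /* pushfq ; popq flags ; cli */
    (*shared)++;                /* no local interrupt can intervene here */
    local_irq_restore(flags);   /* pushq flags ; popfq restores IF */
}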

#endif /* __X86_64_SYSTEM_H__ */