
xen/include/asm-x86/system.h @ 16416:05cbf512b82b

x86: rmb() can be weakened according to new Intel spec.

Both Intel and AMD agree that, from a programmer's viewpoint:
Loads cannot be reordered relative to other loads.
Stores cannot be reordered relative to other stores.

Intel64 Architecture Memory Ordering White Paper
<http://developer.intel.com/products/processor/manuals/318147.pdf>

AMD64 Architecture Programmer's Manual, Volume 2: System Programming
<http://www.amd.com/us-en/assets/content_type/\
white_papers_and_tech_docs/24593.pdf>

Signed-off-by: Keir Fraser <keir.fraser@eu.citrix.com>
author  Keir Fraser <keir.fraser@citrix.com>
date    Wed Nov 21 14:36:07 2007 +0000

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <xen/config.h>
#include <xen/types.h>
#include <asm/bitops.h>

#define read_segment_register(name)                             \
({  u16 __sel;                                                  \
    asm volatile ( "movw %%" STR(name) ",%0" : "=r" (__sel) );  \
    __sel;                                                      \
})

#define wbinvd() \
    asm volatile ( "wbinvd" : : : "memory" )

#define clflush(a) \
    asm volatile ( "clflush (%0)" : : : "r"(a) )

#define nop() \
    asm volatile ( "nop" )

#define xchg(ptr,v) \
    ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

#if defined(__i386__)
# include <asm/x86_32/system.h>
#elif defined(__x86_64__)
# include <asm/x86_64/system.h>
#endif

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 * but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static always_inline unsigned long __xchg(
    unsigned long x, volatile void *ptr, int size)
{
    switch ( size )
    {
    case 1:
        asm volatile ( "xchgb %b0,%1"
                       : "=q" (x)
                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
                       : "memory" );
        break;
    case 2:
        asm volatile ( "xchgw %w0,%1"
                       : "=r" (x)
                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
                       : "memory" );
        break;
#if defined(__i386__)
    case 4:
        asm volatile ( "xchgl %0,%1"
                       : "=r" (x)
                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
                       : "memory" );
        break;
#elif defined(__x86_64__)
    case 4:
        asm volatile ( "xchgl %k0,%1"
                       : "=r" (x)
                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
                       : "memory" );
        break;
    case 8:
        asm volatile ( "xchgq %0,%1"
                       : "=r" (x)
                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
                       : "memory" );
        break;
#endif
    }
    return x;
}
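
/*
 * Illustrative usage sketch for xchg(): hand a pointer off to another
 * context and take ownership of whatever was stored there before.  No
 * explicit barrier is needed because the implied lock makes the exchange
 * fully serialising.  The name example_swap_pointer is hypothetical and
 * not part of this interface.
 */
static inline void *example_swap_pointer(void **slot, void *new)
{
    return xchg(slot, new);
}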

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

static always_inline unsigned long __cmpxchg(
    volatile void *ptr, unsigned long old, unsigned long new, int size)
{
    unsigned long prev;
    switch ( size )
    {
    case 1:
        asm volatile ( LOCK_PREFIX "cmpxchgb %b1,%2"
                       : "=a" (prev)
                       : "q" (new), "m" (*__xg((volatile void *)ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
    case 2:
        asm volatile ( LOCK_PREFIX "cmpxchgw %w1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
#if defined(__i386__)
    case 4:
        asm volatile ( LOCK_PREFIX "cmpxchgl %1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
#elif defined(__x86_64__)
    case 4:
        asm volatile ( LOCK_PREFIX "cmpxchgl %k1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
    case 8:
        asm volatile ( LOCK_PREFIX "cmpxchgq %1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
#endif
    }
    return old;
}

#define __HAVE_ARCH_CMPXCHG
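
/*
 * Illustrative usage sketch for the cmpxchg primitive: a lock-free
 * increment.  Success is detected by comparing the returned value with
 * the expected old value, as the comment above describes; on failure the
 * loop re-reads and retries.  The name example_atomic_inc is hypothetical
 * and not part of this interface.
 */
static inline void example_atomic_inc(volatile unsigned long *counter)
{
    unsigned long old, prev;
    do {
        old  = *counter;
        prev = __cmpxchg(counter, old, old + 1, sizeof(*counter));
    } while ( prev != old );
}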

/*
 * Both Intel and AMD agree that, from a programmer's viewpoint:
 *  Loads cannot be reordered relative to other loads.
 *  Stores cannot be reordered relative to other stores.
 *
 * Intel64 Architecture Memory Ordering White Paper
 * <http://developer.intel.com/products/processor/manuals/318147.pdf>
 *
 * AMD64 Architecture Programmer's Manual, Volume 2: System Programming
 * <http://www.amd.com/us-en/assets/content_type/\
 *  white_papers_and_tech_docs/24593.pdf>
 */
#define rmb()           barrier()
#define wmb()           barrier()

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#endif
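
/*
 * Illustrative usage sketch for the barriers above: publishing data to
 * another CPU.  Because x86 does not reorder stores with stores or loads
 * with loads, smp_wmb()/smp_rmb() compile down to compiler barriers only,
 * but the pairing still documents (and enforces at the compiler level) the
 * required ordering.  The example_* names are hypothetical.
 */
extern unsigned long example_payload;
extern int example_ready;

static inline void example_publish(unsigned long v)
{
    example_payload = v;
    smp_wmb();                  /* order payload store before flag store */
    example_ready = 1;
}

static inline int example_consume(unsigned long *out)
{
    if ( !example_ready )
        return 0;
    smp_rmb();                  /* order flag load before payload load */
    *out = example_payload;
    return 1;
}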

#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define local_irq_disable()     asm volatile ( "cli" : : : "memory" )
#define local_irq_enable()      asm volatile ( "sti" : : : "memory" )

/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt()     asm volatile ( "sti; hlt" : : : "memory" )
/* used when interrupts are already enabled or to shutdown the processor */
#define halt()          asm volatile ( "hlt" : : : "memory" )
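
/*
 * Illustrative sketch of the idle-loop pattern the comments above refer
 * to: check for pending work with interrupts disabled, then either
 * re-enable interrupts or halt.  safe_halt() is race-free because the
 * instruction following "sti" executes before any interrupt is taken, so
 * no wake-up can slip in between enabling interrupts and halting.  The
 * predicate example_work_pending() is hypothetical.
 */
extern int example_work_pending(void);

static inline void example_idle_once(void)
{
    local_irq_disable();
    if ( example_work_pending() )
        local_irq_enable();
    else
        safe_halt();
}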

static inline int local_irq_is_enabled(void)
{
    unsigned long flags;
    __save_flags(flags);
    return !!(flags & (1<<9)); /* EFLAGS_IF */
}

#define BROKEN_ACPI_Sx          0x0001
#define BROKEN_INIT_AFTER_S1    0x0002

void trap_init(void);
void percpu_traps_init(void);
void subarch_percpu_traps_init(void);

#endif