ia64/xen-unstable

linux-2.6-xen-sparse/include/asm-i386/atomic.h @ 11221:7c9d7fc3dce5

[HVM] Fix SMBIOS entry point copy destination.
Spotted by Xiaowei Yang <xiaowei.yang@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@localhost.localdomain
date Sat Aug 19 12:06:36 2006 +0100 (2006-08-19)
parents 2494b4e00cbb
#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/processor.h>
#include <asm/smp_alt.h>
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  { (i) }
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)          ((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)         (((v)->counter) = (i))
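
/*
 * Usage sketch (illustrative, not part of the original header): a counter is
 * initialised either statically with ATOMIC_INIT() or at run time with
 * atomic_set(), and inspected with atomic_read().  The function name below is
 * hypothetical.
 */
static __inline__ int example_reset_counter(atomic_t *counter)
{
        atomic_set(counter, 0);         /* plain store; no LOCK prefix needed */
        return atomic_read(counter);    /* plain load of counter->counter */
}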
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "addl %1,%0"
                :"=m" (v->counter)
                :"ir" (i), "m" (v->counter));
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "subl %1,%0"
                :"=m" (v->counter)
                :"ir" (i), "m" (v->counter));
}
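
/*
 * Usage sketch (illustrative): tracking a byte count with atomic_add() and
 * atomic_sub().  The example_account_* names are hypothetical.
 */
static __inline__ void example_account_alloc(atomic_t *bytes_in_use, int size)
{
        atomic_add(size, bytes_in_use);         /* LOCK addl: safe against other CPUs */
}

static __inline__ void example_account_free(atomic_t *bytes_in_use, int size)
{
        atomic_sub(size, bytes_in_use);         /* LOCK subl */
}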
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK "subl %2,%0; sete %1"
                :"=m" (v->counter), "=qm" (c)
                :"ir" (i), "m" (v->counter) : "memory");
        return c;
}
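
/*
 * Usage sketch (illustrative): atomic_sub_and_test() is convenient when
 * dropping several references at once and acting only when the count reaches
 * zero.  The helper name is hypothetical.
 */
static __inline__ int example_put_many(atomic_t *refcount, int n)
{
        /* Returns non-zero exactly when the count hits zero. */
        return atomic_sub_and_test(n, refcount);
}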
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "decl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}
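
/*
 * Usage sketch (illustrative): atomic_inc()/atomic_dec() for callers that do
 * not need the resulting value, e.g. counting requests in flight.  The names
 * are hypothetical.
 */
static __inline__ void example_enter(atomic_t *in_flight)
{
        atomic_inc(in_flight);          /* LOCK incl */
}

static __inline__ void example_exit(atomic_t *in_flight)
{
        atomic_dec(in_flight);          /* LOCK decl */
}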
/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK "decl %0; sete %1"
                :"=m" (v->counter), "=qm" (c)
                :"m" (v->counter) : "memory");
        return c != 0;
}
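
/*
 * Usage sketch (illustrative): the classic reference-count "put" pattern.
 * Only the caller that drops the last reference sees a non-zero return and
 * may free the object.  struct example_obj and the example_free callback are
 * hypothetical.
 */
struct example_obj {
        atomic_t refcnt;
};

static __inline__ void example_put(struct example_obj *obj,
                                   void (*example_free)(struct example_obj *))
{
        if (atomic_dec_and_test(&obj->refcnt))
                example_free(obj);
}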
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK "incl %0; sete %1"
                :"=m" (v->counter), "=qm" (c)
                :"m" (v->counter) : "memory");
        return c != 0;
}
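
/*
 * Usage sketch (illustrative): atomic_inc_and_test() reports when an
 * increment brings a counter back to zero, e.g. one that was biased to a
 * negative value.  The helper name is hypothetical.
 */
static __inline__ int example_inc_reached_zero(atomic_t *biased_count)
{
        return atomic_inc_and_test(biased_count);       /* true only if result == 0 */
}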
/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK "addl %2,%0; sets %1"
                :"=m" (v->counter), "=qm" (c)
                :"ir" (i), "m" (v->counter) : "memory");
        return c;
}
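
/*
 * Usage sketch (illustrative): atomic_add_negative() can detect when charging
 * a cost (adding a negative value) drives a budget below zero.  The helper
 * name is hypothetical.
 */
static __inline__ int example_charge_overdraws(atomic_t *budget, int cost)
{
        return atomic_add_negative(-cost, budget);      /* true if budget went negative */
}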
/**
 * atomic_add_return - add and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
        int __i;
#ifdef CONFIG_M386
        if (unlikely(boot_cpu_data.x86 == 3))
                goto no_xadd;
#endif
        /* Modern 486+ processor */
        __i = i;
        __asm__ __volatile__(
                LOCK "xaddl %0, %1;"
                :"=r"(i)
                :"m"(v->counter), "0"(i));
        return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
        local_irq_disable();
        __i = atomic_read(v);
        atomic_set(v, i + __i);
        local_irq_enable();
        return i + __i;
#endif
}

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
        return atomic_add_return(-i,v);
}
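
/*
 * Usage sketch (illustrative): atomic_add_return() (and atomic_inc_return(),
 * defined below) yield the new value, which makes them convenient for handing
 * out monotonically increasing sequence numbers.  The helper name is
 * hypothetical.
 */
static __inline__ int example_next_sequence(atomic_t *seq)
{
        return atomic_add_return(1, seq);       /* LOCK xaddl; returns the new value */
}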
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
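
/*
 * Usage sketch (illustrative): a compare-and-swap retry loop built on
 * atomic_cmpxchg(), here capping a counter at a caller-supplied limit.  This
 * is the same pattern atomic_add_unless() below is built on.  The helper name
 * is hypothetical.
 */
static __inline__ int example_inc_below(atomic_t *v, int limit)
{
        int old, cur = atomic_read(v);

        while (cur < limit) {
                old = atomic_cmpxchg(v, cur, cur + 1);
                if (old == cur)
                        return 1;       /* we performed the increment */
                cur = old;              /* someone else changed v; retry */
        }
        return 0;                       /* already at or above the limit */
}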
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)                                      \
({                                                                      \
        int c, old;                                                     \
        c = atomic_read(v);                                             \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
                c = old;                                                \
        c != (u);                                                       \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
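
/*
 * Usage sketch (illustrative): atomic_inc_not_zero() is the usual way to take
 * a new reference only if the object is still live (refcount not yet zero).
 * struct example_obj is the hypothetical type sketched above.
 */
static __inline__ int example_try_get(struct example_obj *obj)
{
        return atomic_inc_not_zero(&obj->refcnt);       /* 0 means the object is dying */
}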
#define atomic_inc_return(v)  (atomic_add_return(1,v))
#define atomic_dec_return(v)  (atomic_sub_return(1,v))
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
        __asm__ __volatile__(LOCK "andl %0,%1" \
                : : "r" (~(mask)), "m" (*(addr)) : "memory")

#define atomic_set_mask(mask, addr) \
        __asm__ __volatile__(LOCK "orl %0,%1" \
                : : "r" (mask), "m" (*(addr)) : "memory")
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()
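
/*
 * Usage sketch (illustrative): on x86 the LOCKed instructions already order
 * memory accesses, so these hooks reduce to a compiler barrier().  The
 * pattern below shows where smp_mb__before_atomic_dec() is meant to sit:
 * between ordinary stores and the atomic_dec() that publishes them.  The
 * names are hypothetical.
 */
static __inline__ void example_publish_and_dec(int *data, atomic_t *pending)
{
        *data = 1;                      /* ordinary store */
        smp_mb__before_atomic_dec();    /* order the store before the decrement */
        atomic_dec(pending);
}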
#include <asm-generic/atomic.h>
#endif