ia64/linux-2.6.18-xen.hg

view include/asm-i386/atomic.h @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain, as this is done in
machine_kexec_setup_resources() using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 10:55:18 2008 +0000 (2008-02-28)
parents 831230e53067
#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__

#include <linux/compiler.h>
#include <asm/processor.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))
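
/*
 * Usage sketch (illustrative, not part of the original header; the
 * counter name 'pending' is hypothetical): declaring, initialising,
 * reading and setting an atomic counter:
 *
 *	static atomic_t pending = ATOMIC_INIT(0);
 *
 *	atomic_set(&pending, 5);
 *	printk("pending = %d\n", atomic_read(&pending));
 */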

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "addl %1,%0"
		:"+m" (v->counter)
		:"ir" (i));
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "subl %1,%0"
		:"+m" (v->counter)
		:"ir" (i));
}
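
/*
 * Usage sketch (illustrative; 'bytes_queued' is a hypothetical
 * counter): each call is a single locked read-modify-write, so
 * concurrent updates from other CPUs cannot be lost:
 *
 *	atomic_add(len, &bytes_queued);
 *	atomic_sub(len, &bytes_queued);
 */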

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "subl %2,%0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		:"ir" (i) : "memory");
	return c;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0"
		:"+m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "decl %0"
		:"+m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "decl %0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		: : "memory");
	return c != 0;
}
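
/*
 * Usage sketch (illustrative; 'obj', its 'refcnt' field and
 * my_obj_free() are hypothetical): the classic reference-count
 * release pattern. The locked decrement guarantees exactly one CPU
 * sees the counter reach zero, so exactly one caller frees the
 * object:
 *
 *	atomic_inc(&obj->refcnt);
 *	...
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		my_obj_free(obj);
 */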

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "incl %0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		: : "memory");
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "addl %2,%0; sets %1"
		:"+m" (v->counter), "=qm" (c)
		:"ir" (i) : "memory");
	return c;
}
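
/*
 * Usage sketch (illustrative; 'avail' is a hypothetical credit
 * counter): atomic_add_negative() consumes credit and detects
 * overdraw in a single atomic step:
 *
 *	if (atomic_add_negative(-cost, &avail)) {
 *		... over budget: undo the charge or wait ...
 *	}
 */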

/**
 * atomic_add_return - add and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i;
#ifdef CONFIG_M386
	unsigned long flags;
	if (unlikely(boot_cpu_data.x86 == 3))
		goto no_xadd;
#endif
	/* Modern 486+ processor */
	__i = i;
	__asm__ __volatile__(
		LOCK_PREFIX "xaddl %0, %1;"
		:"=r"(i)
		:"m"(v->counter), "0"(i));
	/* xadd left the old value of v->counter in i */
	return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
	local_irq_save(flags);
	__i = atomic_read(v);
	atomic_set(v, i + __i);
	local_irq_restore(flags);
	return i + __i;
#endif
}

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);
}
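
/*
 * Usage sketch (illustrative; 'next_id' is hypothetical): the
 * returning variants hand back the new value, which makes a simple
 * lock-free ID allocator:
 *
 *	static atomic_t next_id = ATOMIC_INIT(0);
 *
 *	int id = atomic_add_return(1, &next_id);
 *
 * atomic_inc_return(), defined below, is shorthand for the same call.
 */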

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = atomic_cmpxchg((v), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
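
/*
 * Usage sketch (illustrative; 'obj' and its 'refcnt' field are
 * hypothetical): atomic_inc_not_zero() takes a reference on an
 * object found via lookup without racing against its teardown; the
 * cmpxchg loop above retries until either @v equals @u or the add
 * succeeds on an unchanged value:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	object is already being freed
 */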

#define atomic_inc_return(v)	(atomic_add_return(1,v))
#define atomic_dec_return(v)	(atomic_sub_return(1,v))

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)),"m" (*(addr)) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" (mask),"m" (*(addr)) : "memory")
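
/*
 * Usage sketch (illustrative; 'hw_flags' and the bit value are
 * hypothetical): unlike the functions above, these act on a plain
 * unsigned long rather than an atomic_t:
 *
 *	static unsigned long hw_flags;
 *
 *	atomic_set_mask(0x4, &hw_flags);	set bit 2
 *	atomic_clear_mask(0x4, &hw_flags);	clear bit 2
 */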

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
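
/*
 * Usage sketch (illustrative; 'data_ready' and 'pending' are
 * hypothetical): because x86 locked operations already order memory,
 * these only need to be compiler barriers. Portable code still calls
 * them so that architectures with weaker atomics emit a real fence:
 *
 *	data_ready = 1;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&pending);
 */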

#include <asm-generic/atomic.h>
#endif