ia64/xen-unstable

xen/include/asm-ia64/linux-xen/asm/atomic.h @ 12009:37a8dbe24201

[IA64] compilation fix when crash_debug=y.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Author:  awilliam@xenbuild.aw
Date:    Tue Oct 31 22:25:17 2006 -0700
Parent:  fa4281cb7a5b
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
/*
 * On IA-64, counter must always be volatile to ensure that the
 * memory accesses are ordered.
 */
typedef struct { volatile __s32 counter; } atomic_t;
typedef struct { volatile __s64 counter; } atomic64_t;
#ifndef XEN
#define ATOMIC_INIT(i)          ((atomic_t) { (i) })
#define ATOMIC64_INIT(i)        ((atomic64_t) { (i) })
#else
#define ATOMIC_INIT(i)          { (i) }
#define ATOMIC64_INIT(i)        { (i) }
#endif
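
/*
 * atomic_read()/atomic_set() are plain accesses to the volatile counter;
 * only the arithmetic operations further down use cmpxchg/fetchadd.
 */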
#define atomic_read(v)          ((v)->counter)
#define atomic64_read(v)        ((v)->counter)

#define atomic_set(v,i)         (((v)->counter) = (i))
#define atomic64_set(v,i)       (((v)->counter) = (i))
static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
        __s32 old, new;
        CMPXCHG_BUGCHECK_DECL

        do {
                CMPXCHG_BUGCHECK(v);
                old = atomic_read(v);
                new = old + i;
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
        return new;
}
static __inline__ int
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
        __s64 old, new;
        CMPXCHG_BUGCHECK_DECL

        do {
                CMPXCHG_BUGCHECK(v);
                old = atomic64_read(v);
                new = old + i;
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
        return new;
}
static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
        __s32 old, new;
        CMPXCHG_BUGCHECK_DECL

        do {
                CMPXCHG_BUGCHECK(v);
                old = atomic_read(v);
                new = old - i;
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
        return new;
}
static __inline__ int
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
        __s64 old, new;
        CMPXCHG_BUGCHECK_DECL

        do {
                CMPXCHG_BUGCHECK(v);
                old = atomic64_read(v);
                new = old - i;
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
        return new;
}
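
/*
 * IA-64's fetchadd4/fetchadd8 instructions only accept the immediate
 * increments -16, -8, -4, -1, 1, 4, 8 and 16.  The *_return macros below
 * therefore use ia64_fetch_and_add() when the increment is one of those
 * compile-time constants and fall back to the cmpxchg loops above otherwise.
 */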
#define atomic_add_return(i,v) \
({ \
        int __ia64_aar_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4) \
             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16) \
             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4) \
             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
                : ia64_atomic_add(__ia64_aar_i, v); \
})

#define atomic64_add_return(i,v) \
({ \
        long __ia64_aar_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4) \
             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16) \
             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4) \
             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
                : ia64_atomic64_add(__ia64_aar_i, v); \
})
/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
        return atomic_add_return(i, v) < 0;
}

static __inline__ int
atomic64_add_negative (__s64 i, atomic64_t *v)
{
        return atomic64_add_return(i, v) < 0;
}
#define atomic_sub_return(i,v) \
({ \
        int __ia64_asr_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4) \
             || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16) \
             || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4) \
             || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
                : ia64_atomic_sub(__ia64_asr_i, v); \
})

#define atomic64_sub_return(i,v) \
({ \
        long __ia64_asr_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4) \
             || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16) \
             || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4) \
             || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
                : ia64_atomic64_sub(__ia64_asr_i, v); \
})
#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_inc_return(v)            atomic_add_return(1, (v))
#define atomic64_dec_return(v)          atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)          atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)        (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)          (atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)      (atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)        (atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)        (atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)                 atomic_add_return((i), (v))
#define atomic_sub(i,v)                 atomic_sub_return((i), (v))
#define atomic_inc(v)                   atomic_add(1, (v))
#define atomic_dec(v)                   atomic_sub(1, (v))

#define atomic64_add(i,v)               atomic64_add_return((i), (v))
#define atomic64_sub(i,v)               atomic64_sub_return((i), (v))
#define atomic64_inc(v)                 atomic64_add(1, (v))
#define atomic64_dec(v)                 atomic64_sub(1, (v))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()

#endif /* _ASM_IA64_ATOMIC_H */
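
The header's own comment points at "resource counting" as the typical use. As an illustration only, here is a minimal reference-counting sketch built on the operations defined above; the struct obj, obj_init/obj_get/obj_put names and the release() callback are invented for this example and are not part of the header, and the include line assumes this file is reachable as <asm/atomic.h> on the build's include path.

#include <asm/atomic.h>         /* this header, assuming it is on the include path */

/* Hypothetical reference-counted object; only the atomic_t field matters here. */
struct obj {
        atomic_t refcount;
        void (*release)(struct obj *);
};

static inline void obj_init(struct obj *o, void (*release)(struct obj *))
{
        atomic_set(&o->refcount, 1);    /* creator holds the first reference */
        o->release = release;
}

static inline void obj_get(struct obj *o)
{
        atomic_inc(&o->refcount);       /* take an additional reference */
}

static inline void obj_put(struct obj *o)
{
        /*
         * atomic_dec_and_test() returns true only for the caller that drops
         * the count to zero, so exactly one path ends up calling release().
         */
        if (atomic_dec_and_test(&o->refcount))
                o->release(o);
}

Because atomic_dec_and_test() expands to atomic_sub_return(1, v) == 0, the constant decrement of 1 is handled by ia64_fetch_and_add() rather than the cmpxchg loop, per the macros above.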