ia64/xen-unstable

view xen/include/asm-x86/atomic.h @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use domain::max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents cd6b3af19191
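
For illustration only, a minimal sketch of the pattern the description implies (hypothetical ia64-side code, not part of the file below): a per-domain loop bounded by d->max_vcpus instead of the build-time MAX_VCPUS constant, where `d` is a `struct domain *` and do_per_vcpu_work() is an assumed placeholder helper.

    struct vcpu *v;
    unsigned int i;

    /* was: for ( i = 0; i < MAX_VCPUS; i++ ) */
    for ( i = 0; i < d->max_vcpus; i++ )
        if ( (v = d->vcpu[i]) != NULL )
            do_per_vcpu_work(v);  /* placeholder for the per-vcpu operation */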
line source
#ifndef __ARCH_X86_ATOMIC__
#define __ARCH_X86_ATOMIC__

#include <xen/config.h>
#include <asm/system.h>

#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif

/*
 * NB. I've pushed the volatile qualifier into the operations. This allows
 * fast accessors such as _atomic_read() and _atomic_set() which don't give
 * the compiler a fit.
 */
typedef struct { int counter; } atomic_t;

#define ATOMIC_INIT(i) { (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define _atomic_read(v)  ((v).counter)
#define atomic_read(v)   (*(volatile int *)&((v)->counter))

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define _atomic_set(v,i) (((v).counter) = (i))
#define atomic_set(v,i)  (*(volatile int *)&((v)->counter) = (i))
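
/*
 * Illustrative usage sketch (not part of the original header), contrasting
 * the two flavours of accessor declared above: the underscored forms take
 * an atomic_t by value and omit the volatile cast, so they suit places
 * where no concurrent writer exists, while the plain forms take a pointer
 * and force a real memory access.  example_accessors() is hypothetical.
 */
#if 0
static void example_accessors(void)
{
    static atomic_t counter = ATOMIC_INIT(0);
    atomic_t snapshot;
    int cur;

    atomic_set(&counter, 5);        /* volatile write through the pointer */
    cur = atomic_read(&counter);    /* volatile read through the pointer  */

    _atomic_set(snapshot, cur);     /* plain by-value assignment          */
    cur = _atomic_read(snapshot);   /* plain by-value read                */
}
#endif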
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
    asm volatile(
        LOCK "addl %1,%0"
        : "=m" (*(volatile int *)&v->counter)
        : "ir" (i), "m" (*(volatile int *)&v->counter) );
}

/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
    asm volatile(
        LOCK "subl %1,%0"
        : "=m" (*(volatile int *)&v->counter)
        : "ir" (i), "m" (*(volatile int *)&v->counter) );
}
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
    unsigned char c;

    asm volatile(
        LOCK "subl %2,%0; sete %1"
        : "=m" (*(volatile int *)&v->counter), "=qm" (c)
        : "ir" (i), "m" (*(volatile int *)&v->counter) : "memory" );
    return c;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
    asm volatile(
        LOCK "incl %0"
        : "=m" (*(volatile int *)&v->counter)
        : "m" (*(volatile int *)&v->counter) );
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
    asm volatile(
        LOCK "decl %0"
        : "=m" (*(volatile int *)&v->counter)
        : "m" (*(volatile int *)&v->counter) );
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
    unsigned char c;

    asm volatile(
        LOCK "decl %0; sete %1"
        : "=m" (*(volatile int *)&v->counter), "=qm" (c)
        : "m" (*(volatile int *)&v->counter) : "memory" );
    return c != 0;
}
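
/*
 * Illustrative usage sketch (not part of the original header): the
 * inc/dec and *_and_test helpers are the usual building blocks for
 * reference counting.  `struct widget' and free_widget() are hypothetical,
 * for illustration only.
 */
#if 0
struct widget {
    atomic_t refcnt;
    /* ... payload ... */
};

static void get_widget(struct widget *w)
{
    atomic_inc(&w->refcnt);             /* take a reference */
}

static void put_widget(struct widget *w)
{
    /* Drop a reference; only the caller that drops the last one frees. */
    if ( atomic_dec_and_test(&w->refcnt) )
        free_widget(w);                 /* hypothetical destructor */
}
#endif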
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
    unsigned char c;

    asm volatile(
        LOCK "incl %0; sete %1"
        : "=m" (*(volatile int *)&v->counter), "=qm" (c)
        : "m" (*(volatile int *)&v->counter) : "memory" );
    return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
    unsigned char c;

    asm volatile(
        LOCK "addl %2,%0; sets %1"
        : "=m" (*(volatile int *)&v->counter), "=qm" (c)
        : "ir" (i), "m" (*(volatile int *)&v->counter) : "memory" );
    return c;
}
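
/**
 * atomic_compareandswap - compare and exchange atomic variable
 * @old: value expected to be found in @v
 * @new: value to store in @v if it still holds @old
 * @v: pointer of type atomic_t
 *
 * Atomically compares the value of @v with @old and, only if they are
 * equal, replaces it with @new.  Returns the value observed in @v before
 * the operation, so the swap succeeded iff the return value equals @old.
 */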
static __inline__ atomic_t atomic_compareandswap(
    atomic_t old, atomic_t new, atomic_t *v)
{
    atomic_t rc;
    rc.counter =
        __cmpxchg(&v->counter, old.counter, new.counter, sizeof(int));
    return rc;
}
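
/*
 * Illustrative usage sketch (not part of the original header): the usual
 * compare-and-swap retry loop built on atomic_compareandswap().  The update
 * is recomputed from the freshly observed value until no other CPU changes
 * the counter in between.  example_atomic_double() is hypothetical.
 */
#if 0
static void example_atomic_double(atomic_t *v)
{
    atomic_t oldv, newv, seen;

    do {
        _atomic_set(oldv, atomic_read(v));          /* snapshot current value */
        _atomic_set(newv, _atomic_read(oldv) * 2);  /* compute the update     */
        seen = atomic_compareandswap(oldv, newv, v);
    } while ( _atomic_read(seen) != _atomic_read(oldv) ); /* raced: retry */
}
#endif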
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec()  barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc()  barrier()

#endif /* __ARCH_X86_ATOMIC__ */