ia64/xen-unstable

view linux-2.6-xen-sparse/include/asm-i386/rwsem.h @ 11221:7c9d7fc3dce5

[HVM] Fix SMBIOS entry point copy destination.
Spotted by Xiaowei Yang <xiaowei.yang@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@localhost.localdomain
date Sat Aug 19 12:06:36 2006 +0100 (2006-08-19)
parents 5a63f675107c
children
line source
1 /* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
2 *
3 * Written by David Howells (dhowells@redhat.com).
4 *
5 * Derived from asm-i386/semaphore.h
6 *
7 *
8 * The MSW of the count is the negated number of active writers and waiting
9 * lockers, and the LSW is the total number of active locks
10 *
11 * The lock count is initialized to 0 (no active and no waiting lockers).
12 *
13 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
14 * uncontended lock. This can be determined because XADD returns the old value.
15 * Readers increment by 1 and see a positive value when uncontended, negative
16 * if there are writers (and maybe readers) waiting (in which case it goes to
17 * sleep).
18 *
19 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
20 * be extended to 65534 by manually checking the whole MSW rather than relying
21 * on the S flag.
22 *
23 * The value of ACTIVE_BIAS supports up to 65535 active processes.
24 *
25 * This should be totally fair - if anything is waiting, a process that wants a
26 * lock will go to the back of the queue. When the currently active lock is
27 * released, if there's a writer at the front of the queue, then that and only
28 * that will be woken up; if there's a bunch of consecutive readers at the
29 * front, then they'll all be woken up, but no other readers will be.
30 */
32 #ifndef _I386_RWSEM_H
33 #define _I386_RWSEM_H
35 #ifndef _LINUX_RWSEM_H
36 #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
37 #endif
39 #ifdef __KERNEL__
41 #include <linux/list.h>
42 #include <linux/spinlock.h>
43 #include <asm/smp_alt.h>
/* Opaque per-waiter record; defined by the generic rwsem implementation. */
45 struct rwsem_waiter;
/*
 * Out-of-line slow paths (implemented elsewhere), invoked from the inline
 * asm fast paths below only on contention.  The asm call sites leave the
 * semaphore pointer in %eax (note the "a"(sem) constraints below), matching
 * the FASTCALL convention of these helpers.
 */
47 extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
48 extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
49 extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *));
50 extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *sem));
52 /*
53 * the semaphore definition
54 */
55 struct rw_semaphore {
/*
 * Count encoding (see the header comment at the top of this file):
 * LSW = number of active lockers, MSW = -(active writers + waiters).
 * 0 means unlocked with nobody waiting.
 */
56 signed long count;
57 #define RWSEM_UNLOCKED_VALUE 0x00000000
/* each active locker (reader or writer) contributes +1 to the LSW */
58 #define RWSEM_ACTIVE_BIAS 0x00000001
/* mask isolating the active-locker count in the LSW */
59 #define RWSEM_ACTIVE_MASK 0x0000ffff
/* each waiter/writer contributes -1 to the MSW */
60 #define RWSEM_WAITING_BIAS (-0x00010000)
61 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
/* a writer is both "active" and "waiting-class": 0xffff0001 */
62 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
/* protects wait_list; the count itself is manipulated locklessly */
63 spinlock_t wait_lock;
/* FIFO queue of struct rwsem_waiter entries */
64 struct list_head wait_list;
65 #if RWSEM_DEBUG
66 int debug;
67 #endif
68 };
70 /*
71 * initialisation
72 */
/*
 * __RWSEM_DEBUG_INIT supplies the initializer for the optional ->debug
 * field, INCLUDING the leading comma -- that is why __RWSEM_INITIALIZER
 * has no comma before it.
 */
73 #if RWSEM_DEBUG
74 #define __RWSEM_DEBUG_INIT , 0
75 #else
76 #define __RWSEM_DEBUG_INIT /* */
77 #endif
/* static initializer: unlocked count, unlocked spinlock, empty wait queue */
79 #define __RWSEM_INITIALIZER(name) \
80 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
81 __RWSEM_DEBUG_INIT }
/* define and statically initialise an rw_semaphore in one go */
83 #define DECLARE_RWSEM(name) \
84 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
86 static inline void init_rwsem(struct rw_semaphore *sem)
87 {
88 sem->count = RWSEM_UNLOCKED_VALUE;
89 spin_lock_init(&sem->wait_lock);
90 INIT_LIST_HEAD(&sem->wait_list);
91 #if RWSEM_DEBUG
92 sem->debug = 0;
93 #endif
94 }
96 /*
97 * lock for reading
98 */
/*
 * Fast path: LOCKed incl adds RWSEM_ACTIVE_READ_BIAS to ->count; the sign
 * flag reflects the NEW value, so a negative result (a writer is active or
 * waiting -- see the bias scheme in the file header) branches to the
 * out-of-line section, which preserves the caller-clobbered %ecx/%edx and
 * calls rwsem_down_read_failed() with the semaphore pointer in %eax.
 */
99 static inline void __down_read(struct rw_semaphore *sem)
100 {
101 __asm__ __volatile__(
102 "# beginning down_read\n\t"
103 LOCK " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */
104 " js 2f\n\t" /* jump if we weren't granted the lock */
105 "1:\n\t"
106 LOCK_SECTION_START("")
107 "2:\n\t"
108 " pushl %%ecx\n\t"
109 " pushl %%edx\n\t"
110 " call rwsem_down_read_failed\n\t"
111 " popl %%edx\n\t"
112 " popl %%ecx\n\t"
113 " jmp 1b\n"
114 LOCK_SECTION_END
115 "# ending down_read\n\t"
116 : "=m"(sem->count)
117 : "a"(sem), "m"(sem->count)
118 : "memory", "cc");
119 }
121 /*
122 * trylock for reading -- returns 1 if successful, 0 if contention
123 */
/*
 * Speculative cmpxchg loop: "result" holds the count value we based the
 * attempt on, "tmp" that value plus the reader bias.  If the biased value
 * would be <= 0 (writer active or waiting) we bail out to label 2 without
 * writing; if the cmpxchg loses a race, %eax has been reloaded with the
 * current count and we retry.  Success is therefore "result >= 0".
 */
124 static inline int __down_read_trylock(struct rw_semaphore *sem)
125 {
126 __s32 result, tmp;
127 __asm__ __volatile__(
128 "# beginning __down_read_trylock\n\t"
129 " movl %0,%1\n\t"
130 "1:\n\t"
131 " movl %1,%2\n\t"
132 " addl %3,%2\n\t"
133 " jle 2f\n\t"
134 LOCK " cmpxchgl %2,%0\n\t"
135 " jnz 1b\n\t"
136 "2:\n\t"
137 "# ending __down_read_trylock\n\t"
138 : "+m"(sem->count), "=&a"(result), "=&r"(tmp)
139 : "i"(RWSEM_ACTIVE_READ_BIAS)
140 : "memory", "cc");
141 return result>=0 ? 1 : 0;
142 }
144 /*
145 * lock for writing
146 */
/*
 * Fast path: xadd the write bias (0xffff0001) into ->count, leaving the
 * OLD value in %edx.  Only an old value of exactly zero (no lockers, no
 * waiters) means we took the lock uncontended; otherwise the out-of-line
 * section calls rwsem_down_write_failed() (%eax already holds sem; %edx
 * is dead here, so only %ecx needs saving).
 */
147 static inline void __down_write(struct rw_semaphore *sem)
148 {
149 int tmp;
151 tmp = RWSEM_ACTIVE_WRITE_BIAS;
152 __asm__ __volatile__(
153 "# beginning down_write\n\t"
154 LOCK " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
155 " testl %%edx,%%edx\n\t" /* was the count 0 before? */
156 " jnz 2f\n\t" /* jump if we weren't granted the lock */
157 "1:\n\t"
158 LOCK_SECTION_START("")
159 "2:\n\t"
160 " pushl %%ecx\n\t"
161 " call rwsem_down_write_failed\n\t"
162 " popl %%ecx\n\t"
163 " jmp 1b\n"
164 LOCK_SECTION_END
165 "# ending down_write"
166 : "=m"(sem->count), "=d"(tmp)
167 : "a"(sem), "1"(tmp), "m"(sem->count)
168 : "memory", "cc");
169 }
171 /*
172 * trylock for writing -- returns 1 if successful, 0 if contention
173 */
174 static inline int __down_write_trylock(struct rw_semaphore *sem)
175 {
176 signed long ret = cmpxchg(&sem->count,
177 RWSEM_UNLOCKED_VALUE,
178 RWSEM_ACTIVE_WRITE_BIAS);
179 if (ret == RWSEM_UNLOCKED_VALUE)
180 return 1;
181 return 0;
182 }
184 /*
185 * unlock after reading
186 */
/*
 * Drop one reader: xadd -1, with the OLD count left in %edx.  The sign
 * flag reflects the NEW count, so a negative result (waiters queued)
 * goes out of line, where "decw %dx" tests the old count's low word:
 * if it was 1 this was the last active locker and rwsem_wake() runs;
 * otherwise other readers are still active and nothing is woken.
 */
187 static inline void __up_read(struct rw_semaphore *sem)
188 {
189 __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
190 __asm__ __volatile__(
191 "# beginning __up_read\n\t"
192 LOCK " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
193 " js 2f\n\t" /* jump if the lock is being waited upon */
194 "1:\n\t"
195 LOCK_SECTION_START("")
196 "2:\n\t"
197 " decw %%dx\n\t" /* do nothing if still outstanding active readers */
198 " jnz 1b\n\t"
199 " pushl %%ecx\n\t"
200 " call rwsem_wake\n\t"
201 " popl %%ecx\n\t"
202 " jmp 1b\n"
203 LOCK_SECTION_END
204 "# ending __up_read\n"
205 : "=m"(sem->count), "=d"(tmp)
206 : "a"(sem), "1"(tmp), "m"(sem->count)
207 : "memory", "cc");
208 }
210 /*
211 * unlock after writing
212 */
/*
 * Release the write lock: add -RWSEM_ACTIVE_WRITE_BIAS, taking an
 * uncontended count of 0xffff0001 back to zero.  A non-zero new count
 * means waiters are queued; out of line, "decw %dx" checks the old
 * count's low word (the active count including us) before calling
 * rwsem_wake().  %edx is loaded inside the asm, hence the clobber.
 */
213 static inline void __up_write(struct rw_semaphore *sem)
214 {
215 __asm__ __volatile__(
216 "# beginning __up_write\n\t"
217 " movl %2,%%edx\n\t"
218 LOCK " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
219 " jnz 2f\n\t" /* jump if the lock is being waited upon */
220 "1:\n\t"
221 LOCK_SECTION_START("")
222 "2:\n\t"
223 " decw %%dx\n\t" /* did the active count reduce to 0? */
224 " jnz 1b\n\t" /* jump back if not */
225 " pushl %%ecx\n\t"
226 " call rwsem_wake\n\t"
227 " popl %%ecx\n\t"
228 " jmp 1b\n"
229 LOCK_SECTION_END
230 "# ending __up_write\n"
231 : "=m"(sem->count)
232 : "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count)
233 : "memory", "cc", "edx");
234 }
236 /*
237 * downgrade write lock to read lock
238 */
/*
 * Convert the held write lock into a read lock by adding -WAITING_BIAS
 * (i.e. +0x00010000): the writer's MSW bias is cancelled, leaving a
 * plain active-reader count of 1.  If the result is still negative,
 * other waiters remain queued and rwsem_downgrade_wake() is called so
 * queued readers can join us (%ecx/%edx saved around the call).
 */
239 static inline void __downgrade_write(struct rw_semaphore *sem)
240 {
241 __asm__ __volatile__(
242 "# beginning __downgrade_write\n\t"
243 LOCK " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
244 " js 2f\n\t" /* jump if the lock is being waited upon */
245 "1:\n\t"
246 LOCK_SECTION_START("")
247 "2:\n\t"
248 " pushl %%ecx\n\t"
249 " pushl %%edx\n\t"
250 " call rwsem_downgrade_wake\n\t"
251 " popl %%edx\n\t"
252 " popl %%ecx\n\t"
253 " jmp 1b\n"
254 LOCK_SECTION_END
255 "# ending __downgrade_write\n"
256 : "=m"(sem->count)
257 : "a"(sem), "i"(-RWSEM_WAITING_BIAS), "m"(sem->count)
258 : "memory", "cc");
259 }
261 /*
262 * implement atomic add functionality
263 */
/* Atomically add "delta" to the semaphore count; no value is returned. */
264 static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
265 {
266 __asm__ __volatile__(
267 LOCK "addl %1,%0"
268 : "=m"(sem->count)
269 : "ir"(delta), "m"(sem->count));
270 }
272 /*
273 * implement exchange and add functionality
274 */
/*
 * Atomic exchange-and-add: xadd leaves the OLD count in "tmp", so
 * tmp + delta reconstructs the new count, which is returned.
 */
275 static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
276 {
277 int tmp = delta;
279 __asm__ __volatile__(
280 LOCK "xadd %0,(%2)"
281 : "+r"(tmp), "=m"(sem->count)
282 : "r"(sem), "m"(sem->count)
283 : "memory");
285 return tmp+delta;
286 }
288 static inline int rwsem_is_locked(struct rw_semaphore *sem)
289 {
290 return (sem->count != 0);
291 }
293 #endif /* __KERNEL__ */
294 #endif /* _I386_RWSEM_H */