ia64/xen-unstable
linux-2.6-xen-sparse/include/asm-xen/asm-i386/spinlock.h @ 8534:da7873110bbb

Tiny bootstrap cleanup.

Signed-off-by: Keir Fraser <keir@xensource.com>

author  kaf24@firebug.cl.cam.ac.uk
date    Mon Jan 09 19:46:46 2006 +0100
parents 5ffe3f16aa52
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/smp_alt.h>

asmlinkage int printk(const char * fmt, ...)
        __attribute__ ((format (printf, 1, 2)));

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

typedef struct {
        volatile unsigned int slock;
#ifdef CONFIG_DEBUG_SPINLOCK
        unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} spinlock_t;

#define SPINLOCK_MAGIC  0xdead4ead

#ifdef CONFIG_DEBUG_SPINLOCK
#define SPINLOCK_MAGIC_INIT     , SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT     /* */
#endif

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
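
/*
 * Illustrative usage sketch (editorial note, not part of the original
 * header): callers normally go through the spin_lock()/spin_unlock()
 * wrappers in <linux/spinlock.h>, which add preempt handling on top of the
 * _raw_* primitives defined below.  Guarding a shared counter would
 * typically look like this; "stats_lock", "stats_count" and "stats_bump"
 * are hypothetical names used only for illustration:
 *
 *      static spinlock_t stats_lock = SPIN_LOCK_UNLOCKED;
 *      static unsigned long stats_count;
 *
 *      void stats_bump(void)
 *      {
 *              spin_lock(&stats_lock);         // roughly: preempt_disable() + _raw_spin_lock()
 *              stats_count++;
 *              spin_unlock(&stats_lock);       // roughly: _raw_spin_unlock() + preempt_enable()
 *      }
 */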
/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */

#define spin_is_locked(x)       (*(volatile signed char *)(&(x)->slock) <= 0)
#define spin_unlock_wait(x)     do { barrier(); } while(spin_is_locked(x))

#define spin_lock_string \
        "1:\n" \
        LOCK \
        "decb %0\n\t" \
        "jns 3f\n" \
        "2:\t" \
        "rep;nop\n\t" \
        "cmpb $0,%0\n\t" \
        "jle 2b\n\t" \
        "jmp 1b\n" \
        "3:\n\t"

#define spin_lock_string_flags \
        "1:\n" \
        LOCK \
        "decb %0\n\t" \
        "jns 4f\n\t" \
        "2:\t" \
        "testl $0x200, %1\n\t" \
        "jz 3f\n\t" \
        "#sti\n\t" \
        "3:\t" \
        "rep;nop\n\t" \
        "cmpb $0, %0\n\t" \
        "jle 3b\n\t" \
        "#cli\n\t" \
        "jmp 1b\n" \
        "4:\n\t"
/*
 * This works. Despite all the confusion.
 * (except on PPro SMP or if we are using OOSTORE)
 * (PPro errata 66, 92)
 */

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

#define spin_unlock_string \
        "movb $1,%0" \
                :"=m" (lock->slock) : : "memory"

static inline void _raw_spin_unlock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(lock->magic != SPINLOCK_MAGIC);
        BUG_ON(!spin_is_locked(lock));
#endif
        __asm__ __volatile__(
                spin_unlock_string
        );
}

#else

#define spin_unlock_string \
        "xchgb %b0, %1" \
                :"=q" (oldval), "=m" (lock->slock) \
                :"0" (oldval) : "memory"

static inline void _raw_spin_unlock(spinlock_t *lock)
{
        char oldval = 1;
#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(lock->magic != SPINLOCK_MAGIC);
        BUG_ON(!spin_is_locked(lock));
#endif
        __asm__ __volatile__(
                spin_unlock_string
        );
}

#endif
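
/*
 * Editorial note: on CPUs where stores may be reordered (CONFIG_X86_OOSTORE)
 * or that hit the PPro errata mentioned above, the plain "movb $1,%0"
 * release is not safe, so the second variant unlocks with "xchgb" instead;
 * xchg with a memory operand is implicitly locked and acts as a full
 * memory barrier.
 */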
static inline int _raw_spin_trylock(spinlock_t *lock)
{
        char oldval;
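        /*
         * Editorial note (hedged reading of the SMP-alternatives machinery
         * from <asm/smp_alt.h>, included above): the instructions between
         * labels 1 and 2 are the inline, non-atomic load-then-store used on
         * a uniprocessor.  The __smp_alternatives/__smp_replacements records
         * let the kernel patch this site at boot: the "xchgb" at label 4
         * appears to be the locked variant installed when running SMP, and
         * the "movb" pair at label 5 the cheaper uniprocessor variant that
         * avoids the bus lock.
         */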
#ifdef CONFIG_SMP_ALTERNATIVES
        __asm__ __volatile__(
                "1:movb %1,%b0\n"
                "movb $0,%1\n"
                "2:"
                ".section __smp_alternatives,\"a\"\n"
                ".long 1b\n"
                ".long 3f\n"
                ".previous\n"
                ".section __smp_replacements,\"a\"\n"
                "3: .byte 2b - 1b\n"
                ".byte 5f-4f\n"
                ".byte 0\n"
                ".byte 6f-5f\n"
                ".byte -1\n"
                "4: xchgb %b0,%1\n"
                "5: movb %1,%b0\n"
                "movb $0,%1\n"
                "6:\n"
                ".previous\n"
                :"=q" (oldval), "=m" (lock->slock)
                :"0" (0) : "memory");
#else
        __asm__ __volatile__(
                "xchgb %b0,%1\n"
                :"=q" (oldval), "=m" (lock->slock)
                :"0" (0) : "memory");
#endif
        return oldval > 0;
}
static inline void _raw_spin_lock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
                printk("eip: %p\n", __builtin_return_address(0));
                BUG();
        }
#endif
        __asm__ __volatile__(
                spin_lock_string
                :"=m" (lock->slock) : : "memory");
}

static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
                printk("eip: %p\n", __builtin_return_address(0));
                BUG();
        }
#endif
        __asm__ __volatile__(
                spin_lock_string_flags
                :"=m" (lock->slock) : "r" (flags) : "memory");
}
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
typedef struct {
        volatile unsigned int lock;
#ifdef CONFIG_DEBUG_SPINLOCK
        unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} rwlock_t;

#define RWLOCK_MAGIC    0xdeaf1eed

#ifdef CONFIG_DEBUG_SPINLOCK
#define RWLOCK_MAGIC_INIT       , RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT       /* */
#endif

#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

#define rwlock_init(x)  do { *(x) = RW_LOCK_UNLOCKED; } while(0)
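
/*
 * Illustrative sketch of the mixed-IRQ pattern described in the comment
 * above (editorial note): the read_lock()/write_lock_irqsave() wrappers come
 * from <linux/spinlock.h>, and "dev_list_lock", "walk_devices" and
 * "add_device" are hypothetical names used only for illustration:
 *
 *      static rwlock_t dev_list_lock = RW_LOCK_UNLOCKED;
 *
 *      void walk_devices(void)         // may run in interrupt context
 *      {
 *              read_lock(&dev_list_lock);      // readers need not disable IRQs
 *              ...
 *              read_unlock(&dev_list_lock);
 *      }
 *
 *      void add_device(void)           // process context only
 *      {
 *              unsigned long flags;
 *              write_lock_irqsave(&dev_list_lock, flags);  // writers must be irq-safe
 *              ...
 *              write_unlock_irqrestore(&dev_list_lock, flags);
 *      }
 */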
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define read_can_lock(x) ((int)(x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)

/*
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */
/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
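
/*
 * Editorial worked example of the counter scheme (assuming RW_LOCK_BIAS is
 * 0x01000000, as defined in <asm/rwlock.h>, included above):
 *
 *      unlocked:          lock == RW_LOCK_BIAS           (0x01000000)
 *      n readers:         lock == RW_LOCK_BIAS - n       (still positive)
 *      writer holds it:   lock == 0
 *      contended:         lock <  0                      (sign bit set)
 *
 * Each reader subtracts 1 and each writer subtracts RW_LOCK_BIAS, so a
 * reader arriving while a writer holds the lock, or a writer arriving while
 * anyone holds it, drives the count negative; the failure paths
 * (__read_lock_failed/__write_lock_failed in arch/i386/kernel/semaphore.c)
 * then undo the change and wait.  This is consistent with read_can_lock()
 * requiring a positive count and write_can_lock() requiring the full bias.
 */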
static inline void _raw_read_lock(rwlock_t *rw)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
        __build_read_lock(rw, "__read_lock_failed");
}

static inline void _raw_write_lock(rwlock_t *rw)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
        __build_write_lock(rw, "__write_lock_failed");
}

#define _raw_read_unlock(rw)    asm volatile(LOCK "incl %0" :"=m" ((rw)->lock) : : "memory")
#define _raw_write_unlock(rw)   asm volatile(LOCK "addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")

static inline int _raw_read_trylock(rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;
        atomic_dec(count);
        if (atomic_read(count) >= 0)
                return 1;
        atomic_inc(count);
        return 0;
}

static inline int _raw_write_trylock(rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;
        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
                return 1;
        atomic_add(RW_LOCK_BIAS, count);
        return 0;
}

#endif /* __ASM_SPINLOCK_H */