ia64/xen-unstable

view xen/common/spinlock.c @ 19835:edfdeb150f27

Fix buildsystem to detect udev > version 124

udev removed the udevinfo symlink from versions higher than 123, and
Xen's build system could not detect whether udev is in place and has
the required version.

Signed-off-by: Marc-A. Dahlhaus <mad@wol.de>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 25 13:02:37 2009 +0100 (2009-06-25)
parents f210a633571c
children
line source
1 #include <xen/config.h>
2 #include <xen/irq.h>
3 #include <xen/smp.h>
4 #include <xen/spinlock.h>
5 #include <asm/processor.h>
7 #ifndef NDEBUG
9 static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
11 static void check_lock(struct lock_debug *debug)
12 {
13 int irq_safe = !local_irq_is_enabled();
15 if ( unlikely(atomic_read(&spin_debug) <= 0) )
16 return;
18 /* A few places take liberties with this. */
19 /* BUG_ON(in_irq() && !irq_safe); */
21 if ( unlikely(debug->irq_safe != irq_safe) )
22 {
23 int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
24 BUG_ON(seen == !irq_safe);
25 }
26 }
28 void spin_debug_enable(void)
29 {
30 atomic_inc(&spin_debug);
31 }
33 void spin_debug_disable(void)
34 {
35 atomic_dec(&spin_debug);
36 }
38 #else /* defined(NDEBUG) */
40 #define check_lock(l) ((void)0)
42 #endif
44 void _spin_lock(spinlock_t *lock)
45 {
46 check_lock(&lock->debug);
47 while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
48 while ( likely(_raw_spin_is_locked(&lock->raw)) )
49 cpu_relax();
50 }
52 void _spin_lock_irq(spinlock_t *lock)
53 {
54 ASSERT(local_irq_is_enabled());
55 local_irq_disable();
56 check_lock(&lock->debug);
57 while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
58 {
59 local_irq_enable();
60 while ( likely(_raw_spin_is_locked(&lock->raw)) )
61 cpu_relax();
62 local_irq_disable();
63 }
64 }
66 unsigned long _spin_lock_irqsave(spinlock_t *lock)
67 {
68 unsigned long flags;
69 local_irq_save(flags);
70 check_lock(&lock->debug);
71 while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
72 {
73 local_irq_restore(flags);
74 while ( likely(_raw_spin_is_locked(&lock->raw)) )
75 cpu_relax();
76 local_irq_save(flags);
77 }
78 return flags;
79 }
81 void _spin_unlock(spinlock_t *lock)
82 {
83 _raw_spin_unlock(&lock->raw);
84 }
86 void _spin_unlock_irq(spinlock_t *lock)
87 {
88 _raw_spin_unlock(&lock->raw);
89 local_irq_enable();
90 }
92 void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
93 {
94 _raw_spin_unlock(&lock->raw);
95 local_irq_restore(flags);
96 }
98 int _spin_is_locked(spinlock_t *lock)
99 {
100 check_lock(&lock->debug);
101 return _raw_spin_is_locked(&lock->raw);
102 }
104 int _spin_trylock(spinlock_t *lock)
105 {
106 check_lock(&lock->debug);
107 return _raw_spin_trylock(&lock->raw);
108 }
110 void _spin_barrier(spinlock_t *lock)
111 {
112 check_lock(&lock->debug);
113 do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
114 mb();
115 }
117 void _spin_barrier_irq(spinlock_t *lock)
118 {
119 unsigned long flags;
120 local_irq_save(flags);
121 _spin_barrier(lock);
122 local_irq_restore(flags);
123 }
125 void _spin_lock_recursive(spinlock_t *lock)
126 {
127 int cpu = smp_processor_id();
129 /* Don't allow overflow of recurse_cpu field. */
130 BUILD_BUG_ON(NR_CPUS > 0xfffu);
132 check_lock(&lock->debug);
134 if ( likely(lock->recurse_cpu != cpu) )
135 {
136 spin_lock(lock);
137 lock->recurse_cpu = cpu;
138 }
140 /* We support only fairly shallow recursion, else the counter overflows. */
141 ASSERT(lock->recurse_cnt < 0xfu);
142 lock->recurse_cnt++;
143 }
145 void _spin_unlock_recursive(spinlock_t *lock)
146 {
147 if ( likely(--lock->recurse_cnt == 0) )
148 {
149 lock->recurse_cpu = 0xfffu;
150 spin_unlock(lock);
151 }
152 }
154 void _read_lock(rwlock_t *lock)
155 {
156 check_lock(&lock->debug);
157 _raw_read_lock(&lock->raw);
158 }
160 void _read_lock_irq(rwlock_t *lock)
161 {
162 ASSERT(local_irq_is_enabled());
163 local_irq_disable();
164 check_lock(&lock->debug);
165 _raw_read_lock(&lock->raw);
166 }
168 unsigned long _read_lock_irqsave(rwlock_t *lock)
169 {
170 unsigned long flags;
171 local_irq_save(flags);
172 check_lock(&lock->debug);
173 _raw_read_lock(&lock->raw);
174 return flags;
175 }
177 void _read_unlock(rwlock_t *lock)
178 {
179 _raw_read_unlock(&lock->raw);
180 }
182 void _read_unlock_irq(rwlock_t *lock)
183 {
184 _raw_read_unlock(&lock->raw);
185 local_irq_enable();
186 }
188 void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
189 {
190 _raw_read_unlock(&lock->raw);
191 local_irq_restore(flags);
192 }
194 void _write_lock(rwlock_t *lock)
195 {
196 check_lock(&lock->debug);
197 _raw_write_lock(&lock->raw);
198 }
200 void _write_lock_irq(rwlock_t *lock)
201 {
202 ASSERT(local_irq_is_enabled());
203 local_irq_disable();
204 check_lock(&lock->debug);
205 _raw_write_lock(&lock->raw);
206 }
208 unsigned long _write_lock_irqsave(rwlock_t *lock)
209 {
210 unsigned long flags;
211 local_irq_save(flags);
212 check_lock(&lock->debug);
213 _raw_write_lock(&lock->raw);
214 return flags;
215 }
217 int _write_trylock(rwlock_t *lock)
218 {
219 check_lock(&lock->debug);
220 return _raw_write_trylock(&lock->raw);
221 }
223 void _write_unlock(rwlock_t *lock)
224 {
225 _raw_write_unlock(&lock->raw);
226 }
228 void _write_unlock_irq(rwlock_t *lock)
229 {
230 _raw_write_unlock(&lock->raw);
231 local_irq_enable();
232 }
234 void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
235 {
236 _raw_write_unlock(&lock->raw);
237 local_irq_restore(flags);
238 }
240 int _rw_is_locked(rwlock_t *lock)
241 {
242 check_lock(&lock->debug);
243 return _raw_rw_is_locked(&lock->raw);
244 }
246 int _rw_is_write_locked(rwlock_t *lock)
247 {
248 check_lock(&lock->debug);
249 return _raw_rw_is_write_locked(&lock->raw);
250 }