/*
 * xen/common/spinlock.c  (ia64/xen-unstable)
 *
 * Changeset 18668:54d74fc0037c
 * "spinlock: Modify recursive spinlock definitions to support up to 4095 CPUs."
 *
 * Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
 * Author:   Keir Fraser <keir.fraser@citrix.com>
 * Date:     Mon Oct 20 17:16:45 2008 +0100 (2008-10-20)
 * Parents:  c003e5a23a4e
 * Children: 876618c33914
 */
1 #include <xen/config.h>
2 #include <xen/smp.h>
3 #include <xen/spinlock.h>
5 void _spin_lock(spinlock_t *lock)
6 {
7 _raw_spin_lock(&lock->raw);
8 }
10 void _spin_lock_irq(spinlock_t *lock)
11 {
12 local_irq_disable();
13 _raw_spin_lock(&lock->raw);
14 }
16 unsigned long _spin_lock_irqsave(spinlock_t *lock)
17 {
18 unsigned long flags;
19 local_irq_save(flags);
20 _raw_spin_lock(&lock->raw);
21 return flags;
22 }
24 void _spin_unlock(spinlock_t *lock)
25 {
26 _raw_spin_unlock(&lock->raw);
27 }
29 void _spin_unlock_irq(spinlock_t *lock)
30 {
31 _raw_spin_unlock(&lock->raw);
32 local_irq_enable();
33 }
35 void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
36 {
37 _raw_spin_unlock(&lock->raw);
38 local_irq_restore(flags);
39 }
41 int _spin_is_locked(spinlock_t *lock)
42 {
43 return _raw_spin_is_locked(&lock->raw);
44 }
46 int _spin_trylock(spinlock_t *lock)
47 {
48 return _raw_spin_trylock(&lock->raw);
49 }
51 void _spin_barrier(spinlock_t *lock)
52 {
53 do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
54 mb();
55 }
57 void _spin_lock_recursive(spinlock_t *lock)
58 {
59 int cpu = smp_processor_id();
61 /* Don't allow overflow of recurse_cpu field. */
62 BUILD_BUG_ON(NR_CPUS > 0xfffu);
64 if ( likely(lock->recurse_cpu != cpu) )
65 {
66 spin_lock(lock);
67 lock->recurse_cpu = cpu;
68 }
70 /* We support only fairly shallow recursion, else the counter overflows. */
71 ASSERT(lock->recurse_cnt < 0xfu);
72 lock->recurse_cnt++;
73 }
75 void _spin_unlock_recursive(spinlock_t *lock)
76 {
77 if ( likely(--lock->recurse_cnt == 0) )
78 {
79 lock->recurse_cpu = 0xfffu;
80 spin_unlock(lock);
81 }
82 }
84 void _read_lock(rwlock_t *lock)
85 {
86 _raw_read_lock(&lock->raw);
87 }
89 void _read_lock_irq(rwlock_t *lock)
90 {
91 local_irq_disable();
92 _raw_read_lock(&lock->raw);
93 }
95 unsigned long _read_lock_irqsave(rwlock_t *lock)
96 {
97 unsigned long flags;
98 local_irq_save(flags);
99 _raw_read_lock(&lock->raw);
100 return flags;
101 }
103 void _read_unlock(rwlock_t *lock)
104 {
105 _raw_read_unlock(&lock->raw);
106 }
108 void _read_unlock_irq(rwlock_t *lock)
109 {
110 _raw_read_unlock(&lock->raw);
111 local_irq_enable();
112 }
114 void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
115 {
116 _raw_read_unlock(&lock->raw);
117 local_irq_restore(flags);
118 }
120 void _write_lock(rwlock_t *lock)
121 {
122 _raw_write_lock(&lock->raw);
123 }
125 void _write_lock_irq(rwlock_t *lock)
126 {
127 local_irq_disable();
128 _raw_write_lock(&lock->raw);
129 }
131 unsigned long _write_lock_irqsave(rwlock_t *lock)
132 {
133 unsigned long flags;
134 local_irq_save(flags);
135 _raw_write_lock(&lock->raw);
136 return flags;
137 }
139 void _write_unlock(rwlock_t *lock)
140 {
141 _raw_write_unlock(&lock->raw);
142 }
144 void _write_unlock_irq(rwlock_t *lock)
145 {
146 _raw_write_unlock(&lock->raw);
147 local_irq_enable();
148 }
150 void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
151 {
152 _raw_write_unlock(&lock->raw);
153 local_irq_restore(flags);
154 }