ia64/xen-unstable

view xen/common/spinlock.c @ 18666:c003e5a23a4e

Clean up spinlock operations and compile them as first-class functions.

This follows modern Linux, since apparently outlining spinlock
operations does not slow down execution. The cleanups will also allow
more convenient addition of diagnostic code.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Oct 20 16:48:17 2008 +0100 (2008-10-20)
children 54d74fc0037c
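
Callers do not invoke the _spin_*/_read_*/_write_* functions below directly; the spin_lock()/read_lock()/write_lock() wrappers in xen/include/xen/spinlock.h expand to them (the read/write wrappers follow the same pattern for the rwlock functions). A minimal sketch of what those wrappers look like after this change, as an approximation rather than the verbatim header:

    /* Sketch of the wrapper macros assumed to live in xen/include/xen/spinlock.h;
     * each operation simply forwards to the corresponding outlined function. */
    #define spin_lock(l)                  _spin_lock(l)
    #define spin_lock_irq(l)              _spin_lock_irq(l)
    #define spin_lock_irqsave(l, f)       ((f) = _spin_lock_irqsave(l))
    #define spin_unlock(l)                _spin_unlock(l)
    #define spin_unlock_irq(l)            _spin_unlock_irq(l)
    #define spin_unlock_irqrestore(l, f)  _spin_unlock_irqrestore(l, f)
    #define spin_trylock(l)               _spin_trylock(l)
    #define spin_barrier(l)               _spin_barrier(l)
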
#include <xen/config.h>
#include <xen/smp.h>
#include <xen/spinlock.h>

void _spin_lock(spinlock_t *lock)
{
    _raw_spin_lock(&lock->raw);
}

void _spin_lock_irq(spinlock_t *lock)
{
    local_irq_disable();
    _raw_spin_lock(&lock->raw);
}

unsigned long _spin_lock_irqsave(spinlock_t *lock)
{
    unsigned long flags;
    local_irq_save(flags);
    _raw_spin_lock(&lock->raw);
    return flags;
}

void _spin_unlock(spinlock_t *lock)
{
    _raw_spin_unlock(&lock->raw);
}

void _spin_unlock_irq(spinlock_t *lock)
{
    _raw_spin_unlock(&lock->raw);
    local_irq_enable();
}

void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
    _raw_spin_unlock(&lock->raw);
    local_irq_restore(flags);
}

int _spin_is_locked(spinlock_t *lock)
{
    return _raw_spin_is_locked(&lock->raw);
}

int _spin_trylock(spinlock_t *lock)
{
    return _raw_spin_trylock(&lock->raw);
}

/* Spin until the lock has been observed free at least once, i.e. until any
 * critical section already in progress when we were called has completed.
 * The lock is not acquired; the barriers order the caller's memory accesses
 * around the wait. */
void _spin_barrier(spinlock_t *lock)
{
    do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
    mb();
}

void _spin_lock_recursive(spinlock_t *lock)
{
    int cpu = smp_processor_id();

    /* Take the underlying lock only on first entry; nested acquisitions
     * on the same CPU just bump the recursion count. */
    if ( likely(lock->recurse_cpu != cpu) )
    {
        spin_lock(lock);
        lock->recurse_cpu = cpu;
    }
    lock->recurse_cnt++;
}

void _spin_unlock_recursive(spinlock_t *lock)
{
    /* Release the underlying lock only when the outermost acquisition
     * is undone. */
    if ( likely(--lock->recurse_cnt == 0) )
    {
        lock->recurse_cpu = -1;
        spin_unlock(lock);
    }
}
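
The recursive variants above are for code paths that may legitimately re-enter on the same CPU; acquisitions and releases must balance. A hypothetical usage sketch, not taken from the Xen tree, assuming spin_lock_recursive()/spin_unlock_recursive() wrapper macros and a lock whose initialiser sets recurse_cpu to -1:

    #include <xen/spinlock.h>

    /* Hypothetical example: inner() can be reached both directly and from
     * outer() while outer() already holds example_lock. */
    static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;   /* assumed initialiser */

    static void inner(void)
    {
        spin_lock_recursive(&example_lock);   /* re-entry on the same CPU only bumps recurse_cnt */
        /* ... */
        spin_unlock_recursive(&example_lock);
    }

    static void outer(void)
    {
        spin_lock_recursive(&example_lock);
        inner();
        spin_unlock_recursive(&example_lock); /* underlying lock released here */
    }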

void _read_lock(rwlock_t *lock)
{
    _raw_read_lock(&lock->raw);
}

void _read_lock_irq(rwlock_t *lock)
{
    local_irq_disable();
    _raw_read_lock(&lock->raw);
}

unsigned long _read_lock_irqsave(rwlock_t *lock)
{
    unsigned long flags;
    local_irq_save(flags);
    _raw_read_lock(&lock->raw);
    return flags;
}

void _read_unlock(rwlock_t *lock)
{
    _raw_read_unlock(&lock->raw);
}

void _read_unlock_irq(rwlock_t *lock)
{
    _raw_read_unlock(&lock->raw);
    local_irq_enable();
}

void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
    _raw_read_unlock(&lock->raw);
    local_irq_restore(flags);
}

void _write_lock(rwlock_t *lock)
{
    _raw_write_lock(&lock->raw);
}

void _write_lock_irq(rwlock_t *lock)
{
    local_irq_disable();
    _raw_write_lock(&lock->raw);
}

unsigned long _write_lock_irqsave(rwlock_t *lock)
{
    unsigned long flags;
    local_irq_save(flags);
    _raw_write_lock(&lock->raw);
    return flags;
}

void _write_unlock(rwlock_t *lock)
{
    _raw_write_unlock(&lock->raw);
}

void _write_unlock_irq(rwlock_t *lock)
{
    _raw_write_unlock(&lock->raw);
    local_irq_enable();
}

void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
    _raw_write_unlock(&lock->raw);
    local_irq_restore(flags);
}
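
Note that the *_irqsave flavours return the saved flags rather than filling in a pointer; the corresponding wrapper macro is assumed to assign the return value, e.g. spin_lock_irqsave(l, f) expanding to ((f) = _spin_lock_irqsave(l)). A hypothetical caller sketch under that assumption:

    #include <xen/spinlock.h>

    /* Hypothetical caller (not part of this file): protects data that is
     * also touched from IRQ context, so interrupts are masked while the
     * lock is held and the previous IRQ state is restored afterwards. */
    static void example_caller(spinlock_t *lock)
    {
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(lock, flags);
    }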