ia64/xen-unstable

view xen/include/xen/spinlock.h @ 18704:876618c33914

Define spin_barrier_irq() for IRQ-safe spinlocks, and use it for virq_lock.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Oct 22 15:06:01 2008 +0100 (2008-10-22)
parents 54d74fc0037c
children 0358305c6883
line source
1 #ifndef __SPINLOCK_H__
2 #define __SPINLOCK_H__
4 #include <xen/config.h>
5 #include <asm/system.h>
6 #include <asm/spinlock.h>
8 typedef struct {
9 raw_spinlock_t raw;
10 u16 recurse_cpu:12;
11 u16 recurse_cnt:4;
12 } spinlock_t;
14 #define SPIN_LOCK_UNLOCKED { _RAW_SPIN_LOCK_UNLOCKED, 0xfffu, 0 }
15 #define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
16 #define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
18 typedef struct {
19 raw_rwlock_t raw;
20 } rwlock_t;
22 #define RW_LOCK_UNLOCKED { _RAW_RW_LOCK_UNLOCKED }
23 #define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
24 #define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
26 void _spin_lock(spinlock_t *lock);
27 void _spin_lock_irq(spinlock_t *lock);
28 unsigned long _spin_lock_irqsave(spinlock_t *lock);
30 void _spin_unlock(spinlock_t *lock);
31 void _spin_unlock_irq(spinlock_t *lock);
32 void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
34 int _spin_is_locked(spinlock_t *lock);
35 int _spin_trylock(spinlock_t *lock);
36 void _spin_barrier(spinlock_t *lock);
37 void _spin_barrier_irq(spinlock_t *lock);
39 void _spin_lock_recursive(spinlock_t *lock);
40 void _spin_unlock_recursive(spinlock_t *lock);
42 void _read_lock(rwlock_t *lock);
43 void _read_lock_irq(rwlock_t *lock);
44 unsigned long _read_lock_irqsave(rwlock_t *lock);
46 void _read_unlock(rwlock_t *lock);
47 void _read_unlock_irq(rwlock_t *lock);
48 void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
50 void _write_lock(rwlock_t *lock);
51 void _write_lock_irq(rwlock_t *lock);
52 unsigned long _write_lock_irqsave(rwlock_t *lock);
54 void _write_unlock(rwlock_t *lock);
55 void _write_unlock_irq(rwlock_t *lock);
56 void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
/*
 * Public spinlock API: thin wrappers over the _spin_*() entry points.
 * The *_irqsave forms assign the saved flags into 'f' rather than taking
 * its address, hence the ((f) = ...) expansion.
 */
#define spin_lock(l)                  _spin_lock(l)
#define spin_lock_irq(l)              _spin_lock_irq(l)
#define spin_lock_irqsave(l, f)       ((f) = _spin_lock_irqsave(l))

#define spin_unlock(l)                _spin_unlock(l)
#define spin_unlock_irq(l)            _spin_unlock_irq(l)
#define spin_unlock_irqrestore(l, f)  _spin_unlock_irqrestore(l, f)

/*
 * Route through _spin_is_locked() like every other wrapper here, instead of
 * expanding to _raw_spin_is_locked(&(l)->raw) directly: the declared
 * _spin_is_locked() was otherwise unused, and going through it keeps the
 * raw lock representation private to the implementation.
 */
#define spin_is_locked(l)             _spin_is_locked(l)
#define spin_trylock(l)               _spin_trylock(l)

/* Ensure a lock is quiescent between two critical operations. */
#define spin_barrier(l)               _spin_barrier(l)
#define spin_barrier_irq(l)           _spin_barrier_irq(l)

/*
 * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
 * reentered recursively on the same CPU. All critical regions that may form
 * part of a recursively-nested set must be protected by these forms. If there
 * are any critical regions that cannot form part of such a set, they can use
 * standard spin_[un]lock().
 */
#define spin_lock_recursive(l)        _spin_lock_recursive(l)
#define spin_unlock_recursive(l)      _spin_unlock_recursive(l)
/*
 * Public reader/writer lock API: thin wrappers over the _read_*() and
 * _write_*() entry points. As with spinlocks, the *_irqsave forms assign
 * the returned flags into 'f'.
 */
#define read_lock(l)                   _read_lock(l)
#define read_lock_irq(l)               _read_lock_irq(l)
#define read_lock_irqsave(l, f)        ((f) = _read_lock_irqsave(l))

#define read_unlock(l)                 _read_unlock(l)
#define read_unlock_irq(l)             _read_unlock_irq(l)
#define read_unlock_irqrestore(l, f)   _read_unlock_irqrestore(l, f)

#define write_lock(l)                  _write_lock(l)
#define write_lock_irq(l)              _write_lock_irq(l)
#define write_lock_irqsave(l, f)       ((f) = _write_lock_irqsave(l))

#define write_unlock(l)                _write_unlock(l)
#define write_unlock_irq(l)            _write_unlock_irq(l)
#define write_unlock_irqrestore(l, f)  _write_unlock_irqrestore(l, f)
99 #endif /* __SPINLOCK_H__ */