ia64/xen-unstable

annotate xen/include/xen/spinlock.h @ 18668:54d74fc0037c

spinlock: Modify recursive spinlock definitions to support up to 4095 CPUs.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Oct 20 17:16:45 2008 +0100 (2008-10-20)
parents c003e5a23a4e
children 876618c33914
rev   line source
kaf24@1542 1 #ifndef __SPINLOCK_H__
kaf24@1542 2 #define __SPINLOCK_H__
kaf24@1210 3
kaf24@1210 4 #include <xen/config.h>
kaf24@1210 5 #include <asm/system.h>
kaf24@1210 6 #include <asm/spinlock.h>
kaf24@1210 7
/*
 * Spinlock with embedded recursion state.  'raw' is the arch-specific
 * lock implementation; the two bitfields are used only by the
 * spin_{lock,unlock}_recursive() forms below.
 */
typedef struct {
    raw_spinlock_t raw;
    u16 recurse_cpu:12; /* CPU id of recursive holder; 0xfff = none
                         * (see SPIN_LOCK_UNLOCKED).  12 bits allows
                         * CPU ids 0..4094, i.e. up to 4095 CPUs. */
    u16 recurse_cnt:4;  /* recursion depth; 0 when not recursively held */
} spinlock_t;
kaf24@1210 13
/*
 * Static initializer: raw lock released, recurse_cpu set to the
 * out-of-range sentinel 0xfff ("no recursive holder"), recursion
 * count zero.
 */
#define SPIN_LOCK_UNLOCKED { _RAW_SPIN_LOCK_UNLOCKED, 0xfffu, 0 }
#define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
/* Run-time (re)initialisation via compound-literal assignment. */
#define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
kaf24@1210 17
/* Reader/writer lock: thin wrapper around the arch-specific raw rwlock. */
typedef struct {
    raw_rwlock_t raw;
} rwlock_t;
kaf24@2373 21
/* Static initializer: raw rwlock in the released state. */
#define RW_LOCK_UNLOCKED { _RAW_RW_LOCK_UNLOCKED }
#define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
/* Run-time (re)initialisation via compound-literal assignment. */
#define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
keir@18666 25
/*
 * Out-of-line spinlock primitives.  Callers should use the spin_*()
 * wrapper macros below rather than these _-prefixed functions directly.
 * (Definitions live outside this header — presumably common/spinlock.c;
 * TODO confirm.)
 */
void _spin_lock(spinlock_t *lock);
void _spin_lock_irq(spinlock_t *lock);
/* Returns the pre-lock interrupt flags, to be passed to *_irqrestore(). */
unsigned long _spin_lock_irqsave(spinlock_t *lock);

void _spin_unlock(spinlock_t *lock);
void _spin_unlock_irq(spinlock_t *lock);
void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);

int _spin_is_locked(spinlock_t *lock);
/* Non-blocking acquire: non-zero on success, zero if the lock was held. */
int _spin_trylock(spinlock_t *lock);
void _spin_barrier(spinlock_t *lock);

/* Recursive forms: safe to re-acquire on the CPU that already holds it. */
void _spin_lock_recursive(spinlock_t *lock);
void _spin_unlock_recursive(spinlock_t *lock);
keir@18666 40
/*
 * Out-of-line reader/writer lock primitives.  Callers should use the
 * read_*()/write_*() wrapper macros below.
 */
void _read_lock(rwlock_t *lock);
void _read_lock_irq(rwlock_t *lock);
/* Returns the pre-lock interrupt flags, to be passed to *_irqrestore(). */
unsigned long _read_lock_irqsave(rwlock_t *lock);

void _read_unlock(rwlock_t *lock);
void _read_unlock_irq(rwlock_t *lock);
void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags);

void _write_lock(rwlock_t *lock);
void _write_lock_irq(rwlock_t *lock);
/* Returns the pre-lock interrupt flags, to be passed to *_irqrestore(). */
unsigned long _write_lock_irqsave(rwlock_t *lock);

void _write_unlock(rwlock_t *lock);
void _write_unlock_irq(rwlock_t *lock);
void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
keir@18666 56
/* Public spinlock API: one-to-one wrappers around the _spin_*() functions. */
#define spin_lock(l)                  _spin_lock(l)
#define spin_lock_irq(l)              _spin_lock_irq(l)
/* 'f' receives the saved interrupt flags; pass it back to _irqrestore. */
#define spin_lock_irqsave(l, f)       ((f) = _spin_lock_irqsave(l))

#define spin_unlock(l)                _spin_unlock(l)
#define spin_unlock_irq(l)            _spin_unlock_irq(l)
#define spin_unlock_irqrestore(l, f)  _spin_unlock_irqrestore(l, f)

/* Queries the raw lock directly; ignores the recursion bookkeeping. */
#define spin_is_locked(l)             _raw_spin_is_locked(&(l)->raw)
#define spin_trylock(l)               _spin_trylock(l)

/* Ensure a lock is quiescent between two critical operations. */
#define spin_barrier(l)               _spin_barrier(l)

/*
 * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
 * reentered recursively on the same CPU. All critical regions that may form
 * part of a recursively-nested set must be protected by these forms. If there
 * are any critical regions that cannot form part of such a set, they can use
 * standard spin_[un]lock().
 */
#define spin_lock_recursive(l)        _spin_lock_recursive(l)
#define spin_unlock_recursive(l)      _spin_unlock_recursive(l)
keir@18666 80
/* Public rwlock API: one-to-one wrappers around the _read_*/_write_*()
 * functions.  The *_irqsave forms store the saved flags into 'f'. */
#define read_lock(l)                  _read_lock(l)
#define read_lock_irq(l)              _read_lock_irq(l)
#define read_lock_irqsave(l, f)       ((f) = _read_lock_irqsave(l))

#define read_unlock(l)                _read_unlock(l)
#define read_unlock_irq(l)            _read_unlock_irq(l)
#define read_unlock_irqrestore(l, f)  _read_unlock_irqrestore(l, f)

#define write_lock(l)                 _write_lock(l)
#define write_lock_irq(l)             _write_lock_irq(l)
#define write_lock_irqsave(l, f)      ((f) = _write_lock_irqsave(l))

#define write_unlock(l)               _write_unlock(l)
#define write_unlock_irq(l)           _write_unlock_irq(l)
#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)
kaf24@4804 96
kaf24@1542 97 #endif /* __SPINLOCK_H__ */