ia64/xen-unstable
changeset 18666:c003e5a23a4e
Clean up spinlock operations and compile as first-class functions.
This follows modern Linux, since apparently outlining spinlock
operations does not slow down execution. The cleanups will also allow
more convenient addition of diagnostic code.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Mon Oct 20 16:48:17 2008 +0100 (2008-10-20) |
parents | 824892134573 |
children | f4dab783b58b |
files | xen/arch/x86/x86_64/mm.c xen/common/Makefile xen/common/spinlock.c xen/include/asm-ia64/linux-xen/asm/spinlock.h xen/include/asm-ia64/xenspinlock.h xen/include/asm-x86/spinlock.h xen/include/xen/spinlock.h |
line diff
```diff
--- a/xen/arch/x86/x86_64/mm.c	Mon Oct 20 15:31:54 2008 +0100
+++ b/xen/arch/x86/x86_64/mm.c	Mon Oct 20 16:48:17 2008 +0100
@@ -252,8 +252,6 @@ void __init subarch_init_memory(void)
     BUILD_BUG_ON(offsetof(struct page_info, u.inuse._domain) !=
                  (offsetof(struct page_info, count_info) + sizeof(u32)));
     BUILD_BUG_ON((offsetof(struct page_info, count_info) & 7) != 0);
-    BUILD_BUG_ON(sizeof(struct page_info) !=
-                 (32 + BITS_TO_LONGS(NR_CPUS)*sizeof(long)));
 
     /* M2P table is mappable read-only by privileged domains. */
     for ( v = RDWR_MPT_VIRT_START;
```
```diff
--- a/xen/common/Makefile	Mon Oct 20 15:31:54 2008 +0100
+++ b/xen/common/Makefile	Mon Oct 20 16:48:17 2008 +0100
@@ -16,6 +16,7 @@ obj-y += sched_sedf.o
 obj-y += schedule.o
 obj-y += shutdown.o
 obj-y += softirq.o
+obj-y += spinlock.o
 obj-y += stop_machine.o
 obj-y += string.o
 obj-y += symbols.o
```
```diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/common/spinlock.c	Mon Oct 20 16:48:17 2008 +0100
@@ -0,0 +1,147 @@
+#include <xen/config.h>
+#include <xen/smp.h>
+#include <xen/spinlock.h>
+
+void _spin_lock(spinlock_t *lock)
+{
+    _raw_spin_lock(&lock->raw);
+}
+
+void _spin_lock_irq(spinlock_t *lock)
+{
+    local_irq_disable();
+    _raw_spin_lock(&lock->raw);
+}
+
+unsigned long _spin_lock_irqsave(spinlock_t *lock)
+{
+    unsigned long flags;
+    local_irq_save(flags);
+    _raw_spin_lock(&lock->raw);
+    return flags;
+}
+
+void _spin_unlock(spinlock_t *lock)
+{
+    _raw_spin_unlock(&lock->raw);
+}
+
+void _spin_unlock_irq(spinlock_t *lock)
+{
+    _raw_spin_unlock(&lock->raw);
+    local_irq_enable();
+}
+
+void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+    _raw_spin_unlock(&lock->raw);
+    local_irq_restore(flags);
+}
+
+int _spin_is_locked(spinlock_t *lock)
+{
+    return _raw_spin_is_locked(&lock->raw);
+}
+
+int _spin_trylock(spinlock_t *lock)
+{
+    return _raw_spin_trylock(&lock->raw);
+}
+
+void _spin_barrier(spinlock_t *lock)
+{
+    do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
+    mb();
+}
+
+void _spin_lock_recursive(spinlock_t *lock)
+{
+    int cpu = smp_processor_id();
+    if ( likely(lock->recurse_cpu != cpu) )
+    {
+        spin_lock(lock);
+        lock->recurse_cpu = cpu;
+    }
+    lock->recurse_cnt++;
+}
+
+void _spin_unlock_recursive(spinlock_t *lock)
+{
+    if ( likely(--lock->recurse_cnt == 0) )
+    {
+        lock->recurse_cpu = -1;
+        spin_unlock(lock);
+    }
+}
+
+void _read_lock(rwlock_t *lock)
+{
+    _raw_read_lock(&lock->raw);
+}
+
+void _read_lock_irq(rwlock_t *lock)
+{
+    local_irq_disable();
+    _raw_read_lock(&lock->raw);
+}
+
+unsigned long _read_lock_irqsave(rwlock_t *lock)
+{
+    unsigned long flags;
+    local_irq_save(flags);
+    _raw_read_lock(&lock->raw);
+    return flags;
+}
+
+void _read_unlock(rwlock_t *lock)
+{
+    _raw_read_unlock(&lock->raw);
+}
+
+void _read_unlock_irq(rwlock_t *lock)
+{
+    _raw_read_unlock(&lock->raw);
+    local_irq_enable();
+}
+
+void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+    _raw_read_unlock(&lock->raw);
+    local_irq_restore(flags);
+}
+
+void _write_lock(rwlock_t *lock)
+{
+    _raw_write_lock(&lock->raw);
+}
+
+void _write_lock_irq(rwlock_t *lock)
+{
+    local_irq_disable();
+    _raw_write_lock(&lock->raw);
+}
+
+unsigned long _write_lock_irqsave(rwlock_t *lock)
+{
+    unsigned long flags;
+    local_irq_save(flags);
+    _raw_write_lock(&lock->raw);
+    return flags;
+}
+
+void _write_unlock(rwlock_t *lock)
+{
+    _raw_write_unlock(&lock->raw);
+}
+
+void _write_unlock_irq(rwlock_t *lock)
+{
+    _raw_write_unlock(&lock->raw);
+    local_irq_enable();
+}
+
+void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+    _raw_write_unlock(&lock->raw);
+    local_irq_restore(flags);
+}
```
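With the lock operations now outlined into xen/common/spinlock.c, the diagnostic code mentioned in the commit message has a single, architecture-independent place to live. As a rough illustration only (not part of this changeset), a contention counter could be hooked into _spin_lock() along these lines; the contentions field is hypothetical:

```c
/*
 * Sketch only -- not in this changeset.  Shows how the out-of-line
 * _spin_lock() gives one central hook point for diagnostics.
 * 'contentions' is a hypothetical extra field in spinlock_t.
 */
void _spin_lock(spinlock_t *lock)
{
    if ( !_raw_spin_trylock(&lock->raw) )
    {
        /* Slow path: spin, then account the contention event. */
        _raw_spin_lock(&lock->raw);
        lock->contentions++;   /* updated under the lock, so no race */
    }
}
```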
```diff
--- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h	Mon Oct 20 15:31:54 2008 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h	Mon Oct 20 16:48:17 2008 +0100
@@ -27,25 +27,16 @@ typedef struct {
 #ifdef DEBUG_SPINLOCK
     void *locker;
 #endif
-#ifdef XEN
-    unsigned char recurse_cpu;
-    unsigned char recurse_cnt;
-#endif
-} spinlock_t;
+} raw_spinlock_t;
 
 #ifdef XEN
 #ifdef DEBUG_SPINLOCK
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 0, NULL, -1, 0 }
+#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 0, NULL }
 #else
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 0, -1, 0 }
+#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 0 }
 #endif
-static inline void spin_lock_init(spinlock_t *lock)
-{
-    *lock = ((spinlock_t)SPIN_LOCK_UNLOCKED);
-}
 #else
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 0 }
-#define spin_lock_init(x) ((x)->lock = 0)
+#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 0 }
 #endif
 
 #ifdef ASM_SUPPORTED
@@ -59,7 +50,7 @@ static inline void spin_lock_init(spinlo
 #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
 
 static inline void
-_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+_raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
 {
     register volatile unsigned int *ptr asm ("r31") = &lock->lock;
 
@@ -136,8 +127,8 @@ do { \
 } while (0)
 #endif /* !ASM_SUPPORTED */
 
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define _raw_spin_unlock(x)    do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
+#define _raw_spin_is_locked(x) ((x)->lock != 0)
+#define _raw_spin_unlock(x)    do { barrier(); (x)->lock = 0; } while (0)
 #define _raw_spin_trylock(x)   (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
 #define spin_unlock_wait(x)    do { barrier(); } while ((x)->lock)
 
@@ -147,16 +138,15 @@ typedef struct {
 #ifdef CONFIG_PREEMPT
     unsigned int break_lock;
 #endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { 0, 0 }
+} raw_rwlock_t;
+#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0, 0 }
 
-#define rwlock_init(x) do { *(x) = (rwlock_t) RW_LOCK_UNLOCKED; } while(0)
 #define read_can_lock(rw) (*(volatile int *)(rw) >= 0)
 #define write_can_lock(rw) (*(volatile int *)(rw) == 0)
 
 #define _raw_read_lock(rw) \
 do { \
-	rwlock_t *__read_lock_ptr = (rw); \
+	raw_rwlock_t *__read_lock_ptr = (rw); \
 \
 	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
 		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -167,7 +157,7 @@ do { \
 
 #define _raw_read_unlock(rw) \
 do { \
-	rwlock_t *__read_lock_ptr = (rw); \
+	raw_rwlock_t *__read_lock_ptr = (rw); \
 	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
 } while (0)
 
@@ -230,7 +220,4 @@ do { \
 	clear_bit(31, (x)); \
 })
 
-#ifdef XEN
-#include <asm/xenspinlock.h>
-#endif
 #endif /* _ASM_IA64_SPINLOCK_H */
```
```diff
--- a/xen/include/asm-ia64/xenspinlock.h	Mon Oct 20 15:31:54 2008 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-#ifndef _ASM_IA64_XENSPINLOCK_H
-#define _ASM_IA64_XENSPINLOCK_H
-
-/*
- * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
- * reentered recursively on the same CPU. All critical regions that may form
- * part of a recursively-nested set must be protected by these forms. If there
- * are any critical regions that cannot form part of such a set, they can use
- * standard spin_[un]lock().
- */
-#define _raw_spin_lock_recursive(_lock)            \
-    do {                                           \
-        int cpu = smp_processor_id();              \
-        if ( likely((_lock)->recurse_cpu != cpu) ) \
-        {                                          \
-            spin_lock(_lock);                      \
-            (_lock)->recurse_cpu = cpu;            \
-        }                                          \
-        (_lock)->recurse_cnt++;                    \
-    } while ( 0 )
-
-#define _raw_spin_unlock_recursive(_lock)          \
-    do {                                           \
-        if ( likely(--(_lock)->recurse_cnt == 0) ) \
-        {                                          \
-            (_lock)->recurse_cpu = -1;             \
-            spin_unlock(_lock);                    \
-        }                                          \
-    } while ( 0 )
-#endif /* _ASM_IA64_XENSPINLOCK_H */
```
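The recursive forms removed here survive as the generic spin_lock_recursive()/spin_unlock_recursive() wrappers declared in xen/include/xen/spinlock.h (see the final diff below). A minimal usage sketch, with the two caller functions invented for illustration:

```c
/* Usage sketch only: outer() and inner() are hypothetical callers. */
static DEFINE_SPINLOCK(state_lock);

static void inner(void)
{
    /* Safe even if the caller already holds state_lock on this CPU. */
    spin_lock_recursive(&state_lock);
    /* ... touch the shared state ... */
    spin_unlock_recursive(&state_lock);
}

static void outer(void)
{
    spin_lock_recursive(&state_lock);
    inner();    /* re-enters the same lock on the same CPU */
    spin_unlock_recursive(&state_lock);
}
```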
```diff
--- a/xen/include/asm-x86/spinlock.h	Mon Oct 20 15:31:54 2008 +0100
+++ b/xen/include/asm-x86/spinlock.h	Mon Oct 20 16:48:17 2008 +0100
@@ -8,18 +8,16 @@
 
 typedef struct {
     volatile s16 lock;
-    s8 recurse_cpu;
-    u8 recurse_cnt;
-} spinlock_t;
+} raw_spinlock_t;
 
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 1, -1, 0 }
+#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 1 }
 
-#define spin_lock_init(x) do { *(x) = (spinlock_t) SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(x) (*(volatile char *)(&(x)->lock) <= 0)
+#define _raw_spin_is_locked(x) \
+    (*(volatile char *)(&(x)->lock) <= 0)
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void _raw_spin_lock(raw_spinlock_t *lock)
 {
-    __asm__ __volatile__ (
+    asm volatile (
         "1:  lock; decb %0         \n"
         "    js 2f                 \n"
         ".section .text.lock,\"ax\"\n"
@@ -31,81 +29,51 @@ static inline void _raw_spin_lock(spinlo
         : "=m" (lock->lock) : : "memory" );
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void _raw_spin_unlock(raw_spinlock_t *lock)
 {
-    ASSERT(spin_is_locked(lock));
-    __asm__ __volatile__ (
-        "movb $1,%0" 
+    ASSERT(_raw_spin_is_locked(lock));
+    asm volatile (
+        "movb $1,%0"
         : "=m" (lock->lock) : : "memory" );
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int _raw_spin_trylock(raw_spinlock_t *lock)
 {
     char oldval;
-    __asm__ __volatile__(
+    asm volatile (
         "xchgb %b0,%1"
         :"=q" (oldval), "=m" (lock->lock)
-        :"0" (0) : "memory");
-    return oldval > 0;
+        :"0" (0) : "memory" );
+    return (oldval > 0);
 }
 
-/*
- * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
- * reentered recursively on the same CPU. All critical regions that may form
- * part of a recursively-nested set must be protected by these forms. If there
- * are any critical regions that cannot form part of such a set, they can use
- * standard spin_[un]lock().
- */
-#define _raw_spin_lock_recursive(_lock)            \
-    do {                                           \
-        int cpu = smp_processor_id();              \
-        if ( likely((_lock)->recurse_cpu != cpu) ) \
-        {                                          \
-            spin_lock(_lock);                      \
-            (_lock)->recurse_cpu = cpu;            \
-        }                                          \
-        (_lock)->recurse_cnt++;                    \
-    } while ( 0 )
-
-#define _raw_spin_unlock_recursive(_lock)          \
-    do {                                           \
-        if ( likely(--(_lock)->recurse_cnt == 0) ) \
-        {                                          \
-            (_lock)->recurse_cpu = -1;             \
-            spin_unlock(_lock);                    \
-        }                                          \
-    } while ( 0 )
-
-
 typedef struct {
     volatile unsigned int lock;
-} rwlock_t;
+} raw_rwlock_t;
 
-#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { RW_LOCK_BIAS }
-
-#define rwlock_init(x) do { *(x) = (rwlock_t) RW_LOCK_UNLOCKED; } while(0)
+#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { RW_LOCK_BIAS }
 
 /*
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
  */
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void _raw_read_lock(raw_rwlock_t *rw)
 {
     __build_read_lock(rw, "__read_lock_failed");
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void _raw_write_lock(raw_rwlock_t *rw)
 {
     __build_write_lock(rw, "__write_lock_failed");
 }
 
-#define _raw_read_unlock(rw)                       \
-    __asm__ __volatile__ (                         \
-        "lock ; incl %0" :                         \
+#define _raw_read_unlock(rw)                       \
+    asm volatile (                                 \
+        "lock ; incl %0" :                         \
         "=m" ((rw)->lock) : : "memory" )
-#define _raw_write_unlock(rw)                      \
-    __asm__ __volatile__ (                         \
-        "lock ; addl $" RW_LOCK_BIAS_STR ",%0" :   \
+#define _raw_write_unlock(rw)                      \
+    asm volatile (                                 \
+        "lock ; addl $" RW_LOCK_BIAS_STR ",%0" :   \
         "=m" ((rw)->lock) : : "memory" )
 
 #endif /* __ASM_SPINLOCK_H */
```
```diff
--- a/xen/include/xen/spinlock.h	Mon Oct 20 15:31:54 2008 +0100
+++ b/xen/include/xen/spinlock.h	Mon Oct 20 16:48:17 2008 +0100
@@ -3,93 +3,95 @@
 
 #include <xen/config.h>
 #include <asm/system.h>
-
-#define spin_lock_irqsave(lock, flags) \
-    do { local_irq_save(flags); spin_lock(lock); } while ( 0 )
-#define spin_lock_irq(lock) \
-    do { local_irq_disable(); spin_lock(lock); } while ( 0 )
-
-#define read_lock_irqsave(lock, flags) \
-    do { local_irq_save(flags); read_lock(lock); } while ( 0 )
-#define read_lock_irq(lock) \
-    do { local_irq_disable(); read_lock(lock); } while ( 0 )
-
-#define write_lock_irqsave(lock, flags) \
-    do { local_irq_save(flags); write_lock(lock); } while ( 0 )
-#define write_lock_irq(lock) \
-    do { local_irq_disable(); write_lock(lock); } while ( 0 )
-
-#define spin_unlock_irqrestore(lock, flags) \
-    do { spin_unlock(lock); local_irq_restore(flags); } while ( 0 )
-#define spin_unlock_irq(lock) \
-    do { spin_unlock(lock); local_irq_enable(); } while ( 0 )
-
-#define read_unlock_irqrestore(lock, flags) \
-    do { read_unlock(lock); local_irq_restore(flags); } while ( 0 )
-#define read_unlock_irq(lock) \
-    do { read_unlock(lock); local_irq_enable(); } while ( 0 )
-
-#define write_unlock_irqrestore(lock, flags) \
-    do { write_unlock(lock); local_irq_restore(flags); } while ( 0 )
-#define write_unlock_irq(lock) \
-    do { write_unlock(lock); local_irq_enable(); } while ( 0 )
-
-#ifdef CONFIG_SMP
-
 #include <asm/spinlock.h>
 
-#else
+typedef struct {
+    raw_spinlock_t raw;
+    s8 recurse_cpu;
+    u8 recurse_cnt;
+} spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED { _RAW_SPIN_LOCK_UNLOCKED, -1, 0 }
+#define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
+#define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
+
+typedef struct {
+    raw_rwlock_t raw;
+} rwlock_t;
 
-#if (__GNUC__ > 2)
-typedef struct { } spinlock_t;
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { }
-#else
-typedef struct { int gcc_is_buggy; } spinlock_t;
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 0 }
-#endif
+#define RW_LOCK_UNLOCKED { _RAW_RW_LOCK_UNLOCKED }
+#define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
+#define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
+
+void _spin_lock(spinlock_t *lock);
+void _spin_lock_irq(spinlock_t *lock);
+unsigned long _spin_lock_irqsave(spinlock_t *lock);
 
-#define spin_lock_init(lock)             do { } while(0)
-#define spin_is_locked(lock)             (0)
-#define _raw_spin_lock(lock)             (void)(lock)
-#define _raw_spin_trylock(lock)          ({1; })
-#define _raw_spin_unlock(lock)           do { } while(0)
-#define _raw_spin_lock_recursive(lock)   do { } while(0)
-#define _raw_spin_unlock_recursive(lock) do { } while(0)
+void _spin_unlock(spinlock_t *lock);
+void _spin_unlock_irq(spinlock_t *lock);
+void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
+
+int _spin_is_locked(spinlock_t *lock);
+int _spin_trylock(spinlock_t *lock);
+void _spin_barrier(spinlock_t *lock);
 
-#if (__GNUC__ > 2)
-typedef struct { } rwlock_t;
-#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { }
-#else
-typedef struct { int gcc_is_buggy; } rwlock_t;
-#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { 0 }
-#endif
+void _spin_lock_recursive(spinlock_t *lock);
+void _spin_unlock_recursive(spinlock_t *lock);
+
+void _read_lock(rwlock_t *lock);
+void _read_lock_irq(rwlock_t *lock);
+unsigned long _read_lock_irqsave(rwlock_t *lock);
+
+void _read_unlock(rwlock_t *lock);
+void _read_unlock_irq(rwlock_t *lock);
+void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
+
+void _write_lock(rwlock_t *lock);
+void _write_lock_irq(rwlock_t *lock);
+unsigned long _write_lock_irqsave(rwlock_t *lock);
 
-#define rwlock_init(lock)            do { } while(0)
-#define _raw_read_lock(lock)         (void)(lock) /* Not "unused variable". */
-#define _raw_read_unlock(lock)       do { } while(0)
-#define _raw_write_lock(lock)        (void)(lock) /* Not "unused variable". */
-#define _raw_write_unlock(lock)      do { } while(0)
-
-#endif
+void _write_unlock(rwlock_t *lock);
+void _write_unlock_irq(rwlock_t *lock);
+void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
 
-#define spin_lock(_lock)             _raw_spin_lock(_lock)
-#define spin_trylock(_lock)          _raw_spin_trylock(_lock)
-#define spin_unlock(_lock)           _raw_spin_unlock(_lock)
-#define spin_lock_recursive(_lock)   _raw_spin_lock_recursive(_lock)
-#define spin_unlock_recursive(_lock) _raw_spin_unlock_recursive(_lock)
-#define read_lock(_lock)             _raw_read_lock(_lock)
-#define read_unlock(_lock)           _raw_read_unlock(_lock)
-#define write_lock(_lock)            _raw_write_lock(_lock)
-#define write_unlock(_lock)          _raw_write_unlock(_lock)
+#define spin_lock(l)                  _spin_lock(l)
+#define spin_lock_irq(l)              _spin_lock_irq(l)
+#define spin_lock_irqsave(l, f)       ((f) = _spin_lock_irqsave(l))
+
+#define spin_unlock(l)                _spin_unlock(l)
+#define spin_unlock_irq(l)            _spin_unlock_irq(l)
+#define spin_unlock_irqrestore(l, f)  _spin_unlock_irqrestore(l, f)
+
+#define spin_is_locked(l)             _raw_spin_is_locked(&(l)->raw)
+#define spin_trylock(l)               _spin_trylock(l)
 
 /* Ensure a lock is quiescent between two critical operations. */
-static inline void spin_barrier(spinlock_t *lock)
-{
-    do { mb(); } while ( spin_is_locked(lock) );
-    mb();
-}
+#define spin_barrier(l)               _spin_barrier(l)
+
+/*
+ * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
+ * reentered recursively on the same CPU. All critical regions that may form
+ * part of a recursively-nested set must be protected by these forms. If there
+ * are any critical regions that cannot form part of such a set, they can use
+ * standard spin_[un]lock().
+ */
+#define spin_lock_recursive(l)        _spin_lock_recursive(l)
+#define spin_unlock_recursive(l)      _spin_unlock_recursive(l)
 
-#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
-#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
+#define read_lock(l)                  _read_lock(l)
+#define read_lock_irq(l)              _read_lock_irq(l)
+#define read_lock_irqsave(l, f)       ((f) = _read_lock_irqsave(l))
+
+#define read_unlock(l)                _read_unlock(l)
+#define read_unlock_irq(l)            _read_unlock_irq(l)
+#define read_unlock_irqrestore(l, f)  _read_unlock_irqrestore(l, f)
+
+#define write_lock(l)                 _write_lock(l)
+#define write_lock_irq(l)             _write_lock_irq(l)
+#define write_lock_irqsave(l, f)      ((f) = _write_lock_irqsave(l))
+
+#define write_unlock(l)               _write_unlock(l)
+#define write_unlock_irq(l)           _write_unlock_irq(l)
+#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)
 
 #endif /* __SPINLOCK_H__ */
```
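One visible change in the generic header is that _spin_lock_irqsave() now returns the saved flags instead of writing them through a macro argument; the spin_lock_irqsave(l, f) wrapper preserves the old call syntax. A brief usage sketch (the lock, counter, and function are invented for illustration):

```c
/* Usage sketch only: counter_lock, counter and bump_counter() are hypothetical. */
static DEFINE_SPINLOCK(counter_lock);
static unsigned long counter;

static void bump_counter(void)
{
    unsigned long flags;

    /* Expands to: flags = _spin_lock_irqsave(&counter_lock); */
    spin_lock_irqsave(&counter_lock, flags);
    counter++;
    spin_unlock_irqrestore(&counter_lock, flags);
}
```

spin_barrier(&counter_lock) remains available to wait until no CPU still holds the lock, just as the former inline spin_barrier() did.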