ia64/xen-unstable
changeset 18666:c003e5a23a4e
Clean up spinlock operations and compile as first-class functions.
This follows modern Linux, since apparently outlining spinlock
operations does not slow down execution. The cleanups will also allow
more convenient addition of diagnostic code.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author      Keir Fraser <keir.fraser@citrix.com>
date        Mon Oct 20 16:48:17 2008 +0100 (2008-10-20)
parents     824892134573
children    f4dab783b58b
files       xen/arch/x86/x86_64/mm.c
            xen/common/Makefile
            xen/common/spinlock.c
            xen/include/asm-ia64/linux-xen/asm/spinlock.h
            xen/include/asm-ia64/xenspinlock.h
            xen/include/asm-x86/spinlock.h
            xen/include/xen/spinlock.h
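For orientation, here is a minimal standalone sketch of the layering this changeset introduces: each architecture keeps only a raw lock type plus its _raw_* primitives, while the generic spinlock_t wrapper (carrying the recursion bookkeeping) and the outlined _spin_* functions live in common code. The raw lock below is a C11 atomic_flag stand-in, not Xen's per-arch implementation; only the type and function names mirror the patch, and the bodies are simplified.

#include <stdatomic.h>

/* "Arch" layer: raw lock type and raw primitives (toy implementation). */
typedef struct { atomic_flag held; } raw_spinlock_t;
#define _RAW_SPIN_LOCK_UNLOCKED { ATOMIC_FLAG_INIT }

static void _raw_spin_lock(raw_spinlock_t *l)
{
    while ( atomic_flag_test_and_set_explicit(&l->held, memory_order_acquire) )
        ; /* spin until the previous holder clears the flag */
}

static void _raw_spin_unlock(raw_spinlock_t *l)
{
    atomic_flag_clear_explicit(&l->held, memory_order_release);
}

/* "Common" layer: generic spinlock_t plus outlined (non-inline) operations. */
typedef struct {
    raw_spinlock_t raw;
    signed char    recurse_cpu;   /* CPU holding the lock recursively */
    unsigned char  recurse_cnt;   /* recursion depth */
} spinlock_t;
#define SPIN_LOCK_UNLOCKED { _RAW_SPIN_LOCK_UNLOCKED, -1, 0 }

void _spin_lock(spinlock_t *lock)   { _raw_spin_lock(&lock->raw); }
void _spin_unlock(spinlock_t *lock) { _raw_spin_unlock(&lock->raw); }

int main(void)
{
    spinlock_t lock = SPIN_LOCK_UNLOCKED;
    _spin_lock(&lock);
    /* ...critical section... */
    _spin_unlock(&lock);
    return 0;
}

The split mirrors the raw_spinlock_t/spinlock_t separation used by modern Linux, which is what makes the out-of-line common implementation in xen/common/spinlock.c possible.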
line diff
--- a/xen/arch/x86/x86_64/mm.c	Mon Oct 20 15:31:54 2008 +0100
+++ b/xen/arch/x86/x86_64/mm.c	Mon Oct 20 16:48:17 2008 +0100
@@ -252,8 +252,6 @@ void __init subarch_init_memory(void)
     BUILD_BUG_ON(offsetof(struct page_info, u.inuse._domain) !=
                  (offsetof(struct page_info, count_info) + sizeof(u32)));
     BUILD_BUG_ON((offsetof(struct page_info, count_info) & 7) != 0);
-    BUILD_BUG_ON(sizeof(struct page_info) !=
-                 (32 + BITS_TO_LONGS(NR_CPUS)*sizeof(long)));

     /* M2P table is mappable read-only by privileged domains. */
     for ( v = RDWR_MPT_VIRT_START;
--- a/xen/common/Makefile	Mon Oct 20 15:31:54 2008 +0100
+++ b/xen/common/Makefile	Mon Oct 20 16:48:17 2008 +0100
@@ -16,6 +16,7 @@ obj-y += sched_sedf.o
 obj-y += schedule.o
 obj-y += shutdown.o
 obj-y += softirq.o
+obj-y += spinlock.o
 obj-y += stop_machine.o
 obj-y += string.o
 obj-y += symbols.o
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/common/spinlock.c	Mon Oct 20 16:48:17 2008 +0100
@@ -0,0 +1,147 @@
+#include <xen/config.h>
+#include <xen/smp.h>
+#include <xen/spinlock.h>
+
+void _spin_lock(spinlock_t *lock)
+{
+    _raw_spin_lock(&lock->raw);
+}
+
+void _spin_lock_irq(spinlock_t *lock)
+{
+    local_irq_disable();
+    _raw_spin_lock(&lock->raw);
+}
+
+unsigned long _spin_lock_irqsave(spinlock_t *lock)
+{
+    unsigned long flags;
+    local_irq_save(flags);
+    _raw_spin_lock(&lock->raw);
+    return flags;
+}
+
+void _spin_unlock(spinlock_t *lock)
+{
+    _raw_spin_unlock(&lock->raw);
+}
+
+void _spin_unlock_irq(spinlock_t *lock)
+{
+    _raw_spin_unlock(&lock->raw);
+    local_irq_enable();
+}
+
+void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+    _raw_spin_unlock(&lock->raw);
+    local_irq_restore(flags);
+}
+
+int _spin_is_locked(spinlock_t *lock)
+{
+    return _raw_spin_is_locked(&lock->raw);
+}
+
+int _spin_trylock(spinlock_t *lock)
+{
+    return _raw_spin_trylock(&lock->raw);
+}
+
+void _spin_barrier(spinlock_t *lock)
+{
+    do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
+    mb();
+}
+
+void _spin_lock_recursive(spinlock_t *lock)
+{
+    int cpu = smp_processor_id();
+    if ( likely(lock->recurse_cpu != cpu) )
+    {
+        spin_lock(lock);
+        lock->recurse_cpu = cpu;
+    }
+    lock->recurse_cnt++;
+}
+
+void _spin_unlock_recursive(spinlock_t *lock)
+{
+    if ( likely(--lock->recurse_cnt == 0) )
+    {
+        lock->recurse_cpu = -1;
+        spin_unlock(lock);
+    }
+}
+
+void _read_lock(rwlock_t *lock)
+{
+    _raw_read_lock(&lock->raw);
+}
+
+void _read_lock_irq(rwlock_t *lock)
+{
+    local_irq_disable();
+    _raw_read_lock(&lock->raw);
+}
+
+unsigned long _read_lock_irqsave(rwlock_t *lock)
+{
+    unsigned long flags;
+    local_irq_save(flags);
+    _raw_read_lock(&lock->raw);
+    return flags;
+}
+
+void _read_unlock(rwlock_t *lock)
+{
+    _raw_read_unlock(&lock->raw);
+}
+
+void _read_unlock_irq(rwlock_t *lock)
+{
+    _raw_read_unlock(&lock->raw);
+    local_irq_enable();
+}
+
+void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+    _raw_read_unlock(&lock->raw);
+    local_irq_restore(flags);
+}
+
+void _write_lock(rwlock_t *lock)
+{
+    _raw_write_lock(&lock->raw);
+}
+
+void _write_lock_irq(rwlock_t *lock)
+{
+    local_irq_disable();
+    _raw_write_lock(&lock->raw);
+}
+
+unsigned long _write_lock_irqsave(rwlock_t *lock)
+{
+    unsigned long flags;
+    local_irq_save(flags);
+    _raw_write_lock(&lock->raw);
+    return flags;
+}
+
+void _write_unlock(rwlock_t *lock)
+{
+    _raw_write_unlock(&lock->raw);
+}
+
+void _write_unlock_irq(rwlock_t *lock)
+{
+    _raw_write_unlock(&lock->raw);
+    local_irq_enable();
+}
+
+void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+    _raw_write_unlock(&lock->raw);
+    local_irq_restore(flags);
+}
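As a usage-level illustration of the recursion bookkeeping now centralised in _spin_lock_recursive()/_spin_unlock_recursive() above, here is a hedged standalone sketch (toy lock type and a fixed smp_processor_id() stand-in, not Xen code): the owning CPU is recorded so that a nested acquisition on the same CPU only bumps the count, and the raw lock is released only when the count drops back to zero.

#include <assert.h>
#include <stdatomic.h>

typedef struct {
    atomic_flag raw;          /* stands in for the arch raw_spinlock_t */
    int         recurse_cpu;  /* CPU currently holding the lock, -1 if none */
    int         recurse_cnt;  /* nesting depth on that CPU */
} rspinlock_t;

static int smp_processor_id(void) { return 0; }   /* single-CPU stand-in */

static void rspin_lock(rspinlock_t *l)
{
    int cpu = smp_processor_id();

    if ( l->recurse_cpu != cpu )
    {
        /* First acquisition on this CPU: take the raw lock for real. */
        while ( atomic_flag_test_and_set_explicit(&l->raw, memory_order_acquire) )
            ;
        l->recurse_cpu = cpu;
    }
    l->recurse_cnt++;
}

static void rspin_unlock(rspinlock_t *l)
{
    if ( --l->recurse_cnt == 0 )
    {
        /* Outermost unlock: give the raw lock back. */
        l->recurse_cpu = -1;
        atomic_flag_clear_explicit(&l->raw, memory_order_release);
    }
}

int main(void)
{
    rspinlock_t l = { ATOMIC_FLAG_INIT, -1, 0 };

    rspin_lock(&l);
    rspin_lock(&l);               /* nested acquisition on the same CPU */
    assert(l.recurse_cnt == 2);
    rspin_unlock(&l);
    rspin_unlock(&l);             /* raw lock released only here */
    assert(l.recurse_cpu == -1);
    return 0;
}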
--- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h	Mon Oct 20 15:31:54 2008 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h	Mon Oct 20 16:48:17 2008 +0100
@@ -27,25 +27,16 @@ typedef struct {
 #ifdef DEBUG_SPINLOCK
 	void *locker;
 #endif
-#ifdef XEN
-	unsigned char recurse_cpu;
-	unsigned char recurse_cnt;
-#endif
-} spinlock_t;
+} raw_spinlock_t;

 #ifdef XEN
 #ifdef DEBUG_SPINLOCK
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 0, NULL, -1, 0 }
+#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 0, NULL }
 #else
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 0, -1, 0 }
+#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 0 }
 #endif
-static inline void spin_lock_init(spinlock_t *lock)
-{
-	*lock = ((spinlock_t)SPIN_LOCK_UNLOCKED);
-}
 #else
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 0 }
-#define spin_lock_init(x)	((x)->lock = 0)
+#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 0 }
 #endif

 #ifdef ASM_SUPPORTED
@@ -59,7 +50,7 @@ static inline void spin_lock_init(spinlo
 #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"

 static inline void
-_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+_raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
 {
 	register volatile unsigned int *ptr asm ("r31") = &lock->lock;

@@ -136,8 +127,8 @@ do {	\
 } while (0)
 #endif /* !ASM_SUPPORTED */

-#define spin_is_locked(x)	((x)->lock != 0)
-#define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
+#define _raw_spin_is_locked(x)	((x)->lock != 0)
+#define _raw_spin_unlock(x)	do { barrier(); (x)->lock = 0; } while (0)
 #define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
 #define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)

@@ -147,16 +138,15 @@ typedef struct {
 #ifdef CONFIG_PREEMPT
 	unsigned int break_lock;
 #endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { 0, 0 }
+} raw_rwlock_t;
+#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0, 0 }

-#define rwlock_init(x)		do { *(x) = (rwlock_t) RW_LOCK_UNLOCKED; } while(0)
 #define read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
 #define write_can_lock(rw)	(*(volatile int *)(rw) == 0)

 #define _raw_read_lock(rw)	\
 do {	\
-	rwlock_t *__read_lock_ptr = (rw);	\
+	raw_rwlock_t *__read_lock_ptr = (rw);	\
 	\
 	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {	\
 		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
@@ -167,7 +157,7 @@ do {	\

 #define _raw_read_unlock(rw)	\
 do {	\
-	rwlock_t *__read_lock_ptr = (rw);	\
+	raw_rwlock_t *__read_lock_ptr = (rw);	\
 	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
 } while (0)

@@ -230,7 +220,4 @@ do {	\
 	clear_bit(31, (x));	\
 })

-#ifdef XEN
-#include <asm/xenspinlock.h>
-#endif
 #endif /* _ASM_IA64_SPINLOCK_H */
--- a/xen/include/asm-ia64/xenspinlock.h	Mon Oct 20 15:31:54 2008 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-#ifndef _ASM_IA64_XENSPINLOCK_H
-#define _ASM_IA64_XENSPINLOCK_H
-
-/*
- * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
- * reentered recursively on the same CPU. All critical regions that may form
- * part of a recursively-nested set must be protected by these forms. If there
- * are any critical regions that cannot form part of such a set, they can use
- * standard spin_[un]lock().
- */
-#define _raw_spin_lock_recursive(_lock)            \
-    do {                                           \
-        int cpu = smp_processor_id();              \
-        if ( likely((_lock)->recurse_cpu != cpu) ) \
-        {                                          \
-            spin_lock(_lock);                      \
-            (_lock)->recurse_cpu = cpu;            \
-        }                                          \
-        (_lock)->recurse_cnt++;                    \
-    } while ( 0 )
-
-#define _raw_spin_unlock_recursive(_lock)          \
-    do {                                           \
-        if ( likely(--(_lock)->recurse_cnt == 0) ) \
-        {                                          \
-            (_lock)->recurse_cpu = -1;             \
-            spin_unlock(_lock);                    \
-        }                                          \
-    } while ( 0 )
-#endif /* _ASM_IA64_XENSPINLOCK_H */
--- a/xen/include/asm-x86/spinlock.h	Mon Oct 20 15:31:54 2008 +0100
+++ b/xen/include/asm-x86/spinlock.h	Mon Oct 20 16:48:17 2008 +0100
@@ -8,18 +8,16 @@

 typedef struct {
     volatile s16 lock;
-    s8 recurse_cpu;
-    u8 recurse_cnt;
-} spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 1, -1, 0 }
+} raw_spinlock_t;

-#define spin_lock_init(x)	do { *(x) = (spinlock_t) SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(x)	(*(volatile char *)(&(x)->lock) <= 0)
+#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 1 }

-static inline void _raw_spin_lock(spinlock_t *lock)
+#define _raw_spin_is_locked(x) \
+    (*(volatile char *)(&(x)->lock) <= 0)
+
+static inline void _raw_spin_lock(raw_spinlock_t *lock)
 {
-    __asm__ __volatile__ (
+    asm volatile (
         "1:  lock; decb %0         \n"
         "    js 2f                 \n"
         ".section .text.lock,\"ax\"\n"
@@ -31,81 +29,51 @@ static inline void _raw_spin_lock(spinlo
         : "=m" (lock->lock) : : "memory" );
 }

-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void _raw_spin_unlock(raw_spinlock_t *lock)
 {
-    ASSERT(spin_is_locked(lock));
-    __asm__ __volatile__ (
-        "movb $1,%0"
+    ASSERT(_raw_spin_is_locked(lock));
+    asm volatile (
+        "movb $1,%0"
         : "=m" (lock->lock) : : "memory" );
 }

-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int _raw_spin_trylock(raw_spinlock_t *lock)
 {
     char oldval;
-    __asm__ __volatile__(
+    asm volatile (
         "xchgb %b0,%1"
         :"=q" (oldval), "=m" (lock->lock)
-        :"0" (0) : "memory");
-    return oldval > 0;
+        :"0" (0) : "memory" );
+    return (oldval > 0);
 }

-/*
- * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
- * reentered recursively on the same CPU. All critical regions that may form
- * part of a recursively-nested set must be protected by these forms. If there
- * are any critical regions that cannot form part of such a set, they can use
- * standard spin_[un]lock().
- */
-#define _raw_spin_lock_recursive(_lock)            \
-    do {                                           \
-        int cpu = smp_processor_id();              \
-        if ( likely((_lock)->recurse_cpu != cpu) ) \
-        {                                          \
-            spin_lock(_lock);                      \
-            (_lock)->recurse_cpu = cpu;            \
-        }                                          \
-        (_lock)->recurse_cnt++;                    \
-    } while ( 0 )
-
-#define _raw_spin_unlock_recursive(_lock)          \
-    do {                                           \
-        if ( likely(--(_lock)->recurse_cnt == 0) ) \
-        {                                          \
-            (_lock)->recurse_cpu = -1;             \
-            spin_unlock(_lock);                    \
-        }                                          \
-    } while ( 0 )
-
-
 typedef struct {
     volatile unsigned int lock;
-} rwlock_t;
+} raw_rwlock_t;

-#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { RW_LOCK_BIAS }
-
-#define rwlock_init(x)	do { *(x) = (rwlock_t) RW_LOCK_UNLOCKED; } while(0)
+#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { RW_LOCK_BIAS }

 /*
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
  */
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void _raw_read_lock(raw_rwlock_t *rw)
 {
     __build_read_lock(rw, "__read_lock_failed");
 }

-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void _raw_write_lock(raw_rwlock_t *rw)
 {
     __build_write_lock(rw, "__write_lock_failed");
 }

-#define _raw_read_unlock(rw)                \
-    __asm__ __volatile__ (                  \
-        "lock ; incl %0" :                  \
+#define _raw_read_unlock(rw)                \
+    asm volatile (                          \
+        "lock ; incl %0" :                  \
        "=m" ((rw)->lock) : : "memory" )
-#define _raw_write_unlock(rw)                        \
-    __asm__ __volatile__ (                           \
-        "lock ; addl $" RW_LOCK_BIAS_STR ",%0" :     \
+#define _raw_write_unlock(rw)                        \
+    asm volatile (                                   \
+        "lock ; addl $" RW_LOCK_BIAS_STR ",%0" :     \
         "=m" ((rw)->lock) : : "memory" )

 #endif /* __ASM_SPINLOCK_H */
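The comment retained above ("a 32-bit counter with the high bit (sign) being the 'contended' bit") describes the biased-counter scheme behind __build_read_lock/__build_write_lock. Here is a hedged, portable sketch of the same idea using C11 atomics instead of the real locked x86 instructions; only the trylock variants are shown, and the toy_* names are illustrative, not Xen's.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

#define RW_LOCK_BIAS 0x01000000

typedef struct { atomic_int count; } toy_rwlock_t;

/* A reader takes one unit of the bias; a non-positive previous value means a
 * writer is present, so back out and report failure. */
static bool toy_read_trylock(toy_rwlock_t *rw)
{
    if ( atomic_fetch_sub_explicit(&rw->count, 1, memory_order_acquire) <= 0 )
    {
        atomic_fetch_add_explicit(&rw->count, 1, memory_order_relaxed);
        return false;
    }
    return true;
}

static void toy_read_unlock(toy_rwlock_t *rw)
{
    atomic_fetch_add_explicit(&rw->count, 1, memory_order_release);
}

/* A writer claims the whole bias at once, succeeding only when the counter is
 * still at its initial value, i.e. no readers and no other writer. */
static bool toy_write_trylock(toy_rwlock_t *rw)
{
    int expected = RW_LOCK_BIAS;
    return atomic_compare_exchange_strong_explicit(&rw->count, &expected, 0,
                                                   memory_order_acquire,
                                                   memory_order_relaxed);
}

static void toy_write_unlock(toy_rwlock_t *rw)
{
    atomic_fetch_add_explicit(&rw->count, RW_LOCK_BIAS, memory_order_release);
}

int main(void)
{
    toy_rwlock_t rw = { RW_LOCK_BIAS };

    assert(toy_read_trylock(&rw));     /* readers get in...           */
    assert(!toy_write_trylock(&rw));   /* ...and keep writers out     */
    toy_read_unlock(&rw);
    assert(toy_write_trylock(&rw));    /* uncontended writer succeeds */
    toy_write_unlock(&rw);
    return 0;
}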
--- a/xen/include/xen/spinlock.h	Mon Oct 20 15:31:54 2008 +0100
+++ b/xen/include/xen/spinlock.h	Mon Oct 20 16:48:17 2008 +0100
@@ -3,93 +3,95 @@

 #include <xen/config.h>
 #include <asm/system.h>
-
-#define spin_lock_irqsave(lock, flags) \
-    do { local_irq_save(flags); spin_lock(lock); } while ( 0 )
-#define spin_lock_irq(lock) \
-    do { local_irq_disable(); spin_lock(lock); } while ( 0 )
-
-#define read_lock_irqsave(lock, flags) \
-    do { local_irq_save(flags); read_lock(lock); } while ( 0 )
-#define read_lock_irq(lock) \
-    do { local_irq_disable(); read_lock(lock); } while ( 0 )
-
-#define write_lock_irqsave(lock, flags) \
-    do { local_irq_save(flags); write_lock(lock); } while ( 0 )
-#define write_lock_irq(lock) \
-    do { local_irq_disable(); write_lock(lock); } while ( 0 )
-
-#define spin_unlock_irqrestore(lock, flags) \
-    do { spin_unlock(lock); local_irq_restore(flags); } while ( 0 )
-#define spin_unlock_irq(lock) \
-    do { spin_unlock(lock); local_irq_enable(); } while ( 0 )
-
-#define read_unlock_irqrestore(lock, flags) \
-    do { read_unlock(lock); local_irq_restore(flags); } while ( 0 )
-#define read_unlock_irq(lock) \
-    do { read_unlock(lock); local_irq_enable(); } while ( 0 )
-
-#define write_unlock_irqrestore(lock, flags) \
-    do { write_unlock(lock); local_irq_restore(flags); } while ( 0 )
-#define write_unlock_irq(lock) \
-    do { write_unlock(lock); local_irq_enable(); } while ( 0 )
-
-#ifdef CONFIG_SMP
-
 #include <asm/spinlock.h>

-#else
-
-#if (__GNUC__ > 2)
-typedef struct { } spinlock_t;
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { }
-#else
-typedef struct { int gcc_is_buggy; } spinlock_t;
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 0 }
-#endif
-
-#define spin_lock_init(lock)             do { } while(0)
-#define spin_is_locked(lock)             (0)
-#define _raw_spin_lock(lock)             (void)(lock)
-#define _raw_spin_trylock(lock)          ({1; })
-#define _raw_spin_unlock(lock)           do { } while(0)
-#define _raw_spin_lock_recursive(lock)   do { } while(0)
-#define _raw_spin_unlock_recursive(lock) do { } while(0)
+typedef struct {
+    raw_spinlock_t raw;
+    s8 recurse_cpu;
+    u8 recurse_cnt;
+} spinlock_t;

-#if (__GNUC__ > 2)
-typedef struct { } rwlock_t;
-#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { }
-#else
-typedef struct { int gcc_is_buggy; } rwlock_t;
-#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { 0 }
-#endif
+#define SPIN_LOCK_UNLOCKED { _RAW_SPIN_LOCK_UNLOCKED, -1, 0 }
+#define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
+#define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)

-#define rwlock_init(lock)       do { } while(0)
-#define _raw_read_lock(lock)    (void)(lock) /* Not "unused variable". */
-#define _raw_read_unlock(lock)  do { } while(0)
-#define _raw_write_lock(lock)   (void)(lock) /* Not "unused variable". */
-#define _raw_write_unlock(lock) do { } while(0)
-
-#endif
+typedef struct {
+    raw_rwlock_t raw;
+} rwlock_t;

-#define spin_lock(_lock)             _raw_spin_lock(_lock)
-#define spin_trylock(_lock)          _raw_spin_trylock(_lock)
-#define spin_unlock(_lock)           _raw_spin_unlock(_lock)
-#define spin_lock_recursive(_lock)   _raw_spin_lock_recursive(_lock)
-#define spin_unlock_recursive(_lock) _raw_spin_unlock_recursive(_lock)
-#define read_lock(_lock)             _raw_read_lock(_lock)
-#define read_unlock(_lock)           _raw_read_unlock(_lock)
-#define write_lock(_lock)            _raw_write_lock(_lock)
-#define write_unlock(_lock)          _raw_write_unlock(_lock)
+#define RW_LOCK_UNLOCKED { _RAW_RW_LOCK_UNLOCKED }
+#define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
+#define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
+
+void _spin_lock(spinlock_t *lock);
+void _spin_lock_irq(spinlock_t *lock);
+unsigned long _spin_lock_irqsave(spinlock_t *lock);
+
+void _spin_unlock(spinlock_t *lock);
+void _spin_unlock_irq(spinlock_t *lock);
+void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
+
+int _spin_is_locked(spinlock_t *lock);
+int _spin_trylock(spinlock_t *lock);
+void _spin_barrier(spinlock_t *lock);
+
+void _spin_lock_recursive(spinlock_t *lock);
+void _spin_unlock_recursive(spinlock_t *lock);
+
+void _read_lock(rwlock_t *lock);
+void _read_lock_irq(rwlock_t *lock);
+unsigned long _read_lock_irqsave(rwlock_t *lock);
+
+void _read_unlock(rwlock_t *lock);
+void _read_unlock_irq(rwlock_t *lock);
+void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
+
+void _write_lock(rwlock_t *lock);
+void _write_lock_irq(rwlock_t *lock);
+unsigned long _write_lock_irqsave(rwlock_t *lock);
+
+void _write_unlock(rwlock_t *lock);
+void _write_unlock_irq(rwlock_t *lock);
+void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
+
+#define spin_lock(l)                  _spin_lock(l)
+#define spin_lock_irq(l)              _spin_lock_irq(l)
+#define spin_lock_irqsave(l, f)       ((f) = _spin_lock_irqsave(l))
+
+#define spin_unlock(l)                _spin_unlock(l)
+#define spin_unlock_irq(l)            _spin_unlock_irq(l)
+#define spin_unlock_irqrestore(l, f)  _spin_unlock_irqrestore(l, f)
+
+#define spin_is_locked(l)             _raw_spin_is_locked(&(l)->raw)
+#define spin_trylock(l)               _spin_trylock(l)

 /* Ensure a lock is quiescent between two critical operations. */
-static inline void spin_barrier(spinlock_t *lock)
-{
-    do { mb(); } while ( spin_is_locked(lock) );
-    mb();
-}
+#define spin_barrier(l)               _spin_barrier(l)

-#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
-#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
+/*
+ * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
+ * reentered recursively on the same CPU. All critical regions that may form
+ * part of a recursively-nested set must be protected by these forms. If there
+ * are any critical regions that cannot form part of such a set, they can use
+ * standard spin_[un]lock().
+ */
+#define spin_lock_recursive(l)        _spin_lock_recursive(l)
+#define spin_unlock_recursive(l)      _spin_unlock_recursive(l)
+
+#define read_lock(l)                  _read_lock(l)
+#define read_lock_irq(l)              _read_lock_irq(l)
+#define read_lock_irqsave(l, f)       ((f) = _read_lock_irqsave(l))
+
+#define read_unlock(l)                _read_unlock(l)
+#define read_unlock_irq(l)            _read_unlock_irq(l)
+#define read_unlock_irqrestore(l, f)  _read_unlock_irqrestore(l, f)
+
+#define write_lock(l)                 _write_lock(l)
+#define write_lock_irq(l)             _write_lock_irq(l)
+#define write_lock_irqsave(l, f)      ((f) = _write_lock_irqsave(l))
+
+#define write_unlock(l)               _write_unlock(l)
+#define write_unlock_irq(l)           _write_unlock_irq(l)
+#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)

 #endif /* __SPINLOCK_H__ */