ia64/xen-unstable
changeset 18714:0358305c6883
spinlock: Add debug-build checks for IRQ-safe spinlocks.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
| author   | Keir Fraser <keir.fraser@citrix.com> |
|----------|--------------------------------------|
| date     | Thu Oct 23 11:53:52 2008 +0100 (2008-10-23) |
| parents  | 4941c5a14598 |
| children | 5bca96f74d59 |
| files    | xen/arch/x86/setup.c xen/common/spinlock.c xen/include/xen/spinlock.h |
--- a/xen/arch/x86/setup.c Thu Oct 23 11:40:59 2008 +0100
+++ b/xen/arch/x86/setup.c Thu Oct 23 11:53:52 2008 +0100
@@ -1059,6 +1059,8 @@ void __init __start_xen(unsigned long mb
                          cmdline) != 0)
         panic("Could not set up DOM0 guest OS\n");
 
+    spin_debug_enable();
+
     /* Scrub RAM that is still free and so may go to an unprivileged domain. */
     scrub_heap_pages();
 
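The checks start out disabled and are only switched on at this point, once DOM0 has been constructed. The changes below also provide spin_debug_disable(); a rough sketch of how a code path that knowingly mixes IRQ contexts might bracket itself with the enable/disable pair (the function name is hypothetical, not part of this changeset):

```c
#include <xen/spinlock.h>

/* Hypothetical caller, for illustration only: temporarily suppress the
 * IRQ-safety checks around a region that deliberately takes liberties,
 * then re-enable them afterwards. */
void hypothetical_liberal_path(void)
{
    spin_debug_disable();   /* atomic_dec(): check_lock() becomes a no-op */
    /* ... acquire locks without regard to their recorded IRQ-safety ... */
    spin_debug_enable();    /* atomic_inc(): checks take effect again */
}
```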
--- a/xen/common/spinlock.c Thu Oct 23 11:40:59 2008 +0100
+++ b/xen/common/spinlock.c Thu Oct 23 11:53:52 2008 +0100
@@ -1,9 +1,48 @@
 #include <xen/config.h>
+#include <xen/irq.h>
 #include <xen/smp.h>
 #include <xen/spinlock.h>
 
+#ifndef NDEBUG
+
+static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
+
+static void check_lock(struct lock_debug *debug)
+{
+    int irq_safe = !local_irq_is_enabled();
+
+    if ( unlikely(atomic_read(&spin_debug) <= 0) )
+        return;
+
+    /* A few places take liberties with this. */
+    /* BUG_ON(in_irq() && !irq_safe); */
+
+    if ( unlikely(debug->irq_safe != irq_safe) )
+    {
+        int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
+        BUG_ON(seen == !irq_safe);
+    }
+}
+
+void spin_debug_enable(void)
+{
+    atomic_inc(&spin_debug);
+}
+
+void spin_debug_disable(void)
+{
+    atomic_dec(&spin_debug);
+}
+
+#else /* defined(NDEBUG) */
+
+#define check_lock(l) ((void)0)
+
+#endif
+
 void _spin_lock(spinlock_t *lock)
 {
+    check_lock(&lock->debug);
     _raw_spin_lock(&lock->raw);
 }
 
@@ -11,6 +50,7 @@ void _spin_lock_irq(spinlock_t *lock)
 {
     ASSERT(local_irq_is_enabled());
     local_irq_disable();
+    check_lock(&lock->debug);
     _raw_spin_lock(&lock->raw);
 }
 
@@ -18,6 +58,7 @@ unsigned long _spin_lock_irqsave(spinloc
 {
     unsigned long flags;
     local_irq_save(flags);
+    check_lock(&lock->debug);
     _raw_spin_lock(&lock->raw);
     return flags;
 }
@@ -41,16 +82,19 @@ void _spin_unlock_irqrestore(spinlock_t
 
 int _spin_is_locked(spinlock_t *lock)
 {
+    check_lock(&lock->debug);
     return _raw_spin_is_locked(&lock->raw);
 }
 
 int _spin_trylock(spinlock_t *lock)
 {
+    check_lock(&lock->debug);
     return _raw_spin_trylock(&lock->raw);
 }
 
 void _spin_barrier(spinlock_t *lock)
 {
+    check_lock(&lock->debug);
     do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
     mb();
 }
@@ -70,6 +114,8 @@ void _spin_lock_recursive(spinlock_t *lo
     /* Don't allow overflow of recurse_cpu field. */
     BUILD_BUG_ON(NR_CPUS > 0xfffu);
 
+    check_lock(&lock->debug);
+
     if ( likely(lock->recurse_cpu != cpu) )
     {
         spin_lock(lock);
@@ -92,6 +138,7 @@ void _spin_unlock_recursive(spinlock_t *
 
 void _read_lock(rwlock_t *lock)
 {
+    check_lock(&lock->debug);
     _raw_read_lock(&lock->raw);
 }
 
@@ -99,6 +146,7 @@ void _read_lock_irq(rwlock_t *lock)
 {
     ASSERT(local_irq_is_enabled());
     local_irq_disable();
+    check_lock(&lock->debug);
     _raw_read_lock(&lock->raw);
 }
 
@@ -106,6 +154,7 @@ unsigned long _read_lock_irqsave(rwlock_
 {
     unsigned long flags;
     local_irq_save(flags);
+    check_lock(&lock->debug);
     _raw_read_lock(&lock->raw);
     return flags;
 }
@@ -129,6 +178,7 @@ void _read_unlock_irqrestore(rwlock_t *l
 
 void _write_lock(rwlock_t *lock)
 {
+    check_lock(&lock->debug);
     _raw_write_lock(&lock->raw);
 }
 
@@ -136,6 +186,7 @@ void _write_lock_irq(rwlock_t *lock)
 {
     ASSERT(local_irq_is_enabled());
     local_irq_disable();
+    check_lock(&lock->debug);
     _raw_write_lock(&lock->raw);
 }
 
@@ -143,6 +194,7 @@ unsigned long _write_lock_irqsave(rwlock
 {
     unsigned long flags;
     local_irq_save(flags);
+    check_lock(&lock->debug);
     _raw_write_lock(&lock->raw);
     return flags;
 }
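check_lock() latches whichever IRQ state it observes first for a given lock (the cmpxchg replaces the initial -1) and hits BUG_ON() if a later acquisition arrives with the opposite state, regardless of which path runs first. A minimal sketch of the inconsistency this catches; the lock and both callers are hypothetical, not part of this changeset:

```c
#include <xen/spinlock.h>

static DEFINE_SPINLOCK(example_lock);   /* hypothetical lock */

static void irq_safe_path(void)
{
    unsigned long flags;

    /* IRQs are disabled across the acquisition, so check_lock()
     * records irq_safe == 1 for this lock. */
    spin_lock_irqsave(&example_lock, flags);
    /* ... */
    spin_unlock_irqrestore(&example_lock, flags);
}

static void irq_unsafe_path(void)
{
    /* IRQs left enabled: check_lock() now sees irq_safe == 0, which
     * conflicts with the state recorded above, so a debug build hits
     * the BUG_ON() (once spin_debug_enable() has been called). */
    spin_lock(&example_lock);
    /* ... */
    spin_unlock(&example_lock);
}
```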
--- a/xen/include/xen/spinlock.h Thu Oct 23 11:40:59 2008 +0100
+++ b/xen/include/xen/spinlock.h Thu Oct 23 11:53:52 2008 +0100
@@ -5,21 +5,38 @@
 #include <asm/system.h>
 #include <asm/spinlock.h>
 
+#ifndef NDEBUG
+struct lock_debug {
+    int irq_safe; /* +1: IRQ-safe; 0: not IRQ-safe; -1: don't know yet */
+};
+#define _LOCK_DEBUG { -1 }
+void spin_debug_enable(void);
+void spin_debug_disable(void);
+#else
+struct lock_debug { };
+#define _LOCK_DEBUG { }
+#define spin_debug_enable() ((void)0)
+#define spin_debug_disable() ((void)0)
+#endif
+
 typedef struct {
     raw_spinlock_t raw;
     u16 recurse_cpu:12;
     u16 recurse_cnt:4;
+    struct lock_debug debug;
 } spinlock_t;
 
-#define SPIN_LOCK_UNLOCKED { _RAW_SPIN_LOCK_UNLOCKED, 0xfffu, 0 }
+
+#define SPIN_LOCK_UNLOCKED { _RAW_SPIN_LOCK_UNLOCKED, 0xfffu, 0, _LOCK_DEBUG }
 #define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
 #define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
 
 typedef struct {
     raw_rwlock_t raw;
+    struct lock_debug debug;
 } rwlock_t;
 
-#define RW_LOCK_UNLOCKED { _RAW_RW_LOCK_UNLOCKED }
+#define RW_LOCK_UNLOCKED { _RAW_RW_LOCK_UNLOCKED, _LOCK_DEBUG }
 #define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
 #define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
 
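Because _LOCK_DEBUG is folded into SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED, existing lock declarations pick up the new debug field without any source changes; in a debug build the field simply starts at -1 ("don't know yet") until the first acquisition. An illustrative usage with hypothetical lock names:

```c
#include <xen/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);      /* spinlock_t, debug.irq_safe == -1 */
static DEFINE_RWLOCK(demo_rwlock);      /* rwlock_t,   debug.irq_safe == -1 */

static spinlock_t runtime_lock;

static void demo_init(void)
{
    /* Struct-copies SPIN_LOCK_UNLOCKED, including the debug initialiser. */
    spin_lock_init(&runtime_lock);
}
```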