ia64/linux-2.6.18-xen.hg

lib/kernel_lock.c @ 782:9ab1c319531f

merge with linux-2.6.18-xen.hg
author Isaku Yamahata <yamahata@valinux.co.jp>
date Wed Jan 28 13:07:23 2009 +0900 (2009-01-28)
parents 831230e53067

/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#ifdef CONFIG_PREEMPT_BKL
/*
 * The 'big kernel semaphore'
 *
 * This mutex is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Note: code locked by this semaphore will only be serialized against
 * other code using the same locking facility. The code guarantees that
 * the task remains on the same CPU.
 *
 * Don't use in new code.
 */
static DECLARE_MUTEX(kernel_sem);
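
/*
 * Illustrative usage sketch: legacy callers simply bracket code that
 * still depends on the BKL, and may sleep inside the region because
 * the lock is transparently dropped and retaken around schedule():
 *
 *        lock_kernel();
 *        ... touch state that is still serialized by the BKL ...
 *        unlock_kernel();
 */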

/*
 * Re-acquire the kernel semaphore.
 *
 * This function is called with preemption off.
 *
 * We are executing in schedule() so the code must be extremely careful
 * about recursion, both due to the down() and due to the enabling of
 * preemption. schedule() will re-check the preemption flag after
 * reacquiring the semaphore.
 */
int __lockfunc __reacquire_kernel_lock(void)
{
        struct task_struct *task = current;
        int saved_lock_depth = task->lock_depth;

        BUG_ON(saved_lock_depth < 0);

        task->lock_depth = -1;
        preempt_enable_no_resched();

        down(&kernel_sem);

        preempt_disable();
        task->lock_depth = saved_lock_depth;

        return 0;
}
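
/*
 * Illustrative note: the scheduler pairs these two hooks roughly as
 *
 *        release_kernel_lock(prev);        // before the context switch
 *        ... switch to another task ...
 *        reacquire_kernel_lock(current);   // once this task runs again
 *
 * so a task that sleeps while holding the BKL does not keep other
 * tasks out of the kernel lock for the duration of its sleep.
 */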

void __lockfunc __release_kernel_lock(void)
{
        up(&kernel_sem);
}

/*
 * Getting the big kernel semaphore.
 */
void __lockfunc lock_kernel(void)
{
        struct task_struct *task = current;
        int depth = task->lock_depth + 1;

        if (likely(!depth))
                /*
                 * No recursion worries - we set up lock_depth _after_
                 * the down() has succeeded.
                 */
                down(&kernel_sem);

        task->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
        struct task_struct *task = current;

        BUG_ON(task->lock_depth < 0);

        if (likely(--task->lock_depth < 0))
                up(&kernel_sem);
}
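
/*
 * Illustrative trace of the depth counting above, starting from the
 * initial lock_depth of -1:
 *
 *        lock_kernel();          // lock_depth -1 -> 0, down(&kernel_sem)
 *          lock_kernel();        // lock_depth  0 -> 1, no down()
 *          unlock_kernel();      // lock_depth  1 -> 0, no up()
 *        unlock_kernel();        // lock_depth  0 -> -1, up(&kernel_sem)
 */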

#else

/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);


/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - _raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
        while (!_raw_spin_trylock(&kernel_flag)) {
                if (test_thread_flag(TIF_NEED_RESCHED))
                        return -EAGAIN;
                cpu_relax();
        }
        preempt_disable();
        return 0;
}
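
/*
 * Illustrative note: a -EAGAIN return sends schedule() back through its
 * task-selection path instead of leaving it spinning on a contended BKL
 * while a reschedule is already pending.
 */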

void __lockfunc __release_kernel_lock(void)
{
        _raw_spin_unlock(&kernel_flag);
        preempt_enable_no_resched();
}

/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (ie UP preemption), this all goes away because the
 * _raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
        preempt_disable();
        if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
                /*
                 * If preemption was disabled even before this
                 * was called, there's nothing we can be polite
                 * about - just spin.
                 */
                if (preempt_count() > 1) {
                        _raw_spin_lock(&kernel_flag);
                        return;
                }

                /*
                 * Otherwise, let's wait for the kernel lock
                 * with preemption enabled..
                 */
                do {
                        preempt_enable();
                        while (spin_is_locked(&kernel_flag))
                                cpu_relax();
                        preempt_disable();
                } while (!_raw_spin_trylock(&kernel_flag));
        }
}
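
/*
 * Illustrative design note: busy-waiting on spin_is_locked() with
 * preemption enabled means the waiting task can still be preempted, so
 * a contended BKL does not degenerate into a long preemption-off spin.
 */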

#else

/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
        _raw_spin_lock(&kernel_flag);
}
#endif

static inline void __unlock_kernel(void)
{
        /*
         * the BKL is not covered by lockdep, so we open-code the
         * unlocking sequence (and thus avoid the dep-chain ops):
         */
        _raw_spin_unlock(&kernel_flag);
        preempt_enable();
}
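
/*
 * Illustrative note: on a UP build the raw spinlock operations above
 * compile away, so this variant of the BKL reduces to the lock_depth
 * bookkeeping below, plus preempt_disable()/preempt_enable() when
 * CONFIG_PREEMPT is set.
 */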

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPUs.
 */
void __lockfunc lock_kernel(void)
{
        int depth = current->lock_depth + 1;
        if (likely(!depth))
                __lock_kernel();
        current->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
        BUG_ON(current->lock_depth < 0);
        if (likely(--current->lock_depth < 0))
                __unlock_kernel();
}

#endif

EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);