lib/rwsem.c, from the ia64/linux-2.6.18-xen.hg repository (annotated at changeset 871:9cbcc9008446).

/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

EXPORT_SYMBOL(__init_rwsem);

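/*
 * Illustrative usage sketch (not part of the original file): a hypothetical
 * caller protecting shared data with an rw_semaphore.  The names my_sem,
 * my_data, my_reader() and my_writer() are made up for the example; the
 * calls themselves (DECLARE_RWSEM, down_read, up_read, down_write, up_write)
 * are the standard <linux/rwsem.h> interface whose slow paths live below.
 */
#if 0	/* example only */
static DECLARE_RWSEM(my_sem);
static int my_data;

static int my_reader(void)
{
	int val;

	down_read(&my_sem);	/* shared: many readers may hold this at once */
	val = my_data;
	up_read(&my_sem);
	return val;
}

static void my_writer(int val)
{
	down_write(&my_sem);	/* exclusive: waits for all readers and writers */
	my_data = val;
	up_write(&my_sem);
}
#endif
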
struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	unsigned int flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};

/*
 * handle the lock release when there are processes blocked on it that can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	signed long oldcount, woken, loop;

	if (downgrading)
		goto dont_wake_writers;

	/* if we came through an up_xxxx() call, we only wake someone up
	 * if we can transition the active part of the count from 0 -> 1
	 */
 try_again:
	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
						- RWSEM_ACTIVE_BIAS;
	if (oldcount & RWSEM_ACTIVE_MASK)
		goto undo;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	/* try to grant a single write lock if there's a writer at the front
	 * of the queue - note we leave the 'active part' of the count
	 * incremented by 1 and the waiting part incremented by 0x00010000
	 */
	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
		goto readers_only;

	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
	 * It is allocated on the waiter's stack and may become invalid at
	 * any time after that point (due to a wakeup from another source).
	 */
	list_del(&waiter->list);
	tsk = waiter->task;
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	goto out;

	/* don't want to wake any writers */
 dont_wake_writers:
	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
		goto out;

	/* grant an infinite number of read locks to the readers at the front
	 * of the queue
	 * - note we increment the 'active part' of the count by the number of
	 *   readers before waking any processes up
	 */
 readers_only:
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->flags & RWSEM_WAITING_FOR_READ);

	loop = woken;
	woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
	if (!downgrading)
		/* we'd already done one increment earlier */
		woken -= RWSEM_ACTIVE_BIAS;

	rwsem_atomic_add(woken, sem);

	next = sem->wait_list.next;
	for (; loop > 0; loop--) {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	}

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;

	/* undo the change to count, but check for a transition 1->0 */
 undo:
	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
		goto out;
	goto try_again;
}

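/*
 * Worked illustration of the count word manipulated above (added for
 * exposition; the constants live in the per-architecture rwsem.h, and the
 * values below are the usual 32-bit definitions, stated here as an
 * assumption):
 *
 *	RWSEM_UNLOCKED_VALUE	0x00000000
 *	RWSEM_ACTIVE_BIAS	0x00000001	(one active holder)
 *	RWSEM_ACTIVE_MASK	0x0000ffff	(the 'active part')
 *	RWSEM_WAITING_BIAS	0xffff0000	(i.e. -0x00010000 per waiter)
 *
 * So, for example:
 *	unlocked		count = 0x00000000
 *	two readers holding	count = 0x00000002
 *	one writer holding	count = 0xffff0001
 * Each task asleep on the queue contributes RWSEM_WAITING_BIAS while it
 * waits, so one reader holding plus one queued waiter also gives
 * 0x00000001 + 0xffff0000 = 0xffff0001.
 */
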
/*
 * wait for a lock to be granted
 */
static inline struct rw_semaphore *
rwsem_down_failed_common(struct rw_semaphore *sem,
			struct rwsem_waiter *waiter, signed long adjustment)
{
	struct task_struct *tsk = current;
	signed long count;

	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	spin_lock_irq(&sem->wait_lock);
	waiter->task = tsk;
	get_task_struct(tsk);

	list_add_tail(&waiter->list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively read-locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* if there are no active locks, wake the front queued process(es) up */
	if (!(count & RWSEM_ACTIVE_MASK))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter->task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

	return sem;
}

/*
 * wait for the read lock to be granted
 */
struct rw_semaphore fastcall __sched *
rwsem_down_read_failed(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;

	waiter.flags = RWSEM_WAITING_FOR_READ;
	rwsem_down_failed_common(sem, &waiter,
				RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
	return sem;
}

/*
 * wait for the write lock to be granted
 */
struct rw_semaphore fastcall __sched *
rwsem_down_write_failed(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;

	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);

	return sem;
}

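/*
 * For context (not part of the original file): the functions above are only
 * the contended slow paths.  The per-architecture headers (for example
 * include/asm-i386/rwsem.h) implement the fast paths inline, usually in
 * assembly; the sketch below is a simplified C rendering of that hand-off
 * and should be read as an approximation, not the real implementation.
 */
#if 0	/* simplified fast-path sketch */
static inline void sketch_down_read(struct rw_semaphore *sem)
{
	/* one more active reader; a negative result means a writer is involved */
	if (rwsem_atomic_update(RWSEM_ACTIVE_READ_BIAS, sem) < 0)
		rwsem_down_read_failed(sem);
}

static inline void sketch_down_write(struct rw_semaphore *sem)
{
	/* claim active + waiting bias; any other result means contention */
	if (rwsem_atomic_update(RWSEM_ACTIVE_WRITE_BIAS, sem) !=
	    RWSEM_ACTIVE_WRITE_BIAS)
		rwsem_down_write_failed(sem);
}
#endif
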
/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}

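/*
 * Illustrative usage sketch (not part of the original file): how a caller
 * typically reaches rwsem_downgrade_wake(), via downgrade_write() from
 * <linux/rwsem.h>.  The function name my_update() is made up for the
 * example.
 */
#if 0	/* example only */
static void my_update(struct rw_semaphore *sem)
{
	down_write(sem);	/* exclusive while modifying */
	/* ... modify the protected data ... */
	downgrade_write(sem);	/* keep it locked, but let readers back in */
	/* ... keep reading the data just written ... */
	up_read(sem);
}
#endif
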
EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);