ia64/xen-unstable

annotate xen/common/rcupdate.c @ 19835:edfdeb150f27

Fix buildsystem to detect udev > version 124

udev removed the udevinfo symlink from versions higher than 123 and
xen's build-system could not detect if udev is in place and has the
required version.

Signed-off-by: Marc-A. Dahlhaus <mad@wol.de>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 25 13:02:37 2009 +0100 (2009-06-25)
parents 759d924af6d8
children
rev   line source
kaf24@13662 1 /*
kaf24@13662 2 * Read-Copy Update mechanism for mutual exclusion
kaf24@13662 3 *
kaf24@13662 4 * This program is free software; you can redistribute it and/or modify
kaf24@13662 5 * it under the terms of the GNU General Public License as published by
kaf24@13662 6 * the Free Software Foundation; either version 2 of the License, or
kaf24@13662 7 * (at your option) any later version.
kaf24@13662 8 *
kaf24@13662 9 * This program is distributed in the hope that it will be useful,
kaf24@13662 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
kaf24@13662 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
kaf24@13662 12 * GNU General Public License for more details.
kaf24@13662 13 *
kaf24@13662 14 * You should have received a copy of the GNU General Public License
kaf24@13662 15 * along with this program; if not, write to the Free Software
kaf24@13662 16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
kaf24@13662 17 *
kaf24@13662 18 * Copyright (C) IBM Corporation, 2001
kaf24@13662 19 *
kaf24@13662 20 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
kaf24@13662 21 * Manfred Spraul <manfred@colorfullife.com>
kaf24@13662 22 *
kaf24@13662 23 * Modifications for Xen: Jose Renato Santos
kaf24@13662 24 * Copyright (C) Hewlett-Packard, 2006
kaf24@13662 25 *
kaf24@13662 26 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
kaf24@13662 27 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
kaf24@13662 28 * Papers:
kaf24@13662 29 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
kaf24@13662 30 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
kaf24@13662 31 *
kaf24@13662 32 * For detailed explanation of Read-Copy Update mechanism see -
kaf24@13662 33 * http://lse.sourceforge.net/locking/rcupdate.html
kaf24@13662 34 */
kaf24@13662 35 #include <xen/types.h>
kaf24@13662 36 #include <xen/kernel.h>
kaf24@13662 37 #include <xen/init.h>
kaf24@13662 38 #include <xen/spinlock.h>
kaf24@13662 39 #include <xen/smp.h>
kaf24@13662 40 #include <xen/rcupdate.h>
kaf24@13662 41 #include <xen/sched.h>
kaf24@13662 42 #include <asm/atomic.h>
kaf24@13662 43 #include <xen/bitops.h>
kaf24@13662 44 #include <xen/percpu.h>
kaf24@13662 45 #include <xen/softirq.h>
kaf24@13662 46
/* Definition for rcupdate control block. */
struct rcu_ctrlblk rcu_ctrlblk = {
    /*
     * Batch counters start well below zero.
     * NOTE(review): -300 presumably chosen so that batch-number wraparound
     * is exercised early after boot — confirm against upstream Linux RCU.
     */
    .cur = -300,
    .completed = -300,
    .lock = SPIN_LOCK_UNLOCKED,
    .cpumask = CPU_MASK_NONE,
};

/* Per-cpu RCU state (callback lists, batch numbers, quiescent tracking). */
DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };

/* Default number of callbacks invoked per rcu_do_batch() pass. */
static int blimit = 10;
/* Queue length above which call_rcu() forces quiescent states (IPIs). */
static int qhimark = 10000;
/* Queue length below which the batch limit reverts from INT_MAX to blimit. */
static int qlowmark = 100;
/* Minimum queue growth between successive rounds of forced-QS IPIs. */
static int rsinterval = 1000;
kaf24@13662 61
kaf24@13662 62 static void force_quiescent_state(struct rcu_data *rdp,
kaf24@13662 63 struct rcu_ctrlblk *rcp)
kaf24@13662 64 {
kaf24@13662 65 cpumask_t cpumask;
kaf24@13662 66 raise_softirq(SCHEDULE_SOFTIRQ);
kaf24@13662 67 if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
kaf24@13662 68 rdp->last_rs_qlen = rdp->qlen;
kaf24@13662 69 /*
kaf24@13662 70 * Don't send IPI to itself. With irqs disabled,
kaf24@13662 71 * rdp->cpu is the current cpu.
kaf24@13662 72 */
kaf24@13662 73 cpumask = rcp->cpumask;
kaf24@13662 74 cpu_clear(rdp->cpu, cpumask);
kaf24@13662 75 cpumask_raise_softirq(cpumask, SCHEDULE_SOFTIRQ);
kaf24@13662 76 }
kaf24@13662 77 }
kaf24@13662 78
kaf24@13662 79 /**
kaf24@13662 80 * call_rcu - Queue an RCU callback for invocation after a grace period.
kaf24@13662 81 * @head: structure to be used for queueing the RCU updates.
kaf24@13662 82 * @func: actual update function to be invoked after the grace period
kaf24@13662 83 *
kaf24@13662 84 * The update function will be invoked some time after a full grace
kaf24@13662 85 * period elapses, in other words after all currently executing RCU
kaf24@13662 86 * read-side critical sections have completed. RCU read-side critical
kaf24@13662 87 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
kaf24@13662 88 * and may be nested.
kaf24@13662 89 */
kaf24@13662 90 void fastcall call_rcu(struct rcu_head *head,
kaf24@13662 91 void (*func)(struct rcu_head *rcu))
kaf24@13662 92 {
kaf24@13662 93 unsigned long flags;
kaf24@13662 94 struct rcu_data *rdp;
kaf24@13662 95
kaf24@13662 96 head->func = func;
kaf24@13662 97 head->next = NULL;
kaf24@13662 98 local_irq_save(flags);
kaf24@13662 99 rdp = &__get_cpu_var(rcu_data);
kaf24@13662 100 *rdp->nxttail = head;
kaf24@13662 101 rdp->nxttail = &head->next;
kaf24@13662 102 if (unlikely(++rdp->qlen > qhimark)) {
kaf24@13662 103 rdp->blimit = INT_MAX;
kaf24@13662 104 force_quiescent_state(rdp, &rcu_ctrlblk);
kaf24@13662 105 }
kaf24@13662 106 local_irq_restore(flags);
kaf24@13662 107 }
kaf24@13662 108
kaf24@13662 109 /*
kaf24@13662 110 * Invoke the completed RCU callbacks. They are expected to be in
kaf24@13662 111 * a per-cpu list.
kaf24@13662 112 */
kaf24@13662 113 static void rcu_do_batch(struct rcu_data *rdp)
kaf24@13662 114 {
kaf24@13662 115 struct rcu_head *next, *list;
kaf24@13662 116 int count = 0;
kaf24@13662 117
kaf24@13662 118 list = rdp->donelist;
kaf24@13662 119 while (list) {
kaf24@13662 120 next = rdp->donelist = list->next;
kaf24@13662 121 list->func(list);
kaf24@13662 122 list = next;
kaf24@13662 123 rdp->qlen--;
kaf24@13662 124 if (++count >= rdp->blimit)
kaf24@13662 125 break;
kaf24@13662 126 }
kaf24@13662 127 if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
kaf24@13662 128 rdp->blimit = blimit;
kaf24@13662 129 if (!rdp->donelist)
kaf24@13662 130 rdp->donetail = &rdp->donelist;
kaf24@13662 131 else
kaf24@13662 132 raise_softirq(RCU_SOFTIRQ);
kaf24@13662 133 }
kaf24@13662 134
kaf24@13662 135 /*
kaf24@13662 136 * Grace period handling:
kaf24@13662 137 * The grace period handling consists out of two steps:
kaf24@13662 138 * - A new grace period is started.
kaf24@13662 139 * This is done by rcu_start_batch. The start is not broadcasted to
kaf24@13662 140 * all cpus, they must pick this up by comparing rcp->cur with
kaf24@13662 141 * rdp->quiescbatch. All cpus are recorded in the
kaf24@13662 142 * rcu_ctrlblk.cpumask bitmap.
kaf24@13662 143 * - All cpus must go through a quiescent state.
kaf24@13662 144 * Since the start of the grace period is not broadcasted, at least two
kaf24@13662 145 * calls to rcu_check_quiescent_state are required:
kaf24@13662 146 * The first call just notices that a new grace period is running. The
kaf24@13662 147 * following calls check if there was a quiescent state since the beginning
kaf24@13662 148 * of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
kaf24@13662 149 * the bitmap is empty, then the grace period is completed.
kaf24@13662 150 * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
kaf24@13662 151 * period (if necessary).
kaf24@13662 152 */
kaf24@13662 153 /*
kaf24@13662 154 * Register a new batch of callbacks, and start it up if there is currently no
kaf24@13662 155 * active batch and the batch to be registered has not already occurred.
kaf24@13662 156 * Caller must hold rcu_ctrlblk.lock.
kaf24@13662 157 */
kaf24@13662 158 static void rcu_start_batch(struct rcu_ctrlblk *rcp)
kaf24@13662 159 {
kaf24@13662 160 if (rcp->next_pending &&
kaf24@13662 161 rcp->completed == rcp->cur) {
kaf24@13662 162 rcp->next_pending = 0;
kaf24@13662 163 /*
kaf24@13662 164 * next_pending == 0 must be visible in
kaf24@13662 165 * __rcu_process_callbacks() before it can see new value of cur.
kaf24@13662 166 */
kaf24@13662 167 smp_wmb();
kaf24@13662 168 rcp->cur++;
kaf24@13662 169
kaf24@13662 170 rcp->cpumask = cpu_online_map;
kaf24@13662 171 }
kaf24@13662 172 }
kaf24@13662 173
kaf24@13662 174 /*
kaf24@13662 175 * cpu went through a quiescent state since the beginning of the grace period.
kaf24@13662 176 * Clear it from the cpu mask and complete the grace period if it was the last
kaf24@13662 177 * cpu. Start another grace period if someone has further entries pending
kaf24@13662 178 */
kaf24@13662 179 static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
kaf24@13662 180 {
kaf24@13662 181 cpu_clear(cpu, rcp->cpumask);
kaf24@13662 182 if (cpus_empty(rcp->cpumask)) {
kaf24@13662 183 /* batch completed ! */
kaf24@13662 184 rcp->completed = rcp->cur;
kaf24@13662 185 rcu_start_batch(rcp);
kaf24@13662 186 }
kaf24@13662 187 }
kaf24@13662 188
/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch).  If so and if it already hasn't done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 *
 * Called from RCU softirq context; takes rcp->lock only for the rare
 * report-quiescent-state path.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
                                      struct rcu_data *rdp)
{
    if (rdp->quiescbatch != rcp->cur) {
        /* start new grace period: note it and wait for a later call to
         * observe a quiescent state before reporting. */
        rdp->qs_pending = 1;
        rdp->quiescbatch = rcp->cur;
        return;
    }

    /* Grace period already completed for this cpu?
     * qs_pending is checked instead of the actual bitmap to avoid
     * cacheline trashing.
     */
    if (!rdp->qs_pending)
        return;

    /* Reaching here means a quiescent state has occurred since the
     * grace period began; report it exactly once. */
    rdp->qs_pending = 0;

    spin_lock(&rcp->lock);
    /*
     * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
     * during cpu startup.  Ignore the quiescent state.
     */
    if (likely(rdp->quiescbatch == rcp->cur))
        cpu_quiet(rdp->cpu, rcp);

    spin_unlock(&rcp->lock);
}
kaf24@13662 223
kaf24@13662 224
/*
 * This does the RCU processing work from softirq context:
 * advance this cpu's callbacks through the nxtlist -> curlist -> donelist
 * pipeline, start a new batch if needed, track quiescent states, and
 * invoke any completed callbacks.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
                                    struct rcu_data *rdp)
{
    /* The grace period for the current batch has completed: move the
     * whole curlist onto the donelist for invocation below. */
    if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
        *rdp->donetail = rdp->curlist;
        rdp->donetail = rdp->curtail;
        rdp->curlist = NULL;
        rdp->curtail = &rdp->curlist;
    }

    /* nxtlist is appended to by call_rcu() with irqs off, so irqs must
     * be disabled while detaching it. */
    local_irq_disable();
    if (rdp->nxtlist && !rdp->curlist) {
        rdp->curlist = rdp->nxtlist;
        rdp->curtail = rdp->nxttail;
        rdp->nxtlist = NULL;
        rdp->nxttail = &rdp->nxtlist;
        local_irq_enable();

        /*
         * start the next batch of callbacks
         */

        /* determine batch number */
        rdp->batch = rcp->cur + 1;
        /* see the comment and corresponding wmb() in
         * the rcu_start_batch()
         */
        smp_rmb();

        if (!rcp->next_pending) {
            /* and start it/schedule start if it's a new batch */
            spin_lock(&rcp->lock);
            rcp->next_pending = 1;
            rcu_start_batch(rcp);
            spin_unlock(&rcp->lock);
        }
    } else {
        local_irq_enable();
    }
    rcu_check_quiescent_state(rcp, rdp);
    if (rdp->donelist)
        rcu_do_batch(rdp);
}
kaf24@13662 271
kaf24@13662 272 static void rcu_process_callbacks(void)
kaf24@13662 273 {
kaf24@13662 274 __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
kaf24@13662 275 }
kaf24@13662 276
kaf24@13662 277 static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
kaf24@13662 278 {
kaf24@13662 279 /* This cpu has pending rcu entries and the grace period
kaf24@13662 280 * for them has completed.
kaf24@13662 281 */
kaf24@13662 282 if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
kaf24@13662 283 return 1;
kaf24@13662 284
kaf24@13662 285 /* This cpu has no pending entries, but there are new entries */
kaf24@13662 286 if (!rdp->curlist && rdp->nxtlist)
kaf24@13662 287 return 1;
kaf24@13662 288
kaf24@13662 289 /* This cpu has finished callbacks to invoke */
kaf24@13662 290 if (rdp->donelist)
kaf24@13662 291 return 1;
kaf24@13662 292
kaf24@13662 293 /* The rcu core waits for a quiescent state from the cpu */
kaf24@13662 294 if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
kaf24@13662 295 return 1;
kaf24@13662 296
kaf24@13662 297 /* nothing to do */
kaf24@13662 298 return 0;
kaf24@13662 299 }
kaf24@13662 300
kaf24@13662 301 int rcu_pending(int cpu)
kaf24@13662 302 {
kaf24@13662 303 return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu));
kaf24@13662 304 }
kaf24@13662 305
kaf24@13662 306 /*
kaf24@13662 307 * Check to see if any future RCU-related work will need to be done
kaf24@13662 308 * by the current CPU, even if none need be done immediately, returning
kaf24@13662 309 * 1 if so. This function is part of the RCU implementation; it is -not-
kaf24@13662 310 * an exported member of the RCU API.
kaf24@13662 311 */
kaf24@13662 312 int rcu_needs_cpu(int cpu)
kaf24@13662 313 {
kaf24@13662 314 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
kaf24@13662 315
kaf24@13662 316 return (!!rdp->curlist || rcu_pending(cpu));
kaf24@13662 317 }
kaf24@13662 318
/*
 * Defer all RCU bookkeeping for @cpu to softirq context by raising
 * RCU_SOFTIRQ; the real work happens in rcu_process_callbacks().
 * (@cpu is unused here — the softirq runs on the current cpu.)
 */
void rcu_check_callbacks(int cpu)
{
    raise_softirq(RCU_SOFTIRQ);
}
kaf24@13662 323
kaf24@13662 324 static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
kaf24@13662 325 struct rcu_data *rdp)
kaf24@13662 326 {
kaf24@13662 327 memset(rdp, 0, sizeof(*rdp));
kaf24@13662 328 rdp->curtail = &rdp->curlist;
kaf24@13662 329 rdp->nxttail = &rdp->nxtlist;
kaf24@13662 330 rdp->donetail = &rdp->donelist;
kaf24@13662 331 rdp->quiescbatch = rcp->completed;
kaf24@13662 332 rdp->qs_pending = 0;
kaf24@13662 333 rdp->cpu = cpu;
kaf24@13662 334 rdp->blimit = blimit;
kaf24@13662 335 }
kaf24@13662 336
kaf24@13662 337 void __devinit rcu_online_cpu(int cpu)
kaf24@13662 338 {
kaf24@13662 339 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
kaf24@13662 340
kaf24@13662 341 rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
kaf24@13662 342 }
kaf24@13662 343
/*
 * Boot-time RCU initialisation: set up the boot cpu's per-cpu RCU data
 * and register the RCU softirq handler.
 */
void __init rcu_init(void)
{
    rcu_online_cpu(smp_processor_id());
    open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}