ia64/linux-2.6.18-xen.hg

annotate kernel/lockdep.c @ 798:b02a90bf5bbc

ACPI: Backport missing part for T-State MSR support

Part of the below kernel commit was missed while backporting T-State
support.

commit f79f06ab9f86d7203006d2ec8992ac80df36a34e
Author: Zhao Yakui <yakui.zhao@intel.com>
Date: Thu Nov 15 17:06:36 2007 +0800

ACPI: Enable MSR (FixedHW) support for T-States

Add throttling control via MSR when T-states use
the FixedHW Control Status registers.

Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
Signed-off-by: Li Shaohua <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>

Signed-off-by: Wei Gang <gang.wei@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Mar 02 10:53:59 2009 +0000 (2009-03-02)
parents 831230e53067
children
rev   line source
ian@0 1 /*
ian@0 2 * kernel/lockdep.c
ian@0 3 *
ian@0 4 * Runtime locking correctness validator
ian@0 5 *
ian@0 6 * Started by Ingo Molnar:
ian@0 7 *
ian@0 8 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
ian@0 9 *
ian@0 10 * this code maps all the lock dependencies as they occur in a live kernel
ian@0 11 * and will warn about the following classes of locking bugs:
ian@0 12 *
ian@0 13 * - lock inversion scenarios
ian@0 14 * - circular lock dependencies
ian@0 15 * - hardirq/softirq safe/unsafe locking bugs
ian@0 16 *
ian@0 17 * Bugs are reported even if the current locking scenario does not cause
ian@0 18 * any deadlock at this point.
ian@0 19 *
ian@0 20 * I.e. if anytime in the past two locks were taken in a different order,
ian@0 21 * even if it happened for another task, even if those were different
ian@0 22 * locks (but of the same class as this lock), this code will detect it.
ian@0 23 *
ian@0 24 * Thanks to Arjan van de Ven for coming up with the initial idea of
ian@0 25 * mapping lock dependencies runtime.
ian@0 26 */
ian@0 27 #include <linux/mutex.h>
ian@0 28 #include <linux/sched.h>
ian@0 29 #include <linux/delay.h>
ian@0 30 #include <linux/module.h>
ian@0 31 #include <linux/proc_fs.h>
ian@0 32 #include <linux/seq_file.h>
ian@0 33 #include <linux/spinlock.h>
ian@0 34 #include <linux/kallsyms.h>
ian@0 35 #include <linux/interrupt.h>
ian@0 36 #include <linux/stacktrace.h>
ian@0 37 #include <linux/debug_locks.h>
ian@0 38 #include <linux/irqflags.h>
ian@0 39
ian@0 40 #include <asm/sections.h>
ian@0 41
ian@0 42 #include "lockdep_internals.h"
ian@0 43
ian@0 44 /*
ian@0 45 * hash_lock: protects the lockdep hashes and class/list/hash allocators.
ian@0 46 *
ian@0 47 * This is one of the rare exceptions where it's justified
ian@0 48 * to use a raw spinlock - we really dont want the spinlock
ian@0 49 * code to recurse back into the lockdep code.
ian@0 50 */
ian@0 51 static raw_spinlock_t hash_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
ian@0 52
ian@0 53 static int lockdep_initialized;
ian@0 54
ian@0 55 unsigned long nr_list_entries;
ian@0 56 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
ian@0 57
ian@0 58 /*
ian@0 59 * Allocate a lockdep entry. (assumes hash_lock held, returns
ian@0 60 * with NULL on failure)
ian@0 61 */
ian@0 62 static struct lock_list *alloc_list_entry(void)
ian@0 63 {
ian@0 64 if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
ian@0 65 __raw_spin_unlock(&hash_lock);
ian@0 66 debug_locks_off();
ian@0 67 printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
ian@0 68 printk("turning off the locking correctness validator.\n");
ian@0 69 return NULL;
ian@0 70 }
ian@0 71 return list_entries + nr_list_entries++;
ian@0 72 }
ian@0 73
ian@0 74 /*
ian@0 75 * All data structures here are protected by the global debug_lock.
ian@0 76 *
ian@0 77 * Mutex key structs only get allocated, once during bootup, and never
ian@0 78 * get freed - this significantly simplifies the debugging code.
ian@0 79 */
ian@0 80 unsigned long nr_lock_classes;
ian@0 81 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
ian@0 82
ian@0 83 /*
ian@0 84 * We keep a global list of all lock classes. The list only grows,
ian@0 85 * never shrinks. The list is only accessed with the lockdep
ian@0 86 * spinlock lock held.
ian@0 87 */
ian@0 88 LIST_HEAD(all_lock_classes);
ian@0 89
ian@0 90 /*
ian@0 91 * The lockdep classes are in a hash-table as well, for fast lookup:
ian@0 92 */
ian@0 93 #define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
ian@0 94 #define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
ian@0 95 #define CLASSHASH_MASK (CLASSHASH_SIZE - 1)
ian@0 96 #define __classhashfn(key) ((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK)
ian@0 97 #define classhashentry(key) (classhash_table + __classhashfn((key)))
ian@0 98
ian@0 99 static struct list_head classhash_table[CLASSHASH_SIZE];
ian@0 100
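/*
 * Illustrative sketch, not part of the original file: how a class key
 * address is folded into a hash bucket by __classhashfn().  The key
 * width (11 bits) and the key value are assumptions made purely for
 * the arithmetic; the real constants come from lockdep_internals.h.
 */
#include <stdio.h>

#define EX_KEYS_BITS	11				/* assumed MAX_LOCKDEP_KEYS_BITS */
#define EX_HASH_BITS	(EX_KEYS_BITS - 1)		/* CLASSHASH_BITS */
#define EX_HASH_SIZE	(1UL << EX_HASH_BITS)		/* CLASSHASH_SIZE */
#define EX_HASH_MASK	(EX_HASH_SIZE - 1)		/* CLASSHASH_MASK */
#define ex_classhashfn(key) \
	((((unsigned long)(key) >> EX_HASH_BITS) + (unsigned long)(key)) & EX_HASH_MASK)

int main(void)
{
	/* pretend this is the address of a static lock_class_key */
	unsigned long key = 0xc04d2160UL;

	printf("key %#lx -> bucket %lu of %lu\n",
	       key, ex_classhashfn(key), EX_HASH_SIZE);
	return 0;
}
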
ian@0 101 unsigned long nr_lock_chains;
ian@0 102 static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
ian@0 103
ian@0 104 /*
ian@0 105 * We put the lock dependency chains into a hash-table as well, to cache
ian@0 106 * their existence:
ian@0 107 */
ian@0 108 #define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
ian@0 109 #define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
ian@0 110 #define CHAINHASH_MASK (CHAINHASH_SIZE - 1)
ian@0 111 #define __chainhashfn(chain) \
ian@0 112 (((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK)
ian@0 113 #define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
ian@0 114
ian@0 115 static struct list_head chainhash_table[CHAINHASH_SIZE];
ian@0 116
ian@0 117 /*
ian@0 118 * The hash key of the lock dependency chains is a hash itself too:
ian@0 119 * it's a hash of all locks taken up to that lock, including that lock.
ian@0 120 * It's a 64-bit hash, because it's important for the keys to be
ian@0 121 * unique.
ian@0 122 */
ian@0 123 #define iterate_chain_key(key1, key2) \
ian@0 124 (((key1) << MAX_LOCKDEP_KEYS_BITS/2) ^ \
ian@0 125 ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS/2)) ^ \
ian@0 126 (key2))
ian@0 127
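/*
 * Illustrative sketch, not part of the original file: building a chain
 * key incrementally with iterate_chain_key(), the way __lock_acquire()
 * and check_chain_key() do.  The class ids and the 11-bit key width
 * are invented for the example.
 */
#include <stdio.h>

#define EX_KEYS_BITS	11	/* assumed MAX_LOCKDEP_KEYS_BITS */

static unsigned long long ex_iterate_chain_key(unsigned long long key1,
					       unsigned long long key2)
{
	return (key1 << EX_KEYS_BITS/2) ^
	       (key1 >> (64 - EX_KEYS_BITS/2)) ^
	       key2;
}

int main(void)
{
	unsigned long long chain_key = 0;		/* empty chain */
	unsigned int class_ids[] = { 3, 17, 42 };	/* hypothetical ids */
	int i;

	for (i = 0; i < 3; i++) {
		chain_key = ex_iterate_chain_key(chain_key, class_ids[i]);
		printf("after class #%u: chain_key = %016llx\n",
		       class_ids[i], chain_key);
	}
	return 0;
}
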
ian@0 128 void lockdep_off(void)
ian@0 129 {
ian@0 130 current->lockdep_recursion++;
ian@0 131 }
ian@0 132
ian@0 133 EXPORT_SYMBOL(lockdep_off);
ian@0 134
ian@0 135 void lockdep_on(void)
ian@0 136 {
ian@0 137 current->lockdep_recursion--;
ian@0 138 }
ian@0 139
ian@0 140 EXPORT_SYMBOL(lockdep_on);
ian@0 141
ian@0 142 int lockdep_internal(void)
ian@0 143 {
ian@0 144 return current->lockdep_recursion != 0;
ian@0 145 }
ian@0 146
ian@0 147 EXPORT_SYMBOL(lockdep_internal);
ian@0 148
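/*
 * Illustrative sketch, not part of the original file: the recursion
 * counter above lets a code path opt out of validation temporarily.
 * The function and the lock are hypothetical.
 */
#include <linux/lockdep.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ex_dump_lock);

static void ex_emergency_dump(void)
{
	/*
	 * Locks taken between lockdep_off() and lockdep_on() are not
	 * validated - useful in debug/panic paths that might otherwise
	 * recurse back into lockdep itself.
	 */
	lockdep_off();
	spin_lock(&ex_dump_lock);
	/* ... emit state ... */
	spin_unlock(&ex_dump_lock);
	lockdep_on();
}
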
ian@0 149 /*
ian@0 150 * Debugging switches:
ian@0 151 */
ian@0 152
ian@0 153 #define VERBOSE 0
ian@0 154 #ifdef VERBOSE
ian@0 155 # define VERY_VERBOSE 0
ian@0 156 #endif
ian@0 157
ian@0 158 #if VERBOSE
ian@0 159 # define HARDIRQ_VERBOSE 1
ian@0 160 # define SOFTIRQ_VERBOSE 1
ian@0 161 #else
ian@0 162 # define HARDIRQ_VERBOSE 0
ian@0 163 # define SOFTIRQ_VERBOSE 0
ian@0 164 #endif
ian@0 165
ian@0 166 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
ian@0 167 /*
ian@0 168 * Quick filtering for interesting events:
ian@0 169 */
ian@0 170 static int class_filter(struct lock_class *class)
ian@0 171 {
ian@0 172 #if 0
ian@0 173 /* Example */
ian@0 174 if (class->name_version == 1 &&
ian@0 175 !strcmp(class->name, "lockname"))
ian@0 176 return 1;
ian@0 177 if (class->name_version == 1 &&
ian@0 178 !strcmp(class->name, "&struct->lockfield"))
ian@0 179 return 1;
ian@0 180 #endif
ian@0 181 /* Allow everything else. 0 would be filter everything else */
ian@0 182 return 1;
ian@0 183 }
ian@0 184 #endif
ian@0 185
ian@0 186 static int verbose(struct lock_class *class)
ian@0 187 {
ian@0 188 #if VERBOSE
ian@0 189 return class_filter(class);
ian@0 190 #endif
ian@0 191 return 0;
ian@0 192 }
ian@0 193
ian@0 194 #ifdef CONFIG_TRACE_IRQFLAGS
ian@0 195
ian@0 196 static int hardirq_verbose(struct lock_class *class)
ian@0 197 {
ian@0 198 #if HARDIRQ_VERBOSE
ian@0 199 return class_filter(class);
ian@0 200 #endif
ian@0 201 return 0;
ian@0 202 }
ian@0 203
ian@0 204 static int softirq_verbose(struct lock_class *class)
ian@0 205 {
ian@0 206 #if SOFTIRQ_VERBOSE
ian@0 207 return class_filter(class);
ian@0 208 #endif
ian@0 209 return 0;
ian@0 210 }
ian@0 211
ian@0 212 #endif
ian@0 213
ian@0 214 /*
ian@0 215 * Stack-trace: tightly packed array of stack backtrace
ian@0 216 * addresses. Protected by the hash_lock.
ian@0 217 */
ian@0 218 unsigned long nr_stack_trace_entries;
ian@0 219 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
ian@0 220
ian@0 221 static int save_trace(struct stack_trace *trace)
ian@0 222 {
ian@0 223 trace->nr_entries = 0;
ian@0 224 trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
ian@0 225 trace->entries = stack_trace + nr_stack_trace_entries;
ian@0 226
ian@0 227 save_stack_trace(trace, NULL, 0, 3);
ian@0 228
ian@0 229 trace->max_entries = trace->nr_entries;
ian@0 230
ian@0 231 nr_stack_trace_entries += trace->nr_entries;
ian@0 232 if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES))
ian@0 233 return 0;
ian@0 234
ian@0 235 if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
ian@0 236 __raw_spin_unlock(&hash_lock);
ian@0 237 if (debug_locks_off()) {
ian@0 238 printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
ian@0 239 printk("turning off the locking correctness validator.\n");
ian@0 240 dump_stack();
ian@0 241 }
ian@0 242 return 0;
ian@0 243 }
ian@0 244
ian@0 245 return 1;
ian@0 246 }
ian@0 247
ian@0 248 unsigned int nr_hardirq_chains;
ian@0 249 unsigned int nr_softirq_chains;
ian@0 250 unsigned int nr_process_chains;
ian@0 251 unsigned int max_lockdep_depth;
ian@0 252 unsigned int max_recursion_depth;
ian@0 253
ian@0 254 #ifdef CONFIG_DEBUG_LOCKDEP
ian@0 255 /*
ian@0 256 * We cannot printk in early bootup code. Not even early_printk()
ian@0 257 * might work. So we mark any initialization errors and printk
ian@0 258 * about it later on, in lockdep_info().
ian@0 259 */
ian@0 260 static int lockdep_init_error;
ian@0 261
ian@0 262 /*
ian@0 263 * Various lockdep statistics:
ian@0 264 */
ian@0 265 atomic_t chain_lookup_hits;
ian@0 266 atomic_t chain_lookup_misses;
ian@0 267 atomic_t hardirqs_on_events;
ian@0 268 atomic_t hardirqs_off_events;
ian@0 269 atomic_t redundant_hardirqs_on;
ian@0 270 atomic_t redundant_hardirqs_off;
ian@0 271 atomic_t softirqs_on_events;
ian@0 272 atomic_t softirqs_off_events;
ian@0 273 atomic_t redundant_softirqs_on;
ian@0 274 atomic_t redundant_softirqs_off;
ian@0 275 atomic_t nr_unused_locks;
ian@0 276 atomic_t nr_cyclic_checks;
ian@0 277 atomic_t nr_cyclic_check_recursions;
ian@0 278 atomic_t nr_find_usage_forwards_checks;
ian@0 279 atomic_t nr_find_usage_forwards_recursions;
ian@0 280 atomic_t nr_find_usage_backwards_checks;
ian@0 281 atomic_t nr_find_usage_backwards_recursions;
ian@0 282 # define debug_atomic_inc(ptr) atomic_inc(ptr)
ian@0 283 # define debug_atomic_dec(ptr) atomic_dec(ptr)
ian@0 284 # define debug_atomic_read(ptr) atomic_read(ptr)
ian@0 285 #else
ian@0 286 # define debug_atomic_inc(ptr) do { } while (0)
ian@0 287 # define debug_atomic_dec(ptr) do { } while (0)
ian@0 288 # define debug_atomic_read(ptr) 0
ian@0 289 #endif
ian@0 290
ian@0 291 /*
ian@0 292 * Locking printouts:
ian@0 293 */
ian@0 294
ian@0 295 static const char *usage_str[] =
ian@0 296 {
ian@0 297 [LOCK_USED] = "initial-use ",
ian@0 298 [LOCK_USED_IN_HARDIRQ] = "in-hardirq-W",
ian@0 299 [LOCK_USED_IN_SOFTIRQ] = "in-softirq-W",
ian@0 300 [LOCK_ENABLED_SOFTIRQS] = "softirq-on-W",
ian@0 301 [LOCK_ENABLED_HARDIRQS] = "hardirq-on-W",
ian@0 302 [LOCK_USED_IN_HARDIRQ_READ] = "in-hardirq-R",
ian@0 303 [LOCK_USED_IN_SOFTIRQ_READ] = "in-softirq-R",
ian@0 304 [LOCK_ENABLED_SOFTIRQS_READ] = "softirq-on-R",
ian@0 305 [LOCK_ENABLED_HARDIRQS_READ] = "hardirq-on-R",
ian@0 306 };
ian@0 307
ian@0 308 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
ian@0 309 {
ian@0 310 unsigned long offs, size;
ian@0 311 char *modname;
ian@0 312
ian@0 313 return kallsyms_lookup((unsigned long)key, &size, &offs, &modname, str);
ian@0 314 }
ian@0 315
ian@0 316 void
ian@0 317 get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
ian@0 318 {
ian@0 319 *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
ian@0 320
ian@0 321 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
ian@0 322 *c1 = '+';
ian@0 323 else
ian@0 324 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
ian@0 325 *c1 = '-';
ian@0 326
ian@0 327 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
ian@0 328 *c2 = '+';
ian@0 329 else
ian@0 330 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
ian@0 331 *c2 = '-';
ian@0 332
ian@0 333 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
ian@0 334 *c3 = '-';
ian@0 335 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
ian@0 336 *c3 = '+';
ian@0 337 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
ian@0 338 *c3 = '?';
ian@0 339 }
ian@0 340
ian@0 341 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
ian@0 342 *c4 = '-';
ian@0 343 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
ian@0 344 *c4 = '+';
ian@0 345 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
ian@0 346 *c4 = '?';
ian@0 347 }
ian@0 348 }
ian@0 349
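/*
 * Illustrative reading of the four usage characters, not part of the
 * original file.  A lock reported as "){+.-.}" decodes as:
 *
 *   column 1 '+'  taken in hardirq context (write)       - hardirq-safe
 *   column 2 '.'  no softirq usage recorded
 *   column 3 '-'  hardirqs were enabled while it was read-held
 *   column 4 '.'  nothing recorded on the softirq-read side
 *
 * A '?' in column 3 or 4 means both "used in irq context" and "irqs
 * enabled while held" were seen for the read side - exactly the
 * combination the irq-inversion checks below look for.
 */
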
ian@0 350 static void print_lock_name(struct lock_class *class)
ian@0 351 {
ian@0 352 char str[128], c1, c2, c3, c4;
ian@0 353 const char *name;
ian@0 354
ian@0 355 get_usage_chars(class, &c1, &c2, &c3, &c4);
ian@0 356
ian@0 357 name = class->name;
ian@0 358 if (!name) {
ian@0 359 name = __get_key_name(class->key, str);
ian@0 360 printk(" (%s", name);
ian@0 361 } else {
ian@0 362 printk(" (%s", name);
ian@0 363 if (class->name_version > 1)
ian@0 364 printk("#%d", class->name_version);
ian@0 365 if (class->subclass)
ian@0 366 printk("/%d", class->subclass);
ian@0 367 }
ian@0 368 printk("){%c%c%c%c}", c1, c2, c3, c4);
ian@0 369 }
ian@0 370
ian@0 371 static void print_lockdep_cache(struct lockdep_map *lock)
ian@0 372 {
ian@0 373 const char *name;
ian@0 374 char str[128];
ian@0 375
ian@0 376 name = lock->name;
ian@0 377 if (!name)
ian@0 378 name = __get_key_name(lock->key->subkeys, str);
ian@0 379
ian@0 380 printk("%s", name);
ian@0 381 }
ian@0 382
ian@0 383 static void print_lock(struct held_lock *hlock)
ian@0 384 {
ian@0 385 print_lock_name(hlock->class);
ian@0 386 printk(", at: ");
ian@0 387 print_ip_sym(hlock->acquire_ip);
ian@0 388 }
ian@0 389
ian@0 390 static void lockdep_print_held_locks(struct task_struct *curr)
ian@0 391 {
ian@0 392 int i, depth = curr->lockdep_depth;
ian@0 393
ian@0 394 if (!depth) {
ian@0 395 printk("no locks held by %s/%d.\n", curr->comm, curr->pid);
ian@0 396 return;
ian@0 397 }
ian@0 398 printk("%d lock%s held by %s/%d:\n",
ian@0 399 depth, depth > 1 ? "s" : "", curr->comm, curr->pid);
ian@0 400
ian@0 401 for (i = 0; i < depth; i++) {
ian@0 402 printk(" #%d: ", i);
ian@0 403 print_lock(curr->held_locks + i);
ian@0 404 }
ian@0 405 }
ian@0 406
ian@0 407 static void print_lock_class_header(struct lock_class *class, int depth)
ian@0 408 {
ian@0 409 int bit;
ian@0 410
ian@0 411 printk("%*s->", depth, "");
ian@0 412 print_lock_name(class);
ian@0 413 printk(" ops: %lu", class->ops);
ian@0 414 printk(" {\n");
ian@0 415
ian@0 416 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
ian@0 417 if (class->usage_mask & (1 << bit)) {
ian@0 418 int len = depth;
ian@0 419
ian@0 420 len += printk("%*s %s", depth, "", usage_str[bit]);
ian@0 421 len += printk(" at:\n");
ian@0 422 print_stack_trace(class->usage_traces + bit, len);
ian@0 423 }
ian@0 424 }
ian@0 425 printk("%*s }\n", depth, "");
ian@0 426
ian@0 427 printk("%*s ... key at: ",depth,"");
ian@0 428 print_ip_sym((unsigned long)class->key);
ian@0 429 }
ian@0 430
ian@0 431 /*
ian@0 432 * printk all lock dependencies starting at <entry>:
ian@0 433 */
ian@0 434 static void print_lock_dependencies(struct lock_class *class, int depth)
ian@0 435 {
ian@0 436 struct lock_list *entry;
ian@0 437
ian@0 438 if (DEBUG_LOCKS_WARN_ON(depth >= 20))
ian@0 439 return;
ian@0 440
ian@0 441 print_lock_class_header(class, depth);
ian@0 442
ian@0 443 list_for_each_entry(entry, &class->locks_after, entry) {
ian@0 444 DEBUG_LOCKS_WARN_ON(!entry->class);
ian@0 445 print_lock_dependencies(entry->class, depth + 1);
ian@0 446
ian@0 447 printk("%*s ... acquired at:\n",depth,"");
ian@0 448 print_stack_trace(&entry->trace, 2);
ian@0 449 printk("\n");
ian@0 450 }
ian@0 451 }
ian@0 452
ian@0 453 /*
ian@0 454 * Add a new dependency to the head of the list:
ian@0 455 */
ian@0 456 static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
ian@0 457 struct list_head *head, unsigned long ip)
ian@0 458 {
ian@0 459 struct lock_list *entry;
ian@0 460 /*
ian@0 461 * Lock not present yet - get a new dependency struct and
ian@0 462 * add it to the list:
ian@0 463 */
ian@0 464 entry = alloc_list_entry();
ian@0 465 if (!entry)
ian@0 466 return 0;
ian@0 467
ian@0 468 entry->class = this;
ian@0 469 save_trace(&entry->trace);
ian@0 470
ian@0 471 /*
ian@0 472 * Since we never remove from the dependency list, the list can
ian@0 473 * be walked lockless by other CPUs, it's only allocation
ian@0 474 * that must be protected by the spinlock. But this also means
ian@0 475 * we must make new entries visible only once writes to the
ian@0 476 * entry become visible - hence the RCU op:
ian@0 477 */
ian@0 478 list_add_tail_rcu(&entry->entry, head);
ian@0 479
ian@0 480 return 1;
ian@0 481 }
ian@0 482
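/*
 * Illustrative sketch, not part of the original file, of the
 * publish/read pattern relied on above: the writer publishes fully
 * initialised entries with list_add_tail_rcu() while holding the
 * allocator lock, and readers may walk the list with no lock at all
 * because entries are never removed.  All names are hypothetical.
 */
#include <linux/list.h>

struct ex_dep {
	struct list_head entry;
	int payload;
};

static LIST_HEAD(ex_deps);

/* writer side - called with the allocator lock held */
static void ex_publish(struct ex_dep *d, int payload)
{
	d->payload = payload;			/* initialise first ...  */
	list_add_tail_rcu(&d->entry, &ex_deps);	/* ... then publish      */
}

/* reader side - lockless, mirrors how the dependency lists are walked */
static int ex_sum(void)
{
	struct ex_dep *d;
	int sum = 0;

	list_for_each_entry(d, &ex_deps, entry)
		sum += d->payload;
	return sum;
}
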
ian@0 483 /*
ian@0 484 * Recursive, forwards-direction lock-dependency checking, used for
ian@0 485 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
ian@0 486 * checking.
ian@0 487 *
ian@0 488 * (to keep the stackframe of the recursive functions small we
ian@0 489 * use these global variables, and we also mark various helper
ian@0 490 * functions as noinline.)
ian@0 491 */
ian@0 492 static struct held_lock *check_source, *check_target;
ian@0 493
ian@0 494 /*
ian@0 495 * Print a dependency chain entry (this is only done when a deadlock
ian@0 496 * has been detected):
ian@0 497 */
ian@0 498 static noinline int
ian@0 499 print_circular_bug_entry(struct lock_list *target, unsigned int depth)
ian@0 500 {
ian@0 501 if (debug_locks_silent)
ian@0 502 return 0;
ian@0 503 printk("\n-> #%u", depth);
ian@0 504 print_lock_name(target->class);
ian@0 505 printk(":\n");
ian@0 506 print_stack_trace(&target->trace, 6);
ian@0 507
ian@0 508 return 0;
ian@0 509 }
ian@0 510
ian@0 511 /*
ian@0 512 * When a circular dependency is detected, print the
ian@0 513 * header first:
ian@0 514 */
ian@0 515 static noinline int
ian@0 516 print_circular_bug_header(struct lock_list *entry, unsigned int depth)
ian@0 517 {
ian@0 518 struct task_struct *curr = current;
ian@0 519
ian@0 520 __raw_spin_unlock(&hash_lock);
ian@0 521 debug_locks_off();
ian@0 522 if (debug_locks_silent)
ian@0 523 return 0;
ian@0 524
ian@0 525 printk("\n=======================================================\n");
ian@0 526 printk( "[ INFO: possible circular locking dependency detected ]\n");
ian@0 527 printk( "-------------------------------------------------------\n");
ian@0 528 printk("%s/%d is trying to acquire lock:\n",
ian@0 529 curr->comm, curr->pid);
ian@0 530 print_lock(check_source);
ian@0 531 printk("\nbut task is already holding lock:\n");
ian@0 532 print_lock(check_target);
ian@0 533 printk("\nwhich lock already depends on the new lock.\n\n");
ian@0 534 printk("\nthe existing dependency chain (in reverse order) is:\n");
ian@0 535
ian@0 536 print_circular_bug_entry(entry, depth);
ian@0 537
ian@0 538 return 0;
ian@0 539 }
ian@0 540
ian@0 541 static noinline int print_circular_bug_tail(void)
ian@0 542 {
ian@0 543 struct task_struct *curr = current;
ian@0 544 struct lock_list this;
ian@0 545
ian@0 546 if (debug_locks_silent)
ian@0 547 return 0;
ian@0 548
ian@0 549 this.class = check_source->class;
ian@0 550 save_trace(&this.trace);
ian@0 551 print_circular_bug_entry(&this, 0);
ian@0 552
ian@0 553 printk("\nother info that might help us debug this:\n\n");
ian@0 554 lockdep_print_held_locks(curr);
ian@0 555
ian@0 556 printk("\nstack backtrace:\n");
ian@0 557 dump_stack();
ian@0 558
ian@0 559 return 0;
ian@0 560 }
ian@0 561
ian@0 562 static int noinline print_infinite_recursion_bug(void)
ian@0 563 {
ian@0 564 __raw_spin_unlock(&hash_lock);
ian@0 565 DEBUG_LOCKS_WARN_ON(1);
ian@0 566
ian@0 567 return 0;
ian@0 568 }
ian@0 569
ian@0 570 /*
ian@0 571 * Prove that the dependency graph starting at <entry> can not
ian@0 572 * lead to <target>. Print an error and return 0 if it does.
ian@0 573 */
ian@0 574 static noinline int
ian@0 575 check_noncircular(struct lock_class *source, unsigned int depth)
ian@0 576 {
ian@0 577 struct lock_list *entry;
ian@0 578
ian@0 579 debug_atomic_inc(&nr_cyclic_check_recursions);
ian@0 580 if (depth > max_recursion_depth)
ian@0 581 max_recursion_depth = depth;
ian@0 582 if (depth >= 20)
ian@0 583 return print_infinite_recursion_bug();
ian@0 584 /*
ian@0 585 * Check this lock's dependency list:
ian@0 586 */
ian@0 587 list_for_each_entry(entry, &source->locks_after, entry) {
ian@0 588 if (entry->class == check_target->class)
ian@0 589 return print_circular_bug_header(entry, depth+1);
ian@0 590 debug_atomic_inc(&nr_cyclic_checks);
ian@0 591 if (!check_noncircular(entry->class, depth+1))
ian@0 592 return print_circular_bug_entry(entry, depth+1);
ian@0 593 }
ian@0 594 return 1;
ian@0 595 }
ian@0 596
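/*
 * Illustrative sketch, not part of the original file: the classic
 * AB-BA pattern whose second ordering makes check_noncircular() find
 * a path from <next> back to <prev>.  Locks and functions are
 * hypothetical.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ex_lock_a);
static DEFINE_SPINLOCK(ex_lock_b);

static void ex_path_one(void)
{
	spin_lock(&ex_lock_a);
	spin_lock(&ex_lock_b);		/* records the A -> B dependency */
	spin_unlock(&ex_lock_b);
	spin_unlock(&ex_lock_a);
}

static void ex_path_two(void)
{
	spin_lock(&ex_lock_b);
	spin_lock(&ex_lock_a);		/* B -> A closes the cycle:
					 * print_circular_bug_header() fires */
	spin_unlock(&ex_lock_a);
	spin_unlock(&ex_lock_b);
}
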
ian@0 597 static int very_verbose(struct lock_class *class)
ian@0 598 {
ian@0 599 #if VERY_VERBOSE
ian@0 600 return class_filter(class);
ian@0 601 #endif
ian@0 602 return 0;
ian@0 603 }
ian@0 604 #ifdef CONFIG_TRACE_IRQFLAGS
ian@0 605
ian@0 606 /*
ian@0 607 * Forwards and backwards subgraph searching, for the purposes of
ian@0 608 * proving that two subgraphs can be connected by a new dependency
ian@0 609 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
ian@0 610 */
ian@0 611 static enum lock_usage_bit find_usage_bit;
ian@0 612 static struct lock_class *forwards_match, *backwards_match;
ian@0 613
ian@0 614 /*
ian@0 615 * Find a node in the forwards-direction dependency sub-graph starting
ian@0 616 * at <source> that matches <find_usage_bit>.
ian@0 617 *
ian@0 618 * Return 2 if such a node exists in the subgraph, and put that node
ian@0 619 * into <forwards_match>.
ian@0 620 *
ian@0 621 * Return 1 otherwise and keep <forwards_match> unchanged.
ian@0 622 * Return 0 on error.
ian@0 623 */
ian@0 624 static noinline int
ian@0 625 find_usage_forwards(struct lock_class *source, unsigned int depth)
ian@0 626 {
ian@0 627 struct lock_list *entry;
ian@0 628 int ret;
ian@0 629
ian@0 630 if (depth > max_recursion_depth)
ian@0 631 max_recursion_depth = depth;
ian@0 632 if (depth >= 20)
ian@0 633 return print_infinite_recursion_bug();
ian@0 634
ian@0 635 debug_atomic_inc(&nr_find_usage_forwards_checks);
ian@0 636 if (source->usage_mask & (1 << find_usage_bit)) {
ian@0 637 forwards_match = source;
ian@0 638 return 2;
ian@0 639 }
ian@0 640
ian@0 641 /*
ian@0 642 * Check this lock's dependency list:
ian@0 643 */
ian@0 644 list_for_each_entry(entry, &source->locks_after, entry) {
ian@0 645 debug_atomic_inc(&nr_find_usage_forwards_recursions);
ian@0 646 ret = find_usage_forwards(entry->class, depth+1);
ian@0 647 if (ret == 2 || ret == 0)
ian@0 648 return ret;
ian@0 649 }
ian@0 650 return 1;
ian@0 651 }
ian@0 652
ian@0 653 /*
ian@0 654 * Find a node in the backwards-direction dependency sub-graph starting
ian@0 655 * at <source> that matches <find_usage_bit>.
ian@0 656 *
ian@0 657 * Return 2 if such a node exists in the subgraph, and put that node
ian@0 658 * into <backwards_match>.
ian@0 659 *
ian@0 660 * Return 1 otherwise and keep <backwards_match> unchanged.
ian@0 661 * Return 0 on error.
ian@0 662 */
ian@0 663 static noinline int
ian@0 664 find_usage_backwards(struct lock_class *source, unsigned int depth)
ian@0 665 {
ian@0 666 struct lock_list *entry;
ian@0 667 int ret;
ian@0 668
ian@0 669 if (depth > max_recursion_depth)
ian@0 670 max_recursion_depth = depth;
ian@0 671 if (depth >= 20)
ian@0 672 return print_infinite_recursion_bug();
ian@0 673
ian@0 674 debug_atomic_inc(&nr_find_usage_backwards_checks);
ian@0 675 if (source->usage_mask & (1 << find_usage_bit)) {
ian@0 676 backwards_match = source;
ian@0 677 return 2;
ian@0 678 }
ian@0 679
ian@0 680 /*
ian@0 681 * Check this lock's dependency list:
ian@0 682 */
ian@0 683 list_for_each_entry(entry, &source->locks_before, entry) {
ian@0 684 debug_atomic_inc(&nr_find_usage_backwards_recursions);
ian@0 685 ret = find_usage_backwards(entry->class, depth+1);
ian@0 686 if (ret == 2 || ret == 0)
ian@0 687 return ret;
ian@0 688 }
ian@0 689 return 1;
ian@0 690 }
ian@0 691
ian@0 692 static int
ian@0 693 print_bad_irq_dependency(struct task_struct *curr,
ian@0 694 struct held_lock *prev,
ian@0 695 struct held_lock *next,
ian@0 696 enum lock_usage_bit bit1,
ian@0 697 enum lock_usage_bit bit2,
ian@0 698 const char *irqclass)
ian@0 699 {
ian@0 700 __raw_spin_unlock(&hash_lock);
ian@0 701 debug_locks_off();
ian@0 702 if (debug_locks_silent)
ian@0 703 return 0;
ian@0 704
ian@0 705 printk("\n======================================================\n");
ian@0 706 printk( "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
ian@0 707 irqclass, irqclass);
ian@0 708 printk( "------------------------------------------------------\n");
ian@0 709 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
ian@0 710 curr->comm, curr->pid,
ian@0 711 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
ian@0 712 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
ian@0 713 curr->hardirqs_enabled,
ian@0 714 curr->softirqs_enabled);
ian@0 715 print_lock(next);
ian@0 716
ian@0 717 printk("\nand this task is already holding:\n");
ian@0 718 print_lock(prev);
ian@0 719 printk("which would create a new lock dependency:\n");
ian@0 720 print_lock_name(prev->class);
ian@0 721 printk(" ->");
ian@0 722 print_lock_name(next->class);
ian@0 723 printk("\n");
ian@0 724
ian@0 725 printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
ian@0 726 irqclass);
ian@0 727 print_lock_name(backwards_match);
ian@0 728 printk("\n... which became %s-irq-safe at:\n", irqclass);
ian@0 729
ian@0 730 print_stack_trace(backwards_match->usage_traces + bit1, 1);
ian@0 731
ian@0 732 printk("\nto a %s-irq-unsafe lock:\n", irqclass);
ian@0 733 print_lock_name(forwards_match);
ian@0 734 printk("\n... which became %s-irq-unsafe at:\n", irqclass);
ian@0 735 printk("...");
ian@0 736
ian@0 737 print_stack_trace(forwards_match->usage_traces + bit2, 1);
ian@0 738
ian@0 739 printk("\nother info that might help us debug this:\n\n");
ian@0 740 lockdep_print_held_locks(curr);
ian@0 741
ian@0 742 printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
ian@0 743 print_lock_dependencies(backwards_match, 0);
ian@0 744
ian@0 745 printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
ian@0 746 print_lock_dependencies(forwards_match, 0);
ian@0 747
ian@0 748 printk("\nstack backtrace:\n");
ian@0 749 dump_stack();
ian@0 750
ian@0 751 return 0;
ian@0 752 }
ian@0 753
ian@0 754 static int
ian@0 755 check_usage(struct task_struct *curr, struct held_lock *prev,
ian@0 756 struct held_lock *next, enum lock_usage_bit bit_backwards,
ian@0 757 enum lock_usage_bit bit_forwards, const char *irqclass)
ian@0 758 {
ian@0 759 int ret;
ian@0 760
ian@0 761 find_usage_bit = bit_backwards;
ian@0 762 /* fills in <backwards_match> */
ian@0 763 ret = find_usage_backwards(prev->class, 0);
ian@0 764 if (!ret || ret == 1)
ian@0 765 return ret;
ian@0 766
ian@0 767 find_usage_bit = bit_forwards;
ian@0 768 ret = find_usage_forwards(next->class, 0);
ian@0 769 if (!ret || ret == 1)
ian@0 770 return ret;
ian@0 771 /* ret == 2 */
ian@0 772 return print_bad_irq_dependency(curr, prev, next,
ian@0 773 bit_backwards, bit_forwards, irqclass);
ian@0 774 }
ian@0 775
ian@0 776 #endif
ian@0 777
ian@0 778 static int
ian@0 779 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
ian@0 780 struct held_lock *next)
ian@0 781 {
ian@0 782 debug_locks_off();
ian@0 783 __raw_spin_unlock(&hash_lock);
ian@0 784 if (debug_locks_silent)
ian@0 785 return 0;
ian@0 786
ian@0 787 printk("\n=============================================\n");
ian@0 788 printk( "[ INFO: possible recursive locking detected ]\n");
ian@0 789 printk( "---------------------------------------------\n");
ian@0 790 printk("%s/%d is trying to acquire lock:\n",
ian@0 791 curr->comm, curr->pid);
ian@0 792 print_lock(next);
ian@0 793 printk("\nbut task is already holding lock:\n");
ian@0 794 print_lock(prev);
ian@0 795
ian@0 796 printk("\nother info that might help us debug this:\n");
ian@0 797 lockdep_print_held_locks(curr);
ian@0 798
ian@0 799 printk("\nstack backtrace:\n");
ian@0 800 dump_stack();
ian@0 801
ian@0 802 return 0;
ian@0 803 }
ian@0 804
ian@0 805 /*
ian@0 806 * Check whether we are holding such a class already.
ian@0 807 *
ian@0 808 * (Note that this has to be done separately, because the graph cannot
ian@0 809 * detect such classes of deadlocks.)
ian@0 810 *
ian@0 811 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
ian@0 812 */
ian@0 813 static int
ian@0 814 check_deadlock(struct task_struct *curr, struct held_lock *next,
ian@0 815 struct lockdep_map *next_instance, int read)
ian@0 816 {
ian@0 817 struct held_lock *prev;
ian@0 818 int i;
ian@0 819
ian@0 820 for (i = 0; i < curr->lockdep_depth; i++) {
ian@0 821 prev = curr->held_locks + i;
ian@0 822 if (prev->class != next->class)
ian@0 823 continue;
ian@0 824 /*
ian@0 825 * Allow read-after-read recursion of the same
ian@0 826 * lock class (i.e. read_lock(lock)+read_lock(lock)):
ian@0 827 */
ian@0 828 if ((read == 2) && prev->read)
ian@0 829 return 2;
ian@0 830 return print_deadlock_bug(curr, prev, next);
ian@0 831 }
ian@0 832 return 1;
ian@0 833 }
ian@0 834
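/*
 * Illustrative sketch, not part of the original file: what
 * check_deadlock() accepts and rejects for same-class nesting.
 * The rwlock and spinlock are hypothetical.
 */
#include <linux/spinlock.h>

static DEFINE_RWLOCK(ex_rwlock);
static DEFINE_SPINLOCK(ex_spinlock);

static void ex_ok_recursive_read(void)
{
	read_lock(&ex_rwlock);
	read_lock(&ex_rwlock);		/* read == 2 && prev->read: allowed */
	read_unlock(&ex_rwlock);
	read_unlock(&ex_rwlock);
}

static void ex_bad_same_class(void)
{
	spin_lock(&ex_spinlock);
	spin_lock(&ex_spinlock);	/* same class already held: possible
					 * recursive locking report */
	spin_unlock(&ex_spinlock);
	spin_unlock(&ex_spinlock);
}
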
ian@0 835 /*
ian@0 836 * There was a chain-cache miss, and we are about to add a new dependency
ian@0 837 * to a previous lock. We recursively validate the following rules:
ian@0 838 *
ian@0 839 * - would the adding of the <prev> -> <next> dependency create a
ian@0 840 * circular dependency in the graph? [== circular deadlock]
ian@0 841 *
ian@0 842 * - does the new prev->next dependency connect any hardirq-safe lock
ian@0 843 * (in the full backwards-subgraph starting at <prev>) with any
ian@0 844 * hardirq-unsafe lock (in the full forwards-subgraph starting at
ian@0 845 * <next>)? [== illegal lock inversion with hardirq contexts]
ian@0 846 *
ian@0 847 * - does the new prev->next dependency connect any softirq-safe lock
ian@0 848 * (in the full backwards-subgraph starting at <prev>) with any
ian@0 849 * softirq-unsafe lock (in the full forwards-subgraph starting at
ian@0 850 * <next>)? [== illegal lock inversion with softirq contexts]
ian@0 851 *
ian@0 852 * any of these scenarios could lead to a deadlock.
ian@0 853 *
ian@0 854 * Then if all the validations pass, we add the forwards and backwards
ian@0 855 * dependency.
ian@0 856 */
ian@0 857 static int
ian@0 858 check_prev_add(struct task_struct *curr, struct held_lock *prev,
ian@0 859 struct held_lock *next)
ian@0 860 {
ian@0 861 struct lock_list *entry;
ian@0 862 int ret;
ian@0 863
ian@0 864 /*
ian@0 865 * Prove that the new <prev> -> <next> dependency would not
ian@0 866 * create a circular dependency in the graph. (We do this by
ian@0 867 * forward-recursing into the graph starting at <next>, and
ian@0 868 * checking whether we can reach <prev>.)
ian@0 869 *
ian@0 870 * We are using global variables to control the recursion, to
ian@0 871 * keep the stackframe size of the recursive functions low:
ian@0 872 */
ian@0 873 check_source = next;
ian@0 874 check_target = prev;
ian@0 875 if (!(check_noncircular(next->class, 0)))
ian@0 876 return print_circular_bug_tail();
ian@0 877
ian@0 878 #ifdef CONFIG_TRACE_IRQFLAGS
ian@0 879 /*
ian@0 880 * Prove that the new dependency does not connect a hardirq-safe
ian@0 881 * lock with a hardirq-unsafe lock - to achieve this we search
ian@0 882 * the backwards-subgraph starting at <prev>, and the
ian@0 883 * forwards-subgraph starting at <next>:
ian@0 884 */
ian@0 885 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
ian@0 886 LOCK_ENABLED_HARDIRQS, "hard"))
ian@0 887 return 0;
ian@0 888
ian@0 889 /*
ian@0 890 * Prove that the new dependency does not connect a hardirq-safe-read
ian@0 891 * lock with a hardirq-unsafe lock - to achieve this we search
ian@0 892 * the backwards-subgraph starting at <prev>, and the
ian@0 893 * forwards-subgraph starting at <next>:
ian@0 894 */
ian@0 895 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
ian@0 896 LOCK_ENABLED_HARDIRQS, "hard-read"))
ian@0 897 return 0;
ian@0 898
ian@0 899 /*
ian@0 900 * Prove that the new dependency does not connect a softirq-safe
ian@0 901 * lock with a softirq-unsafe lock - to achieve this we search
ian@0 902 * the backwards-subgraph starting at <prev>, and the
ian@0 903 * forwards-subgraph starting at <next>:
ian@0 904 */
ian@0 905 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
ian@0 906 LOCK_ENABLED_SOFTIRQS, "soft"))
ian@0 907 return 0;
ian@0 908 /*
ian@0 909 * Prove that the new dependency does not connect a softirq-safe-read
ian@0 910 * lock with a softirq-unsafe lock - to achieve this we search
ian@0 911 * the backwards-subgraph starting at <prev>, and the
ian@0 912 * forwards-subgraph starting at <next>:
ian@0 913 */
ian@0 914 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
ian@0 915 LOCK_ENABLED_SOFTIRQS, "soft"))
ian@0 916 return 0;
ian@0 917 #endif
ian@0 918 /*
ian@0 919 * For recursive read-locks we do all the dependency checks,
ian@0 920 * but we dont store read-triggered dependencies (only
ian@0 921 * write-triggered dependencies). This ensures that only the
ian@0 922 * write-side dependencies matter, and that if for example a
ian@0 923 * write-lock never takes any other locks, then the reads are
ian@0 924 * equivalent to a NOP.
ian@0 925 */
ian@0 926 if (next->read == 2 || prev->read == 2)
ian@0 927 return 1;
ian@0 928 /*
ian@0 929 * Is the <prev> -> <next> dependency already present?
ian@0 930 *
ian@0 931 * (this may occur even though this is a new chain: consider
ian@0 932 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
ian@0 933 * chains - the second one will be new, but L1 already has
ian@0 934 * L2 added to its dependency list, due to the first chain.)
ian@0 935 */
ian@0 936 list_for_each_entry(entry, &prev->class->locks_after, entry) {
ian@0 937 if (entry->class == next->class)
ian@0 938 return 2;
ian@0 939 }
ian@0 940
ian@0 941 /*
ian@0 942 * Ok, all validations passed, add the new lock
ian@0 943 * to the previous lock's dependency list:
ian@0 944 */
ian@0 945 ret = add_lock_to_list(prev->class, next->class,
ian@0 946 &prev->class->locks_after, next->acquire_ip);
ian@0 947 if (!ret)
ian@0 948 return 0;
ian@0 949 /*
ian@0 950 * Return value of 2 signals 'dependency already added',
ian@0 951 * in that case we dont have to add the backlink either.
ian@0 952 */
ian@0 953 if (ret == 2)
ian@0 954 return 2;
ian@0 955 ret = add_lock_to_list(next->class, prev->class,
ian@0 956 &next->class->locks_before, next->acquire_ip);
ian@0 957
ian@0 958 /*
ian@0 959 * Debugging printouts:
ian@0 960 */
ian@0 961 if (verbose(prev->class) || verbose(next->class)) {
ian@0 962 __raw_spin_unlock(&hash_lock);
ian@0 963 printk("\n new dependency: ");
ian@0 964 print_lock_name(prev->class);
ian@0 965 printk(" => ");
ian@0 966 print_lock_name(next->class);
ian@0 967 printk("\n");
ian@0 968 dump_stack();
ian@0 969 __raw_spin_lock(&hash_lock);
ian@0 970 }
ian@0 971 return 1;
ian@0 972 }
ian@0 973
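/*
 * Illustrative sketch, not part of the original file: a dependency
 * that trips the hardirq-safe -> hardirq-unsafe check above.  The
 * locks and code paths are hypothetical.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ex_irq_lock);	/* also taken in hardirq context */
static DEFINE_SPINLOCK(ex_plain_lock);	/* also taken with irqs enabled  */

/* called from a (hypothetical) interrupt handler */
static void ex_in_hardirq(void)
{
	spin_lock(&ex_irq_lock);	/* marks ex_irq_lock hardirq-safe */
	spin_unlock(&ex_irq_lock);
}

/* ordinary process context, interrupts enabled */
static void ex_in_process(void)
{
	spin_lock(&ex_plain_lock);	/* marks ex_plain_lock hardirq-unsafe */
	spin_unlock(&ex_plain_lock);
}

static void ex_mixed_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ex_irq_lock, flags);
	spin_lock(&ex_plain_lock);	/* new dependency connects a
					 * hardirq-safe lock to a
					 * hardirq-unsafe one: check_usage()
					 * reports a bad irq dependency */
	spin_unlock(&ex_plain_lock);
	spin_unlock_irqrestore(&ex_irq_lock, flags);
}
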
ian@0 974 /*
ian@0 975 * Add the dependency to all directly-previous locks that are 'relevant'.
ian@0 976 * The ones that are relevant are (in increasing distance from curr):
ian@0 977 * all consecutive trylock entries and the final non-trylock entry - or
ian@0 978 * the end of this context's lock-chain - whichever comes first.
ian@0 979 */
ian@0 980 static int
ian@0 981 check_prevs_add(struct task_struct *curr, struct held_lock *next)
ian@0 982 {
ian@0 983 int depth = curr->lockdep_depth;
ian@0 984 struct held_lock *hlock;
ian@0 985
ian@0 986 /*
ian@0 987 * Debugging checks.
ian@0 988 *
ian@0 989 * Depth must not be zero for a non-head lock:
ian@0 990 */
ian@0 991 if (!depth)
ian@0 992 goto out_bug;
ian@0 993 /*
ian@0 994 * At least two relevant locks must exist for this
ian@0 995 * to be a head:
ian@0 996 */
ian@0 997 if (curr->held_locks[depth].irq_context !=
ian@0 998 curr->held_locks[depth-1].irq_context)
ian@0 999 goto out_bug;
ian@0 1000
ian@0 1001 for (;;) {
ian@0 1002 hlock = curr->held_locks + depth-1;
ian@0 1003 /*
ian@0 1004 * Only non-recursive-read entries get new dependencies
ian@0 1005 * added:
ian@0 1006 */
ian@0 1007 if (hlock->read != 2) {
ian@0 1008 check_prev_add(curr, hlock, next);
ian@0 1009 /*
ian@0 1010 * Stop after the first non-trylock entry,
ian@0 1011 * as non-trylock entries have added their
ian@0 1012 * own direct dependencies already, so this
ian@0 1013 * lock is connected to them indirectly:
ian@0 1014 */
ian@0 1015 if (!hlock->trylock)
ian@0 1016 break;
ian@0 1017 }
ian@0 1018 depth--;
ian@0 1019 /*
ian@0 1020 * End of lock-stack?
ian@0 1021 */
ian@0 1022 if (!depth)
ian@0 1023 break;
ian@0 1024 /*
ian@0 1025 * Stop the search if we cross into another context:
ian@0 1026 */
ian@0 1027 if (curr->held_locks[depth].irq_context !=
ian@0 1028 curr->held_locks[depth-1].irq_context)
ian@0 1029 break;
ian@0 1030 }
ian@0 1031 return 1;
ian@0 1032 out_bug:
ian@0 1033 __raw_spin_unlock(&hash_lock);
ian@0 1034 DEBUG_LOCKS_WARN_ON(1);
ian@0 1035
ian@0 1036 return 0;
ian@0 1037 }
ian@0 1038
ian@0 1039
ian@0 1040 /*
ian@0 1041 * Is this the address of a static object:
ian@0 1042 */
ian@0 1043 static int static_obj(void *obj)
ian@0 1044 {
ian@0 1045 unsigned long start = (unsigned long) &_stext,
ian@0 1046 end = (unsigned long) &_end,
ian@0 1047 addr = (unsigned long) obj;
ian@0 1048 #ifdef CONFIG_SMP
ian@0 1049 int i;
ian@0 1050 #endif
ian@0 1051
ian@0 1052 /*
ian@0 1053 * static variable?
ian@0 1054 */
ian@0 1055 if ((addr >= start) && (addr < end))
ian@0 1056 return 1;
ian@0 1057
ian@0 1058 #ifdef CONFIG_SMP
ian@0 1059 /*
ian@0 1060 * percpu var?
ian@0 1061 */
ian@0 1062 for_each_possible_cpu(i) {
ian@0 1063 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
ian@0 1064 end = (unsigned long) &__per_cpu_end + per_cpu_offset(i);
ian@0 1065
ian@0 1066 if ((addr >= start) && (addr < end))
ian@0 1067 return 1;
ian@0 1068 }
ian@0 1069 #endif
ian@0 1070
ian@0 1071 /*
ian@0 1072 * module var?
ian@0 1073 */
ian@0 1074 return is_module_address(addr);
ian@0 1075 }
ian@0 1076
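/*
 * Illustrative sketch, not part of the original file: why the key must
 * be a static object even though the lock itself may live on the heap.
 * With lockdep enabled, spin_lock_init() supplies a static
 * lock_class_key at its call site, so initialising a lock inside
 * kmalloc()ed memory through it is fine; conjuring a lock up by
 * memset() alone would leave the key pointing into the heap and
 * trigger the "non-static key" warning in register_lock_class().
 * The structure and allocation below are hypothetical.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

struct ex_device {
	spinlock_t lock;
};

static struct ex_device *ex_device_alloc(void)
{
	struct ex_device *dev = kmalloc(sizeof(*dev), GFP_KERNEL);

	if (dev)
		spin_lock_init(&dev->lock);	/* static key, heap lock: ok */
	return dev;
}
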
ian@0 1077 /*
ian@0 1078 * To make lock name printouts unique, we calculate a unique
ian@0 1079 * class->name_version generation counter:
ian@0 1080 */
ian@0 1081 static int count_matching_names(struct lock_class *new_class)
ian@0 1082 {
ian@0 1083 struct lock_class *class;
ian@0 1084 int count = 0;
ian@0 1085
ian@0 1086 if (!new_class->name)
ian@0 1087 return 0;
ian@0 1088
ian@0 1089 list_for_each_entry(class, &all_lock_classes, lock_entry) {
ian@0 1090 if (new_class->key - new_class->subclass == class->key)
ian@0 1091 return class->name_version;
ian@0 1092 if (class->name && !strcmp(class->name, new_class->name))
ian@0 1093 count = max(count, class->name_version);
ian@0 1094 }
ian@0 1095
ian@0 1096 return count + 1;
ian@0 1097 }
ian@0 1098
ian@0 1099 extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void);
ian@0 1100
ian@0 1101 /*
ian@0 1102 * Register a lock's class in the hash-table, if the class is not present
ian@0 1103 * yet. Otherwise we look it up. We cache the result in the lock object
ian@0 1104 * itself, so actual lookup of the hash should be once per lock object.
ian@0 1105 */
ian@0 1106 static inline struct lock_class *
ian@0 1107 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
ian@0 1108 {
ian@0 1109 struct lockdep_subclass_key *key;
ian@0 1110 struct list_head *hash_head;
ian@0 1111 struct lock_class *class;
ian@0 1112
ian@0 1113 #ifdef CONFIG_DEBUG_LOCKDEP
ian@0 1114 /*
ian@0 1115 * If the architecture calls into lockdep before initializing
ian@0 1116 * the hashes then we'll warn about it later. (we cannot printk
ian@0 1117 * right now)
ian@0 1118 */
ian@0 1119 if (unlikely(!lockdep_initialized)) {
ian@0 1120 lockdep_init();
ian@0 1121 lockdep_init_error = 1;
ian@0 1122 }
ian@0 1123 #endif
ian@0 1124
ian@0 1125 /*
ian@0 1126 * Static locks do not have their class-keys yet - for them the key
ian@0 1127 * is the lock object itself:
ian@0 1128 */
ian@0 1129 if (unlikely(!lock->key))
ian@0 1130 lock->key = (void *)lock;
ian@0 1131
ian@0 1132 /*
ian@0 1133 * NOTE: the class-key must be unique. For dynamic locks, a static
ian@0 1134 * lock_class_key variable is passed in through the mutex_init()
ian@0 1135 * (or spin_lock_init()) call - which acts as the key. For static
ian@0 1136 * locks we use the lock object itself as the key.
ian@0 1137 */
ian@0 1138 if (sizeof(struct lock_class_key) > sizeof(struct lock_class))
ian@0 1139 __error_too_big_MAX_LOCKDEP_SUBCLASSES();
ian@0 1140
ian@0 1141 key = lock->key->subkeys + subclass;
ian@0 1142
ian@0 1143 hash_head = classhashentry(key);
ian@0 1144
ian@0 1145 /*
ian@0 1146 * We can walk the hash lockfree, because the hash only
ian@0 1147 * grows, and we are careful when adding entries to the end:
ian@0 1148 */
ian@0 1149 list_for_each_entry(class, hash_head, hash_entry)
ian@0 1150 if (class->key == key)
ian@0 1151 return class;
ian@0 1152
ian@0 1153 return NULL;
ian@0 1154 }
ian@0 1155
ian@0 1156 /*
ian@0 1157 * Register a lock's class in the hash-table, if the class is not present
ian@0 1158 * yet. Otherwise we look it up. We cache the result in the lock object
ian@0 1159 * itself, so actual lookup of the hash should be once per lock object.
ian@0 1160 */
ian@0 1161 static inline struct lock_class *
ian@0 1162 register_lock_class(struct lockdep_map *lock, unsigned int subclass)
ian@0 1163 {
ian@0 1164 struct lockdep_subclass_key *key;
ian@0 1165 struct list_head *hash_head;
ian@0 1166 struct lock_class *class;
ian@0 1167
ian@0 1168 class = look_up_lock_class(lock, subclass);
ian@0 1169 if (likely(class))
ian@0 1170 return class;
ian@0 1171
ian@0 1172 /*
ian@0 1173 * Debug-check: all keys must be persistent!
ian@0 1174 */
ian@0 1175 if (!static_obj(lock->key)) {
ian@0 1176 debug_locks_off();
ian@0 1177 printk("INFO: trying to register non-static key.\n");
ian@0 1178 printk("the code is fine but needs lockdep annotation.\n");
ian@0 1179 printk("turning off the locking correctness validator.\n");
ian@0 1180 dump_stack();
ian@0 1181
ian@0 1182 return NULL;
ian@0 1183 }
ian@0 1184
ian@0 1185 key = lock->key->subkeys + subclass;
ian@0 1186 hash_head = classhashentry(key);
ian@0 1187
ian@0 1188 __raw_spin_lock(&hash_lock);
ian@0 1189 /*
ian@0 1190 * We have to do the hash-walk again, to avoid races
ian@0 1191 * with another CPU:
ian@0 1192 */
ian@0 1193 list_for_each_entry(class, hash_head, hash_entry)
ian@0 1194 if (class->key == key)
ian@0 1195 goto out_unlock_set;
ian@0 1196 /*
ian@0 1197 * Allocate a new key from the static array, and add it to
ian@0 1198 * the hash:
ian@0 1199 */
ian@0 1200 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
ian@0 1201 __raw_spin_unlock(&hash_lock);
ian@0 1202 debug_locks_off();
ian@0 1203 printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
ian@0 1204 printk("turning off the locking correctness validator.\n");
ian@0 1205 return NULL;
ian@0 1206 }
ian@0 1207 class = lock_classes + nr_lock_classes++;
ian@0 1208 debug_atomic_inc(&nr_unused_locks);
ian@0 1209 class->key = key;
ian@0 1210 class->name = lock->name;
ian@0 1211 class->subclass = subclass;
ian@0 1212 INIT_LIST_HEAD(&class->lock_entry);
ian@0 1213 INIT_LIST_HEAD(&class->locks_before);
ian@0 1214 INIT_LIST_HEAD(&class->locks_after);
ian@0 1215 class->name_version = count_matching_names(class);
ian@0 1216 /*
ian@0 1217 * We use RCU's safe list-add method to make
ian@0 1218 * parallel walking of the hash-list safe:
ian@0 1219 */
ian@0 1220 list_add_tail_rcu(&class->hash_entry, hash_head);
ian@0 1221
ian@0 1222 if (verbose(class)) {
ian@0 1223 __raw_spin_unlock(&hash_lock);
ian@0 1224 printk("\nnew class %p: %s", class->key, class->name);
ian@0 1225 if (class->name_version > 1)
ian@0 1226 printk("#%d", class->name_version);
ian@0 1227 printk("\n");
ian@0 1228 dump_stack();
ian@0 1229 __raw_spin_lock(&hash_lock);
ian@0 1230 }
ian@0 1231 out_unlock_set:
ian@0 1232 __raw_spin_unlock(&hash_lock);
ian@0 1233
ian@0 1234 if (!subclass)
ian@0 1235 lock->class_cache = class;
ian@0 1236
ian@0 1237 DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
ian@0 1238
ian@0 1239 return class;
ian@0 1240 }
ian@0 1241
ian@0 1242 /*
ian@0 1243 * Look up a dependency chain. If the key is not present yet then
ian@0 1244 * add it and return 0 - in this case the new dependency chain is
ian@0 1245 * validated. If the key is already hashed, return 1.
ian@0 1246 */
ian@0 1247 static inline int lookup_chain_cache(u64 chain_key)
ian@0 1248 {
ian@0 1249 struct list_head *hash_head = chainhashentry(chain_key);
ian@0 1250 struct lock_chain *chain;
ian@0 1251
ian@0 1252 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
ian@0 1253 /*
ian@0 1254 * We can walk it lock-free, because entries only get added
ian@0 1255 * to the hash:
ian@0 1256 */
ian@0 1257 list_for_each_entry(chain, hash_head, entry) {
ian@0 1258 if (chain->chain_key == chain_key) {
ian@0 1259 cache_hit:
ian@0 1260 debug_atomic_inc(&chain_lookup_hits);
ian@0 1261 /*
ian@0 1262 * In the debugging case, force redundant checking
ian@0 1263 * by returning 1:
ian@0 1264 */
ian@0 1265 #ifdef CONFIG_DEBUG_LOCKDEP
ian@0 1266 __raw_spin_lock(&hash_lock);
ian@0 1267 return 1;
ian@0 1268 #endif
ian@0 1269 return 0;
ian@0 1270 }
ian@0 1271 }
ian@0 1272 /*
ian@0 1273 * Allocate a new chain entry from the static array, and add
ian@0 1274 * it to the hash:
ian@0 1275 */
ian@0 1276 __raw_spin_lock(&hash_lock);
ian@0 1277 /*
ian@0 1278 * We have to walk the chain again locked - to avoid duplicates:
ian@0 1279 */
ian@0 1280 list_for_each_entry(chain, hash_head, entry) {
ian@0 1281 if (chain->chain_key == chain_key) {
ian@0 1282 __raw_spin_unlock(&hash_lock);
ian@0 1283 goto cache_hit;
ian@0 1284 }
ian@0 1285 }
ian@0 1286 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
ian@0 1287 __raw_spin_unlock(&hash_lock);
ian@0 1288 debug_locks_off();
ian@0 1289 printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
ian@0 1290 printk("turning off the locking correctness validator.\n");
ian@0 1291 return 0;
ian@0 1292 }
ian@0 1293 chain = lock_chains + nr_lock_chains++;
ian@0 1294 chain->chain_key = chain_key;
ian@0 1295 list_add_tail_rcu(&chain->entry, hash_head);
ian@0 1296 debug_atomic_inc(&chain_lookup_misses);
ian@0 1297 #ifdef CONFIG_TRACE_IRQFLAGS
ian@0 1298 if (current->hardirq_context)
ian@0 1299 nr_hardirq_chains++;
ian@0 1300 else {
ian@0 1301 if (current->softirq_context)
ian@0 1302 nr_softirq_chains++;
ian@0 1303 else
ian@0 1304 nr_process_chains++;
ian@0 1305 }
ian@0 1306 #else
ian@0 1307 nr_process_chains++;
ian@0 1308 #endif
ian@0 1309
ian@0 1310 return 1;
ian@0 1311 }
ian@0 1312
ian@0 1313 /*
ian@0 1314 * We are building curr_chain_key incrementally, so double-check
ian@0 1315 * it from scratch, to make sure that it's done correctly:
ian@0 1316 */
ian@0 1317 static void check_chain_key(struct task_struct *curr)
ian@0 1318 {
ian@0 1319 #ifdef CONFIG_DEBUG_LOCKDEP
ian@0 1320 struct held_lock *hlock, *prev_hlock = NULL;
ian@0 1321 unsigned int i, id;
ian@0 1322 u64 chain_key = 0;
ian@0 1323
ian@0 1324 for (i = 0; i < curr->lockdep_depth; i++) {
ian@0 1325 hlock = curr->held_locks + i;
ian@0 1326 if (chain_key != hlock->prev_chain_key) {
ian@0 1327 debug_locks_off();
ian@0 1328 printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n",
ian@0 1329 curr->lockdep_depth, i,
ian@0 1330 (unsigned long long)chain_key,
ian@0 1331 (unsigned long long)hlock->prev_chain_key);
ian@0 1332 WARN_ON(1);
ian@0 1333 return;
ian@0 1334 }
ian@0 1335 id = hlock->class - lock_classes;
ian@0 1336 DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS);
ian@0 1337 if (prev_hlock && (prev_hlock->irq_context !=
ian@0 1338 hlock->irq_context))
ian@0 1339 chain_key = 0;
ian@0 1340 chain_key = iterate_chain_key(chain_key, id);
ian@0 1341 prev_hlock = hlock;
ian@0 1342 }
ian@0 1343 if (chain_key != curr->curr_chain_key) {
ian@0 1344 debug_locks_off();
ian@0 1345 printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n",
ian@0 1346 curr->lockdep_depth, i,
ian@0 1347 (unsigned long long)chain_key,
ian@0 1348 (unsigned long long)curr->curr_chain_key);
ian@0 1349 WARN_ON(1);
ian@0 1350 }
ian@0 1351 #endif
ian@0 1352 }
ian@0 1353
ian@0 1354 #ifdef CONFIG_TRACE_IRQFLAGS
ian@0 1355
ian@0 1356 /*
ian@0 1357 * print irq inversion bug:
ian@0 1358 */
ian@0 1359 static int
ian@0 1360 print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
ian@0 1361 struct held_lock *this, int forwards,
ian@0 1362 const char *irqclass)
ian@0 1363 {
ian@0 1364 __raw_spin_unlock(&hash_lock);
ian@0 1365 debug_locks_off();
ian@0 1366 if (debug_locks_silent)
ian@0 1367 return 0;
ian@0 1368
ian@0 1369 printk("\n=========================================================\n");
ian@0 1370 printk( "[ INFO: possible irq lock inversion dependency detected ]\n");
ian@0 1371 printk( "---------------------------------------------------------\n");
ian@0 1372 printk("%s/%d just changed the state of lock:\n",
ian@0 1373 curr->comm, curr->pid);
ian@0 1374 print_lock(this);
ian@0 1375 if (forwards)
ian@0 1376 printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
ian@0 1377 else
ian@0 1378 printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
ian@0 1379 print_lock_name(other);
ian@0 1380 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
ian@0 1381
ian@0 1382 printk("\nother info that might help us debug this:\n");
ian@0 1383 lockdep_print_held_locks(curr);
ian@0 1384
ian@0 1385 printk("\nthe first lock's dependencies:\n");
ian@0 1386 print_lock_dependencies(this->class, 0);
ian@0 1387
ian@0 1388 printk("\nthe second lock's dependencies:\n");
ian@0 1389 print_lock_dependencies(other, 0);
ian@0 1390
ian@0 1391 printk("\nstack backtrace:\n");
ian@0 1392 dump_stack();
ian@0 1393
ian@0 1394 return 0;
ian@0 1395 }
ian@0 1396
ian@0 1397 /*
ian@0 1398 * Prove that in the forwards-direction subgraph starting at <this>
ian@0 1399 * there is no lock matching <mask>:
ian@0 1400 */
ian@0 1401 static int
ian@0 1402 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
ian@0 1403 enum lock_usage_bit bit, const char *irqclass)
ian@0 1404 {
ian@0 1405 int ret;
ian@0 1406
ian@0 1407 find_usage_bit = bit;
ian@0 1408 /* fills in <forwards_match> */
ian@0 1409 ret = find_usage_forwards(this->class, 0);
ian@0 1410 if (!ret || ret == 1)
ian@0 1411 return ret;
ian@0 1412
ian@0 1413 return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
ian@0 1414 }
ian@0 1415
ian@0 1416 /*
ian@0 1417 * Prove that in the backwards-direction subgraph starting at <this>
ian@0 1418 * there is no lock matching <mask>:
ian@0 1419 */
ian@0 1420 static int
ian@0 1421 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
ian@0 1422 enum lock_usage_bit bit, const char *irqclass)
ian@0 1423 {
ian@0 1424 int ret;
ian@0 1425
ian@0 1426 find_usage_bit = bit;
ian@0 1427 /* fills in <backwards_match> */
ian@0 1428 ret = find_usage_backwards(this->class, 0);
ian@0 1429 if (!ret || ret == 1)
ian@0 1430 return ret;
ian@0 1431
ian@0 1432 return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
ian@0 1433 }
ian@0 1434
ian@0 1435 static inline void print_irqtrace_events(struct task_struct *curr)
ian@0 1436 {
ian@0 1437 printk("irq event stamp: %u\n", curr->irq_events);
ian@0 1438 printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
ian@0 1439 print_ip_sym(curr->hardirq_enable_ip);
ian@0 1440 printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
ian@0 1441 print_ip_sym(curr->hardirq_disable_ip);
ian@0 1442 printk("softirqs last enabled at (%u): ", curr->softirq_enable_event);
ian@0 1443 print_ip_sym(curr->softirq_enable_ip);
ian@0 1444 printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
ian@0 1445 print_ip_sym(curr->softirq_disable_ip);
ian@0 1446 }
ian@0 1447
ian@0 1448 #else
ian@0 1449 static inline void print_irqtrace_events(struct task_struct *curr)
ian@0 1450 {
ian@0 1451 }
ian@0 1452 #endif
ian@0 1453
ian@0 1454 static int
ian@0 1455 print_usage_bug(struct task_struct *curr, struct held_lock *this,
ian@0 1456 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
ian@0 1457 {
ian@0 1458 __raw_spin_unlock(&hash_lock);
ian@0 1459 debug_locks_off();
ian@0 1460 if (debug_locks_silent)
ian@0 1461 return 0;
ian@0 1462
ian@0 1463 printk("\n=================================\n");
ian@0 1464 printk( "[ INFO: inconsistent lock state ]\n");
ian@0 1465 printk( "---------------------------------\n");
ian@0 1466
ian@0 1467 printk("inconsistent {%s} -> {%s} usage.\n",
ian@0 1468 usage_str[prev_bit], usage_str[new_bit]);
ian@0 1469
ian@0 1470 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
ian@0 1471 curr->comm, curr->pid,
ian@0 1472 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
ian@0 1473 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
ian@0 1474 trace_hardirqs_enabled(curr),
ian@0 1475 trace_softirqs_enabled(curr));
ian@0 1476 print_lock(this);
ian@0 1477
ian@0 1478 printk("{%s} state was registered at:\n", usage_str[prev_bit]);
ian@0 1479 print_stack_trace(this->class->usage_traces + prev_bit, 1);
ian@0 1480
ian@0 1481 print_irqtrace_events(curr);
ian@0 1482 printk("\nother info that might help us debug this:\n");
ian@0 1483 lockdep_print_held_locks(curr);
ian@0 1484
ian@0 1485 printk("\nstack backtrace:\n");
ian@0 1486 dump_stack();
ian@0 1487
ian@0 1488 return 0;
ian@0 1489 }
ian@0 1490
ian@0 1491 /*
ian@0 1492 * Print out an error if an invalid bit is set:
ian@0 1493 */
ian@0 1494 static inline int
ian@0 1495 valid_state(struct task_struct *curr, struct held_lock *this,
ian@0 1496 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
ian@0 1497 {
ian@0 1498 if (unlikely(this->class->usage_mask & (1 << bad_bit)))
ian@0 1499 return print_usage_bug(curr, this, bad_bit, new_bit);
ian@0 1500 return 1;
ian@0 1501 }
ian@0 1502
ian@0 1503 #define STRICT_READ_CHECKS 1
ian@0 1504
ian@0 1505 /*
ian@0 1506 * Mark a lock with a usage bit, and validate the state transition:
ian@0 1507 */
ian@0 1508 static int mark_lock(struct task_struct *curr, struct held_lock *this,
ian@0 1509 enum lock_usage_bit new_bit, unsigned long ip)
ian@0 1510 {
ian@0 1511 unsigned int new_mask = 1 << new_bit, ret = 1;
ian@0 1512
ian@0 1513 /*
ian@0 1514 * If already set then do not dirty the cacheline,
ian@0 1515 * nor do any checks:
ian@0 1516 */
ian@0 1517 if (likely(this->class->usage_mask & new_mask))
ian@0 1518 return 1;
ian@0 1519
ian@0 1520 __raw_spin_lock(&hash_lock);
ian@0 1521 /*
ian@0 1522 * Make sure we didnt race:
ian@0 1523 */
ian@0 1524 if (unlikely(this->class->usage_mask & new_mask)) {
ian@0 1525 __raw_spin_unlock(&hash_lock);
ian@0 1526 return 1;
ian@0 1527 }
ian@0 1528
ian@0 1529 this->class->usage_mask |= new_mask;
ian@0 1530
ian@0 1531 #ifdef CONFIG_TRACE_IRQFLAGS
ian@0 1532 if (new_bit == LOCK_ENABLED_HARDIRQS ||
ian@0 1533 new_bit == LOCK_ENABLED_HARDIRQS_READ)
ian@0 1534 ip = curr->hardirq_enable_ip;
ian@0 1535 else if (new_bit == LOCK_ENABLED_SOFTIRQS ||
ian@0 1536 new_bit == LOCK_ENABLED_SOFTIRQS_READ)
ian@0 1537 ip = curr->softirq_enable_ip;
ian@0 1538 #endif
ian@0 1539 if (!save_trace(this->class->usage_traces + new_bit))
ian@0 1540 return 0;
ian@0 1541
ian@0 1542 switch (new_bit) {
ian@0 1543 #ifdef CONFIG_TRACE_IRQFLAGS
ian@0 1544 case LOCK_USED_IN_HARDIRQ:
ian@0 1545 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
ian@0 1546 return 0;
ian@0 1547 if (!valid_state(curr, this, new_bit,
ian@0 1548 LOCK_ENABLED_HARDIRQS_READ))
ian@0 1549 return 0;
ian@0 1550 /*
ian@0 1551 * just marked it hardirq-safe, check that this lock
ian@0 1552 * took no hardirq-unsafe lock in the past:
ian@0 1553 */
ian@0 1554 if (!check_usage_forwards(curr, this,
ian@0 1555 LOCK_ENABLED_HARDIRQS, "hard"))
ian@0 1556 return 0;
ian@0 1557 #if STRICT_READ_CHECKS
ian@0 1558 /*
ian@0 1559 * just marked it hardirq-safe, check that this lock
ian@0 1560 * took no hardirq-unsafe-read lock in the past:
ian@0 1561 */
ian@0 1562 if (!check_usage_forwards(curr, this,
ian@0 1563 LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
ian@0 1564 return 0;
ian@0 1565 #endif
ian@0 1566 if (hardirq_verbose(this->class))
ian@0 1567 ret = 2;
ian@0 1568 break;
ian@0 1569 case LOCK_USED_IN_SOFTIRQ:
ian@0 1570 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
ian@0 1571 return 0;
ian@0 1572 if (!valid_state(curr, this, new_bit,
ian@0 1573 LOCK_ENABLED_SOFTIRQS_READ))
ian@0 1574 return 0;
ian@0 1575 /*
ian@0 1576 * just marked it softirq-safe, check that this lock
ian@0 1577 * took no softirq-unsafe lock in the past:
ian@0 1578 */
ian@0 1579 if (!check_usage_forwards(curr, this,
ian@0 1580 LOCK_ENABLED_SOFTIRQS, "soft"))
ian@0 1581 return 0;
ian@0 1582 #if STRICT_READ_CHECKS
ian@0 1583 /*
ian@0 1584 * just marked it softirq-safe, check that this lock
ian@0 1585 * took no softirq-unsafe-read lock in the past:
ian@0 1586 */
ian@0 1587 if (!check_usage_forwards(curr, this,
ian@0 1588 LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
ian@0 1589 return 0;
ian@0 1590 #endif
ian@0 1591 if (softirq_verbose(this->class))
ian@0 1592 ret = 2;
ian@0 1593 break;
ian@0 1594 case LOCK_USED_IN_HARDIRQ_READ:
ian@0 1595 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
ian@0 1596 return 0;
ian@0 1597 /*
ian@0 1598 * just marked it hardirq-read-safe, check that this lock
ian@0 1599 * took no hardirq-unsafe lock in the past:
ian@0 1600 */
ian@0 1601 if (!check_usage_forwards(curr, this,
ian@0 1602 LOCK_ENABLED_HARDIRQS, "hard"))
ian@0 1603 return 0;
ian@0 1604 if (hardirq_verbose(this->class))
ian@0 1605 ret = 2;
ian@0 1606 break;
ian@0 1607 case LOCK_USED_IN_SOFTIRQ_READ:
ian@0 1608 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
ian@0 1609 return 0;
ian@0 1610 /*
ian@0 1611 * just marked it softirq-read-safe, check that this lock
ian@0 1612 * took no softirq-unsafe lock in the past:
ian@0 1613 */
ian@0 1614 if (!check_usage_forwards(curr, this,
ian@0 1615 LOCK_ENABLED_SOFTIRQS, "soft"))
ian@0 1616 return 0;
ian@0 1617 if (softirq_verbose(this->class))
ian@0 1618 ret = 2;
ian@0 1619 break;
ian@0 1620 case LOCK_ENABLED_HARDIRQS:
ian@0 1621 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
ian@0 1622 return 0;
ian@0 1623 if (!valid_state(curr, this, new_bit,
ian@0 1624 LOCK_USED_IN_HARDIRQ_READ))
ian@0 1625 return 0;
ian@0 1626 /*
ian@0 1627 * just marked it hardirq-unsafe, check that no hardirq-safe
ian@0 1628 * lock in the system ever took it in the past:
ian@0 1629 */
ian@0 1630 if (!check_usage_backwards(curr, this,
ian@0 1631 LOCK_USED_IN_HARDIRQ, "hard"))
ian@0 1632 return 0;
ian@0 1633 #if STRICT_READ_CHECKS
ian@0 1634 /*
ian@0 1635 * just marked it hardirq-unsafe, check that no
ian@0 1636 * hardirq-safe-read lock in the system ever took
ian@0 1637 * it in the past:
ian@0 1638 */
ian@0 1639 if (!check_usage_backwards(curr, this,
ian@0 1640 LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
ian@0 1641 return 0;
ian@0 1642 #endif
ian@0 1643 if (hardirq_verbose(this->class))
ian@0 1644 ret = 2;
ian@0 1645 break;
ian@0 1646 case LOCK_ENABLED_SOFTIRQS:
ian@0 1647 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
ian@0 1648 return 0;
ian@0 1649 if (!valid_state(curr, this, new_bit,
ian@0 1650 LOCK_USED_IN_SOFTIRQ_READ))
ian@0 1651 return 0;
ian@0 1652 /*
ian@0 1653 * just marked it softirq-unsafe, check that no softirq-safe
ian@0 1654 * lock in the system ever took it in the past:
ian@0 1655 */
ian@0 1656 if (!check_usage_backwards(curr, this,
ian@0 1657 LOCK_USED_IN_SOFTIRQ, "soft"))
ian@0 1658 return 0;
ian@0 1659 #if STRICT_READ_CHECKS
ian@0 1660 /*
ian@0 1661 * just marked it softirq-unsafe, check that no
ian@0 1662 * softirq-safe-read lock in the system ever took
ian@0 1663 * it in the past:
ian@0 1664 */
ian@0 1665 if (!check_usage_backwards(curr, this,
ian@0 1666 LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
ian@0 1667 return 0;
ian@0 1668 #endif
ian@0 1669 if (softirq_verbose(this->class))
ian@0 1670 ret = 2;
ian@0 1671 break;
ian@0 1672 case LOCK_ENABLED_HARDIRQS_READ:
ian@0 1673 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
ian@0 1674 return 0;
ian@0 1675 #if STRICT_READ_CHECKS
ian@0 1676 /*
ian@0 1677 * just marked it hardirq-read-unsafe, check that no
ian@0 1678 * hardirq-safe lock in the system ever took it in the past:
ian@0 1679 */
ian@0 1680 if (!check_usage_backwards(curr, this,
ian@0 1681 LOCK_USED_IN_HARDIRQ, "hard"))
ian@0 1682 return 0;
ian@0 1683 #endif
ian@0 1684 if (hardirq_verbose(this->class))
ian@0 1685 ret = 2;
ian@0 1686 break;
ian@0 1687 case LOCK_ENABLED_SOFTIRQS_READ:
ian@0 1688 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
ian@0 1689 return 0;
ian@0 1690 #if STRICT_READ_CHECKS
ian@0 1691 /*
ian@0 1692 * just marked it softirq-read-unsafe, check that no
ian@0 1693 * softirq-safe lock in the system ever took it in the past:
ian@0 1694 */
ian@0 1695 if (!check_usage_backwards(curr, this,
ian@0 1696 LOCK_USED_IN_SOFTIRQ, "soft"))
ian@0 1697 return 0;
ian@0 1698 #endif
ian@0 1699 if (softirq_verbose(this->class))
ian@0 1700 ret = 2;
ian@0 1701 break;
ian@0 1702 #endif
ian@0 1703 case LOCK_USED:
ian@0 1704 /*
ian@0 1705 * Add it to the global list of classes:
ian@0 1706 */
ian@0 1707 list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes);
ian@0 1708 debug_atomic_dec(&nr_unused_locks);
ian@0 1709 break;
ian@0 1710 default:
ian@0 1711 debug_locks_off();
ian@0 1712 WARN_ON(1);
ian@0 1713 return 0;
ian@0 1714 }
ian@0 1715
ian@0 1716 __raw_spin_unlock(&hash_lock);
ian@0 1717
ian@0 1718 /*
ian@0 1719 * We must printk outside of the hash_lock:
ian@0 1720 */
ian@0 1721 if (ret == 2) {
ian@0 1722 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
ian@0 1723 print_lock(this);
ian@0 1724 print_irqtrace_events(curr);
ian@0 1725 dump_stack();
ian@0 1726 }
ian@0 1727
ian@0 1728 return ret;
ian@0 1729 }
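/*
 * Illustrative sketch (editor's note, not part of the original source):
 * the state checks above catch irq-safety inversions even when no
 * deadlock actually happens. For example, with a hypothetical lock A:
 *
 *	spin_lock(&A);			// process context, hardirqs on
 *	spin_unlock(&A);		//   -> LOCK_ENABLED_HARDIRQS set
 *	...
 *	<hardirq handler>
 *		spin_lock(&A);		//   -> LOCK_USED_IN_HARDIRQ
 *
 * The second acquire reaches mark_lock(..., LOCK_USED_IN_HARDIRQ, ...),
 * valid_state() finds LOCK_ENABLED_HARDIRQS already set in the class's
 * usage_mask, and print_usage_bug() reports the potential irq deadlock.
 */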
ian@0 1730
ian@0 1731 #ifdef CONFIG_TRACE_IRQFLAGS
ian@0 1732 /*
ian@0 1733 * Mark all held locks with a usage bit:
ian@0 1734 */
ian@0 1735 static int
ian@0 1736 mark_held_locks(struct task_struct *curr, int hardirq, unsigned long ip)
ian@0 1737 {
ian@0 1738 enum lock_usage_bit usage_bit;
ian@0 1739 struct held_lock *hlock;
ian@0 1740 int i;
ian@0 1741
ian@0 1742 for (i = 0; i < curr->lockdep_depth; i++) {
ian@0 1743 hlock = curr->held_locks + i;
ian@0 1744
ian@0 1745 if (hardirq) {
ian@0 1746 if (hlock->read)
ian@0 1747 usage_bit = LOCK_ENABLED_HARDIRQS_READ;
ian@0 1748 else
ian@0 1749 usage_bit = LOCK_ENABLED_HARDIRQS;
ian@0 1750 } else {
ian@0 1751 if (hlock->read)
ian@0 1752 usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
ian@0 1753 else
ian@0 1754 usage_bit = LOCK_ENABLED_SOFTIRQS;
ian@0 1755 }
ian@0 1756 if (!mark_lock(curr, hlock, usage_bit, ip))
ian@0 1757 return 0;
ian@0 1758 }
ian@0 1759
ian@0 1760 return 1;
ian@0 1761 }
ian@0 1762
ian@0 1763 /*
ian@0 1764 * Debugging helper: via this flag we know that we are in
ian@0 1765 * 'early bootup code', and will warn about any invalid irqs-on event:
ian@0 1766 */
ian@0 1767 static int early_boot_irqs_enabled;
ian@0 1768
ian@0 1769 void early_boot_irqs_off(void)
ian@0 1770 {
ian@0 1771 early_boot_irqs_enabled = 0;
ian@0 1772 }
ian@0 1773
ian@0 1774 void early_boot_irqs_on(void)
ian@0 1775 {
ian@0 1776 early_boot_irqs_enabled = 1;
ian@0 1777 }
ian@0 1778
ian@0 1779 /*
ian@0 1780 * Hardirqs will be enabled:
ian@0 1781 */
ian@0 1782 void trace_hardirqs_on(void)
ian@0 1783 {
ian@0 1784 struct task_struct *curr = current;
ian@0 1785 unsigned long ip;
ian@0 1786
ian@0 1787 if (unlikely(!debug_locks || current->lockdep_recursion))
ian@0 1788 return;
ian@0 1789
ian@0 1790 if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
ian@0 1791 return;
ian@0 1792
ian@0 1793 if (unlikely(curr->hardirqs_enabled)) {
ian@0 1794 debug_atomic_inc(&redundant_hardirqs_on);
ian@0 1795 return;
ian@0 1796 }
ian@0 1797 /* we'll do an OFF -> ON transition: */
ian@0 1798 curr->hardirqs_enabled = 1;
ian@0 1799 ip = (unsigned long) __builtin_return_address(0);
ian@0 1800
ian@0 1801 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
ian@0 1802 return;
ian@0 1803 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
ian@0 1804 return;
ian@0 1805 /*
ian@0 1806 * We are going to turn hardirqs on, so set the
ian@0 1807 * usage bit for all held locks:
ian@0 1808 */
ian@0 1809 if (!mark_held_locks(curr, 1, ip))
ian@0 1810 return;
ian@0 1811 /*
ian@0 1812 * If we have softirqs enabled, then set the usage
ian@0 1813 * bit for all held locks. (disabled hardirqs prevented
ian@0 1814 * this bit from being set before)
ian@0 1815 */
ian@0 1816 if (curr->softirqs_enabled)
ian@0 1817 if (!mark_held_locks(curr, 0, ip))
ian@0 1818 return;
ian@0 1819
ian@0 1820 curr->hardirq_enable_ip = ip;
ian@0 1821 curr->hardirq_enable_event = ++curr->irq_events;
ian@0 1822 debug_atomic_inc(&hardirqs_on_events);
ian@0 1823 }
ian@0 1824
ian@0 1825 EXPORT_SYMBOL(trace_hardirqs_on);
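/*
 * Editor's note, a hedged sketch of how these hooks are reached (the
 * actual wrappers live in include/linux/irqflags.h, not in this file):
 * with CONFIG_TRACE_IRQFLAGS the generic irq-flags macros expand to
 * roughly
 *
 *	#define local_irq_enable() \
 *		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
 *	#define local_irq_disable() \
 *		do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
 *
 * so every irqs-off -> irqs-on transition funnels through
 * trace_hardirqs_on()/trace_hardirqs_off() above.
 */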
ian@0 1826
ian@0 1827 /*
ian@0 1828 * Hardirqs were disabled:
ian@0 1829 */
ian@0 1830 void trace_hardirqs_off(void)
ian@0 1831 {
ian@0 1832 struct task_struct *curr = current;
ian@0 1833
ian@0 1834 if (unlikely(!debug_locks || current->lockdep_recursion))
ian@0 1835 return;
ian@0 1836
ian@0 1837 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
ian@0 1838 return;
ian@0 1839
ian@0 1840 if (curr->hardirqs_enabled) {
ian@0 1841 /*
ian@0 1842 * We have done an ON -> OFF transition:
ian@0 1843 */
ian@0 1844 curr->hardirqs_enabled = 0;
ian@0 1845 curr->hardirq_disable_ip = _RET_IP_;
ian@0 1846 curr->hardirq_disable_event = ++curr->irq_events;
ian@0 1847 debug_atomic_inc(&hardirqs_off_events);
ian@0 1848 } else
ian@0 1849 debug_atomic_inc(&redundant_hardirqs_off);
ian@0 1850 }
ian@0 1851
ian@0 1852 EXPORT_SYMBOL(trace_hardirqs_off);
ian@0 1853
ian@0 1854 /*
ian@0 1855 * Softirqs will be enabled:
ian@0 1856 */
ian@0 1857 void trace_softirqs_on(unsigned long ip)
ian@0 1858 {
ian@0 1859 struct task_struct *curr = current;
ian@0 1860
ian@0 1861 if (unlikely(!debug_locks))
ian@0 1862 return;
ian@0 1863
ian@0 1864 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
ian@0 1865 return;
ian@0 1866
ian@0 1867 if (curr->softirqs_enabled) {
ian@0 1868 debug_atomic_inc(&redundant_softirqs_on);
ian@0 1869 return;
ian@0 1870 }
ian@0 1871
ian@0 1872 /*
ian@0 1873 * We'll do an OFF -> ON transition:
ian@0 1874 */
ian@0 1875 curr->softirqs_enabled = 1;
ian@0 1876 curr->softirq_enable_ip = ip;
ian@0 1877 curr->softirq_enable_event = ++curr->irq_events;
ian@0 1878 debug_atomic_inc(&softirqs_on_events);
ian@0 1879 /*
ian@0 1880 * We are going to turn softirqs on, so set the
ian@0 1881 * usage bit for all held locks, if hardirqs are
ian@0 1882 * enabled too:
ian@0 1883 */
ian@0 1884 if (curr->hardirqs_enabled)
ian@0 1885 mark_held_locks(curr, 0, ip);
ian@0 1886 }
ian@0 1887
ian@0 1888 /*
ian@0 1889 * Softirqs were disabled:
ian@0 1890 */
ian@0 1891 void trace_softirqs_off(unsigned long ip)
ian@0 1892 {
ian@0 1893 struct task_struct *curr = current;
ian@0 1894
ian@0 1895 if (unlikely(!debug_locks))
ian@0 1896 return;
ian@0 1897
ian@0 1898 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
ian@0 1899 return;
ian@0 1900
ian@0 1901 if (curr->softirqs_enabled) {
ian@0 1902 /*
ian@0 1903 * We have done an ON -> OFF transition:
ian@0 1904 */
ian@0 1905 curr->softirqs_enabled = 0;
ian@0 1906 curr->softirq_disable_ip = ip;
ian@0 1907 curr->softirq_disable_event = ++curr->irq_events;
ian@0 1908 debug_atomic_inc(&softirqs_off_events);
ian@0 1909 DEBUG_LOCKS_WARN_ON(!softirq_count());
ian@0 1910 } else
ian@0 1911 debug_atomic_inc(&redundant_softirqs_off);
ian@0 1912 }
ian@0 1913
ian@0 1914 #endif
ian@0 1915
ian@0 1916 /*
ian@0 1917 * Initialize a lock instance's lock-class mapping info:
ian@0 1918 */
ian@0 1919 void lockdep_init_map(struct lockdep_map *lock, const char *name,
ian@0 1920 struct lock_class_key *key)
ian@0 1921 {
ian@0 1922 if (unlikely(!debug_locks))
ian@0 1923 return;
ian@0 1924
ian@0 1925 if (DEBUG_LOCKS_WARN_ON(!key))
ian@0 1926 return;
ian@0 1927 if (DEBUG_LOCKS_WARN_ON(!name))
ian@0 1928 return;
ian@0 1929 /*
ian@0 1930 * Sanity check, the lock-class key must be persistent:
ian@0 1931 */
ian@0 1932 if (!static_obj(key)) {
ian@0 1933 printk("BUG: key %p not in .data!\n", key);
ian@0 1934 DEBUG_LOCKS_WARN_ON(1);
ian@0 1935 return;
ian@0 1936 }
ian@0 1937 lock->name = name;
ian@0 1938 lock->key = key;
ian@0 1939 lock->class_cache = NULL;
ian@0 1940 }
ian@0 1941
ian@0 1942 EXPORT_SYMBOL_GPL(lockdep_init_map);
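/*
 * Editor's note, an illustrative (hypothetical) usage sketch: a caller
 * registers a lock instance with a static key whose address identifies
 * the lock class, e.g.
 *
 *	static struct lock_class_key my_dev_lock_key;	// lives in .data
 *
 *	lockdep_init_map(&dev->lockdep_map, "my_dev_lock", &my_dev_lock_key);
 *
 * The key must be static so that the static_obj() sanity check above
 * accepts it; "dev" and the key name are made up for the example.
 */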
ian@0 1943
ian@0 1944 /*
ian@0 1945 * This gets called for every mutex_lock*()/spin_lock*() operation.
ian@0 1946 * We maintain the dependency maps and validate the locking attempt:
ian@0 1947 */
ian@0 1948 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
ian@0 1949 int trylock, int read, int check, int hardirqs_off,
ian@0 1950 unsigned long ip)
ian@0 1951 {
ian@0 1952 struct task_struct *curr = current;
ian@0 1953 struct lock_class *class = NULL;
ian@0 1954 struct held_lock *hlock;
ian@0 1955 unsigned int depth, id;
ian@0 1956 int chain_head = 0;
ian@0 1957 u64 chain_key;
ian@0 1958
ian@0 1959 if (unlikely(!debug_locks))
ian@0 1960 return 0;
ian@0 1961
ian@0 1962 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
ian@0 1963 return 0;
ian@0 1964
ian@0 1965 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
ian@0 1966 debug_locks_off();
ian@0 1967 printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
ian@0 1968 printk("turning off the locking correctness validator.\n");
ian@0 1969 return 0;
ian@0 1970 }
ian@0 1971
ian@0 1972 if (!subclass)
ian@0 1973 class = lock->class_cache;
ian@0 1974 /*
ian@0 1975 * Not cached yet or subclass?
ian@0 1976 */
ian@0 1977 if (unlikely(!class)) {
ian@0 1978 class = register_lock_class(lock, subclass);
ian@0 1979 if (!class)
ian@0 1980 return 0;
ian@0 1981 }
ian@0 1982 debug_atomic_inc((atomic_t *)&class->ops);
ian@0 1983 if (very_verbose(class)) {
ian@0 1984 printk("\nacquire class [%p] %s", class->key, class->name);
ian@0 1985 if (class->name_version > 1)
ian@0 1986 printk("#%d", class->name_version);
ian@0 1987 printk("\n");
ian@0 1988 dump_stack();
ian@0 1989 }
ian@0 1990
ian@0 1991 /*
ian@0 1992 * Add the lock to the list of currently held locks.
ian@0 1993 * (we don't increase the depth just yet, up until the
ian@0 1994 * dependency checks are done)
ian@0 1995 */
ian@0 1996 depth = curr->lockdep_depth;
ian@0 1997 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
ian@0 1998 return 0;
ian@0 1999
ian@0 2000 hlock = curr->held_locks + depth;
ian@0 2001
ian@0 2002 hlock->class = class;
ian@0 2003 hlock->acquire_ip = ip;
ian@0 2004 hlock->instance = lock;
ian@0 2005 hlock->trylock = trylock;
ian@0 2006 hlock->read = read;
ian@0 2007 hlock->check = check;
ian@0 2008 hlock->hardirqs_off = hardirqs_off;
ian@0 2009
ian@0 2010 if (check != 2)
ian@0 2011 goto out_calc_hash;
ian@0 2012 #ifdef CONFIG_TRACE_IRQFLAGS
ian@0 2013 /*
ian@0 2014 * If this is a non-trylock acquire in a hardirq or softirq
ian@0 2015 * context, mark the lock as used in that context:
ian@0 2016 */
ian@0 2017 if (!trylock) {
ian@0 2018 if (read) {
ian@0 2019 if (curr->hardirq_context)
ian@0 2020 if (!mark_lock(curr, hlock,
ian@0 2021 LOCK_USED_IN_HARDIRQ_READ, ip))
ian@0 2022 return 0;
ian@0 2023 if (curr->softirq_context)
ian@0 2024 if (!mark_lock(curr, hlock,
ian@0 2025 LOCK_USED_IN_SOFTIRQ_READ, ip))
ian@0 2026 return 0;
ian@0 2027 } else {
ian@0 2028 if (curr->hardirq_context)
ian@0 2029 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ, ip))
ian@0 2030 return 0;
ian@0 2031 if (curr->softirq_context)
ian@0 2032 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ, ip))
ian@0 2033 return 0;
ian@0 2034 }
ian@0 2035 }
ian@0 2036 if (!hardirqs_off) {
ian@0 2037 if (read) {
ian@0 2038 if (!mark_lock(curr, hlock,
ian@0 2039 LOCK_ENABLED_HARDIRQS_READ, ip))
ian@0 2040 return 0;
ian@0 2041 if (curr->softirqs_enabled)
ian@0 2042 if (!mark_lock(curr, hlock,
ian@0 2043 LOCK_ENABLED_SOFTIRQS_READ, ip))
ian@0 2044 return 0;
ian@0 2045 } else {
ian@0 2046 if (!mark_lock(curr, hlock,
ian@0 2047 LOCK_ENABLED_HARDIRQS, ip))
ian@0 2048 return 0;
ian@0 2049 if (curr->softirqs_enabled)
ian@0 2050 if (!mark_lock(curr, hlock,
ian@0 2051 LOCK_ENABLED_SOFTIRQS, ip))
ian@0 2052 return 0;
ian@0 2053 }
ian@0 2054 }
ian@0 2055 #endif
ian@0 2056 /* mark it as used: */
ian@0 2057 if (!mark_lock(curr, hlock, LOCK_USED, ip))
ian@0 2058 return 0;
ian@0 2059 out_calc_hash:
ian@0 2060 /*
ian@0 2061 * Calculate the chain hash: it's the combined hash of all the
ian@0 2062 * lock keys along the dependency chain. We save the hash value
ian@0 2063 * at every step so that we can get the current hash easily
ian@0 2064 * after unlock. The chain hash is then used to cache dependency
ian@0 2065 * results.
ian@0 2066 *
ian@0 2067 * The 'key ID' (the class index) is the most compact value we can
ian@0 2068 * use to drive the hash, so we use it rather than class->key.
ian@0 2069 */
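/*
 * Editor's note (illustrative, not part of the original source): the
 * chain key is folded one class id at a time, so after acquiring the
 * classes with ids idA, idB, idC in that order the cached value is
 * roughly
 *
 *	iterate_chain_key(iterate_chain_key(iterate_chain_key(0, idA),
 *					    idB), idC)
 *
 * and each held_lock's prev_chain_key snapshot lets the release paths
 * below rewind the fold to any earlier point.
 */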
ian@0 2070 id = class - lock_classes;
ian@0 2071 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
ian@0 2072 return 0;
ian@0 2073
ian@0 2074 chain_key = curr->curr_chain_key;
ian@0 2075 if (!depth) {
ian@0 2076 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
ian@0 2077 return 0;
ian@0 2078 chain_head = 1;
ian@0 2079 }
ian@0 2080
ian@0 2081 hlock->prev_chain_key = chain_key;
ian@0 2082
ian@0 2083 #ifdef CONFIG_TRACE_IRQFLAGS
ian@0 2084 /*
ian@0 2085 * Keep track of points where we cross into an interrupt context:
ian@0 2086 */
ian@0 2087 hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
ian@0 2088 curr->softirq_context;
ian@0 2089 if (depth) {
ian@0 2090 struct held_lock *prev_hlock;
ian@0 2091
ian@0 2092 prev_hlock = curr->held_locks + depth-1;
ian@0 2093 /*
ian@0 2094 * If we cross into another context, reset the
ian@0 2095 * hash key (this also prevents the checking and the
ian@0 2096 * adding of the dependency to 'prev'):
ian@0 2097 */
ian@0 2098 if (prev_hlock->irq_context != hlock->irq_context) {
ian@0 2099 chain_key = 0;
ian@0 2100 chain_head = 1;
ian@0 2101 }
ian@0 2102 }
ian@0 2103 #endif
ian@0 2104 chain_key = iterate_chain_key(chain_key, id);
ian@0 2105 curr->curr_chain_key = chain_key;
ian@0 2106
ian@0 2107 /*
ian@0 2108 * Trylock needs to maintain the stack of held locks, but it
ian@0 2109 * does not add new dependencies, because trylock can be done
ian@0 2110 * in any order.
ian@0 2111 *
ian@0 2112 * We look up the chain_key and do the O(N^2) check and update of
ian@0 2113 * the dependencies only if this is a new dependency chain.
ian@0 2114 * (If lookup_chain_cache() returns with 1 it acquires
ian@0 2115 * hash_lock for us)
ian@0 2116 */
ian@0 2117 if (!trylock && (check == 2) && lookup_chain_cache(chain_key)) {
ian@0 2118 /*
ian@0 2119 * Check whether last held lock:
ian@0 2120 *
ian@0 2121 * - is irq-safe, if this lock is irq-unsafe
ian@0 2122 * - is softirq-safe, if this lock is hardirq-unsafe
ian@0 2123 *
ian@0 2124 * And check whether the new lock's dependency graph
ian@0 2125 * could lead back to the previous lock.
ian@0 2126 *
ian@0 2127 * Any of these scenarios could lead to a deadlock. If all
ian@0 2128 * validations pass, the new dependency is added below.
ian@0 2129 */
ian@0 2130 int ret = check_deadlock(curr, hlock, lock, read);
ian@0 2131
ian@0 2132 if (!ret)
ian@0 2133 return 0;
ian@0 2134 /*
ian@0 2135 * Mark recursive read, as we jump over it when
ian@0 2136 * building dependencies (just like we jump over
ian@0 2137 * trylock entries):
ian@0 2138 */
ian@0 2139 if (ret == 2)
ian@0 2140 hlock->read = 2;
ian@0 2141 /*
ian@0 2142 * Add dependency only if this lock is not the head
ian@0 2143 * of the chain, and if it's not a secondary read-lock:
ian@0 2144 */
ian@0 2145 if (!chain_head && ret != 2)
ian@0 2146 if (!check_prevs_add(curr, hlock))
ian@0 2147 return 0;
ian@0 2148 __raw_spin_unlock(&hash_lock);
ian@0 2149 }
ian@0 2150 curr->lockdep_depth++;
ian@0 2151 check_chain_key(curr);
ian@0 2152 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
ian@0 2153 debug_locks_off();
ian@0 2154 printk("BUG: MAX_LOCK_DEPTH too low!\n");
ian@0 2155 printk("turning off the locking correctness validator.\n");
ian@0 2156 return 0;
ian@0 2157 }
ian@0 2158 if (unlikely(curr->lockdep_depth > max_lockdep_depth))
ian@0 2159 max_lockdep_depth = curr->lockdep_depth;
ian@0 2160
ian@0 2161 return 1;
ian@0 2162 }
ian@0 2163
ian@0 2164 static int
ian@0 2165 print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
ian@0 2166 unsigned long ip)
ian@0 2167 {
ian@0 2168 if (!debug_locks_off())
ian@0 2169 return 0;
ian@0 2170 if (debug_locks_silent)
ian@0 2171 return 0;
ian@0 2172
ian@0 2173 printk("\n=====================================\n");
ian@0 2174 printk( "[ BUG: bad unlock balance detected! ]\n");
ian@0 2175 printk( "-------------------------------------\n");
ian@0 2176 printk("%s/%d is trying to release lock (",
ian@0 2177 curr->comm, curr->pid);
ian@0 2178 print_lockdep_cache(lock);
ian@0 2179 printk(") at:\n");
ian@0 2180 print_ip_sym(ip);
ian@0 2181 printk("but there are no more locks to release!\n");
ian@0 2182 printk("\nother info that might help us debug this:\n");
ian@0 2183 lockdep_print_held_locks(curr);
ian@0 2184
ian@0 2185 printk("\nstack backtrace:\n");
ian@0 2186 dump_stack();
ian@0 2187
ian@0 2188 return 0;
ian@0 2189 }
ian@0 2190
ian@0 2191 /*
ian@0 2192 * Common debugging checks for both nested and non-nested unlock:
ian@0 2193 */
ian@0 2194 static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
ian@0 2195 unsigned long ip)
ian@0 2196 {
ian@0 2197 if (unlikely(!debug_locks))
ian@0 2198 return 0;
ian@0 2199 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
ian@0 2200 return 0;
ian@0 2201
ian@0 2202 if (curr->lockdep_depth <= 0)
ian@0 2203 return print_unlock_inbalance_bug(curr, lock, ip);
ian@0 2204
ian@0 2205 return 1;
ian@0 2206 }
ian@0 2207
ian@0 2208 /*
ian@0 2209 * Remove the lock from the list of currently held locks in a
ian@0 2210 * potentially non-nested (out of order) manner. This is a
ian@0 2211 * relatively rare operation, as all the unlock APIs default
ian@0 2212 * to nested mode (which uses lock_release()):
ian@0 2213 */
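/*
 * Editor's note, a hypothetical sequence that ends up here rather than
 * in the nested fast path:
 *
 *	spin_lock(&A);
 *	spin_lock(&B);
 *	spin_unlock(&A);	// A is not the top of the held-lock stack
 *	spin_unlock(&B);
 *
 * Releasing A means removing it from the middle of held_locks[] and
 * re-adding B on top, which is what the loop below does.
 */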
ian@0 2214 static int
ian@0 2215 lock_release_non_nested(struct task_struct *curr,
ian@0 2216 struct lockdep_map *lock, unsigned long ip)
ian@0 2217 {
ian@0 2218 struct held_lock *hlock, *prev_hlock;
ian@0 2219 unsigned int depth;
ian@0 2220 int i;
ian@0 2221
ian@0 2222 /*
ian@0 2223 * Check whether the lock exists in the current stack
ian@0 2224 * of held locks:
ian@0 2225 */
ian@0 2226 depth = curr->lockdep_depth;
ian@0 2227 if (DEBUG_LOCKS_WARN_ON(!depth))
ian@0 2228 return 0;
ian@0 2229
ian@0 2230 prev_hlock = NULL;
ian@0 2231 for (i = depth-1; i >= 0; i--) {
ian@0 2232 hlock = curr->held_locks + i;
ian@0 2233 /*
ian@0 2234 * We must not cross into another context:
ian@0 2235 */
ian@0 2236 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
ian@0 2237 break;
ian@0 2238 if (hlock->instance == lock)
ian@0 2239 goto found_it;
ian@0 2240 prev_hlock = hlock;
ian@0 2241 }
ian@0 2242 return print_unlock_inbalance_bug(curr, lock, ip);
ian@0 2243
ian@0 2244 found_it:
ian@0 2245 /*
ian@0 2246 * We have the right lock to unlock, 'hlock' points to it.
ian@0 2247 * Now we remove it from the stack, and add back the other
ian@0 2248 * entries (if any), recalculating the hash along the way:
ian@0 2249 */
ian@0 2250 curr->lockdep_depth = i;
ian@0 2251 curr->curr_chain_key = hlock->prev_chain_key;
ian@0 2252
ian@0 2253 for (i++; i < depth; i++) {
ian@0 2254 hlock = curr->held_locks + i;
ian@0 2255 if (!__lock_acquire(hlock->instance,
ian@0 2256 hlock->class->subclass, hlock->trylock,
ian@0 2257 hlock->read, hlock->check, hlock->hardirqs_off,
ian@0 2258 hlock->acquire_ip))
ian@0 2259 return 0;
ian@0 2260 }
ian@0 2261
ian@0 2262 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
ian@0 2263 return 0;
ian@0 2264 return 1;
ian@0 2265 }
ian@0 2266
ian@0 2267 /*
ian@0 2268 * Remove the lock from the list of currently held locks - this gets
ian@0 2269 * called on mutex_unlock()/spin_unlock*() (or on a failed
ian@0 2270 * mutex_lock_interruptible()). This is done for unlocks that nest
ian@0 2271 * perfectly. (i.e. the current top of the lock-stack is unlocked)
ian@0 2272 */
ian@0 2273 static int lock_release_nested(struct task_struct *curr,
ian@0 2274 struct lockdep_map *lock, unsigned long ip)
ian@0 2275 {
ian@0 2276 struct held_lock *hlock;
ian@0 2277 unsigned int depth;
ian@0 2278
ian@0 2279 /*
ian@0 2280 * Pop off the top of the lock stack:
ian@0 2281 */
ian@0 2282 depth = curr->lockdep_depth - 1;
ian@0 2283 hlock = curr->held_locks + depth;
ian@0 2284
ian@0 2285 /*
ian@0 2286 * Is the unlock non-nested:
ian@0 2287 */
ian@0 2288 if (hlock->instance != lock)
ian@0 2289 return lock_release_non_nested(curr, lock, ip);
ian@0 2290 curr->lockdep_depth--;
ian@0 2291
ian@0 2292 if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
ian@0 2293 return 0;
ian@0 2294
ian@0 2295 curr->curr_chain_key = hlock->prev_chain_key;
ian@0 2296
ian@0 2297 #ifdef CONFIG_DEBUG_LOCKDEP
ian@0 2298 hlock->prev_chain_key = 0;
ian@0 2299 hlock->class = NULL;
ian@0 2300 hlock->acquire_ip = 0;
ian@0 2301 hlock->irq_context = 0;
ian@0 2302 #endif
ian@0 2303 return 1;
ian@0 2304 }
ian@0 2305
ian@0 2306 /*
ian@0 2307 * Remove the lock from the list of currently held locks - this gets
ian@0 2308 * called on mutex_unlock()/spin_unlock*() (or on a failed
ian@0 2309 * mutex_lock_interruptible()). The 'nested' argument selects between
ian@0 2310 * the nested and the non-nested (out of order) release paths above.
ian@0 2311 */
ian@0 2312 static void
ian@0 2313 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
ian@0 2314 {
ian@0 2315 struct task_struct *curr = current;
ian@0 2316
ian@0 2317 if (!check_unlock(curr, lock, ip))
ian@0 2318 return;
ian@0 2319
ian@0 2320 if (nested) {
ian@0 2321 if (!lock_release_nested(curr, lock, ip))
ian@0 2322 return;
ian@0 2323 } else {
ian@0 2324 if (!lock_release_non_nested(curr, lock, ip))
ian@0 2325 return;
ian@0 2326 }
ian@0 2327
ian@0 2328 check_chain_key(curr);
ian@0 2329 }
ian@0 2330
ian@0 2331 /*
ian@0 2332 * Check whether we follow the irq-flags state precisely:
ian@0 2333 */
ian@0 2334 static void check_flags(unsigned long flags)
ian@0 2335 {
ian@0 2336 #if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
ian@0 2337 if (!debug_locks)
ian@0 2338 return;
ian@0 2339
ian@0 2340 if (irqs_disabled_flags(flags))
ian@0 2341 DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled);
ian@0 2342 else
ian@0 2343 DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled);
ian@0 2344
ian@0 2345 /*
ian@0 2346 * We don't accurately track softirq state in e.g.
ian@0 2347 * hardirq contexts (such as on 4KSTACKS), so only
ian@0 2348 * check if not in hardirq contexts:
ian@0 2349 */
ian@0 2350 if (!hardirq_count()) {
ian@0 2351 if (softirq_count())
ian@0 2352 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
ian@0 2353 else
ian@0 2354 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
ian@0 2355 }
ian@0 2356
ian@0 2357 if (!debug_locks)
ian@0 2358 print_irqtrace_events(current);
ian@0 2359 #endif
ian@0 2360 }
ian@0 2361
ian@0 2362 /*
ian@0 2363 * We are not always called with irqs disabled - do that here,
ian@0 2364 * and also avoid lockdep recursion:
ian@0 2365 */
ian@0 2366 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
ian@0 2367 int trylock, int read, int check, unsigned long ip)
ian@0 2368 {
ian@0 2369 unsigned long flags;
ian@0 2370
ian@0 2371 if (unlikely(current->lockdep_recursion))
ian@0 2372 return;
ian@0 2373
ian@0 2374 raw_local_irq_save(flags);
ian@0 2375 check_flags(flags);
ian@0 2376
ian@0 2377 current->lockdep_recursion = 1;
ian@0 2378 __lock_acquire(lock, subclass, trylock, read, check,
ian@0 2379 irqs_disabled_flags(flags), ip);
ian@0 2380 current->lockdep_recursion = 0;
ian@0 2381 raw_local_irq_restore(flags);
ian@0 2382 }
ian@0 2383
ian@0 2384 EXPORT_SYMBOL_GPL(lock_acquire);
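/*
 * Editor's note, a hedged sketch of how a locking primitive would hook
 * into the validator (parameter values chosen only for illustration):
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 0, 2, _RET_IP_);
 *		// subclass=0, trylock=0, read=0, check=2 (full checks)
 *	...take the lock...
 *	...drop the lock...
 *	lock_release(&lock->dep_map, 1, _RET_IP_);
 *		// nested=1: the top of the lock stack was released
 *
 * The real spin_acquire()/mutex_acquire() style wrappers live in
 * include/linux/lockdep.h.
 */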
ian@0 2385
ian@0 2386 void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
ian@0 2387 {
ian@0 2388 unsigned long flags;
ian@0 2389
ian@0 2390 if (unlikely(current->lockdep_recursion))
ian@0 2391 return;
ian@0 2392
ian@0 2393 raw_local_irq_save(flags);
ian@0 2394 check_flags(flags);
ian@0 2395 current->lockdep_recursion = 1;
ian@0 2396 __lock_release(lock, nested, ip);
ian@0 2397 current->lockdep_recursion = 0;
ian@0 2398 raw_local_irq_restore(flags);
ian@0 2399 }
ian@0 2400
ian@0 2401 EXPORT_SYMBOL_GPL(lock_release);
ian@0 2402
ian@0 2403 /*
ian@0 2404 * Used by the testsuite, sanitize the validator state
ian@0 2405 * after a simulated failure:
ian@0 2406 */
ian@0 2407
ian@0 2408 void lockdep_reset(void)
ian@0 2409 {
ian@0 2410 unsigned long flags;
ian@0 2411
ian@0 2412 raw_local_irq_save(flags);
ian@0 2413 current->curr_chain_key = 0;
ian@0 2414 current->lockdep_depth = 0;
ian@0 2415 current->lockdep_recursion = 0;
ian@0 2416 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
ian@0 2417 nr_hardirq_chains = 0;
ian@0 2418 nr_softirq_chains = 0;
ian@0 2419 nr_process_chains = 0;
ian@0 2420 debug_locks = 1;
ian@0 2421 raw_local_irq_restore(flags);
ian@0 2422 }
ian@0 2423
ian@0 2424 static void zap_class(struct lock_class *class)
ian@0 2425 {
ian@0 2426 int i;
ian@0 2427
ian@0 2428 /*
ian@0 2429 * Remove all dependencies this lock is
ian@0 2430 * involved in:
ian@0 2431 */
ian@0 2432 for (i = 0; i < nr_list_entries; i++) {
ian@0 2433 if (list_entries[i].class == class)
ian@0 2434 list_del_rcu(&list_entries[i].entry);
ian@0 2435 }
ian@0 2436 /*
ian@0 2437 * Unhash the class and remove it from the all_lock_classes list:
ian@0 2438 */
ian@0 2439 list_del_rcu(&class->hash_entry);
ian@0 2440 list_del_rcu(&class->lock_entry);
ian@0 2441
ian@0 2442 }
ian@0 2443
ian@0 2444 static inline int within(void *addr, void *start, unsigned long size)
ian@0 2445 {
ian@0 2446 return addr >= start && addr < start + size;
ian@0 2447 }
ian@0 2448
ian@0 2449 void lockdep_free_key_range(void *start, unsigned long size)
ian@0 2450 {
ian@0 2451 struct lock_class *class, *next;
ian@0 2452 struct list_head *head;
ian@0 2453 unsigned long flags;
ian@0 2454 int i;
ian@0 2455
ian@0 2456 raw_local_irq_save(flags);
ian@0 2457 __raw_spin_lock(&hash_lock);
ian@0 2458
ian@0 2459 /*
ian@0 2460 * Unhash all classes that were created by this module:
ian@0 2461 */
ian@0 2462 for (i = 0; i < CLASSHASH_SIZE; i++) {
ian@0 2463 head = classhash_table + i;
ian@0 2464 if (list_empty(head))
ian@0 2465 continue;
ian@0 2466 list_for_each_entry_safe(class, next, head, hash_entry)
ian@0 2467 if (within(class->key, start, size))
ian@0 2468 zap_class(class);
ian@0 2469 }
ian@0 2470
ian@0 2471 __raw_spin_unlock(&hash_lock);
ian@0 2472 raw_local_irq_restore(flags);
ian@0 2473 }
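/*
 * Editor's note (hedged): the typical caller is the module unloader,
 * which wipes any classes whose keys lived in the module image before
 * that memory is reused, roughly:
 *
 *	lockdep_free_key_range(mod->module_core, mod->core_size);
 *
 * The field names are shown only as an illustration of the intended
 * range; see the module unload path for the actual call.
 */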
ian@0 2474
ian@0 2475 void lockdep_reset_lock(struct lockdep_map *lock)
ian@0 2476 {
ian@0 2477 struct lock_class *class, *next;
ian@0 2478 struct list_head *head;
ian@0 2479 unsigned long flags;
ian@0 2480 int i, j;
ian@0 2481
ian@0 2482 raw_local_irq_save(flags);
ian@0 2483
ian@0 2484 /*
ian@0 2485 * Remove all classes this lock might have:
ian@0 2486 */
ian@0 2487 for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
ian@0 2488 /*
ian@0 2489 * If the class exists we look it up and zap it:
ian@0 2490 */
ian@0 2491 class = look_up_lock_class(lock, j);
ian@0 2492 if (class)
ian@0 2493 zap_class(class);
ian@0 2494 }
ian@0 2495 /*
ian@0 2496 * Debug check: in the end all mapped classes should
ian@0 2497 * be gone.
ian@0 2498 */
ian@0 2499 __raw_spin_lock(&hash_lock);
ian@0 2500 for (i = 0; i < CLASSHASH_SIZE; i++) {
ian@0 2501 head = classhash_table + i;
ian@0 2502 if (list_empty(head))
ian@0 2503 continue;
ian@0 2504 list_for_each_entry_safe(class, next, head, hash_entry) {
ian@0 2505 if (unlikely(class == lock->class_cache)) {
ian@0 2506 __raw_spin_unlock(&hash_lock);
ian@0 2507 DEBUG_LOCKS_WARN_ON(1);
ian@0 2508 goto out_restore;
ian@0 2509 }
ian@0 2510 }
ian@0 2511 }
ian@0 2512 __raw_spin_unlock(&hash_lock);
ian@0 2513
ian@0 2514 out_restore:
ian@0 2515 raw_local_irq_restore(flags);
ian@0 2516 }
ian@0 2517
ian@0 2518 void __init lockdep_init(void)
ian@0 2519 {
ian@0 2520 int i;
ian@0 2521
ian@0 2522 /*
ian@0 2523 * Some architectures have their own start_kernel()
ian@0 2524 * code which calls lockdep_init(), while we also
ian@0 2525 * call lockdep_init() from the start_kernel() itself,
ian@0 2526 * and we want to initialize the hashes only once:
ian@0 2527 */
ian@0 2528 if (lockdep_initialized)
ian@0 2529 return;
ian@0 2530
ian@0 2531 for (i = 0; i < CLASSHASH_SIZE; i++)
ian@0 2532 INIT_LIST_HEAD(classhash_table + i);
ian@0 2533
ian@0 2534 for (i = 0; i < CHAINHASH_SIZE; i++)
ian@0 2535 INIT_LIST_HEAD(chainhash_table + i);
ian@0 2536
ian@0 2537 lockdep_initialized = 1;
ian@0 2538 }
ian@0 2539
ian@0 2540 void __init lockdep_info(void)
ian@0 2541 {
ian@0 2542 printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
ian@0 2543
ian@0 2544 printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
ian@0 2545 printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
ian@0 2546 printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
ian@0 2547 printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
ian@0 2548 printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
ian@0 2549 printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
ian@0 2550 printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
ian@0 2551
ian@0 2552 printk(" memory used by lock dependency info: %lu kB\n",
ian@0 2553 (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
ian@0 2554 sizeof(struct list_head) * CLASSHASH_SIZE +
ian@0 2555 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
ian@0 2556 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
ian@0 2557 sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
ian@0 2558
ian@0 2559 printk(" per task-struct memory footprint: %lu bytes\n",
ian@0 2560 sizeof(struct held_lock) * MAX_LOCK_DEPTH);
ian@0 2561
ian@0 2562 #ifdef CONFIG_DEBUG_LOCKDEP
ian@0 2563 if (lockdep_init_error)
ian@0 2564 printk("WARNING: lockdep init error! Arch code didnt call lockdep_init() early enough?\n");
ian@0 2565 #endif
ian@0 2566 }
ian@0 2567
ian@0 2568 static inline int in_range(const void *start, const void *addr, const void *end)
ian@0 2569 {
ian@0 2570 return addr >= start && addr <= end;
ian@0 2571 }
ian@0 2572
ian@0 2573 static void
ian@0 2574 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
ian@0 2575 const void *mem_to, struct held_lock *hlock)
ian@0 2576 {
ian@0 2577 if (!debug_locks_off())
ian@0 2578 return;
ian@0 2579 if (debug_locks_silent)
ian@0 2580 return;
ian@0 2581
ian@0 2582 printk("\n=========================\n");
ian@0 2583 printk( "[ BUG: held lock freed! ]\n");
ian@0 2584 printk( "-------------------------\n");
ian@0 2585 printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
ian@0 2586 curr->comm, curr->pid, mem_from, mem_to-1);
ian@0 2587 print_lock(hlock);
ian@0 2588 lockdep_print_held_locks(curr);
ian@0 2589
ian@0 2590 printk("\nstack backtrace:\n");
ian@0 2591 dump_stack();
ian@0 2592 }
ian@0 2593
ian@0 2594 /*
ian@0 2595 * Called when kernel memory is freed (or unmapped), or if a lock
ian@0 2596 * is destroyed or reinitialized - this code checks whether there is
ian@0 2597 * any held lock in the memory range of <from> to <to>:
ian@0 2598 */
ian@0 2599 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
ian@0 2600 {
ian@0 2601 const void *mem_to = mem_from + mem_len, *lock_from, *lock_to;
ian@0 2602 struct task_struct *curr = current;
ian@0 2603 struct held_lock *hlock;
ian@0 2604 unsigned long flags;
ian@0 2605 int i;
ian@0 2606
ian@0 2607 if (unlikely(!debug_locks))
ian@0 2608 return;
ian@0 2609
ian@0 2610 local_irq_save(flags);
ian@0 2611 for (i = 0; i < curr->lockdep_depth; i++) {
ian@0 2612 hlock = curr->held_locks + i;
ian@0 2613
ian@0 2614 lock_from = (void *)hlock->instance;
ian@0 2615 lock_to = (void *)(hlock->instance + 1);
ian@0 2616
ian@0 2617 if (!in_range(mem_from, lock_from, mem_to) &&
ian@0 2618 !in_range(mem_from, lock_to, mem_to))
ian@0 2619 continue;
ian@0 2620
ian@0 2621 print_freed_lock_bug(curr, mem_from, mem_to, hlock);
ian@0 2622 break;
ian@0 2623 }
ian@0 2624 local_irq_restore(flags);
ian@0 2625 }
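/*
 * Editor's note, an illustrative (hypothetical) call site: code about
 * to free an object that embeds a lock can defensively do
 *
 *	debug_check_no_locks_freed(obj, sizeof(*obj));
 *	kfree(obj);
 *
 * so that freeing a still-held lock is reported instead of silently
 * corrupting the held-lock stack.
 */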
ian@0 2626
ian@0 2627 static void print_held_locks_bug(struct task_struct *curr)
ian@0 2628 {
ian@0 2629 if (!debug_locks_off())
ian@0 2630 return;
ian@0 2631 if (debug_locks_silent)
ian@0 2632 return;
ian@0 2633
ian@0 2634 printk("\n=====================================\n");
ian@0 2635 printk( "[ BUG: lock held at task exit time! ]\n");
ian@0 2636 printk( "-------------------------------------\n");
ian@0 2637 printk("%s/%d is exiting with locks still held!\n",
ian@0 2638 curr->comm, curr->pid);
ian@0 2639 lockdep_print_held_locks(curr);
ian@0 2640
ian@0 2641 printk("\nstack backtrace:\n");
ian@0 2642 dump_stack();
ian@0 2643 }
ian@0 2644
ian@0 2645 void debug_check_no_locks_held(struct task_struct *task)
ian@0 2646 {
ian@0 2647 if (unlikely(task->lockdep_depth > 0))
ian@0 2648 print_held_locks_bug(task);
ian@0 2649 }
ian@0 2650
ian@0 2651 void debug_show_all_locks(void)
ian@0 2652 {
ian@0 2653 struct task_struct *g, *p;
ian@0 2654 int count = 10;
ian@0 2655 int unlock = 1;
ian@0 2656
ian@0 2657 printk("\nShowing all locks held in the system:\n");
ian@0 2658
ian@0 2659 /*
ian@0 2660 * Here we try to get the tasklist_lock as hard as possible,
ian@0 2661 * if not successful after 2 seconds we ignore it (but keep
ian@0 2662 * trying). This is to enable a debug printout even if a
ian@0 2663 * tasklist_lock-holding task deadlocks or crashes.
ian@0 2664 */
ian@0 2665 retry:
ian@0 2666 if (!read_trylock(&tasklist_lock)) {
ian@0 2667 if (count == 10)
ian@0 2668 printk("hm, tasklist_lock locked, retrying... ");
ian@0 2669 if (count) {
ian@0 2670 count--;
ian@0 2671 printk(" #%d", 10-count);
ian@0 2672 mdelay(200);
ian@0 2673 goto retry;
ian@0 2674 }
ian@0 2675 printk(" ignoring it.\n");
ian@0 2676 unlock = 0;
ian@0 2677 }
ian@0 2678 if (count != 10)
ian@0 2679 printk(" locked it.\n");
ian@0 2680
ian@0 2681 do_each_thread(g, p) {
ian@0 2682 if (p->lockdep_depth)
ian@0 2683 lockdep_print_held_locks(p);
ian@0 2684 if (!unlock)
ian@0 2685 if (read_trylock(&tasklist_lock))
ian@0 2686 unlock = 1;
ian@0 2687 } while_each_thread(g, p);
ian@0 2688
ian@0 2689 printk("\n");
ian@0 2690 printk("=============================================\n\n");
ian@0 2691
ian@0 2692 if (unlock)
ian@0 2693 read_unlock(&tasklist_lock);
ian@0 2694 }
ian@0 2695
ian@0 2696 EXPORT_SYMBOL_GPL(debug_show_all_locks);
ian@0 2697
ian@0 2698 void debug_show_held_locks(struct task_struct *task)
ian@0 2699 {
ian@0 2700 lockdep_print_held_locks(task);
ian@0 2701 }
ian@0 2702
ian@0 2703 EXPORT_SYMBOL_GPL(debug_show_held_locks);
ian@0 2704