1 /* 2 * kernel/lockdep.c 3 * 4 * Runtime locking correctness validator 5 * 6 * Started by Ingo Molnar: 7 * 8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra 10 * 11 * this code maps all the lock dependencies as they occur in a live kernel 12 * and will warn about the following classes of locking bugs: 13 * 14 * - lock inversion scenarios 15 * - circular lock dependencies 16 * - hardirq/softirq safe/unsafe locking bugs 17 * 18 * Bugs are reported even if the current locking scenario does not cause 19 * any deadlock at this point. 20 * 21 * I.e. if anytime in the past two locks were taken in a different order, 22 * even if it happened for another task, even if those were different 23 * locks (but of the same class as this lock), this code will detect it. 24 * 25 * Thanks to Arjan van de Ven for coming up with the initial idea of 26 * mapping lock dependencies runtime. 27 */ 28 #define DISABLE_BRANCH_PROFILING 29 #include <linux/mutex.h> 30 #include <linux/sched.h> 31 #include <linux/delay.h> 32 #include <linux/module.h> 33 #include <linux/proc_fs.h> 34 #include <linux/seq_file.h> 35 #include <linux/spinlock.h> 36 #include <linux/kallsyms.h> 37 #include <linux/interrupt.h> 38 #include <linux/stacktrace.h> 39 #include <linux/debug_locks.h> 40 #include <linux/irqflags.h> 41 #include <linux/utsname.h> 42 #include <linux/hash.h> 43 #include <linux/ftrace.h> 44 #include <linux/stringify.h> 45 #include <linux/bitops.h> 46 #include <linux/gfp.h> 47 #include <linux/kmemcheck.h> 48 49 #include <asm/sections.h> 50 51 #include "lockdep_internals.h" 52 53 #define CREATE_TRACE_POINTS 54 #include <trace/events/lock.h> 55 56 #ifdef CONFIG_PROVE_LOCKING 57 int prove_locking = 1; 58 module_param(prove_locking, int, 0644); 59 #else 60 #define prove_locking 0 61 #endif 62 63 #ifdef CONFIG_LOCK_STAT 64 int lock_stat = 1; 65 module_param(lock_stat, int, 0644); 66 #else 67 #define lock_stat 0 68 #endif 69 70 /* 71 * lockdep_lock: protects the lockdep graph, the hashes and the 72 * class/list/hash allocators. 73 * 74 * This is one of the rare exceptions where it's justified 75 * to use a raw spinlock - we really dont want the spinlock 76 * code to recurse back into the lockdep code... 77 */ 78 static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 79 80 static int graph_lock(void) 81 { 82 arch_spin_lock(&lockdep_lock); 83 /* 84 * Make sure that if another CPU detected a bug while 85 * walking the graph we dont change it (while the other 86 * CPU is busy printing out stuff with the graph lock 87 * dropped already) 88 */ 89 if (!debug_locks) { 90 arch_spin_unlock(&lockdep_lock); 91 return 0; 92 } 93 /* prevent any recursions within lockdep from causing deadlocks */ 94 current->lockdep_recursion++; 95 return 1; 96 } 97 98 static inline int graph_unlock(void) 99 { 100 if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) { 101 /* 102 * The lockdep graph lock isn't locked while we expect it to 103 * be, we're confused now, bye! 104 */ 105 return DEBUG_LOCKS_WARN_ON(1); 106 } 107 108 current->lockdep_recursion--; 109 arch_spin_unlock(&lockdep_lock); 110 return 0; 111 } 112 113 /* 114 * Turn lock debugging off and return with 0 if it was off already, 115 * and also release the graph lock: 116 */ 117 static inline int debug_locks_off_graph_unlock(void) 118 { 119 int ret = debug_locks_off(); 120 121 arch_spin_unlock(&lockdep_lock); 122 123 return ret; 124 } 125 126 unsigned long nr_list_entries; 127 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; 128 129 /* 130 * All data structures here are protected by the global debug_lock. 131 * 132 * Mutex key structs only get allocated, once during bootup, and never 133 * get freed - this significantly simplifies the debugging code. 134 */ 135 unsigned long nr_lock_classes; 136 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; 137 138 static inline struct lock_class *hlock_class(struct held_lock *hlock) 139 { 140 if (!hlock->class_idx) { 141 /* 142 * Someone passed in garbage, we give up. 143 */ 144 DEBUG_LOCKS_WARN_ON(1); 145 return NULL; 146 } 147 return lock_classes + hlock->class_idx - 1; 148 } 149 150 #ifdef CONFIG_LOCK_STAT 151 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats); 152 153 static inline u64 lockstat_clock(void) 154 { 155 return local_clock(); 156 } 157 158 static int lock_point(unsigned long points[], unsigned long ip) 159 { 160 int i; 161 162 for (i = 0; i < LOCKSTAT_POINTS; i++) { 163 if (points[i] == 0) { 164 points[i] = ip; 165 break; 166 } 167 if (points[i] == ip) 168 break; 169 } 170 171 return i; 172 } 173 174 static void lock_time_inc(struct lock_time *lt, u64 time) 175 { 176 if (time > lt->max) 177 lt->max = time; 178 179 if (time < lt->min || !lt->nr) 180 lt->min = time; 181 182 lt->total += time; 183 lt->nr++; 184 } 185 186 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst) 187 { 188 if (!src->nr) 189 return; 190 191 if (src->max > dst->max) 192 dst->max = src->max; 193 194 if (src->min < dst->min || !dst->nr) 195 dst->min = src->min; 196 197 dst->total += src->total; 198 dst->nr += src->nr; 199 } 200 201 struct lock_class_stats lock_stats(struct lock_class *class) 202 { 203 struct lock_class_stats stats; 204 int cpu, i; 205 206 memset(&stats, 0, sizeof(struct lock_class_stats)); 207 for_each_possible_cpu(cpu) { 208 struct lock_class_stats *pcs = 209 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes]; 210 211 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++) 212 stats.contention_point[i] += pcs->contention_point[i]; 213 214 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++) 215 stats.contending_point[i] += pcs->contending_point[i]; 216 217 lock_time_add(&pcs->read_waittime, &stats.read_waittime); 218 lock_time_add(&pcs->write_waittime, &stats.write_waittime); 219 220 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime); 221 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime); 222 223 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++) 224 stats.bounces[i] += pcs->bounces[i]; 225 } 226 227 return stats; 228 } 229 230 void clear_lock_stats(struct lock_class *class) 231 { 232 int cpu; 233 234 for_each_possible_cpu(cpu) { 235 struct lock_class_stats *cpu_stats = 236 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes]; 237 238 memset(cpu_stats, 0, sizeof(struct lock_class_stats)); 239 } 240 memset(class->contention_point, 0, sizeof(class->contention_point)); 241 memset(class->contending_point, 0, sizeof(class->contending_point)); 242 } 243 244 static struct lock_class_stats *get_lock_stats(struct lock_class *class) 245 { 246 return &get_cpu_var(cpu_lock_stats)[class - lock_classes]; 247 } 248 249 static void put_lock_stats(struct lock_class_stats *stats) 250 { 251 put_cpu_var(cpu_lock_stats); 252 } 253 254 static void lock_release_holdtime(struct held_lock *hlock) 255 { 256 struct lock_class_stats *stats; 257 u64 holdtime; 258 259 if (!lock_stat) 260 return; 261 262 holdtime = lockstat_clock() - hlock->holdtime_stamp; 263 264 stats = get_lock_stats(hlock_class(hlock)); 265 if (hlock->read) 266 lock_time_inc(&stats->read_holdtime, holdtime); 267 else 268 lock_time_inc(&stats->write_holdtime, holdtime); 269 put_lock_stats(stats); 270 } 271 #else 272 static inline void lock_release_holdtime(struct held_lock *hlock) 273 { 274 } 275 #endif 276 277 /* 278 * We keep a global list of all lock classes. The list only grows, 279 * never shrinks. The list is only accessed with the lockdep 280 * spinlock lock held. 281 */ 282 LIST_HEAD(all_lock_classes); 283 284 /* 285 * The lockdep classes are in a hash-table as well, for fast lookup: 286 */ 287 #define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1) 288 #define CLASSHASH_SIZE (1UL << CLASSHASH_BITS) 289 #define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS) 290 #define classhashentry(key) (classhash_table + __classhashfn((key))) 291 292 static struct hlist_head classhash_table[CLASSHASH_SIZE]; 293 294 /* 295 * We put the lock dependency chains into a hash-table as well, to cache 296 * their existence: 297 */ 298 #define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1) 299 #define CHAINHASH_SIZE (1UL << CHAINHASH_BITS) 300 #define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS) 301 #define chainhashentry(chain) (chainhash_table + __chainhashfn((chain))) 302 303 static struct hlist_head chainhash_table[CHAINHASH_SIZE]; 304 305 /* 306 * The hash key of the lock dependency chains is a hash itself too: 307 * it's a hash of all locks taken up to that lock, including that lock. 308 * It's a 64-bit hash, because it's important for the keys to be 309 * unique. 310 */ 311 #define iterate_chain_key(key1, key2) \ 312 (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \ 313 ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \ 314 (key2)) 315 316 void lockdep_off(void) 317 { 318 current->lockdep_recursion++; 319 } 320 EXPORT_SYMBOL(lockdep_off); 321 322 void lockdep_on(void) 323 { 324 current->lockdep_recursion--; 325 } 326 EXPORT_SYMBOL(lockdep_on); 327 328 /* 329 * Debugging switches: 330 */ 331 332 #define VERBOSE 0 333 #define VERY_VERBOSE 0 334 335 #if VERBOSE 336 # define HARDIRQ_VERBOSE 1 337 # define SOFTIRQ_VERBOSE 1 338 # define RECLAIM_VERBOSE 1 339 #else 340 # define HARDIRQ_VERBOSE 0 341 # define SOFTIRQ_VERBOSE 0 342 # define RECLAIM_VERBOSE 0 343 #endif 344 345 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE 346 /* 347 * Quick filtering for interesting events: 348 */ 349 static int class_filter(struct lock_class *class) 350 { 351 #if 0 352 /* Example */ 353 if (class->name_version == 1 && 354 !strcmp(class->name, "lockname")) 355 return 1; 356 if (class->name_version == 1 && 357 !strcmp(class->name, "&struct->lockfield")) 358 return 1; 359 #endif 360 /* Filter everything else. 1 would be to allow everything else */ 361 return 0; 362 } 363 #endif 364 365 static int verbose(struct lock_class *class) 366 { 367 #if VERBOSE 368 return class_filter(class); 369 #endif 370 return 0; 371 } 372 373 /* 374 * Stack-trace: tightly packed array of stack backtrace 375 * addresses. Protected by the graph_lock. 376 */ 377 unsigned long nr_stack_trace_entries; 378 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES]; 379 380 static void print_lockdep_off(const char *bug_msg) 381 { 382 printk(KERN_DEBUG "%s\n", bug_msg); 383 printk(KERN_DEBUG "turning off the locking correctness validator.\n"); 384 #ifdef CONFIG_LOCK_STAT 385 printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n"); 386 #endif 387 } 388 389 static int save_trace(struct stack_trace *trace) 390 { 391 trace->nr_entries = 0; 392 trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries; 393 trace->entries = stack_trace + nr_stack_trace_entries; 394 395 trace->skip = 3; 396 397 save_stack_trace(trace); 398 399 /* 400 * Some daft arches put -1 at the end to indicate its a full trace. 401 * 402 * <rant> this is buggy anyway, since it takes a whole extra entry so a 403 * complete trace that maxes out the entries provided will be reported 404 * as incomplete, friggin useless </rant> 405 */ 406 if (trace->nr_entries != 0 && 407 trace->entries[trace->nr_entries-1] == ULONG_MAX) 408 trace->nr_entries--; 409 410 trace->max_entries = trace->nr_entries; 411 412 nr_stack_trace_entries += trace->nr_entries; 413 414 if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) { 415 if (!debug_locks_off_graph_unlock()) 416 return 0; 417 418 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!"); 419 dump_stack(); 420 421 return 0; 422 } 423 424 return 1; 425 } 426 427 unsigned int nr_hardirq_chains; 428 unsigned int nr_softirq_chains; 429 unsigned int nr_process_chains; 430 unsigned int max_lockdep_depth; 431 432 #ifdef CONFIG_DEBUG_LOCKDEP 433 /* 434 * Various lockdep statistics: 435 */ 436 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats); 437 #endif 438 439 /* 440 * Locking printouts: 441 */ 442 443 #define __USAGE(__STATE) \ 444 [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \ 445 [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \ 446 [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\ 447 [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R", 448 449 static const char *usage_str[] = 450 { 451 #define LOCKDEP_STATE(__STATE) __USAGE(__STATE) 452 #include "lockdep_states.h" 453 #undef LOCKDEP_STATE 454 [LOCK_USED] = "INITIAL USE", 455 }; 456 457 const char * __get_key_name(struct lockdep_subclass_key *key, char *str) 458 { 459 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str); 460 } 461 462 static inline unsigned long lock_flag(enum lock_usage_bit bit) 463 { 464 return 1UL << bit; 465 } 466 467 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit) 468 { 469 char c = '.'; 470 471 if (class->usage_mask & lock_flag(bit + 2)) 472 c = '+'; 473 if (class->usage_mask & lock_flag(bit)) { 474 c = '-'; 475 if (class->usage_mask & lock_flag(bit + 2)) 476 c = '?'; 477 } 478 479 return c; 480 } 481 482 void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS]) 483 { 484 int i = 0; 485 486 #define LOCKDEP_STATE(__STATE) \ 487 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \ 488 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ); 489 #include "lockdep_states.h" 490 #undef LOCKDEP_STATE 491 492 usage[i] = '\0'; 493 } 494 495 static void __print_lock_name(struct lock_class *class) 496 { 497 char str[KSYM_NAME_LEN]; 498 const char *name; 499 500 name = class->name; 501 if (!name) { 502 name = __get_key_name(class->key, str); 503 printk("%s", name); 504 } else { 505 printk("%s", name); 506 if (class->name_version > 1) 507 printk("#%d", class->name_version); 508 if (class->subclass) 509 printk("/%d", class->subclass); 510 } 511 } 512 513 static void print_lock_name(struct lock_class *class) 514 { 515 char usage[LOCK_USAGE_CHARS]; 516 517 get_usage_chars(class, usage); 518 519 printk(" ("); 520 __print_lock_name(class); 521 printk("){%s}", usage); 522 } 523 524 static void print_lockdep_cache(struct lockdep_map *lock) 525 { 526 const char *name; 527 char str[KSYM_NAME_LEN]; 528 529 name = lock->name; 530 if (!name) 531 name = __get_key_name(lock->key->subkeys, str); 532 533 printk("%s", name); 534 } 535 536 static void print_lock(struct held_lock *hlock) 537 { 538 /* 539 * We can be called locklessly through debug_show_all_locks() so be 540 * extra careful, the hlock might have been released and cleared. 541 */ 542 unsigned int class_idx = hlock->class_idx; 543 544 /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */ 545 barrier(); 546 547 if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) { 548 printk("<RELEASED>\n"); 549 return; 550 } 551 552 print_lock_name(lock_classes + class_idx - 1); 553 printk(", at: "); 554 print_ip_sym(hlock->acquire_ip); 555 } 556 557 static void lockdep_print_held_locks(struct task_struct *curr) 558 { 559 int i, depth = curr->lockdep_depth; 560 561 if (!depth) { 562 printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr)); 563 return; 564 } 565 printk("%d lock%s held by %s/%d:\n", 566 depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr)); 567 568 for (i = 0; i < depth; i++) { 569 printk(" #%d: ", i); 570 print_lock(curr->held_locks + i); 571 } 572 } 573 574 static void print_kernel_ident(void) 575 { 576 printk("%s %.*s %s\n", init_utsname()->release, 577 (int)strcspn(init_utsname()->version, " "), 578 init_utsname()->version, 579 print_tainted()); 580 } 581 582 static int very_verbose(struct lock_class *class) 583 { 584 #if VERY_VERBOSE 585 return class_filter(class); 586 #endif 587 return 0; 588 } 589 590 /* 591 * Is this the address of a static object: 592 */ 593 #ifdef __KERNEL__ 594 static int static_obj(void *obj) 595 { 596 unsigned long start = (unsigned long) &_stext, 597 end = (unsigned long) &_end, 598 addr = (unsigned long) obj; 599 600 /* 601 * static variable? 602 */ 603 if ((addr >= start) && (addr < end)) 604 return 1; 605 606 if (arch_is_kernel_data(addr)) 607 return 1; 608 609 /* 610 * in-kernel percpu var? 611 */ 612 if (is_kernel_percpu_address(addr)) 613 return 1; 614 615 /* 616 * module static or percpu var? 617 */ 618 return is_module_address(addr) || is_module_percpu_address(addr); 619 } 620 #endif 621 622 /* 623 * To make lock name printouts unique, we calculate a unique 624 * class->name_version generation counter: 625 */ 626 static int count_matching_names(struct lock_class *new_class) 627 { 628 struct lock_class *class; 629 int count = 0; 630 631 if (!new_class->name) 632 return 0; 633 634 list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) { 635 if (new_class->key - new_class->subclass == class->key) 636 return class->name_version; 637 if (class->name && !strcmp(class->name, new_class->name)) 638 count = max(count, class->name_version); 639 } 640 641 return count + 1; 642 } 643 644 /* 645 * Register a lock's class in the hash-table, if the class is not present 646 * yet. Otherwise we look it up. We cache the result in the lock object 647 * itself, so actual lookup of the hash should be once per lock object. 648 */ 649 static inline struct lock_class * 650 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) 651 { 652 struct lockdep_subclass_key *key; 653 struct hlist_head *hash_head; 654 struct lock_class *class; 655 656 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { 657 debug_locks_off(); 658 printk(KERN_ERR 659 "BUG: looking up invalid subclass: %u\n", subclass); 660 printk(KERN_ERR 661 "turning off the locking correctness validator.\n"); 662 dump_stack(); 663 return NULL; 664 } 665 666 /* 667 * Static locks do not have their class-keys yet - for them the key 668 * is the lock object itself: 669 */ 670 if (unlikely(!lock->key)) 671 lock->key = (void *)lock; 672 673 /* 674 * NOTE: the class-key must be unique. For dynamic locks, a static 675 * lock_class_key variable is passed in through the mutex_init() 676 * (or spin_lock_init()) call - which acts as the key. For static 677 * locks we use the lock object itself as the key. 678 */ 679 BUILD_BUG_ON(sizeof(struct lock_class_key) > 680 sizeof(struct lockdep_map)); 681 682 key = lock->key->subkeys + subclass; 683 684 hash_head = classhashentry(key); 685 686 /* 687 * We do an RCU walk of the hash, see lockdep_free_key_range(). 688 */ 689 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 690 return NULL; 691 692 hlist_for_each_entry_rcu(class, hash_head, hash_entry) { 693 if (class->key == key) { 694 /* 695 * Huh! same key, different name? Did someone trample 696 * on some memory? We're most confused. 697 */ 698 WARN_ON_ONCE(class->name != lock->name); 699 return class; 700 } 701 } 702 703 return NULL; 704 } 705 706 /* 707 * Register a lock's class in the hash-table, if the class is not present 708 * yet. Otherwise we look it up. We cache the result in the lock object 709 * itself, so actual lookup of the hash should be once per lock object. 710 */ 711 static inline struct lock_class * 712 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) 713 { 714 struct lockdep_subclass_key *key; 715 struct hlist_head *hash_head; 716 struct lock_class *class; 717 718 DEBUG_LOCKS_WARN_ON(!irqs_disabled()); 719 720 class = look_up_lock_class(lock, subclass); 721 if (likely(class)) 722 goto out_set_class_cache; 723 724 /* 725 * Debug-check: all keys must be persistent! 726 */ 727 if (!static_obj(lock->key)) { 728 debug_locks_off(); 729 printk("INFO: trying to register non-static key.\n"); 730 printk("the code is fine but needs lockdep annotation.\n"); 731 printk("turning off the locking correctness validator.\n"); 732 dump_stack(); 733 734 return NULL; 735 } 736 737 key = lock->key->subkeys + subclass; 738 hash_head = classhashentry(key); 739 740 if (!graph_lock()) { 741 return NULL; 742 } 743 /* 744 * We have to do the hash-walk again, to avoid races 745 * with another CPU: 746 */ 747 hlist_for_each_entry_rcu(class, hash_head, hash_entry) { 748 if (class->key == key) 749 goto out_unlock_set; 750 } 751 752 /* 753 * Allocate a new key from the static array, and add it to 754 * the hash: 755 */ 756 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { 757 if (!debug_locks_off_graph_unlock()) { 758 return NULL; 759 } 760 761 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); 762 dump_stack(); 763 return NULL; 764 } 765 class = lock_classes + nr_lock_classes++; 766 debug_atomic_inc(nr_unused_locks); 767 class->key = key; 768 class->name = lock->name; 769 class->subclass = subclass; 770 INIT_LIST_HEAD(&class->lock_entry); 771 INIT_LIST_HEAD(&class->locks_before); 772 INIT_LIST_HEAD(&class->locks_after); 773 class->name_version = count_matching_names(class); 774 /* 775 * We use RCU's safe list-add method to make 776 * parallel walking of the hash-list safe: 777 */ 778 hlist_add_head_rcu(&class->hash_entry, hash_head); 779 /* 780 * Add it to the global list of classes: 781 */ 782 list_add_tail_rcu(&class->lock_entry, &all_lock_classes); 783 784 if (verbose(class)) { 785 graph_unlock(); 786 787 printk("\nnew class %p: %s", class->key, class->name); 788 if (class->name_version > 1) 789 printk("#%d", class->name_version); 790 printk("\n"); 791 dump_stack(); 792 793 if (!graph_lock()) { 794 return NULL; 795 } 796 } 797 out_unlock_set: 798 graph_unlock(); 799 800 out_set_class_cache: 801 if (!subclass || force) 802 lock->class_cache[0] = class; 803 else if (subclass < NR_LOCKDEP_CACHING_CLASSES) 804 lock->class_cache[subclass] = class; 805 806 /* 807 * Hash collision, did we smoke some? We found a class with a matching 808 * hash but the subclass -- which is hashed in -- didn't match. 809 */ 810 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass)) 811 return NULL; 812 813 return class; 814 } 815 816 #ifdef CONFIG_PROVE_LOCKING 817 /* 818 * Allocate a lockdep entry. (assumes the graph_lock held, returns 819 * with NULL on failure) 820 */ 821 static struct lock_list *alloc_list_entry(void) 822 { 823 if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) { 824 if (!debug_locks_off_graph_unlock()) 825 return NULL; 826 827 print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!"); 828 dump_stack(); 829 return NULL; 830 } 831 return list_entries + nr_list_entries++; 832 } 833 834 /* 835 * Add a new dependency to the head of the list: 836 */ 837 static int add_lock_to_list(struct lock_class *class, struct lock_class *this, 838 struct list_head *head, unsigned long ip, 839 int distance, struct stack_trace *trace) 840 { 841 struct lock_list *entry; 842 /* 843 * Lock not present yet - get a new dependency struct and 844 * add it to the list: 845 */ 846 entry = alloc_list_entry(); 847 if (!entry) 848 return 0; 849 850 entry->class = this; 851 entry->distance = distance; 852 entry->trace = *trace; 853 /* 854 * Both allocation and removal are done under the graph lock; but 855 * iteration is under RCU-sched; see look_up_lock_class() and 856 * lockdep_free_key_range(). 857 */ 858 list_add_tail_rcu(&entry->entry, head); 859 860 return 1; 861 } 862 863 /* 864 * For good efficiency of modular, we use power of 2 865 */ 866 #define MAX_CIRCULAR_QUEUE_SIZE 4096UL 867 #define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1) 868 869 /* 870 * The circular_queue and helpers is used to implement the 871 * breadth-first search(BFS)algorithem, by which we can build 872 * the shortest path from the next lock to be acquired to the 873 * previous held lock if there is a circular between them. 874 */ 875 struct circular_queue { 876 unsigned long element[MAX_CIRCULAR_QUEUE_SIZE]; 877 unsigned int front, rear; 878 }; 879 880 static struct circular_queue lock_cq; 881 882 unsigned int max_bfs_queue_depth; 883 884 static unsigned int lockdep_dependency_gen_id; 885 886 static inline void __cq_init(struct circular_queue *cq) 887 { 888 cq->front = cq->rear = 0; 889 lockdep_dependency_gen_id++; 890 } 891 892 static inline int __cq_empty(struct circular_queue *cq) 893 { 894 return (cq->front == cq->rear); 895 } 896 897 static inline int __cq_full(struct circular_queue *cq) 898 { 899 return ((cq->rear + 1) & CQ_MASK) == cq->front; 900 } 901 902 static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem) 903 { 904 if (__cq_full(cq)) 905 return -1; 906 907 cq->element[cq->rear] = elem; 908 cq->rear = (cq->rear + 1) & CQ_MASK; 909 return 0; 910 } 911 912 static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem) 913 { 914 if (__cq_empty(cq)) 915 return -1; 916 917 *elem = cq->element[cq->front]; 918 cq->front = (cq->front + 1) & CQ_MASK; 919 return 0; 920 } 921 922 static inline unsigned int __cq_get_elem_count(struct circular_queue *cq) 923 { 924 return (cq->rear - cq->front) & CQ_MASK; 925 } 926 927 static inline void mark_lock_accessed(struct lock_list *lock, 928 struct lock_list *parent) 929 { 930 unsigned long nr; 931 932 nr = lock - list_entries; 933 WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */ 934 lock->parent = parent; 935 lock->class->dep_gen_id = lockdep_dependency_gen_id; 936 } 937 938 static inline unsigned long lock_accessed(struct lock_list *lock) 939 { 940 unsigned long nr; 941 942 nr = lock - list_entries; 943 WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */ 944 return lock->class->dep_gen_id == lockdep_dependency_gen_id; 945 } 946 947 static inline struct lock_list *get_lock_parent(struct lock_list *child) 948 { 949 return child->parent; 950 } 951 952 static inline int get_lock_depth(struct lock_list *child) 953 { 954 int depth = 0; 955 struct lock_list *parent; 956 957 while ((parent = get_lock_parent(child))) { 958 child = parent; 959 depth++; 960 } 961 return depth; 962 } 963 964 static int __bfs(struct lock_list *source_entry, 965 void *data, 966 int (*match)(struct lock_list *entry, void *data), 967 struct lock_list **target_entry, 968 int forward) 969 { 970 struct lock_list *entry; 971 struct list_head *head; 972 struct circular_queue *cq = &lock_cq; 973 int ret = 1; 974 975 if (match(source_entry, data)) { 976 *target_entry = source_entry; 977 ret = 0; 978 goto exit; 979 } 980 981 if (forward) 982 head = &source_entry->class->locks_after; 983 else 984 head = &source_entry->class->locks_before; 985 986 if (list_empty(head)) 987 goto exit; 988 989 __cq_init(cq); 990 __cq_enqueue(cq, (unsigned long)source_entry); 991 992 while (!__cq_empty(cq)) { 993 struct lock_list *lock; 994 995 __cq_dequeue(cq, (unsigned long *)&lock); 996 997 if (!lock->class) { 998 ret = -2; 999 goto exit; 1000 } 1001 1002 if (forward) 1003 head = &lock->class->locks_after; 1004 else 1005 head = &lock->class->locks_before; 1006 1007 DEBUG_LOCKS_WARN_ON(!irqs_disabled()); 1008 1009 list_for_each_entry_rcu(entry, head, entry) { 1010 if (!lock_accessed(entry)) { 1011 unsigned int cq_depth; 1012 mark_lock_accessed(entry, lock); 1013 if (match(entry, data)) { 1014 *target_entry = entry; 1015 ret = 0; 1016 goto exit; 1017 } 1018 1019 if (__cq_enqueue(cq, (unsigned long)entry)) { 1020 ret = -1; 1021 goto exit; 1022 } 1023 cq_depth = __cq_get_elem_count(cq); 1024 if (max_bfs_queue_depth < cq_depth) 1025 max_bfs_queue_depth = cq_depth; 1026 } 1027 } 1028 } 1029 exit: 1030 return ret; 1031 } 1032 1033 static inline int __bfs_forwards(struct lock_list *src_entry, 1034 void *data, 1035 int (*match)(struct lock_list *entry, void *data), 1036 struct lock_list **target_entry) 1037 { 1038 return __bfs(src_entry, data, match, target_entry, 1); 1039 1040 } 1041 1042 static inline int __bfs_backwards(struct lock_list *src_entry, 1043 void *data, 1044 int (*match)(struct lock_list *entry, void *data), 1045 struct lock_list **target_entry) 1046 { 1047 return __bfs(src_entry, data, match, target_entry, 0); 1048 1049 } 1050 1051 /* 1052 * Recursive, forwards-direction lock-dependency checking, used for 1053 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe 1054 * checking. 1055 */ 1056 1057 /* 1058 * Print a dependency chain entry (this is only done when a deadlock 1059 * has been detected): 1060 */ 1061 static noinline int 1062 print_circular_bug_entry(struct lock_list *target, int depth) 1063 { 1064 if (debug_locks_silent) 1065 return 0; 1066 printk("\n-> #%u", depth); 1067 print_lock_name(target->class); 1068 printk(":\n"); 1069 print_stack_trace(&target->trace, 6); 1070 1071 return 0; 1072 } 1073 1074 static void 1075 print_circular_lock_scenario(struct held_lock *src, 1076 struct held_lock *tgt, 1077 struct lock_list *prt) 1078 { 1079 struct lock_class *source = hlock_class(src); 1080 struct lock_class *target = hlock_class(tgt); 1081 struct lock_class *parent = prt->class; 1082 1083 /* 1084 * A direct locking problem where unsafe_class lock is taken 1085 * directly by safe_class lock, then all we need to show 1086 * is the deadlock scenario, as it is obvious that the 1087 * unsafe lock is taken under the safe lock. 1088 * 1089 * But if there is a chain instead, where the safe lock takes 1090 * an intermediate lock (middle_class) where this lock is 1091 * not the same as the safe lock, then the lock chain is 1092 * used to describe the problem. Otherwise we would need 1093 * to show a different CPU case for each link in the chain 1094 * from the safe_class lock to the unsafe_class lock. 1095 */ 1096 if (parent != source) { 1097 printk("Chain exists of:\n "); 1098 __print_lock_name(source); 1099 printk(" --> "); 1100 __print_lock_name(parent); 1101 printk(" --> "); 1102 __print_lock_name(target); 1103 printk("\n\n"); 1104 } 1105 1106 printk(" Possible unsafe locking scenario:\n\n"); 1107 printk(" CPU0 CPU1\n"); 1108 printk(" ---- ----\n"); 1109 printk(" lock("); 1110 __print_lock_name(target); 1111 printk(");\n"); 1112 printk(" lock("); 1113 __print_lock_name(parent); 1114 printk(");\n"); 1115 printk(" lock("); 1116 __print_lock_name(target); 1117 printk(");\n"); 1118 printk(" lock("); 1119 __print_lock_name(source); 1120 printk(");\n"); 1121 printk("\n *** DEADLOCK ***\n\n"); 1122 } 1123 1124 /* 1125 * When a circular dependency is detected, print the 1126 * header first: 1127 */ 1128 static noinline int 1129 print_circular_bug_header(struct lock_list *entry, unsigned int depth, 1130 struct held_lock *check_src, 1131 struct held_lock *check_tgt) 1132 { 1133 struct task_struct *curr = current; 1134 1135 if (debug_locks_silent) 1136 return 0; 1137 1138 printk("\n"); 1139 printk("======================================================\n"); 1140 printk("[ INFO: possible circular locking dependency detected ]\n"); 1141 print_kernel_ident(); 1142 printk("-------------------------------------------------------\n"); 1143 printk("%s/%d is trying to acquire lock:\n", 1144 curr->comm, task_pid_nr(curr)); 1145 print_lock(check_src); 1146 printk("\nbut task is already holding lock:\n"); 1147 print_lock(check_tgt); 1148 printk("\nwhich lock already depends on the new lock.\n\n"); 1149 printk("\nthe existing dependency chain (in reverse order) is:\n"); 1150 1151 print_circular_bug_entry(entry, depth); 1152 1153 return 0; 1154 } 1155 1156 static inline int class_equal(struct lock_list *entry, void *data) 1157 { 1158 return entry->class == data; 1159 } 1160 1161 static noinline int print_circular_bug(struct lock_list *this, 1162 struct lock_list *target, 1163 struct held_lock *check_src, 1164 struct held_lock *check_tgt) 1165 { 1166 struct task_struct *curr = current; 1167 struct lock_list *parent; 1168 struct lock_list *first_parent; 1169 int depth; 1170 1171 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 1172 return 0; 1173 1174 if (!save_trace(&this->trace)) 1175 return 0; 1176 1177 depth = get_lock_depth(target); 1178 1179 print_circular_bug_header(target, depth, check_src, check_tgt); 1180 1181 parent = get_lock_parent(target); 1182 first_parent = parent; 1183 1184 while (parent) { 1185 print_circular_bug_entry(parent, --depth); 1186 parent = get_lock_parent(parent); 1187 } 1188 1189 printk("\nother info that might help us debug this:\n\n"); 1190 print_circular_lock_scenario(check_src, check_tgt, 1191 first_parent); 1192 1193 lockdep_print_held_locks(curr); 1194 1195 printk("\nstack backtrace:\n"); 1196 dump_stack(); 1197 1198 return 0; 1199 } 1200 1201 static noinline int print_bfs_bug(int ret) 1202 { 1203 if (!debug_locks_off_graph_unlock()) 1204 return 0; 1205 1206 /* 1207 * Breadth-first-search failed, graph got corrupted? 1208 */ 1209 WARN(1, "lockdep bfs error:%d\n", ret); 1210 1211 return 0; 1212 } 1213 1214 static int noop_count(struct lock_list *entry, void *data) 1215 { 1216 (*(unsigned long *)data)++; 1217 return 0; 1218 } 1219 1220 static unsigned long __lockdep_count_forward_deps(struct lock_list *this) 1221 { 1222 unsigned long count = 0; 1223 struct lock_list *uninitialized_var(target_entry); 1224 1225 __bfs_forwards(this, (void *)&count, noop_count, &target_entry); 1226 1227 return count; 1228 } 1229 unsigned long lockdep_count_forward_deps(struct lock_class *class) 1230 { 1231 unsigned long ret, flags; 1232 struct lock_list this; 1233 1234 this.parent = NULL; 1235 this.class = class; 1236 1237 local_irq_save(flags); 1238 arch_spin_lock(&lockdep_lock); 1239 ret = __lockdep_count_forward_deps(&this); 1240 arch_spin_unlock(&lockdep_lock); 1241 local_irq_restore(flags); 1242 1243 return ret; 1244 } 1245 1246 static unsigned long __lockdep_count_backward_deps(struct lock_list *this) 1247 { 1248 unsigned long count = 0; 1249 struct lock_list *uninitialized_var(target_entry); 1250 1251 __bfs_backwards(this, (void *)&count, noop_count, &target_entry); 1252 1253 return count; 1254 } 1255 1256 unsigned long lockdep_count_backward_deps(struct lock_class *class) 1257 { 1258 unsigned long ret, flags; 1259 struct lock_list this; 1260 1261 this.parent = NULL; 1262 this.class = class; 1263 1264 local_irq_save(flags); 1265 arch_spin_lock(&lockdep_lock); 1266 ret = __lockdep_count_backward_deps(&this); 1267 arch_spin_unlock(&lockdep_lock); 1268 local_irq_restore(flags); 1269 1270 return ret; 1271 } 1272 1273 /* 1274 * Prove that the dependency graph starting at <entry> can not 1275 * lead to <target>. Print an error and return 0 if it does. 1276 */ 1277 static noinline int 1278 check_noncircular(struct lock_list *root, struct lock_class *target, 1279 struct lock_list **target_entry) 1280 { 1281 int result; 1282 1283 debug_atomic_inc(nr_cyclic_checks); 1284 1285 result = __bfs_forwards(root, target, class_equal, target_entry); 1286 1287 return result; 1288 } 1289 1290 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) 1291 /* 1292 * Forwards and backwards subgraph searching, for the purposes of 1293 * proving that two subgraphs can be connected by a new dependency 1294 * without creating any illegal irq-safe -> irq-unsafe lock dependency. 1295 */ 1296 1297 static inline int usage_match(struct lock_list *entry, void *bit) 1298 { 1299 return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit); 1300 } 1301 1302 1303 1304 /* 1305 * Find a node in the forwards-direction dependency sub-graph starting 1306 * at @root->class that matches @bit. 1307 * 1308 * Return 0 if such a node exists in the subgraph, and put that node 1309 * into *@target_entry. 1310 * 1311 * Return 1 otherwise and keep *@target_entry unchanged. 1312 * Return <0 on error. 1313 */ 1314 static int 1315 find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit, 1316 struct lock_list **target_entry) 1317 { 1318 int result; 1319 1320 debug_atomic_inc(nr_find_usage_forwards_checks); 1321 1322 result = __bfs_forwards(root, (void *)bit, usage_match, target_entry); 1323 1324 return result; 1325 } 1326 1327 /* 1328 * Find a node in the backwards-direction dependency sub-graph starting 1329 * at @root->class that matches @bit. 1330 * 1331 * Return 0 if such a node exists in the subgraph, and put that node 1332 * into *@target_entry. 1333 * 1334 * Return 1 otherwise and keep *@target_entry unchanged. 1335 * Return <0 on error. 1336 */ 1337 static int 1338 find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit, 1339 struct lock_list **target_entry) 1340 { 1341 int result; 1342 1343 debug_atomic_inc(nr_find_usage_backwards_checks); 1344 1345 result = __bfs_backwards(root, (void *)bit, usage_match, target_entry); 1346 1347 return result; 1348 } 1349 1350 static void print_lock_class_header(struct lock_class *class, int depth) 1351 { 1352 int bit; 1353 1354 printk("%*s->", depth, ""); 1355 print_lock_name(class); 1356 printk(" ops: %lu", class->ops); 1357 printk(" {\n"); 1358 1359 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) { 1360 if (class->usage_mask & (1 << bit)) { 1361 int len = depth; 1362 1363 len += printk("%*s %s", depth, "", usage_str[bit]); 1364 len += printk(" at:\n"); 1365 print_stack_trace(class->usage_traces + bit, len); 1366 } 1367 } 1368 printk("%*s }\n", depth, ""); 1369 1370 printk("%*s ... key at: ",depth,""); 1371 print_ip_sym((unsigned long)class->key); 1372 } 1373 1374 /* 1375 * printk the shortest lock dependencies from @start to @end in reverse order: 1376 */ 1377 static void __used 1378 print_shortest_lock_dependencies(struct lock_list *leaf, 1379 struct lock_list *root) 1380 { 1381 struct lock_list *entry = leaf; 1382 int depth; 1383 1384 /*compute depth from generated tree by BFS*/ 1385 depth = get_lock_depth(leaf); 1386 1387 do { 1388 print_lock_class_header(entry->class, depth); 1389 printk("%*s ... acquired at:\n", depth, ""); 1390 print_stack_trace(&entry->trace, 2); 1391 printk("\n"); 1392 1393 if (depth == 0 && (entry != root)) { 1394 printk("lockdep:%s bad path found in chain graph\n", __func__); 1395 break; 1396 } 1397 1398 entry = get_lock_parent(entry); 1399 depth--; 1400 } while (entry && (depth >= 0)); 1401 1402 return; 1403 } 1404 1405 static void 1406 print_irq_lock_scenario(struct lock_list *safe_entry, 1407 struct lock_list *unsafe_entry, 1408 struct lock_class *prev_class, 1409 struct lock_class *next_class) 1410 { 1411 struct lock_class *safe_class = safe_entry->class; 1412 struct lock_class *unsafe_class = unsafe_entry->class; 1413 struct lock_class *middle_class = prev_class; 1414 1415 if (middle_class == safe_class) 1416 middle_class = next_class; 1417 1418 /* 1419 * A direct locking problem where unsafe_class lock is taken 1420 * directly by safe_class lock, then all we need to show 1421 * is the deadlock scenario, as it is obvious that the 1422 * unsafe lock is taken under the safe lock. 1423 * 1424 * But if there is a chain instead, where the safe lock takes 1425 * an intermediate lock (middle_class) where this lock is 1426 * not the same as the safe lock, then the lock chain is 1427 * used to describe the problem. Otherwise we would need 1428 * to show a different CPU case for each link in the chain 1429 * from the safe_class lock to the unsafe_class lock. 1430 */ 1431 if (middle_class != unsafe_class) { 1432 printk("Chain exists of:\n "); 1433 __print_lock_name(safe_class); 1434 printk(" --> "); 1435 __print_lock_name(middle_class); 1436 printk(" --> "); 1437 __print_lock_name(unsafe_class); 1438 printk("\n\n"); 1439 } 1440 1441 printk(" Possible interrupt unsafe locking scenario:\n\n"); 1442 printk(" CPU0 CPU1\n"); 1443 printk(" ---- ----\n"); 1444 printk(" lock("); 1445 __print_lock_name(unsafe_class); 1446 printk(");\n"); 1447 printk(" local_irq_disable();\n"); 1448 printk(" lock("); 1449 __print_lock_name(safe_class); 1450 printk(");\n"); 1451 printk(" lock("); 1452 __print_lock_name(middle_class); 1453 printk(");\n"); 1454 printk(" <Interrupt>\n"); 1455 printk(" lock("); 1456 __print_lock_name(safe_class); 1457 printk(");\n"); 1458 printk("\n *** DEADLOCK ***\n\n"); 1459 } 1460 1461 static int 1462 print_bad_irq_dependency(struct task_struct *curr, 1463 struct lock_list *prev_root, 1464 struct lock_list *next_root, 1465 struct lock_list *backwards_entry, 1466 struct lock_list *forwards_entry, 1467 struct held_lock *prev, 1468 struct held_lock *next, 1469 enum lock_usage_bit bit1, 1470 enum lock_usage_bit bit2, 1471 const char *irqclass) 1472 { 1473 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 1474 return 0; 1475 1476 printk("\n"); 1477 printk("======================================================\n"); 1478 printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n", 1479 irqclass, irqclass); 1480 print_kernel_ident(); 1481 printk("------------------------------------------------------\n"); 1482 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", 1483 curr->comm, task_pid_nr(curr), 1484 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, 1485 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, 1486 curr->hardirqs_enabled, 1487 curr->softirqs_enabled); 1488 print_lock(next); 1489 1490 printk("\nand this task is already holding:\n"); 1491 print_lock(prev); 1492 printk("which would create a new lock dependency:\n"); 1493 print_lock_name(hlock_class(prev)); 1494 printk(" ->"); 1495 print_lock_name(hlock_class(next)); 1496 printk("\n"); 1497 1498 printk("\nbut this new dependency connects a %s-irq-safe lock:\n", 1499 irqclass); 1500 print_lock_name(backwards_entry->class); 1501 printk("\n... which became %s-irq-safe at:\n", irqclass); 1502 1503 print_stack_trace(backwards_entry->class->usage_traces + bit1, 1); 1504 1505 printk("\nto a %s-irq-unsafe lock:\n", irqclass); 1506 print_lock_name(forwards_entry->class); 1507 printk("\n... which became %s-irq-unsafe at:\n", irqclass); 1508 printk("..."); 1509 1510 print_stack_trace(forwards_entry->class->usage_traces + bit2, 1); 1511 1512 printk("\nother info that might help us debug this:\n\n"); 1513 print_irq_lock_scenario(backwards_entry, forwards_entry, 1514 hlock_class(prev), hlock_class(next)); 1515 1516 lockdep_print_held_locks(curr); 1517 1518 printk("\nthe dependencies between %s-irq-safe lock", irqclass); 1519 printk(" and the holding lock:\n"); 1520 if (!save_trace(&prev_root->trace)) 1521 return 0; 1522 print_shortest_lock_dependencies(backwards_entry, prev_root); 1523 1524 printk("\nthe dependencies between the lock to be acquired"); 1525 printk(" and %s-irq-unsafe lock:\n", irqclass); 1526 if (!save_trace(&next_root->trace)) 1527 return 0; 1528 print_shortest_lock_dependencies(forwards_entry, next_root); 1529 1530 printk("\nstack backtrace:\n"); 1531 dump_stack(); 1532 1533 return 0; 1534 } 1535 1536 static int 1537 check_usage(struct task_struct *curr, struct held_lock *prev, 1538 struct held_lock *next, enum lock_usage_bit bit_backwards, 1539 enum lock_usage_bit bit_forwards, const char *irqclass) 1540 { 1541 int ret; 1542 struct lock_list this, that; 1543 struct lock_list *uninitialized_var(target_entry); 1544 struct lock_list *uninitialized_var(target_entry1); 1545 1546 this.parent = NULL; 1547 1548 this.class = hlock_class(prev); 1549 ret = find_usage_backwards(&this, bit_backwards, &target_entry); 1550 if (ret < 0) 1551 return print_bfs_bug(ret); 1552 if (ret == 1) 1553 return ret; 1554 1555 that.parent = NULL; 1556 that.class = hlock_class(next); 1557 ret = find_usage_forwards(&that, bit_forwards, &target_entry1); 1558 if (ret < 0) 1559 return print_bfs_bug(ret); 1560 if (ret == 1) 1561 return ret; 1562 1563 return print_bad_irq_dependency(curr, &this, &that, 1564 target_entry, target_entry1, 1565 prev, next, 1566 bit_backwards, bit_forwards, irqclass); 1567 } 1568 1569 static const char *state_names[] = { 1570 #define LOCKDEP_STATE(__STATE) \ 1571 __stringify(__STATE), 1572 #include "lockdep_states.h" 1573 #undef LOCKDEP_STATE 1574 }; 1575 1576 static const char *state_rnames[] = { 1577 #define LOCKDEP_STATE(__STATE) \ 1578 __stringify(__STATE)"-READ", 1579 #include "lockdep_states.h" 1580 #undef LOCKDEP_STATE 1581 }; 1582 1583 static inline const char *state_name(enum lock_usage_bit bit) 1584 { 1585 return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2]; 1586 } 1587 1588 static int exclusive_bit(int new_bit) 1589 { 1590 /* 1591 * USED_IN 1592 * USED_IN_READ 1593 * ENABLED 1594 * ENABLED_READ 1595 * 1596 * bit 0 - write/read 1597 * bit 1 - used_in/enabled 1598 * bit 2+ state 1599 */ 1600 1601 int state = new_bit & ~3; 1602 int dir = new_bit & 2; 1603 1604 /* 1605 * keep state, bit flip the direction and strip read. 1606 */ 1607 return state | (dir ^ 2); 1608 } 1609 1610 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, 1611 struct held_lock *next, enum lock_usage_bit bit) 1612 { 1613 /* 1614 * Prove that the new dependency does not connect a hardirq-safe 1615 * lock with a hardirq-unsafe lock - to achieve this we search 1616 * the backwards-subgraph starting at <prev>, and the 1617 * forwards-subgraph starting at <next>: 1618 */ 1619 if (!check_usage(curr, prev, next, bit, 1620 exclusive_bit(bit), state_name(bit))) 1621 return 0; 1622 1623 bit++; /* _READ */ 1624 1625 /* 1626 * Prove that the new dependency does not connect a hardirq-safe-read 1627 * lock with a hardirq-unsafe lock - to achieve this we search 1628 * the backwards-subgraph starting at <prev>, and the 1629 * forwards-subgraph starting at <next>: 1630 */ 1631 if (!check_usage(curr, prev, next, bit, 1632 exclusive_bit(bit), state_name(bit))) 1633 return 0; 1634 1635 return 1; 1636 } 1637 1638 static int 1639 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, 1640 struct held_lock *next) 1641 { 1642 #define LOCKDEP_STATE(__STATE) \ 1643 if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \ 1644 return 0; 1645 #include "lockdep_states.h" 1646 #undef LOCKDEP_STATE 1647 1648 return 1; 1649 } 1650 1651 static void inc_chains(void) 1652 { 1653 if (current->hardirq_context) 1654 nr_hardirq_chains++; 1655 else { 1656 if (current->softirq_context) 1657 nr_softirq_chains++; 1658 else 1659 nr_process_chains++; 1660 } 1661 } 1662 1663 #else 1664 1665 static inline int 1666 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, 1667 struct held_lock *next) 1668 { 1669 return 1; 1670 } 1671 1672 static inline void inc_chains(void) 1673 { 1674 nr_process_chains++; 1675 } 1676 1677 #endif 1678 1679 static void 1680 print_deadlock_scenario(struct held_lock *nxt, 1681 struct held_lock *prv) 1682 { 1683 struct lock_class *next = hlock_class(nxt); 1684 struct lock_class *prev = hlock_class(prv); 1685 1686 printk(" Possible unsafe locking scenario:\n\n"); 1687 printk(" CPU0\n"); 1688 printk(" ----\n"); 1689 printk(" lock("); 1690 __print_lock_name(prev); 1691 printk(");\n"); 1692 printk(" lock("); 1693 __print_lock_name(next); 1694 printk(");\n"); 1695 printk("\n *** DEADLOCK ***\n\n"); 1696 printk(" May be due to missing lock nesting notation\n\n"); 1697 } 1698 1699 static int 1700 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, 1701 struct held_lock *next) 1702 { 1703 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 1704 return 0; 1705 1706 printk("\n"); 1707 printk("=============================================\n"); 1708 printk("[ INFO: possible recursive locking detected ]\n"); 1709 print_kernel_ident(); 1710 printk("---------------------------------------------\n"); 1711 printk("%s/%d is trying to acquire lock:\n", 1712 curr->comm, task_pid_nr(curr)); 1713 print_lock(next); 1714 printk("\nbut task is already holding lock:\n"); 1715 print_lock(prev); 1716 1717 printk("\nother info that might help us debug this:\n"); 1718 print_deadlock_scenario(next, prev); 1719 lockdep_print_held_locks(curr); 1720 1721 printk("\nstack backtrace:\n"); 1722 dump_stack(); 1723 1724 return 0; 1725 } 1726 1727 /* 1728 * Check whether we are holding such a class already. 1729 * 1730 * (Note that this has to be done separately, because the graph cannot 1731 * detect such classes of deadlocks.) 1732 * 1733 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read 1734 */ 1735 static int 1736 check_deadlock(struct task_struct *curr, struct held_lock *next, 1737 struct lockdep_map *next_instance, int read) 1738 { 1739 struct held_lock *prev; 1740 struct held_lock *nest = NULL; 1741 int i; 1742 1743 for (i = 0; i < curr->lockdep_depth; i++) { 1744 prev = curr->held_locks + i; 1745 1746 if (prev->instance == next->nest_lock) 1747 nest = prev; 1748 1749 if (hlock_class(prev) != hlock_class(next)) 1750 continue; 1751 1752 /* 1753 * Allow read-after-read recursion of the same 1754 * lock class (i.e. read_lock(lock)+read_lock(lock)): 1755 */ 1756 if ((read == 2) && prev->read) 1757 return 2; 1758 1759 /* 1760 * We're holding the nest_lock, which serializes this lock's 1761 * nesting behaviour. 1762 */ 1763 if (nest) 1764 return 2; 1765 1766 return print_deadlock_bug(curr, prev, next); 1767 } 1768 return 1; 1769 } 1770 1771 /* 1772 * There was a chain-cache miss, and we are about to add a new dependency 1773 * to a previous lock. We recursively validate the following rules: 1774 * 1775 * - would the adding of the <prev> -> <next> dependency create a 1776 * circular dependency in the graph? [== circular deadlock] 1777 * 1778 * - does the new prev->next dependency connect any hardirq-safe lock 1779 * (in the full backwards-subgraph starting at <prev>) with any 1780 * hardirq-unsafe lock (in the full forwards-subgraph starting at 1781 * <next>)? [== illegal lock inversion with hardirq contexts] 1782 * 1783 * - does the new prev->next dependency connect any softirq-safe lock 1784 * (in the full backwards-subgraph starting at <prev>) with any 1785 * softirq-unsafe lock (in the full forwards-subgraph starting at 1786 * <next>)? [== illegal lock inversion with softirq contexts] 1787 * 1788 * any of these scenarios could lead to a deadlock. 1789 * 1790 * Then if all the validations pass, we add the forwards and backwards 1791 * dependency. 1792 */ 1793 static int 1794 check_prev_add(struct task_struct *curr, struct held_lock *prev, 1795 struct held_lock *next, int distance, int *stack_saved) 1796 { 1797 struct lock_list *entry; 1798 int ret; 1799 struct lock_list this; 1800 struct lock_list *uninitialized_var(target_entry); 1801 /* 1802 * Static variable, serialized by the graph_lock(). 1803 * 1804 * We use this static variable to save the stack trace in case 1805 * we call into this function multiple times due to encountering 1806 * trylocks in the held lock stack. 1807 */ 1808 static struct stack_trace trace; 1809 1810 /* 1811 * Prove that the new <prev> -> <next> dependency would not 1812 * create a circular dependency in the graph. (We do this by 1813 * forward-recursing into the graph starting at <next>, and 1814 * checking whether we can reach <prev>.) 1815 * 1816 * We are using global variables to control the recursion, to 1817 * keep the stackframe size of the recursive functions low: 1818 */ 1819 this.class = hlock_class(next); 1820 this.parent = NULL; 1821 ret = check_noncircular(&this, hlock_class(prev), &target_entry); 1822 if (unlikely(!ret)) 1823 return print_circular_bug(&this, target_entry, next, prev); 1824 else if (unlikely(ret < 0)) 1825 return print_bfs_bug(ret); 1826 1827 if (!check_prev_add_irq(curr, prev, next)) 1828 return 0; 1829 1830 /* 1831 * For recursive read-locks we do all the dependency checks, 1832 * but we dont store read-triggered dependencies (only 1833 * write-triggered dependencies). This ensures that only the 1834 * write-side dependencies matter, and that if for example a 1835 * write-lock never takes any other locks, then the reads are 1836 * equivalent to a NOP. 1837 */ 1838 if (next->read == 2 || prev->read == 2) 1839 return 1; 1840 /* 1841 * Is the <prev> -> <next> dependency already present? 1842 * 1843 * (this may occur even though this is a new chain: consider 1844 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3 1845 * chains - the second one will be new, but L1 already has 1846 * L2 added to its dependency list, due to the first chain.) 1847 */ 1848 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) { 1849 if (entry->class == hlock_class(next)) { 1850 if (distance == 1) 1851 entry->distance = 1; 1852 return 2; 1853 } 1854 } 1855 1856 if (!*stack_saved) { 1857 if (!save_trace(&trace)) 1858 return 0; 1859 *stack_saved = 1; 1860 } 1861 1862 /* 1863 * Ok, all validations passed, add the new lock 1864 * to the previous lock's dependency list: 1865 */ 1866 ret = add_lock_to_list(hlock_class(prev), hlock_class(next), 1867 &hlock_class(prev)->locks_after, 1868 next->acquire_ip, distance, &trace); 1869 1870 if (!ret) 1871 return 0; 1872 1873 ret = add_lock_to_list(hlock_class(next), hlock_class(prev), 1874 &hlock_class(next)->locks_before, 1875 next->acquire_ip, distance, &trace); 1876 if (!ret) 1877 return 0; 1878 1879 /* 1880 * Debugging printouts: 1881 */ 1882 if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) { 1883 /* We drop graph lock, so another thread can overwrite trace. */ 1884 *stack_saved = 0; 1885 graph_unlock(); 1886 printk("\n new dependency: "); 1887 print_lock_name(hlock_class(prev)); 1888 printk(" => "); 1889 print_lock_name(hlock_class(next)); 1890 printk("\n"); 1891 dump_stack(); 1892 return graph_lock(); 1893 } 1894 return 1; 1895 } 1896 1897 /* 1898 * Add the dependency to all directly-previous locks that are 'relevant'. 1899 * The ones that are relevant are (in increasing distance from curr): 1900 * all consecutive trylock entries and the final non-trylock entry - or 1901 * the end of this context's lock-chain - whichever comes first. 1902 */ 1903 static int 1904 check_prevs_add(struct task_struct *curr, struct held_lock *next) 1905 { 1906 int depth = curr->lockdep_depth; 1907 int stack_saved = 0; 1908 struct held_lock *hlock; 1909 1910 /* 1911 * Debugging checks. 1912 * 1913 * Depth must not be zero for a non-head lock: 1914 */ 1915 if (!depth) 1916 goto out_bug; 1917 /* 1918 * At least two relevant locks must exist for this 1919 * to be a head: 1920 */ 1921 if (curr->held_locks[depth].irq_context != 1922 curr->held_locks[depth-1].irq_context) 1923 goto out_bug; 1924 1925 for (;;) { 1926 int distance = curr->lockdep_depth - depth + 1; 1927 hlock = curr->held_locks + depth - 1; 1928 /* 1929 * Only non-recursive-read entries get new dependencies 1930 * added: 1931 */ 1932 if (hlock->read != 2 && hlock->check) { 1933 if (!check_prev_add(curr, hlock, next, 1934 distance, &stack_saved)) 1935 return 0; 1936 /* 1937 * Stop after the first non-trylock entry, 1938 * as non-trylock entries have added their 1939 * own direct dependencies already, so this 1940 * lock is connected to them indirectly: 1941 */ 1942 if (!hlock->trylock) 1943 break; 1944 } 1945 depth--; 1946 /* 1947 * End of lock-stack? 1948 */ 1949 if (!depth) 1950 break; 1951 /* 1952 * Stop the search if we cross into another context: 1953 */ 1954 if (curr->held_locks[depth].irq_context != 1955 curr->held_locks[depth-1].irq_context) 1956 break; 1957 } 1958 return 1; 1959 out_bug: 1960 if (!debug_locks_off_graph_unlock()) 1961 return 0; 1962 1963 /* 1964 * Clearly we all shouldn't be here, but since we made it we 1965 * can reliable say we messed up our state. See the above two 1966 * gotos for reasons why we could possibly end up here. 1967 */ 1968 WARN_ON(1); 1969 1970 return 0; 1971 } 1972 1973 unsigned long nr_lock_chains; 1974 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS]; 1975 int nr_chain_hlocks; 1976 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS]; 1977 1978 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i) 1979 { 1980 return lock_classes + chain_hlocks[chain->base + i]; 1981 } 1982 1983 /* 1984 * Returns the index of the first held_lock of the current chain 1985 */ 1986 static inline int get_first_held_lock(struct task_struct *curr, 1987 struct held_lock *hlock) 1988 { 1989 int i; 1990 struct held_lock *hlock_curr; 1991 1992 for (i = curr->lockdep_depth - 1; i >= 0; i--) { 1993 hlock_curr = curr->held_locks + i; 1994 if (hlock_curr->irq_context != hlock->irq_context) 1995 break; 1996 1997 } 1998 1999 return ++i; 2000 } 2001 2002 /* 2003 * Checks whether the chain and the current held locks are consistent 2004 * in depth and also in content. If they are not it most likely means 2005 * that there was a collision during the calculation of the chain_key. 2006 * Returns: 0 not passed, 1 passed 2007 */ 2008 static int check_no_collision(struct task_struct *curr, 2009 struct held_lock *hlock, 2010 struct lock_chain *chain) 2011 { 2012 #ifdef CONFIG_DEBUG_LOCKDEP 2013 int i, j, id; 2014 2015 i = get_first_held_lock(curr, hlock); 2016 2017 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) 2018 return 0; 2019 2020 for (j = 0; j < chain->depth - 1; j++, i++) { 2021 id = curr->held_locks[i].class_idx - 1; 2022 2023 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) 2024 return 0; 2025 } 2026 #endif 2027 return 1; 2028 } 2029 2030 /* 2031 * Look up a dependency chain. If the key is not present yet then 2032 * add it and return 1 - in this case the new dependency chain is 2033 * validated. If the key is already hashed, return 0. 2034 * (On return with 1 graph_lock is held.) 2035 */ 2036 static inline int lookup_chain_cache(struct task_struct *curr, 2037 struct held_lock *hlock, 2038 u64 chain_key) 2039 { 2040 struct lock_class *class = hlock_class(hlock); 2041 struct hlist_head *hash_head = chainhashentry(chain_key); 2042 struct lock_chain *chain; 2043 int i, j; 2044 2045 /* 2046 * We might need to take the graph lock, ensure we've got IRQs 2047 * disabled to make this an IRQ-safe lock.. for recursion reasons 2048 * lockdep won't complain about its own locking errors. 2049 */ 2050 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2051 return 0; 2052 /* 2053 * We can walk it lock-free, because entries only get added 2054 * to the hash: 2055 */ 2056 hlist_for_each_entry_rcu(chain, hash_head, entry) { 2057 if (chain->chain_key == chain_key) { 2058 cache_hit: 2059 debug_atomic_inc(chain_lookup_hits); 2060 if (!check_no_collision(curr, hlock, chain)) 2061 return 0; 2062 2063 if (very_verbose(class)) 2064 printk("\nhash chain already cached, key: " 2065 "%016Lx tail class: [%p] %s\n", 2066 (unsigned long long)chain_key, 2067 class->key, class->name); 2068 return 0; 2069 } 2070 } 2071 if (very_verbose(class)) 2072 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n", 2073 (unsigned long long)chain_key, class->key, class->name); 2074 /* 2075 * Allocate a new chain entry from the static array, and add 2076 * it to the hash: 2077 */ 2078 if (!graph_lock()) 2079 return 0; 2080 /* 2081 * We have to walk the chain again locked - to avoid duplicates: 2082 */ 2083 hlist_for_each_entry(chain, hash_head, entry) { 2084 if (chain->chain_key == chain_key) { 2085 graph_unlock(); 2086 goto cache_hit; 2087 } 2088 } 2089 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { 2090 if (!debug_locks_off_graph_unlock()) 2091 return 0; 2092 2093 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!"); 2094 dump_stack(); 2095 return 0; 2096 } 2097 chain = lock_chains + nr_lock_chains++; 2098 chain->chain_key = chain_key; 2099 chain->irq_context = hlock->irq_context; 2100 i = get_first_held_lock(curr, hlock); 2101 chain->depth = curr->lockdep_depth + 1 - i; 2102 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { 2103 chain->base = nr_chain_hlocks; 2104 nr_chain_hlocks += chain->depth; 2105 for (j = 0; j < chain->depth - 1; j++, i++) { 2106 int lock_id = curr->held_locks[i].class_idx - 1; 2107 chain_hlocks[chain->base + j] = lock_id; 2108 } 2109 chain_hlocks[chain->base + j] = class - lock_classes; 2110 } 2111 hlist_add_head_rcu(&chain->entry, hash_head); 2112 debug_atomic_inc(chain_lookup_misses); 2113 inc_chains(); 2114 2115 return 1; 2116 } 2117 2118 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock, 2119 struct held_lock *hlock, int chain_head, u64 chain_key) 2120 { 2121 /* 2122 * Trylock needs to maintain the stack of held locks, but it 2123 * does not add new dependencies, because trylock can be done 2124 * in any order. 2125 * 2126 * We look up the chain_key and do the O(N^2) check and update of 2127 * the dependencies only if this is a new dependency chain. 2128 * (If lookup_chain_cache() returns with 1 it acquires 2129 * graph_lock for us) 2130 */ 2131 if (!hlock->trylock && hlock->check && 2132 lookup_chain_cache(curr, hlock, chain_key)) { 2133 /* 2134 * Check whether last held lock: 2135 * 2136 * - is irq-safe, if this lock is irq-unsafe 2137 * - is softirq-safe, if this lock is hardirq-unsafe 2138 * 2139 * And check whether the new lock's dependency graph 2140 * could lead back to the previous lock. 2141 * 2142 * any of these scenarios could lead to a deadlock. If 2143 * All validations 2144 */ 2145 int ret = check_deadlock(curr, hlock, lock, hlock->read); 2146 2147 if (!ret) 2148 return 0; 2149 /* 2150 * Mark recursive read, as we jump over it when 2151 * building dependencies (just like we jump over 2152 * trylock entries): 2153 */ 2154 if (ret == 2) 2155 hlock->read = 2; 2156 /* 2157 * Add dependency only if this lock is not the head 2158 * of the chain, and if it's not a secondary read-lock: 2159 */ 2160 if (!chain_head && ret != 2) 2161 if (!check_prevs_add(curr, hlock)) 2162 return 0; 2163 graph_unlock(); 2164 } else 2165 /* after lookup_chain_cache(): */ 2166 if (unlikely(!debug_locks)) 2167 return 0; 2168 2169 return 1; 2170 } 2171 #else 2172 static inline int validate_chain(struct task_struct *curr, 2173 struct lockdep_map *lock, struct held_lock *hlock, 2174 int chain_head, u64 chain_key) 2175 { 2176 return 1; 2177 } 2178 #endif 2179 2180 /* 2181 * We are building curr_chain_key incrementally, so double-check 2182 * it from scratch, to make sure that it's done correctly: 2183 */ 2184 static void check_chain_key(struct task_struct *curr) 2185 { 2186 #ifdef CONFIG_DEBUG_LOCKDEP 2187 struct held_lock *hlock, *prev_hlock = NULL; 2188 unsigned int i; 2189 u64 chain_key = 0; 2190 2191 for (i = 0; i < curr->lockdep_depth; i++) { 2192 hlock = curr->held_locks + i; 2193 if (chain_key != hlock->prev_chain_key) { 2194 debug_locks_off(); 2195 /* 2196 * We got mighty confused, our chain keys don't match 2197 * with what we expect, someone trample on our task state? 2198 */ 2199 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n", 2200 curr->lockdep_depth, i, 2201 (unsigned long long)chain_key, 2202 (unsigned long long)hlock->prev_chain_key); 2203 return; 2204 } 2205 /* 2206 * Whoops ran out of static storage again? 2207 */ 2208 if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS)) 2209 return; 2210 2211 if (prev_hlock && (prev_hlock->irq_context != 2212 hlock->irq_context)) 2213 chain_key = 0; 2214 chain_key = iterate_chain_key(chain_key, hlock->class_idx); 2215 prev_hlock = hlock; 2216 } 2217 if (chain_key != curr->curr_chain_key) { 2218 debug_locks_off(); 2219 /* 2220 * More smoking hash instead of calculating it, damn see these 2221 * numbers float.. I bet that a pink elephant stepped on my memory. 2222 */ 2223 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n", 2224 curr->lockdep_depth, i, 2225 (unsigned long long)chain_key, 2226 (unsigned long long)curr->curr_chain_key); 2227 } 2228 #endif 2229 } 2230 2231 static void 2232 print_usage_bug_scenario(struct held_lock *lock) 2233 { 2234 struct lock_class *class = hlock_class(lock); 2235 2236 printk(" Possible unsafe locking scenario:\n\n"); 2237 printk(" CPU0\n"); 2238 printk(" ----\n"); 2239 printk(" lock("); 2240 __print_lock_name(class); 2241 printk(");\n"); 2242 printk(" <Interrupt>\n"); 2243 printk(" lock("); 2244 __print_lock_name(class); 2245 printk(");\n"); 2246 printk("\n *** DEADLOCK ***\n\n"); 2247 } 2248 2249 static int 2250 print_usage_bug(struct task_struct *curr, struct held_lock *this, 2251 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) 2252 { 2253 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 2254 return 0; 2255 2256 printk("\n"); 2257 printk("=================================\n"); 2258 printk("[ INFO: inconsistent lock state ]\n"); 2259 print_kernel_ident(); 2260 printk("---------------------------------\n"); 2261 2262 printk("inconsistent {%s} -> {%s} usage.\n", 2263 usage_str[prev_bit], usage_str[new_bit]); 2264 2265 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", 2266 curr->comm, task_pid_nr(curr), 2267 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, 2268 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, 2269 trace_hardirqs_enabled(curr), 2270 trace_softirqs_enabled(curr)); 2271 print_lock(this); 2272 2273 printk("{%s} state was registered at:\n", usage_str[prev_bit]); 2274 print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1); 2275 2276 print_irqtrace_events(curr); 2277 printk("\nother info that might help us debug this:\n"); 2278 print_usage_bug_scenario(this); 2279 2280 lockdep_print_held_locks(curr); 2281 2282 printk("\nstack backtrace:\n"); 2283 dump_stack(); 2284 2285 return 0; 2286 } 2287 2288 /* 2289 * Print out an error if an invalid bit is set: 2290 */ 2291 static inline int 2292 valid_state(struct task_struct *curr, struct held_lock *this, 2293 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) 2294 { 2295 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) 2296 return print_usage_bug(curr, this, bad_bit, new_bit); 2297 return 1; 2298 } 2299 2300 static int mark_lock(struct task_struct *curr, struct held_lock *this, 2301 enum lock_usage_bit new_bit); 2302 2303 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) 2304 2305 /* 2306 * print irq inversion bug: 2307 */ 2308 static int 2309 print_irq_inversion_bug(struct task_struct *curr, 2310 struct lock_list *root, struct lock_list *other, 2311 struct held_lock *this, int forwards, 2312 const char *irqclass) 2313 { 2314 struct lock_list *entry = other; 2315 struct lock_list *middle = NULL; 2316 int depth; 2317 2318 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 2319 return 0; 2320 2321 printk("\n"); 2322 printk("=========================================================\n"); 2323 printk("[ INFO: possible irq lock inversion dependency detected ]\n"); 2324 print_kernel_ident(); 2325 printk("---------------------------------------------------------\n"); 2326 printk("%s/%d just changed the state of lock:\n", 2327 curr->comm, task_pid_nr(curr)); 2328 print_lock(this); 2329 if (forwards) 2330 printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass); 2331 else 2332 printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass); 2333 print_lock_name(other->class); 2334 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); 2335 2336 printk("\nother info that might help us debug this:\n"); 2337 2338 /* Find a middle lock (if one exists) */ 2339 depth = get_lock_depth(other); 2340 do { 2341 if (depth == 0 && (entry != root)) { 2342 printk("lockdep:%s bad path found in chain graph\n", __func__); 2343 break; 2344 } 2345 middle = entry; 2346 entry = get_lock_parent(entry); 2347 depth--; 2348 } while (entry && entry != root && (depth >= 0)); 2349 if (forwards) 2350 print_irq_lock_scenario(root, other, 2351 middle ? middle->class : root->class, other->class); 2352 else 2353 print_irq_lock_scenario(other, root, 2354 middle ? middle->class : other->class, root->class); 2355 2356 lockdep_print_held_locks(curr); 2357 2358 printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); 2359 if (!save_trace(&root->trace)) 2360 return 0; 2361 print_shortest_lock_dependencies(other, root); 2362 2363 printk("\nstack backtrace:\n"); 2364 dump_stack(); 2365 2366 return 0; 2367 } 2368 2369 /* 2370 * Prove that in the forwards-direction subgraph starting at <this> 2371 * there is no lock matching <mask>: 2372 */ 2373 static int 2374 check_usage_forwards(struct task_struct *curr, struct held_lock *this, 2375 enum lock_usage_bit bit, const char *irqclass) 2376 { 2377 int ret; 2378 struct lock_list root; 2379 struct lock_list *uninitialized_var(target_entry); 2380 2381 root.parent = NULL; 2382 root.class = hlock_class(this); 2383 ret = find_usage_forwards(&root, bit, &target_entry); 2384 if (ret < 0) 2385 return print_bfs_bug(ret); 2386 if (ret == 1) 2387 return ret; 2388 2389 return print_irq_inversion_bug(curr, &root, target_entry, 2390 this, 1, irqclass); 2391 } 2392 2393 /* 2394 * Prove that in the backwards-direction subgraph starting at <this> 2395 * there is no lock matching <mask>: 2396 */ 2397 static int 2398 check_usage_backwards(struct task_struct *curr, struct held_lock *this, 2399 enum lock_usage_bit bit, const char *irqclass) 2400 { 2401 int ret; 2402 struct lock_list root; 2403 struct lock_list *uninitialized_var(target_entry); 2404 2405 root.parent = NULL; 2406 root.class = hlock_class(this); 2407 ret = find_usage_backwards(&root, bit, &target_entry); 2408 if (ret < 0) 2409 return print_bfs_bug(ret); 2410 if (ret == 1) 2411 return ret; 2412 2413 return print_irq_inversion_bug(curr, &root, target_entry, 2414 this, 0, irqclass); 2415 } 2416 2417 void print_irqtrace_events(struct task_struct *curr) 2418 { 2419 printk("irq event stamp: %u\n", curr->irq_events); 2420 printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event); 2421 print_ip_sym(curr->hardirq_enable_ip); 2422 printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event); 2423 print_ip_sym(curr->hardirq_disable_ip); 2424 printk("softirqs last enabled at (%u): ", curr->softirq_enable_event); 2425 print_ip_sym(curr->softirq_enable_ip); 2426 printk("softirqs last disabled at (%u): ", curr->softirq_disable_event); 2427 print_ip_sym(curr->softirq_disable_ip); 2428 } 2429 2430 static int HARDIRQ_verbose(struct lock_class *class) 2431 { 2432 #if HARDIRQ_VERBOSE 2433 return class_filter(class); 2434 #endif 2435 return 0; 2436 } 2437 2438 static int SOFTIRQ_verbose(struct lock_class *class) 2439 { 2440 #if SOFTIRQ_VERBOSE 2441 return class_filter(class); 2442 #endif 2443 return 0; 2444 } 2445 2446 static int RECLAIM_FS_verbose(struct lock_class *class) 2447 { 2448 #if RECLAIM_VERBOSE 2449 return class_filter(class); 2450 #endif 2451 return 0; 2452 } 2453 2454 #define STRICT_READ_CHECKS 1 2455 2456 static int (*state_verbose_f[])(struct lock_class *class) = { 2457 #define LOCKDEP_STATE(__STATE) \ 2458 __STATE##_verbose, 2459 #include "lockdep_states.h" 2460 #undef LOCKDEP_STATE 2461 }; 2462 2463 static inline int state_verbose(enum lock_usage_bit bit, 2464 struct lock_class *class) 2465 { 2466 return state_verbose_f[bit >> 2](class); 2467 } 2468 2469 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *, 2470 enum lock_usage_bit bit, const char *name); 2471 2472 static int 2473 mark_lock_irq(struct task_struct *curr, struct held_lock *this, 2474 enum lock_usage_bit new_bit) 2475 { 2476 int excl_bit = exclusive_bit(new_bit); 2477 int read = new_bit & 1; 2478 int dir = new_bit & 2; 2479 2480 /* 2481 * mark USED_IN has to look forwards -- to ensure no dependency 2482 * has ENABLED state, which would allow recursion deadlocks. 2483 * 2484 * mark ENABLED has to look backwards -- to ensure no dependee 2485 * has USED_IN state, which, again, would allow recursion deadlocks. 2486 */ 2487 check_usage_f usage = dir ? 2488 check_usage_backwards : check_usage_forwards; 2489 2490 /* 2491 * Validate that this particular lock does not have conflicting 2492 * usage states. 2493 */ 2494 if (!valid_state(curr, this, new_bit, excl_bit)) 2495 return 0; 2496 2497 /* 2498 * Validate that the lock dependencies don't have conflicting usage 2499 * states. 2500 */ 2501 if ((!read || !dir || STRICT_READ_CHECKS) && 2502 !usage(curr, this, excl_bit, state_name(new_bit & ~1))) 2503 return 0; 2504 2505 /* 2506 * Check for read in write conflicts 2507 */ 2508 if (!read) { 2509 if (!valid_state(curr, this, new_bit, excl_bit + 1)) 2510 return 0; 2511 2512 if (STRICT_READ_CHECKS && 2513 !usage(curr, this, excl_bit + 1, 2514 state_name(new_bit + 1))) 2515 return 0; 2516 } 2517 2518 if (state_verbose(new_bit, hlock_class(this))) 2519 return 2; 2520 2521 return 1; 2522 } 2523 2524 enum mark_type { 2525 #define LOCKDEP_STATE(__STATE) __STATE, 2526 #include "lockdep_states.h" 2527 #undef LOCKDEP_STATE 2528 }; 2529 2530 /* 2531 * Mark all held locks with a usage bit: 2532 */ 2533 static int 2534 mark_held_locks(struct task_struct *curr, enum mark_type mark) 2535 { 2536 enum lock_usage_bit usage_bit; 2537 struct held_lock *hlock; 2538 int i; 2539 2540 for (i = 0; i < curr->lockdep_depth; i++) { 2541 hlock = curr->held_locks + i; 2542 2543 usage_bit = 2 + (mark << 2); /* ENABLED */ 2544 if (hlock->read) 2545 usage_bit += 1; /* READ */ 2546 2547 BUG_ON(usage_bit >= LOCK_USAGE_STATES); 2548 2549 if (!hlock->check) 2550 continue; 2551 2552 if (!mark_lock(curr, hlock, usage_bit)) 2553 return 0; 2554 } 2555 2556 return 1; 2557 } 2558 2559 /* 2560 * Hardirqs will be enabled: 2561 */ 2562 static void __trace_hardirqs_on_caller(unsigned long ip) 2563 { 2564 struct task_struct *curr = current; 2565 2566 /* we'll do an OFF -> ON transition: */ 2567 curr->hardirqs_enabled = 1; 2568 2569 /* 2570 * We are going to turn hardirqs on, so set the 2571 * usage bit for all held locks: 2572 */ 2573 if (!mark_held_locks(curr, HARDIRQ)) 2574 return; 2575 /* 2576 * If we have softirqs enabled, then set the usage 2577 * bit for all held locks. (disabled hardirqs prevented 2578 * this bit from being set before) 2579 */ 2580 if (curr->softirqs_enabled) 2581 if (!mark_held_locks(curr, SOFTIRQ)) 2582 return; 2583 2584 curr->hardirq_enable_ip = ip; 2585 curr->hardirq_enable_event = ++curr->irq_events; 2586 debug_atomic_inc(hardirqs_on_events); 2587 } 2588 2589 __visible void trace_hardirqs_on_caller(unsigned long ip) 2590 { 2591 time_hardirqs_on(CALLER_ADDR0, ip); 2592 2593 if (unlikely(!debug_locks || current->lockdep_recursion)) 2594 return; 2595 2596 if (unlikely(current->hardirqs_enabled)) { 2597 /* 2598 * Neither irq nor preemption are disabled here 2599 * so this is racy by nature but losing one hit 2600 * in a stat is not a big deal. 2601 */ 2602 __debug_atomic_inc(redundant_hardirqs_on); 2603 return; 2604 } 2605 2606 /* 2607 * We're enabling irqs and according to our state above irqs weren't 2608 * already enabled, yet we find the hardware thinks they are in fact 2609 * enabled.. someone messed up their IRQ state tracing. 2610 */ 2611 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2612 return; 2613 2614 /* 2615 * See the fine text that goes along with this variable definition. 2616 */ 2617 if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled))) 2618 return; 2619 2620 /* 2621 * Can't allow enabling interrupts while in an interrupt handler, 2622 * that's general bad form and such. Recursion, limited stack etc.. 2623 */ 2624 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) 2625 return; 2626 2627 current->lockdep_recursion = 1; 2628 __trace_hardirqs_on_caller(ip); 2629 current->lockdep_recursion = 0; 2630 } 2631 EXPORT_SYMBOL(trace_hardirqs_on_caller); 2632 2633 void trace_hardirqs_on(void) 2634 { 2635 trace_hardirqs_on_caller(CALLER_ADDR0); 2636 } 2637 EXPORT_SYMBOL(trace_hardirqs_on); 2638 2639 /* 2640 * Hardirqs were disabled: 2641 */ 2642 __visible void trace_hardirqs_off_caller(unsigned long ip) 2643 { 2644 struct task_struct *curr = current; 2645 2646 time_hardirqs_off(CALLER_ADDR0, ip); 2647 2648 if (unlikely(!debug_locks || current->lockdep_recursion)) 2649 return; 2650 2651 /* 2652 * So we're supposed to get called after you mask local IRQs, but for 2653 * some reason the hardware doesn't quite think you did a proper job. 2654 */ 2655 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2656 return; 2657 2658 if (curr->hardirqs_enabled) { 2659 /* 2660 * We have done an ON -> OFF transition: 2661 */ 2662 curr->hardirqs_enabled = 0; 2663 curr->hardirq_disable_ip = ip; 2664 curr->hardirq_disable_event = ++curr->irq_events; 2665 debug_atomic_inc(hardirqs_off_events); 2666 } else 2667 debug_atomic_inc(redundant_hardirqs_off); 2668 } 2669 EXPORT_SYMBOL(trace_hardirqs_off_caller); 2670 2671 void trace_hardirqs_off(void) 2672 { 2673 trace_hardirqs_off_caller(CALLER_ADDR0); 2674 } 2675 EXPORT_SYMBOL(trace_hardirqs_off); 2676 2677 /* 2678 * Softirqs will be enabled: 2679 */ 2680 void trace_softirqs_on(unsigned long ip) 2681 { 2682 struct task_struct *curr = current; 2683 2684 if (unlikely(!debug_locks || current->lockdep_recursion)) 2685 return; 2686 2687 /* 2688 * We fancy IRQs being disabled here, see softirq.c, avoids 2689 * funny state and nesting things. 2690 */ 2691 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2692 return; 2693 2694 if (curr->softirqs_enabled) { 2695 debug_atomic_inc(redundant_softirqs_on); 2696 return; 2697 } 2698 2699 current->lockdep_recursion = 1; 2700 /* 2701 * We'll do an OFF -> ON transition: 2702 */ 2703 curr->softirqs_enabled = 1; 2704 curr->softirq_enable_ip = ip; 2705 curr->softirq_enable_event = ++curr->irq_events; 2706 debug_atomic_inc(softirqs_on_events); 2707 /* 2708 * We are going to turn softirqs on, so set the 2709 * usage bit for all held locks, if hardirqs are 2710 * enabled too: 2711 */ 2712 if (curr->hardirqs_enabled) 2713 mark_held_locks(curr, SOFTIRQ); 2714 current->lockdep_recursion = 0; 2715 } 2716 2717 /* 2718 * Softirqs were disabled: 2719 */ 2720 void trace_softirqs_off(unsigned long ip) 2721 { 2722 struct task_struct *curr = current; 2723 2724 if (unlikely(!debug_locks || current->lockdep_recursion)) 2725 return; 2726 2727 /* 2728 * We fancy IRQs being disabled here, see softirq.c 2729 */ 2730 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2731 return; 2732 2733 if (curr->softirqs_enabled) { 2734 /* 2735 * We have done an ON -> OFF transition: 2736 */ 2737 curr->softirqs_enabled = 0; 2738 curr->softirq_disable_ip = ip; 2739 curr->softirq_disable_event = ++curr->irq_events; 2740 debug_atomic_inc(softirqs_off_events); 2741 /* 2742 * Whoops, we wanted softirqs off, so why aren't they? 2743 */ 2744 DEBUG_LOCKS_WARN_ON(!softirq_count()); 2745 } else 2746 debug_atomic_inc(redundant_softirqs_off); 2747 } 2748 2749 static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags) 2750 { 2751 struct task_struct *curr = current; 2752 2753 if (unlikely(!debug_locks)) 2754 return; 2755 2756 /* no reclaim without waiting on it */ 2757 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 2758 return; 2759 2760 /* this guy won't enter reclaim */ 2761 if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC)) 2762 return; 2763 2764 /* We're only interested __GFP_FS allocations for now */ 2765 if (!(gfp_mask & __GFP_FS)) 2766 return; 2767 2768 /* 2769 * Oi! Can't be having __GFP_FS allocations with IRQs disabled. 2770 */ 2771 if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags))) 2772 return; 2773 2774 mark_held_locks(curr, RECLAIM_FS); 2775 } 2776 2777 static void check_flags(unsigned long flags); 2778 2779 void lockdep_trace_alloc(gfp_t gfp_mask) 2780 { 2781 unsigned long flags; 2782 2783 if (unlikely(current->lockdep_recursion)) 2784 return; 2785 2786 raw_local_irq_save(flags); 2787 check_flags(flags); 2788 current->lockdep_recursion = 1; 2789 __lockdep_trace_alloc(gfp_mask, flags); 2790 current->lockdep_recursion = 0; 2791 raw_local_irq_restore(flags); 2792 } 2793 2794 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) 2795 { 2796 /* 2797 * If non-trylock use in a hardirq or softirq context, then 2798 * mark the lock as used in these contexts: 2799 */ 2800 if (!hlock->trylock) { 2801 if (hlock->read) { 2802 if (curr->hardirq_context) 2803 if (!mark_lock(curr, hlock, 2804 LOCK_USED_IN_HARDIRQ_READ)) 2805 return 0; 2806 if (curr->softirq_context) 2807 if (!mark_lock(curr, hlock, 2808 LOCK_USED_IN_SOFTIRQ_READ)) 2809 return 0; 2810 } else { 2811 if (curr->hardirq_context) 2812 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) 2813 return 0; 2814 if (curr->softirq_context) 2815 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ)) 2816 return 0; 2817 } 2818 } 2819 if (!hlock->hardirqs_off) { 2820 if (hlock->read) { 2821 if (!mark_lock(curr, hlock, 2822 LOCK_ENABLED_HARDIRQ_READ)) 2823 return 0; 2824 if (curr->softirqs_enabled) 2825 if (!mark_lock(curr, hlock, 2826 LOCK_ENABLED_SOFTIRQ_READ)) 2827 return 0; 2828 } else { 2829 if (!mark_lock(curr, hlock, 2830 LOCK_ENABLED_HARDIRQ)) 2831 return 0; 2832 if (curr->softirqs_enabled) 2833 if (!mark_lock(curr, hlock, 2834 LOCK_ENABLED_SOFTIRQ)) 2835 return 0; 2836 } 2837 } 2838 2839 /* 2840 * We reuse the irq context infrastructure more broadly as a general 2841 * context checking code. This tests GFP_FS recursion (a lock taken 2842 * during reclaim for a GFP_FS allocation is held over a GFP_FS 2843 * allocation). 2844 */ 2845 if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) { 2846 if (hlock->read) { 2847 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ)) 2848 return 0; 2849 } else { 2850 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS)) 2851 return 0; 2852 } 2853 } 2854 2855 return 1; 2856 } 2857 2858 static int separate_irq_context(struct task_struct *curr, 2859 struct held_lock *hlock) 2860 { 2861 unsigned int depth = curr->lockdep_depth; 2862 2863 /* 2864 * Keep track of points where we cross into an interrupt context: 2865 */ 2866 hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) + 2867 curr->softirq_context; 2868 if (depth) { 2869 struct held_lock *prev_hlock; 2870 2871 prev_hlock = curr->held_locks + depth-1; 2872 /* 2873 * If we cross into another context, reset the 2874 * hash key (this also prevents the checking and the 2875 * adding of the dependency to 'prev'): 2876 */ 2877 if (prev_hlock->irq_context != hlock->irq_context) 2878 return 1; 2879 } 2880 return 0; 2881 } 2882 2883 #else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ 2884 2885 static inline 2886 int mark_lock_irq(struct task_struct *curr, struct held_lock *this, 2887 enum lock_usage_bit new_bit) 2888 { 2889 WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */ 2890 return 1; 2891 } 2892 2893 static inline int mark_irqflags(struct task_struct *curr, 2894 struct held_lock *hlock) 2895 { 2896 return 1; 2897 } 2898 2899 static inline int separate_irq_context(struct task_struct *curr, 2900 struct held_lock *hlock) 2901 { 2902 return 0; 2903 } 2904 2905 void lockdep_trace_alloc(gfp_t gfp_mask) 2906 { 2907 } 2908 2909 #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ 2910 2911 /* 2912 * Mark a lock with a usage bit, and validate the state transition: 2913 */ 2914 static int mark_lock(struct task_struct *curr, struct held_lock *this, 2915 enum lock_usage_bit new_bit) 2916 { 2917 unsigned int new_mask = 1 << new_bit, ret = 1; 2918 2919 /* 2920 * If already set then do not dirty the cacheline, 2921 * nor do any checks: 2922 */ 2923 if (likely(hlock_class(this)->usage_mask & new_mask)) 2924 return 1; 2925 2926 if (!graph_lock()) 2927 return 0; 2928 /* 2929 * Make sure we didn't race: 2930 */ 2931 if (unlikely(hlock_class(this)->usage_mask & new_mask)) { 2932 graph_unlock(); 2933 return 1; 2934 } 2935 2936 hlock_class(this)->usage_mask |= new_mask; 2937 2938 if (!save_trace(hlock_class(this)->usage_traces + new_bit)) 2939 return 0; 2940 2941 switch (new_bit) { 2942 #define LOCKDEP_STATE(__STATE) \ 2943 case LOCK_USED_IN_##__STATE: \ 2944 case LOCK_USED_IN_##__STATE##_READ: \ 2945 case LOCK_ENABLED_##__STATE: \ 2946 case LOCK_ENABLED_##__STATE##_READ: 2947 #include "lockdep_states.h" 2948 #undef LOCKDEP_STATE 2949 ret = mark_lock_irq(curr, this, new_bit); 2950 if (!ret) 2951 return 0; 2952 break; 2953 case LOCK_USED: 2954 debug_atomic_dec(nr_unused_locks); 2955 break; 2956 default: 2957 if (!debug_locks_off_graph_unlock()) 2958 return 0; 2959 WARN_ON(1); 2960 return 0; 2961 } 2962 2963 graph_unlock(); 2964 2965 /* 2966 * We must printk outside of the graph_lock: 2967 */ 2968 if (ret == 2) { 2969 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]); 2970 print_lock(this); 2971 print_irqtrace_events(curr); 2972 dump_stack(); 2973 } 2974 2975 return ret; 2976 } 2977 2978 /* 2979 * Initialize a lock instance's lock-class mapping info: 2980 */ 2981 void lockdep_init_map(struct lockdep_map *lock, const char *name, 2982 struct lock_class_key *key, int subclass) 2983 { 2984 int i; 2985 2986 kmemcheck_mark_initialized(lock, sizeof(*lock)); 2987 2988 for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) 2989 lock->class_cache[i] = NULL; 2990 2991 #ifdef CONFIG_LOCK_STAT 2992 lock->cpu = raw_smp_processor_id(); 2993 #endif 2994 2995 /* 2996 * Can't be having no nameless bastards around this place! 2997 */ 2998 if (DEBUG_LOCKS_WARN_ON(!name)) { 2999 lock->name = "NULL"; 3000 return; 3001 } 3002 3003 lock->name = name; 3004 3005 /* 3006 * No key, no joy, we need to hash something. 3007 */ 3008 if (DEBUG_LOCKS_WARN_ON(!key)) 3009 return; 3010 /* 3011 * Sanity check, the lock-class key must be persistent: 3012 */ 3013 if (!static_obj(key)) { 3014 printk("BUG: key %p not in .data!\n", key); 3015 /* 3016 * What it says above ^^^^^, I suggest you read it. 3017 */ 3018 DEBUG_LOCKS_WARN_ON(1); 3019 return; 3020 } 3021 lock->key = key; 3022 3023 if (unlikely(!debug_locks)) 3024 return; 3025 3026 if (subclass) { 3027 unsigned long flags; 3028 3029 if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion)) 3030 return; 3031 3032 raw_local_irq_save(flags); 3033 current->lockdep_recursion = 1; 3034 register_lock_class(lock, subclass, 1); 3035 current->lockdep_recursion = 0; 3036 raw_local_irq_restore(flags); 3037 } 3038 } 3039 EXPORT_SYMBOL_GPL(lockdep_init_map); 3040 3041 struct lock_class_key __lockdep_no_validate__; 3042 EXPORT_SYMBOL_GPL(__lockdep_no_validate__); 3043 3044 static int 3045 print_lock_nested_lock_not_held(struct task_struct *curr, 3046 struct held_lock *hlock, 3047 unsigned long ip) 3048 { 3049 if (!debug_locks_off()) 3050 return 0; 3051 if (debug_locks_silent) 3052 return 0; 3053 3054 printk("\n"); 3055 printk("==================================\n"); 3056 printk("[ BUG: Nested lock was not taken ]\n"); 3057 print_kernel_ident(); 3058 printk("----------------------------------\n"); 3059 3060 printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); 3061 print_lock(hlock); 3062 3063 printk("\nbut this task is not holding:\n"); 3064 printk("%s\n", hlock->nest_lock->name); 3065 3066 printk("\nstack backtrace:\n"); 3067 dump_stack(); 3068 3069 printk("\nother info that might help us debug this:\n"); 3070 lockdep_print_held_locks(curr); 3071 3072 printk("\nstack backtrace:\n"); 3073 dump_stack(); 3074 3075 return 0; 3076 } 3077 3078 static int __lock_is_held(struct lockdep_map *lock); 3079 3080 /* 3081 * This gets called for every mutex_lock*()/spin_lock*() operation. 3082 * We maintain the dependency maps and validate the locking attempt: 3083 */ 3084 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, 3085 int trylock, int read, int check, int hardirqs_off, 3086 struct lockdep_map *nest_lock, unsigned long ip, 3087 int references, int pin_count) 3088 { 3089 struct task_struct *curr = current; 3090 struct lock_class *class = NULL; 3091 struct held_lock *hlock; 3092 unsigned int depth; 3093 int chain_head = 0; 3094 int class_idx; 3095 u64 chain_key; 3096 3097 if (unlikely(!debug_locks)) 3098 return 0; 3099 3100 /* 3101 * Lockdep should run with IRQs disabled, otherwise we could 3102 * get an interrupt which would want to take locks, which would 3103 * end up in lockdep and have you got a head-ache already? 3104 */ 3105 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 3106 return 0; 3107 3108 if (!prove_locking || lock->key == &__lockdep_no_validate__) 3109 check = 0; 3110 3111 if (subclass < NR_LOCKDEP_CACHING_CLASSES) 3112 class = lock->class_cache[subclass]; 3113 /* 3114 * Not cached? 3115 */ 3116 if (unlikely(!class)) { 3117 class = register_lock_class(lock, subclass, 0); 3118 if (!class) 3119 return 0; 3120 } 3121 atomic_inc((atomic_t *)&class->ops); 3122 if (very_verbose(class)) { 3123 printk("\nacquire class [%p] %s", class->key, class->name); 3124 if (class->name_version > 1) 3125 printk("#%d", class->name_version); 3126 printk("\n"); 3127 dump_stack(); 3128 } 3129 3130 /* 3131 * Add the lock to the list of currently held locks. 3132 * (we dont increase the depth just yet, up until the 3133 * dependency checks are done) 3134 */ 3135 depth = curr->lockdep_depth; 3136 /* 3137 * Ran out of static storage for our per-task lock stack again have we? 3138 */ 3139 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) 3140 return 0; 3141 3142 class_idx = class - lock_classes + 1; 3143 3144 if (depth) { 3145 hlock = curr->held_locks + depth - 1; 3146 if (hlock->class_idx == class_idx && nest_lock) { 3147 if (hlock->references) 3148 hlock->references++; 3149 else 3150 hlock->references = 2; 3151 3152 return 1; 3153 } 3154 } 3155 3156 hlock = curr->held_locks + depth; 3157 /* 3158 * Plain impossible, we just registered it and checked it weren't no 3159 * NULL like.. I bet this mushroom I ate was good! 3160 */ 3161 if (DEBUG_LOCKS_WARN_ON(!class)) 3162 return 0; 3163 hlock->class_idx = class_idx; 3164 hlock->acquire_ip = ip; 3165 hlock->instance = lock; 3166 hlock->nest_lock = nest_lock; 3167 hlock->trylock = trylock; 3168 hlock->read = read; 3169 hlock->check = check; 3170 hlock->hardirqs_off = !!hardirqs_off; 3171 hlock->references = references; 3172 #ifdef CONFIG_LOCK_STAT 3173 hlock->waittime_stamp = 0; 3174 hlock->holdtime_stamp = lockstat_clock(); 3175 #endif 3176 hlock->pin_count = pin_count; 3177 3178 if (check && !mark_irqflags(curr, hlock)) 3179 return 0; 3180 3181 /* mark it as used: */ 3182 if (!mark_lock(curr, hlock, LOCK_USED)) 3183 return 0; 3184 3185 /* 3186 * Calculate the chain hash: it's the combined hash of all the 3187 * lock keys along the dependency chain. We save the hash value 3188 * at every step so that we can get the current hash easily 3189 * after unlock. The chain hash is then used to cache dependency 3190 * results. 3191 * 3192 * The 'key ID' is what is the most compact key value to drive 3193 * the hash, not class->key. 3194 */ 3195 /* 3196 * Whoops, we did it again.. ran straight out of our static allocation. 3197 */ 3198 if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS)) 3199 return 0; 3200 3201 chain_key = curr->curr_chain_key; 3202 if (!depth) { 3203 /* 3204 * How can we have a chain hash when we ain't got no keys?! 3205 */ 3206 if (DEBUG_LOCKS_WARN_ON(chain_key != 0)) 3207 return 0; 3208 chain_head = 1; 3209 } 3210 3211 hlock->prev_chain_key = chain_key; 3212 if (separate_irq_context(curr, hlock)) { 3213 chain_key = 0; 3214 chain_head = 1; 3215 } 3216 chain_key = iterate_chain_key(chain_key, class_idx); 3217 3218 if (nest_lock && !__lock_is_held(nest_lock)) 3219 return print_lock_nested_lock_not_held(curr, hlock, ip); 3220 3221 if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) 3222 return 0; 3223 3224 curr->curr_chain_key = chain_key; 3225 curr->lockdep_depth++; 3226 check_chain_key(curr); 3227 #ifdef CONFIG_DEBUG_LOCKDEP 3228 if (unlikely(!debug_locks)) 3229 return 0; 3230 #endif 3231 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { 3232 debug_locks_off(); 3233 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!"); 3234 printk(KERN_DEBUG "depth: %i max: %lu!\n", 3235 curr->lockdep_depth, MAX_LOCK_DEPTH); 3236 3237 lockdep_print_held_locks(current); 3238 debug_show_all_locks(); 3239 dump_stack(); 3240 3241 return 0; 3242 } 3243 3244 if (unlikely(curr->lockdep_depth > max_lockdep_depth)) 3245 max_lockdep_depth = curr->lockdep_depth; 3246 3247 return 1; 3248 } 3249 3250 static int 3251 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock, 3252 unsigned long ip) 3253 { 3254 if (!debug_locks_off()) 3255 return 0; 3256 if (debug_locks_silent) 3257 return 0; 3258 3259 printk("\n"); 3260 printk("=====================================\n"); 3261 printk("[ BUG: bad unlock balance detected! ]\n"); 3262 print_kernel_ident(); 3263 printk("-------------------------------------\n"); 3264 printk("%s/%d is trying to release lock (", 3265 curr->comm, task_pid_nr(curr)); 3266 print_lockdep_cache(lock); 3267 printk(") at:\n"); 3268 print_ip_sym(ip); 3269 printk("but there are no more locks to release!\n"); 3270 printk("\nother info that might help us debug this:\n"); 3271 lockdep_print_held_locks(curr); 3272 3273 printk("\nstack backtrace:\n"); 3274 dump_stack(); 3275 3276 return 0; 3277 } 3278 3279 static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock) 3280 { 3281 if (hlock->instance == lock) 3282 return 1; 3283 3284 if (hlock->references) { 3285 struct lock_class *class = lock->class_cache[0]; 3286 3287 if (!class) 3288 class = look_up_lock_class(lock, 0); 3289 3290 /* 3291 * If look_up_lock_class() failed to find a class, we're trying 3292 * to test if we hold a lock that has never yet been acquired. 3293 * Clearly if the lock hasn't been acquired _ever_, we're not 3294 * holding it either, so report failure. 3295 */ 3296 if (!class) 3297 return 0; 3298 3299 /* 3300 * References, but not a lock we're actually ref-counting? 3301 * State got messed up, follow the sites that change ->references 3302 * and try to make sense of it. 3303 */ 3304 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) 3305 return 0; 3306 3307 if (hlock->class_idx == class - lock_classes + 1) 3308 return 1; 3309 } 3310 3311 return 0; 3312 } 3313 3314 static int 3315 __lock_set_class(struct lockdep_map *lock, const char *name, 3316 struct lock_class_key *key, unsigned int subclass, 3317 unsigned long ip) 3318 { 3319 struct task_struct *curr = current; 3320 struct held_lock *hlock, *prev_hlock; 3321 struct lock_class *class; 3322 unsigned int depth; 3323 int i; 3324 3325 depth = curr->lockdep_depth; 3326 /* 3327 * This function is about (re)setting the class of a held lock, 3328 * yet we're not actually holding any locks. Naughty user! 3329 */ 3330 if (DEBUG_LOCKS_WARN_ON(!depth)) 3331 return 0; 3332 3333 prev_hlock = NULL; 3334 for (i = depth-1; i >= 0; i--) { 3335 hlock = curr->held_locks + i; 3336 /* 3337 * We must not cross into another context: 3338 */ 3339 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) 3340 break; 3341 if (match_held_lock(hlock, lock)) 3342 goto found_it; 3343 prev_hlock = hlock; 3344 } 3345 return print_unlock_imbalance_bug(curr, lock, ip); 3346 3347 found_it: 3348 lockdep_init_map(lock, name, key, 0); 3349 class = register_lock_class(lock, subclass, 0); 3350 hlock->class_idx = class - lock_classes + 1; 3351 3352 curr->lockdep_depth = i; 3353 curr->curr_chain_key = hlock->prev_chain_key; 3354 3355 for (; i < depth; i++) { 3356 hlock = curr->held_locks + i; 3357 if (!__lock_acquire(hlock->instance, 3358 hlock_class(hlock)->subclass, hlock->trylock, 3359 hlock->read, hlock->check, hlock->hardirqs_off, 3360 hlock->nest_lock, hlock->acquire_ip, 3361 hlock->references, hlock->pin_count)) 3362 return 0; 3363 } 3364 3365 /* 3366 * I took it apart and put it back together again, except now I have 3367 * these 'spare' parts.. where shall I put them. 3368 */ 3369 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) 3370 return 0; 3371 return 1; 3372 } 3373 3374 /* 3375 * Remove the lock to the list of currently held locks - this gets 3376 * called on mutex_unlock()/spin_unlock*() (or on a failed 3377 * mutex_lock_interruptible()). 3378 * 3379 * @nested is an hysterical artifact, needs a tree wide cleanup. 3380 */ 3381 static int 3382 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip) 3383 { 3384 struct task_struct *curr = current; 3385 struct held_lock *hlock, *prev_hlock; 3386 unsigned int depth; 3387 int i; 3388 3389 if (unlikely(!debug_locks)) 3390 return 0; 3391 3392 depth = curr->lockdep_depth; 3393 /* 3394 * So we're all set to release this lock.. wait what lock? We don't 3395 * own any locks, you've been drinking again? 3396 */ 3397 if (DEBUG_LOCKS_WARN_ON(depth <= 0)) 3398 return print_unlock_imbalance_bug(curr, lock, ip); 3399 3400 /* 3401 * Check whether the lock exists in the current stack 3402 * of held locks: 3403 */ 3404 prev_hlock = NULL; 3405 for (i = depth-1; i >= 0; i--) { 3406 hlock = curr->held_locks + i; 3407 /* 3408 * We must not cross into another context: 3409 */ 3410 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) 3411 break; 3412 if (match_held_lock(hlock, lock)) 3413 goto found_it; 3414 prev_hlock = hlock; 3415 } 3416 return print_unlock_imbalance_bug(curr, lock, ip); 3417 3418 found_it: 3419 if (hlock->instance == lock) 3420 lock_release_holdtime(hlock); 3421 3422 WARN(hlock->pin_count, "releasing a pinned lock\n"); 3423 3424 if (hlock->references) { 3425 hlock->references--; 3426 if (hlock->references) { 3427 /* 3428 * We had, and after removing one, still have 3429 * references, the current lock stack is still 3430 * valid. We're done! 3431 */ 3432 return 1; 3433 } 3434 } 3435 3436 /* 3437 * We have the right lock to unlock, 'hlock' points to it. 3438 * Now we remove it from the stack, and add back the other 3439 * entries (if any), recalculating the hash along the way: 3440 */ 3441 3442 curr->lockdep_depth = i; 3443 curr->curr_chain_key = hlock->prev_chain_key; 3444 3445 for (i++; i < depth; i++) { 3446 hlock = curr->held_locks + i; 3447 if (!__lock_acquire(hlock->instance, 3448 hlock_class(hlock)->subclass, hlock->trylock, 3449 hlock->read, hlock->check, hlock->hardirqs_off, 3450 hlock->nest_lock, hlock->acquire_ip, 3451 hlock->references, hlock->pin_count)) 3452 return 0; 3453 } 3454 3455 /* 3456 * We had N bottles of beer on the wall, we drank one, but now 3457 * there's not N-1 bottles of beer left on the wall... 3458 */ 3459 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) 3460 return 0; 3461 3462 return 1; 3463 } 3464 3465 static int __lock_is_held(struct lockdep_map *lock) 3466 { 3467 struct task_struct *curr = current; 3468 int i; 3469 3470 for (i = 0; i < curr->lockdep_depth; i++) { 3471 struct held_lock *hlock = curr->held_locks + i; 3472 3473 if (match_held_lock(hlock, lock)) 3474 return 1; 3475 } 3476 3477 return 0; 3478 } 3479 3480 static void __lock_pin_lock(struct lockdep_map *lock) 3481 { 3482 struct task_struct *curr = current; 3483 int i; 3484 3485 if (unlikely(!debug_locks)) 3486 return; 3487 3488 for (i = 0; i < curr->lockdep_depth; i++) { 3489 struct held_lock *hlock = curr->held_locks + i; 3490 3491 if (match_held_lock(hlock, lock)) { 3492 hlock->pin_count++; 3493 return; 3494 } 3495 } 3496 3497 WARN(1, "pinning an unheld lock\n"); 3498 } 3499 3500 static void __lock_unpin_lock(struct lockdep_map *lock) 3501 { 3502 struct task_struct *curr = current; 3503 int i; 3504 3505 if (unlikely(!debug_locks)) 3506 return; 3507 3508 for (i = 0; i < curr->lockdep_depth; i++) { 3509 struct held_lock *hlock = curr->held_locks + i; 3510 3511 if (match_held_lock(hlock, lock)) { 3512 if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n")) 3513 return; 3514 3515 hlock->pin_count--; 3516 return; 3517 } 3518 } 3519 3520 WARN(1, "unpinning an unheld lock\n"); 3521 } 3522 3523 /* 3524 * Check whether we follow the irq-flags state precisely: 3525 */ 3526 static void check_flags(unsigned long flags) 3527 { 3528 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \ 3529 defined(CONFIG_TRACE_IRQFLAGS) 3530 if (!debug_locks) 3531 return; 3532 3533 if (irqs_disabled_flags(flags)) { 3534 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) { 3535 printk("possible reason: unannotated irqs-off.\n"); 3536 } 3537 } else { 3538 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) { 3539 printk("possible reason: unannotated irqs-on.\n"); 3540 } 3541 } 3542 3543 /* 3544 * We dont accurately track softirq state in e.g. 3545 * hardirq contexts (such as on 4KSTACKS), so only 3546 * check if not in hardirq contexts: 3547 */ 3548 if (!hardirq_count()) { 3549 if (softirq_count()) { 3550 /* like the above, but with softirqs */ 3551 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); 3552 } else { 3553 /* lick the above, does it taste good? */ 3554 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); 3555 } 3556 } 3557 3558 if (!debug_locks) 3559 print_irqtrace_events(current); 3560 #endif 3561 } 3562 3563 void lock_set_class(struct lockdep_map *lock, const char *name, 3564 struct lock_class_key *key, unsigned int subclass, 3565 unsigned long ip) 3566 { 3567 unsigned long flags; 3568 3569 if (unlikely(current->lockdep_recursion)) 3570 return; 3571 3572 raw_local_irq_save(flags); 3573 current->lockdep_recursion = 1; 3574 check_flags(flags); 3575 if (__lock_set_class(lock, name, key, subclass, ip)) 3576 check_chain_key(current); 3577 current->lockdep_recursion = 0; 3578 raw_local_irq_restore(flags); 3579 } 3580 EXPORT_SYMBOL_GPL(lock_set_class); 3581 3582 /* 3583 * We are not always called with irqs disabled - do that here, 3584 * and also avoid lockdep recursion: 3585 */ 3586 void lock_acquire(struct lockdep_map *lock, unsigned int subclass, 3587 int trylock, int read, int check, 3588 struct lockdep_map *nest_lock, unsigned long ip) 3589 { 3590 unsigned long flags; 3591 3592 if (unlikely(current->lockdep_recursion)) 3593 return; 3594 3595 raw_local_irq_save(flags); 3596 check_flags(flags); 3597 3598 current->lockdep_recursion = 1; 3599 trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); 3600 __lock_acquire(lock, subclass, trylock, read, check, 3601 irqs_disabled_flags(flags), nest_lock, ip, 0, 0); 3602 current->lockdep_recursion = 0; 3603 raw_local_irq_restore(flags); 3604 } 3605 EXPORT_SYMBOL_GPL(lock_acquire); 3606 3607 void lock_release(struct lockdep_map *lock, int nested, 3608 unsigned long ip) 3609 { 3610 unsigned long flags; 3611 3612 if (unlikely(current->lockdep_recursion)) 3613 return; 3614 3615 raw_local_irq_save(flags); 3616 check_flags(flags); 3617 current->lockdep_recursion = 1; 3618 trace_lock_release(lock, ip); 3619 if (__lock_release(lock, nested, ip)) 3620 check_chain_key(current); 3621 current->lockdep_recursion = 0; 3622 raw_local_irq_restore(flags); 3623 } 3624 EXPORT_SYMBOL_GPL(lock_release); 3625 3626 int lock_is_held(struct lockdep_map *lock) 3627 { 3628 unsigned long flags; 3629 int ret = 0; 3630 3631 if (unlikely(current->lockdep_recursion)) 3632 return 1; /* avoid false negative lockdep_assert_held() */ 3633 3634 raw_local_irq_save(flags); 3635 check_flags(flags); 3636 3637 current->lockdep_recursion = 1; 3638 ret = __lock_is_held(lock); 3639 current->lockdep_recursion = 0; 3640 raw_local_irq_restore(flags); 3641 3642 return ret; 3643 } 3644 EXPORT_SYMBOL_GPL(lock_is_held); 3645 3646 void lock_pin_lock(struct lockdep_map *lock) 3647 { 3648 unsigned long flags; 3649 3650 if (unlikely(current->lockdep_recursion)) 3651 return; 3652 3653 raw_local_irq_save(flags); 3654 check_flags(flags); 3655 3656 current->lockdep_recursion = 1; 3657 __lock_pin_lock(lock); 3658 current->lockdep_recursion = 0; 3659 raw_local_irq_restore(flags); 3660 } 3661 EXPORT_SYMBOL_GPL(lock_pin_lock); 3662 3663 void lock_unpin_lock(struct lockdep_map *lock) 3664 { 3665 unsigned long flags; 3666 3667 if (unlikely(current->lockdep_recursion)) 3668 return; 3669 3670 raw_local_irq_save(flags); 3671 check_flags(flags); 3672 3673 current->lockdep_recursion = 1; 3674 __lock_unpin_lock(lock); 3675 current->lockdep_recursion = 0; 3676 raw_local_irq_restore(flags); 3677 } 3678 EXPORT_SYMBOL_GPL(lock_unpin_lock); 3679 3680 void lockdep_set_current_reclaim_state(gfp_t gfp_mask) 3681 { 3682 current->lockdep_reclaim_gfp = gfp_mask; 3683 } 3684 3685 void lockdep_clear_current_reclaim_state(void) 3686 { 3687 current->lockdep_reclaim_gfp = 0; 3688 } 3689 3690 #ifdef CONFIG_LOCK_STAT 3691 static int 3692 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock, 3693 unsigned long ip) 3694 { 3695 if (!debug_locks_off()) 3696 return 0; 3697 if (debug_locks_silent) 3698 return 0; 3699 3700 printk("\n"); 3701 printk("=================================\n"); 3702 printk("[ BUG: bad contention detected! ]\n"); 3703 print_kernel_ident(); 3704 printk("---------------------------------\n"); 3705 printk("%s/%d is trying to contend lock (", 3706 curr->comm, task_pid_nr(curr)); 3707 print_lockdep_cache(lock); 3708 printk(") at:\n"); 3709 print_ip_sym(ip); 3710 printk("but there are no locks held!\n"); 3711 printk("\nother info that might help us debug this:\n"); 3712 lockdep_print_held_locks(curr); 3713 3714 printk("\nstack backtrace:\n"); 3715 dump_stack(); 3716 3717 return 0; 3718 } 3719 3720 static void 3721 __lock_contended(struct lockdep_map *lock, unsigned long ip) 3722 { 3723 struct task_struct *curr = current; 3724 struct held_lock *hlock, *prev_hlock; 3725 struct lock_class_stats *stats; 3726 unsigned int depth; 3727 int i, contention_point, contending_point; 3728 3729 depth = curr->lockdep_depth; 3730 /* 3731 * Whee, we contended on this lock, except it seems we're not 3732 * actually trying to acquire anything much at all.. 3733 */ 3734 if (DEBUG_LOCKS_WARN_ON(!depth)) 3735 return; 3736 3737 prev_hlock = NULL; 3738 for (i = depth-1; i >= 0; i--) { 3739 hlock = curr->held_locks + i; 3740 /* 3741 * We must not cross into another context: 3742 */ 3743 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) 3744 break; 3745 if (match_held_lock(hlock, lock)) 3746 goto found_it; 3747 prev_hlock = hlock; 3748 } 3749 print_lock_contention_bug(curr, lock, ip); 3750 return; 3751 3752 found_it: 3753 if (hlock->instance != lock) 3754 return; 3755 3756 hlock->waittime_stamp = lockstat_clock(); 3757 3758 contention_point = lock_point(hlock_class(hlock)->contention_point, ip); 3759 contending_point = lock_point(hlock_class(hlock)->contending_point, 3760 lock->ip); 3761 3762 stats = get_lock_stats(hlock_class(hlock)); 3763 if (contention_point < LOCKSTAT_POINTS) 3764 stats->contention_point[contention_point]++; 3765 if (contending_point < LOCKSTAT_POINTS) 3766 stats->contending_point[contending_point]++; 3767 if (lock->cpu != smp_processor_id()) 3768 stats->bounces[bounce_contended + !!hlock->read]++; 3769 put_lock_stats(stats); 3770 } 3771 3772 static void 3773 __lock_acquired(struct lockdep_map *lock, unsigned long ip) 3774 { 3775 struct task_struct *curr = current; 3776 struct held_lock *hlock, *prev_hlock; 3777 struct lock_class_stats *stats; 3778 unsigned int depth; 3779 u64 now, waittime = 0; 3780 int i, cpu; 3781 3782 depth = curr->lockdep_depth; 3783 /* 3784 * Yay, we acquired ownership of this lock we didn't try to 3785 * acquire, how the heck did that happen? 3786 */ 3787 if (DEBUG_LOCKS_WARN_ON(!depth)) 3788 return; 3789 3790 prev_hlock = NULL; 3791 for (i = depth-1; i >= 0; i--) { 3792 hlock = curr->held_locks + i; 3793 /* 3794 * We must not cross into another context: 3795 */ 3796 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) 3797 break; 3798 if (match_held_lock(hlock, lock)) 3799 goto found_it; 3800 prev_hlock = hlock; 3801 } 3802 print_lock_contention_bug(curr, lock, _RET_IP_); 3803 return; 3804 3805 found_it: 3806 if (hlock->instance != lock) 3807 return; 3808 3809 cpu = smp_processor_id(); 3810 if (hlock->waittime_stamp) { 3811 now = lockstat_clock(); 3812 waittime = now - hlock->waittime_stamp; 3813 hlock->holdtime_stamp = now; 3814 } 3815 3816 trace_lock_acquired(lock, ip); 3817 3818 stats = get_lock_stats(hlock_class(hlock)); 3819 if (waittime) { 3820 if (hlock->read) 3821 lock_time_inc(&stats->read_waittime, waittime); 3822 else 3823 lock_time_inc(&stats->write_waittime, waittime); 3824 } 3825 if (lock->cpu != cpu) 3826 stats->bounces[bounce_acquired + !!hlock->read]++; 3827 put_lock_stats(stats); 3828 3829 lock->cpu = cpu; 3830 lock->ip = ip; 3831 } 3832 3833 void lock_contended(struct lockdep_map *lock, unsigned long ip) 3834 { 3835 unsigned long flags; 3836 3837 if (unlikely(!lock_stat)) 3838 return; 3839 3840 if (unlikely(current->lockdep_recursion)) 3841 return; 3842 3843 raw_local_irq_save(flags); 3844 check_flags(flags); 3845 current->lockdep_recursion = 1; 3846 trace_lock_contended(lock, ip); 3847 __lock_contended(lock, ip); 3848 current->lockdep_recursion = 0; 3849 raw_local_irq_restore(flags); 3850 } 3851 EXPORT_SYMBOL_GPL(lock_contended); 3852 3853 void lock_acquired(struct lockdep_map *lock, unsigned long ip) 3854 { 3855 unsigned long flags; 3856 3857 if (unlikely(!lock_stat)) 3858 return; 3859 3860 if (unlikely(current->lockdep_recursion)) 3861 return; 3862 3863 raw_local_irq_save(flags); 3864 check_flags(flags); 3865 current->lockdep_recursion = 1; 3866 __lock_acquired(lock, ip); 3867 current->lockdep_recursion = 0; 3868 raw_local_irq_restore(flags); 3869 } 3870 EXPORT_SYMBOL_GPL(lock_acquired); 3871 #endif 3872 3873 /* 3874 * Used by the testsuite, sanitize the validator state 3875 * after a simulated failure: 3876 */ 3877 3878 void lockdep_reset(void) 3879 { 3880 unsigned long flags; 3881 int i; 3882 3883 raw_local_irq_save(flags); 3884 current->curr_chain_key = 0; 3885 current->lockdep_depth = 0; 3886 current->lockdep_recursion = 0; 3887 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock)); 3888 nr_hardirq_chains = 0; 3889 nr_softirq_chains = 0; 3890 nr_process_chains = 0; 3891 debug_locks = 1; 3892 for (i = 0; i < CHAINHASH_SIZE; i++) 3893 INIT_HLIST_HEAD(chainhash_table + i); 3894 raw_local_irq_restore(flags); 3895 } 3896 3897 static void zap_class(struct lock_class *class) 3898 { 3899 int i; 3900 3901 /* 3902 * Remove all dependencies this lock is 3903 * involved in: 3904 */ 3905 for (i = 0; i < nr_list_entries; i++) { 3906 if (list_entries[i].class == class) 3907 list_del_rcu(&list_entries[i].entry); 3908 } 3909 /* 3910 * Unhash the class and remove it from the all_lock_classes list: 3911 */ 3912 hlist_del_rcu(&class->hash_entry); 3913 list_del_rcu(&class->lock_entry); 3914 3915 RCU_INIT_POINTER(class->key, NULL); 3916 RCU_INIT_POINTER(class->name, NULL); 3917 } 3918 3919 static inline int within(const void *addr, void *start, unsigned long size) 3920 { 3921 return addr >= start && addr < start + size; 3922 } 3923 3924 /* 3925 * Used in module.c to remove lock classes from memory that is going to be 3926 * freed; and possibly re-used by other modules. 3927 * 3928 * We will have had one sync_sched() before getting here, so we're guaranteed 3929 * nobody will look up these exact classes -- they're properly dead but still 3930 * allocated. 3931 */ 3932 void lockdep_free_key_range(void *start, unsigned long size) 3933 { 3934 struct lock_class *class; 3935 struct hlist_head *head; 3936 unsigned long flags; 3937 int i; 3938 int locked; 3939 3940 raw_local_irq_save(flags); 3941 locked = graph_lock(); 3942 3943 /* 3944 * Unhash all classes that were created by this module: 3945 */ 3946 for (i = 0; i < CLASSHASH_SIZE; i++) { 3947 head = classhash_table + i; 3948 hlist_for_each_entry_rcu(class, head, hash_entry) { 3949 if (within(class->key, start, size)) 3950 zap_class(class); 3951 else if (within(class->name, start, size)) 3952 zap_class(class); 3953 } 3954 } 3955 3956 if (locked) 3957 graph_unlock(); 3958 raw_local_irq_restore(flags); 3959 3960 /* 3961 * Wait for any possible iterators from look_up_lock_class() to pass 3962 * before continuing to free the memory they refer to. 3963 * 3964 * sync_sched() is sufficient because the read-side is IRQ disable. 3965 */ 3966 synchronize_sched(); 3967 3968 /* 3969 * XXX at this point we could return the resources to the pool; 3970 * instead we leak them. We would need to change to bitmap allocators 3971 * instead of the linear allocators we have now. 3972 */ 3973 } 3974 3975 void lockdep_reset_lock(struct lockdep_map *lock) 3976 { 3977 struct lock_class *class; 3978 struct hlist_head *head; 3979 unsigned long flags; 3980 int i, j; 3981 int locked; 3982 3983 raw_local_irq_save(flags); 3984 3985 /* 3986 * Remove all classes this lock might have: 3987 */ 3988 for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) { 3989 /* 3990 * If the class exists we look it up and zap it: 3991 */ 3992 class = look_up_lock_class(lock, j); 3993 if (class) 3994 zap_class(class); 3995 } 3996 /* 3997 * Debug check: in the end all mapped classes should 3998 * be gone. 3999 */ 4000 locked = graph_lock(); 4001 for (i = 0; i < CLASSHASH_SIZE; i++) { 4002 head = classhash_table + i; 4003 hlist_for_each_entry_rcu(class, head, hash_entry) { 4004 int match = 0; 4005 4006 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) 4007 match |= class == lock->class_cache[j]; 4008 4009 if (unlikely(match)) { 4010 if (debug_locks_off_graph_unlock()) { 4011 /* 4012 * We all just reset everything, how did it match? 4013 */ 4014 WARN_ON(1); 4015 } 4016 goto out_restore; 4017 } 4018 } 4019 } 4020 if (locked) 4021 graph_unlock(); 4022 4023 out_restore: 4024 raw_local_irq_restore(flags); 4025 } 4026 4027 void __init lockdep_info(void) 4028 { 4029 printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n"); 4030 4031 printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES); 4032 printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH); 4033 printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS); 4034 printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE); 4035 printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES); 4036 printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS); 4037 printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE); 4038 4039 printk(" memory used by lock dependency info: %lu kB\n", 4040 (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS + 4041 sizeof(struct list_head) * CLASSHASH_SIZE + 4042 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES + 4043 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS + 4044 sizeof(struct list_head) * CHAINHASH_SIZE 4045 #ifdef CONFIG_PROVE_LOCKING 4046 + sizeof(struct circular_queue) 4047 #endif 4048 ) / 1024 4049 ); 4050 4051 printk(" per task-struct memory footprint: %lu bytes\n", 4052 sizeof(struct held_lock) * MAX_LOCK_DEPTH); 4053 } 4054 4055 static void 4056 print_freed_lock_bug(struct task_struct *curr, const void *mem_from, 4057 const void *mem_to, struct held_lock *hlock) 4058 { 4059 if (!debug_locks_off()) 4060 return; 4061 if (debug_locks_silent) 4062 return; 4063 4064 printk("\n"); 4065 printk("=========================\n"); 4066 printk("[ BUG: held lock freed! ]\n"); 4067 print_kernel_ident(); 4068 printk("-------------------------\n"); 4069 printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n", 4070 curr->comm, task_pid_nr(curr), mem_from, mem_to-1); 4071 print_lock(hlock); 4072 lockdep_print_held_locks(curr); 4073 4074 printk("\nstack backtrace:\n"); 4075 dump_stack(); 4076 } 4077 4078 static inline int not_in_range(const void* mem_from, unsigned long mem_len, 4079 const void* lock_from, unsigned long lock_len) 4080 { 4081 return lock_from + lock_len <= mem_from || 4082 mem_from + mem_len <= lock_from; 4083 } 4084 4085 /* 4086 * Called when kernel memory is freed (or unmapped), or if a lock 4087 * is destroyed or reinitialized - this code checks whether there is 4088 * any held lock in the memory range of <from> to <to>: 4089 */ 4090 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) 4091 { 4092 struct task_struct *curr = current; 4093 struct held_lock *hlock; 4094 unsigned long flags; 4095 int i; 4096 4097 if (unlikely(!debug_locks)) 4098 return; 4099 4100 local_irq_save(flags); 4101 for (i = 0; i < curr->lockdep_depth; i++) { 4102 hlock = curr->held_locks + i; 4103 4104 if (not_in_range(mem_from, mem_len, hlock->instance, 4105 sizeof(*hlock->instance))) 4106 continue; 4107 4108 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); 4109 break; 4110 } 4111 local_irq_restore(flags); 4112 } 4113 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); 4114 4115 static void print_held_locks_bug(void) 4116 { 4117 if (!debug_locks_off()) 4118 return; 4119 if (debug_locks_silent) 4120 return; 4121 4122 printk("\n"); 4123 printk("=====================================\n"); 4124 printk("[ BUG: %s/%d still has locks held! ]\n", 4125 current->comm, task_pid_nr(current)); 4126 print_kernel_ident(); 4127 printk("-------------------------------------\n"); 4128 lockdep_print_held_locks(current); 4129 printk("\nstack backtrace:\n"); 4130 dump_stack(); 4131 } 4132 4133 void debug_check_no_locks_held(void) 4134 { 4135 if (unlikely(current->lockdep_depth > 0)) 4136 print_held_locks_bug(); 4137 } 4138 EXPORT_SYMBOL_GPL(debug_check_no_locks_held); 4139 4140 #ifdef __KERNEL__ 4141 void debug_show_all_locks(void) 4142 { 4143 struct task_struct *g, *p; 4144 int count = 10; 4145 int unlock = 1; 4146 4147 if (unlikely(!debug_locks)) { 4148 printk("INFO: lockdep is turned off.\n"); 4149 return; 4150 } 4151 printk("\nShowing all locks held in the system:\n"); 4152 4153 /* 4154 * Here we try to get the tasklist_lock as hard as possible, 4155 * if not successful after 2 seconds we ignore it (but keep 4156 * trying). This is to enable a debug printout even if a 4157 * tasklist_lock-holding task deadlocks or crashes. 4158 */ 4159 retry: 4160 if (!read_trylock(&tasklist_lock)) { 4161 if (count == 10) 4162 printk("hm, tasklist_lock locked, retrying... "); 4163 if (count) { 4164 count--; 4165 printk(" #%d", 10-count); 4166 mdelay(200); 4167 goto retry; 4168 } 4169 printk(" ignoring it.\n"); 4170 unlock = 0; 4171 } else { 4172 if (count != 10) 4173 printk(KERN_CONT " locked it.\n"); 4174 } 4175 4176 do_each_thread(g, p) { 4177 /* 4178 * It's not reliable to print a task's held locks 4179 * if it's not sleeping (or if it's not the current 4180 * task): 4181 */ 4182 if (p->state == TASK_RUNNING && p != current) 4183 continue; 4184 if (p->lockdep_depth) 4185 lockdep_print_held_locks(p); 4186 if (!unlock) 4187 if (read_trylock(&tasklist_lock)) 4188 unlock = 1; 4189 } while_each_thread(g, p); 4190 4191 printk("\n"); 4192 printk("=============================================\n\n"); 4193 4194 if (unlock) 4195 read_unlock(&tasklist_lock); 4196 } 4197 EXPORT_SYMBOL_GPL(debug_show_all_locks); 4198 #endif 4199 4200 /* 4201 * Careful: only use this function if you are sure that 4202 * the task cannot run in parallel! 4203 */ 4204 void debug_show_held_locks(struct task_struct *task) 4205 { 4206 if (unlikely(!debug_locks)) { 4207 printk("INFO: lockdep is turned off.\n"); 4208 return; 4209 } 4210 lockdep_print_held_locks(task); 4211 } 4212 EXPORT_SYMBOL_GPL(debug_show_held_locks); 4213 4214 asmlinkage __visible void lockdep_sys_exit(void) 4215 { 4216 struct task_struct *curr = current; 4217 4218 if (unlikely(curr->lockdep_depth)) { 4219 if (!debug_locks_off()) 4220 return; 4221 printk("\n"); 4222 printk("================================================\n"); 4223 printk("[ BUG: lock held when returning to user space! ]\n"); 4224 print_kernel_ident(); 4225 printk("------------------------------------------------\n"); 4226 printk("%s/%d is leaving the kernel with locks still held!\n", 4227 curr->comm, curr->pid); 4228 lockdep_print_held_locks(curr); 4229 } 4230 } 4231 4232 void lockdep_rcu_suspicious(const char *file, const int line, const char *s) 4233 { 4234 struct task_struct *curr = current; 4235 4236 #ifndef CONFIG_PROVE_RCU_REPEATEDLY 4237 if (!debug_locks_off()) 4238 return; 4239 #endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */ 4240 /* Note: the following can be executed concurrently, so be careful. */ 4241 printk("\n"); 4242 printk("===============================\n"); 4243 printk("[ INFO: suspicious RCU usage. ]\n"); 4244 print_kernel_ident(); 4245 printk("-------------------------------\n"); 4246 printk("%s:%d %s!\n", file, line, s); 4247 printk("\nother info that might help us debug this:\n\n"); 4248 printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n", 4249 !rcu_lockdep_current_cpu_online() 4250 ? "RCU used illegally from offline CPU!\n" 4251 : !rcu_is_watching() 4252 ? "RCU used illegally from idle CPU!\n" 4253 : "", 4254 rcu_scheduler_active, debug_locks); 4255 4256 /* 4257 * If a CPU is in the RCU-free window in idle (ie: in the section 4258 * between rcu_idle_enter() and rcu_idle_exit(), then RCU 4259 * considers that CPU to be in an "extended quiescent state", 4260 * which means that RCU will be completely ignoring that CPU. 4261 * Therefore, rcu_read_lock() and friends have absolutely no 4262 * effect on a CPU running in that state. In other words, even if 4263 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well 4264 * delete data structures out from under it. RCU really has no 4265 * choice here: we need to keep an RCU-free window in idle where 4266 * the CPU may possibly enter into low power mode. This way we can 4267 * notice an extended quiescent state to other CPUs that started a grace 4268 * period. Otherwise we would delay any grace period as long as we run 4269 * in the idle task. 4270 * 4271 * So complain bitterly if someone does call rcu_read_lock(), 4272 * rcu_read_lock_bh() and so on from extended quiescent states. 4273 */ 4274 if (!rcu_is_watching()) 4275 printk("RCU used illegally from extended quiescent state!\n"); 4276 4277 lockdep_print_held_locks(curr); 4278 printk("\nstack backtrace:\n"); 4279 dump_stack(); 4280 } 4281 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious); 4282