// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e., if at any time in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/nmi.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>
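/*
 * A minimal sketch (not part of the validator, locks A and B are
 * hypothetical) of the kind of bug described above: if one context ever
 * does
 *
 *	spin_lock(&A);
 *	spin_lock(&B);
 *
 * while another context does
 *
 *	spin_lock(&B);
 *	spin_lock(&A);
 *
 * the two can deadlock if they interleave. Lockdep records the A->B and
 * B->A dependencies the first time each order is observed and reports
 * the inversion even if no deadlock actually happens on that run.
 */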
#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

DEFINE_PER_CPU(unsigned int, lockdep_recursion);
EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);

static inline bool lockdep_enabled(void)
{
	if (!debug_locks)
		return false;

	if (this_cpu_read(lockdep_recursion))
		return false;

	if (current->lockdep_recursion)
		return false;

	return true;
}

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static struct task_struct *__owner;

static inline void lockdep_lock(void)
{
	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	__this_cpu_inc(lockdep_recursion);
	arch_spin_lock(&__lock);
	__owner = current;
}

static inline void lockdep_unlock(void)
{
	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
		return;

	__owner = NULL;
	arch_spin_unlock(&__lock);
	__this_cpu_dec(lockdep_recursion);
}

static inline bool lockdep_assert_locked(void)
{
	return DEBUG_LOCKS_WARN_ON(__owner != current);
}

static struct task_struct *lockdep_selftest_task_struct;
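/*
 * The usual calling pattern for the graph lock is (a sketch; see e.g.
 * lockdep_register_key() below for a real instance):
 *
 *	raw_local_irq_save(flags);
 *	if (!graph_lock())
 *		goto restore_irqs;	// debug_locks went off, bail out
 *	... modify the dependency graph ...
 *	graph_unlock();
 * restore_irqs:
 *	raw_local_irq_restore(flags);
 */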
static int graph_lock(void)
{
	lockdep_lock();
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		lockdep_unlock();
		return 0;
	}
	return 1;
}

static inline void graph_unlock(void)
{
	lockdep_unlock();
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	lockdep_unlock();

	return ret;
}

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);

/*
 * All data structures here are protected by the global debug_lock.
 *
 * nr_lock_classes is the number of elements of lock_classes[] that are
 * in use.
 */
#define KEYHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define KEYHASH_SIZE		(1UL << KEYHASH_BITS)
static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
unsigned long nr_lock_classes;
unsigned long nr_zapped_classes;
#ifndef CONFIG_DEBUG_LOCKDEP
static
#endif
struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	unsigned int class_idx = hlock->class_idx;

	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
	barrier();

	if (!test_bit(class_idx, lock_classes_in_use)) {
		/*
		 * Someone passed in garbage, we give up.
		 */
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}

	/*
	 * At this point, if the passed hlock->class_idx is still garbage,
	 * we just have to live with it.
	 */
	return lock_classes + class_idx;
}

#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);

static inline u64 lockstat_clock(void)
{
	return local_clock();
}

static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;
			break;
		}
		if (points[i] == ip)
			break;
	}

	return i;
}

static void lock_time_inc(struct lock_time *lt, u64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->nr)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	if (!src->nr)
		return;

	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}
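/*
 * Worked example (illustrative numbers only): three waits of 2, 5 and 3
 * time units fed through lock_time_inc() leave a counter at min = 2,
 * max = 5, total = 10, nr = 3. lock_time_add() then folds such a per-cpu
 * counter into a global sum with the same min/max/total/nr semantics.
 */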
struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
			stats.contending_point[i] += pcs->contending_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}

void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
	memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
}

static void lock_release_holdtime(struct held_lock *hlock)
{
	struct lock_class_stats *stats;
	u64 holdtime;

	if (!lock_stat)
		return;

	holdtime = lockstat_clock() - hlock->holdtime_stamp;

	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif
/*
 * We keep a global list of all lock classes. The list is only accessed with
 * the lockdep spinlock lock held. free_lock_classes is a list with free
 * elements. These elements are linked together by the lock_entry member in
 * struct lock_class.
 */
LIST_HEAD(all_lock_classes);
static LIST_HEAD(free_lock_classes);

/**
 * struct pending_free - information about data structures about to be freed
 * @zapped: Head of a list with struct lock_class elements.
 * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
 *	are about to be freed.
 */
struct pending_free {
	struct list_head zapped;
	DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
};

/**
 * struct delayed_free - data structures used for delayed freeing
 *
 * A data structure for delayed freeing of data structures that may be
 * accessed by RCU readers at the time these were freed.
 *
 * @rcu_head:  Used to schedule an RCU callback for freeing data structures.
 * @index:     Index of @pf to which freed data structures are added.
 * @scheduled: Whether or not an RCU callback has been scheduled.
 * @pf:        Array with information about data structures about to be freed.
 */
static struct delayed_free {
	struct rcu_head rcu_head;
	int index;
	int scheduled;
	struct pending_free pf[2];
} delayed_free;

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct hlist_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct hlist_head chainhash_table[CHAINHASH_SIZE];

/*
 * The id of a held_lock:
 */
static inline u16 hlock_id(struct held_lock *hlock)
{
	BUILD_BUG_ON(MAX_LOCKDEP_KEYS_BITS + 2 > 16);

	return (hlock->class_idx | (hlock->read << MAX_LOCKDEP_KEYS_BITS));
}

static inline unsigned int chain_hlock_class_idx(u16 hlock_id)
{
	return hlock_id & (MAX_LOCKDEP_KEYS - 1);
}
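/*
 * Illustration of the packing above (values assumed for the example):
 * the low MAX_LOCKDEP_KEYS_BITS bits of the id carry class_idx and the
 * two bits above them carry hlock->read, which is what the BUILD_BUG_ON
 * checks still fits in 16 bits. E.g. with class_idx = 5 and read = 1:
 *
 *	id = 5 | (1 << MAX_LOCKDEP_KEYS_BITS);
 *	chain_hlock_class_idx(id) == 5;	// the mask strips the read bits
 */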
/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
static inline u64 iterate_chain_key(u64 key, u32 idx)
{
	u32 k0 = key, k1 = key >> 32;

	__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */

	return k0 | (u64)k1 << 32;
}
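/*
 * A sketch of how a chain key accumulates (see check_lock_chain_key()
 * below for the real loop): starting from INITIAL_CHAIN_KEY, the hlock
 * id of each held lock is folded in, in acquisition order:
 *
 *	u64 key = INITIAL_CHAIN_KEY;
 *	key = iterate_chain_key(key, id_A);
 *	key = iterate_chain_key(key, id_B);
 *
 * Folding the same ids in the other order yields a different key, which
 * is what makes the cached chains sensitive to locking order.
 */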
void lockdep_init_task(struct task_struct *task)
{
	task->lockdep_depth = 0; /* no locks held yet */
	task->curr_chain_key = INITIAL_CHAIN_KEY;
	task->lockdep_recursion = 0;
}

static __always_inline void lockdep_recursion_inc(void)
{
	__this_cpu_inc(lockdep_recursion);
}

static __always_inline void lockdep_recursion_finish(void)
{
	if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion)))
		__this_cpu_write(lockdep_recursion, 0);
}

void lockdep_set_selftest_task(struct task_struct *task)
{
	lockdep_selftest_task_struct = task;
}

/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. 1 would be to allow everything else */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}

static void print_lockdep_off(const char *bug_msg)
{
	printk(KERN_DEBUG "%s\n", bug_msg);
	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
#ifdef CONFIG_LOCK_STAT
	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
#endif
}

unsigned long nr_stack_trace_entries;

#ifdef CONFIG_PROVE_LOCKING
/**
 * struct lock_trace - single stack backtrace
 * @hash_entry: Entry in a stack_trace_hash[] list.
 * @hash: jhash() of @entries.
 * @nr_entries: Number of entries in @entries.
 * @entries: Actual stack backtrace.
 */
struct lock_trace {
	struct hlist_node	hash_entry;
	u32			hash;
	u32			nr_entries;
	unsigned long		entries[] __aligned(sizeof(unsigned long));
};
#define LOCK_TRACE_SIZE_IN_LONGS				\
	(sizeof(struct lock_trace) / sizeof(unsigned long))
/*
 * Stack-trace: sequence of lock_trace structures. Protected by the graph_lock.
 */
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE];

static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2)
{
	return t1->hash == t2->hash &&
	       t1->nr_entries == t2->nr_entries &&
	       memcmp(t1->entries, t2->entries,
		      t1->nr_entries * sizeof(t1->entries[0])) == 0;
}

static struct lock_trace *save_trace(void)
{
	struct lock_trace *trace, *t2;
	struct hlist_head *hash_head;
	u32 hash;
	int max_entries;

	BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
	BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);

	trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
		LOCK_TRACE_SIZE_IN_LONGS;

	if (max_entries <= 0) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
		dump_stack();

		return NULL;
	}
	trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);

	hash = jhash(trace->entries, trace->nr_entries *
		     sizeof(trace->entries[0]), 0);
	trace->hash = hash;
	hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1));
	hlist_for_each_entry(t2, hash_head, hash_entry) {
		if (traces_identical(trace, t2))
			return t2;
	}
	nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries;
	hlist_add_head(&trace->hash_entry, hash_head);

	return trace;
}
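/*
 * Note on the flow above: the candidate trace is written speculatively
 * at the end of stack_trace[] and nr_stack_trace_entries only advances
 * when no identical trace is already hashed, so a duplicate backtrace
 * costs no permanent storage, only the hash lookup.
 */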
/* Return the number of stack traces in the stack_trace[] array. */
u64 lockdep_stack_trace_count(void)
{
	struct lock_trace *trace;
	u64 c = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) {
		hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) {
			c++;
		}
	}

	return c;
}

/* Return the number of stack hash chains that have at least one stack trace. */
u64 lockdep_stack_hash_count(void)
{
	u64 c = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++)
		if (!hlist_empty(&stack_trace_hash[i]))
			c++;

	return c;
}
#endif

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif

#ifdef CONFIG_PROVE_LOCKING
/*
 * Locking printouts:
 */

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",		\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	[LOCK_USED] = "INITIAL USE",
	[LOCK_USED_READ] = "INITIAL READ USE",
	/* abused as string storage for verify_lock_unused() */
	[LOCK_USAGE_STATES] = "IN-NMI",
};
#endif

const char *__get_key_name(const struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
	return 1UL << bit;
}
static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	/*
	 * The usage character defaults to '.' (i.e., irqs disabled and not in
	 * irq context), which is the safest usage category.
	 */
	char c = '.';

	/*
	 * The order of the following usage checks matters, which will
	 * result in the outcome character as follows:
	 *
	 * - '+': irq is enabled and not in irq context
	 * - '-': in irq context and irq is disabled
	 * - '?': in irq context and irq is enabled
	 */
	if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
		c = '+';
		if (class->usage_mask & lock_flag(bit))
			c = '?';
	} else if (class->usage_mask & lock_flag(bit))
		c = '-';

	return c;
}

void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
	int i = 0;

#define LOCKDEP_STATE(__STATE) 						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	usage[i] = '\0';
}

static void __print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN];
	const char *name;

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk(KERN_CONT "%s", name);
	} else {
		printk(KERN_CONT "%s", name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		if (class->subclass)
			printk(KERN_CONT "/%d", class->subclass);
	}
}

static void print_lock_name(struct lock_class *class)
{
	char usage[LOCK_USAGE_CHARS];

	get_usage_chars(class, usage);

	printk(KERN_CONT " (");
	__print_lock_name(class);
	printk(KERN_CONT "){%s}-{%hd:%hd}", usage,
	       class->wait_type_outer ?: class->wait_type_inner,
	       class->wait_type_inner);
}
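/*
 * Example of the resulting annotation (illustrative lock name, not real
 * output from this kernel): a line such as
 *
 *	 (&f->lock){+.+.}-{3:3}
 *
 * decodes, per get_usage_char() above, as hardirq-write '+' (taken with
 * irqs enabled, never in hardirq context), hardirq-read '.',
 * softirq-write '+', softirq-read '.', followed by the outer:inner
 * wait types.
 */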
static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk(KERN_CONT "%s", name);
}

static void print_lock(struct held_lock *hlock)
{
	/*
	 * We can be called locklessly through debug_show_all_locks() so be
	 * extra careful, the hlock might have been released and cleared.
	 *
	 * If this indeed happens, let's pretend it does not hurt to continue
	 * to print the lock unless the hlock class_idx does not point to a
	 * registered class. The rationale here is: since we don't attempt
	 * to distinguish whether we are in this situation, if it just
	 * happened we can't count on class_idx to tell either.
	 */
	struct lock_class *lock = hlock_class(hlock);

	if (!lock) {
		printk(KERN_CONT "<RELEASED>\n");
		return;
	}

	printk(KERN_CONT "%px", hlock->instance);
	print_lock_name(lock);
	printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
}
static void lockdep_print_held_locks(struct task_struct *p)
{
	int i, depth = READ_ONCE(p->lockdep_depth);

	if (!depth)
		printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
	else
		printk("%d lock%s held by %s/%d:\n", depth,
		       depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
	/*
	 * It's not reliable to print a task's held locks if it's not sleeping
	 * and it's not the current task.
	 */
	if (p->state == TASK_RUNNING && p != current)
		return;
	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(p->held_locks + i);
	}
}

static void print_kernel_ident(void)
{
	printk("%s %.*s %s\n", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version,
		print_tainted());
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Is this the address of a static object:
 */
#ifdef __KERNEL__
static int static_obj(const void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;

	if (arch_is_kernel_initmem_freed(addr))
		return 0;

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	if (arch_is_kernel_data(addr))
		return 1;

	/*
	 * in-kernel percpu var?
	 */
	if (is_kernel_percpu_address(addr))
		return 1;

	/*
	 * module static or percpu var?
	 */
	return is_module_address(addr) || is_module_percpu_address(addr);
}
#endif
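/*
 * For illustration (hypothetical variables): a file-scope
 * "static DEFINE_SPINLOCK(foo_lock);" lives between _stext and _end and
 * counts as a static object, as do percpu and module-static variables;
 * a lock embedded in a kmalloc()ed structure does not, and therefore
 * needs a persistent lock_class_key.
 */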
/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter. The caller must hold the graph
 * lock.
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}
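/*
 * E.g. (hypothetical classes): if two earlier classes already carry the
 * name "lockname" with name_version 1 and 2, a third distinct class with
 * the same name gets name_version 3 and __print_lock_name() above shows
 * it as "lockname#3".
 */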
/* used from NMI context -- must be lockless */
static __always_inline struct lock_class *
look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		debug_locks_off();
		printk(KERN_ERR
			"BUG: looking up invalid subclass: %u\n", subclass);
		printk(KERN_ERR
			"turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}

	/*
	 * If it is not initialised then it has never been locked,
	 * so it won't be present in the hash table.
	 */
	if (unlikely(!lock->key))
		return NULL;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We do an RCU walk of the hash, see lockdep_free_key_range().
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return NULL;

	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key) {
			/*
			 * Huh! same key, different name? Did someone trample
			 * on some memory? We're most confused.
			 */
			WARN_ON_ONCE(class->name != lock->name &&
				     lock->key != &__lockdep_no_validate__);
			return class;
		}
	}

	return NULL;
}
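/*
 * Key selection in practice, per the NOTE above (a sketch with
 * hypothetical variables):
 *
 *	static DEFINE_SPINLOCK(foo_lock);	// static lock: &foo_lock
 *						// itself is the key
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	spin_lock_init(&f->lock);		// dynamic lock: the static
 *						// lock_class_key declared
 *						// inside spin_lock_init()
 *						// acts as the key
 */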
/*
 * Static locks do not have their class-keys yet - for them the key is
 * the lock object itself. If the lock is in the per cpu area, the
 * canonical address of the lock (per cpu offset removed) is used.
 */
static bool assign_lock_key(struct lockdep_map *lock)
{
	unsigned long can_addr, addr = (unsigned long)lock;

#ifdef __KERNEL__
	/*
	 * lockdep_free_key_range() assumes that struct lock_class_key
	 * objects do not overlap. Since we use the address of lock
	 * objects as class key for static objects, check whether the
	 * size of lock_class_key objects does not exceed the size of
	 * the smallest lock object.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
#endif

	if (__is_kernel_percpu_address(addr, &can_addr))
		lock->key = (void *)can_addr;
	else if (__is_module_percpu_address(addr, &can_addr))
		lock->key = (void *)can_addr;
	else if (static_obj(lock))
		lock->key = (void *)lock;
	else {
		/* Debug-check: all keys must be persistent! */
		debug_locks_off();
		pr_err("INFO: trying to register non-static key.\n");
		pr_err("the code is fine but needs lockdep annotation.\n");
		pr_err("turning off the locking correctness validator.\n");
		dump_stack();
		return false;
	}

	return true;
}

#ifdef CONFIG_DEBUG_LOCKDEP

/* Check whether element @e occurs in list @h */
static bool in_list(struct list_head *e, struct list_head *h)
{
	struct list_head *f;

	list_for_each(f, h) {
		if (e == f)
			return true;
	}

	return false;
}

/*
 * Check whether entry @e occurs in any of the locks_after or locks_before
 * lists.
 */
static bool in_any_class_list(struct list_head *e)
{
	struct lock_class *class;
	int i;

	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
		class = &lock_classes[i];
		if (in_list(e, &class->locks_after) ||
		    in_list(e, &class->locks_before))
			return true;
	}
	return false;
}
static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
{
	struct lock_list *e;

	list_for_each_entry(e, h, entry) {
		if (e->links_to != c) {
			printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
			       c->name ? : "(?)",
			       (unsigned long)(e - list_entries),
			       e->links_to && e->links_to->name ?
			       e->links_to->name : "(?)",
			       e->class && e->class->name ?
			       e->class->name : "(?)");
			return false;
		}
	}
	return true;
}

#ifdef CONFIG_PROVE_LOCKING
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
#endif

static bool check_lock_chain_key(struct lock_chain *chain)
{
#ifdef CONFIG_PROVE_LOCKING
	u64 chain_key = INITIAL_CHAIN_KEY;
	int i;

	for (i = chain->base; i < chain->base + chain->depth; i++)
		chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
	/*
	 * The 'unsigned long long' casts avoid a compiler warning when
	 * building tools/lib/lockdep.
	 */
	if (chain->chain_key != chain_key) {
		printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
		       (unsigned long long)(chain - lock_chains),
		       (unsigned long long)chain->chain_key,
		       (unsigned long long)chain_key);
		return false;
	}
#endif
	return true;
}

static bool in_any_zapped_class_list(struct lock_class *class)
{
	struct pending_free *pf;
	int i;

	for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
		if (in_list(&class->lock_entry, &pf->zapped))
			return true;
	}

	return false;
}

static bool __check_data_structures(void)
{
	struct lock_class *class;
	struct lock_chain *chain;
	struct hlist_head *head;
	struct lock_list *e;
	int i;

	/* Check whether all classes occur in a lock list. */
	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
		class = &lock_classes[i];
		if (!in_list(&class->lock_entry, &all_lock_classes) &&
		    !in_list(&class->lock_entry, &free_lock_classes) &&
		    !in_any_zapped_class_list(class)) {
			printk(KERN_INFO "class %px/%s is not in any class list\n",
			       class, class->name ? : "(?)");
			return false;
		}
	}
	/* Check whether all classes have valid lock lists. */
	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
		class = &lock_classes[i];
		if (!class_lock_list_valid(class, &class->locks_before))
			return false;
		if (!class_lock_list_valid(class, &class->locks_after))
			return false;
	}

	/* Check the chain_key of all lock chains. */
	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
		head = chainhash_table + i;
		hlist_for_each_entry_rcu(chain, head, entry) {
			if (!check_lock_chain_key(chain))
				return false;
		}
	}

	/*
	 * Check whether all list entries that are in use occur in a class
	 * lock list.
	 */
	for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
		e = list_entries + i;
		if (!in_any_class_list(&e->entry)) {
			printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
			       (unsigned int)(e - list_entries),
			       e->class->name ? : "(?)",
			       e->links_to->name ? : "(?)");
			return false;
		}
	}

	/*
	 * Check whether all list entries that are not in use do not occur in
	 * a class lock list.
	 */
	for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
		e = list_entries + i;
		if (in_any_class_list(&e->entry)) {
			printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
			       (unsigned int)(e - list_entries),
			       e->class && e->class->name ? e->class->name :
			       "(?)",
			       e->links_to && e->links_to->name ?
			       e->links_to->name : "(?)");
			return false;
		}
	}

	return true;
}
1099b526b2e3SBart Van Assche e->links_to->name : "(?)"); 1100b526b2e3SBart Van Assche return false; 1101b526b2e3SBart Van Assche } 1102b526b2e3SBart Van Assche } 1103b526b2e3SBart Van Assche 1104b526b2e3SBart Van Assche return true; 1105b526b2e3SBart Van Assche } 1106b526b2e3SBart Van Assche 110772dcd505SPeter Zijlstra int check_consistency = 0; 110872dcd505SPeter Zijlstra module_param(check_consistency, int, 0644); 110972dcd505SPeter Zijlstra 111072dcd505SPeter Zijlstra static void check_data_structures(void) 111172dcd505SPeter Zijlstra { 111272dcd505SPeter Zijlstra static bool once = false; 111372dcd505SPeter Zijlstra 111472dcd505SPeter Zijlstra if (check_consistency && !once) { 111572dcd505SPeter Zijlstra if (!__check_data_structures()) { 111672dcd505SPeter Zijlstra once = true; 111772dcd505SPeter Zijlstra WARN_ON(once); 111872dcd505SPeter Zijlstra } 111972dcd505SPeter Zijlstra } 112072dcd505SPeter Zijlstra } 112172dcd505SPeter Zijlstra 112272dcd505SPeter Zijlstra #else /* CONFIG_DEBUG_LOCKDEP */ 112372dcd505SPeter Zijlstra 112472dcd505SPeter Zijlstra static inline void check_data_structures(void) { } 112572dcd505SPeter Zijlstra 112672dcd505SPeter Zijlstra #endif /* CONFIG_DEBUG_LOCKDEP */ 112772dcd505SPeter Zijlstra 1128810507feSWaiman Long static void init_chain_block_buckets(void); 1129810507feSWaiman Long 11308eddac3fSPeter Zijlstra /* 1131a0b0fd53SBart Van Assche * Initialize the lock_classes[] array elements, the free_lock_classes list 1132a0b0fd53SBart Van Assche * and also the delayed_free structure. 1133feb0a386SBart Van Assche */ 1134feb0a386SBart Van Assche static void init_data_structures_once(void) 1135feb0a386SBart Van Assche { 1136810507feSWaiman Long static bool __read_mostly ds_initialized, rcu_head_initialized; 1137feb0a386SBart Van Assche int i; 1138feb0a386SBart Van Assche 11390126574fSBart Van Assche if (likely(rcu_head_initialized)) 1140feb0a386SBart Van Assche return; 1141feb0a386SBart Van Assche 11420126574fSBart Van Assche if (system_state >= SYSTEM_SCHEDULING) { 1143a0b0fd53SBart Van Assche init_rcu_head(&delayed_free.rcu_head); 11440126574fSBart Van Assche rcu_head_initialized = true; 11450126574fSBart Van Assche } 11460126574fSBart Van Assche 11470126574fSBart Van Assche if (ds_initialized) 11480126574fSBart Van Assche return; 11490126574fSBart Van Assche 11500126574fSBart Van Assche ds_initialized = true; 11510126574fSBart Van Assche 1152a0b0fd53SBart Van Assche INIT_LIST_HEAD(&delayed_free.pf[0].zapped); 1153a0b0fd53SBart Van Assche INIT_LIST_HEAD(&delayed_free.pf[1].zapped); 1154a0b0fd53SBart Van Assche 1155feb0a386SBart Van Assche for (i = 0; i < ARRAY_SIZE(lock_classes); i++) { 1156a0b0fd53SBart Van Assche list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes); 1157feb0a386SBart Van Assche INIT_LIST_HEAD(&lock_classes[i].locks_after); 1158feb0a386SBart Van Assche INIT_LIST_HEAD(&lock_classes[i].locks_before); 1159feb0a386SBart Van Assche } 1160810507feSWaiman Long init_chain_block_buckets(); 1161feb0a386SBart Van Assche } 1162feb0a386SBart Van Assche 1163108c1485SBart Van Assche static inline struct hlist_head *keyhashentry(const struct lock_class_key *key) 1164108c1485SBart Van Assche { 1165108c1485SBart Van Assche unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS); 1166108c1485SBart Van Assche 1167108c1485SBart Van Assche return lock_keys_hash + hash; 1168108c1485SBart Van Assche } 1169108c1485SBart Van Assche 1170108c1485SBart Van Assche /* Register a dynamically allocated key. 
*/ 1171108c1485SBart Van Assche void lockdep_register_key(struct lock_class_key *key) 1172108c1485SBart Van Assche { 1173108c1485SBart Van Assche struct hlist_head *hash_head; 1174108c1485SBart Van Assche struct lock_class_key *k; 1175108c1485SBart Van Assche unsigned long flags; 1176108c1485SBart Van Assche 1177108c1485SBart Van Assche if (WARN_ON_ONCE(static_obj(key))) 1178108c1485SBart Van Assche return; 1179108c1485SBart Van Assche hash_head = keyhashentry(key); 1180108c1485SBart Van Assche 1181108c1485SBart Van Assche raw_local_irq_save(flags); 1182108c1485SBart Van Assche if (!graph_lock()) 1183108c1485SBart Van Assche goto restore_irqs; 1184108c1485SBart Van Assche hlist_for_each_entry_rcu(k, hash_head, hash_entry) { 1185108c1485SBart Van Assche if (WARN_ON_ONCE(k == key)) 1186108c1485SBart Van Assche goto out_unlock; 1187108c1485SBart Van Assche } 1188108c1485SBart Van Assche hlist_add_head_rcu(&key->hash_entry, hash_head); 1189108c1485SBart Van Assche out_unlock: 1190108c1485SBart Van Assche graph_unlock(); 1191108c1485SBart Van Assche restore_irqs: 1192108c1485SBart Van Assche raw_local_irq_restore(flags); 1193108c1485SBart Van Assche } 1194108c1485SBart Van Assche EXPORT_SYMBOL_GPL(lockdep_register_key); 1195108c1485SBart Van Assche 1196108c1485SBart Van Assche /* Check whether a key has been registered as a dynamic key. */ 1197108c1485SBart Van Assche static bool is_dynamic_key(const struct lock_class_key *key) 1198108c1485SBart Van Assche { 1199108c1485SBart Van Assche struct hlist_head *hash_head; 1200108c1485SBart Van Assche struct lock_class_key *k; 1201108c1485SBart Van Assche bool found = false; 1202108c1485SBart Van Assche 1203108c1485SBart Van Assche if (WARN_ON_ONCE(static_obj(key))) 1204108c1485SBart Van Assche return false; 1205108c1485SBart Van Assche 1206108c1485SBart Van Assche /* 1207108c1485SBart Van Assche * If lock debugging is disabled lock_keys_hash[] may contain 1208108c1485SBart Van Assche * pointers to memory that has already been freed. Avoid triggering 1209108c1485SBart Van Assche * a use-after-free in that case by returning early. 1210108c1485SBart Van Assche */ 1211108c1485SBart Van Assche if (!debug_locks) 1212108c1485SBart Van Assche return true; 1213108c1485SBart Van Assche 1214108c1485SBart Van Assche hash_head = keyhashentry(key); 1215108c1485SBart Van Assche 1216108c1485SBart Van Assche rcu_read_lock(); 1217108c1485SBart Van Assche hlist_for_each_entry_rcu(k, hash_head, hash_entry) { 1218108c1485SBart Van Assche if (k == key) { 1219108c1485SBart Van Assche found = true; 1220108c1485SBart Van Assche break; 1221108c1485SBart Van Assche } 1222108c1485SBart Van Assche } 1223108c1485SBart Van Assche rcu_read_unlock(); 1224108c1485SBart Van Assche 1225108c1485SBart Van Assche return found; 1226108c1485SBart Van Assche } 1227108c1485SBart Van Assche 1228feb0a386SBart Van Assche /* 12298eddac3fSPeter Zijlstra * Register a lock's class in the hash-table, if the class is not present 12308eddac3fSPeter Zijlstra * yet. Otherwise we look it up. We cache the result in the lock object 12318eddac3fSPeter Zijlstra * itself, so actual lookup of the hash should be once per lock object. 
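 *
 * (Illustrative note: the caching happens at out_set_class_cache below;
 * once lock->class_cache[] is populated, later acquisitions of the same
 * lock object can reuse the cached class and need not repeat the hash
 * walk under the graph lock.)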
12328eddac3fSPeter Zijlstra */ 1233c003ed92SDenys Vlasenko static struct lock_class * 12348eddac3fSPeter Zijlstra register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) 12358eddac3fSPeter Zijlstra { 12368eddac3fSPeter Zijlstra struct lockdep_subclass_key *key; 1237a63f38ccSAndrew Morton struct hlist_head *hash_head; 12388eddac3fSPeter Zijlstra struct lock_class *class; 123935a9393cSPeter Zijlstra 124035a9393cSPeter Zijlstra DEBUG_LOCKS_WARN_ON(!irqs_disabled()); 12418eddac3fSPeter Zijlstra 12428eddac3fSPeter Zijlstra class = look_up_lock_class(lock, subclass); 124364f29d1bSMatthew Wilcox if (likely(class)) 12448eddac3fSPeter Zijlstra goto out_set_class_cache; 12458eddac3fSPeter Zijlstra 124664f29d1bSMatthew Wilcox if (!lock->key) { 124764f29d1bSMatthew Wilcox if (!assign_lock_key(lock)) 124864f29d1bSMatthew Wilcox return NULL; 1249108c1485SBart Van Assche } else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) { 12508eddac3fSPeter Zijlstra return NULL; 12518eddac3fSPeter Zijlstra } 12528eddac3fSPeter Zijlstra 12538eddac3fSPeter Zijlstra key = lock->key->subkeys + subclass; 12548eddac3fSPeter Zijlstra hash_head = classhashentry(key); 12558eddac3fSPeter Zijlstra 12568eddac3fSPeter Zijlstra if (!graph_lock()) { 12578eddac3fSPeter Zijlstra return NULL; 12588eddac3fSPeter Zijlstra } 12598eddac3fSPeter Zijlstra /* 12608eddac3fSPeter Zijlstra * We have to do the hash-walk again, to avoid races 12618eddac3fSPeter Zijlstra * with another CPU: 12628eddac3fSPeter Zijlstra */ 1263a63f38ccSAndrew Morton hlist_for_each_entry_rcu(class, hash_head, hash_entry) { 12648eddac3fSPeter Zijlstra if (class->key == key) 12658eddac3fSPeter Zijlstra goto out_unlock_set; 126635a9393cSPeter Zijlstra } 126735a9393cSPeter Zijlstra 1268feb0a386SBart Van Assche init_data_structures_once(); 1269feb0a386SBart Van Assche 1270a0b0fd53SBart Van Assche /* Allocate a new lock class and add it to the hash. 
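 * (The free_lock_classes list is populated by init_data_structures_once();
 * finding it empty here means all MAX_LOCKDEP_KEYS classes are in use,
 * which is reported below as "MAX_LOCKDEP_KEYS too low!".)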
*/ 1271a0b0fd53SBart Van Assche class = list_first_entry_or_null(&free_lock_classes, typeof(*class), 1272a0b0fd53SBart Van Assche lock_entry); 1273a0b0fd53SBart Van Assche if (!class) { 12748eddac3fSPeter Zijlstra if (!debug_locks_off_graph_unlock()) { 12758eddac3fSPeter Zijlstra return NULL; 12768eddac3fSPeter Zijlstra } 12778eddac3fSPeter Zijlstra 12788eddac3fSPeter Zijlstra print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); 12798eddac3fSPeter Zijlstra dump_stack(); 12808eddac3fSPeter Zijlstra return NULL; 12818eddac3fSPeter Zijlstra } 1282a0b0fd53SBart Van Assche nr_lock_classes++; 128301bb6f0aSYuyang Du __set_bit(class - lock_classes, lock_classes_in_use); 12848eddac3fSPeter Zijlstra debug_atomic_inc(nr_unused_locks); 12858eddac3fSPeter Zijlstra class->key = key; 12868eddac3fSPeter Zijlstra class->name = lock->name; 12878eddac3fSPeter Zijlstra class->subclass = subclass; 1288feb0a386SBart Van Assche WARN_ON_ONCE(!list_empty(&class->locks_before)); 1289feb0a386SBart Van Assche WARN_ON_ONCE(!list_empty(&class->locks_after)); 12908eddac3fSPeter Zijlstra class->name_version = count_matching_names(class); 1291de8f5e4fSPeter Zijlstra class->wait_type_inner = lock->wait_type_inner; 1292de8f5e4fSPeter Zijlstra class->wait_type_outer = lock->wait_type_outer; 1293dfd5e3f5SPeter Zijlstra class->lock_type = lock->lock_type; 12948eddac3fSPeter Zijlstra /* 12958eddac3fSPeter Zijlstra * We use RCU's safe list-add method to make 12968eddac3fSPeter Zijlstra * parallel walking of the hash-list safe: 12978eddac3fSPeter Zijlstra */ 1298a63f38ccSAndrew Morton hlist_add_head_rcu(&class->hash_entry, hash_head); 12998eddac3fSPeter Zijlstra /* 1300a0b0fd53SBart Van Assche * Remove the class from the free list and add it to the global list 1301a0b0fd53SBart Van Assche * of classes. 13028eddac3fSPeter Zijlstra */ 1303a0b0fd53SBart Van Assche list_move_tail(&class->lock_entry, &all_lock_classes); 13048eddac3fSPeter Zijlstra 13058eddac3fSPeter Zijlstra if (verbose(class)) { 13068eddac3fSPeter Zijlstra graph_unlock(); 13078eddac3fSPeter Zijlstra 130804860d48SBorislav Petkov printk("\nnew class %px: %s", class->key, class->name); 13098eddac3fSPeter Zijlstra if (class->name_version > 1) 1310f943fe0fSDmitry Vyukov printk(KERN_CONT "#%d", class->name_version); 1311f943fe0fSDmitry Vyukov printk(KERN_CONT "\n"); 13128eddac3fSPeter Zijlstra dump_stack(); 13138eddac3fSPeter Zijlstra 13148eddac3fSPeter Zijlstra if (!graph_lock()) { 13158eddac3fSPeter Zijlstra return NULL; 13168eddac3fSPeter Zijlstra } 13178eddac3fSPeter Zijlstra } 13188eddac3fSPeter Zijlstra out_unlock_set: 13198eddac3fSPeter Zijlstra graph_unlock(); 13208eddac3fSPeter Zijlstra 13218eddac3fSPeter Zijlstra out_set_class_cache: 13228eddac3fSPeter Zijlstra if (!subclass || force) 13238eddac3fSPeter Zijlstra lock->class_cache[0] = class; 13248eddac3fSPeter Zijlstra else if (subclass < NR_LOCKDEP_CACHING_CLASSES) 13258eddac3fSPeter Zijlstra lock->class_cache[subclass] = class; 13268eddac3fSPeter Zijlstra 13278eddac3fSPeter Zijlstra /* 13288eddac3fSPeter Zijlstra * Hash collision, did we smoke some? We found a class with a matching 13298eddac3fSPeter Zijlstra * hash but the subclass -- which is hashed in -- didn't match. 
13308eddac3fSPeter Zijlstra */ 13318eddac3fSPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass)) 13328eddac3fSPeter Zijlstra return NULL; 13338eddac3fSPeter Zijlstra 13348eddac3fSPeter Zijlstra return class; 13358eddac3fSPeter Zijlstra } 13368eddac3fSPeter Zijlstra 13378eddac3fSPeter Zijlstra #ifdef CONFIG_PROVE_LOCKING 13388eddac3fSPeter Zijlstra /* 13398eddac3fSPeter Zijlstra * Allocate a lockdep entry. (assumes the graph_lock held, returns 13408eddac3fSPeter Zijlstra * with NULL on failure) 13418eddac3fSPeter Zijlstra */ 13428eddac3fSPeter Zijlstra static struct lock_list *alloc_list_entry(void) 13438eddac3fSPeter Zijlstra { 1344ace35a7aSBart Van Assche int idx = find_first_zero_bit(list_entries_in_use, 1345ace35a7aSBart Van Assche ARRAY_SIZE(list_entries)); 1346ace35a7aSBart Van Assche 1347ace35a7aSBart Van Assche if (idx >= ARRAY_SIZE(list_entries)) { 13488eddac3fSPeter Zijlstra if (!debug_locks_off_graph_unlock()) 13498eddac3fSPeter Zijlstra return NULL; 13508eddac3fSPeter Zijlstra 13518eddac3fSPeter Zijlstra print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!"); 13528eddac3fSPeter Zijlstra dump_stack(); 13538eddac3fSPeter Zijlstra return NULL; 13548eddac3fSPeter Zijlstra } 1355ace35a7aSBart Van Assche nr_list_entries++; 1356ace35a7aSBart Van Assche __set_bit(idx, list_entries_in_use); 1357ace35a7aSBart Van Assche return list_entries + idx; 13588eddac3fSPeter Zijlstra } 13598eddac3fSPeter Zijlstra 13608eddac3fSPeter Zijlstra /* 13618eddac3fSPeter Zijlstra * Add a new dependency to the head of the list: 13628eddac3fSPeter Zijlstra */ 136386cffb80SBart Van Assche static int add_lock_to_list(struct lock_class *this, 136486cffb80SBart Van Assche struct lock_class *links_to, struct list_head *head, 13653454a36dSBoqun Feng unsigned long ip, u16 distance, u8 dep, 136612593b74SBart Van Assche const struct lock_trace *trace) 13678eddac3fSPeter Zijlstra { 13688eddac3fSPeter Zijlstra struct lock_list *entry; 13698eddac3fSPeter Zijlstra /* 13708eddac3fSPeter Zijlstra * Lock not present yet - get a new dependency struct and 13718eddac3fSPeter Zijlstra * add it to the list: 13728eddac3fSPeter Zijlstra */ 13738eddac3fSPeter Zijlstra entry = alloc_list_entry(); 13748eddac3fSPeter Zijlstra if (!entry) 13758eddac3fSPeter Zijlstra return 0; 13768eddac3fSPeter Zijlstra 13778eddac3fSPeter Zijlstra entry->class = this; 137886cffb80SBart Van Assche entry->links_to = links_to; 13793454a36dSBoqun Feng entry->dep = dep; 13808eddac3fSPeter Zijlstra entry->distance = distance; 138112593b74SBart Van Assche entry->trace = trace; 13828eddac3fSPeter Zijlstra /* 138335a9393cSPeter Zijlstra * Both allocation and removal are done under the graph lock; but 138435a9393cSPeter Zijlstra * iteration is under RCU-sched; see look_up_lock_class() and 138535a9393cSPeter Zijlstra * lockdep_free_key_range(). 
13868eddac3fSPeter Zijlstra */ 13878eddac3fSPeter Zijlstra list_add_tail_rcu(&entry->entry, head); 13888eddac3fSPeter Zijlstra 13898eddac3fSPeter Zijlstra return 1; 13908eddac3fSPeter Zijlstra } 13918eddac3fSPeter Zijlstra 13928eddac3fSPeter Zijlstra /* 13938eddac3fSPeter Zijlstra * For good efficiency of the modulo operation, we use a power of 2 13948eddac3fSPeter Zijlstra */ 13958eddac3fSPeter Zijlstra #define MAX_CIRCULAR_QUEUE_SIZE 4096UL 13968eddac3fSPeter Zijlstra #define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1) 13978eddac3fSPeter Zijlstra 13988eddac3fSPeter Zijlstra /* 1399aa480771SYuyang Du * The circular_queue and helpers are used to implement graph 1400aa480771SYuyang Du * breadth-first search (BFS) algorithm, by which we can determine 1401aa480771SYuyang Du * whether there is a path from a lock to another. In deadlock checks, 1402aa480771SYuyang Du * a path from the next lock to be acquired to a previous held lock 1403aa480771SYuyang Du * indicates that adding the <prev> -> <next> lock dependency will 1404aa480771SYuyang Du * produce a circle in the graph. Breadth-first search instead of 1405aa480771SYuyang Du * depth-first search is used in order to find the shortest (circular) 1406aa480771SYuyang Du * path. 14078eddac3fSPeter Zijlstra */ 14088eddac3fSPeter Zijlstra struct circular_queue { 1409aa480771SYuyang Du struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE]; 14108eddac3fSPeter Zijlstra unsigned int front, rear; 14118eddac3fSPeter Zijlstra }; 14128eddac3fSPeter Zijlstra 14138eddac3fSPeter Zijlstra static struct circular_queue lock_cq; 14148eddac3fSPeter Zijlstra 14158eddac3fSPeter Zijlstra unsigned int max_bfs_queue_depth; 14168eddac3fSPeter Zijlstra 14178eddac3fSPeter Zijlstra static unsigned int lockdep_dependency_gen_id; 14188eddac3fSPeter Zijlstra 14198eddac3fSPeter Zijlstra static inline void __cq_init(struct circular_queue *cq) 14208eddac3fSPeter Zijlstra { 14218eddac3fSPeter Zijlstra cq->front = cq->rear = 0; 14228eddac3fSPeter Zijlstra lockdep_dependency_gen_id++; 14238eddac3fSPeter Zijlstra } 14248eddac3fSPeter Zijlstra 14258eddac3fSPeter Zijlstra static inline int __cq_empty(struct circular_queue *cq) 14268eddac3fSPeter Zijlstra { 14278eddac3fSPeter Zijlstra return (cq->front == cq->rear); 14288eddac3fSPeter Zijlstra } 14298eddac3fSPeter Zijlstra 14308eddac3fSPeter Zijlstra static inline int __cq_full(struct circular_queue *cq) 14318eddac3fSPeter Zijlstra { 14328eddac3fSPeter Zijlstra return ((cq->rear + 1) & CQ_MASK) == cq->front; 14338eddac3fSPeter Zijlstra } 14348eddac3fSPeter Zijlstra 1435aa480771SYuyang Du static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem) 14368eddac3fSPeter Zijlstra { 14378eddac3fSPeter Zijlstra if (__cq_full(cq)) 14388eddac3fSPeter Zijlstra return -1; 14398eddac3fSPeter Zijlstra 14408eddac3fSPeter Zijlstra cq->element[cq->rear] = elem; 14418eddac3fSPeter Zijlstra cq->rear = (cq->rear + 1) & CQ_MASK; 14428eddac3fSPeter Zijlstra return 0; 14438eddac3fSPeter Zijlstra } 14448eddac3fSPeter Zijlstra 1445c1661325SYuyang Du /* 1446c1661325SYuyang Du * Dequeue an element from the circular_queue, return a lock_list if 1447c1661325SYuyang Du * the queue is not empty, or NULL otherwise.
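 *
 * For example (illustrative): with MAX_CIRCULAR_QUEUE_SIZE == 4096 and
 * CQ_MASK == 4095, an index at the end of the array wraps to the start,
 * since (4095 + 1) & 4095 == 0; front/rear thus advance with a single
 * AND instead of a modulo operation, which is why the queue size must
 * be a power of 2.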
1448c1661325SYuyang Du */ 1449c1661325SYuyang Du static inline struct lock_list * __cq_dequeue(struct circular_queue *cq) 14508eddac3fSPeter Zijlstra { 1451c1661325SYuyang Du struct lock_list * lock; 14528eddac3fSPeter Zijlstra 1453c1661325SYuyang Du if (__cq_empty(cq)) 1454c1661325SYuyang Du return NULL; 1455c1661325SYuyang Du 1456c1661325SYuyang Du lock = cq->element[cq->front]; 14578eddac3fSPeter Zijlstra cq->front = (cq->front + 1) & CQ_MASK; 1458c1661325SYuyang Du 1459c1661325SYuyang Du return lock; 14608eddac3fSPeter Zijlstra } 14618eddac3fSPeter Zijlstra 14628eddac3fSPeter Zijlstra static inline unsigned int __cq_get_elem_count(struct circular_queue *cq) 14638eddac3fSPeter Zijlstra { 14648eddac3fSPeter Zijlstra return (cq->rear - cq->front) & CQ_MASK; 14658eddac3fSPeter Zijlstra } 14668eddac3fSPeter Zijlstra 1467d563bc6eSBoqun Feng static inline void mark_lock_accessed(struct lock_list *lock) 1468d563bc6eSBoqun Feng { 1469d563bc6eSBoqun Feng lock->class->dep_gen_id = lockdep_dependency_gen_id; 1470d563bc6eSBoqun Feng } 1471d563bc6eSBoqun Feng 1472d563bc6eSBoqun Feng static inline void visit_lock_entry(struct lock_list *lock, 14738eddac3fSPeter Zijlstra struct lock_list *parent) 14748eddac3fSPeter Zijlstra { 14758eddac3fSPeter Zijlstra lock->parent = parent; 14768eddac3fSPeter Zijlstra } 14778eddac3fSPeter Zijlstra 14788eddac3fSPeter Zijlstra static inline unsigned long lock_accessed(struct lock_list *lock) 14798eddac3fSPeter Zijlstra { 14808eddac3fSPeter Zijlstra return lock->class->dep_gen_id == lockdep_dependency_gen_id; 14818eddac3fSPeter Zijlstra } 14828eddac3fSPeter Zijlstra 14838eddac3fSPeter Zijlstra static inline struct lock_list *get_lock_parent(struct lock_list *child) 14848eddac3fSPeter Zijlstra { 14858eddac3fSPeter Zijlstra return child->parent; 14868eddac3fSPeter Zijlstra } 14878eddac3fSPeter Zijlstra 14888eddac3fSPeter Zijlstra static inline int get_lock_depth(struct lock_list *child) 14898eddac3fSPeter Zijlstra { 14908eddac3fSPeter Zijlstra int depth = 0; 14918eddac3fSPeter Zijlstra struct lock_list *parent; 14928eddac3fSPeter Zijlstra 14938eddac3fSPeter Zijlstra while ((parent = get_lock_parent(child))) { 14948eddac3fSPeter Zijlstra child = parent; 14958eddac3fSPeter Zijlstra depth++; 14968eddac3fSPeter Zijlstra } 14978eddac3fSPeter Zijlstra return depth; 14988eddac3fSPeter Zijlstra } 14998eddac3fSPeter Zijlstra 150077a80692SYuyang Du /* 150177a80692SYuyang Du * Return the forward or backward dependency list. 150277a80692SYuyang Du * 150377a80692SYuyang Du * @lock: the lock_list to get its class's dependency list 150477a80692SYuyang Du * @offset: the offset to struct lock_class to determine whether it is 150577a80692SYuyang Du * locks_after or locks_before 150677a80692SYuyang Du */ 150777a80692SYuyang Du static inline struct list_head *get_dep_list(struct lock_list *lock, int offset) 150877a80692SYuyang Du { 150977a80692SYuyang Du void *lock_class = lock->class; 151077a80692SYuyang Du 151177a80692SYuyang Du return lock_class + offset; 151277a80692SYuyang Du } 1513b11be024SBoqun Feng /* 1514b11be024SBoqun Feng * Return values of a bfs search: 1515b11be024SBoqun Feng * 1516b11be024SBoqun Feng * BFS_E* indicates an error 1517b11be024SBoqun Feng * BFS_R* indicates a result (match or not) 1518b11be024SBoqun Feng * 1519b11be024SBoqun Feng * BFS_EINVALIDNODE: Found an invalid node in the graph. 1520b11be024SBoqun Feng * 1521b11be024SBoqun Feng * BFS_EQUEUEFULL: The queue is full while doing the bfs.
1522b11be024SBoqun Feng * 1523b11be024SBoqun Feng * BFS_RMATCH: Found the matched node in the graph, and put that node into 1524b11be024SBoqun Feng * *@target_entry. 1525b11be024SBoqun Feng * 1526b11be024SBoqun Feng * BFS_RNOMATCH: Haven't found the matched node and keep *@target_entry 1527b11be024SBoqun Feng * _unchanged_. 1528b11be024SBoqun Feng */ 1529b11be024SBoqun Feng enum bfs_result { 1530b11be024SBoqun Feng BFS_EINVALIDNODE = -2, 1531b11be024SBoqun Feng BFS_EQUEUEFULL = -1, 1532b11be024SBoqun Feng BFS_RMATCH = 0, 1533b11be024SBoqun Feng BFS_RNOMATCH = 1, 1534b11be024SBoqun Feng }; 1535b11be024SBoqun Feng 1536b11be024SBoqun Feng /* 1537b11be024SBoqun Feng * bfs_result < 0 means error 1538b11be024SBoqun Feng */ 1539b11be024SBoqun Feng static inline bool bfs_error(enum bfs_result res) 1540b11be024SBoqun Feng { 1541b11be024SBoqun Feng return res < 0; 1542b11be024SBoqun Feng } 154377a80692SYuyang Du 1544154f185eSYuyang Du /* 15453454a36dSBoqun Feng * DEP_*_BIT in lock_list::dep 15463454a36dSBoqun Feng * 15473454a36dSBoqun Feng * For dependency @prev -> @next: 15483454a36dSBoqun Feng * 15493454a36dSBoqun Feng * SR: @prev is shared reader (->read != 0) and @next is recursive reader 15503454a36dSBoqun Feng * (->read == 2) 15513454a36dSBoqun Feng * ER: @prev is exclusive locker (->read == 0) and @next is recursive reader 15523454a36dSBoqun Feng * SN: @prev is shared reader and @next is non-recursive locker (->read != 2) 15533454a36dSBoqun Feng * EN: @prev is exclusive locker and @next is non-recursive locker 15543454a36dSBoqun Feng * 15553454a36dSBoqun Feng * Note that we define the value of DEP_*_BITs so that: 15563454a36dSBoqun Feng * bit0 is prev->read == 0 15573454a36dSBoqun Feng * bit1 is next->read != 2 15583454a36dSBoqun Feng */ 15593454a36dSBoqun Feng #define DEP_SR_BIT (0 + (0 << 1)) /* 0 */ 15603454a36dSBoqun Feng #define DEP_ER_BIT (1 + (0 << 1)) /* 1 */ 15613454a36dSBoqun Feng #define DEP_SN_BIT (0 + (1 << 1)) /* 2 */ 15623454a36dSBoqun Feng #define DEP_EN_BIT (1 + (1 << 1)) /* 3 */ 15633454a36dSBoqun Feng 15643454a36dSBoqun Feng #define DEP_SR_MASK (1U << (DEP_SR_BIT)) 15653454a36dSBoqun Feng #define DEP_ER_MASK (1U << (DEP_ER_BIT)) 15663454a36dSBoqun Feng #define DEP_SN_MASK (1U << (DEP_SN_BIT)) 15673454a36dSBoqun Feng #define DEP_EN_MASK (1U << (DEP_EN_BIT)) 15683454a36dSBoqun Feng 15693454a36dSBoqun Feng static inline unsigned int 15703454a36dSBoqun Feng __calc_dep_bit(struct held_lock *prev, struct held_lock *next) 15713454a36dSBoqun Feng { 15723454a36dSBoqun Feng return (prev->read == 0) + ((next->read != 2) << 1); 15733454a36dSBoqun Feng } 15743454a36dSBoqun Feng 15753454a36dSBoqun Feng static inline u8 calc_dep(struct held_lock *prev, struct held_lock *next) 15763454a36dSBoqun Feng { 15773454a36dSBoqun Feng return 1U << __calc_dep_bit(prev, next); 15783454a36dSBoqun Feng } 15793454a36dSBoqun Feng 15803454a36dSBoqun Feng /* 15813454a36dSBoqun Feng * calculate the dep_bit for backwards edges. We care about whether @prev is 15823454a36dSBoqun Feng * shared and whether @next is recursive.
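 *
 * A worked example (illustrative): for prev->read == 0 (exclusive) and
 * next->read == 0 (non-recursive), __calc_dep_bit() returns
 * (prev->read == 0) + ((next->read != 2) << 1) == 1 + 2 == 3 == DEP_EN_BIT,
 * so calc_dep() yields DEP_EN_MASK (1U << 3); __calc_dep_bitb() also
 * returns (next->read != 2) + ((prev->read == 0) << 1) == 1 + 2 == 3 for
 * this fully exclusive case.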
15833454a36dSBoqun Feng */ 15843454a36dSBoqun Feng static inline unsigned int 15853454a36dSBoqun Feng __calc_dep_bitb(struct held_lock *prev, struct held_lock *next) 15863454a36dSBoqun Feng { 15873454a36dSBoqun Feng return (next->read != 2) + ((prev->read == 0) << 1); 15883454a36dSBoqun Feng } 15893454a36dSBoqun Feng 15903454a36dSBoqun Feng static inline u8 calc_depb(struct held_lock *prev, struct held_lock *next) 15913454a36dSBoqun Feng { 15923454a36dSBoqun Feng return 1U << __calc_dep_bitb(prev, next); 15933454a36dSBoqun Feng } 15943454a36dSBoqun Feng 15953454a36dSBoqun Feng /* 15966971c0f3SBoqun Feng * Initialize a lock_list entry @lock belonging to @class as the root for a BFS 15976971c0f3SBoqun Feng * search. 15986971c0f3SBoqun Feng */ 15996971c0f3SBoqun Feng static inline void __bfs_init_root(struct lock_list *lock, 16006971c0f3SBoqun Feng struct lock_class *class) 16016971c0f3SBoqun Feng { 16026971c0f3SBoqun Feng lock->class = class; 16036971c0f3SBoqun Feng lock->parent = NULL; 16046971c0f3SBoqun Feng lock->only_xr = 0; 16056971c0f3SBoqun Feng } 16066971c0f3SBoqun Feng 16076971c0f3SBoqun Feng /* 16086971c0f3SBoqun Feng * Initialize a lock_list entry @lock based on a lock acquisition @hlock as the 16096971c0f3SBoqun Feng * root for a BFS search. 16106971c0f3SBoqun Feng * 16116971c0f3SBoqun Feng * ->only_xr of the initial lock node is set to @hlock->read == 2, to make sure 16126971c0f3SBoqun Feng * that <prev> -> @hlock and @hlock -> <whatever __bfs() found> is not -(*R)-> 16136971c0f3SBoqun Feng * and -(S*)->. 16146971c0f3SBoqun Feng */ 16156971c0f3SBoqun Feng static inline void bfs_init_root(struct lock_list *lock, 16166971c0f3SBoqun Feng struct held_lock *hlock) 16176971c0f3SBoqun Feng { 16186971c0f3SBoqun Feng __bfs_init_root(lock, hlock_class(hlock)); 16196971c0f3SBoqun Feng lock->only_xr = (hlock->read == 2); 16206971c0f3SBoqun Feng } 16216971c0f3SBoqun Feng 16226971c0f3SBoqun Feng /* 16236971c0f3SBoqun Feng * Similar to bfs_init_root() but initialize the root for backwards BFS. 16246971c0f3SBoqun Feng * 16256971c0f3SBoqun Feng * ->only_xr of the initial lock node is set to @hlock->read != 0, to make sure 16266971c0f3SBoqun Feng * that <next> -> @hlock and @hlock -> <whatever backwards BFS found> is not 16276971c0f3SBoqun Feng * -(*S)-> and -(R*)-> (reverse order of -(*R)-> and -(S*)->). 16286971c0f3SBoqun Feng */ 16296971c0f3SBoqun Feng static inline void bfs_init_rootb(struct lock_list *lock, 16306971c0f3SBoqun Feng struct held_lock *hlock) 16316971c0f3SBoqun Feng { 16326971c0f3SBoqun Feng __bfs_init_root(lock, hlock_class(hlock)); 16336971c0f3SBoqun Feng lock->only_xr = (hlock->read != 0); 16346971c0f3SBoqun Feng } 16356971c0f3SBoqun Feng 16366d1823ccSBoqun Feng static inline struct lock_list *__bfs_next(struct lock_list *lock, int offset) 16376d1823ccSBoqun Feng { 16386d1823ccSBoqun Feng if (!lock || !lock->parent) 16396d1823ccSBoqun Feng return NULL; 16406d1823ccSBoqun Feng 16416d1823ccSBoqun Feng return list_next_or_null_rcu(get_dep_list(lock->parent, offset), 16426d1823ccSBoqun Feng &lock->entry, struct lock_list, entry); 16436d1823ccSBoqun Feng } 16446d1823ccSBoqun Feng 16456971c0f3SBoqun Feng /* 16466971c0f3SBoqun Feng * Breadth-First Search to find a strong path in the dependency graph. 16476971c0f3SBoqun Feng * 16486971c0f3SBoqun Feng * @source_entry: the source of the path we are searching for. 
16496971c0f3SBoqun Feng * @data: data used for the second parameter of @match function 16506971c0f3SBoqun Feng * @match: match function for the search 16516971c0f3SBoqun Feng * @target_entry: pointer to the target of a matched path 16526971c0f3SBoqun Feng * @offset: the offset to struct lock_class to determine whether it is 16536971c0f3SBoqun Feng * locks_after or locks_before 16546971c0f3SBoqun Feng * 16556971c0f3SBoqun Feng * We may have multiple edges (considering different kinds of dependencies, 16566971c0f3SBoqun Feng * e.g. ER and SN) between two nodes in the dependency graph. But 16576971c0f3SBoqun Feng * only the strong dependency path in the graph is relevant to deadlocks. A 16586971c0f3SBoqun Feng * strong dependency path is a dependency path that doesn't have two adjacent 16596971c0f3SBoqun Feng * dependencies as -(*R)-> -(S*)->, please see: 16606971c0f3SBoqun Feng * 16616971c0f3SBoqun Feng * Documentation/locking/lockdep-design.rst 16626971c0f3SBoqun Feng * 16636971c0f3SBoqun Feng * for more explanation of the definition of strong dependency paths 16646971c0f3SBoqun Feng * 16656971c0f3SBoqun Feng * In __bfs(), we only traverse in the strong dependency path: 16666971c0f3SBoqun Feng * 16676971c0f3SBoqun Feng * In lock_list::only_xr, we record whether the previous dependency only 16686971c0f3SBoqun Feng * has -(*R)-> in the search, and if it does (prev only has -(*R)->), we 16696971c0f3SBoqun Feng * filter out any -(S*)-> in the current dependency and after that, the 16706971c0f3SBoqun Feng * ->only_xr is set according to whether we only have -(*R)-> left. 1671154f185eSYuyang Du */ 1672b11be024SBoqun Feng static enum bfs_result __bfs(struct lock_list *source_entry, 16738eddac3fSPeter Zijlstra void *data, 167461775ed2SBoqun Feng bool (*match)(struct lock_list *entry, void *data), 1675bc2dd71bSBoqun Feng bool (*skip)(struct lock_list *entry, void *data), 16768eddac3fSPeter Zijlstra struct lock_list **target_entry, 167777a80692SYuyang Du int offset) 16788eddac3fSPeter Zijlstra { 16798eddac3fSPeter Zijlstra struct circular_queue *cq = &lock_cq; 16806d1823ccSBoqun Feng struct lock_list *lock = NULL; 16816d1823ccSBoqun Feng struct lock_list *entry; 16826d1823ccSBoqun Feng struct list_head *head; 16836d1823ccSBoqun Feng unsigned int cq_depth; 16846d1823ccSBoqun Feng bool first; 16858eddac3fSPeter Zijlstra 1686248efb21SPeter Zijlstra lockdep_assert_locked(); 1687248efb21SPeter Zijlstra 16888eddac3fSPeter Zijlstra __cq_init(cq); 1689aa480771SYuyang Du __cq_enqueue(cq, source_entry); 16908eddac3fSPeter Zijlstra 16916d1823ccSBoqun Feng while ((lock = __bfs_next(lock, offset)) || (lock = __cq_dequeue(cq))) { 16926d1823ccSBoqun Feng if (!lock->class) 16936d1823ccSBoqun Feng return BFS_EINVALIDNODE; 16948eddac3fSPeter Zijlstra 1695d563bc6eSBoqun Feng /* 16966d1823ccSBoqun Feng * Step 1: check whether we have already finished this one. 16976d1823ccSBoqun Feng * 1698d563bc6eSBoqun Feng * If we have visited all the dependencies from this @lock to 1699d563bc6eSBoqun Feng * others (iow, if we have visited all lock_list entries in 1700d563bc6eSBoqun Feng * @lock->class->locks_{after,before}) we skip, otherwise go 1701d563bc6eSBoqun Feng * and visit all the dependencies in the list and mark this 1702d563bc6eSBoqun Feng * list accessed.
1703d563bc6eSBoqun Feng */ 1704d563bc6eSBoqun Feng if (lock_accessed(lock)) 1705d563bc6eSBoqun Feng continue; 1706d563bc6eSBoqun Feng else 1707d563bc6eSBoqun Feng mark_lock_accessed(lock); 1708d563bc6eSBoqun Feng 17096d1823ccSBoqun Feng /* 17106d1823ccSBoqun Feng * Step 2: check whether prev dependency and this form a strong 17116d1823ccSBoqun Feng * dependency path. 17126d1823ccSBoqun Feng */ 17136d1823ccSBoqun Feng if (lock->parent) { /* Parent exists, check prev dependency */ 17146d1823ccSBoqun Feng u8 dep = lock->dep; 17156d1823ccSBoqun Feng bool prev_only_xr = lock->parent->only_xr; 17166971c0f3SBoqun Feng 17176971c0f3SBoqun Feng /* 17186971c0f3SBoqun Feng * Mask out all -(S*)-> if we only have *R in previous 17196971c0f3SBoqun Feng * step, because -(*R)-> -(S*)-> don't make up a strong 17206971c0f3SBoqun Feng * dependency. 17216971c0f3SBoqun Feng */ 17226971c0f3SBoqun Feng if (prev_only_xr) 17236971c0f3SBoqun Feng dep &= ~(DEP_SR_MASK | DEP_SN_MASK); 17246971c0f3SBoqun Feng 17256971c0f3SBoqun Feng /* If nothing left, we skip */ 17266971c0f3SBoqun Feng if (!dep) 17276971c0f3SBoqun Feng continue; 17286971c0f3SBoqun Feng 17296971c0f3SBoqun Feng /* If there are only -(*R)-> left, set that for the next step */ 17306d1823ccSBoqun Feng lock->only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK)); 17316d1823ccSBoqun Feng } 1732d563bc6eSBoqun Feng 17336d1823ccSBoqun Feng /* 17346d1823ccSBoqun Feng * Step 3: we haven't visited this and there is a strong 17356d1823ccSBoqun Feng * dependency path to this, so check with @match. 1736bc2dd71bSBoqun Feng * If @skip is provided and returns true, we skip this 1737bc2dd71bSBoqun Feng * lock (and any path this lock is in). 17386d1823ccSBoqun Feng */ 1739bc2dd71bSBoqun Feng if (skip && skip(lock, data)) 1740bc2dd71bSBoqun Feng continue; 1741bc2dd71bSBoqun Feng 17426d1823ccSBoqun Feng if (match(lock, data)) { 17436d1823ccSBoqun Feng *target_entry = lock; 17446d1823ccSBoqun Feng return BFS_RMATCH; 17456d1823ccSBoqun Feng } 17466d1823ccSBoqun Feng 17476d1823ccSBoqun Feng /* 17486d1823ccSBoqun Feng * Step 4: if no match, expand the path by adding the 17496d1823ccSBoqun Feng * forward or backward dependencies in the search 17506d1823ccSBoqun Feng * 17516d1823ccSBoqun Feng */ 17526d1823ccSBoqun Feng first = true; 17536d1823ccSBoqun Feng head = get_dep_list(lock, offset); 17546d1823ccSBoqun Feng list_for_each_entry_rcu(entry, head, entry) { 1755d563bc6eSBoqun Feng visit_lock_entry(entry, lock); 17568eddac3fSPeter Zijlstra 17576d1823ccSBoqun Feng /* 17586d1823ccSBoqun Feng * Note we only enqueue the first of the list into the 17596d1823ccSBoqun Feng * queue, because we can always find a sibling 17606d1823ccSBoqun Feng * dependency from one (see __bfs_next()), as a result 17616d1823ccSBoqun Feng * the space of the queue is saved.
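 *
 * For example (illustrative): if the dependency list holds entries E1,
 * E2 and E3, only E1 is enqueued here; E2 and E3 are reached later from
 * E1 via list_next_or_null_rcu() in __bfs_next(), so the queue holds
 * one entry per expanded list instead of one per dependency.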
17626d1823ccSBoqun Feng */ 17636d1823ccSBoqun Feng if (!first) 17646d1823ccSBoqun Feng continue; 17656d1823ccSBoqun Feng 17666d1823ccSBoqun Feng first = false; 17676d1823ccSBoqun Feng 17686d1823ccSBoqun Feng if (__cq_enqueue(cq, entry)) 17696d1823ccSBoqun Feng return BFS_EQUEUEFULL; 17706d1823ccSBoqun Feng 17718eddac3fSPeter Zijlstra cq_depth = __cq_get_elem_count(cq); 17728eddac3fSPeter Zijlstra if (max_bfs_queue_depth < cq_depth) 17738eddac3fSPeter Zijlstra max_bfs_queue_depth = cq_depth; 17748eddac3fSPeter Zijlstra } 17758eddac3fSPeter Zijlstra } 17766d1823ccSBoqun Feng 17776d1823ccSBoqun Feng return BFS_RNOMATCH; 17788eddac3fSPeter Zijlstra } 17798eddac3fSPeter Zijlstra 1780b11be024SBoqun Feng static inline enum bfs_result 1781b11be024SBoqun Feng __bfs_forwards(struct lock_list *src_entry, 17828eddac3fSPeter Zijlstra void *data, 178361775ed2SBoqun Feng bool (*match)(struct lock_list *entry, void *data), 1784bc2dd71bSBoqun Feng bool (*skip)(struct lock_list *entry, void *data), 17858eddac3fSPeter Zijlstra struct lock_list **target_entry) 17868eddac3fSPeter Zijlstra { 1787bc2dd71bSBoqun Feng return __bfs(src_entry, data, match, skip, target_entry, 178877a80692SYuyang Du offsetof(struct lock_class, locks_after)); 17898eddac3fSPeter Zijlstra 17908eddac3fSPeter Zijlstra } 17918eddac3fSPeter Zijlstra 1792b11be024SBoqun Feng static inline enum bfs_result 1793b11be024SBoqun Feng __bfs_backwards(struct lock_list *src_entry, 17948eddac3fSPeter Zijlstra void *data, 179561775ed2SBoqun Feng bool (*match)(struct lock_list *entry, void *data), 1796bc2dd71bSBoqun Feng bool (*skip)(struct lock_list *entry, void *data), 17978eddac3fSPeter Zijlstra struct lock_list **target_entry) 17988eddac3fSPeter Zijlstra { 1799bc2dd71bSBoqun Feng return __bfs(src_entry, data, match, skip, target_entry, 180077a80692SYuyang Du offsetof(struct lock_class, locks_before)); 18018eddac3fSPeter Zijlstra 18028eddac3fSPeter Zijlstra } 18038eddac3fSPeter Zijlstra 180412593b74SBart Van Assche static void print_lock_trace(const struct lock_trace *trace, 180512593b74SBart Van Assche unsigned int spaces) 1806c120bce7SThomas Gleixner { 180712593b74SBart Van Assche stack_trace_print(trace->entries, trace->nr_entries, spaces); 1808c120bce7SThomas Gleixner } 1809c120bce7SThomas Gleixner 18108eddac3fSPeter Zijlstra /* 18118eddac3fSPeter Zijlstra * Print a dependency chain entry (this is only done when a deadlock 18128eddac3fSPeter Zijlstra * has been detected): 18138eddac3fSPeter Zijlstra */ 1814f7c1c6b3SYuyang Du static noinline void 18158eddac3fSPeter Zijlstra print_circular_bug_entry(struct lock_list *target, int depth) 18168eddac3fSPeter Zijlstra { 18178eddac3fSPeter Zijlstra if (debug_locks_silent) 1818f7c1c6b3SYuyang Du return; 18198eddac3fSPeter Zijlstra printk("\n-> #%u", depth); 18208eddac3fSPeter Zijlstra print_lock_name(target->class); 1821f943fe0fSDmitry Vyukov printk(KERN_CONT ":\n"); 182212593b74SBart Van Assche print_lock_trace(target->trace, 6); 18238eddac3fSPeter Zijlstra } 18248eddac3fSPeter Zijlstra 18258eddac3fSPeter Zijlstra static void 18268eddac3fSPeter Zijlstra print_circular_lock_scenario(struct held_lock *src, 18278eddac3fSPeter Zijlstra struct held_lock *tgt, 18288eddac3fSPeter Zijlstra struct lock_list *prt) 18298eddac3fSPeter Zijlstra { 18308eddac3fSPeter Zijlstra struct lock_class *source = hlock_class(src); 18318eddac3fSPeter Zijlstra struct lock_class *target = hlock_class(tgt); 18328eddac3fSPeter Zijlstra struct lock_class *parent = prt->class; 18338eddac3fSPeter Zijlstra 18348eddac3fSPeter Zijlstra /* 
18358eddac3fSPeter Zijlstra * A direct locking problem where unsafe_class lock is taken 18368eddac3fSPeter Zijlstra * directly by safe_class lock, then all we need to show 18378eddac3fSPeter Zijlstra * is the deadlock scenario, as it is obvious that the 18388eddac3fSPeter Zijlstra * unsafe lock is taken under the safe lock. 18398eddac3fSPeter Zijlstra * 18408eddac3fSPeter Zijlstra * But if there is a chain instead, where the safe lock takes 18418eddac3fSPeter Zijlstra * an intermediate lock (middle_class) where this lock is 18428eddac3fSPeter Zijlstra * not the same as the safe lock, then the lock chain is 18438eddac3fSPeter Zijlstra * used to describe the problem. Otherwise we would need 18448eddac3fSPeter Zijlstra * to show a different CPU case for each link in the chain 18458eddac3fSPeter Zijlstra * from the safe_class lock to the unsafe_class lock. 18468eddac3fSPeter Zijlstra */ 18478eddac3fSPeter Zijlstra if (parent != source) { 18488eddac3fSPeter Zijlstra printk("Chain exists of:\n "); 18498eddac3fSPeter Zijlstra __print_lock_name(source); 1850f943fe0fSDmitry Vyukov printk(KERN_CONT " --> "); 18518eddac3fSPeter Zijlstra __print_lock_name(parent); 1852f943fe0fSDmitry Vyukov printk(KERN_CONT " --> "); 18538eddac3fSPeter Zijlstra __print_lock_name(target); 1854f943fe0fSDmitry Vyukov printk(KERN_CONT "\n\n"); 18558eddac3fSPeter Zijlstra } 18568eddac3fSPeter Zijlstra 18578eddac3fSPeter Zijlstra printk(" Possible unsafe locking scenario:\n\n"); 18588eddac3fSPeter Zijlstra printk(" CPU0 CPU1\n"); 18598eddac3fSPeter Zijlstra printk(" ---- ----\n"); 18608eddac3fSPeter Zijlstra printk(" lock("); 18618eddac3fSPeter Zijlstra __print_lock_name(target); 1862f943fe0fSDmitry Vyukov printk(KERN_CONT ");\n"); 18638eddac3fSPeter Zijlstra printk(" lock("); 18648eddac3fSPeter Zijlstra __print_lock_name(parent); 1865f943fe0fSDmitry Vyukov printk(KERN_CONT ");\n"); 18668eddac3fSPeter Zijlstra printk(" lock("); 18678eddac3fSPeter Zijlstra __print_lock_name(target); 1868f943fe0fSDmitry Vyukov printk(KERN_CONT ");\n"); 18698eddac3fSPeter Zijlstra printk(" lock("); 18708eddac3fSPeter Zijlstra __print_lock_name(source); 1871f943fe0fSDmitry Vyukov printk(KERN_CONT ");\n"); 18728eddac3fSPeter Zijlstra printk("\n *** DEADLOCK ***\n\n"); 18738eddac3fSPeter Zijlstra } 18748eddac3fSPeter Zijlstra 18758eddac3fSPeter Zijlstra /* 18768eddac3fSPeter Zijlstra * When a circular dependency is detected, print the 18778eddac3fSPeter Zijlstra * header first: 18788eddac3fSPeter Zijlstra */ 1879f7c1c6b3SYuyang Du static noinline void 18808eddac3fSPeter Zijlstra print_circular_bug_header(struct lock_list *entry, unsigned int depth, 18818eddac3fSPeter Zijlstra struct held_lock *check_src, 18828eddac3fSPeter Zijlstra struct held_lock *check_tgt) 18838eddac3fSPeter Zijlstra { 18848eddac3fSPeter Zijlstra struct task_struct *curr = current; 18858eddac3fSPeter Zijlstra 18868eddac3fSPeter Zijlstra if (debug_locks_silent) 1887f7c1c6b3SYuyang Du return; 18888eddac3fSPeter Zijlstra 1889681fbec8SPaul E. McKenney pr_warn("\n"); 1890a5dd63efSPaul E. McKenney pr_warn("======================================================\n"); 1891a5dd63efSPaul E. McKenney pr_warn("WARNING: possible circular locking dependency detected\n"); 18928eddac3fSPeter Zijlstra print_kernel_ident(); 1893a5dd63efSPaul E. McKenney pr_warn("------------------------------------------------------\n"); 1894681fbec8SPaul E. 
McKenney pr_warn("%s/%d is trying to acquire lock:\n", 18958eddac3fSPeter Zijlstra curr->comm, task_pid_nr(curr)); 18968eddac3fSPeter Zijlstra print_lock(check_src); 1897383a4bc8SByungchul Park 1898681fbec8SPaul E. McKenney pr_warn("\nbut task is already holding lock:\n"); 1899383a4bc8SByungchul Park 19008eddac3fSPeter Zijlstra print_lock(check_tgt); 1901681fbec8SPaul E. McKenney pr_warn("\nwhich lock already depends on the new lock.\n\n"); 1902681fbec8SPaul E. McKenney pr_warn("\nthe existing dependency chain (in reverse order) is:\n"); 19038eddac3fSPeter Zijlstra 19048eddac3fSPeter Zijlstra print_circular_bug_entry(entry, depth); 19058eddac3fSPeter Zijlstra } 19068eddac3fSPeter Zijlstra 190768e30567SBoqun Feng /* 190868e30567SBoqun Feng * We are about to add A -> B into the dependency graph, and in __bfs() a 190968e30567SBoqun Feng * strong dependency path A -> .. -> B is found: hlock_class equals 191068e30567SBoqun Feng * entry->class. 191168e30567SBoqun Feng * 191268e30567SBoqun Feng * If A -> .. -> B can replace A -> B in any __bfs() search (means the former 191368e30567SBoqun Feng * is _stronger_ than or equal to the latter), we consider A -> B as redundant. 191468e30567SBoqun Feng * For example if A -> .. -> B is -(EN)-> (i.e. A -(E*)-> .. -(*N)-> B), and A 191568e30567SBoqun Feng * -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the 191668e30567SBoqun Feng * dependency graph, as for any strong path ..-> A -> B ->.. we can get by 191768e30567SBoqun Feng * having dependency A -> B, we could already get an equivalent path ..-> A -> 191868e30567SBoqun Feng * .. -> B -> .. with A -> .. -> B. Therefore A -> B is redundant. 191968e30567SBoqun Feng * 192068e30567SBoqun Feng * We need to make sure both the start and the end of A -> .. -> B is not 192168e30567SBoqun Feng * weaker than A -> B. For the start part, please see the comment in 192268e30567SBoqun Feng * check_redundant(). For the end part, we need: 192368e30567SBoqun Feng * 192468e30567SBoqun Feng * Either 192568e30567SBoqun Feng * 192668e30567SBoqun Feng * a) A -> B is -(*R)-> (everything is not weaker than that) 192768e30567SBoqun Feng * 192868e30567SBoqun Feng * or 192968e30567SBoqun Feng * 193068e30567SBoqun Feng * b) A -> .. -> B is -(*N)-> (nothing is stronger than this) 193168e30567SBoqun Feng * 193268e30567SBoqun Feng */ 193368e30567SBoqun Feng static inline bool hlock_equal(struct lock_list *entry, void *data) 19348eddac3fSPeter Zijlstra { 193568e30567SBoqun Feng struct held_lock *hlock = (struct held_lock *)data; 193668e30567SBoqun Feng 193768e30567SBoqun Feng return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */ 193868e30567SBoqun Feng (hlock->read == 2 || /* A -> B is -(*R)-> */ 193968e30567SBoqun Feng !entry->only_xr); /* A -> .. -> B is -(*N)-> */ 19408eddac3fSPeter Zijlstra } 19418eddac3fSPeter Zijlstra 19429de0c9bbSBoqun Feng /* 19439de0c9bbSBoqun Feng * We are about to add B -> A into the dependency graph, and in __bfs() a 19449de0c9bbSBoqun Feng * strong dependency path A -> .. -> B is found: hlock_class equals 19459de0c9bbSBoqun Feng * entry->class. 19469de0c9bbSBoqun Feng * 19479de0c9bbSBoqun Feng * We will have a deadlock case (conflict) if A -> .. -> B -> A is a strong 19489de0c9bbSBoqun Feng * dependency cycle, that means: 19499de0c9bbSBoqun Feng * 19509de0c9bbSBoqun Feng * Either 19519de0c9bbSBoqun Feng * 19529de0c9bbSBoqun Feng * a) B -> A is -(E*)-> 19539de0c9bbSBoqun Feng * 19549de0c9bbSBoqun Feng * or 19559de0c9bbSBoqun Feng * 19569de0c9bbSBoqun Feng * b) A -> ..
-> B is -(*N)-> (i.e. A -> .. -(*N)-> B) 19579de0c9bbSBoqun Feng * 19589de0c9bbSBoqun Feng * as then we don't have -(*R)-> -(S*)-> in the cycle. 19599de0c9bbSBoqun Feng */ 19609de0c9bbSBoqun Feng static inline bool hlock_conflict(struct lock_list *entry, void *data) 19619de0c9bbSBoqun Feng { 19629de0c9bbSBoqun Feng struct held_lock *hlock = (struct held_lock *)data; 19639de0c9bbSBoqun Feng 19649de0c9bbSBoqun Feng return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */ 19659de0c9bbSBoqun Feng (hlock->read == 0 || /* B -> A is -(E*)-> */ 19669de0c9bbSBoqun Feng !entry->only_xr); /* A -> .. -> B is -(*N)-> */ 19679de0c9bbSBoqun Feng } 19689de0c9bbSBoqun Feng 1969f7c1c6b3SYuyang Du static noinline void print_circular_bug(struct lock_list *this, 19708eddac3fSPeter Zijlstra struct lock_list *target, 19718eddac3fSPeter Zijlstra struct held_lock *check_src, 1972b1abe462SThomas Gleixner struct held_lock *check_tgt) 19738eddac3fSPeter Zijlstra { 19748eddac3fSPeter Zijlstra struct task_struct *curr = current; 19758eddac3fSPeter Zijlstra struct lock_list *parent; 19768eddac3fSPeter Zijlstra struct lock_list *first_parent; 19778eddac3fSPeter Zijlstra int depth; 19788eddac3fSPeter Zijlstra 19798eddac3fSPeter Zijlstra if (!debug_locks_off_graph_unlock() || debug_locks_silent) 1980f7c1c6b3SYuyang Du return; 19818eddac3fSPeter Zijlstra 198212593b74SBart Van Assche this->trace = save_trace(); 198312593b74SBart Van Assche if (!this->trace) 1984f7c1c6b3SYuyang Du return; 19858eddac3fSPeter Zijlstra 19868eddac3fSPeter Zijlstra depth = get_lock_depth(target); 19878eddac3fSPeter Zijlstra 19888eddac3fSPeter Zijlstra print_circular_bug_header(target, depth, check_src, check_tgt); 19898eddac3fSPeter Zijlstra 19908eddac3fSPeter Zijlstra parent = get_lock_parent(target); 19918eddac3fSPeter Zijlstra first_parent = parent; 19928eddac3fSPeter Zijlstra 19938eddac3fSPeter Zijlstra while (parent) { 19948eddac3fSPeter Zijlstra print_circular_bug_entry(parent, --depth); 19958eddac3fSPeter Zijlstra parent = get_lock_parent(parent); 19968eddac3fSPeter Zijlstra } 19978eddac3fSPeter Zijlstra 19988eddac3fSPeter Zijlstra printk("\nother info that might help us debug this:\n\n"); 19998eddac3fSPeter Zijlstra print_circular_lock_scenario(check_src, check_tgt, 20008eddac3fSPeter Zijlstra first_parent); 20018eddac3fSPeter Zijlstra 20028eddac3fSPeter Zijlstra lockdep_print_held_locks(curr); 20038eddac3fSPeter Zijlstra 20048eddac3fSPeter Zijlstra printk("\nstack backtrace:\n"); 20058eddac3fSPeter Zijlstra dump_stack(); 20068eddac3fSPeter Zijlstra } 20078eddac3fSPeter Zijlstra 2008f7c1c6b3SYuyang Du static noinline void print_bfs_bug(int ret) 20098eddac3fSPeter Zijlstra { 20108eddac3fSPeter Zijlstra if (!debug_locks_off_graph_unlock()) 2011f7c1c6b3SYuyang Du return; 20128eddac3fSPeter Zijlstra 20138eddac3fSPeter Zijlstra /* 20148eddac3fSPeter Zijlstra * Breadth-first-search failed, graph got corrupted? 
20158eddac3fSPeter Zijlstra */ 20168eddac3fSPeter Zijlstra WARN(1, "lockdep bfs error:%d\n", ret); 20178eddac3fSPeter Zijlstra } 20188eddac3fSPeter Zijlstra 201961775ed2SBoqun Feng static bool noop_count(struct lock_list *entry, void *data) 20208eddac3fSPeter Zijlstra { 20218eddac3fSPeter Zijlstra (*(unsigned long *)data)++; 202261775ed2SBoqun Feng return false; 20238eddac3fSPeter Zijlstra } 20248eddac3fSPeter Zijlstra 20255216d530SFengguang Wu static unsigned long __lockdep_count_forward_deps(struct lock_list *this) 20268eddac3fSPeter Zijlstra { 20278eddac3fSPeter Zijlstra unsigned long count = 0; 20283f649ab7SKees Cook struct lock_list *target_entry; 20298eddac3fSPeter Zijlstra 2030bc2dd71bSBoqun Feng __bfs_forwards(this, (void *)&count, noop_count, NULL, &target_entry); 20318eddac3fSPeter Zijlstra 20328eddac3fSPeter Zijlstra return count; 20338eddac3fSPeter Zijlstra } 20348eddac3fSPeter Zijlstra unsigned long lockdep_count_forward_deps(struct lock_class *class) 20358eddac3fSPeter Zijlstra { 20368eddac3fSPeter Zijlstra unsigned long ret, flags; 20378eddac3fSPeter Zijlstra struct lock_list this; 20388eddac3fSPeter Zijlstra 20396971c0f3SBoqun Feng __bfs_init_root(&this, class); 20408eddac3fSPeter Zijlstra 2041fcc784beSSteven Rostedt (VMware) raw_local_irq_save(flags); 2042248efb21SPeter Zijlstra lockdep_lock(); 20438eddac3fSPeter Zijlstra ret = __lockdep_count_forward_deps(&this); 2044248efb21SPeter Zijlstra lockdep_unlock(); 2045fcc784beSSteven Rostedt (VMware) raw_local_irq_restore(flags); 20468eddac3fSPeter Zijlstra 20478eddac3fSPeter Zijlstra return ret; 20488eddac3fSPeter Zijlstra } 20498eddac3fSPeter Zijlstra 20505216d530SFengguang Wu static unsigned long __lockdep_count_backward_deps(struct lock_list *this) 20518eddac3fSPeter Zijlstra { 20528eddac3fSPeter Zijlstra unsigned long count = 0; 20533f649ab7SKees Cook struct lock_list *target_entry; 20548eddac3fSPeter Zijlstra 2055bc2dd71bSBoqun Feng __bfs_backwards(this, (void *)&count, noop_count, NULL, &target_entry); 20568eddac3fSPeter Zijlstra 20578eddac3fSPeter Zijlstra return count; 20588eddac3fSPeter Zijlstra } 20598eddac3fSPeter Zijlstra 20608eddac3fSPeter Zijlstra unsigned long lockdep_count_backward_deps(struct lock_class *class) 20618eddac3fSPeter Zijlstra { 20628eddac3fSPeter Zijlstra unsigned long ret, flags; 20638eddac3fSPeter Zijlstra struct lock_list this; 20648eddac3fSPeter Zijlstra 20656971c0f3SBoqun Feng __bfs_init_root(&this, class); 20668eddac3fSPeter Zijlstra 2067fcc784beSSteven Rostedt (VMware) raw_local_irq_save(flags); 2068248efb21SPeter Zijlstra lockdep_lock(); 20698eddac3fSPeter Zijlstra ret = __lockdep_count_backward_deps(&this); 2070248efb21SPeter Zijlstra lockdep_unlock(); 2071fcc784beSSteven Rostedt (VMware) raw_local_irq_restore(flags); 20728eddac3fSPeter Zijlstra 20738eddac3fSPeter Zijlstra return ret; 20748eddac3fSPeter Zijlstra } 20758eddac3fSPeter Zijlstra 20768eddac3fSPeter Zijlstra /* 20778c2c2b44SYuyang Du * Check whether the dependency graph starting at <src> can lead to 2078b11be024SBoqun Feng * <target> or not.
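 *
 * (Illustrative note: @match decides whether a reachable entry is the
 * search target, while @skip, when non-NULL, prunes an entry and every
 * path through it; e.g. check_noncircular() below passes hlock_conflict
 * as @match and NULL as @skip.)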
20798eddac3fSPeter Zijlstra */ 2080b11be024SBoqun Feng static noinline enum bfs_result 20819de0c9bbSBoqun Feng check_path(struct held_lock *target, struct lock_list *src_entry, 20829de0c9bbSBoqun Feng bool (*match)(struct lock_list *entry, void *data), 2083bc2dd71bSBoqun Feng bool (*skip)(struct lock_list *entry, void *data), 20848eddac3fSPeter Zijlstra struct lock_list **target_entry) 20858eddac3fSPeter Zijlstra { 2086b11be024SBoqun Feng enum bfs_result ret; 20878c2c2b44SYuyang Du 2088bc2dd71bSBoqun Feng ret = __bfs_forwards(src_entry, target, match, skip, target_entry); 20898c2c2b44SYuyang Du 2090b11be024SBoqun Feng if (unlikely(bfs_error(ret))) 20918c2c2b44SYuyang Du print_bfs_bug(ret); 20928c2c2b44SYuyang Du 20938c2c2b44SYuyang Du return ret; 20948c2c2b44SYuyang Du } 20958c2c2b44SYuyang Du 20968c2c2b44SYuyang Du /* 20978c2c2b44SYuyang Du * Prove that the dependency graph starting at <src> cannot 20988c2c2b44SYuyang Du * lead to <target>. If it can, there is a circle when adding 20998c2c2b44SYuyang Du * <target> -> <src> dependency. 21008c2c2b44SYuyang Du * 2101b11be024SBoqun Feng * Print an error and return BFS_RMATCH if it does. 21028c2c2b44SYuyang Du */ 2103b11be024SBoqun Feng static noinline enum bfs_result 21048c2c2b44SYuyang Du check_noncircular(struct held_lock *src, struct held_lock *target, 210512593b74SBart Van Assche struct lock_trace **const trace) 21068c2c2b44SYuyang Du { 2107b11be024SBoqun Feng enum bfs_result ret; 21083f649ab7SKees Cook struct lock_list *target_entry; 21096971c0f3SBoqun Feng struct lock_list src_entry; 21106971c0f3SBoqun Feng 21116971c0f3SBoqun Feng bfs_init_root(&src_entry, src); 21128eddac3fSPeter Zijlstra 21138eddac3fSPeter Zijlstra debug_atomic_inc(nr_cyclic_checks); 21148eddac3fSPeter Zijlstra 2115bc2dd71bSBoqun Feng ret = check_path(target, &src_entry, hlock_conflict, NULL, &target_entry); 21168eddac3fSPeter Zijlstra 2117b11be024SBoqun Feng if (unlikely(ret == BFS_RMATCH)) { 211812593b74SBart Van Assche if (!*trace) { 21198c2c2b44SYuyang Du /* 21208c2c2b44SYuyang Du * If save_trace fails here, the printing might 21218c2c2b44SYuyang Du * trigger a WARN but because of the !nr_entries it 21228c2c2b44SYuyang Du * should not do bad things. 21238c2c2b44SYuyang Du */ 212412593b74SBart Van Assche *trace = save_trace(); 21258eddac3fSPeter Zijlstra } 21268eddac3fSPeter Zijlstra 21278c2c2b44SYuyang Du print_circular_bug(&src_entry, target_entry, src, target); 21288c2c2b44SYuyang Du } 21298c2c2b44SYuyang Du 21308c2c2b44SYuyang Du return ret; 21318c2c2b44SYuyang Du } 21328c2c2b44SYuyang Du 2133e7a38f63SYuyang Du #ifdef CONFIG_TRACE_IRQFLAGS 2134948f8376SFrederic Weisbecker 2135f08e3888SBoqun Feng /* 2136f08e3888SBoqun Feng * Forwards and backwards subgraph searching, for the purposes of 2137f08e3888SBoqun Feng * proving that two subgraphs can be connected by a new dependency 2138f08e3888SBoqun Feng * without creating any illegal irq-safe -> irq-unsafe lock dependency. 2139f08e3888SBoqun Feng * 2140f08e3888SBoqun Feng * An irq safe->unsafe deadlock happens with the following conditions: 2141f08e3888SBoqun Feng * 2142f08e3888SBoqun Feng * 1) We have a strong dependency path A -> ... -> B 2143f08e3888SBoqun Feng * 2144f08e3888SBoqun Feng * 2) and we have ENABLED_IRQ usage of B and USED_IN_IRQ usage of A, therefore 2145f08e3888SBoqun Feng * irq can create a new dependency B -> A (consider the case that a holder 2146f08e3888SBoqun Feng * of B gets interrupted by an irq whose handler will try to acquire A).
2147f08e3888SBoqun Feng * 2148f08e3888SBoqun Feng * 3) the dependency circle A -> ... -> B -> A we get from 1) and 2) is a 2149f08e3888SBoqun Feng * strong circle: 2150f08e3888SBoqun Feng * 2151f08e3888SBoqun Feng * For the usage bits of B: 2152f08e3888SBoqun Feng * a) if A -> B is -(*N)->, then B -> A could be any type, so any 2153f08e3888SBoqun Feng * ENABLED_IRQ usage suffices. 2154f08e3888SBoqun Feng * b) if A -> B is -(*R)->, then B -> A must be -(E*)->, so only 2155f08e3888SBoqun Feng * non-_READ ENABLED_IRQ usage suffices. 2156f08e3888SBoqun Feng * 2157f08e3888SBoqun Feng * For the usage bits of A: 2158f08e3888SBoqun Feng * c) if A -> B is -(E*)->, then B -> A could be any type, so any 2159f08e3888SBoqun Feng * USED_IN_IRQ usage suffices. 2160f08e3888SBoqun Feng * d) if A -> B is -(S*)->, then B -> A must be -(*N)->, so only 2161f08e3888SBoqun Feng * non-_READ USED_IN_IRQ usage suffices. 2162f08e3888SBoqun Feng */ 2163f08e3888SBoqun Feng 2164f08e3888SBoqun Feng /* 2165f08e3888SBoqun Feng * There is a strong dependency path in the dependency graph: A -> B, and now 2166f08e3888SBoqun Feng * we need to decide which usage bit of A should be accumulated to detect 2167f08e3888SBoqun Feng * safe->unsafe bugs. 2168f08e3888SBoqun Feng * 2169f08e3888SBoqun Feng * Note that usage_accumulate() is used in backwards search, so ->only_xr 2170f08e3888SBoqun Feng * stands for whether A -> B only has -(S*)-> (in this case ->only_xr is true). 2171f08e3888SBoqun Feng * 2172f08e3888SBoqun Feng * As above, if only_xr is false, which means A -> B has -(E*)-> dependency 2173f08e3888SBoqun Feng * path, any usage of A should be considered. Otherwise, we should only 2174f08e3888SBoqun Feng * consider the non-_READ usage bits. 2175f08e3888SBoqun Feng */ 217661775ed2SBoqun Feng static inline bool usage_accumulate(struct lock_list *entry, void *mask) 2177948f8376SFrederic Weisbecker { 2178f08e3888SBoqun Feng if (!entry->only_xr) 2179948f8376SFrederic Weisbecker *(unsigned long *)mask |= entry->class->usage_mask; 2180f08e3888SBoqun Feng else /* Mask out _READ usage bits */ 2181f08e3888SBoqun Feng *(unsigned long *)mask |= (entry->class->usage_mask & LOCKF_IRQ); 2182948f8376SFrederic Weisbecker 218361775ed2SBoqun Feng return false; 2184948f8376SFrederic Weisbecker } 2185948f8376SFrederic Weisbecker 21868eddac3fSPeter Zijlstra /* 2187f08e3888SBoqun Feng * There is a strong dependency path in the dependency graph: A -> B, and now 2188f08e3888SBoqun Feng * we need to decide which usage bit of B conflicts with the usage bits of A, 2189f08e3888SBoqun Feng * i.e. which usage bit of B may introduce safe->unsafe deadlocks. 2190f08e3888SBoqun Feng * 2191f08e3888SBoqun Feng * As above, if only_xr is false, which means A -> B has -(*N)-> dependency 2192f08e3888SBoqun Feng * path, any usage of B should be considered. Otherwise, we should only 2193f08e3888SBoqun Feng * consider the non-_READ usage bits.
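 *
 * For example (an illustrative restatement of the code below): with
 * ->only_xr set, entry->class->usage_mask is first ANDed with LOCKF_IRQ,
 * which drops the _READ usage bits, before being compared against *@mask;
 * with ->only_xr clear, the full usage_mask is compared.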
21948eddac3fSPeter Zijlstra */ 219561775ed2SBoqun Feng static inline bool usage_match(struct lock_list *entry, void *mask) 21968eddac3fSPeter Zijlstra { 2197f08e3888SBoqun Feng if (!entry->only_xr) 219861775ed2SBoqun Feng return !!(entry->class->usage_mask & *(unsigned long *)mask); 2199f08e3888SBoqun Feng else /* Mask out _READ usage bits */ 2200f08e3888SBoqun Feng return !!((entry->class->usage_mask & LOCKF_IRQ) & *(unsigned long *)mask); 22018eddac3fSPeter Zijlstra } 22028eddac3fSPeter Zijlstra 2203*5f296240SBoqun Feng static inline bool usage_skip(struct lock_list *entry, void *mask) 2204*5f296240SBoqun Feng { 2205*5f296240SBoqun Feng /* 2206*5f296240SBoqun Feng * Skip local_lock() for irq inversion detection. 2207*5f296240SBoqun Feng * 2208*5f296240SBoqun Feng * For !RT, local_lock() is not a real lock, so it won't carry any 2209*5f296240SBoqun Feng * dependency. 2210*5f296240SBoqun Feng * 2211*5f296240SBoqun Feng * For RT, an irq inversion happens when we have lock A and B, and on 2212*5f296240SBoqun Feng * some CPU we can have: 2213*5f296240SBoqun Feng * 2214*5f296240SBoqun Feng * lock(A); 2215*5f296240SBoqun Feng * <interrupted> 2216*5f296240SBoqun Feng * lock(B); 2217*5f296240SBoqun Feng * 2218*5f296240SBoqun Feng * where lock(B) cannot sleep, and we have a dependency B -> ... -> A. 2219*5f296240SBoqun Feng * 2220*5f296240SBoqun Feng * Now we prove local_lock() cannot exist in that dependency. First we 2221*5f296240SBoqun Feng * have the observation for any lock chain L1 -> ... -> Ln, for any 2222*5f296240SBoqun Feng * 1 <= i <= n, Li.inner_wait_type <= L1.inner_wait_type, otherwise 2223*5f296240SBoqun Feng * wait context check will complain. And since B is not a sleep lock, 2224*5f296240SBoqun Feng * therefore B.inner_wait_type >= 2, and since the inner_wait_type of 2225*5f296240SBoqun Feng * local_lock() is 3, which is greater than 2, therefore there is no 2226*5f296240SBoqun Feng * way the local_lock() exists in the dependency B -> ... -> A. 2227*5f296240SBoqun Feng * 2228*5f296240SBoqun Feng * As a result, we will skip local_lock() when we search for irq 2229*5f296240SBoqun Feng * inversion bugs. 2230*5f296240SBoqun Feng */ 2231*5f296240SBoqun Feng if (entry->class->lock_type == LD_LOCK_PERCPU) { 2232*5f296240SBoqun Feng if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG)) 2233*5f296240SBoqun Feng return false; 2234*5f296240SBoqun Feng 2235*5f296240SBoqun Feng return true; 2236*5f296240SBoqun Feng } 2237*5f296240SBoqun Feng 2238*5f296240SBoqun Feng return false; 2239*5f296240SBoqun Feng } 2240*5f296240SBoqun Feng 22418eddac3fSPeter Zijlstra /* 22428eddac3fSPeter Zijlstra * Find a node in the forwards-direction dependency sub-graph starting 22438eddac3fSPeter Zijlstra * at @root->class that matches @usage_mask. 22448eddac3fSPeter Zijlstra * 2245b11be024SBoqun Feng * Return BFS_RMATCH if such a node exists in the subgraph, and put that node 22468eddac3fSPeter Zijlstra * into *@target_entry.
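 *
 * (Illustrative note: together with find_usage_backwards() below, this is
 * the building block of the irq-safety check: a backwards search from the
 * new dependency's prev lock accumulates USED_IN_* usage, and a forwards
 * search from the next lock looks for conflicting ENABLED_* usage; a
 * match means an irq-safe lock could reach an irq-unsafe one through the
 * new dependency.)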
22478eddac3fSPeter Zijlstra */ 2248b11be024SBoqun Feng static enum bfs_result 2249627f364dSFrederic Weisbecker find_usage_forwards(struct lock_list *root, unsigned long usage_mask, 22508eddac3fSPeter Zijlstra struct lock_list **target_entry) 22518eddac3fSPeter Zijlstra { 2252b11be024SBoqun Feng enum bfs_result result; 22538eddac3fSPeter Zijlstra 22548eddac3fSPeter Zijlstra debug_atomic_inc(nr_find_usage_forwards_checks); 22558eddac3fSPeter Zijlstra 2256*5f296240SBoqun Feng result = __bfs_forwards(root, &usage_mask, usage_match, usage_skip, target_entry); 22578eddac3fSPeter Zijlstra 22588eddac3fSPeter Zijlstra return result; 22598eddac3fSPeter Zijlstra } 22608eddac3fSPeter Zijlstra 22618eddac3fSPeter Zijlstra /* 22628eddac3fSPeter Zijlstra * Find a node in the backwards-direction dependency sub-graph starting 22638eddac3fSPeter Zijlstra * at @root->class that matches @usage_mask. 22648eddac3fSPeter Zijlstra */ 2265b11be024SBoqun Feng static enum bfs_result 2266627f364dSFrederic Weisbecker find_usage_backwards(struct lock_list *root, unsigned long usage_mask, 22678eddac3fSPeter Zijlstra struct lock_list **target_entry) 22688eddac3fSPeter Zijlstra { 2269b11be024SBoqun Feng enum bfs_result result; 22708eddac3fSPeter Zijlstra 22718eddac3fSPeter Zijlstra debug_atomic_inc(nr_find_usage_backwards_checks); 22728eddac3fSPeter Zijlstra 2273*5f296240SBoqun Feng result = __bfs_backwards(root, &usage_mask, usage_match, usage_skip, target_entry); 22748eddac3fSPeter Zijlstra 22758eddac3fSPeter Zijlstra return result; 22768eddac3fSPeter Zijlstra } 22778eddac3fSPeter Zijlstra 22788eddac3fSPeter Zijlstra static void print_lock_class_header(struct lock_class *class, int depth) 22798eddac3fSPeter Zijlstra { 22808eddac3fSPeter Zijlstra int bit; 22818eddac3fSPeter Zijlstra 22828eddac3fSPeter Zijlstra printk("%*s->", depth, ""); 22838eddac3fSPeter Zijlstra print_lock_name(class); 22848ca2b56cSWaiman Long #ifdef CONFIG_DEBUG_LOCKDEP 22858ca2b56cSWaiman Long printk(KERN_CONT " ops: %lu", debug_class_ops_read(class)); 22868ca2b56cSWaiman Long #endif 2287f943fe0fSDmitry Vyukov printk(KERN_CONT " {\n"); 22888eddac3fSPeter Zijlstra 22892bb8945bSPeter Zijlstra for (bit = 0; bit < LOCK_TRACE_STATES; bit++) { 22908eddac3fSPeter Zijlstra if (class->usage_mask & (1 << bit)) { 22918eddac3fSPeter Zijlstra int len = depth; 22928eddac3fSPeter Zijlstra 22938eddac3fSPeter Zijlstra len += printk("%*s %s", depth, "", usage_str[bit]); 2294f943fe0fSDmitry Vyukov len += printk(KERN_CONT " at:\n"); 229512593b74SBart Van Assche print_lock_trace(class->usage_traces[bit], len); 22968eddac3fSPeter Zijlstra } 22978eddac3fSPeter Zijlstra } 22988eddac3fSPeter Zijlstra printk("%*s }\n", depth, ""); 22998eddac3fSPeter Zijlstra 230004860d48SBorislav Petkov printk("%*s ...
key at: [<%px>] %pS\n", 2301f943fe0fSDmitry Vyukov depth, "", class->key, class->key); 23028eddac3fSPeter Zijlstra } 23038eddac3fSPeter Zijlstra 23048eddac3fSPeter Zijlstra /* 23058eddac3fSPeter Zijlstra * printk the shortest lock dependencies from @start to @end in reverse order: 23068eddac3fSPeter Zijlstra */ 23078eddac3fSPeter Zijlstra static void __used 23088eddac3fSPeter Zijlstra print_shortest_lock_dependencies(struct lock_list *leaf, 23098eddac3fSPeter Zijlstra struct lock_list *root) 23108eddac3fSPeter Zijlstra { 23118eddac3fSPeter Zijlstra struct lock_list *entry = leaf; 23128eddac3fSPeter Zijlstra int depth; 23138eddac3fSPeter Zijlstra 23148eddac3fSPeter Zijlstra /*compute depth from generated tree by BFS*/ 23158eddac3fSPeter Zijlstra depth = get_lock_depth(leaf); 23168eddac3fSPeter Zijlstra 23178eddac3fSPeter Zijlstra do { 23188eddac3fSPeter Zijlstra print_lock_class_header(entry->class, depth); 23198eddac3fSPeter Zijlstra printk("%*s ... acquired at:\n", depth, ""); 232012593b74SBart Van Assche print_lock_trace(entry->trace, 2); 23218eddac3fSPeter Zijlstra printk("\n"); 23228eddac3fSPeter Zijlstra 23238eddac3fSPeter Zijlstra if (depth == 0 && (entry != root)) { 23248eddac3fSPeter Zijlstra printk("lockdep:%s bad path found in chain graph\n", __func__); 23258eddac3fSPeter Zijlstra break; 23268eddac3fSPeter Zijlstra } 23278eddac3fSPeter Zijlstra 23288eddac3fSPeter Zijlstra entry = get_lock_parent(entry); 23298eddac3fSPeter Zijlstra depth--; 23308eddac3fSPeter Zijlstra } while (entry && (depth >= 0)); 23318eddac3fSPeter Zijlstra } 23328eddac3fSPeter Zijlstra 23338eddac3fSPeter Zijlstra static void 23348eddac3fSPeter Zijlstra print_irq_lock_scenario(struct lock_list *safe_entry, 23358eddac3fSPeter Zijlstra struct lock_list *unsafe_entry, 23368eddac3fSPeter Zijlstra struct lock_class *prev_class, 23378eddac3fSPeter Zijlstra struct lock_class *next_class) 23388eddac3fSPeter Zijlstra { 23398eddac3fSPeter Zijlstra struct lock_class *safe_class = safe_entry->class; 23408eddac3fSPeter Zijlstra struct lock_class *unsafe_class = unsafe_entry->class; 23418eddac3fSPeter Zijlstra struct lock_class *middle_class = prev_class; 23428eddac3fSPeter Zijlstra 23438eddac3fSPeter Zijlstra if (middle_class == safe_class) 23448eddac3fSPeter Zijlstra middle_class = next_class; 23458eddac3fSPeter Zijlstra 23468eddac3fSPeter Zijlstra /* 23478eddac3fSPeter Zijlstra * A direct locking problem where unsafe_class lock is taken 23488eddac3fSPeter Zijlstra * directly by safe_class lock, then all we need to show 23498eddac3fSPeter Zijlstra * is the deadlock scenario, as it is obvious that the 23508eddac3fSPeter Zijlstra * unsafe lock is taken under the safe lock. 23518eddac3fSPeter Zijlstra * 23528eddac3fSPeter Zijlstra * But if there is a chain instead, where the safe lock takes 23538eddac3fSPeter Zijlstra * an intermediate lock (middle_class) where this lock is 23548eddac3fSPeter Zijlstra * not the same as the safe lock, then the lock chain is 23558eddac3fSPeter Zijlstra * used to describe the problem. Otherwise we would need 23568eddac3fSPeter Zijlstra * to show a different CPU case for each link in the chain 23578eddac3fSPeter Zijlstra * from the safe_class lock to the unsafe_class lock. 
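 *
 * For example (hypothetical classes), with a chain
 * safe_class -> middle_class -> unsafe_class, the single summary
 *
 *	Chain exists of: safe --> middle --> unsafe
 *
 * is printed once instead of one interrupt scenario per link.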
23588eddac3fSPeter Zijlstra */ 23598eddac3fSPeter Zijlstra if (middle_class != unsafe_class) { 23608eddac3fSPeter Zijlstra printk("Chain exists of:\n "); 23618eddac3fSPeter Zijlstra __print_lock_name(safe_class); 2362f943fe0fSDmitry Vyukov printk(KERN_CONT " --> "); 23638eddac3fSPeter Zijlstra __print_lock_name(middle_class); 2364f943fe0fSDmitry Vyukov printk(KERN_CONT " --> "); 23658eddac3fSPeter Zijlstra __print_lock_name(unsafe_class); 2366f943fe0fSDmitry Vyukov printk(KERN_CONT "\n\n"); 23678eddac3fSPeter Zijlstra } 23688eddac3fSPeter Zijlstra 23698eddac3fSPeter Zijlstra printk(" Possible interrupt unsafe locking scenario:\n\n"); 23708eddac3fSPeter Zijlstra printk(" CPU0 CPU1\n"); 23718eddac3fSPeter Zijlstra printk(" ---- ----\n"); 23728eddac3fSPeter Zijlstra printk(" lock("); 23738eddac3fSPeter Zijlstra __print_lock_name(unsafe_class); 2374f943fe0fSDmitry Vyukov printk(KERN_CONT ");\n"); 23758eddac3fSPeter Zijlstra printk(" local_irq_disable();\n"); 23768eddac3fSPeter Zijlstra printk(" lock("); 23778eddac3fSPeter Zijlstra __print_lock_name(safe_class); 2378f943fe0fSDmitry Vyukov printk(KERN_CONT ");\n"); 23798eddac3fSPeter Zijlstra printk(" lock("); 23808eddac3fSPeter Zijlstra __print_lock_name(middle_class); 2381f943fe0fSDmitry Vyukov printk(KERN_CONT ");\n"); 23828eddac3fSPeter Zijlstra printk(" <Interrupt>\n"); 23838eddac3fSPeter Zijlstra printk(" lock("); 23848eddac3fSPeter Zijlstra __print_lock_name(safe_class); 2385f943fe0fSDmitry Vyukov printk(KERN_CONT ");\n"); 23868eddac3fSPeter Zijlstra printk("\n *** DEADLOCK ***\n\n"); 23878eddac3fSPeter Zijlstra } 23888eddac3fSPeter Zijlstra 2389f7c1c6b3SYuyang Du static void 23908eddac3fSPeter Zijlstra print_bad_irq_dependency(struct task_struct *curr, 23918eddac3fSPeter Zijlstra struct lock_list *prev_root, 23928eddac3fSPeter Zijlstra struct lock_list *next_root, 23938eddac3fSPeter Zijlstra struct lock_list *backwards_entry, 23948eddac3fSPeter Zijlstra struct lock_list *forwards_entry, 23958eddac3fSPeter Zijlstra struct held_lock *prev, 23968eddac3fSPeter Zijlstra struct held_lock *next, 23978eddac3fSPeter Zijlstra enum lock_usage_bit bit1, 23988eddac3fSPeter Zijlstra enum lock_usage_bit bit2, 23998eddac3fSPeter Zijlstra const char *irqclass) 24008eddac3fSPeter Zijlstra { 24018eddac3fSPeter Zijlstra if (!debug_locks_off_graph_unlock() || debug_locks_silent) 2402f7c1c6b3SYuyang Du return; 24038eddac3fSPeter Zijlstra 2404681fbec8SPaul E. McKenney pr_warn("\n"); 2405a5dd63efSPaul E. McKenney pr_warn("=====================================================\n"); 2406a5dd63efSPaul E. McKenney pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n", 24078eddac3fSPeter Zijlstra irqclass, irqclass); 24088eddac3fSPeter Zijlstra print_kernel_ident(); 2409a5dd63efSPaul E. McKenney pr_warn("-----------------------------------------------------\n"); 2410681fbec8SPaul E. McKenney pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", 24118eddac3fSPeter Zijlstra curr->comm, task_pid_nr(curr), 2412f9ad4a5fSPeter Zijlstra lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT, 24138eddac3fSPeter Zijlstra curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, 2414f9ad4a5fSPeter Zijlstra lockdep_hardirqs_enabled(), 24158eddac3fSPeter Zijlstra curr->softirqs_enabled); 24168eddac3fSPeter Zijlstra print_lock(next); 24178eddac3fSPeter Zijlstra 2418681fbec8SPaul E. McKenney pr_warn("\nand this task is already holding:\n"); 24198eddac3fSPeter Zijlstra print_lock(prev); 2420681fbec8SPaul E. 
McKenney pr_warn("which would create a new lock dependency:\n"); 24218eddac3fSPeter Zijlstra print_lock_name(hlock_class(prev)); 2422681fbec8SPaul E. McKenney pr_cont(" ->"); 24238eddac3fSPeter Zijlstra print_lock_name(hlock_class(next)); 2424681fbec8SPaul E. McKenney pr_cont("\n"); 24258eddac3fSPeter Zijlstra 2426681fbec8SPaul E. McKenney pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n", 24278eddac3fSPeter Zijlstra irqclass); 24288eddac3fSPeter Zijlstra print_lock_name(backwards_entry->class); 2429681fbec8SPaul E. McKenney pr_warn("\n... which became %s-irq-safe at:\n", irqclass); 24308eddac3fSPeter Zijlstra 243112593b74SBart Van Assche print_lock_trace(backwards_entry->class->usage_traces[bit1], 1); 24328eddac3fSPeter Zijlstra 2433681fbec8SPaul E. McKenney pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass); 24348eddac3fSPeter Zijlstra print_lock_name(forwards_entry->class); 2435681fbec8SPaul E. McKenney pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass); 2436681fbec8SPaul E. McKenney pr_warn("..."); 24378eddac3fSPeter Zijlstra 243812593b74SBart Van Assche print_lock_trace(forwards_entry->class->usage_traces[bit2], 1); 24398eddac3fSPeter Zijlstra 2440681fbec8SPaul E. McKenney pr_warn("\nother info that might help us debug this:\n\n"); 24418eddac3fSPeter Zijlstra print_irq_lock_scenario(backwards_entry, forwards_entry, 24428eddac3fSPeter Zijlstra hlock_class(prev), hlock_class(next)); 24438eddac3fSPeter Zijlstra 24448eddac3fSPeter Zijlstra lockdep_print_held_locks(curr); 24458eddac3fSPeter Zijlstra 2446681fbec8SPaul E. McKenney pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass); 244712593b74SBart Van Assche prev_root->trace = save_trace(); 244812593b74SBart Van Assche if (!prev_root->trace) 2449f7c1c6b3SYuyang Du return; 24508eddac3fSPeter Zijlstra print_shortest_lock_dependencies(backwards_entry, prev_root); 24518eddac3fSPeter Zijlstra 2452681fbec8SPaul E. McKenney pr_warn("\nthe dependencies between the lock to be acquired"); 2453681fbec8SPaul E. McKenney pr_warn(" and %s-irq-unsafe lock:\n", irqclass); 245412593b74SBart Van Assche next_root->trace = save_trace(); 245512593b74SBart Van Assche if (!next_root->trace) 2456f7c1c6b3SYuyang Du return; 24578eddac3fSPeter Zijlstra print_shortest_lock_dependencies(forwards_entry, next_root); 24588eddac3fSPeter Zijlstra 2459681fbec8SPaul E. 
McKenney pr_warn("\nstack backtrace:\n"); 24608eddac3fSPeter Zijlstra dump_stack(); 24618eddac3fSPeter Zijlstra } 24628eddac3fSPeter Zijlstra 24638eddac3fSPeter Zijlstra static const char *state_names[] = { 24648eddac3fSPeter Zijlstra #define LOCKDEP_STATE(__STATE) \ 24658eddac3fSPeter Zijlstra __stringify(__STATE), 24668eddac3fSPeter Zijlstra #include "lockdep_states.h" 24678eddac3fSPeter Zijlstra #undef LOCKDEP_STATE 24688eddac3fSPeter Zijlstra }; 24698eddac3fSPeter Zijlstra 24708eddac3fSPeter Zijlstra static const char *state_rnames[] = { 24718eddac3fSPeter Zijlstra #define LOCKDEP_STATE(__STATE) \ 24728eddac3fSPeter Zijlstra __stringify(__STATE)"-READ", 24738eddac3fSPeter Zijlstra #include "lockdep_states.h" 24748eddac3fSPeter Zijlstra #undef LOCKDEP_STATE 24758eddac3fSPeter Zijlstra }; 24768eddac3fSPeter Zijlstra 24778eddac3fSPeter Zijlstra static inline const char *state_name(enum lock_usage_bit bit) 24788eddac3fSPeter Zijlstra { 2479c902a1e8SFrederic Weisbecker if (bit & LOCK_USAGE_READ_MASK) 2480c902a1e8SFrederic Weisbecker return state_rnames[bit >> LOCK_USAGE_DIR_MASK]; 2481c902a1e8SFrederic Weisbecker else 2482c902a1e8SFrederic Weisbecker return state_names[bit >> LOCK_USAGE_DIR_MASK]; 24838eddac3fSPeter Zijlstra } 24848eddac3fSPeter Zijlstra 2485948f8376SFrederic Weisbecker /* 2486948f8376SFrederic Weisbecker * The bit number is encoded like: 2487948f8376SFrederic Weisbecker * 2488948f8376SFrederic Weisbecker * bit0: 0 exclusive, 1 read lock 2489948f8376SFrederic Weisbecker * bit1: 0 used in irq, 1 irq enabled 2490948f8376SFrederic Weisbecker * bit2-n: state 2491948f8376SFrederic Weisbecker */ 24928eddac3fSPeter Zijlstra static int exclusive_bit(int new_bit) 24938eddac3fSPeter Zijlstra { 2494bba2a8f1SFrederic Weisbecker int state = new_bit & LOCK_USAGE_STATE_MASK; 2495bba2a8f1SFrederic Weisbecker int dir = new_bit & LOCK_USAGE_DIR_MASK; 24968eddac3fSPeter Zijlstra 24978eddac3fSPeter Zijlstra /* 24988eddac3fSPeter Zijlstra * keep the state, flip the direction bit and strip the read bit. 24998eddac3fSPeter Zijlstra */ 2500bba2a8f1SFrederic Weisbecker return state | (dir ^ LOCK_USAGE_DIR_MASK); 25018eddac3fSPeter Zijlstra } 25028eddac3fSPeter Zijlstra 2503948f8376SFrederic Weisbecker /* 2504948f8376SFrederic Weisbecker * Observe that when given a bitmask where each bitnr is encoded as above, a 2505948f8376SFrederic Weisbecker * right shift of the mask decrements each individual bitnr by 1 and, 2506948f8376SFrederic Weisbecker * conversely, a left shift increments each individual bitnr by 1. 2507948f8376SFrederic Weisbecker * 2508948f8376SFrederic Weisbecker * So for all bits whose number has LOCK_ENABLED_* set (bitnr1 == 1), we can 2509948f8376SFrederic Weisbecker * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0) 2510948f8376SFrederic Weisbecker * instead by subtracting 2 from the bit number, or shifting the mask right by 2. 2511948f8376SFrederic Weisbecker * 2512948f8376SFrederic Weisbecker * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2. 2513948f8376SFrederic Weisbecker * 2514948f8376SFrederic Weisbecker * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is 2515948f8376SFrederic Weisbecker * all bits set) and recompose with bitnr1 flipped.
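 *
 * A worked example (per the encoding above): LOCK_USED_IN_HARDIRQ has
 * bitnr1 == 0, and shifting its mask bit left by LOCK_USAGE_DIR_MASK
 * (i.e. by 2) yields LOCK_ENABLED_HARDIRQ (bitnr1 == 1), so
 *
 *	invert_dir_mask(LOCKF_USED_IN_HARDIRQ) == LOCKF_ENABLED_HARDIRQ
 *
 * and the right shift performs the inverse transformation.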
2516948f8376SFrederic Weisbecker */ 2517948f8376SFrederic Weisbecker static unsigned long invert_dir_mask(unsigned long mask) 25188eddac3fSPeter Zijlstra { 2519948f8376SFrederic Weisbecker unsigned long excl = 0; 25208eddac3fSPeter Zijlstra 2521948f8376SFrederic Weisbecker /* Invert dir */ 2522948f8376SFrederic Weisbecker excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK; 2523948f8376SFrederic Weisbecker excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK; 25248eddac3fSPeter Zijlstra 2525948f8376SFrederic Weisbecker return excl; 25268eddac3fSPeter Zijlstra } 25278eddac3fSPeter Zijlstra 2528948f8376SFrederic Weisbecker /* 2529f08e3888SBoqun Feng * Note that a LOCK_ENABLED_IRQ_*_READ usage and a LOCK_USED_IN_IRQ_*_READ 2530f08e3888SBoqun Feng * usage may cause deadlock too, for example: 2531f08e3888SBoqun Feng * 2532f08e3888SBoqun Feng * P1 P2 2533f08e3888SBoqun Feng * <irq disabled> 2534f08e3888SBoqun Feng * write_lock(l1); <irq enabled> 2535f08e3888SBoqun Feng * read_lock(l2); 2536f08e3888SBoqun Feng * write_lock(l2); 2537f08e3888SBoqun Feng * <in irq> 2538f08e3888SBoqun Feng * read_lock(l1); 2539f08e3888SBoqun Feng * 2540f08e3888SBoqun Feng * In the above case, l1 will be marked as LOCK_USED_IN_IRQ_HARDIRQ_READ and l2 2541f08e3888SBoqun Feng * will be marked as LOCK_ENABLED_IRQ_HARDIRQ_READ, and this is a possible 2542f08e3888SBoqun Feng * deadlock. 2543f08e3888SBoqun Feng * 2544f08e3888SBoqun Feng * In fact, all of the following cases may cause deadlocks: 2545f08e3888SBoqun Feng * 2546f08e3888SBoqun Feng * LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_* 2547f08e3888SBoqun Feng * LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_* 2548f08e3888SBoqun Feng * LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_*_READ 2549f08e3888SBoqun Feng * LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_*_READ 2550f08e3888SBoqun Feng * 2551f08e3888SBoqun Feng * As a result, to calculate the "exclusive mask", first we invert the 2552f08e3888SBoqun Feng * direction (USED_IN/ENABLED) of the original mask, and 1) for all bits with 2553f08e3888SBoqun Feng * bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*). 2) for all 2554f08e3888SBoqun Feng * bits with bitnr0 cleared (LOCK_*), add those with bitnr0 set (LOCK_*_READ). 2555948f8376SFrederic Weisbecker */ 2556948f8376SFrederic Weisbecker static unsigned long exclusive_mask(unsigned long mask) 2557948f8376SFrederic Weisbecker { 2558948f8376SFrederic Weisbecker unsigned long excl = invert_dir_mask(mask); 2559948f8376SFrederic Weisbecker 2560948f8376SFrederic Weisbecker excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK; 2561f08e3888SBoqun Feng excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK; 2562948f8376SFrederic Weisbecker 2563948f8376SFrederic Weisbecker return excl; 2564948f8376SFrederic Weisbecker } 2565948f8376SFrederic Weisbecker 2566948f8376SFrederic Weisbecker /* 2567948f8376SFrederic Weisbecker * Retrieve the _possible_ original mask to which @mask is 2568948f8376SFrederic Weisbecker * exclusive. I.e., this is the opposite of exclusive_mask(). 2569948f8376SFrederic Weisbecker * Note that 2 possible original bits can match an exclusive 2570948f8376SFrederic Weisbecker * bit: one has LOCK_USAGE_READ_MASK set, the other has it 2571948f8376SFrederic Weisbecker * cleared. So both are returned for each exclusive bit.
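 *
 * As an illustration (abridged to the hardirq bits):
 *
 *	exclusive_mask(LOCKF_USED_IN_HARDIRQ)
 *	    == LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_HARDIRQ_READ
 *	original_mask(LOCKF_ENABLED_HARDIRQ)
 *	    == LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_HARDIRQ_READ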
2572948f8376SFrederic Weisbecker */ 2573948f8376SFrederic Weisbecker static unsigned long original_mask(unsigned long mask) 2574948f8376SFrederic Weisbecker { 2575948f8376SFrederic Weisbecker unsigned long excl = invert_dir_mask(mask); 2576948f8376SFrederic Weisbecker 2577948f8376SFrederic Weisbecker /* Include read in existing usages */ 2578f08e3888SBoqun Feng excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK; 2579948f8376SFrederic Weisbecker excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK; 2580948f8376SFrederic Weisbecker 2581948f8376SFrederic Weisbecker return excl; 2582948f8376SFrederic Weisbecker } 2583948f8376SFrederic Weisbecker 2584948f8376SFrederic Weisbecker /* 2585948f8376SFrederic Weisbecker * Find the first matching pair of bits between an original 2586948f8376SFrederic Weisbecker * usage mask and an exclusive usage mask. 2587948f8376SFrederic Weisbecker */ 2588948f8376SFrederic Weisbecker static int find_exclusive_match(unsigned long mask, 2589948f8376SFrederic Weisbecker unsigned long excl_mask, 2590948f8376SFrederic Weisbecker enum lock_usage_bit *bitp, 2591948f8376SFrederic Weisbecker enum lock_usage_bit *excl_bitp) 2592948f8376SFrederic Weisbecker { 2593f08e3888SBoqun Feng int bit, excl, excl_read; 2594948f8376SFrederic Weisbecker 2595948f8376SFrederic Weisbecker for_each_set_bit(bit, &mask, LOCK_USED) { 2596f08e3888SBoqun Feng /* 2597f08e3888SBoqun Feng * exclusive_bit() strips the read bit; however, 2598f08e3888SBoqun Feng * LOCK_ENABLED_IRQ_*_READ may cause deadlocks too, so we need 2599f08e3888SBoqun Feng * to search excl | LOCK_USAGE_READ_MASK as well. 2600f08e3888SBoqun Feng */ 2601948f8376SFrederic Weisbecker excl = exclusive_bit(bit); 2602f08e3888SBoqun Feng excl_read = excl | LOCK_USAGE_READ_MASK; 2603948f8376SFrederic Weisbecker if (excl_mask & lock_flag(excl)) { 2604948f8376SFrederic Weisbecker *bitp = bit; 2605948f8376SFrederic Weisbecker *excl_bitp = excl; 2606948f8376SFrederic Weisbecker return 0; 2607f08e3888SBoqun Feng } else if (excl_mask & lock_flag(excl_read)) { 2608f08e3888SBoqun Feng *bitp = bit; 2609f08e3888SBoqun Feng *excl_bitp = excl_read; 2610f08e3888SBoqun Feng return 0; 2611948f8376SFrederic Weisbecker } 2612948f8376SFrederic Weisbecker } 2613948f8376SFrederic Weisbecker return -1; 2614948f8376SFrederic Weisbecker } 2615948f8376SFrederic Weisbecker 2616948f8376SFrederic Weisbecker /* 2617948f8376SFrederic Weisbecker * Prove that the new dependency does not connect a hardirq-safe(-read) 2618948f8376SFrederic Weisbecker * lock with a hardirq-unsafe lock - to achieve this we search 2619948f8376SFrederic Weisbecker * the backwards-subgraph starting at <prev>, and the 2620948f8376SFrederic Weisbecker * forwards-subgraph starting at <next>: 2621948f8376SFrederic Weisbecker */ 2622948f8376SFrederic Weisbecker static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, 26238eddac3fSPeter Zijlstra struct held_lock *next) 26248eddac3fSPeter Zijlstra { 2625948f8376SFrederic Weisbecker unsigned long usage_mask = 0, forward_mask, backward_mask; 2626948f8376SFrederic Weisbecker enum lock_usage_bit forward_bit = 0, backward_bit = 0; 26273f649ab7SKees Cook struct lock_list *target_entry1; 26283f649ab7SKees Cook struct lock_list *target_entry; 2629948f8376SFrederic Weisbecker struct lock_list this, that; 2630b11be024SBoqun Feng enum bfs_result ret; 26318eddac3fSPeter Zijlstra 2632948f8376SFrederic Weisbecker /* 2633948f8376SFrederic Weisbecker * Step 1: gather all hard/soft IRQ usages backward in an 2634948f8376SFrederic Weisbecker *
accumulated usage mask. 2635948f8376SFrederic Weisbecker */ 2636f08e3888SBoqun Feng bfs_init_rootb(&this, prev); 2637948f8376SFrederic Weisbecker 2638*5f296240SBoqun Feng ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, usage_skip, NULL); 2639b11be024SBoqun Feng if (bfs_error(ret)) { 2640f7c1c6b3SYuyang Du print_bfs_bug(ret); 2641f7c1c6b3SYuyang Du return 0; 2642f7c1c6b3SYuyang Du } 2643948f8376SFrederic Weisbecker 2644948f8376SFrederic Weisbecker usage_mask &= LOCKF_USED_IN_IRQ_ALL; 2645948f8376SFrederic Weisbecker if (!usage_mask) 26468eddac3fSPeter Zijlstra return 1; 2647948f8376SFrederic Weisbecker 2648948f8376SFrederic Weisbecker /* 2649948f8376SFrederic Weisbecker * Step 2: find exclusive uses forward that match the previous 2650948f8376SFrederic Weisbecker * backward accumulated mask. 2651948f8376SFrederic Weisbecker */ 2652948f8376SFrederic Weisbecker forward_mask = exclusive_mask(usage_mask); 2653948f8376SFrederic Weisbecker 2654f08e3888SBoqun Feng bfs_init_root(&that, next); 2655948f8376SFrederic Weisbecker 2656948f8376SFrederic Weisbecker ret = find_usage_forwards(&that, forward_mask, &target_entry1); 2657b11be024SBoqun Feng if (bfs_error(ret)) { 2658f7c1c6b3SYuyang Du print_bfs_bug(ret); 2659f7c1c6b3SYuyang Du return 0; 2660f7c1c6b3SYuyang Du } 2661b11be024SBoqun Feng if (ret == BFS_RNOMATCH) 2662b11be024SBoqun Feng return 1; 2663948f8376SFrederic Weisbecker 2664948f8376SFrederic Weisbecker /* 2665948f8376SFrederic Weisbecker * Step 3: we found a bad match! Now retrieve a lock from the backward 2666948f8376SFrederic Weisbecker * list whose usage mask matches the exclusive usage mask from the 2667948f8376SFrederic Weisbecker * lock found on the forward list. 2668948f8376SFrederic Weisbecker */ 2669948f8376SFrederic Weisbecker backward_mask = original_mask(target_entry1->class->usage_mask); 2670948f8376SFrederic Weisbecker 2671948f8376SFrederic Weisbecker ret = find_usage_backwards(&this, backward_mask, &target_entry); 2672b11be024SBoqun Feng if (bfs_error(ret)) { 2673f7c1c6b3SYuyang Du print_bfs_bug(ret); 2674f7c1c6b3SYuyang Du return 0; 2675f7c1c6b3SYuyang Du } 2676b11be024SBoqun Feng if (DEBUG_LOCKS_WARN_ON(ret == BFS_RNOMATCH)) 2677948f8376SFrederic Weisbecker return 1; 2678948f8376SFrederic Weisbecker 2679948f8376SFrederic Weisbecker /* 2680948f8376SFrederic Weisbecker * Step 4: narrow down to a pair of incompatible usage bits 2681948f8376SFrederic Weisbecker * and report it. 
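 *
 * To make the steps concrete (hypothetical classes L1 and L2): if some
 * L1 reachable backwards from <prev> has LOCK_USED_IN_HARDIRQ set, and
 * some L2 reachable forwards from <next> has LOCK_ENABLED_HARDIRQ set,
 * then the new <prev> -> <next> edge closes a hardirq-safe ->
 * hardirq-unsafe path L1 -> ... -> <prev> -> <next> -> ... -> L2, and
 * steps 3 and 4 pin down the (L1, L2) pair and the offending usage
 * bits for the report.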
2682948f8376SFrederic Weisbecker */ 2683948f8376SFrederic Weisbecker ret = find_exclusive_match(target_entry->class->usage_mask, 2684948f8376SFrederic Weisbecker target_entry1->class->usage_mask, 2685948f8376SFrederic Weisbecker &backward_bit, &forward_bit); 2686948f8376SFrederic Weisbecker if (DEBUG_LOCKS_WARN_ON(ret == -1)) 2687948f8376SFrederic Weisbecker return 1; 2688948f8376SFrederic Weisbecker 2689f7c1c6b3SYuyang Du print_bad_irq_dependency(curr, &this, &that, 2690948f8376SFrederic Weisbecker target_entry, target_entry1, 2691948f8376SFrederic Weisbecker prev, next, 2692948f8376SFrederic Weisbecker backward_bit, forward_bit, 2693948f8376SFrederic Weisbecker state_name(backward_bit)); 2694f7c1c6b3SYuyang Du 2695f7c1c6b3SYuyang Du return 0; 26968eddac3fSPeter Zijlstra } 26978eddac3fSPeter Zijlstra 26988eddac3fSPeter Zijlstra #else 26998eddac3fSPeter Zijlstra 2700948f8376SFrederic Weisbecker static inline int check_irq_usage(struct task_struct *curr, 2701948f8376SFrederic Weisbecker struct held_lock *prev, struct held_lock *next) 27028eddac3fSPeter Zijlstra { 27038eddac3fSPeter Zijlstra return 1; 27048eddac3fSPeter Zijlstra } 2705*5f296240SBoqun Feng 2706*5f296240SBoqun Feng static inline bool usage_skip(struct lock_list *entry, void *mask) 2707*5f296240SBoqun Feng { 2708*5f296240SBoqun Feng return false; 2709*5f296240SBoqun Feng } 2710*5f296240SBoqun Feng 2711b3b9c187SWaiman Long #endif /* CONFIG_TRACE_IRQFLAGS */ 27128eddac3fSPeter Zijlstra 2713175b1a60SPeter Zijlstra #ifdef CONFIG_LOCKDEP_SMALL 2714175b1a60SPeter Zijlstra /* 2715175b1a60SPeter Zijlstra * Check whether the dependency graph starting at <src> can lead to 2716175b1a60SPeter Zijlstra * <target> or not. If it can, the <src> -> <target> dependency is already 2717175b1a60SPeter Zijlstra * in the graph. 2718175b1a60SPeter Zijlstra * 2719175b1a60SPeter Zijlstra * Return BFS_RMATCH if it does, or BFS_RNOMATCH if it does not; return BFS_E* if 2720175b1a60SPeter Zijlstra * any error appears in the bfs search. 2721175b1a60SPeter Zijlstra */ 2722175b1a60SPeter Zijlstra static noinline enum bfs_result 2723175b1a60SPeter Zijlstra check_redundant(struct held_lock *src, struct held_lock *target) 2724175b1a60SPeter Zijlstra { 2725175b1a60SPeter Zijlstra enum bfs_result ret; 2726175b1a60SPeter Zijlstra struct lock_list *target_entry; 2727175b1a60SPeter Zijlstra struct lock_list src_entry; 2728175b1a60SPeter Zijlstra 2729175b1a60SPeter Zijlstra bfs_init_root(&src_entry, src); 2730175b1a60SPeter Zijlstra /* 2731175b1a60SPeter Zijlstra * Special setup for check_redundant(). 2732175b1a60SPeter Zijlstra * 2733175b1a60SPeter Zijlstra * To report redundant, we need to find a strong dependency path that 2734175b1a60SPeter Zijlstra * is equal to or stronger than <src> -> <target>. So if <src> is E, 2735175b1a60SPeter Zijlstra * we need to let __bfs() only search for a path starting at a -(E*)->; 2736175b1a60SPeter Zijlstra * we achieve this by setting the initial node's ->only_xr to true in 2737175b1a60SPeter Zijlstra * that case. And if <src> is S, we set initial ->only_xr to false 2738175b1a60SPeter Zijlstra * because both -(S*)-> (equal) and -(E*)-> (stronger) are redundant.
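 *
 * For example (hypothetical locks A and B): if some A -(E*)-> ... -> B
 * path already exists, a new A -(S)-> B or A -(E)-> B dependency is
 * redundant; but if only A -(S*)-> ... -> B paths exist, a new
 * A -(E)-> B is stronger than anything recorded and must not be
 * treated as redundant.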
2739175b1a60SPeter Zijlstra */ 2740175b1a60SPeter Zijlstra src_entry.only_xr = src->read == 0; 2741175b1a60SPeter Zijlstra 2742175b1a60SPeter Zijlstra debug_atomic_inc(nr_redundant_checks); 2743175b1a60SPeter Zijlstra 2744*5f296240SBoqun Feng /* 2745*5f296240SBoqun Feng * Note: we skip local_lock() for the redundant check because, as the 2746*5f296240SBoqun Feng * comment in usage_skip() explains, A -> local_lock() -> B and A -> B are not 2747*5f296240SBoqun Feng * the same. 2748*5f296240SBoqun Feng */ 2749*5f296240SBoqun Feng ret = check_path(target, &src_entry, hlock_equal, usage_skip, &target_entry); 2750175b1a60SPeter Zijlstra 2751175b1a60SPeter Zijlstra if (ret == BFS_RMATCH) 2752175b1a60SPeter Zijlstra debug_atomic_inc(nr_redundant); 2753175b1a60SPeter Zijlstra 2754175b1a60SPeter Zijlstra return ret; 2755175b1a60SPeter Zijlstra } 2756175b1a60SPeter Zijlstra 2757175b1a60SPeter Zijlstra #else 2758175b1a60SPeter Zijlstra 2759175b1a60SPeter Zijlstra static inline enum bfs_result 2760175b1a60SPeter Zijlstra check_redundant(struct held_lock *src, struct held_lock *target) 2761175b1a60SPeter Zijlstra { 2762175b1a60SPeter Zijlstra return BFS_RNOMATCH; 2763175b1a60SPeter Zijlstra } 2764175b1a60SPeter Zijlstra 2765175b1a60SPeter Zijlstra #endif 2766175b1a60SPeter Zijlstra 2767b3b9c187SWaiman Long static void inc_chains(int irq_context) 27688eddac3fSPeter Zijlstra { 2769b3b9c187SWaiman Long if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT) 2770b3b9c187SWaiman Long nr_hardirq_chains++; 2771b3b9c187SWaiman Long else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT) 2772b3b9c187SWaiman Long nr_softirq_chains++; 2773b3b9c187SWaiman Long else 27748eddac3fSPeter Zijlstra nr_process_chains++; 27758eddac3fSPeter Zijlstra } 27768eddac3fSPeter Zijlstra 2777b3b9c187SWaiman Long static void dec_chains(int irq_context) 2778b3b9c187SWaiman Long { 2779b3b9c187SWaiman Long if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT) 2780b3b9c187SWaiman Long nr_hardirq_chains--; 2781b3b9c187SWaiman Long else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT) 2782b3b9c187SWaiman Long nr_softirq_chains--; 2783b3b9c187SWaiman Long else 2784b3b9c187SWaiman Long nr_process_chains--; 2785b3b9c187SWaiman Long } 27868eddac3fSPeter Zijlstra 27878eddac3fSPeter Zijlstra static void 2788f7c1c6b3SYuyang Du print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv) 27898eddac3fSPeter Zijlstra { 27908eddac3fSPeter Zijlstra struct lock_class *next = hlock_class(nxt); 27918eddac3fSPeter Zijlstra struct lock_class *prev = hlock_class(prv); 27928eddac3fSPeter Zijlstra 27938eddac3fSPeter Zijlstra printk(" Possible unsafe locking scenario:\n\n"); 27948eddac3fSPeter Zijlstra printk(" CPU0\n"); 27958eddac3fSPeter Zijlstra printk(" ----\n"); 27968eddac3fSPeter Zijlstra printk(" lock("); 27978eddac3fSPeter Zijlstra __print_lock_name(prev); 2798f943fe0fSDmitry Vyukov printk(KERN_CONT ");\n"); 27998eddac3fSPeter Zijlstra printk(" lock("); 28008eddac3fSPeter Zijlstra __print_lock_name(next); 2801f943fe0fSDmitry Vyukov printk(KERN_CONT ");\n"); 28028eddac3fSPeter Zijlstra printk("\n *** DEADLOCK ***\n\n"); 28038eddac3fSPeter Zijlstra printk(" May be due to missing lock nesting notation\n\n"); 28048eddac3fSPeter Zijlstra } 28058eddac3fSPeter Zijlstra 2806f7c1c6b3SYuyang Du static void 28078eddac3fSPeter Zijlstra print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, 28088eddac3fSPeter Zijlstra struct held_lock *next) 28098eddac3fSPeter Zijlstra { 28108eddac3fSPeter Zijlstra if (!debug_locks_off_graph_unlock() || debug_locks_silent) 2811f7c1c6b3SYuyang Du
return; 28128eddac3fSPeter Zijlstra 2813681fbec8SPaul E. McKenney pr_warn("\n"); 2814a5dd63efSPaul E. McKenney pr_warn("============================================\n"); 2815a5dd63efSPaul E. McKenney pr_warn("WARNING: possible recursive locking detected\n"); 28168eddac3fSPeter Zijlstra print_kernel_ident(); 2817a5dd63efSPaul E. McKenney pr_warn("--------------------------------------------\n"); 2818681fbec8SPaul E. McKenney pr_warn("%s/%d is trying to acquire lock:\n", 28198eddac3fSPeter Zijlstra curr->comm, task_pid_nr(curr)); 28208eddac3fSPeter Zijlstra print_lock(next); 2821681fbec8SPaul E. McKenney pr_warn("\nbut task is already holding lock:\n"); 28228eddac3fSPeter Zijlstra print_lock(prev); 28238eddac3fSPeter Zijlstra 2824681fbec8SPaul E. McKenney pr_warn("\nother info that might help us debug this:\n"); 28258eddac3fSPeter Zijlstra print_deadlock_scenario(next, prev); 28268eddac3fSPeter Zijlstra lockdep_print_held_locks(curr); 28278eddac3fSPeter Zijlstra 2828681fbec8SPaul E. McKenney pr_warn("\nstack backtrace:\n"); 28298eddac3fSPeter Zijlstra dump_stack(); 28308eddac3fSPeter Zijlstra } 28318eddac3fSPeter Zijlstra 28328eddac3fSPeter Zijlstra /* 28338eddac3fSPeter Zijlstra * Check whether we are holding such a class already. 28348eddac3fSPeter Zijlstra * 28358eddac3fSPeter Zijlstra * (Note that this has to be done separately, because the graph cannot 28368eddac3fSPeter Zijlstra * detect such classes of deadlocks.) 28378eddac3fSPeter Zijlstra * 2838d61fc96aSBoqun Feng * Returns: 0 on deadlock detected, 1 on OK, 2 if another lock with the same 2839d61fc96aSBoqun Feng * lock class is held but nest_lock is also held, i.e. we rely on the 2840d61fc96aSBoqun Feng * nest_lock to avoid the deadlock. 28418eddac3fSPeter Zijlstra */ 28428eddac3fSPeter Zijlstra static int 28434609c4f9SYuyang Du check_deadlock(struct task_struct *curr, struct held_lock *next) 28448eddac3fSPeter Zijlstra { 28458eddac3fSPeter Zijlstra struct held_lock *prev; 28468eddac3fSPeter Zijlstra struct held_lock *nest = NULL; 28478eddac3fSPeter Zijlstra int i; 28488eddac3fSPeter Zijlstra 28498eddac3fSPeter Zijlstra for (i = 0; i < curr->lockdep_depth; i++) { 28508eddac3fSPeter Zijlstra prev = curr->held_locks + i; 28518eddac3fSPeter Zijlstra 28528eddac3fSPeter Zijlstra if (prev->instance == next->nest_lock) 28538eddac3fSPeter Zijlstra nest = prev; 28548eddac3fSPeter Zijlstra 28558eddac3fSPeter Zijlstra if (hlock_class(prev) != hlock_class(next)) 28568eddac3fSPeter Zijlstra continue; 28578eddac3fSPeter Zijlstra 28588eddac3fSPeter Zijlstra /* 28598eddac3fSPeter Zijlstra * Allow read-after-read recursion of the same 28608eddac3fSPeter Zijlstra * lock class (i.e. read_lock(lock)+read_lock(lock)): 28618eddac3fSPeter Zijlstra */ 28624609c4f9SYuyang Du if ((next->read == 2) && prev->read) 2863d61fc96aSBoqun Feng continue; 28648eddac3fSPeter Zijlstra 28658eddac3fSPeter Zijlstra /* 28668eddac3fSPeter Zijlstra * We're holding the nest_lock, which serializes this lock's 28678eddac3fSPeter Zijlstra * nesting behaviour. 
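 *
 * A sketch of the pattern this allows (hypothetical structures;
 * mutex_lock_nest_lock() is the annotation that records @nest_lock):
 *
 *	mutex_lock(&parent->mtx);
 *	mutex_lock_nest_lock(&child_a->mtx, &parent->mtx);
 *	mutex_lock_nest_lock(&child_b->mtx, &parent->mtx);
 *
 * child_a->mtx and child_b->mtx share one lock class, but because the
 * parent is held as the nest_lock, the double acquisition is
 * serialized and we return 2 below instead of reporting a deadlock.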
28688eddac3fSPeter Zijlstra */ 28698eddac3fSPeter Zijlstra if (nest) 28708eddac3fSPeter Zijlstra return 2; 28718eddac3fSPeter Zijlstra 2872f7c1c6b3SYuyang Du print_deadlock_bug(curr, prev, next); 2873f7c1c6b3SYuyang Du return 0; 28748eddac3fSPeter Zijlstra } 28758eddac3fSPeter Zijlstra return 1; 28768eddac3fSPeter Zijlstra } 28778eddac3fSPeter Zijlstra 28788eddac3fSPeter Zijlstra /* 28798eddac3fSPeter Zijlstra * There was a chain-cache miss, and we are about to add a new dependency 2880154f185eSYuyang Du * to a previous lock. We validate the following rules: 28818eddac3fSPeter Zijlstra * 28828eddac3fSPeter Zijlstra * - would the adding of the <prev> -> <next> dependency create a 28838eddac3fSPeter Zijlstra * circular dependency in the graph? [== circular deadlock] 28848eddac3fSPeter Zijlstra * 28858eddac3fSPeter Zijlstra * - does the new prev->next dependency connect any hardirq-safe lock 28868eddac3fSPeter Zijlstra * (in the full backwards-subgraph starting at <prev>) with any 28878eddac3fSPeter Zijlstra * hardirq-unsafe lock (in the full forwards-subgraph starting at 28888eddac3fSPeter Zijlstra * <next>)? [== illegal lock inversion with hardirq contexts] 28898eddac3fSPeter Zijlstra * 28908eddac3fSPeter Zijlstra * - does the new prev->next dependency connect any softirq-safe lock 28918eddac3fSPeter Zijlstra * (in the full backwards-subgraph starting at <prev>) with any 28928eddac3fSPeter Zijlstra * softirq-unsafe lock (in the full forwards-subgraph starting at 28938eddac3fSPeter Zijlstra * <next>)? [== illegal lock inversion with softirq contexts] 28948eddac3fSPeter Zijlstra * 28958eddac3fSPeter Zijlstra * any of these scenarios could lead to a deadlock. 28968eddac3fSPeter Zijlstra * 28978eddac3fSPeter Zijlstra * Then if all the validations pass, we add the forwards and backwards 28988eddac3fSPeter Zijlstra * dependency. 28998eddac3fSPeter Zijlstra */ 29008eddac3fSPeter Zijlstra static int 29018eddac3fSPeter Zijlstra check_prev_add(struct task_struct *curr, struct held_lock *prev, 2902bd76eca1SBoqun Feng struct held_lock *next, u16 distance, 290312593b74SBart Van Assche struct lock_trace **const trace) 29048eddac3fSPeter Zijlstra { 29058b405d5cSPeter Zijlstra struct lock_list *entry; 2906b11be024SBoqun Feng enum bfs_result ret; 29078eddac3fSPeter Zijlstra 2908a0b0fd53SBart Van Assche if (!hlock_class(prev)->key || !hlock_class(next)->key) { 2909a0b0fd53SBart Van Assche /* 2910a0b0fd53SBart Van Assche * The warning statements below may trigger a use-after-free 2911a0b0fd53SBart Van Assche * of the class name. It is better to trigger a use-after-free 2912a0b0fd53SBart Van Assche * and to have the class name most of the time instead of not 2913a0b0fd53SBart Van Assche * having the class name available. 2914a0b0fd53SBart Van Assche */ 2915a0b0fd53SBart Van Assche WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key, 2916a0b0fd53SBart Van Assche "Detected use-after-free of lock class %px/%s\n", 2917a0b0fd53SBart Van Assche hlock_class(prev), 2918a0b0fd53SBart Van Assche hlock_class(prev)->name); 2919a0b0fd53SBart Van Assche WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key, 2920a0b0fd53SBart Van Assche "Detected use-after-free of lock class %px/%s\n", 2921a0b0fd53SBart Van Assche hlock_class(next), 2922a0b0fd53SBart Van Assche hlock_class(next)->name); 2923a0b0fd53SBart Van Assche return 2; 2924a0b0fd53SBart Van Assche } 2925a0b0fd53SBart Van Assche 29268eddac3fSPeter Zijlstra /* 29278eddac3fSPeter Zijlstra * Prove that the new <prev> -> <next> dependency would not 29288eddac3fSPeter Zijlstra * create a circular dependency in the graph. (We do this by 2929154f185eSYuyang Du * a breadth-first search into the graph starting at <next>, 2930154f185eSYuyang Du * and check whether we can reach <prev>.) 29318eddac3fSPeter Zijlstra * 2932154f185eSYuyang Du * The search is limited by the size of the circular queue (i.e., 2933154f185eSYuyang Du * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes 2934154f185eSYuyang Du * in the graph whose neighbours are to be checked. 29358eddac3fSPeter Zijlstra */ 29368c2c2b44SYuyang Du ret = check_noncircular(next, prev, trace); 2937b11be024SBoqun Feng if (unlikely(bfs_error(ret) || ret == BFS_RMATCH)) 2938f7c1c6b3SYuyang Du return 0; 29398eddac3fSPeter Zijlstra 2940948f8376SFrederic Weisbecker if (!check_irq_usage(curr, prev, next)) 29418eddac3fSPeter Zijlstra return 0; 29428eddac3fSPeter Zijlstra 29438eddac3fSPeter Zijlstra /* 29448eddac3fSPeter Zijlstra * Is the <prev> -> <next> dependency already present? 29458eddac3fSPeter Zijlstra * 29468eddac3fSPeter Zijlstra * (this may occur even though this is a new chain: consider 29478eddac3fSPeter Zijlstra * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3 29488eddac3fSPeter Zijlstra * chains - the second one will be new, but L1 already has 29498eddac3fSPeter Zijlstra * L2 added to its dependency list, due to the first chain.) 29508eddac3fSPeter Zijlstra */ 29518eddac3fSPeter Zijlstra list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) { 29528eddac3fSPeter Zijlstra if (entry->class == hlock_class(next)) { 29538eddac3fSPeter Zijlstra if (distance == 1) 29548eddac3fSPeter Zijlstra entry->distance = 1; 29553454a36dSBoqun Feng entry->dep |= calc_dep(prev, next); 29563454a36dSBoqun Feng 29573454a36dSBoqun Feng /* 29583454a36dSBoqun Feng * Also, update the reverse dependency in @next's 29593454a36dSBoqun Feng * ->locks_before list. 29603454a36dSBoqun Feng * 29613454a36dSBoqun Feng * Here we reuse @entry as the cursor, which is fine 29623454a36dSBoqun Feng * because we won't go to the next iteration of the 29633454a36dSBoqun Feng * outer loop: 29643454a36dSBoqun Feng * 29653454a36dSBoqun Feng * For normal cases, we return in the inner loop. 29663454a36dSBoqun Feng * 29673454a36dSBoqun Feng * If we fail to return, we have an inconsistency, i.e. 29683454a36dSBoqun Feng * <prev>::locks_after contains <next> while 29693454a36dSBoqun Feng * <next>::locks_before doesn't contain <prev>. In 29703454a36dSBoqun Feng * that case, we return after the inner loop and indicate 29713454a36dSBoqun Feng * something is wrong.
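 *
 * In short, the two lists must mirror each other: every direct
 * dependency <prev> -> <next> is recorded twice, once in
 * <prev>::locks_after (with calc_dep()) and once in
 * <next>::locks_before (with calc_depb()), and the loops here
 * keep both entries' ->dep in sync.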
29723454a36dSBoqun Feng */ 29733454a36dSBoqun Feng list_for_each_entry(entry, &hlock_class(next)->locks_before, entry) { 29743454a36dSBoqun Feng if (entry->class == hlock_class(prev)) { 29753454a36dSBoqun Feng if (distance == 1) 29763454a36dSBoqun Feng entry->distance = 1; 29773454a36dSBoqun Feng entry->dep |= calc_depb(prev, next); 297870911fdcSByungchul Park return 1; 29798eddac3fSPeter Zijlstra } 29808eddac3fSPeter Zijlstra } 29818eddac3fSPeter Zijlstra 29823454a36dSBoqun Feng /* <prev> is not found in <next>::locks_before */ 29833454a36dSBoqun Feng return 0; 29843454a36dSBoqun Feng } 29853454a36dSBoqun Feng } 29863454a36dSBoqun Feng 2987ae813308SPeter Zijlstra /* 2988ae813308SPeter Zijlstra * Is the <prev> -> <next> link redundant? 2989ae813308SPeter Zijlstra */ 29908c2c2b44SYuyang Du ret = check_redundant(prev, next); 2991b11be024SBoqun Feng if (bfs_error(ret)) 2992b11be024SBoqun Feng return 0; 2993b11be024SBoqun Feng else if (ret == BFS_RMATCH) 2994b11be024SBoqun Feng return 2; 2995ae813308SPeter Zijlstra 299612593b74SBart Van Assche if (!*trace) { 299712593b74SBart Van Assche *trace = save_trace(); 299812593b74SBart Van Assche if (!*trace) 29998eddac3fSPeter Zijlstra return 0; 300012593b74SBart Van Assche } 30018eddac3fSPeter Zijlstra 30028eddac3fSPeter Zijlstra /* 30038eddac3fSPeter Zijlstra * Ok, all validations passed, add the new lock 30048eddac3fSPeter Zijlstra * to the previous lock's dependency list: 30058eddac3fSPeter Zijlstra */ 300686cffb80SBart Van Assche ret = add_lock_to_list(hlock_class(next), hlock_class(prev), 30078eddac3fSPeter Zijlstra &hlock_class(prev)->locks_after, 30083454a36dSBoqun Feng next->acquire_ip, distance, 30093454a36dSBoqun Feng calc_dep(prev, next), 30103454a36dSBoqun Feng *trace); 30118eddac3fSPeter Zijlstra 30128eddac3fSPeter Zijlstra if (!ret) 30138eddac3fSPeter Zijlstra return 0; 30148eddac3fSPeter Zijlstra 301586cffb80SBart Van Assche ret = add_lock_to_list(hlock_class(prev), hlock_class(next), 30168eddac3fSPeter Zijlstra &hlock_class(next)->locks_before, 30173454a36dSBoqun Feng next->acquire_ip, distance, 30183454a36dSBoqun Feng calc_depb(prev, next), 30193454a36dSBoqun Feng *trace); 30208eddac3fSPeter Zijlstra if (!ret) 30218eddac3fSPeter Zijlstra return 0; 30228eddac3fSPeter Zijlstra 302370911fdcSByungchul Park return 2; 30248eddac3fSPeter Zijlstra } 30258eddac3fSPeter Zijlstra 30268eddac3fSPeter Zijlstra /* 30278eddac3fSPeter Zijlstra * Add the dependency to all directly-previous locks that are 'relevant'. 30288eddac3fSPeter Zijlstra * The ones that are relevant are (in increasing distance from curr): 30298eddac3fSPeter Zijlstra * all consecutive trylock entries and the final non-trylock entry - or 30308eddac3fSPeter Zijlstra * the end of this context's lock-chain - whichever comes first. 30318eddac3fSPeter Zijlstra */ 30328eddac3fSPeter Zijlstra static int 30338eddac3fSPeter Zijlstra check_prevs_add(struct task_struct *curr, struct held_lock *next) 30348eddac3fSPeter Zijlstra { 303512593b74SBart Van Assche struct lock_trace *trace = NULL; 30368eddac3fSPeter Zijlstra int depth = curr->lockdep_depth; 30378eddac3fSPeter Zijlstra struct held_lock *hlock; 30388eddac3fSPeter Zijlstra 30398eddac3fSPeter Zijlstra /* 30408eddac3fSPeter Zijlstra * Debugging checks. 
30418eddac3fSPeter Zijlstra * 30428eddac3fSPeter Zijlstra * Depth must not be zero for a non-head lock: 30438eddac3fSPeter Zijlstra */ 30448eddac3fSPeter Zijlstra if (!depth) 30458eddac3fSPeter Zijlstra goto out_bug; 30468eddac3fSPeter Zijlstra /* 30478eddac3fSPeter Zijlstra * At least two relevant locks must exist for this 30488eddac3fSPeter Zijlstra * to be a head: 30498eddac3fSPeter Zijlstra */ 30508eddac3fSPeter Zijlstra if (curr->held_locks[depth].irq_context != 30518eddac3fSPeter Zijlstra curr->held_locks[depth-1].irq_context) 30528eddac3fSPeter Zijlstra goto out_bug; 30538eddac3fSPeter Zijlstra 30548eddac3fSPeter Zijlstra for (;;) { 3055bd76eca1SBoqun Feng u16 distance = curr->lockdep_depth - depth + 1; 30568eddac3fSPeter Zijlstra hlock = curr->held_locks + depth - 1; 3057e966eaeeSIngo Molnar 3058621c9dacSBoqun Feng if (hlock->check) { 3059621c9dacSBoqun Feng int ret = check_prev_add(curr, hlock, next, distance, &trace); 3060ce07a941SByungchul Park if (!ret) 30618eddac3fSPeter Zijlstra return 0; 3062ce07a941SByungchul Park 3063ce07a941SByungchul Park /* 30648eddac3fSPeter Zijlstra * Stop after the first non-trylock entry, 30658eddac3fSPeter Zijlstra * as non-trylock entries have added their 30668eddac3fSPeter Zijlstra * own direct dependencies already, so this 30678eddac3fSPeter Zijlstra * lock is connected to them indirectly: 30688eddac3fSPeter Zijlstra */ 30698eddac3fSPeter Zijlstra if (!hlock->trylock) 30708eddac3fSPeter Zijlstra break; 30718eddac3fSPeter Zijlstra } 3072e966eaeeSIngo Molnar 30738eddac3fSPeter Zijlstra depth--; 30748eddac3fSPeter Zijlstra /* 30758eddac3fSPeter Zijlstra * End of lock-stack? 30768eddac3fSPeter Zijlstra */ 30778eddac3fSPeter Zijlstra if (!depth) 30788eddac3fSPeter Zijlstra break; 30798eddac3fSPeter Zijlstra /* 30808eddac3fSPeter Zijlstra * Stop the search if we cross into another context: 30818eddac3fSPeter Zijlstra */ 30828eddac3fSPeter Zijlstra if (curr->held_locks[depth].irq_context != 30838eddac3fSPeter Zijlstra curr->held_locks[depth-1].irq_context) 30848eddac3fSPeter Zijlstra break; 30858eddac3fSPeter Zijlstra } 30868eddac3fSPeter Zijlstra return 1; 30878eddac3fSPeter Zijlstra out_bug: 30888eddac3fSPeter Zijlstra if (!debug_locks_off_graph_unlock()) 30898eddac3fSPeter Zijlstra return 0; 30908eddac3fSPeter Zijlstra 30918eddac3fSPeter Zijlstra /* 30928eddac3fSPeter Zijlstra * Clearly we all shouldn't be here, but since we made it we 30938eddac3fSPeter Zijlstra * can reliably say we messed up our state. See the above two 30948eddac3fSPeter Zijlstra * gotos for reasons why we could possibly end up here.
30958eddac3fSPeter Zijlstra */ 30968eddac3fSPeter Zijlstra WARN_ON(1); 30978eddac3fSPeter Zijlstra 30988eddac3fSPeter Zijlstra return 0; 30998eddac3fSPeter Zijlstra } 31008eddac3fSPeter Zijlstra 31018eddac3fSPeter Zijlstra struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS]; 3102de4643a7SBart Van Assche static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS); 31038eddac3fSPeter Zijlstra static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS]; 3104797b82ebSWaiman Long unsigned long nr_zapped_lock_chains; 3105810507feSWaiman Long unsigned int nr_free_chain_hlocks; /* Free chain_hlocks in buckets */ 3106810507feSWaiman Long unsigned int nr_lost_chain_hlocks; /* Lost chain_hlocks */ 3107810507feSWaiman Long unsigned int nr_large_chain_blocks; /* size > MAX_CHAIN_BUCKETS */ 3108810507feSWaiman Long 3109810507feSWaiman Long /* 3110810507feSWaiman Long * The first 2 chain_hlocks entries in the chain block in the bucket 3111810507feSWaiman Long * list contain the following metadata: 3112810507feSWaiman Long * 3113810507feSWaiman Long * entry[0]: 3114810507feSWaiman Long * Bit 15 - always set to 1 (it is not a class index) 3115810507feSWaiman Long * Bits 0-14 - upper 15 bits of the next block index 3116810507feSWaiman Long * entry[1] - lower 16 bits of next block index 3117810507feSWaiman Long * 3118810507feSWaiman Long * A next block index of all 1 bits means it is the end of the list. 3119810507feSWaiman Long * 3120810507feSWaiman Long * On the unsized bucket (bucket-0), the 3rd and 4th entries contain 3121810507feSWaiman Long * the chain block size: 3122810507feSWaiman Long * 3123810507feSWaiman Long * entry[2] - upper 16 bits of the chain block size 3124810507feSWaiman Long * entry[3] - lower 16 bits of the chain block size 3125810507feSWaiman Long */ 3126810507feSWaiman Long #define MAX_CHAIN_BUCKETS 16 3127810507feSWaiman Long #define CHAIN_BLK_FLAG (1U << 15) 3128810507feSWaiman Long #define CHAIN_BLK_LIST_END 0xFFFFU 3129810507feSWaiman Long 3130810507feSWaiman Long static int chain_block_buckets[MAX_CHAIN_BUCKETS]; 3131810507feSWaiman Long 3132810507feSWaiman Long static inline int size_to_bucket(int size) 3133810507feSWaiman Long { 3134810507feSWaiman Long if (size > MAX_CHAIN_BUCKETS) 3135810507feSWaiman Long return 0; 3136810507feSWaiman Long 3137810507feSWaiman Long return size - 1; 3138810507feSWaiman Long } 3139810507feSWaiman Long 3140810507feSWaiman Long /* 3141810507feSWaiman Long * Iterate all the chain blocks in a bucket.
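 *
 * (Recapping the metadata layout above with a hypothetical block: a
 * free block whose next block sits at offset 0x11170 and whose size
 * is 5 reads entry[0] = 0x8001 (CHAIN_BLK_FLAG | 0x11170 >> 16),
 * entry[1] = 0x1170 and, on bucket-0 only, entry[2] = 0x0000,
 * entry[3] = 0x0005.)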
3142810507feSWaiman Long */ 3143810507feSWaiman Long #define for_each_chain_block(bucket, prev, curr) \ 3144810507feSWaiman Long for ((prev) = -1, (curr) = chain_block_buckets[bucket]; \ 3145810507feSWaiman Long (curr) >= 0; \ 3146810507feSWaiman Long (prev) = (curr), (curr) = chain_block_next(curr)) 3147810507feSWaiman Long 3148810507feSWaiman Long /* 3149810507feSWaiman Long * next block or -1 3150810507feSWaiman Long */ 3151810507feSWaiman Long static inline int chain_block_next(int offset) 3152810507feSWaiman Long { 3153810507feSWaiman Long int next = chain_hlocks[offset]; 3154810507feSWaiman Long 3155810507feSWaiman Long WARN_ON_ONCE(!(next & CHAIN_BLK_FLAG)); 3156810507feSWaiman Long 3157810507feSWaiman Long if (next == CHAIN_BLK_LIST_END) 3158810507feSWaiman Long return -1; 3159810507feSWaiman Long 3160810507feSWaiman Long next &= ~CHAIN_BLK_FLAG; 3161810507feSWaiman Long next <<= 16; 3162810507feSWaiman Long next |= chain_hlocks[offset + 1]; 3163810507feSWaiman Long 3164810507feSWaiman Long return next; 3165810507feSWaiman Long } 3166810507feSWaiman Long 3167810507feSWaiman Long /* 3168810507feSWaiman Long * bucket-0 only 3169810507feSWaiman Long */ 3170810507feSWaiman Long static inline int chain_block_size(int offset) 3171810507feSWaiman Long { 3172810507feSWaiman Long return (chain_hlocks[offset + 2] << 16) | chain_hlocks[offset + 3]; 3173810507feSWaiman Long } 3174810507feSWaiman Long 3175810507feSWaiman Long static inline void init_chain_block(int offset, int next, int bucket, int size) 3176810507feSWaiman Long { 3177810507feSWaiman Long chain_hlocks[offset] = (next >> 16) | CHAIN_BLK_FLAG; 3178810507feSWaiman Long chain_hlocks[offset + 1] = (u16)next; 3179810507feSWaiman Long 3180810507feSWaiman Long if (size && !bucket) { 3181810507feSWaiman Long chain_hlocks[offset + 2] = size >> 16; 3182810507feSWaiman Long chain_hlocks[offset + 3] = (u16)size; 3183810507feSWaiman Long } 3184810507feSWaiman Long } 3185810507feSWaiman Long 3186810507feSWaiman Long static inline void add_chain_block(int offset, int size) 3187810507feSWaiman Long { 3188810507feSWaiman Long int bucket = size_to_bucket(size); 3189810507feSWaiman Long int next = chain_block_buckets[bucket]; 3190810507feSWaiman Long int prev, curr; 3191810507feSWaiman Long 3192810507feSWaiman Long if (unlikely(size < 2)) { 3193810507feSWaiman Long /* 3194810507feSWaiman Long * We can't store single entries on the freelist. Leak them. 3195810507feSWaiman Long * 3196810507feSWaiman Long * One possible way out would be to uniquely mark them, other 3197810507feSWaiman Long * than with CHAIN_BLK_FLAG, such that we can recover them when 3198810507feSWaiman Long * the block before it is re-added. 3199810507feSWaiman Long */ 3200810507feSWaiman Long if (size) 3201810507feSWaiman Long nr_lost_chain_hlocks++; 3202810507feSWaiman Long return; 3203810507feSWaiman Long } 3204810507feSWaiman Long 3205810507feSWaiman Long nr_free_chain_hlocks += size; 3206810507feSWaiman Long if (!bucket) { 3207810507feSWaiman Long nr_large_chain_blocks++; 3208810507feSWaiman Long 3209810507feSWaiman Long /* 3210810507feSWaiman Long * Variable sized, sort large to small. 
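 *
 * E.g. a bucket-0 list holding blocks of sizes 48, 32 and 20
 * (hypothetical) stays largest-first, so a newly freed block of
 * size 40 gets linked in between the 48 and the 32.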
3211810507feSWaiman Long */ 3212810507feSWaiman Long for_each_chain_block(0, prev, curr) { 3213810507feSWaiman Long if (size >= chain_block_size(curr)) 3214810507feSWaiman Long break; 3215810507feSWaiman Long } 3216810507feSWaiman Long init_chain_block(offset, curr, 0, size); 3217810507feSWaiman Long if (prev < 0) 3218810507feSWaiman Long chain_block_buckets[0] = offset; 3219810507feSWaiman Long else 3220810507feSWaiman Long init_chain_block(prev, offset, 0, 0); 3221810507feSWaiman Long return; 3222810507feSWaiman Long } 3223810507feSWaiman Long /* 3224810507feSWaiman Long * Fixed size, add to head. 3225810507feSWaiman Long */ 3226810507feSWaiman Long init_chain_block(offset, next, bucket, size); 3227810507feSWaiman Long chain_block_buckets[bucket] = offset; 3228810507feSWaiman Long } 3229810507feSWaiman Long 3230810507feSWaiman Long /* 3231810507feSWaiman Long * Only the first block in the list can be deleted. 3232810507feSWaiman Long * 3233810507feSWaiman Long * For the variable size bucket[0], the first block (the largest one) is 3234810507feSWaiman Long * returned, broken up and put back into the pool. So if a chain block of 3235810507feSWaiman Long * length > MAX_CHAIN_BUCKETS is ever used and zapped, it will just be 3236810507feSWaiman Long * queued up after the primordial chain block and never be used until the 3237810507feSWaiman Long * hlock entries in the primordial chain block are almost used up. That 3238810507feSWaiman Long * causes fragmentation and reduces allocation efficiency. That can be 3239810507feSWaiman Long * monitored by looking at the "large chain blocks" number in lockdep_stats. 3240810507feSWaiman Long */ 3241810507feSWaiman Long static inline void del_chain_block(int bucket, int size, int next) 3242810507feSWaiman Long { 3243810507feSWaiman Long nr_free_chain_hlocks -= size; 3244810507feSWaiman Long chain_block_buckets[bucket] = next; 3245810507feSWaiman Long 3246810507feSWaiman Long if (!bucket) 3247810507feSWaiman Long nr_large_chain_blocks--; 3248810507feSWaiman Long } 3249810507feSWaiman Long 3250810507feSWaiman Long static void init_chain_block_buckets(void) 3251810507feSWaiman Long { 3252810507feSWaiman Long int i; 3253810507feSWaiman Long 3254810507feSWaiman Long for (i = 0; i < MAX_CHAIN_BUCKETS; i++) 3255810507feSWaiman Long chain_block_buckets[i] = -1; 3256810507feSWaiman Long 3257810507feSWaiman Long add_chain_block(0, ARRAY_SIZE(chain_hlocks)); 3258810507feSWaiman Long } 3259810507feSWaiman Long 3260810507feSWaiman Long /* 3261810507feSWaiman Long * Return offset of a chain block of the right size or -1 if not found. 3262810507feSWaiman Long * 3263810507feSWaiman Long * Fairly simple worst-fit allocator with the addition of a number of 3264810507feSWaiman Long * size-specific free lists. 3265810507feSWaiman Long */ 3266810507feSWaiman Long static int alloc_chain_hlocks(int req) 3267810507feSWaiman Long { 3268810507feSWaiman Long int bucket, curr, size; 3269810507feSWaiman Long 3270810507feSWaiman Long /* 3271810507feSWaiman Long * We rely on the MSB to act as an escape bit to denote freelist 3272810507feSWaiman Long * pointers. Make sure this bit isn't set in 'normal' class_idx usage.
3273810507feSWaiman Long */ 3274810507feSWaiman Long BUILD_BUG_ON((MAX_LOCKDEP_KEYS-1) & CHAIN_BLK_FLAG); 3275810507feSWaiman Long 3276810507feSWaiman Long init_data_structures_once(); 3277810507feSWaiman Long 3278810507feSWaiman Long if (nr_free_chain_hlocks < req) 3279810507feSWaiman Long return -1; 3280810507feSWaiman Long 3281810507feSWaiman Long /* 3282810507feSWaiman Long * We require a minimum of 2 (u16) entries to encode a freelist 3283810507feSWaiman Long * 'pointer'. 3284810507feSWaiman Long */ 3285810507feSWaiman Long req = max(req, 2); 3286810507feSWaiman Long bucket = size_to_bucket(req); 3287810507feSWaiman Long curr = chain_block_buckets[bucket]; 3288810507feSWaiman Long 3289810507feSWaiman Long if (bucket) { 3290810507feSWaiman Long if (curr >= 0) { 3291810507feSWaiman Long del_chain_block(bucket, req, chain_block_next(curr)); 3292810507feSWaiman Long return curr; 3293810507feSWaiman Long } 3294810507feSWaiman Long /* Try bucket 0 */ 3295810507feSWaiman Long curr = chain_block_buckets[0]; 3296810507feSWaiman Long } 3297810507feSWaiman Long 3298810507feSWaiman Long /* 3299810507feSWaiman Long * The variable sized freelist is sorted by size; the first entry is 3300810507feSWaiman Long * the largest. Use it if it fits. 3301810507feSWaiman Long */ 3302810507feSWaiman Long if (curr >= 0) { 3303810507feSWaiman Long size = chain_block_size(curr); 3304810507feSWaiman Long if (likely(size >= req)) { 3305810507feSWaiman Long del_chain_block(0, size, chain_block_next(curr)); 3306810507feSWaiman Long add_chain_block(curr + req, size - req); 3307810507feSWaiman Long return curr; 3308810507feSWaiman Long } 3309810507feSWaiman Long } 3310810507feSWaiman Long 3311810507feSWaiman Long /* 3312810507feSWaiman Long * Last resort, split a block in a larger sized bucket. 
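 *
 * E.g. (hypothetical state): for req = 3 with buckets 2 and 0 both
 * empty, the loop below scans sizes 16 down to 4; if it finds a
 * size-8 block in bucket 7, the first 3 entries satisfy the request
 * and the remaining 5 are handed back to bucket 4 via add_chain_block().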
3313810507feSWaiman Long */ 3314810507feSWaiman Long for (size = MAX_CHAIN_BUCKETS; size > req; size--) { 3315810507feSWaiman Long bucket = size_to_bucket(size); 3316810507feSWaiman Long curr = chain_block_buckets[bucket]; 3317810507feSWaiman Long if (curr < 0) 3318810507feSWaiman Long continue; 3319810507feSWaiman Long 3320810507feSWaiman Long del_chain_block(bucket, size, chain_block_next(curr)); 3321810507feSWaiman Long add_chain_block(curr + req, size - req); 3322810507feSWaiman Long return curr; 3323810507feSWaiman Long } 3324810507feSWaiman Long 3325810507feSWaiman Long return -1; 3326810507feSWaiman Long } 3327810507feSWaiman Long 3328810507feSWaiman Long static inline void free_chain_hlocks(int base, int size) 3329810507feSWaiman Long { 3330810507feSWaiman Long add_chain_block(base, max(size, 2)); 3331810507feSWaiman Long } 33328eddac3fSPeter Zijlstra 33338eddac3fSPeter Zijlstra struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i) 33348eddac3fSPeter Zijlstra { 3335f611e8cfSBoqun Feng u16 chain_hlock = chain_hlocks[chain->base + i]; 3336f611e8cfSBoqun Feng unsigned int class_idx = chain_hlock_class_idx(chain_hlock); 3337f611e8cfSBoqun Feng 3338f611e8cfSBoqun Feng return lock_classes + class_idx - 1; 33398eddac3fSPeter Zijlstra } 33408eddac3fSPeter Zijlstra 33418eddac3fSPeter Zijlstra /* 33429e4e7554SIngo Molnar * Returns the index of the first held_lock of the current chain 33439e4e7554SIngo Molnar */ 33449e4e7554SIngo Molnar static inline int get_first_held_lock(struct task_struct *curr, 33459e4e7554SIngo Molnar struct held_lock *hlock) 33469e4e7554SIngo Molnar { 33479e4e7554SIngo Molnar int i; 33489e4e7554SIngo Molnar struct held_lock *hlock_curr; 33499e4e7554SIngo Molnar 33509e4e7554SIngo Molnar for (i = curr->lockdep_depth - 1; i >= 0; i--) { 33519e4e7554SIngo Molnar hlock_curr = curr->held_locks + i; 33529e4e7554SIngo Molnar if (hlock_curr->irq_context != hlock->irq_context) 33539e4e7554SIngo Molnar break; 33549e4e7554SIngo Molnar 33559e4e7554SIngo Molnar } 33569e4e7554SIngo Molnar 33579e4e7554SIngo Molnar return ++i; 33589e4e7554SIngo Molnar } 33599e4e7554SIngo Molnar 33605c8a010cSBorislav Petkov #ifdef CONFIG_DEBUG_LOCKDEP 33619e4e7554SIngo Molnar /* 336239e2e173SAlfredo Alvarez Fernandez * Returns the next chain_key iteration 336339e2e173SAlfredo Alvarez Fernandez */ 3364f611e8cfSBoqun Feng static u64 print_chain_key_iteration(u16 hlock_id, u64 chain_key) 336539e2e173SAlfredo Alvarez Fernandez { 3366f611e8cfSBoqun Feng u64 new_chain_key = iterate_chain_key(chain_key, hlock_id); 336739e2e173SAlfredo Alvarez Fernandez 3368f611e8cfSBoqun Feng printk(" hlock_id:%d -> chain_key:%016Lx", 3369f611e8cfSBoqun Feng (unsigned int)hlock_id, 337039e2e173SAlfredo Alvarez Fernandez (unsigned long long)new_chain_key); 337139e2e173SAlfredo Alvarez Fernandez return new_chain_key; 337239e2e173SAlfredo Alvarez Fernandez } 337339e2e173SAlfredo Alvarez Fernandez 337439e2e173SAlfredo Alvarez Fernandez static void 337539e2e173SAlfredo Alvarez Fernandez print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next) 337639e2e173SAlfredo Alvarez Fernandez { 337739e2e173SAlfredo Alvarez Fernandez struct held_lock *hlock; 3378f6ec8829SYuyang Du u64 chain_key = INITIAL_CHAIN_KEY; 337939e2e173SAlfredo Alvarez Fernandez int depth = curr->lockdep_depth; 3380834494b2SYuyang Du int i = get_first_held_lock(curr, hlock_next); 338139e2e173SAlfredo Alvarez Fernandez 3382834494b2SYuyang Du printk("depth: %u (irq_context %u)\n", depth - i + 1, 3383834494b2SYuyang Du 
hlock_next->irq_context); 3384834494b2SYuyang Du for (; i < depth; i++) { 338539e2e173SAlfredo Alvarez Fernandez hlock = curr->held_locks + i; 3386f611e8cfSBoqun Feng chain_key = print_chain_key_iteration(hlock_id(hlock), chain_key); 338739e2e173SAlfredo Alvarez Fernandez 338839e2e173SAlfredo Alvarez Fernandez print_lock(hlock); 338939e2e173SAlfredo Alvarez Fernandez } 339039e2e173SAlfredo Alvarez Fernandez 3391f611e8cfSBoqun Feng print_chain_key_iteration(hlock_id(hlock_next), chain_key); 339239e2e173SAlfredo Alvarez Fernandez print_lock(hlock_next); 339339e2e173SAlfredo Alvarez Fernandez } 339439e2e173SAlfredo Alvarez Fernandez 339539e2e173SAlfredo Alvarez Fernandez static void print_chain_keys_chain(struct lock_chain *chain) 339639e2e173SAlfredo Alvarez Fernandez { 339739e2e173SAlfredo Alvarez Fernandez int i; 3398f6ec8829SYuyang Du u64 chain_key = INITIAL_CHAIN_KEY; 3399f611e8cfSBoqun Feng u16 hlock_id; 340039e2e173SAlfredo Alvarez Fernandez 340139e2e173SAlfredo Alvarez Fernandez printk("depth: %u\n", chain->depth); 340239e2e173SAlfredo Alvarez Fernandez for (i = 0; i < chain->depth; i++) { 3403f611e8cfSBoqun Feng hlock_id = chain_hlocks[chain->base + i]; 3404f611e8cfSBoqun Feng chain_key = print_chain_key_iteration(hlock_id, chain_key); 340539e2e173SAlfredo Alvarez Fernandez 3406f611e8cfSBoqun Feng print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id) - 1); 340739e2e173SAlfredo Alvarez Fernandez printk("\n"); 340839e2e173SAlfredo Alvarez Fernandez } 340939e2e173SAlfredo Alvarez Fernandez } 341039e2e173SAlfredo Alvarez Fernandez 341139e2e173SAlfredo Alvarez Fernandez static void print_collision(struct task_struct *curr, 341239e2e173SAlfredo Alvarez Fernandez struct held_lock *hlock_next, 341339e2e173SAlfredo Alvarez Fernandez struct lock_chain *chain) 341439e2e173SAlfredo Alvarez Fernandez { 3415681fbec8SPaul E. McKenney pr_warn("\n"); 3416a5dd63efSPaul E. McKenney pr_warn("============================\n"); 3417a5dd63efSPaul E. McKenney pr_warn("WARNING: chain_key collision\n"); 341839e2e173SAlfredo Alvarez Fernandez print_kernel_ident(); 3419a5dd63efSPaul E. McKenney pr_warn("----------------------------\n"); 3420681fbec8SPaul E. McKenney pr_warn("%s/%d: ", current->comm, task_pid_nr(current)); 3421681fbec8SPaul E. McKenney pr_warn("Hash chain already cached but the contents don't match!\n"); 342239e2e173SAlfredo Alvarez Fernandez 3423681fbec8SPaul E. McKenney pr_warn("Held locks:"); 342439e2e173SAlfredo Alvarez Fernandez print_chain_keys_held_locks(curr, hlock_next); 342539e2e173SAlfredo Alvarez Fernandez 3426681fbec8SPaul E. McKenney pr_warn("Locks in cached chain:"); 342739e2e173SAlfredo Alvarez Fernandez print_chain_keys_chain(chain); 342839e2e173SAlfredo Alvarez Fernandez 3429681fbec8SPaul E. McKenney pr_warn("\nstack backtrace:\n"); 343039e2e173SAlfredo Alvarez Fernandez dump_stack(); 343139e2e173SAlfredo Alvarez Fernandez } 34325c8a010cSBorislav Petkov #endif 343339e2e173SAlfredo Alvarez Fernandez 343439e2e173SAlfredo Alvarez Fernandez /* 34359e4e7554SIngo Molnar * Checks whether the chain and the current held locks are consistent 34369e4e7554SIngo Molnar * in depth and also in content. If they are not it most likely means 34379e4e7554SIngo Molnar * that there was a collision during the calculation of the chain_key. 
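 * E.g. (editor's illustration): the task may hold locks (A, B) while the
 * cached chain for the same 64-bit chain_key describes (C, D); the depth
 * and per-entry comparisons below are what catch such a hash collision.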
34389e4e7554SIngo Molnar * Returns: 0 not passed, 1 passed 34399e4e7554SIngo Molnar */ 34409e4e7554SIngo Molnar static int check_no_collision(struct task_struct *curr, 34419e4e7554SIngo Molnar struct held_lock *hlock, 34429e4e7554SIngo Molnar struct lock_chain *chain) 34439e4e7554SIngo Molnar { 34449e4e7554SIngo Molnar #ifdef CONFIG_DEBUG_LOCKDEP 34459e4e7554SIngo Molnar int i, j, id; 34469e4e7554SIngo Molnar 34479e4e7554SIngo Molnar i = get_first_held_lock(curr, hlock); 34489e4e7554SIngo Molnar 344939e2e173SAlfredo Alvarez Fernandez if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) { 345039e2e173SAlfredo Alvarez Fernandez print_collision(curr, hlock, chain); 34519e4e7554SIngo Molnar return 0; 345239e2e173SAlfredo Alvarez Fernandez } 34539e4e7554SIngo Molnar 34549e4e7554SIngo Molnar for (j = 0; j < chain->depth - 1; j++, i++) { 3455f611e8cfSBoqun Feng id = hlock_id(&curr->held_locks[i]); 34569e4e7554SIngo Molnar 345739e2e173SAlfredo Alvarez Fernandez if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) { 345839e2e173SAlfredo Alvarez Fernandez print_collision(curr, hlock, chain); 34599e4e7554SIngo Molnar return 0; 34609e4e7554SIngo Molnar } 346139e2e173SAlfredo Alvarez Fernandez } 34629e4e7554SIngo Molnar #endif 34639e4e7554SIngo Molnar return 1; 34649e4e7554SIngo Molnar } 34659e4e7554SIngo Molnar 34669e4e7554SIngo Molnar /* 34672212684aSBart Van Assche * Given an index that is >= -1, return the index of the next lock chain. 34682212684aSBart Van Assche * Return -2 if there is no next lock chain. 34692212684aSBart Van Assche */ 34702212684aSBart Van Assche long lockdep_next_lockchain(long i) 34712212684aSBart Van Assche { 3472de4643a7SBart Van Assche i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1); 3473de4643a7SBart Van Assche return i < ARRAY_SIZE(lock_chains) ? i : -2; 34742212684aSBart Van Assche } 34752212684aSBart Van Assche 34762212684aSBart Van Assche unsigned long lock_chain_count(void) 34772212684aSBart Van Assche { 3478de4643a7SBart Van Assche return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains)); 3479de4643a7SBart Van Assche } 3480de4643a7SBart Van Assche 3481de4643a7SBart Van Assche /* Must be called with the graph lock held. */ 3482de4643a7SBart Van Assche static struct lock_chain *alloc_lock_chain(void) 3483de4643a7SBart Van Assche { 3484de4643a7SBart Van Assche int idx = find_first_zero_bit(lock_chains_in_use, 3485de4643a7SBart Van Assche ARRAY_SIZE(lock_chains)); 3486de4643a7SBart Van Assche 3487de4643a7SBart Van Assche if (unlikely(idx >= ARRAY_SIZE(lock_chains))) 3488de4643a7SBart Van Assche return NULL; 3489de4643a7SBart Van Assche __set_bit(idx, lock_chains_in_use); 3490de4643a7SBart Van Assche return lock_chains + idx; 34912212684aSBart Van Assche } 34922212684aSBart Van Assche 34932212684aSBart Van Assche /* 3494545c23f2SByungchul Park * Adds a dependency chain into the chain hashtable. Must be called with 3495545c23f2SByungchul Park * graph_lock held. 3496545c23f2SByungchul Park * 3497545c23f2SByungchul Park * Returns 0 on failure, and graph_lock is released. 3498545c23f2SByungchul Park * Returns 1 on success, with graph_lock held.
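 *
 * A minimal sketch of that contract (editor's illustration; the real
 * call site is lookup_chain_cache_add() below):
 *
 *	if (!graph_lock())
 *		return 0;
 *	if (!add_chain_cache(curr, hlock, chain_key))
 *		return 0;	<- graph_lock already released
 *	...			<- new chain cached, graph_lock still held
 *	graph_unlock();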
34998eddac3fSPeter Zijlstra */ 3500545c23f2SByungchul Park static inline int add_chain_cache(struct task_struct *curr, 35018eddac3fSPeter Zijlstra struct held_lock *hlock, 35028eddac3fSPeter Zijlstra u64 chain_key) 35038eddac3fSPeter Zijlstra { 3504a63f38ccSAndrew Morton struct hlist_head *hash_head = chainhashentry(chain_key); 35058eddac3fSPeter Zijlstra struct lock_chain *chain; 35068eddac3fSPeter Zijlstra int i, j; 35078eddac3fSPeter Zijlstra 35088eddac3fSPeter Zijlstra /* 3509527af3eaSBart Van Assche * The caller must hold the graph lock, ensure we've got IRQs 35108eddac3fSPeter Zijlstra * disabled to make this an IRQ-safe lock.. for recursion reasons 35118eddac3fSPeter Zijlstra * lockdep won't complain about its own locking errors. 35128eddac3fSPeter Zijlstra */ 3513248efb21SPeter Zijlstra if (lockdep_assert_locked()) 35148eddac3fSPeter Zijlstra return 0; 35159e4e7554SIngo Molnar 3516de4643a7SBart Van Assche chain = alloc_lock_chain(); 3517de4643a7SBart Van Assche if (!chain) { 35188eddac3fSPeter Zijlstra if (!debug_locks_off_graph_unlock()) 35198eddac3fSPeter Zijlstra return 0; 35208eddac3fSPeter Zijlstra 35218eddac3fSPeter Zijlstra print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!"); 35228eddac3fSPeter Zijlstra dump_stack(); 35238eddac3fSPeter Zijlstra return 0; 35248eddac3fSPeter Zijlstra } 35258eddac3fSPeter Zijlstra chain->chain_key = chain_key; 35268eddac3fSPeter Zijlstra chain->irq_context = hlock->irq_context; 35279e4e7554SIngo Molnar i = get_first_held_lock(curr, hlock); 35288eddac3fSPeter Zijlstra chain->depth = curr->lockdep_depth + 1 - i; 352975dd602aSPeter Zijlstra 353075dd602aSPeter Zijlstra BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks)); 353175dd602aSPeter Zijlstra BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks)); 353275dd602aSPeter Zijlstra BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes)); 353375dd602aSPeter Zijlstra 3534810507feSWaiman Long j = alloc_chain_hlocks(chain->depth); 3535810507feSWaiman Long if (j < 0) { 3536f9af456aSByungchul Park if (!debug_locks_off_graph_unlock()) 353775dd602aSPeter Zijlstra return 0; 353875dd602aSPeter Zijlstra 353975dd602aSPeter Zijlstra print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!"); 354075dd602aSPeter Zijlstra dump_stack(); 354175dd602aSPeter Zijlstra return 0; 354275dd602aSPeter Zijlstra } 354375dd602aSPeter Zijlstra 3544810507feSWaiman Long chain->base = j; 3545810507feSWaiman Long for (j = 0; j < chain->depth - 1; j++, i++) { 3546f611e8cfSBoqun Feng int lock_id = hlock_id(curr->held_locks + i); 3547810507feSWaiman Long 3548810507feSWaiman Long chain_hlocks[chain->base + j] = lock_id; 3549810507feSWaiman Long } 3550f611e8cfSBoqun Feng chain_hlocks[chain->base + j] = hlock_id(hlock); 3551a63f38ccSAndrew Morton hlist_add_head_rcu(&chain->entry, hash_head); 35528eddac3fSPeter Zijlstra debug_atomic_inc(chain_lookup_misses); 3553b3b9c187SWaiman Long inc_chains(chain->irq_context); 35548eddac3fSPeter Zijlstra 35558eddac3fSPeter Zijlstra return 1; 35568eddac3fSPeter Zijlstra } 35578eddac3fSPeter Zijlstra 3558545c23f2SByungchul Park /* 3559a0b0fd53SBart Van Assche * Look up a dependency chain. Must be called with either the graph lock or 3560a0b0fd53SBart Van Assche * the RCU read lock held. 
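 *
 * RCU-side usage sketch (editor's illustration):
 *
 *	rcu_read_lock();
 *	chain = lookup_chain_cache(chain_key);
 *	if (chain)
 *		...		<- use the cached chain
 *	rcu_read_unlock();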
3561545c23f2SByungchul Park */ 3562545c23f2SByungchul Park static inline struct lock_chain *lookup_chain_cache(u64 chain_key) 3563545c23f2SByungchul Park { 3564545c23f2SByungchul Park struct hlist_head *hash_head = chainhashentry(chain_key); 3565545c23f2SByungchul Park struct lock_chain *chain; 3566545c23f2SByungchul Park 3567545c23f2SByungchul Park hlist_for_each_entry_rcu(chain, hash_head, entry) { 3568a0b0fd53SBart Van Assche if (READ_ONCE(chain->chain_key) == chain_key) { 3569545c23f2SByungchul Park debug_atomic_inc(chain_lookup_hits); 3570545c23f2SByungchul Park return chain; 3571545c23f2SByungchul Park } 3572545c23f2SByungchul Park } 3573545c23f2SByungchul Park return NULL; 3574545c23f2SByungchul Park } 3575545c23f2SByungchul Park 3576545c23f2SByungchul Park /* 3577545c23f2SByungchul Park * If the key is not present yet in dependency chain cache then 3578545c23f2SByungchul Park * add it and return 1 - in this case the new dependency chain is 3579545c23f2SByungchul Park * validated. If the key is already hashed, return 0. 3580545c23f2SByungchul Park * (On return with 1 graph_lock is held.) 3581545c23f2SByungchul Park */ 3582545c23f2SByungchul Park static inline int lookup_chain_cache_add(struct task_struct *curr, 3583545c23f2SByungchul Park struct held_lock *hlock, 3584545c23f2SByungchul Park u64 chain_key) 3585545c23f2SByungchul Park { 3586545c23f2SByungchul Park struct lock_class *class = hlock_class(hlock); 3587545c23f2SByungchul Park struct lock_chain *chain = lookup_chain_cache(chain_key); 3588545c23f2SByungchul Park 3589545c23f2SByungchul Park if (chain) { 3590545c23f2SByungchul Park cache_hit: 3591545c23f2SByungchul Park if (!check_no_collision(curr, hlock, chain)) 3592545c23f2SByungchul Park return 0; 3593545c23f2SByungchul Park 3594545c23f2SByungchul Park if (very_verbose(class)) { 3595545c23f2SByungchul Park printk("\nhash chain already cached, key: " 359604860d48SBorislav Petkov "%016Lx tail class: [%px] %s\n", 3597545c23f2SByungchul Park (unsigned long long)chain_key, 3598545c23f2SByungchul Park class->key, class->name); 3599545c23f2SByungchul Park } 3600545c23f2SByungchul Park 3601545c23f2SByungchul Park return 0; 3602545c23f2SByungchul Park } 3603545c23f2SByungchul Park 3604545c23f2SByungchul Park if (very_verbose(class)) { 360504860d48SBorislav Petkov printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n", 3606545c23f2SByungchul Park (unsigned long long)chain_key, class->key, class->name); 3607545c23f2SByungchul Park } 3608545c23f2SByungchul Park 3609545c23f2SByungchul Park if (!graph_lock()) 3610545c23f2SByungchul Park return 0; 3611545c23f2SByungchul Park 3612545c23f2SByungchul Park /* 3613545c23f2SByungchul Park * We have to walk the chain again locked - to avoid duplicates: 3614545c23f2SByungchul Park */ 3615545c23f2SByungchul Park chain = lookup_chain_cache(chain_key); 3616545c23f2SByungchul Park if (chain) { 3617545c23f2SByungchul Park graph_unlock(); 3618545c23f2SByungchul Park goto cache_hit; 3619545c23f2SByungchul Park } 3620545c23f2SByungchul Park 3621545c23f2SByungchul Park if (!add_chain_cache(curr, hlock, chain_key)) 3622545c23f2SByungchul Park return 0; 3623545c23f2SByungchul Park 3624545c23f2SByungchul Park return 1; 3625545c23f2SByungchul Park } 3626545c23f2SByungchul Park 36270b9fc8ecSYuyang Du static int validate_chain(struct task_struct *curr, 36280b9fc8ecSYuyang Du struct held_lock *hlock, 36290b9fc8ecSYuyang Du int chain_head, u64 chain_key) 36308eddac3fSPeter Zijlstra { 36318eddac3fSPeter Zijlstra /* 36328eddac3fSPeter Zijlstra * Trylock 
needs to maintain the stack of held locks, but it 36338eddac3fSPeter Zijlstra * does not add new dependencies, because trylock can be done 36348eddac3fSPeter Zijlstra * in any order. 36358eddac3fSPeter Zijlstra * 36368eddac3fSPeter Zijlstra * We look up the chain_key and do the O(N^2) check and update of 36378eddac3fSPeter Zijlstra * the dependencies only if this is a new dependency chain. 3638545c23f2SByungchul Park * (If lookup_chain_cache_add() returns with 1 it acquires 36398eddac3fSPeter Zijlstra * graph_lock for us) 36408eddac3fSPeter Zijlstra */ 3641fb9edbe9SOleg Nesterov if (!hlock->trylock && hlock->check && 3642545c23f2SByungchul Park lookup_chain_cache_add(curr, hlock, chain_key)) { 36438eddac3fSPeter Zijlstra /* 36448eddac3fSPeter Zijlstra * Check whether the last held lock: 36458eddac3fSPeter Zijlstra * 36468eddac3fSPeter Zijlstra * - is irq-safe, if this lock is irq-unsafe 36478eddac3fSPeter Zijlstra * - is softirq-safe, if this lock is hardirq-unsafe 36488eddac3fSPeter Zijlstra * 36498eddac3fSPeter Zijlstra * And check whether the new lock's dependency graph 365031a490e5SYuyang Du * could lead back to the previous lock: 36518eddac3fSPeter Zijlstra * 365231a490e5SYuyang Du * - within the current held-lock stack 365331a490e5SYuyang Du * - across our accumulated lock dependency records 365431a490e5SYuyang Du * 365531a490e5SYuyang Du * any of these scenarios could lead to a deadlock. 365631a490e5SYuyang Du */ 365731a490e5SYuyang Du /* 365831a490e5SYuyang Du * The simple case: does the current task hold the same lock 365931a490e5SYuyang Du * already? 36608eddac3fSPeter Zijlstra */ 36614609c4f9SYuyang Du int ret = check_deadlock(curr, hlock); 36628eddac3fSPeter Zijlstra 36638eddac3fSPeter Zijlstra if (!ret) 36648eddac3fSPeter Zijlstra return 0; 36658eddac3fSPeter Zijlstra /* 36668eddac3fSPeter Zijlstra * Add dependency only if this lock is not the head 3667d61fc96aSBoqun Feng * of the chain, and if the new lock introduces no more 3668d61fc96aSBoqun Feng * lock dependency (because we already hold a lock with the 3669d61fc96aSBoqun Feng * same lock class) nor deadlock (because the nest_lock 3670d61fc96aSBoqun Feng * serializes nesting locks), see the comments for 3671d61fc96aSBoqun Feng * check_deadlock().
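 *
 * Editor's illustration of the nest_lock (ret == 2) case, with
 * hypothetical locks of the same class:
 *
 *	mutex_lock(&parent);
 *	mutex_lock_nest_lock(&a->lock, &parent);
 *	mutex_lock_nest_lock(&b->lock, &parent);
 *
 * here &parent serializes the children, so no a->lock -> b->lock
 * dependency needs to be recorded.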
36728eddac3fSPeter Zijlstra */ 3673545c23f2SByungchul Park if (!chain_head && ret != 2) { 36748eddac3fSPeter Zijlstra if (!check_prevs_add(curr, hlock)) 36758eddac3fSPeter Zijlstra return 0; 3676545c23f2SByungchul Park } 3677545c23f2SByungchul Park 36788eddac3fSPeter Zijlstra graph_unlock(); 3679545c23f2SByungchul Park } else { 3680545c23f2SByungchul Park /* after lookup_chain_cache_add(): */ 36818eddac3fSPeter Zijlstra if (unlikely(!debug_locks)) 36828eddac3fSPeter Zijlstra return 0; 3683545c23f2SByungchul Park } 36848eddac3fSPeter Zijlstra 36858eddac3fSPeter Zijlstra return 1; 36868eddac3fSPeter Zijlstra } 36878eddac3fSPeter Zijlstra #else 36888eddac3fSPeter Zijlstra static inline int validate_chain(struct task_struct *curr, 36890b9fc8ecSYuyang Du struct held_lock *hlock, 36908eddac3fSPeter Zijlstra int chain_head, u64 chain_key) 36918eddac3fSPeter Zijlstra { 36928eddac3fSPeter Zijlstra return 1; 36938eddac3fSPeter Zijlstra } 3694810507feSWaiman Long 3695810507feSWaiman Long static void init_chain_block_buckets(void) { } 3696e7a38f63SYuyang Du #endif /* CONFIG_PROVE_LOCKING */ 36978eddac3fSPeter Zijlstra 36988eddac3fSPeter Zijlstra /* 36998eddac3fSPeter Zijlstra * We are building curr_chain_key incrementally, so double-check 37008eddac3fSPeter Zijlstra * it from scratch, to make sure that it's done correctly: 37018eddac3fSPeter Zijlstra */ 37028eddac3fSPeter Zijlstra static void check_chain_key(struct task_struct *curr) 37038eddac3fSPeter Zijlstra { 37048eddac3fSPeter Zijlstra #ifdef CONFIG_DEBUG_LOCKDEP 37058eddac3fSPeter Zijlstra struct held_lock *hlock, *prev_hlock = NULL; 37065f18ab5cSAlfredo Alvarez Fernandez unsigned int i; 3707f6ec8829SYuyang Du u64 chain_key = INITIAL_CHAIN_KEY; 37088eddac3fSPeter Zijlstra 37098eddac3fSPeter Zijlstra for (i = 0; i < curr->lockdep_depth; i++) { 37108eddac3fSPeter Zijlstra hlock = curr->held_locks + i; 37118eddac3fSPeter Zijlstra if (chain_key != hlock->prev_chain_key) { 37128eddac3fSPeter Zijlstra debug_locks_off(); 37138eddac3fSPeter Zijlstra /* 37148eddac3fSPeter Zijlstra * We got mighty confused, our chain keys don't match 37158eddac3fSPeter Zijlstra * with what we expect, did someone trample on our task state? 37168eddac3fSPeter Zijlstra */ 37178eddac3fSPeter Zijlstra WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n", 37188eddac3fSPeter Zijlstra curr->lockdep_depth, i, 37198eddac3fSPeter Zijlstra (unsigned long long)chain_key, 37208eddac3fSPeter Zijlstra (unsigned long long)hlock->prev_chain_key); 37218eddac3fSPeter Zijlstra return; 37228eddac3fSPeter Zijlstra } 372301bb6f0aSYuyang Du 37248eddac3fSPeter Zijlstra /* 372501bb6f0aSYuyang Du * hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is 372601bb6f0aSYuyang Du * it a registered lock class index?
37278eddac3fSPeter Zijlstra */ 372801bb6f0aSYuyang Du if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use))) 37298eddac3fSPeter Zijlstra return; 37308eddac3fSPeter Zijlstra 37318eddac3fSPeter Zijlstra if (prev_hlock && (prev_hlock->irq_context != 37328eddac3fSPeter Zijlstra hlock->irq_context)) 3733f6ec8829SYuyang Du chain_key = INITIAL_CHAIN_KEY; 3734f611e8cfSBoqun Feng chain_key = iterate_chain_key(chain_key, hlock_id(hlock)); 37358eddac3fSPeter Zijlstra prev_hlock = hlock; 37368eddac3fSPeter Zijlstra } 37378eddac3fSPeter Zijlstra if (chain_key != curr->curr_chain_key) { 37388eddac3fSPeter Zijlstra debug_locks_off(); 37398eddac3fSPeter Zijlstra /* 37408eddac3fSPeter Zijlstra * More smoking hash instead of calculating it, damn see these 37418eddac3fSPeter Zijlstra * numbers float.. I bet that a pink elephant stepped on my memory. 37428eddac3fSPeter Zijlstra */ 37438eddac3fSPeter Zijlstra WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n", 37448eddac3fSPeter Zijlstra curr->lockdep_depth, i, 37458eddac3fSPeter Zijlstra (unsigned long long)chain_key, 37468eddac3fSPeter Zijlstra (unsigned long long)curr->curr_chain_key); 37478eddac3fSPeter Zijlstra } 37488eddac3fSPeter Zijlstra #endif 37498eddac3fSPeter Zijlstra } 37508eddac3fSPeter Zijlstra 375130a35f79SArnd Bergmann #ifdef CONFIG_PROVE_LOCKING 37520d2cc3b3SFrederic Weisbecker static int mark_lock(struct task_struct *curr, struct held_lock *this, 37530d2cc3b3SFrederic Weisbecker enum lock_usage_bit new_bit); 37540d2cc3b3SFrederic Weisbecker 3755f7c1c6b3SYuyang Du static void print_usage_bug_scenario(struct held_lock *lock) 37568eddac3fSPeter Zijlstra { 37578eddac3fSPeter Zijlstra struct lock_class *class = hlock_class(lock); 37588eddac3fSPeter Zijlstra 37598eddac3fSPeter Zijlstra printk(" Possible unsafe locking scenario:\n\n"); 37608eddac3fSPeter Zijlstra printk(" CPU0\n"); 37618eddac3fSPeter Zijlstra printk(" ----\n"); 37628eddac3fSPeter Zijlstra printk(" lock("); 37638eddac3fSPeter Zijlstra __print_lock_name(class); 3764f943fe0fSDmitry Vyukov printk(KERN_CONT ");\n"); 37658eddac3fSPeter Zijlstra printk(" <Interrupt>\n"); 37668eddac3fSPeter Zijlstra printk(" lock("); 37678eddac3fSPeter Zijlstra __print_lock_name(class); 3768f943fe0fSDmitry Vyukov printk(KERN_CONT ");\n"); 37698eddac3fSPeter Zijlstra printk("\n *** DEADLOCK ***\n\n"); 37708eddac3fSPeter Zijlstra } 37718eddac3fSPeter Zijlstra 3772f7c1c6b3SYuyang Du static void 37738eddac3fSPeter Zijlstra print_usage_bug(struct task_struct *curr, struct held_lock *this, 37748eddac3fSPeter Zijlstra enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) 37758eddac3fSPeter Zijlstra { 37768eddac3fSPeter Zijlstra if (!debug_locks_off_graph_unlock() || debug_locks_silent) 3777f7c1c6b3SYuyang Du return; 37788eddac3fSPeter Zijlstra 3779681fbec8SPaul E. McKenney pr_warn("\n"); 3780a5dd63efSPaul E. McKenney pr_warn("================================\n"); 3781a5dd63efSPaul E. McKenney pr_warn("WARNING: inconsistent lock state\n"); 37828eddac3fSPeter Zijlstra print_kernel_ident(); 3783a5dd63efSPaul E. McKenney pr_warn("--------------------------------\n"); 37848eddac3fSPeter Zijlstra 3785681fbec8SPaul E. McKenney pr_warn("inconsistent {%s} -> {%s} usage.\n", 37868eddac3fSPeter Zijlstra usage_str[prev_bit], usage_str[new_bit]); 37878eddac3fSPeter Zijlstra 3788681fbec8SPaul E. 
McKenney pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", 37898eddac3fSPeter Zijlstra curr->comm, task_pid_nr(curr), 3790f9ad4a5fSPeter Zijlstra lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT, 3791ef996916SPeter Zijlstra lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, 3792f9ad4a5fSPeter Zijlstra lockdep_hardirqs_enabled(), 3793ef996916SPeter Zijlstra lockdep_softirqs_enabled(curr)); 37948eddac3fSPeter Zijlstra print_lock(this); 37958eddac3fSPeter Zijlstra 3796681fbec8SPaul E. McKenney pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]); 379712593b74SBart Van Assche print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1); 37988eddac3fSPeter Zijlstra 37998eddac3fSPeter Zijlstra print_irqtrace_events(curr); 3800681fbec8SPaul E. McKenney pr_warn("\nother info that might help us debug this:\n"); 38018eddac3fSPeter Zijlstra print_usage_bug_scenario(this); 38028eddac3fSPeter Zijlstra 38038eddac3fSPeter Zijlstra lockdep_print_held_locks(curr); 38048eddac3fSPeter Zijlstra 3805681fbec8SPaul E. McKenney pr_warn("\nstack backtrace:\n"); 38068eddac3fSPeter Zijlstra dump_stack(); 38078eddac3fSPeter Zijlstra } 38088eddac3fSPeter Zijlstra 38098eddac3fSPeter Zijlstra /* 38108eddac3fSPeter Zijlstra * Print out an error if an invalid bit is set: 38118eddac3fSPeter Zijlstra */ 38128eddac3fSPeter Zijlstra static inline int 38138eddac3fSPeter Zijlstra valid_state(struct task_struct *curr, struct held_lock *this, 38148eddac3fSPeter Zijlstra enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) 38158eddac3fSPeter Zijlstra { 3816f7c1c6b3SYuyang Du if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) { 3817f7c1c6b3SYuyang Du print_usage_bug(curr, this, bad_bit, new_bit); 3818f7c1c6b3SYuyang Du return 0; 3819f7c1c6b3SYuyang Du } 38208eddac3fSPeter Zijlstra return 1; 38218eddac3fSPeter Zijlstra } 38228eddac3fSPeter Zijlstra 38238eddac3fSPeter Zijlstra 38248eddac3fSPeter Zijlstra /* 38258eddac3fSPeter Zijlstra * print irq inversion bug: 38268eddac3fSPeter Zijlstra */ 3827f7c1c6b3SYuyang Du static void 38288eddac3fSPeter Zijlstra print_irq_inversion_bug(struct task_struct *curr, 38298eddac3fSPeter Zijlstra struct lock_list *root, struct lock_list *other, 38308eddac3fSPeter Zijlstra struct held_lock *this, int forwards, 38318eddac3fSPeter Zijlstra const char *irqclass) 38328eddac3fSPeter Zijlstra { 38338eddac3fSPeter Zijlstra struct lock_list *entry = other; 38348eddac3fSPeter Zijlstra struct lock_list *middle = NULL; 38358eddac3fSPeter Zijlstra int depth; 38368eddac3fSPeter Zijlstra 38378eddac3fSPeter Zijlstra if (!debug_locks_off_graph_unlock() || debug_locks_silent) 3838f7c1c6b3SYuyang Du return; 38398eddac3fSPeter Zijlstra 3840681fbec8SPaul E. McKenney pr_warn("\n"); 3841a5dd63efSPaul E. McKenney pr_warn("========================================================\n"); 3842a5dd63efSPaul E. McKenney pr_warn("WARNING: possible irq lock inversion dependency detected\n"); 38438eddac3fSPeter Zijlstra print_kernel_ident(); 3844a5dd63efSPaul E. McKenney pr_warn("--------------------------------------------------------\n"); 3845681fbec8SPaul E. McKenney pr_warn("%s/%d just changed the state of lock:\n", 38468eddac3fSPeter Zijlstra curr->comm, task_pid_nr(curr)); 38478eddac3fSPeter Zijlstra print_lock(this); 38488eddac3fSPeter Zijlstra if (forwards) 3849681fbec8SPaul E. McKenney pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass); 38508eddac3fSPeter Zijlstra else 3851681fbec8SPaul E. 
McKenney pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass); 38528eddac3fSPeter Zijlstra print_lock_name(other->class); 3853681fbec8SPaul E. McKenney pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n"); 38548eddac3fSPeter Zijlstra 3855681fbec8SPaul E. McKenney pr_warn("\nother info that might help us debug this:\n"); 38568eddac3fSPeter Zijlstra 38578eddac3fSPeter Zijlstra /* Find a middle lock (if one exists) */ 38588eddac3fSPeter Zijlstra depth = get_lock_depth(other); 38598eddac3fSPeter Zijlstra do { 38608eddac3fSPeter Zijlstra if (depth == 0 && (entry != root)) { 3861681fbec8SPaul E. McKenney pr_warn("lockdep:%s bad path found in chain graph\n", __func__); 38628eddac3fSPeter Zijlstra break; 38638eddac3fSPeter Zijlstra } 38648eddac3fSPeter Zijlstra middle = entry; 38658eddac3fSPeter Zijlstra entry = get_lock_parent(entry); 38668eddac3fSPeter Zijlstra depth--; 38678eddac3fSPeter Zijlstra } while (entry && entry != root && (depth >= 0)); 38688eddac3fSPeter Zijlstra if (forwards) 38698eddac3fSPeter Zijlstra print_irq_lock_scenario(root, other, 38708eddac3fSPeter Zijlstra middle ? middle->class : root->class, other->class); 38718eddac3fSPeter Zijlstra else 38728eddac3fSPeter Zijlstra print_irq_lock_scenario(other, root, 38738eddac3fSPeter Zijlstra middle ? middle->class : other->class, root->class); 38748eddac3fSPeter Zijlstra 38758eddac3fSPeter Zijlstra lockdep_print_held_locks(curr); 38768eddac3fSPeter Zijlstra 3877681fbec8SPaul E. McKenney pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); 387812593b74SBart Van Assche root->trace = save_trace(); 387912593b74SBart Van Assche if (!root->trace) 3880f7c1c6b3SYuyang Du return; 38818eddac3fSPeter Zijlstra print_shortest_lock_dependencies(other, root); 38828eddac3fSPeter Zijlstra 3883681fbec8SPaul E. 
McKenney pr_warn("\nstack backtrace:\n"); 38848eddac3fSPeter Zijlstra dump_stack(); 38858eddac3fSPeter Zijlstra } 38868eddac3fSPeter Zijlstra 38878eddac3fSPeter Zijlstra /* 38888eddac3fSPeter Zijlstra * Prove that in the forwards-direction subgraph starting at <this> 38898eddac3fSPeter Zijlstra * there is no lock matching <mask>: 38908eddac3fSPeter Zijlstra */ 38918eddac3fSPeter Zijlstra static int 38928eddac3fSPeter Zijlstra check_usage_forwards(struct task_struct *curr, struct held_lock *this, 3893f08e3888SBoqun Feng enum lock_usage_bit bit) 38948eddac3fSPeter Zijlstra { 3895b11be024SBoqun Feng enum bfs_result ret; 38968eddac3fSPeter Zijlstra struct lock_list root; 38973f649ab7SKees Cook struct lock_list *target_entry; 3898f08e3888SBoqun Feng enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK; 3899f08e3888SBoqun Feng unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit); 39008eddac3fSPeter Zijlstra 39016971c0f3SBoqun Feng bfs_init_root(&root, this); 3902f08e3888SBoqun Feng ret = find_usage_forwards(&root, usage_mask, &target_entry); 3903b11be024SBoqun Feng if (bfs_error(ret)) { 3904f7c1c6b3SYuyang Du print_bfs_bug(ret); 3905f7c1c6b3SYuyang Du return 0; 3906f7c1c6b3SYuyang Du } 3907b11be024SBoqun Feng if (ret == BFS_RNOMATCH) 3908b11be024SBoqun Feng return 1; 39098eddac3fSPeter Zijlstra 3910f08e3888SBoqun Feng /* Check whether write or read usage is the match */ 3911f08e3888SBoqun Feng if (target_entry->class->usage_mask & lock_flag(bit)) { 3912f7c1c6b3SYuyang Du print_irq_inversion_bug(curr, &root, target_entry, 3913f08e3888SBoqun Feng this, 1, state_name(bit)); 3914f08e3888SBoqun Feng } else { 3915f08e3888SBoqun Feng print_irq_inversion_bug(curr, &root, target_entry, 3916f08e3888SBoqun Feng this, 1, state_name(read_bit)); 3917f08e3888SBoqun Feng } 3918f08e3888SBoqun Feng 3919f7c1c6b3SYuyang Du return 0; 39208eddac3fSPeter Zijlstra } 39218eddac3fSPeter Zijlstra 39228eddac3fSPeter Zijlstra /* 39238eddac3fSPeter Zijlstra * Prove that in the backwards-direction subgraph starting at <this> 39248eddac3fSPeter Zijlstra * there is no lock matching <mask>: 39258eddac3fSPeter Zijlstra */ 39268eddac3fSPeter Zijlstra static int 39278eddac3fSPeter Zijlstra check_usage_backwards(struct task_struct *curr, struct held_lock *this, 3928f08e3888SBoqun Feng enum lock_usage_bit bit) 39298eddac3fSPeter Zijlstra { 3930b11be024SBoqun Feng enum bfs_result ret; 39318eddac3fSPeter Zijlstra struct lock_list root; 39323f649ab7SKees Cook struct lock_list *target_entry; 3933f08e3888SBoqun Feng enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK; 3934f08e3888SBoqun Feng unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit); 39358eddac3fSPeter Zijlstra 39366971c0f3SBoqun Feng bfs_init_rootb(&root, this); 3937f08e3888SBoqun Feng ret = find_usage_backwards(&root, usage_mask, &target_entry); 3938b11be024SBoqun Feng if (bfs_error(ret)) { 3939f7c1c6b3SYuyang Du print_bfs_bug(ret); 3940f7c1c6b3SYuyang Du return 0; 3941f7c1c6b3SYuyang Du } 3942b11be024SBoqun Feng if (ret == BFS_RNOMATCH) 3943b11be024SBoqun Feng return 1; 39448eddac3fSPeter Zijlstra 3945f08e3888SBoqun Feng /* Check whether write or read usage is the match */ 3946f08e3888SBoqun Feng if (target_entry->class->usage_mask & lock_flag(bit)) { 3947f7c1c6b3SYuyang Du print_irq_inversion_bug(curr, &root, target_entry, 3948f08e3888SBoqun Feng this, 0, state_name(bit)); 3949f08e3888SBoqun Feng } else { 3950f08e3888SBoqun Feng print_irq_inversion_bug(curr, &root, target_entry, 3951f08e3888SBoqun Feng this, 0, state_name(read_bit)); 
3952f08e3888SBoqun Feng } 3953f08e3888SBoqun Feng 3954f7c1c6b3SYuyang Du return 0; 39558eddac3fSPeter Zijlstra } 39568eddac3fSPeter Zijlstra 39578eddac3fSPeter Zijlstra void print_irqtrace_events(struct task_struct *curr) 39588eddac3fSPeter Zijlstra { 39590584df9cSMarco Elver const struct irqtrace_events *trace = &curr->irqtrace; 39600584df9cSMarco Elver 39610584df9cSMarco Elver printk("irq event stamp: %u\n", trace->irq_events); 396204860d48SBorislav Petkov printk("hardirqs last enabled at (%u): [<%px>] %pS\n", 39630584df9cSMarco Elver trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip, 39640584df9cSMarco Elver (void *)trace->hardirq_enable_ip); 396504860d48SBorislav Petkov printk("hardirqs last disabled at (%u): [<%px>] %pS\n", 39660584df9cSMarco Elver trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip, 39670584df9cSMarco Elver (void *)trace->hardirq_disable_ip); 396804860d48SBorislav Petkov printk("softirqs last enabled at (%u): [<%px>] %pS\n", 39690584df9cSMarco Elver trace->softirq_enable_event, (void *)trace->softirq_enable_ip, 39700584df9cSMarco Elver (void *)trace->softirq_enable_ip); 397104860d48SBorislav Petkov printk("softirqs last disabled at (%u): [<%px>] %pS\n", 39720584df9cSMarco Elver trace->softirq_disable_event, (void *)trace->softirq_disable_ip, 39730584df9cSMarco Elver (void *)trace->softirq_disable_ip); 39748eddac3fSPeter Zijlstra } 39758eddac3fSPeter Zijlstra 39768eddac3fSPeter Zijlstra static int HARDIRQ_verbose(struct lock_class *class) 39778eddac3fSPeter Zijlstra { 39788eddac3fSPeter Zijlstra #if HARDIRQ_VERBOSE 39798eddac3fSPeter Zijlstra return class_filter(class); 39808eddac3fSPeter Zijlstra #endif 39818eddac3fSPeter Zijlstra return 0; 39828eddac3fSPeter Zijlstra } 39838eddac3fSPeter Zijlstra 39848eddac3fSPeter Zijlstra static int SOFTIRQ_verbose(struct lock_class *class) 39858eddac3fSPeter Zijlstra { 39868eddac3fSPeter Zijlstra #if SOFTIRQ_VERBOSE 39878eddac3fSPeter Zijlstra return class_filter(class); 39888eddac3fSPeter Zijlstra #endif 39898eddac3fSPeter Zijlstra return 0; 39908eddac3fSPeter Zijlstra } 39918eddac3fSPeter Zijlstra 39928eddac3fSPeter Zijlstra static int (*state_verbose_f[])(struct lock_class *class) = { 39938eddac3fSPeter Zijlstra #define LOCKDEP_STATE(__STATE) \ 39948eddac3fSPeter Zijlstra __STATE##_verbose, 39958eddac3fSPeter Zijlstra #include "lockdep_states.h" 39968eddac3fSPeter Zijlstra #undef LOCKDEP_STATE 39978eddac3fSPeter Zijlstra }; 39988eddac3fSPeter Zijlstra 39998eddac3fSPeter Zijlstra static inline int state_verbose(enum lock_usage_bit bit, 40008eddac3fSPeter Zijlstra struct lock_class *class) 40018eddac3fSPeter Zijlstra { 4002c902a1e8SFrederic Weisbecker return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class); 40038eddac3fSPeter Zijlstra } 40048eddac3fSPeter Zijlstra 40058eddac3fSPeter Zijlstra typedef int (*check_usage_f)(struct task_struct *, struct held_lock *, 40068eddac3fSPeter Zijlstra enum lock_usage_bit bit, const char *name); 40078eddac3fSPeter Zijlstra 40088eddac3fSPeter Zijlstra static int 40098eddac3fSPeter Zijlstra mark_lock_irq(struct task_struct *curr, struct held_lock *this, 40108eddac3fSPeter Zijlstra enum lock_usage_bit new_bit) 40118eddac3fSPeter Zijlstra { 40128eddac3fSPeter Zijlstra int excl_bit = exclusive_bit(new_bit); 4013bba2a8f1SFrederic Weisbecker int read = new_bit & LOCK_USAGE_READ_MASK; 4014bba2a8f1SFrederic Weisbecker int dir = new_bit & LOCK_USAGE_DIR_MASK; 40158eddac3fSPeter Zijlstra 40168eddac3fSPeter Zijlstra /* 40178eddac3fSPeter Zijlstra * Validate that this particular 
lock does not have conflicting 40188eddac3fSPeter Zijlstra * usage states. 40198eddac3fSPeter Zijlstra */ 40208eddac3fSPeter Zijlstra if (!valid_state(curr, this, new_bit, excl_bit)) 40218eddac3fSPeter Zijlstra return 0; 40228eddac3fSPeter Zijlstra 40238eddac3fSPeter Zijlstra /* 4024f08e3888SBoqun Feng * Check for read in write conflicts 4025f08e3888SBoqun Feng */ 4026f08e3888SBoqun Feng if (!read && !valid_state(curr, this, new_bit, 4027f08e3888SBoqun Feng excl_bit + LOCK_USAGE_READ_MASK)) 4028f08e3888SBoqun Feng return 0; 4029f08e3888SBoqun Feng 4030f08e3888SBoqun Feng 4031f08e3888SBoqun Feng /* 40328eddac3fSPeter Zijlstra * Validate that the lock dependencies don't have conflicting usage 40338eddac3fSPeter Zijlstra * states. 40348eddac3fSPeter Zijlstra */ 4035f08e3888SBoqun Feng if (dir) { 40368eddac3fSPeter Zijlstra /* 4037f08e3888SBoqun Feng * mark ENABLED has to look backwards -- to ensure no dependee 4038f08e3888SBoqun Feng * has USED_IN state, which, again, would allow recursion deadlocks. 40398eddac3fSPeter Zijlstra */ 4040f08e3888SBoqun Feng if (!check_usage_backwards(curr, this, excl_bit)) 40418eddac3fSPeter Zijlstra return 0; 4042f08e3888SBoqun Feng } else { 4043f08e3888SBoqun Feng /* 4044f08e3888SBoqun Feng * mark USED_IN has to look forwards -- to ensure no dependency 4045f08e3888SBoqun Feng * has ENABLED state, which would allow recursion deadlocks. 4046f08e3888SBoqun Feng */ 4047f08e3888SBoqun Feng if (!check_usage_forwards(curr, this, excl_bit)) 40488eddac3fSPeter Zijlstra return 0; 40498eddac3fSPeter Zijlstra } 40508eddac3fSPeter Zijlstra 40518eddac3fSPeter Zijlstra if (state_verbose(new_bit, hlock_class(this))) 40528eddac3fSPeter Zijlstra return 2; 40538eddac3fSPeter Zijlstra 40548eddac3fSPeter Zijlstra return 1; 40558eddac3fSPeter Zijlstra } 40568eddac3fSPeter Zijlstra 40578eddac3fSPeter Zijlstra /* 40588eddac3fSPeter Zijlstra * Mark all held locks with a usage bit: 40598eddac3fSPeter Zijlstra */ 40608eddac3fSPeter Zijlstra static int 4061436a49aeSFrederic Weisbecker mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit) 40628eddac3fSPeter Zijlstra { 40638eddac3fSPeter Zijlstra struct held_lock *hlock; 40648eddac3fSPeter Zijlstra int i; 40658eddac3fSPeter Zijlstra 40668eddac3fSPeter Zijlstra for (i = 0; i < curr->lockdep_depth; i++) { 4067436a49aeSFrederic Weisbecker enum lock_usage_bit hlock_bit = base_bit; 40688eddac3fSPeter Zijlstra hlock = curr->held_locks + i; 40698eddac3fSPeter Zijlstra 40708eddac3fSPeter Zijlstra if (hlock->read) 4071bba2a8f1SFrederic Weisbecker hlock_bit += LOCK_USAGE_READ_MASK; 40728eddac3fSPeter Zijlstra 4073436a49aeSFrederic Weisbecker BUG_ON(hlock_bit >= LOCK_USAGE_STATES); 40748eddac3fSPeter Zijlstra 407534d0ed5eSOleg Nesterov if (!hlock->check) 40768eddac3fSPeter Zijlstra continue; 40778eddac3fSPeter Zijlstra 4078436a49aeSFrederic Weisbecker if (!mark_lock(curr, hlock, hlock_bit)) 40798eddac3fSPeter Zijlstra return 0; 40808eddac3fSPeter Zijlstra } 40818eddac3fSPeter Zijlstra 40828eddac3fSPeter Zijlstra return 1; 40838eddac3fSPeter Zijlstra } 40848eddac3fSPeter Zijlstra 40858eddac3fSPeter Zijlstra /* 40868eddac3fSPeter Zijlstra * Hardirqs will be enabled: 40878eddac3fSPeter Zijlstra */ 4088c86e9b98SPeter Zijlstra static void __trace_hardirqs_on_caller(void) 40898eddac3fSPeter Zijlstra { 40908eddac3fSPeter Zijlstra struct task_struct *curr = current; 40918eddac3fSPeter Zijlstra 40928eddac3fSPeter Zijlstra /* 40938eddac3fSPeter Zijlstra * We are going to turn hardirqs on, so set the 40948eddac3fSPeter Zijlstra * usage bit for 
all held locks: 40958eddac3fSPeter Zijlstra */ 4096436a49aeSFrederic Weisbecker if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ)) 40978eddac3fSPeter Zijlstra return; 40988eddac3fSPeter Zijlstra /* 40998eddac3fSPeter Zijlstra * If we have softirqs enabled, then set the usage 41008eddac3fSPeter Zijlstra * bit for all held locks. (disabled hardirqs prevented 41018eddac3fSPeter Zijlstra * this bit from being set before) 41028eddac3fSPeter Zijlstra */ 41038eddac3fSPeter Zijlstra if (curr->softirqs_enabled) 4104c86e9b98SPeter Zijlstra mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ); 41058eddac3fSPeter Zijlstra } 41068eddac3fSPeter Zijlstra 4107c86e9b98SPeter Zijlstra /** 4108c86e9b98SPeter Zijlstra * lockdep_hardirqs_on_prepare - Prepare for enabling interrupts 4109c86e9b98SPeter Zijlstra * @ip: Caller address 4110c86e9b98SPeter Zijlstra * 4111c86e9b98SPeter Zijlstra * Invoked before a possible transition to RCU idle from exit to user or 4112c86e9b98SPeter Zijlstra * guest mode. This ensures that all RCU operations are done before RCU 4113c86e9b98SPeter Zijlstra * stops watching. After the RCU transition lockdep_hardirqs_on() has to be 4114c86e9b98SPeter Zijlstra * invoked to set the final state. 4115c86e9b98SPeter Zijlstra */ 4116c86e9b98SPeter Zijlstra void lockdep_hardirqs_on_prepare(unsigned long ip) 41178eddac3fSPeter Zijlstra { 4118859d069eSPeter Zijlstra if (unlikely(!debug_locks)) 41198eddac3fSPeter Zijlstra return; 41208eddac3fSPeter Zijlstra 4121859d069eSPeter Zijlstra /* 4122859d069eSPeter Zijlstra * NMIs do not (and cannot) track lock dependencies, nothing to do. 4123859d069eSPeter Zijlstra */ 4124859d069eSPeter Zijlstra if (unlikely(in_nmi())) 4125859d069eSPeter Zijlstra return; 4126859d069eSPeter Zijlstra 4127f8e48a3dSPeter Zijlstra if (unlikely(this_cpu_read(lockdep_recursion))) 41288eddac3fSPeter Zijlstra return; 41298eddac3fSPeter Zijlstra 4130f9ad4a5fSPeter Zijlstra if (unlikely(lockdep_hardirqs_enabled())) { 41318eddac3fSPeter Zijlstra /* 41328eddac3fSPeter Zijlstra * Neither irq nor preemption are disabled here 41338eddac3fSPeter Zijlstra * so this is racy by nature but losing one hit 41348eddac3fSPeter Zijlstra * in a stat is not a big deal. 41358eddac3fSPeter Zijlstra */ 41368eddac3fSPeter Zijlstra __debug_atomic_inc(redundant_hardirqs_on); 41378eddac3fSPeter Zijlstra return; 41388eddac3fSPeter Zijlstra } 41398eddac3fSPeter Zijlstra 41408eddac3fSPeter Zijlstra /* 41418eddac3fSPeter Zijlstra * We're enabling irqs and according to our state above irqs weren't 41428eddac3fSPeter Zijlstra * already enabled, yet we find the hardware thinks they are in fact 41438eddac3fSPeter Zijlstra * enabled.. someone messed up their IRQ state tracing. 41448eddac3fSPeter Zijlstra */ 41458eddac3fSPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 41468eddac3fSPeter Zijlstra return; 41478eddac3fSPeter Zijlstra 41488eddac3fSPeter Zijlstra /* 41498eddac3fSPeter Zijlstra * See the fine text that goes along with this variable definition. 41508eddac3fSPeter Zijlstra */ 4151d671002bSzhengbin if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled)) 41528eddac3fSPeter Zijlstra return; 41538eddac3fSPeter Zijlstra 41548eddac3fSPeter Zijlstra /* 41558eddac3fSPeter Zijlstra * Can't allow enabling interrupts while in an interrupt handler, 41568eddac3fSPeter Zijlstra * that's general bad form and such. Recursion, limited stack etc.. 
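 *
 * Editor's illustration of what this catches (hypothetical handler):
 *
 *	my_irq_handler()
 *		local_irq_enable()	<- warns here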
41578eddac3fSPeter Zijlstra */ 4158f9ad4a5fSPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context())) 41598eddac3fSPeter Zijlstra return; 41608eddac3fSPeter Zijlstra 4161c86e9b98SPeter Zijlstra current->hardirq_chain_key = current->curr_chain_key; 4162c86e9b98SPeter Zijlstra 41634d004099SPeter Zijlstra lockdep_recursion_inc(); 4164c86e9b98SPeter Zijlstra __trace_hardirqs_on_caller(); 416510476e63SPeter Zijlstra lockdep_recursion_finish(); 41668eddac3fSPeter Zijlstra } 4167c86e9b98SPeter Zijlstra EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare); 4168c86e9b98SPeter Zijlstra 4169c86e9b98SPeter Zijlstra void noinstr lockdep_hardirqs_on(unsigned long ip) 4170c86e9b98SPeter Zijlstra { 41710584df9cSMarco Elver struct irqtrace_events *trace = &current->irqtrace; 4172c86e9b98SPeter Zijlstra 4173859d069eSPeter Zijlstra if (unlikely(!debug_locks)) 4174c86e9b98SPeter Zijlstra return; 4175c86e9b98SPeter Zijlstra 4176859d069eSPeter Zijlstra /* 4177859d069eSPeter Zijlstra * NMIs can happen in the middle of local_irq_{en,dis}able() where the 4178859d069eSPeter Zijlstra * tracking state and hardware state are out of sync. 4179859d069eSPeter Zijlstra * 4180859d069eSPeter Zijlstra * NMIs must save lockdep_hardirqs_enabled() to restore IRQ state from, 4181859d069eSPeter Zijlstra * and not rely on hardware state like normal interrupts. 4182859d069eSPeter Zijlstra */ 4183859d069eSPeter Zijlstra if (unlikely(in_nmi())) { 4184ed004953Speterz@infradead.org if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI)) 4185ed004953Speterz@infradead.org return; 4186ed004953Speterz@infradead.org 4187859d069eSPeter Zijlstra /* 4188859d069eSPeter Zijlstra * Skip: 4189859d069eSPeter Zijlstra * - recursion check, because NMI can hit lockdep; 4190859d069eSPeter Zijlstra * - hardware state check, because above; 4191859d069eSPeter Zijlstra * - chain_key check, see lockdep_hardirqs_on_prepare(). 4192859d069eSPeter Zijlstra */ 4193859d069eSPeter Zijlstra goto skip_checks; 4194859d069eSPeter Zijlstra } 4195859d069eSPeter Zijlstra 4196f8e48a3dSPeter Zijlstra if (unlikely(this_cpu_read(lockdep_recursion))) 4197c86e9b98SPeter Zijlstra return; 4198c86e9b98SPeter Zijlstra 4199f9ad4a5fSPeter Zijlstra if (lockdep_hardirqs_enabled()) { 4200c86e9b98SPeter Zijlstra /* 4201c86e9b98SPeter Zijlstra * Neither irq nor preemption are disabled here 4202c86e9b98SPeter Zijlstra * so this is racy by nature but losing one hit 4203c86e9b98SPeter Zijlstra * in a stat is not a big deal. 4204c86e9b98SPeter Zijlstra */ 4205c86e9b98SPeter Zijlstra __debug_atomic_inc(redundant_hardirqs_on); 4206c86e9b98SPeter Zijlstra return; 4207c86e9b98SPeter Zijlstra } 4208c86e9b98SPeter Zijlstra 4209c86e9b98SPeter Zijlstra /* 4210c86e9b98SPeter Zijlstra * We're enabling irqs and according to our state above irqs weren't 4211c86e9b98SPeter Zijlstra * already enabled, yet we find the hardware thinks they are in fact 4212c86e9b98SPeter Zijlstra * enabled.. someone messed up their IRQ state tracing. 4213c86e9b98SPeter Zijlstra */ 4214c86e9b98SPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 4215c86e9b98SPeter Zijlstra return; 4216c86e9b98SPeter Zijlstra 4217c86e9b98SPeter Zijlstra /* 4218c86e9b98SPeter Zijlstra * Ensure the lock stack remained unchanged between 4219c86e9b98SPeter Zijlstra * lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on().
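 *
 * Editor's sketch of the intended two-phase sequence on the
 * exit-to-user path (simplified):
 *
 *	lockdep_hardirqs_on_prepare(ip);	<- RCU still watching
 *	...					<- RCU may stop watching
 *	lockdep_hardirqs_on(ip);		<- commits the OFF -> ON transition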
4220c86e9b98SPeter Zijlstra */ 4221c86e9b98SPeter Zijlstra DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key != 4222c86e9b98SPeter Zijlstra current->curr_chain_key); 4223c86e9b98SPeter Zijlstra 4224859d069eSPeter Zijlstra skip_checks: 4225c86e9b98SPeter Zijlstra /* we'll do an OFF -> ON transition: */ 4226fddf9055SPeter Zijlstra __this_cpu_write(hardirqs_enabled, 1); 42270584df9cSMarco Elver trace->hardirq_enable_ip = ip; 42280584df9cSMarco Elver trace->hardirq_enable_event = ++trace->irq_events; 4229c86e9b98SPeter Zijlstra debug_atomic_inc(hardirqs_on_events); 4230c86e9b98SPeter Zijlstra } 4231c86e9b98SPeter Zijlstra EXPORT_SYMBOL_GPL(lockdep_hardirqs_on); 42328eddac3fSPeter Zijlstra 42338eddac3fSPeter Zijlstra /* 42348eddac3fSPeter Zijlstra * Hardirqs were disabled: 42358eddac3fSPeter Zijlstra */ 4236c86e9b98SPeter Zijlstra void noinstr lockdep_hardirqs_off(unsigned long ip) 42378eddac3fSPeter Zijlstra { 4238859d069eSPeter Zijlstra if (unlikely(!debug_locks)) 4239859d069eSPeter Zijlstra return; 42408eddac3fSPeter Zijlstra 4241859d069eSPeter Zijlstra /* 4242859d069eSPeter Zijlstra * Matching lockdep_hardirqs_on(), allow NMIs in the middle of lockdep; 4243859d069eSPeter Zijlstra * they will restore the software state. This ensures the software 4244859d069eSPeter Zijlstra * state is consistent inside NMIs as well. 4245859d069eSPeter Zijlstra */ 4246ed004953Speterz@infradead.org if (in_nmi()) { 4247ed004953Speterz@infradead.org if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI)) 4248ed004953Speterz@infradead.org return; 42494d004099SPeter Zijlstra } else if (__this_cpu_read(lockdep_recursion)) 42508eddac3fSPeter Zijlstra return; 42518eddac3fSPeter Zijlstra 42528eddac3fSPeter Zijlstra /* 42538eddac3fSPeter Zijlstra * So we're supposed to get called after you mask local IRQs, but for 42548eddac3fSPeter Zijlstra * some reason the hardware doesn't quite think you did a proper job. 42558eddac3fSPeter Zijlstra */ 42568eddac3fSPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 42578eddac3fSPeter Zijlstra return; 42588eddac3fSPeter Zijlstra 4259f9ad4a5fSPeter Zijlstra if (lockdep_hardirqs_enabled()) { 42600584df9cSMarco Elver struct irqtrace_events *trace = &current->irqtrace; 42610584df9cSMarco Elver 42628eddac3fSPeter Zijlstra /* 42638eddac3fSPeter Zijlstra * We have done an ON -> OFF transition: 42648eddac3fSPeter Zijlstra */ 4265fddf9055SPeter Zijlstra __this_cpu_write(hardirqs_enabled, 0); 42660584df9cSMarco Elver trace->hardirq_disable_ip = ip; 42670584df9cSMarco Elver trace->hardirq_disable_event = ++trace->irq_events; 42688eddac3fSPeter Zijlstra debug_atomic_inc(hardirqs_off_events); 4269c86e9b98SPeter Zijlstra } else { 42708eddac3fSPeter Zijlstra debug_atomic_inc(redundant_hardirqs_off); 42718eddac3fSPeter Zijlstra } 4272c86e9b98SPeter Zijlstra } 4273c86e9b98SPeter Zijlstra EXPORT_SYMBOL_GPL(lockdep_hardirqs_off); 42748eddac3fSPeter Zijlstra 42758eddac3fSPeter Zijlstra /* 42768eddac3fSPeter Zijlstra * Softirqs will be enabled: 42778eddac3fSPeter Zijlstra */ 42780d38453cSPeter Zijlstra void lockdep_softirqs_on(unsigned long ip) 42798eddac3fSPeter Zijlstra { 42800584df9cSMarco Elver struct irqtrace_events *trace = &current->irqtrace; 42818eddac3fSPeter Zijlstra 42824d004099SPeter Zijlstra if (unlikely(!lockdep_enabled())) 42838eddac3fSPeter Zijlstra return; 42848eddac3fSPeter Zijlstra 42858eddac3fSPeter Zijlstra /* 42868eddac3fSPeter Zijlstra * We fancy IRQs being disabled here, see softirq.c, avoids 42878eddac3fSPeter Zijlstra * funny state and nesting things.
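 *
 * Editor's sketch of the expected caller pattern (roughly what
 * __local_bh_enable_ip() in softirq.c does):
 *
 *	raw_local_irq_save(flags);
 *	lockdep_softirqs_on(ip);
 *	raw_local_irq_restore(flags);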
42888eddac3fSPeter Zijlstra */ 42898eddac3fSPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 42908eddac3fSPeter Zijlstra return; 42918eddac3fSPeter Zijlstra 42920584df9cSMarco Elver if (current->softirqs_enabled) { 42938eddac3fSPeter Zijlstra debug_atomic_inc(redundant_softirqs_on); 42948eddac3fSPeter Zijlstra return; 42958eddac3fSPeter Zijlstra } 42968eddac3fSPeter Zijlstra 42974d004099SPeter Zijlstra lockdep_recursion_inc(); 42988eddac3fSPeter Zijlstra /* 42998eddac3fSPeter Zijlstra * We'll do an OFF -> ON transition: 43008eddac3fSPeter Zijlstra */ 43010584df9cSMarco Elver current->softirqs_enabled = 1; 43020584df9cSMarco Elver trace->softirq_enable_ip = ip; 43030584df9cSMarco Elver trace->softirq_enable_event = ++trace->irq_events; 43048eddac3fSPeter Zijlstra debug_atomic_inc(softirqs_on_events); 43058eddac3fSPeter Zijlstra /* 43068eddac3fSPeter Zijlstra * We are going to turn softirqs on, so set the 43078eddac3fSPeter Zijlstra * usage bit for all held locks, if hardirqs are 43088eddac3fSPeter Zijlstra * enabled too: 43098eddac3fSPeter Zijlstra */ 4310f9ad4a5fSPeter Zijlstra if (lockdep_hardirqs_enabled()) 43110584df9cSMarco Elver mark_held_locks(current, LOCK_ENABLED_SOFTIRQ); 431210476e63SPeter Zijlstra lockdep_recursion_finish(); 43138eddac3fSPeter Zijlstra } 43148eddac3fSPeter Zijlstra 43158eddac3fSPeter Zijlstra /* 43168eddac3fSPeter Zijlstra * Softirqs were disabled: 43178eddac3fSPeter Zijlstra */ 43180d38453cSPeter Zijlstra void lockdep_softirqs_off(unsigned long ip) 43198eddac3fSPeter Zijlstra { 43204d004099SPeter Zijlstra if (unlikely(!lockdep_enabled())) 43218eddac3fSPeter Zijlstra return; 43228eddac3fSPeter Zijlstra 43238eddac3fSPeter Zijlstra /* 43248eddac3fSPeter Zijlstra * We fancy IRQs being disabled here, see softirq.c 43258eddac3fSPeter Zijlstra */ 43268eddac3fSPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 43278eddac3fSPeter Zijlstra return; 43288eddac3fSPeter Zijlstra 43290584df9cSMarco Elver if (current->softirqs_enabled) { 43300584df9cSMarco Elver struct irqtrace_events *trace = &current->irqtrace; 43310584df9cSMarco Elver 43328eddac3fSPeter Zijlstra /* 43338eddac3fSPeter Zijlstra * We have done an ON -> OFF transition: 43348eddac3fSPeter Zijlstra */ 43350584df9cSMarco Elver current->softirqs_enabled = 0; 43360584df9cSMarco Elver trace->softirq_disable_ip = ip; 43370584df9cSMarco Elver trace->softirq_disable_event = ++trace->irq_events; 43388eddac3fSPeter Zijlstra debug_atomic_inc(softirqs_off_events); 43398eddac3fSPeter Zijlstra /* 43408eddac3fSPeter Zijlstra * Whoops, we wanted softirqs off, so why aren't they?
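 *
 * Editor's sketch: the caller bumps the softirq count before telling
 * lockdep (cf. __local_bh_disable_ip() in softirq.c), so softirq_count()
 * must be non-zero by the time we get here:
 *
 *	__preempt_count_add(SOFTIRQ_DISABLE_OFFSET);
 *	lockdep_softirqs_off(ip);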
43418eddac3fSPeter Zijlstra */ 43428eddac3fSPeter Zijlstra DEBUG_LOCKS_WARN_ON(!softirq_count()); 43438eddac3fSPeter Zijlstra } else 43448eddac3fSPeter Zijlstra debug_atomic_inc(redundant_softirqs_off); 43458eddac3fSPeter Zijlstra } 43468eddac3fSPeter Zijlstra 434709180651SYuyang Du static int 434809180651SYuyang Du mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) 43498eddac3fSPeter Zijlstra { 435009180651SYuyang Du if (!check) 435109180651SYuyang Du goto lock_used; 435209180651SYuyang Du 43538eddac3fSPeter Zijlstra /* 43548eddac3fSPeter Zijlstra * If non-trylock use in a hardirq or softirq context, then 43558eddac3fSPeter Zijlstra * mark the lock as used in these contexts: 43568eddac3fSPeter Zijlstra */ 43578eddac3fSPeter Zijlstra if (!hlock->trylock) { 43588eddac3fSPeter Zijlstra if (hlock->read) { 4359f9ad4a5fSPeter Zijlstra if (lockdep_hardirq_context()) 43608eddac3fSPeter Zijlstra if (!mark_lock(curr, hlock, 43618eddac3fSPeter Zijlstra LOCK_USED_IN_HARDIRQ_READ)) 43628eddac3fSPeter Zijlstra return 0; 43638eddac3fSPeter Zijlstra if (curr->softirq_context) 43648eddac3fSPeter Zijlstra if (!mark_lock(curr, hlock, 43658eddac3fSPeter Zijlstra LOCK_USED_IN_SOFTIRQ_READ)) 43668eddac3fSPeter Zijlstra return 0; 43678eddac3fSPeter Zijlstra } else { 4368f9ad4a5fSPeter Zijlstra if (lockdep_hardirq_context()) 43698eddac3fSPeter Zijlstra if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) 43708eddac3fSPeter Zijlstra return 0; 43718eddac3fSPeter Zijlstra if (curr->softirq_context) 43728eddac3fSPeter Zijlstra if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ)) 43738eddac3fSPeter Zijlstra return 0; 43748eddac3fSPeter Zijlstra } 43758eddac3fSPeter Zijlstra } 43768eddac3fSPeter Zijlstra if (!hlock->hardirqs_off) { 43778eddac3fSPeter Zijlstra if (hlock->read) { 43788eddac3fSPeter Zijlstra if (!mark_lock(curr, hlock, 43798eddac3fSPeter Zijlstra LOCK_ENABLED_HARDIRQ_READ)) 43808eddac3fSPeter Zijlstra return 0; 43818eddac3fSPeter Zijlstra if (curr->softirqs_enabled) 43828eddac3fSPeter Zijlstra if (!mark_lock(curr, hlock, 43838eddac3fSPeter Zijlstra LOCK_ENABLED_SOFTIRQ_READ)) 43848eddac3fSPeter Zijlstra return 0; 43858eddac3fSPeter Zijlstra } else { 43868eddac3fSPeter Zijlstra if (!mark_lock(curr, hlock, 43878eddac3fSPeter Zijlstra LOCK_ENABLED_HARDIRQ)) 43888eddac3fSPeter Zijlstra return 0; 43898eddac3fSPeter Zijlstra if (curr->softirqs_enabled) 43908eddac3fSPeter Zijlstra if (!mark_lock(curr, hlock, 43918eddac3fSPeter Zijlstra LOCK_ENABLED_SOFTIRQ)) 43928eddac3fSPeter Zijlstra return 0; 43938eddac3fSPeter Zijlstra } 43948eddac3fSPeter Zijlstra } 43958eddac3fSPeter Zijlstra 439609180651SYuyang Du lock_used: 439709180651SYuyang Du /* mark it as used: */ 439809180651SYuyang Du if (!mark_lock(curr, hlock, LOCK_USED)) 439909180651SYuyang Du return 0; 440009180651SYuyang Du 44018eddac3fSPeter Zijlstra return 1; 44028eddac3fSPeter Zijlstra } 44038eddac3fSPeter Zijlstra 4404c2469756SBoqun Feng static inline unsigned int task_irq_context(struct task_struct *task) 4405c2469756SBoqun Feng { 4406f9ad4a5fSPeter Zijlstra return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() + 4407b3b9c187SWaiman Long LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context; 4408c2469756SBoqun Feng } 4409c2469756SBoqun Feng 44108eddac3fSPeter Zijlstra static int separate_irq_context(struct task_struct *curr, 44118eddac3fSPeter Zijlstra struct held_lock *hlock) 44128eddac3fSPeter Zijlstra { 44138eddac3fSPeter Zijlstra unsigned int depth = curr->lockdep_depth; 44148eddac3fSPeter Zijlstra 44158eddac3fSPeter 
Zijlstra /* 44168eddac3fSPeter Zijlstra * Keep track of points where we cross into an interrupt context: 44178eddac3fSPeter Zijlstra */ 44188eddac3fSPeter Zijlstra if (depth) { 44198eddac3fSPeter Zijlstra struct held_lock *prev_hlock; 44208eddac3fSPeter Zijlstra 44218eddac3fSPeter Zijlstra prev_hlock = curr->held_locks + depth-1; 44228eddac3fSPeter Zijlstra /* 44238eddac3fSPeter Zijlstra * If we cross into another context, reset the 44248eddac3fSPeter Zijlstra * hash key (this also prevents the checking and the 44258eddac3fSPeter Zijlstra * adding of the dependency to 'prev'): 44268eddac3fSPeter Zijlstra */ 44278eddac3fSPeter Zijlstra if (prev_hlock->irq_context != hlock->irq_context) 44288eddac3fSPeter Zijlstra return 1; 44298eddac3fSPeter Zijlstra } 44308eddac3fSPeter Zijlstra return 0; 44318eddac3fSPeter Zijlstra } 44328eddac3fSPeter Zijlstra 44338eddac3fSPeter Zijlstra /* 44348eddac3fSPeter Zijlstra * Mark a lock with a usage bit, and validate the state transition: 44358eddac3fSPeter Zijlstra */ 44368eddac3fSPeter Zijlstra static int mark_lock(struct task_struct *curr, struct held_lock *this, 44378eddac3fSPeter Zijlstra enum lock_usage_bit new_bit) 44388eddac3fSPeter Zijlstra { 44392bb8945bSPeter Zijlstra unsigned int new_mask, ret = 1; 44408eddac3fSPeter Zijlstra 44414d56330dSYuyang Du if (new_bit >= LOCK_USAGE_STATES) { 44424d56330dSYuyang Du DEBUG_LOCKS_WARN_ON(1); 44434d56330dSYuyang Du return 0; 44444d56330dSYuyang Du } 44454d56330dSYuyang Du 444623870f12Speterz@infradead.org if (new_bit == LOCK_USED && this->read) 444723870f12Speterz@infradead.org new_bit = LOCK_USED_READ; 444823870f12Speterz@infradead.org 444923870f12Speterz@infradead.org new_mask = 1 << new_bit; 445023870f12Speterz@infradead.org 44518eddac3fSPeter Zijlstra /* 44528eddac3fSPeter Zijlstra * If already set then do not dirty the cacheline, 44538eddac3fSPeter Zijlstra * nor do any checks: 44548eddac3fSPeter Zijlstra */ 44558eddac3fSPeter Zijlstra if (likely(hlock_class(this)->usage_mask & new_mask)) 44568eddac3fSPeter Zijlstra return 1; 44578eddac3fSPeter Zijlstra 44588eddac3fSPeter Zijlstra if (!graph_lock()) 44598eddac3fSPeter Zijlstra return 0; 44608eddac3fSPeter Zijlstra /* 44618eddac3fSPeter Zijlstra * Make sure we didn't race: 44628eddac3fSPeter Zijlstra */ 446323870f12Speterz@infradead.org if (unlikely(hlock_class(this)->usage_mask & new_mask)) 446423870f12Speterz@infradead.org goto unlock; 44658eddac3fSPeter Zijlstra 44661a393408SPeter Zijlstra if (!hlock_class(this)->usage_mask) 44671a393408SPeter Zijlstra debug_atomic_dec(nr_unused_locks); 44681a393408SPeter Zijlstra 44698eddac3fSPeter Zijlstra hlock_class(this)->usage_mask |= new_mask; 44708eddac3fSPeter Zijlstra 44712bb8945bSPeter Zijlstra if (new_bit < LOCK_TRACE_STATES) { 447212593b74SBart Van Assche if (!(hlock_class(this)->usage_traces[new_bit] = save_trace())) 44738eddac3fSPeter Zijlstra return 0; 44748eddac3fSPeter Zijlstra } 44758eddac3fSPeter Zijlstra 44761a393408SPeter Zijlstra if (new_bit < LOCK_USED) { 44778eddac3fSPeter Zijlstra ret = mark_lock_irq(curr, this, new_bit); 44788eddac3fSPeter Zijlstra if (!ret) 44798eddac3fSPeter Zijlstra return 0; 44808eddac3fSPeter Zijlstra } 44818eddac3fSPeter Zijlstra 448223870f12Speterz@infradead.org unlock: 44838eddac3fSPeter Zijlstra graph_unlock(); 44848eddac3fSPeter Zijlstra 44858eddac3fSPeter Zijlstra /* 44868eddac3fSPeter Zijlstra * We must printk outside of the graph_lock: 44878eddac3fSPeter Zijlstra */ 44888eddac3fSPeter Zijlstra if (ret == 2) { 44898eddac3fSPeter Zijlstra printk("\nmarked lock as 
{%s}:\n", usage_str[new_bit]); 44908eddac3fSPeter Zijlstra print_lock(this); 44918eddac3fSPeter Zijlstra print_irqtrace_events(curr); 44928eddac3fSPeter Zijlstra dump_stack(); 44938eddac3fSPeter Zijlstra } 44948eddac3fSPeter Zijlstra 44958eddac3fSPeter Zijlstra return ret; 44968eddac3fSPeter Zijlstra } 44978eddac3fSPeter Zijlstra 44989a019db0SPeter Zijlstra static inline short task_wait_context(struct task_struct *curr) 44999a019db0SPeter Zijlstra { 45009a019db0SPeter Zijlstra /* 45019a019db0SPeter Zijlstra * Set appropriate wait type for the context; for IRQs we have to take 45029a019db0SPeter Zijlstra * into account force_irqthread as that is implied by PREEMPT_RT. 45039a019db0SPeter Zijlstra */ 4504f9ad4a5fSPeter Zijlstra if (lockdep_hardirq_context()) { 45059a019db0SPeter Zijlstra /* 45069a019db0SPeter Zijlstra * Check if force_irqthreads will run us threaded. 45079a019db0SPeter Zijlstra */ 45089a019db0SPeter Zijlstra if (curr->hardirq_threaded || curr->irq_config) 45099a019db0SPeter Zijlstra return LD_WAIT_CONFIG; 45109a019db0SPeter Zijlstra 45119a019db0SPeter Zijlstra return LD_WAIT_SPIN; 45129a019db0SPeter Zijlstra } else if (curr->softirq_context) { 45139a019db0SPeter Zijlstra /* 45149a019db0SPeter Zijlstra * Softirqs are always threaded. 45159a019db0SPeter Zijlstra */ 45169a019db0SPeter Zijlstra return LD_WAIT_CONFIG; 45179a019db0SPeter Zijlstra } 45189a019db0SPeter Zijlstra 45199a019db0SPeter Zijlstra return LD_WAIT_MAX; 45209a019db0SPeter Zijlstra } 45219a019db0SPeter Zijlstra 4522de8f5e4fSPeter Zijlstra static int 4523de8f5e4fSPeter Zijlstra print_lock_invalid_wait_context(struct task_struct *curr, 4524de8f5e4fSPeter Zijlstra struct held_lock *hlock) 4525de8f5e4fSPeter Zijlstra { 45269a019db0SPeter Zijlstra short curr_inner; 45279a019db0SPeter Zijlstra 4528de8f5e4fSPeter Zijlstra if (!debug_locks_off()) 4529de8f5e4fSPeter Zijlstra return 0; 4530de8f5e4fSPeter Zijlstra if (debug_locks_silent) 4531de8f5e4fSPeter Zijlstra return 0; 4532de8f5e4fSPeter Zijlstra 4533de8f5e4fSPeter Zijlstra pr_warn("\n"); 4534de8f5e4fSPeter Zijlstra pr_warn("=============================\n"); 4535de8f5e4fSPeter Zijlstra pr_warn("[ BUG: Invalid wait context ]\n"); 4536de8f5e4fSPeter Zijlstra print_kernel_ident(); 4537de8f5e4fSPeter Zijlstra pr_warn("-----------------------------\n"); 4538de8f5e4fSPeter Zijlstra 4539de8f5e4fSPeter Zijlstra pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); 4540de8f5e4fSPeter Zijlstra print_lock(hlock); 4541de8f5e4fSPeter Zijlstra 4542de8f5e4fSPeter Zijlstra pr_warn("other info that might help us debug this:\n"); 45439a019db0SPeter Zijlstra 45449a019db0SPeter Zijlstra curr_inner = task_wait_context(curr); 45459a019db0SPeter Zijlstra pr_warn("context-{%d:%d}\n", curr_inner, curr_inner); 45469a019db0SPeter Zijlstra 4547de8f5e4fSPeter Zijlstra lockdep_print_held_locks(curr); 4548de8f5e4fSPeter Zijlstra 4549de8f5e4fSPeter Zijlstra pr_warn("stack backtrace:\n"); 4550de8f5e4fSPeter Zijlstra dump_stack(); 4551de8f5e4fSPeter Zijlstra 4552de8f5e4fSPeter Zijlstra return 0; 4553de8f5e4fSPeter Zijlstra } 4554de8f5e4fSPeter Zijlstra 4555de8f5e4fSPeter Zijlstra /* 4556de8f5e4fSPeter Zijlstra * Verify the wait_type context. 4557de8f5e4fSPeter Zijlstra * 4558de8f5e4fSPeter Zijlstra * This check validates we take locks in the right wait-type order; that is, it 4559de8f5e4fSPeter Zijlstra * ensures that we do not take mutexes inside spinlocks and do not attempt to 4560de8f5e4fSPeter Zijlstra * acquire spinlocks inside raw_spinlocks and the sort.
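 *
 * (Editorial illustration, not from the original source: on PREEMPT_RT a
 * spinlock_t becomes a sleeping lock, so the sequence below is flagged even
 * though it is harmless on !PREEMPT_RT kernels:
 *
 *	raw_spin_lock(&r);	// held: wait_type_inner == LD_WAIT_SPIN
 *	spin_lock(&s);		// next: LD_WAIT_CONFIG > LD_WAIT_SPIN -> splat
 *
 * i.e. a context that may never sleep is acquiring a lock that, in some
 * configuration, does sleep.)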
4561de8f5e4fSPeter Zijlstra * 4562de8f5e4fSPeter Zijlstra * The entire thing is slightly more complex because of RCU, RCU is a lock that 4563de8f5e4fSPeter Zijlstra * can be taken from (pretty much) any context but also has constraints. 4564de8f5e4fSPeter Zijlstra * However when taken in a stricter environment the RCU lock does not loosen 4565de8f5e4fSPeter Zijlstra * the constraints. 4566de8f5e4fSPeter Zijlstra * 4567de8f5e4fSPeter Zijlstra * Therefore we must look for the strictest environment in the lock stack and 4568de8f5e4fSPeter Zijlstra * compare that to the lock we're trying to acquire. 4569de8f5e4fSPeter Zijlstra */ 4570de8f5e4fSPeter Zijlstra static int check_wait_context(struct task_struct *curr, struct held_lock *next) 4571de8f5e4fSPeter Zijlstra { 4572dfd5e3f5SPeter Zijlstra u8 next_inner = hlock_class(next)->wait_type_inner; 4573dfd5e3f5SPeter Zijlstra u8 next_outer = hlock_class(next)->wait_type_outer; 4574dfd5e3f5SPeter Zijlstra u8 curr_inner; 4575de8f5e4fSPeter Zijlstra int depth; 4576de8f5e4fSPeter Zijlstra 4577de8f5e4fSPeter Zijlstra if (!curr->lockdep_depth || !next_inner || next->trylock) 4578de8f5e4fSPeter Zijlstra return 0; 4579de8f5e4fSPeter Zijlstra 4580de8f5e4fSPeter Zijlstra if (!next_outer) 4581de8f5e4fSPeter Zijlstra next_outer = next_inner; 4582de8f5e4fSPeter Zijlstra 4583de8f5e4fSPeter Zijlstra /* 4584de8f5e4fSPeter Zijlstra * Find start of current irq_context.. 4585de8f5e4fSPeter Zijlstra */ 4586de8f5e4fSPeter Zijlstra for (depth = curr->lockdep_depth - 1; depth >= 0; depth--) { 4587de8f5e4fSPeter Zijlstra struct held_lock *prev = curr->held_locks + depth; 4588de8f5e4fSPeter Zijlstra if (prev->irq_context != next->irq_context) 4589de8f5e4fSPeter Zijlstra break; 4590de8f5e4fSPeter Zijlstra } 4591de8f5e4fSPeter Zijlstra depth++; 4592de8f5e4fSPeter Zijlstra 45939a019db0SPeter Zijlstra curr_inner = task_wait_context(curr); 4594de8f5e4fSPeter Zijlstra 4595de8f5e4fSPeter Zijlstra for (; depth < curr->lockdep_depth; depth++) { 4596de8f5e4fSPeter Zijlstra struct held_lock *prev = curr->held_locks + depth; 4597dfd5e3f5SPeter Zijlstra u8 prev_inner = hlock_class(prev)->wait_type_inner; 4598de8f5e4fSPeter Zijlstra 4599de8f5e4fSPeter Zijlstra if (prev_inner) { 4600de8f5e4fSPeter Zijlstra /* 4601de8f5e4fSPeter Zijlstra * We can have a bigger inner than a previous one 4602de8f5e4fSPeter Zijlstra * when outer is smaller than inner, as with RCU. 4603de8f5e4fSPeter Zijlstra * 4604de8f5e4fSPeter Zijlstra * Also due to trylocks. 
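 *
 * (Editorial illustration: if the held inners in this irq_context are
 * [LD_WAIT_SPIN, LD_WAIT_CONFIG], the min() below folds curr_inner down to
 * LD_WAIT_SPIN -- the strictest held environment is what the incoming lock
 * gets compared against.)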
4605de8f5e4fSPeter Zijlstra */ 4606de8f5e4fSPeter Zijlstra curr_inner = min(curr_inner, prev_inner); 4607de8f5e4fSPeter Zijlstra } 4608de8f5e4fSPeter Zijlstra } 4609de8f5e4fSPeter Zijlstra 4610de8f5e4fSPeter Zijlstra if (next_outer > curr_inner) 4611de8f5e4fSPeter Zijlstra return print_lock_invalid_wait_context(curr, next); 4612de8f5e4fSPeter Zijlstra 4613de8f5e4fSPeter Zijlstra return 0; 4614de8f5e4fSPeter Zijlstra } 4615de8f5e4fSPeter Zijlstra 461630a35f79SArnd Bergmann #else /* CONFIG_PROVE_LOCKING */ 4617886532aeSArnd Bergmann 4618886532aeSArnd Bergmann static inline int 4619886532aeSArnd Bergmann mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) 4620886532aeSArnd Bergmann { 4621886532aeSArnd Bergmann return 1; 4622886532aeSArnd Bergmann } 4623886532aeSArnd Bergmann 4624886532aeSArnd Bergmann static inline unsigned int task_irq_context(struct task_struct *task) 4625886532aeSArnd Bergmann { 4626886532aeSArnd Bergmann return 0; 4627886532aeSArnd Bergmann } 4628886532aeSArnd Bergmann 4629886532aeSArnd Bergmann static inline int separate_irq_context(struct task_struct *curr, 4630886532aeSArnd Bergmann struct held_lock *hlock) 4631886532aeSArnd Bergmann { 4632886532aeSArnd Bergmann return 0; 4633886532aeSArnd Bergmann } 4634886532aeSArnd Bergmann 4635de8f5e4fSPeter Zijlstra static inline int check_wait_context(struct task_struct *curr, 4636de8f5e4fSPeter Zijlstra struct held_lock *next) 4637de8f5e4fSPeter Zijlstra { 4638de8f5e4fSPeter Zijlstra return 0; 4639de8f5e4fSPeter Zijlstra } 4640de8f5e4fSPeter Zijlstra 464130a35f79SArnd Bergmann #endif /* CONFIG_PROVE_LOCKING */ 4642886532aeSArnd Bergmann 46438eddac3fSPeter Zijlstra /* 46448eddac3fSPeter Zijlstra * Initialize a lock instance's lock-class mapping info: 46458eddac3fSPeter Zijlstra */ 4646dfd5e3f5SPeter Zijlstra void lockdep_init_map_type(struct lockdep_map *lock, const char *name, 4647de8f5e4fSPeter Zijlstra struct lock_class_key *key, int subclass, 4648dfd5e3f5SPeter Zijlstra u8 inner, u8 outer, u8 lock_type) 46498eddac3fSPeter Zijlstra { 46508eddac3fSPeter Zijlstra int i; 46518eddac3fSPeter Zijlstra 46528eddac3fSPeter Zijlstra for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) 46538eddac3fSPeter Zijlstra lock->class_cache[i] = NULL; 46548eddac3fSPeter Zijlstra 46558eddac3fSPeter Zijlstra #ifdef CONFIG_LOCK_STAT 46568eddac3fSPeter Zijlstra lock->cpu = raw_smp_processor_id(); 46578eddac3fSPeter Zijlstra #endif 46588eddac3fSPeter Zijlstra 46598eddac3fSPeter Zijlstra /* 46608eddac3fSPeter Zijlstra * Can't be having no nameless bastards around this place! 46618eddac3fSPeter Zijlstra */ 46628eddac3fSPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(!name)) { 46638eddac3fSPeter Zijlstra lock->name = "NULL"; 46648eddac3fSPeter Zijlstra return; 46658eddac3fSPeter Zijlstra } 46668eddac3fSPeter Zijlstra 46678eddac3fSPeter Zijlstra lock->name = name; 46688eddac3fSPeter Zijlstra 4669de8f5e4fSPeter Zijlstra lock->wait_type_outer = outer; 4670de8f5e4fSPeter Zijlstra lock->wait_type_inner = inner; 4671dfd5e3f5SPeter Zijlstra lock->lock_type = lock_type; 4672de8f5e4fSPeter Zijlstra 46738eddac3fSPeter Zijlstra /* 46748eddac3fSPeter Zijlstra * No key, no joy, we need to hash something. 46758eddac3fSPeter Zijlstra */ 46768eddac3fSPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(!key)) 46778eddac3fSPeter Zijlstra return; 46788eddac3fSPeter Zijlstra /* 4679108c1485SBart Van Assche * Sanity check, the lock-class key must either have been allocated 4680108c1485SBart Van Assche * statically or must have been registered as a dynamic key. 
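 *
 * (Editorial sketch of the two valid flavours; dyn_key is a made-up name:
 *
 *	static struct lock_class_key static_key;	// passes static_obj()
 *
 *	struct lock_class_key *dyn_key = kzalloc(sizeof(*dyn_key), GFP_KERNEL);
 *	lockdep_register_key(dyn_key);			// passes is_dynamic_key()
 *
 * A key on the stack, or in heap memory that was never registered, fails
 * the check below.)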
46818eddac3fSPeter Zijlstra */ 4682108c1485SBart Van Assche if (!static_obj(key) && !is_dynamic_key(key)) { 4683108c1485SBart Van Assche if (debug_locks) 4684108c1485SBart Van Assche printk(KERN_ERR "BUG: key %px has not been registered!\n", key); 46858eddac3fSPeter Zijlstra DEBUG_LOCKS_WARN_ON(1); 46868eddac3fSPeter Zijlstra return; 46878eddac3fSPeter Zijlstra } 46888eddac3fSPeter Zijlstra lock->key = key; 46898eddac3fSPeter Zijlstra 46908eddac3fSPeter Zijlstra if (unlikely(!debug_locks)) 46918eddac3fSPeter Zijlstra return; 46928eddac3fSPeter Zijlstra 469335a9393cSPeter Zijlstra if (subclass) { 469435a9393cSPeter Zijlstra unsigned long flags; 469535a9393cSPeter Zijlstra 46964d004099SPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled())) 469735a9393cSPeter Zijlstra return; 469835a9393cSPeter Zijlstra 469935a9393cSPeter Zijlstra raw_local_irq_save(flags); 47004d004099SPeter Zijlstra lockdep_recursion_inc(); 47018eddac3fSPeter Zijlstra register_lock_class(lock, subclass, 1); 470210476e63SPeter Zijlstra lockdep_recursion_finish(); 470335a9393cSPeter Zijlstra raw_local_irq_restore(flags); 470435a9393cSPeter Zijlstra } 47058eddac3fSPeter Zijlstra } 4706dfd5e3f5SPeter Zijlstra EXPORT_SYMBOL_GPL(lockdep_init_map_type); 47078eddac3fSPeter Zijlstra 47088eddac3fSPeter Zijlstra struct lock_class_key __lockdep_no_validate__; 47098eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(__lockdep_no_validate__); 47108eddac3fSPeter Zijlstra 4711f7c1c6b3SYuyang Du static void 47128eddac3fSPeter Zijlstra print_lock_nested_lock_not_held(struct task_struct *curr, 47138eddac3fSPeter Zijlstra struct held_lock *hlock, 47148eddac3fSPeter Zijlstra unsigned long ip) 47158eddac3fSPeter Zijlstra { 47168eddac3fSPeter Zijlstra if (!debug_locks_off()) 4717f7c1c6b3SYuyang Du return; 47188eddac3fSPeter Zijlstra if (debug_locks_silent) 4719f7c1c6b3SYuyang Du return; 47208eddac3fSPeter Zijlstra 4721681fbec8SPaul E. McKenney pr_warn("\n"); 4722a5dd63efSPaul E. McKenney pr_warn("==================================\n"); 4723a5dd63efSPaul E. McKenney pr_warn("WARNING: Nested lock was not taken\n"); 47248eddac3fSPeter Zijlstra print_kernel_ident(); 4725a5dd63efSPaul E. McKenney pr_warn("----------------------------------\n"); 47268eddac3fSPeter Zijlstra 4727681fbec8SPaul E. McKenney pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); 47288eddac3fSPeter Zijlstra print_lock(hlock); 47298eddac3fSPeter Zijlstra 4730681fbec8SPaul E. McKenney pr_warn("\nbut this task is not holding:\n"); 4731681fbec8SPaul E. McKenney pr_warn("%s\n", hlock->nest_lock->name); 47328eddac3fSPeter Zijlstra 4733681fbec8SPaul E. McKenney pr_warn("\nstack backtrace:\n"); 47348eddac3fSPeter Zijlstra dump_stack(); 47358eddac3fSPeter Zijlstra 4736681fbec8SPaul E. McKenney pr_warn("\nother info that might help us debug this:\n"); 47378eddac3fSPeter Zijlstra lockdep_print_held_locks(curr); 47388eddac3fSPeter Zijlstra 4739681fbec8SPaul E. McKenney pr_warn("\nstack backtrace:\n"); 47408eddac3fSPeter Zijlstra dump_stack(); 47418eddac3fSPeter Zijlstra } 47428eddac3fSPeter Zijlstra 474308f36ff6SMatthew Wilcox static int __lock_is_held(const struct lockdep_map *lock, int read); 47448eddac3fSPeter Zijlstra 47458eddac3fSPeter Zijlstra /* 47468eddac3fSPeter Zijlstra * This gets called for every mutex_lock*()/spin_lock*() operation. 
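 * (Editorial illustration: a plain spin_lock(), for instance, funnels down to
 * roughly
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *
 * i.e. subclass 0, not a trylock, exclusive (read == 0), fully checked, and
 * with no nest_lock.)
 *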
47478eddac3fSPeter Zijlstra * We maintain the dependency maps and validate the locking attempt: 47488ee10862SWaiman Long * 47498ee10862SWaiman Long * The callers must make sure that IRQs are disabled before calling it, 47508ee10862SWaiman Long * otherwise we could get an interrupt which would want to take locks, 47518ee10862SWaiman Long * which would end up in lockdep again. 47528eddac3fSPeter Zijlstra */ 47538eddac3fSPeter Zijlstra static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, 47548eddac3fSPeter Zijlstra int trylock, int read, int check, int hardirqs_off, 47558eddac3fSPeter Zijlstra struct lockdep_map *nest_lock, unsigned long ip, 475621199f27SPeter Zijlstra int references, int pin_count) 47578eddac3fSPeter Zijlstra { 47588eddac3fSPeter Zijlstra struct task_struct *curr = current; 47598eddac3fSPeter Zijlstra struct lock_class *class = NULL; 47608eddac3fSPeter Zijlstra struct held_lock *hlock; 47615f18ab5cSAlfredo Alvarez Fernandez unsigned int depth; 47628eddac3fSPeter Zijlstra int chain_head = 0; 47638eddac3fSPeter Zijlstra int class_idx; 47648eddac3fSPeter Zijlstra u64 chain_key; 47658eddac3fSPeter Zijlstra 47668eddac3fSPeter Zijlstra if (unlikely(!debug_locks)) 47678eddac3fSPeter Zijlstra return 0; 47688eddac3fSPeter Zijlstra 4769fb9edbe9SOleg Nesterov if (!prove_locking || lock->key == &__lockdep_no_validate__) 4770fb9edbe9SOleg Nesterov check = 0; 47718eddac3fSPeter Zijlstra 47728eddac3fSPeter Zijlstra if (subclass < NR_LOCKDEP_CACHING_CLASSES) 47738eddac3fSPeter Zijlstra class = lock->class_cache[subclass]; 47748eddac3fSPeter Zijlstra /* 47758eddac3fSPeter Zijlstra * Not cached? 47768eddac3fSPeter Zijlstra */ 47778eddac3fSPeter Zijlstra if (unlikely(!class)) { 47788eddac3fSPeter Zijlstra class = register_lock_class(lock, subclass, 0); 47798eddac3fSPeter Zijlstra if (!class) 47808eddac3fSPeter Zijlstra return 0; 47818eddac3fSPeter Zijlstra } 47828ca2b56cSWaiman Long 47838ca2b56cSWaiman Long debug_class_ops_inc(class); 47848ca2b56cSWaiman Long 47858eddac3fSPeter Zijlstra if (very_verbose(class)) { 478604860d48SBorislav Petkov printk("\nacquire class [%px] %s", class->key, class->name); 47878eddac3fSPeter Zijlstra if (class->name_version > 1) 4788f943fe0fSDmitry Vyukov printk(KERN_CONT "#%d", class->name_version); 4789f943fe0fSDmitry Vyukov printk(KERN_CONT "\n"); 47908eddac3fSPeter Zijlstra dump_stack(); 47918eddac3fSPeter Zijlstra } 47928eddac3fSPeter Zijlstra 47938eddac3fSPeter Zijlstra /* 47948eddac3fSPeter Zijlstra * Add the lock to the list of currently held locks. 47958eddac3fSPeter Zijlstra * (we dont increase the depth just yet, up until the 47968eddac3fSPeter Zijlstra * dependency checks are done) 47978eddac3fSPeter Zijlstra */ 47988eddac3fSPeter Zijlstra depth = curr->lockdep_depth; 47998eddac3fSPeter Zijlstra /* 48008eddac3fSPeter Zijlstra * Ran out of static storage for our per-task lock stack again have we? 
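 * (MAX_LOCK_DEPTH is 48 at the time of writing, per include/linux/sched.h,
 * so this should only fire on pathologically deep or leaking lock stacks.)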
48018eddac3fSPeter Zijlstra */ 48028eddac3fSPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) 48038eddac3fSPeter Zijlstra return 0; 48048eddac3fSPeter Zijlstra 480501bb6f0aSYuyang Du class_idx = class - lock_classes; 48068eddac3fSPeter Zijlstra 4807de8f5e4fSPeter Zijlstra if (depth) { /* we're holding locks */ 48088eddac3fSPeter Zijlstra hlock = curr->held_locks + depth - 1; 48098eddac3fSPeter Zijlstra if (hlock->class_idx == class_idx && nest_lock) { 4810d9349850SImre Deak if (!references) 4811d9349850SImre Deak references++; 48127fb4a2ceSPeter Zijlstra 4813d9349850SImre Deak if (!hlock->references) 48148eddac3fSPeter Zijlstra hlock->references++; 4815d9349850SImre Deak 4816d9349850SImre Deak hlock->references += references; 4817d9349850SImre Deak 4818d9349850SImre Deak /* Overflow */ 4819d9349850SImre Deak if (DEBUG_LOCKS_WARN_ON(hlock->references < references)) 4820d9349850SImre Deak return 0; 48218eddac3fSPeter Zijlstra 48228c8889d8SImre Deak return 2; 48238eddac3fSPeter Zijlstra } 48248eddac3fSPeter Zijlstra } 48258eddac3fSPeter Zijlstra 48268eddac3fSPeter Zijlstra hlock = curr->held_locks + depth; 48278eddac3fSPeter Zijlstra /* 48288eddac3fSPeter Zijlstra * Plain impossible, we just registered it and checked it weren't no 48298eddac3fSPeter Zijlstra * NULL like.. I bet this mushroom I ate was good! 48308eddac3fSPeter Zijlstra */ 48318eddac3fSPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(!class)) 48328eddac3fSPeter Zijlstra return 0; 48338eddac3fSPeter Zijlstra hlock->class_idx = class_idx; 48348eddac3fSPeter Zijlstra hlock->acquire_ip = ip; 48358eddac3fSPeter Zijlstra hlock->instance = lock; 48368eddac3fSPeter Zijlstra hlock->nest_lock = nest_lock; 4837c2469756SBoqun Feng hlock->irq_context = task_irq_context(curr); 48388eddac3fSPeter Zijlstra hlock->trylock = trylock; 48398eddac3fSPeter Zijlstra hlock->read = read; 48408eddac3fSPeter Zijlstra hlock->check = check; 48418eddac3fSPeter Zijlstra hlock->hardirqs_off = !!hardirqs_off; 48428eddac3fSPeter Zijlstra hlock->references = references; 48438eddac3fSPeter Zijlstra #ifdef CONFIG_LOCK_STAT 48448eddac3fSPeter Zijlstra hlock->waittime_stamp = 0; 48458eddac3fSPeter Zijlstra hlock->holdtime_stamp = lockstat_clock(); 48468eddac3fSPeter Zijlstra #endif 484721199f27SPeter Zijlstra hlock->pin_count = pin_count; 48488eddac3fSPeter Zijlstra 4849de8f5e4fSPeter Zijlstra if (check_wait_context(curr, hlock)) 4850de8f5e4fSPeter Zijlstra return 0; 4851de8f5e4fSPeter Zijlstra 485209180651SYuyang Du /* Initialize the lock usage bit */ 485309180651SYuyang Du if (!mark_usage(curr, hlock, check)) 48548eddac3fSPeter Zijlstra return 0; 48558eddac3fSPeter Zijlstra 48568eddac3fSPeter Zijlstra /* 48578eddac3fSPeter Zijlstra * Calculate the chain hash: it's the combined hash of all the 48588eddac3fSPeter Zijlstra * lock keys along the dependency chain. We save the hash value 48598eddac3fSPeter Zijlstra * at every step so that we can get the current hash easily 48608eddac3fSPeter Zijlstra * after unlock. The chain hash is then used to cache dependency 48618eddac3fSPeter Zijlstra * results. 48628eddac3fSPeter Zijlstra * 48638eddac3fSPeter Zijlstra * The 'key ID' is what is the most compact key value to drive 48648eddac3fSPeter Zijlstra * the hash, not class->key. 48658eddac3fSPeter Zijlstra */ 48668eddac3fSPeter Zijlstra /* 486701bb6f0aSYuyang Du * Whoops, we did it again.. class_idx is invalid. 
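 * (lock_classes_in_use has one bit per lock_classes[] slot; a clear bit
 * means the class was never registered or has since been zapped.)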
48688eddac3fSPeter Zijlstra */ 486901bb6f0aSYuyang Du if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lock_classes_in_use))) 48708eddac3fSPeter Zijlstra return 0; 48718eddac3fSPeter Zijlstra 48728eddac3fSPeter Zijlstra chain_key = curr->curr_chain_key; 48738eddac3fSPeter Zijlstra if (!depth) { 48748eddac3fSPeter Zijlstra /* 48758eddac3fSPeter Zijlstra * How can we have a chain hash when we ain't got no keys?! 48768eddac3fSPeter Zijlstra */ 4877f6ec8829SYuyang Du if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY)) 48788eddac3fSPeter Zijlstra return 0; 48798eddac3fSPeter Zijlstra chain_head = 1; 48808eddac3fSPeter Zijlstra } 48818eddac3fSPeter Zijlstra 48828eddac3fSPeter Zijlstra hlock->prev_chain_key = chain_key; 48838eddac3fSPeter Zijlstra if (separate_irq_context(curr, hlock)) { 4884f6ec8829SYuyang Du chain_key = INITIAL_CHAIN_KEY; 48858eddac3fSPeter Zijlstra chain_head = 1; 48868eddac3fSPeter Zijlstra } 4887f611e8cfSBoqun Feng chain_key = iterate_chain_key(chain_key, hlock_id(hlock)); 48888eddac3fSPeter Zijlstra 4889f7c1c6b3SYuyang Du if (nest_lock && !__lock_is_held(nest_lock, -1)) { 4890f7c1c6b3SYuyang Du print_lock_nested_lock_not_held(curr, hlock, ip); 4891f7c1c6b3SYuyang Du return 0; 4892f7c1c6b3SYuyang Du } 48938eddac3fSPeter Zijlstra 4894a0b0fd53SBart Van Assche if (!debug_locks_silent) { 4895a0b0fd53SBart Van Assche WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key); 4896a0b0fd53SBart Van Assche WARN_ON_ONCE(!hlock_class(hlock)->key); 4897a0b0fd53SBart Van Assche } 4898a0b0fd53SBart Van Assche 48990b9fc8ecSYuyang Du if (!validate_chain(curr, hlock, chain_head, chain_key)) 49008eddac3fSPeter Zijlstra return 0; 49018eddac3fSPeter Zijlstra 49028eddac3fSPeter Zijlstra curr->curr_chain_key = chain_key; 49038eddac3fSPeter Zijlstra curr->lockdep_depth++; 49048eddac3fSPeter Zijlstra check_chain_key(curr); 49058eddac3fSPeter Zijlstra #ifdef CONFIG_DEBUG_LOCKDEP 49068eddac3fSPeter Zijlstra if (unlikely(!debug_locks)) 49078eddac3fSPeter Zijlstra return 0; 49088eddac3fSPeter Zijlstra #endif 49098eddac3fSPeter Zijlstra if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { 49108eddac3fSPeter Zijlstra debug_locks_off(); 49118eddac3fSPeter Zijlstra print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!"); 49128eddac3fSPeter Zijlstra printk(KERN_DEBUG "depth: %i max: %lu!\n", 49138eddac3fSPeter Zijlstra curr->lockdep_depth, MAX_LOCK_DEPTH); 49148eddac3fSPeter Zijlstra 49158eddac3fSPeter Zijlstra lockdep_print_held_locks(current); 49168eddac3fSPeter Zijlstra debug_show_all_locks(); 49178eddac3fSPeter Zijlstra dump_stack(); 49188eddac3fSPeter Zijlstra 49198eddac3fSPeter Zijlstra return 0; 49208eddac3fSPeter Zijlstra } 49218eddac3fSPeter Zijlstra 49228eddac3fSPeter Zijlstra if (unlikely(curr->lockdep_depth > max_lockdep_depth)) 49238eddac3fSPeter Zijlstra max_lockdep_depth = curr->lockdep_depth; 49248eddac3fSPeter Zijlstra 49258eddac3fSPeter Zijlstra return 1; 49268eddac3fSPeter Zijlstra } 49278eddac3fSPeter Zijlstra 4928f7c1c6b3SYuyang Du static void print_unlock_imbalance_bug(struct task_struct *curr, 4929f7c1c6b3SYuyang Du struct lockdep_map *lock, 49308eddac3fSPeter Zijlstra unsigned long ip) 49318eddac3fSPeter Zijlstra { 49328eddac3fSPeter Zijlstra if (!debug_locks_off()) 4933f7c1c6b3SYuyang Du return; 49348eddac3fSPeter Zijlstra if (debug_locks_silent) 4935f7c1c6b3SYuyang Du return; 49368eddac3fSPeter Zijlstra 4937681fbec8SPaul E. McKenney pr_warn("\n"); 4938a5dd63efSPaul E. McKenney pr_warn("=====================================\n"); 4939a5dd63efSPaul E. 
McKenney pr_warn("WARNING: bad unlock balance detected!\n"); 49408eddac3fSPeter Zijlstra print_kernel_ident(); 4941a5dd63efSPaul E. McKenney pr_warn("-------------------------------------\n"); 4942681fbec8SPaul E. McKenney pr_warn("%s/%d is trying to release lock (", 49438eddac3fSPeter Zijlstra curr->comm, task_pid_nr(curr)); 49448eddac3fSPeter Zijlstra print_lockdep_cache(lock); 4945681fbec8SPaul E. McKenney pr_cont(") at:\n"); 49462062a4e8SDmitry Safonov print_ip_sym(KERN_WARNING, ip); 4947681fbec8SPaul E. McKenney pr_warn("but there are no more locks to release!\n"); 4948681fbec8SPaul E. McKenney pr_warn("\nother info that might help us debug this:\n"); 49498eddac3fSPeter Zijlstra lockdep_print_held_locks(curr); 49508eddac3fSPeter Zijlstra 4951681fbec8SPaul E. McKenney pr_warn("\nstack backtrace:\n"); 49528eddac3fSPeter Zijlstra dump_stack(); 49538eddac3fSPeter Zijlstra } 49548eddac3fSPeter Zijlstra 4955c86e9b98SPeter Zijlstra static noinstr int match_held_lock(const struct held_lock *hlock, 495608f36ff6SMatthew Wilcox const struct lockdep_map *lock) 49578eddac3fSPeter Zijlstra { 49588eddac3fSPeter Zijlstra if (hlock->instance == lock) 49598eddac3fSPeter Zijlstra return 1; 49608eddac3fSPeter Zijlstra 49618eddac3fSPeter Zijlstra if (hlock->references) { 496208f36ff6SMatthew Wilcox const struct lock_class *class = lock->class_cache[0]; 49638eddac3fSPeter Zijlstra 49648eddac3fSPeter Zijlstra if (!class) 49658eddac3fSPeter Zijlstra class = look_up_lock_class(lock, 0); 49668eddac3fSPeter Zijlstra 49678eddac3fSPeter Zijlstra /* 49688eddac3fSPeter Zijlstra * If look_up_lock_class() failed to find a class, we're trying 49698eddac3fSPeter Zijlstra * to test if we hold a lock that has never yet been acquired. 49708eddac3fSPeter Zijlstra * Clearly if the lock hasn't been acquired _ever_, we're not 49718eddac3fSPeter Zijlstra * holding it either, so report failure. 49728eddac3fSPeter Zijlstra */ 497364f29d1bSMatthew Wilcox if (!class) 49748eddac3fSPeter Zijlstra return 0; 49758eddac3fSPeter Zijlstra 49768eddac3fSPeter Zijlstra /* 49778eddac3fSPeter Zijlstra * References, but not a lock we're actually ref-counting? 49788eddac3fSPeter Zijlstra * State got messed up, follow the sites that change ->references 49798eddac3fSPeter Zijlstra * and try to make sense of it. 49808eddac3fSPeter Zijlstra */ 49818eddac3fSPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) 49828eddac3fSPeter Zijlstra return 0; 49838eddac3fSPeter Zijlstra 498401bb6f0aSYuyang Du if (hlock->class_idx == class - lock_classes) 49858eddac3fSPeter Zijlstra return 1; 49868eddac3fSPeter Zijlstra } 49878eddac3fSPeter Zijlstra 49888eddac3fSPeter Zijlstra return 0; 49898eddac3fSPeter Zijlstra } 49908eddac3fSPeter Zijlstra 499141c2c5b8SJ. R. Okajima /* @depth must not be zero */ 499241c2c5b8SJ. R. Okajima static struct held_lock *find_held_lock(struct task_struct *curr, 499341c2c5b8SJ. R. Okajima struct lockdep_map *lock, 499441c2c5b8SJ. R. Okajima unsigned int depth, int *idx) 499541c2c5b8SJ. R. Okajima { 499641c2c5b8SJ. R. Okajima struct held_lock *ret, *hlock, *prev_hlock; 499741c2c5b8SJ. R. Okajima int i; 499841c2c5b8SJ. R. Okajima 499941c2c5b8SJ. R. Okajima i = depth - 1; 500041c2c5b8SJ. R. Okajima hlock = curr->held_locks + i; 500141c2c5b8SJ. R. Okajima ret = hlock; 500241c2c5b8SJ. R. Okajima if (match_held_lock(hlock, lock)) 500341c2c5b8SJ. R. Okajima goto out; 500441c2c5b8SJ. R. Okajima 500541c2c5b8SJ. R. Okajima ret = NULL; 500641c2c5b8SJ. R. Okajima for (i--, prev_hlock = hlock--; 500741c2c5b8SJ. R. 
Okajima i >= 0; 500841c2c5b8SJ. R. Okajima i--, prev_hlock = hlock--) { 500941c2c5b8SJ. R. Okajima /* 501041c2c5b8SJ. R. Okajima * We must not cross into another context: 501141c2c5b8SJ. R. Okajima */ 501241c2c5b8SJ. R. Okajima if (prev_hlock->irq_context != hlock->irq_context) { 501341c2c5b8SJ. R. Okajima ret = NULL; 501441c2c5b8SJ. R. Okajima break; 501541c2c5b8SJ. R. Okajima } 501641c2c5b8SJ. R. Okajima if (match_held_lock(hlock, lock)) { 501741c2c5b8SJ. R. Okajima ret = hlock; 501841c2c5b8SJ. R. Okajima break; 501941c2c5b8SJ. R. Okajima } 502041c2c5b8SJ. R. Okajima } 502141c2c5b8SJ. R. Okajima 502241c2c5b8SJ. R. Okajima out: 502341c2c5b8SJ. R. Okajima *idx = i; 502441c2c5b8SJ. R. Okajima return ret; 502541c2c5b8SJ. R. Okajima } 502641c2c5b8SJ. R. Okajima 5027e969970bSJ. R. Okajima static int reacquire_held_locks(struct task_struct *curr, unsigned int depth, 50288c8889d8SImre Deak int idx, unsigned int *merged) 5029e969970bSJ. R. Okajima { 5030e969970bSJ. R. Okajima struct held_lock *hlock; 50318c8889d8SImre Deak int first_idx = idx; 5032e969970bSJ. R. Okajima 50338ee10862SWaiman Long if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 50348ee10862SWaiman Long return 0; 50358ee10862SWaiman Long 5036e969970bSJ. R. Okajima for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) { 50378c8889d8SImre Deak switch (__lock_acquire(hlock->instance, 5038e969970bSJ. R. Okajima hlock_class(hlock)->subclass, 5039e969970bSJ. R. Okajima hlock->trylock, 5040e969970bSJ. R. Okajima hlock->read, hlock->check, 5041e969970bSJ. R. Okajima hlock->hardirqs_off, 5042e969970bSJ. R. Okajima hlock->nest_lock, hlock->acquire_ip, 50438c8889d8SImre Deak hlock->references, hlock->pin_count)) { 50448c8889d8SImre Deak case 0: 5045e969970bSJ. R. Okajima return 1; 50468c8889d8SImre Deak case 1: 50478c8889d8SImre Deak break; 50488c8889d8SImre Deak case 2: 50498c8889d8SImre Deak *merged += (idx == first_idx); 50508c8889d8SImre Deak break; 50518c8889d8SImre Deak default: 50528c8889d8SImre Deak WARN_ON(1); 50538c8889d8SImre Deak return 0; 50548c8889d8SImre Deak } 5055e969970bSJ. R. Okajima } 5056e969970bSJ. R. Okajima return 0; 5057e969970bSJ. R. Okajima } 5058e969970bSJ. R. Okajima 50598eddac3fSPeter Zijlstra static int 50608eddac3fSPeter Zijlstra __lock_set_class(struct lockdep_map *lock, const char *name, 50618eddac3fSPeter Zijlstra struct lock_class_key *key, unsigned int subclass, 50628eddac3fSPeter Zijlstra unsigned long ip) 50638eddac3fSPeter Zijlstra { 50648eddac3fSPeter Zijlstra struct task_struct *curr = current; 50658c8889d8SImre Deak unsigned int depth, merged = 0; 506641c2c5b8SJ. R. Okajima struct held_lock *hlock; 50678eddac3fSPeter Zijlstra struct lock_class *class; 50688eddac3fSPeter Zijlstra int i; 50698eddac3fSPeter Zijlstra 5070513e1073SWaiman Long if (unlikely(!debug_locks)) 5071513e1073SWaiman Long return 0; 5072513e1073SWaiman Long 50738eddac3fSPeter Zijlstra depth = curr->lockdep_depth; 50748eddac3fSPeter Zijlstra /* 50758eddac3fSPeter Zijlstra * This function is about (re)setting the class of a held lock, 50768eddac3fSPeter Zijlstra * yet we're not actually holding any locks. Naughty user! 50778eddac3fSPeter Zijlstra */ 50788eddac3fSPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(!depth)) 50798eddac3fSPeter Zijlstra return 0; 50808eddac3fSPeter Zijlstra 508141c2c5b8SJ. R. 
Okajima hlock = find_held_lock(curr, lock, depth, &i); 5082f7c1c6b3SYuyang Du if (!hlock) { 5083f7c1c6b3SYuyang Du print_unlock_imbalance_bug(curr, lock, ip); 5084f7c1c6b3SYuyang Du return 0; 5085f7c1c6b3SYuyang Du } 50868eddac3fSPeter Zijlstra 5087de8f5e4fSPeter Zijlstra lockdep_init_map_waits(lock, name, key, 0, 5088de8f5e4fSPeter Zijlstra lock->wait_type_inner, 5089de8f5e4fSPeter Zijlstra lock->wait_type_outer); 50908eddac3fSPeter Zijlstra class = register_lock_class(lock, subclass, 0); 509101bb6f0aSYuyang Du hlock->class_idx = class - lock_classes; 50928eddac3fSPeter Zijlstra 50938eddac3fSPeter Zijlstra curr->lockdep_depth = i; 50948eddac3fSPeter Zijlstra curr->curr_chain_key = hlock->prev_chain_key; 50958eddac3fSPeter Zijlstra 50968c8889d8SImre Deak if (reacquire_held_locks(curr, depth, i, &merged)) 50978eddac3fSPeter Zijlstra return 0; 50988eddac3fSPeter Zijlstra 50998eddac3fSPeter Zijlstra /* 51008eddac3fSPeter Zijlstra * I took it apart and put it back together again, except now I have 51018eddac3fSPeter Zijlstra * these 'spare' parts.. where shall I put them. 51028eddac3fSPeter Zijlstra */ 51038c8889d8SImre Deak if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged)) 51048eddac3fSPeter Zijlstra return 0; 51058eddac3fSPeter Zijlstra return 1; 51068eddac3fSPeter Zijlstra } 51078eddac3fSPeter Zijlstra 51086419c4afSJ. R. Okajima static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip) 51096419c4afSJ. R. Okajima { 51106419c4afSJ. R. Okajima struct task_struct *curr = current; 51118c8889d8SImre Deak unsigned int depth, merged = 0; 51126419c4afSJ. R. Okajima struct held_lock *hlock; 51136419c4afSJ. R. Okajima int i; 51146419c4afSJ. R. Okajima 511571492580SWaiman Long if (unlikely(!debug_locks)) 511671492580SWaiman Long return 0; 511771492580SWaiman Long 51186419c4afSJ. R. Okajima depth = curr->lockdep_depth; 51196419c4afSJ. R. Okajima /* 51206419c4afSJ. R. Okajima * This function is about (re)setting the class of a held lock, 51216419c4afSJ. R. Okajima * yet we're not actually holding any locks. Naughty user! 51226419c4afSJ. R. Okajima */ 51236419c4afSJ. R. Okajima if (DEBUG_LOCKS_WARN_ON(!depth)) 51246419c4afSJ. R. Okajima return 0; 51256419c4afSJ. R. Okajima 51266419c4afSJ. R. Okajima hlock = find_held_lock(curr, lock, depth, &i); 5127f7c1c6b3SYuyang Du if (!hlock) { 5128f7c1c6b3SYuyang Du print_unlock_imbalance_bug(curr, lock, ip); 5129f7c1c6b3SYuyang Du return 0; 5130f7c1c6b3SYuyang Du } 51316419c4afSJ. R. Okajima 51326419c4afSJ. R. Okajima curr->lockdep_depth = i; 51336419c4afSJ. R. Okajima curr->curr_chain_key = hlock->prev_chain_key; 51346419c4afSJ. R. Okajima 51356419c4afSJ. R. Okajima WARN(hlock->read, "downgrading a read lock"); 51366419c4afSJ. R. Okajima hlock->read = 1; 51376419c4afSJ. R. Okajima hlock->acquire_ip = ip; 51386419c4afSJ. R. Okajima 51398c8889d8SImre Deak if (reacquire_held_locks(curr, depth, i, &merged)) 51408c8889d8SImre Deak return 0; 51418c8889d8SImre Deak 51428c8889d8SImre Deak /* Merging can't happen with unchanged classes.. */ 51438c8889d8SImre Deak if (DEBUG_LOCKS_WARN_ON(merged)) 51446419c4afSJ. R. Okajima return 0; 51456419c4afSJ. R. Okajima 51466419c4afSJ. R. Okajima /* 51476419c4afSJ. R. Okajima * I took it apart and put it back together again, except now I have 51486419c4afSJ. R. Okajima * these 'spare' parts.. where shall I put them. 51496419c4afSJ. R. Okajima */ 51506419c4afSJ. R. Okajima if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) 51516419c4afSJ. R. Okajima return 0; 51528c8889d8SImre Deak 51536419c4afSJ. R. 
Okajima return 1; 51546419c4afSJ. R. Okajima } 51556419c4afSJ. R. Okajima 51568eddac3fSPeter Zijlstra /* 5157c759bc47SDan Carpenter * Remove the lock from the list of currently held locks - this gets 5158e0f56fd7SPeter Zijlstra * called on mutex_unlock()/spin_unlock*() (or on a failed 5159e0f56fd7SPeter Zijlstra * mutex_lock_interruptible()). 51608eddac3fSPeter Zijlstra */ 51618eddac3fSPeter Zijlstra static int 5162b4adfe8eSYuyang Du __lock_release(struct lockdep_map *lock, unsigned long ip) 51638eddac3fSPeter Zijlstra { 5164e0f56fd7SPeter Zijlstra struct task_struct *curr = current; 51658c8889d8SImre Deak unsigned int depth, merged = 1; 516641c2c5b8SJ. R. Okajima struct held_lock *hlock; 5167e966eaeeSIngo Molnar int i; 51688eddac3fSPeter Zijlstra 5169e0f56fd7SPeter Zijlstra if (unlikely(!debug_locks)) 5170e0f56fd7SPeter Zijlstra return 0; 5171e0f56fd7SPeter Zijlstra 51728eddac3fSPeter Zijlstra depth = curr->lockdep_depth; 51738eddac3fSPeter Zijlstra /* 51748eddac3fSPeter Zijlstra * So we're all set to release this lock.. wait what lock? We don't 51758eddac3fSPeter Zijlstra * own any locks, you've been drinking again? 51768eddac3fSPeter Zijlstra */ 5177dd471efeSKobe Wu if (depth <= 0) { 5178f7c1c6b3SYuyang Du print_unlock_imbalance_bug(curr, lock, ip); 5179f7c1c6b3SYuyang Du return 0; 5180f7c1c6b3SYuyang Du } 51818eddac3fSPeter Zijlstra 5182e0f56fd7SPeter Zijlstra /* 5183e0f56fd7SPeter Zijlstra * Check whether the lock exists in the current stack 5184e0f56fd7SPeter Zijlstra * of held locks: 5185e0f56fd7SPeter Zijlstra */ 518641c2c5b8SJ. R. Okajima hlock = find_held_lock(curr, lock, depth, &i); 5187f7c1c6b3SYuyang Du if (!hlock) { 5188f7c1c6b3SYuyang Du print_unlock_imbalance_bug(curr, lock, ip); 5189f7c1c6b3SYuyang Du return 0; 5190f7c1c6b3SYuyang Du } 51918eddac3fSPeter Zijlstra 51928eddac3fSPeter Zijlstra if (hlock->instance == lock) 51938eddac3fSPeter Zijlstra lock_release_holdtime(hlock); 51948eddac3fSPeter Zijlstra 5195a24fc60dSPeter Zijlstra WARN(hlock->pin_count, "releasing a pinned lock\n"); 5196a24fc60dSPeter Zijlstra 51978eddac3fSPeter Zijlstra if (hlock->references) { 51988eddac3fSPeter Zijlstra hlock->references--; 51998eddac3fSPeter Zijlstra if (hlock->references) { 52008eddac3fSPeter Zijlstra /* 52018eddac3fSPeter Zijlstra * We had, and after removing one, still have 52028eddac3fSPeter Zijlstra * references, the current lock stack is still 52038eddac3fSPeter Zijlstra * valid. We're done! 52048eddac3fSPeter Zijlstra */ 52058eddac3fSPeter Zijlstra return 1; 52068eddac3fSPeter Zijlstra } 52078eddac3fSPeter Zijlstra } 52088eddac3fSPeter Zijlstra 52098eddac3fSPeter Zijlstra /* 52108eddac3fSPeter Zijlstra * We have the right lock to unlock, 'hlock' points to it. 52118eddac3fSPeter Zijlstra * Now we remove it from the stack, and add back the other 52128eddac3fSPeter Zijlstra * entries (if any), recalculating the hash along the way: 52138eddac3fSPeter Zijlstra */ 52148eddac3fSPeter Zijlstra 52158eddac3fSPeter Zijlstra curr->lockdep_depth = i; 52168eddac3fSPeter Zijlstra curr->curr_chain_key = hlock->prev_chain_key; 52178eddac3fSPeter Zijlstra 5218ce52a18dSWaiman Long /* 5219ce52a18dSWaiman Long * The most likely case is when the unlock is on the innermost 5220ce52a18dSWaiman Long * lock. In this case, we are done! 
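 *
 * (Editorial illustration: with a held stack of A, B, C, releasing B takes
 * the slow path below: the depth is trimmed back to B's slot and C is
 * re-acquired via reacquire_held_locks(), recomputing the chain key as if
 * only A, C had ever been taken.)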
5221ce52a18dSWaiman Long */ 5222ce52a18dSWaiman Long if (i == depth-1) 5223ce52a18dSWaiman Long return 1; 5224ce52a18dSWaiman Long 52258c8889d8SImre Deak if (reacquire_held_locks(curr, depth, i + 1, &merged)) 52268eddac3fSPeter Zijlstra return 0; 52278eddac3fSPeter Zijlstra 52288eddac3fSPeter Zijlstra /* 52298eddac3fSPeter Zijlstra * We had N bottles of beer on the wall, we drank one, but now 52308eddac3fSPeter Zijlstra * there's not N-1 bottles of beer left on the wall... 52318c8889d8SImre Deak * Pouring two of the bottles together is acceptable. 52328eddac3fSPeter Zijlstra */ 52338c8889d8SImre Deak DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged); 5234e0f56fd7SPeter Zijlstra 5235ce52a18dSWaiman Long /* 5236ce52a18dSWaiman Long * Since reacquire_held_locks() would have called check_chain_key() 5237ce52a18dSWaiman Long * indirectly via __lock_acquire(), we don't need to do it again 5238ce52a18dSWaiman Long * on return. 5239ce52a18dSWaiman Long */ 5240ce52a18dSWaiman Long return 0; 52418eddac3fSPeter Zijlstra } 52428eddac3fSPeter Zijlstra 5243c86e9b98SPeter Zijlstra static __always_inline 52442f43c602SMasami Hiramatsu int __lock_is_held(const struct lockdep_map *lock, int read) 52458eddac3fSPeter Zijlstra { 52468eddac3fSPeter Zijlstra struct task_struct *curr = current; 52478eddac3fSPeter Zijlstra int i; 52488eddac3fSPeter Zijlstra 52498eddac3fSPeter Zijlstra for (i = 0; i < curr->lockdep_depth; i++) { 52508eddac3fSPeter Zijlstra struct held_lock *hlock = curr->held_locks + i; 52518eddac3fSPeter Zijlstra 5252f8319483SPeter Zijlstra if (match_held_lock(hlock, lock)) { 5253f8319483SPeter Zijlstra if (read == -1 || hlock->read == read) 52548eddac3fSPeter Zijlstra return 1; 5255f8319483SPeter Zijlstra 5256f8319483SPeter Zijlstra return 0; 5257f8319483SPeter Zijlstra } 52588eddac3fSPeter Zijlstra } 52598eddac3fSPeter Zijlstra 52608eddac3fSPeter Zijlstra return 0; 52618eddac3fSPeter Zijlstra } 52628eddac3fSPeter Zijlstra 5263e7904a28SPeter Zijlstra static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock) 5264e7904a28SPeter Zijlstra { 5265e7904a28SPeter Zijlstra struct pin_cookie cookie = NIL_COOKIE; 5266e7904a28SPeter Zijlstra struct task_struct *curr = current; 5267e7904a28SPeter Zijlstra int i; 5268e7904a28SPeter Zijlstra 5269e7904a28SPeter Zijlstra if (unlikely(!debug_locks)) 5270e7904a28SPeter Zijlstra return cookie; 5271e7904a28SPeter Zijlstra 5272e7904a28SPeter Zijlstra for (i = 0; i < curr->lockdep_depth; i++) { 5273e7904a28SPeter Zijlstra struct held_lock *hlock = curr->held_locks + i; 5274e7904a28SPeter Zijlstra 5275e7904a28SPeter Zijlstra if (match_held_lock(hlock, lock)) { 5276e7904a28SPeter Zijlstra /* 5277e7904a28SPeter Zijlstra * Grab 16bits of randomness; this is sufficient to not 5278e7904a28SPeter Zijlstra * be guessable and still allows some pin nesting in 5279e7904a28SPeter Zijlstra * our u32 pin_count. 
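 *
 * (Editorial usage sketch -- my_lock is a made-up example; any
 * lockdep_map-backed lock works:
 *
 *	struct pin_cookie c = lockdep_pin_lock(&my_lock);
 *	...
 *	lockdep_unpin_lock(&my_lock, c);
 *
 * Unpinning with anything but the cookie handed out here can trip the
 * "pin count corrupted" warning in __lock_unpin_lock().)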
5280e7904a28SPeter Zijlstra */ 5281e7904a28SPeter Zijlstra cookie.val = 1 + (prandom_u32() >> 16); 5282e7904a28SPeter Zijlstra hlock->pin_count += cookie.val; 5283e7904a28SPeter Zijlstra return cookie; 5284e7904a28SPeter Zijlstra } 5285e7904a28SPeter Zijlstra } 5286e7904a28SPeter Zijlstra 5287e7904a28SPeter Zijlstra WARN(1, "pinning an unheld lock\n"); 5288e7904a28SPeter Zijlstra return cookie; 5289e7904a28SPeter Zijlstra } 5290e7904a28SPeter Zijlstra 5291e7904a28SPeter Zijlstra static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie) 5292a24fc60dSPeter Zijlstra { 5293a24fc60dSPeter Zijlstra struct task_struct *curr = current; 5294a24fc60dSPeter Zijlstra int i; 5295a24fc60dSPeter Zijlstra 5296a24fc60dSPeter Zijlstra if (unlikely(!debug_locks)) 5297a24fc60dSPeter Zijlstra return; 5298a24fc60dSPeter Zijlstra 5299a24fc60dSPeter Zijlstra for (i = 0; i < curr->lockdep_depth; i++) { 5300a24fc60dSPeter Zijlstra struct held_lock *hlock = curr->held_locks + i; 5301a24fc60dSPeter Zijlstra 5302a24fc60dSPeter Zijlstra if (match_held_lock(hlock, lock)) { 5303e7904a28SPeter Zijlstra hlock->pin_count += cookie.val; 5304a24fc60dSPeter Zijlstra return; 5305a24fc60dSPeter Zijlstra } 5306a24fc60dSPeter Zijlstra } 5307a24fc60dSPeter Zijlstra 5308a24fc60dSPeter Zijlstra WARN(1, "pinning an unheld lock\n"); 5309a24fc60dSPeter Zijlstra } 5310a24fc60dSPeter Zijlstra 5311e7904a28SPeter Zijlstra static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie) 5312a24fc60dSPeter Zijlstra { 5313a24fc60dSPeter Zijlstra struct task_struct *curr = current; 5314a24fc60dSPeter Zijlstra int i; 5315a24fc60dSPeter Zijlstra 5316a24fc60dSPeter Zijlstra if (unlikely(!debug_locks)) 5317a24fc60dSPeter Zijlstra return; 5318a24fc60dSPeter Zijlstra 5319a24fc60dSPeter Zijlstra for (i = 0; i < curr->lockdep_depth; i++) { 5320a24fc60dSPeter Zijlstra struct held_lock *hlock = curr->held_locks + i; 5321a24fc60dSPeter Zijlstra 5322a24fc60dSPeter Zijlstra if (match_held_lock(hlock, lock)) { 5323a24fc60dSPeter Zijlstra if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n")) 5324a24fc60dSPeter Zijlstra return; 5325a24fc60dSPeter Zijlstra 5326e7904a28SPeter Zijlstra hlock->pin_count -= cookie.val; 5327e7904a28SPeter Zijlstra 5328e7904a28SPeter Zijlstra if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n")) 5329e7904a28SPeter Zijlstra hlock->pin_count = 0; 5330e7904a28SPeter Zijlstra 5331a24fc60dSPeter Zijlstra return; 5332a24fc60dSPeter Zijlstra } 5333a24fc60dSPeter Zijlstra } 5334a24fc60dSPeter Zijlstra 5335a24fc60dSPeter Zijlstra WARN(1, "unpinning an unheld lock\n"); 5336a24fc60dSPeter Zijlstra } 5337a24fc60dSPeter Zijlstra 53388eddac3fSPeter Zijlstra /* 53398eddac3fSPeter Zijlstra * Check whether we follow the irq-flags state precisely: 53408eddac3fSPeter Zijlstra */ 53418eddac3fSPeter Zijlstra static void check_flags(unsigned long flags) 53428eddac3fSPeter Zijlstra { 534330a35f79SArnd Bergmann #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) 53448eddac3fSPeter Zijlstra if (!debug_locks) 53458eddac3fSPeter Zijlstra return; 53468eddac3fSPeter Zijlstra 53478eddac3fSPeter Zijlstra if (irqs_disabled_flags(flags)) { 5348f9ad4a5fSPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) { 53498eddac3fSPeter Zijlstra printk("possible reason: unannotated irqs-off.\n"); 53508eddac3fSPeter Zijlstra } 53518eddac3fSPeter Zijlstra } else { 5352f9ad4a5fSPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) { 53538eddac3fSPeter Zijlstra printk("possible 
reason: unannotated irqs-on.\n"); 53548eddac3fSPeter Zijlstra } 53558eddac3fSPeter Zijlstra } 53568eddac3fSPeter Zijlstra 53578eddac3fSPeter Zijlstra /* 53588eddac3fSPeter Zijlstra * We dont accurately track softirq state in e.g. 53598eddac3fSPeter Zijlstra * hardirq contexts (such as on 4KSTACKS), so only 53608eddac3fSPeter Zijlstra * check if not in hardirq contexts: 53618eddac3fSPeter Zijlstra */ 53628eddac3fSPeter Zijlstra if (!hardirq_count()) { 53638eddac3fSPeter Zijlstra if (softirq_count()) { 53648eddac3fSPeter Zijlstra /* like the above, but with softirqs */ 53658eddac3fSPeter Zijlstra DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); 53668eddac3fSPeter Zijlstra } else { 53678eddac3fSPeter Zijlstra /* lick the above, does it taste good? */ 53688eddac3fSPeter Zijlstra DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); 53698eddac3fSPeter Zijlstra } 53708eddac3fSPeter Zijlstra } 53718eddac3fSPeter Zijlstra 53728eddac3fSPeter Zijlstra if (!debug_locks) 53738eddac3fSPeter Zijlstra print_irqtrace_events(current); 53748eddac3fSPeter Zijlstra #endif 53758eddac3fSPeter Zijlstra } 53768eddac3fSPeter Zijlstra 53778eddac3fSPeter Zijlstra void lock_set_class(struct lockdep_map *lock, const char *name, 53788eddac3fSPeter Zijlstra struct lock_class_key *key, unsigned int subclass, 53798eddac3fSPeter Zijlstra unsigned long ip) 53808eddac3fSPeter Zijlstra { 53818eddac3fSPeter Zijlstra unsigned long flags; 53828eddac3fSPeter Zijlstra 53834d004099SPeter Zijlstra if (unlikely(!lockdep_enabled())) 53848eddac3fSPeter Zijlstra return; 53858eddac3fSPeter Zijlstra 53868eddac3fSPeter Zijlstra raw_local_irq_save(flags); 53874d004099SPeter Zijlstra lockdep_recursion_inc(); 53888eddac3fSPeter Zijlstra check_flags(flags); 53898eddac3fSPeter Zijlstra if (__lock_set_class(lock, name, key, subclass, ip)) 53908eddac3fSPeter Zijlstra check_chain_key(current); 539110476e63SPeter Zijlstra lockdep_recursion_finish(); 53928eddac3fSPeter Zijlstra raw_local_irq_restore(flags); 53938eddac3fSPeter Zijlstra } 53948eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(lock_set_class); 53958eddac3fSPeter Zijlstra 53966419c4afSJ. R. Okajima void lock_downgrade(struct lockdep_map *lock, unsigned long ip) 53976419c4afSJ. R. Okajima { 53986419c4afSJ. R. Okajima unsigned long flags; 53996419c4afSJ. R. Okajima 54004d004099SPeter Zijlstra if (unlikely(!lockdep_enabled())) 54016419c4afSJ. R. Okajima return; 54026419c4afSJ. R. Okajima 54036419c4afSJ. R. Okajima raw_local_irq_save(flags); 54044d004099SPeter Zijlstra lockdep_recursion_inc(); 54056419c4afSJ. R. Okajima check_flags(flags); 54066419c4afSJ. R. Okajima if (__lock_downgrade(lock, ip)) 54076419c4afSJ. R. Okajima check_chain_key(current); 540810476e63SPeter Zijlstra lockdep_recursion_finish(); 54096419c4afSJ. R. Okajima raw_local_irq_restore(flags); 54106419c4afSJ. R. Okajima } 54116419c4afSJ. R. Okajima EXPORT_SYMBOL_GPL(lock_downgrade); 54126419c4afSJ. R. Okajima 5413f6f48e18SPeter Zijlstra /* NMI context !!! 
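 * -- reached from lock_acquire() when normal validation is off; it
 * deliberately avoids the graph lock and only checks whether this class is
 * already marked used by lockdep-tracked contexts, since taking such a
 * lock from NMI can deadlock against the context the NMI interrupted.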
*/ 5414f6f48e18SPeter Zijlstra static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock, int subclass) 5415f6f48e18SPeter Zijlstra { 5416f6f48e18SPeter Zijlstra #ifdef CONFIG_PROVE_LOCKING 5417f6f48e18SPeter Zijlstra struct lock_class *class = look_up_lock_class(lock, subclass); 541823870f12Speterz@infradead.org unsigned long mask = LOCKF_USED; 5419f6f48e18SPeter Zijlstra 5420f6f48e18SPeter Zijlstra /* if it doesn't have a class (yet), it certainly hasn't been used yet */ 5421f6f48e18SPeter Zijlstra if (!class) 5422f6f48e18SPeter Zijlstra return; 5423f6f48e18SPeter Zijlstra 542423870f12Speterz@infradead.org /* 542523870f12Speterz@infradead.org * READ locks only conflict with USED, such that if we only ever use 542623870f12Speterz@infradead.org * READ locks, there is no deadlock possible -- RCU. 542723870f12Speterz@infradead.org */ 542823870f12Speterz@infradead.org if (!hlock->read) 542923870f12Speterz@infradead.org mask |= LOCKF_USED_READ; 543023870f12Speterz@infradead.org 543123870f12Speterz@infradead.org if (!(class->usage_mask & mask)) 5432f6f48e18SPeter Zijlstra return; 5433f6f48e18SPeter Zijlstra 5434f6f48e18SPeter Zijlstra hlock->class_idx = class - lock_classes; 5435f6f48e18SPeter Zijlstra 5436f6f48e18SPeter Zijlstra print_usage_bug(current, hlock, LOCK_USED, LOCK_USAGE_STATES); 5437f6f48e18SPeter Zijlstra #endif 5438f6f48e18SPeter Zijlstra } 5439f6f48e18SPeter Zijlstra 5440f6f48e18SPeter Zijlstra static bool lockdep_nmi(void) 5441f6f48e18SPeter Zijlstra { 54424d004099SPeter Zijlstra if (raw_cpu_read(lockdep_recursion)) 5443f6f48e18SPeter Zijlstra return false; 5444f6f48e18SPeter Zijlstra 5445f6f48e18SPeter Zijlstra if (!in_nmi()) 5446f6f48e18SPeter Zijlstra return false; 5447f6f48e18SPeter Zijlstra 5448f6f48e18SPeter Zijlstra return true; 5449f6f48e18SPeter Zijlstra } 5450f6f48e18SPeter Zijlstra 54518eddac3fSPeter Zijlstra /* 5452e9181886SBoqun Feng * read_lock() is recursive if: 5453e9181886SBoqun Feng * 1. We force lockdep to think this way in selftests or 5454e9181886SBoqun Feng * 2. The implementation is not a queued read/write lock or 5455e9181886SBoqun Feng * 3. The locker is in an in_interrupt() context. 5456e9181886SBoqun Feng */ 5457e9181886SBoqun Feng bool read_lock_is_recursive(void) 5458e9181886SBoqun Feng { 5459e9181886SBoqun Feng return force_read_lock_recursive || 5460e9181886SBoqun Feng !IS_ENABLED(CONFIG_QUEUED_RWLOCKS) || 5461e9181886SBoqun Feng in_interrupt(); 5462e9181886SBoqun Feng } 5463e9181886SBoqun Feng EXPORT_SYMBOL_GPL(read_lock_is_recursive); 5464e9181886SBoqun Feng 5465e9181886SBoqun Feng /* 54668eddac3fSPeter Zijlstra * We are not always called with irqs disabled - do that here, 54678eddac3fSPeter Zijlstra * and also avoid lockdep recursion: 54688eddac3fSPeter Zijlstra */ 54698eddac3fSPeter Zijlstra void lock_acquire(struct lockdep_map *lock, unsigned int subclass, 54708eddac3fSPeter Zijlstra int trylock, int read, int check, 54718eddac3fSPeter Zijlstra struct lockdep_map *nest_lock, unsigned long ip) 54728eddac3fSPeter Zijlstra { 54738eddac3fSPeter Zijlstra unsigned long flags; 54748eddac3fSPeter Zijlstra 5475eb1f0023SPeter Zijlstra trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); 5476eb1f0023SPeter Zijlstra 54774d004099SPeter Zijlstra if (!debug_locks) 54784d004099SPeter Zijlstra return; 54794d004099SPeter Zijlstra 54804d004099SPeter Zijlstra if (unlikely(!lockdep_enabled())) { 5481f6f48e18SPeter Zijlstra /* XXX allow trylock from NMI ?!?
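 * -- only the verify_lock_unused() sanity check runs here; the full
 * __lock_acquire() path needs the graph lock, which must never be taken
 * from NMI context because the NMI may have interrupted its holder.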
*/ 5482f6f48e18SPeter Zijlstra if (lockdep_nmi() && !trylock) { 5483f6f48e18SPeter Zijlstra struct held_lock hlock; 5484f6f48e18SPeter Zijlstra 5485f6f48e18SPeter Zijlstra hlock.acquire_ip = ip; 5486f6f48e18SPeter Zijlstra hlock.instance = lock; 5487f6f48e18SPeter Zijlstra hlock.nest_lock = nest_lock; 5488f6f48e18SPeter Zijlstra hlock.irq_context = 2; // XXX 5489f6f48e18SPeter Zijlstra hlock.trylock = trylock; 5490f6f48e18SPeter Zijlstra hlock.read = read; 5491f6f48e18SPeter Zijlstra hlock.check = check; 5492f6f48e18SPeter Zijlstra hlock.hardirqs_off = true; 5493f6f48e18SPeter Zijlstra hlock.references = 0; 5494f6f48e18SPeter Zijlstra 5495f6f48e18SPeter Zijlstra verify_lock_unused(lock, &hlock, subclass); 5496f6f48e18SPeter Zijlstra } 54978eddac3fSPeter Zijlstra return; 5498f6f48e18SPeter Zijlstra } 54998eddac3fSPeter Zijlstra 55008eddac3fSPeter Zijlstra raw_local_irq_save(flags); 55018eddac3fSPeter Zijlstra check_flags(flags); 55028eddac3fSPeter Zijlstra 55034d004099SPeter Zijlstra lockdep_recursion_inc(); 55048eddac3fSPeter Zijlstra __lock_acquire(lock, subclass, trylock, read, check, 550521199f27SPeter Zijlstra irqs_disabled_flags(flags), nest_lock, ip, 0, 0); 550610476e63SPeter Zijlstra lockdep_recursion_finish(); 55078eddac3fSPeter Zijlstra raw_local_irq_restore(flags); 55088eddac3fSPeter Zijlstra } 55098eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(lock_acquire); 55108eddac3fSPeter Zijlstra 55115facae4fSQian Cai void lock_release(struct lockdep_map *lock, unsigned long ip) 55128eddac3fSPeter Zijlstra { 55138eddac3fSPeter Zijlstra unsigned long flags; 55148eddac3fSPeter Zijlstra 5515eb1f0023SPeter Zijlstra trace_lock_release(lock, ip); 5516eb1f0023SPeter Zijlstra 55174d004099SPeter Zijlstra if (unlikely(!lockdep_enabled())) 55188eddac3fSPeter Zijlstra return; 55198eddac3fSPeter Zijlstra 55208eddac3fSPeter Zijlstra raw_local_irq_save(flags); 55218eddac3fSPeter Zijlstra check_flags(flags); 5522eb1f0023SPeter Zijlstra 55234d004099SPeter Zijlstra lockdep_recursion_inc(); 5524b4adfe8eSYuyang Du if (__lock_release(lock, ip)) 5525e0f56fd7SPeter Zijlstra check_chain_key(current); 552610476e63SPeter Zijlstra lockdep_recursion_finish(); 55278eddac3fSPeter Zijlstra raw_local_irq_restore(flags); 55288eddac3fSPeter Zijlstra } 55298eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(lock_release); 55308eddac3fSPeter Zijlstra 5531c86e9b98SPeter Zijlstra noinstr int lock_is_held_type(const struct lockdep_map *lock, int read) 55328eddac3fSPeter Zijlstra { 55338eddac3fSPeter Zijlstra unsigned long flags; 55348eddac3fSPeter Zijlstra int ret = 0; 55358eddac3fSPeter Zijlstra 55364d004099SPeter Zijlstra if (unlikely(!lockdep_enabled())) 55378eddac3fSPeter Zijlstra return 1; /* avoid false negative lockdep_assert_held() */ 55388eddac3fSPeter Zijlstra 55398eddac3fSPeter Zijlstra raw_local_irq_save(flags); 55408eddac3fSPeter Zijlstra check_flags(flags); 55418eddac3fSPeter Zijlstra 55424d004099SPeter Zijlstra lockdep_recursion_inc(); 5543f8319483SPeter Zijlstra ret = __lock_is_held(lock, read); 554410476e63SPeter Zijlstra lockdep_recursion_finish(); 55458eddac3fSPeter Zijlstra raw_local_irq_restore(flags); 55468eddac3fSPeter Zijlstra 55478eddac3fSPeter Zijlstra return ret; 55488eddac3fSPeter Zijlstra } 5549f8319483SPeter Zijlstra EXPORT_SYMBOL_GPL(lock_is_held_type); 55502f43c602SMasami Hiramatsu NOKPROBE_SYMBOL(lock_is_held_type); 55518eddac3fSPeter Zijlstra 5552e7904a28SPeter Zijlstra struct pin_cookie lock_pin_lock(struct lockdep_map *lock) 5553a24fc60dSPeter Zijlstra { 5554e7904a28SPeter Zijlstra struct pin_cookie cookie 
= NIL_COOKIE; 5555a24fc60dSPeter Zijlstra unsigned long flags; 5556a24fc60dSPeter Zijlstra 55574d004099SPeter Zijlstra if (unlikely(!lockdep_enabled())) 5558e7904a28SPeter Zijlstra return cookie; 5559a24fc60dSPeter Zijlstra 5560a24fc60dSPeter Zijlstra raw_local_irq_save(flags); 5561a24fc60dSPeter Zijlstra check_flags(flags); 5562a24fc60dSPeter Zijlstra 55634d004099SPeter Zijlstra lockdep_recursion_inc(); 5564e7904a28SPeter Zijlstra cookie = __lock_pin_lock(lock); 556510476e63SPeter Zijlstra lockdep_recursion_finish(); 5566a24fc60dSPeter Zijlstra raw_local_irq_restore(flags); 5567e7904a28SPeter Zijlstra 5568e7904a28SPeter Zijlstra return cookie; 5569a24fc60dSPeter Zijlstra } 5570a24fc60dSPeter Zijlstra EXPORT_SYMBOL_GPL(lock_pin_lock); 5571a24fc60dSPeter Zijlstra 5572e7904a28SPeter Zijlstra void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie) 5573a24fc60dSPeter Zijlstra { 5574a24fc60dSPeter Zijlstra unsigned long flags; 5575a24fc60dSPeter Zijlstra 55764d004099SPeter Zijlstra if (unlikely(!lockdep_enabled())) 5577a24fc60dSPeter Zijlstra return; 5578a24fc60dSPeter Zijlstra 5579a24fc60dSPeter Zijlstra raw_local_irq_save(flags); 5580a24fc60dSPeter Zijlstra check_flags(flags); 5581a24fc60dSPeter Zijlstra 55824d004099SPeter Zijlstra lockdep_recursion_inc(); 5583e7904a28SPeter Zijlstra __lock_repin_lock(lock, cookie); 558410476e63SPeter Zijlstra lockdep_recursion_finish(); 5585e7904a28SPeter Zijlstra raw_local_irq_restore(flags); 5586e7904a28SPeter Zijlstra } 5587e7904a28SPeter Zijlstra EXPORT_SYMBOL_GPL(lock_repin_lock); 5588e7904a28SPeter Zijlstra 5589e7904a28SPeter Zijlstra void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie) 5590e7904a28SPeter Zijlstra { 5591e7904a28SPeter Zijlstra unsigned long flags; 5592e7904a28SPeter Zijlstra 55934d004099SPeter Zijlstra if (unlikely(!lockdep_enabled())) 5594e7904a28SPeter Zijlstra return; 5595e7904a28SPeter Zijlstra 5596e7904a28SPeter Zijlstra raw_local_irq_save(flags); 5597e7904a28SPeter Zijlstra check_flags(flags); 5598e7904a28SPeter Zijlstra 55994d004099SPeter Zijlstra lockdep_recursion_inc(); 5600e7904a28SPeter Zijlstra __lock_unpin_lock(lock, cookie); 560110476e63SPeter Zijlstra lockdep_recursion_finish(); 5602a24fc60dSPeter Zijlstra raw_local_irq_restore(flags); 5603a24fc60dSPeter Zijlstra } 5604a24fc60dSPeter Zijlstra EXPORT_SYMBOL_GPL(lock_unpin_lock); 5605a24fc60dSPeter Zijlstra 56068eddac3fSPeter Zijlstra #ifdef CONFIG_LOCK_STAT 5607f7c1c6b3SYuyang Du static void print_lock_contention_bug(struct task_struct *curr, 5608f7c1c6b3SYuyang Du struct lockdep_map *lock, 56098eddac3fSPeter Zijlstra unsigned long ip) 56108eddac3fSPeter Zijlstra { 56118eddac3fSPeter Zijlstra if (!debug_locks_off()) 5612f7c1c6b3SYuyang Du return; 56138eddac3fSPeter Zijlstra if (debug_locks_silent) 5614f7c1c6b3SYuyang Du return; 56158eddac3fSPeter Zijlstra 5616681fbec8SPaul E. McKenney pr_warn("\n"); 5617a5dd63efSPaul E. McKenney pr_warn("=================================\n"); 5618a5dd63efSPaul E. McKenney pr_warn("WARNING: bad contention detected!\n"); 56198eddac3fSPeter Zijlstra print_kernel_ident(); 5620a5dd63efSPaul E. McKenney pr_warn("---------------------------------\n"); 5621681fbec8SPaul E. McKenney pr_warn("%s/%d is trying to contend lock (", 56228eddac3fSPeter Zijlstra curr->comm, task_pid_nr(curr)); 56238eddac3fSPeter Zijlstra print_lockdep_cache(lock); 5624681fbec8SPaul E. McKenney pr_cont(") at:\n"); 56252062a4e8SDmitry Safonov print_ip_sym(KERN_WARNING, ip); 5626681fbec8SPaul E. 
McKenney pr_warn("but there are no locks held!\n"); 5627681fbec8SPaul E. McKenney pr_warn("\nother info that might help us debug this:\n"); 56288eddac3fSPeter Zijlstra lockdep_print_held_locks(curr); 56298eddac3fSPeter Zijlstra 5630681fbec8SPaul E. McKenney pr_warn("\nstack backtrace:\n"); 56318eddac3fSPeter Zijlstra dump_stack(); 56328eddac3fSPeter Zijlstra } 56338eddac3fSPeter Zijlstra 56348eddac3fSPeter Zijlstra static void 56358eddac3fSPeter Zijlstra __lock_contended(struct lockdep_map *lock, unsigned long ip) 56368eddac3fSPeter Zijlstra { 56378eddac3fSPeter Zijlstra struct task_struct *curr = current; 563841c2c5b8SJ. R. Okajima struct held_lock *hlock; 56398eddac3fSPeter Zijlstra struct lock_class_stats *stats; 56408eddac3fSPeter Zijlstra unsigned int depth; 56418eddac3fSPeter Zijlstra int i, contention_point, contending_point; 56428eddac3fSPeter Zijlstra 56438eddac3fSPeter Zijlstra depth = curr->lockdep_depth; 56448eddac3fSPeter Zijlstra /* 56458eddac3fSPeter Zijlstra * Whee, we contended on this lock, except it seems we're not 56468eddac3fSPeter Zijlstra * actually trying to acquire anything much at all.. 56478eddac3fSPeter Zijlstra */ 56488eddac3fSPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(!depth)) 56498eddac3fSPeter Zijlstra return; 56508eddac3fSPeter Zijlstra 565141c2c5b8SJ. R. Okajima hlock = find_held_lock(curr, lock, depth, &i); 565241c2c5b8SJ. R. Okajima if (!hlock) { 56538eddac3fSPeter Zijlstra print_lock_contention_bug(curr, lock, ip); 56548eddac3fSPeter Zijlstra return; 565541c2c5b8SJ. R. Okajima } 56568eddac3fSPeter Zijlstra 56578eddac3fSPeter Zijlstra if (hlock->instance != lock) 56588eddac3fSPeter Zijlstra return; 56598eddac3fSPeter Zijlstra 56608eddac3fSPeter Zijlstra hlock->waittime_stamp = lockstat_clock(); 56618eddac3fSPeter Zijlstra 56628eddac3fSPeter Zijlstra contention_point = lock_point(hlock_class(hlock)->contention_point, ip); 56638eddac3fSPeter Zijlstra contending_point = lock_point(hlock_class(hlock)->contending_point, 56648eddac3fSPeter Zijlstra lock->ip); 56658eddac3fSPeter Zijlstra 56668eddac3fSPeter Zijlstra stats = get_lock_stats(hlock_class(hlock)); 56678eddac3fSPeter Zijlstra if (contention_point < LOCKSTAT_POINTS) 56688eddac3fSPeter Zijlstra stats->contention_point[contention_point]++; 56698eddac3fSPeter Zijlstra if (contending_point < LOCKSTAT_POINTS) 56708eddac3fSPeter Zijlstra stats->contending_point[contending_point]++; 56718eddac3fSPeter Zijlstra if (lock->cpu != smp_processor_id()) 56728eddac3fSPeter Zijlstra stats->bounces[bounce_contended + !!hlock->read]++; 56738eddac3fSPeter Zijlstra } 56748eddac3fSPeter Zijlstra 56758eddac3fSPeter Zijlstra static void 56768eddac3fSPeter Zijlstra __lock_acquired(struct lockdep_map *lock, unsigned long ip) 56778eddac3fSPeter Zijlstra { 56788eddac3fSPeter Zijlstra struct task_struct *curr = current; 567941c2c5b8SJ. R. Okajima struct held_lock *hlock; 56808eddac3fSPeter Zijlstra struct lock_class_stats *stats; 56818eddac3fSPeter Zijlstra unsigned int depth; 56828eddac3fSPeter Zijlstra u64 now, waittime = 0; 56838eddac3fSPeter Zijlstra int i, cpu; 56848eddac3fSPeter Zijlstra 56858eddac3fSPeter Zijlstra depth = curr->lockdep_depth; 56868eddac3fSPeter Zijlstra /* 56878eddac3fSPeter Zijlstra * Yay, we acquired ownership of this lock we didn't try to 56888eddac3fSPeter Zijlstra * acquire, how the heck did that happen? 56898eddac3fSPeter Zijlstra */ 56908eddac3fSPeter Zijlstra if (DEBUG_LOCKS_WARN_ON(!depth)) 56918eddac3fSPeter Zijlstra return; 56928eddac3fSPeter Zijlstra 569341c2c5b8SJ. R. 
Okajima hlock = find_held_lock(curr, lock, depth, &i); 569441c2c5b8SJ. R. Okajima if (!hlock) { 56958eddac3fSPeter Zijlstra print_lock_contention_bug(curr, lock, _RET_IP_); 56968eddac3fSPeter Zijlstra return; 569741c2c5b8SJ. R. Okajima } 56988eddac3fSPeter Zijlstra 56998eddac3fSPeter Zijlstra if (hlock->instance != lock) 57008eddac3fSPeter Zijlstra return; 57018eddac3fSPeter Zijlstra 57028eddac3fSPeter Zijlstra cpu = smp_processor_id(); 57038eddac3fSPeter Zijlstra if (hlock->waittime_stamp) { 57048eddac3fSPeter Zijlstra now = lockstat_clock(); 57058eddac3fSPeter Zijlstra waittime = now - hlock->waittime_stamp; 57068eddac3fSPeter Zijlstra hlock->holdtime_stamp = now; 57078eddac3fSPeter Zijlstra } 57088eddac3fSPeter Zijlstra 57098eddac3fSPeter Zijlstra stats = get_lock_stats(hlock_class(hlock)); 57108eddac3fSPeter Zijlstra if (waittime) { 57118eddac3fSPeter Zijlstra if (hlock->read) 57128eddac3fSPeter Zijlstra lock_time_inc(&stats->read_waittime, waittime); 57138eddac3fSPeter Zijlstra else 57148eddac3fSPeter Zijlstra lock_time_inc(&stats->write_waittime, waittime); 57158eddac3fSPeter Zijlstra } 57168eddac3fSPeter Zijlstra if (lock->cpu != cpu) 57178eddac3fSPeter Zijlstra stats->bounces[bounce_acquired + !!hlock->read]++; 57188eddac3fSPeter Zijlstra 57198eddac3fSPeter Zijlstra lock->cpu = cpu; 57208eddac3fSPeter Zijlstra lock->ip = ip; 57218eddac3fSPeter Zijlstra } 57228eddac3fSPeter Zijlstra 57238eddac3fSPeter Zijlstra void lock_contended(struct lockdep_map *lock, unsigned long ip) 57248eddac3fSPeter Zijlstra { 57258eddac3fSPeter Zijlstra unsigned long flags; 57268eddac3fSPeter Zijlstra 5727eb1f0023SPeter Zijlstra trace_lock_contended(lock, ip); 5728eb1f0023SPeter Zijlstra 57294d004099SPeter Zijlstra if (unlikely(!lock_stat || !lockdep_enabled())) 57308eddac3fSPeter Zijlstra return; 57318eddac3fSPeter Zijlstra 57328eddac3fSPeter Zijlstra raw_local_irq_save(flags); 57338eddac3fSPeter Zijlstra check_flags(flags); 57344d004099SPeter Zijlstra lockdep_recursion_inc(); 57358eddac3fSPeter Zijlstra __lock_contended(lock, ip); 573610476e63SPeter Zijlstra lockdep_recursion_finish(); 57378eddac3fSPeter Zijlstra raw_local_irq_restore(flags); 57388eddac3fSPeter Zijlstra } 57398eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(lock_contended); 57408eddac3fSPeter Zijlstra 57418eddac3fSPeter Zijlstra void lock_acquired(struct lockdep_map *lock, unsigned long ip) 57428eddac3fSPeter Zijlstra { 57438eddac3fSPeter Zijlstra unsigned long flags; 57448eddac3fSPeter Zijlstra 5745eb1f0023SPeter Zijlstra trace_lock_acquired(lock, ip); 5746eb1f0023SPeter Zijlstra 57474d004099SPeter Zijlstra if (unlikely(!lock_stat || !lockdep_enabled())) 57488eddac3fSPeter Zijlstra return; 57498eddac3fSPeter Zijlstra 57508eddac3fSPeter Zijlstra raw_local_irq_save(flags); 57518eddac3fSPeter Zijlstra check_flags(flags); 57524d004099SPeter Zijlstra lockdep_recursion_inc(); 57538eddac3fSPeter Zijlstra __lock_acquired(lock, ip); 575410476e63SPeter Zijlstra lockdep_recursion_finish(); 57558eddac3fSPeter Zijlstra raw_local_irq_restore(flags); 57568eddac3fSPeter Zijlstra } 57578eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(lock_acquired); 57588eddac3fSPeter Zijlstra #endif 57598eddac3fSPeter Zijlstra 57608eddac3fSPeter Zijlstra /* 57618eddac3fSPeter Zijlstra * Used by the testsuite, sanitize the validator state 57628eddac3fSPeter Zijlstra * after a simulated failure: 57638eddac3fSPeter Zijlstra */ 57648eddac3fSPeter Zijlstra 57658eddac3fSPeter Zijlstra void lockdep_reset(void) 57668eddac3fSPeter Zijlstra { 57678eddac3fSPeter Zijlstra unsigned long flags;
57688eddac3fSPeter Zijlstra int i; 57698eddac3fSPeter Zijlstra 57708eddac3fSPeter Zijlstra raw_local_irq_save(flags); 5771e196e479SYuyang Du lockdep_init_task(current); 57728eddac3fSPeter Zijlstra memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock)); 57738eddac3fSPeter Zijlstra nr_hardirq_chains = 0; 57748eddac3fSPeter Zijlstra nr_softirq_chains = 0; 57758eddac3fSPeter Zijlstra nr_process_chains = 0; 57768eddac3fSPeter Zijlstra debug_locks = 1; 57778eddac3fSPeter Zijlstra for (i = 0; i < CHAINHASH_SIZE; i++) 5778a63f38ccSAndrew Morton INIT_HLIST_HEAD(chainhash_table + i); 57798eddac3fSPeter Zijlstra raw_local_irq_restore(flags); 57808eddac3fSPeter Zijlstra } 57818eddac3fSPeter Zijlstra 5782a0b0fd53SBart Van Assche /* Remove a class from a lock chain. Must be called with the graph lock held. */ 5783de4643a7SBart Van Assche static void remove_class_from_lock_chain(struct pending_free *pf, 5784de4643a7SBart Van Assche struct lock_chain *chain, 5785a0b0fd53SBart Van Assche struct lock_class *class) 5786a0b0fd53SBart Van Assche { 5787a0b0fd53SBart Van Assche #ifdef CONFIG_PROVE_LOCKING 5788a0b0fd53SBart Van Assche int i; 5789a0b0fd53SBart Van Assche 5790a0b0fd53SBart Van Assche for (i = chain->base; i < chain->base + chain->depth; i++) { 5791f611e8cfSBoqun Feng if (chain_hlock_class_idx(chain_hlocks[i]) != class - lock_classes) 5792a0b0fd53SBart Van Assche continue; 5793a0b0fd53SBart Van Assche /* 5794a0b0fd53SBart Van Assche * Each lock class occurs at most once in a lock chain so once 5795a0b0fd53SBart Van Assche * we found a match we can break out of this loop. 5796a0b0fd53SBart Van Assche */ 5797836bd74bSWaiman Long goto free_lock_chain; 5798a0b0fd53SBart Van Assche } 5799a0b0fd53SBart Van Assche /* Since the chain has not been modified, return. */ 5800a0b0fd53SBart Van Assche return; 5801a0b0fd53SBart Van Assche 5802836bd74bSWaiman Long free_lock_chain: 5803810507feSWaiman Long free_chain_hlocks(chain->base, chain->depth); 5804a0b0fd53SBart Van Assche /* Overwrite the chain key for concurrent RCU readers. */ 5805836bd74bSWaiman Long WRITE_ONCE(chain->chain_key, INITIAL_CHAIN_KEY); 5806b3b9c187SWaiman Long dec_chains(chain->irq_context); 5807b3b9c187SWaiman Long 5808a0b0fd53SBart Van Assche /* 5809a0b0fd53SBart Van Assche * Note: calling hlist_del_rcu() from inside a 5810a0b0fd53SBart Van Assche * hlist_for_each_entry_rcu() loop is safe. 5811a0b0fd53SBart Van Assche */ 5812a0b0fd53SBart Van Assche hlist_del_rcu(&chain->entry); 5813de4643a7SBart Van Assche __set_bit(chain - lock_chains, pf->lock_chains_being_freed); 5814797b82ebSWaiman Long nr_zapped_lock_chains++; 5815a0b0fd53SBart Van Assche #endif 5816a0b0fd53SBart Van Assche } 5817a0b0fd53SBart Van Assche 5818a0b0fd53SBart Van Assche /* Must be called with the graph lock held. 
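Walks every bucket of chainhash_table, since lock chains are hashed by chain_key and cannot be looked up from a lock_class directly.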
*/ 5819de4643a7SBart Van Assche static void remove_class_from_lock_chains(struct pending_free *pf, 5820de4643a7SBart Van Assche struct lock_class *class) 5821a0b0fd53SBart Van Assche { 5822a0b0fd53SBart Van Assche struct lock_chain *chain; 5823a0b0fd53SBart Van Assche struct hlist_head *head; 5824a0b0fd53SBart Van Assche int i; 5825a0b0fd53SBart Van Assche 5826a0b0fd53SBart Van Assche for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) { 5827a0b0fd53SBart Van Assche head = chainhash_table + i; 5828a0b0fd53SBart Van Assche hlist_for_each_entry_rcu(chain, head, entry) { 5829de4643a7SBart Van Assche remove_class_from_lock_chain(pf, chain, class); 5830a0b0fd53SBart Van Assche } 5831a0b0fd53SBart Van Assche } 5832a0b0fd53SBart Van Assche } 5833a0b0fd53SBart Van Assche 5834786fa29eSBart Van Assche /* 5835786fa29eSBart Van Assche * Remove all references to a lock class. The caller must hold the graph lock. 5836786fa29eSBart Van Assche */ 5837a0b0fd53SBart Van Assche static void zap_class(struct pending_free *pf, struct lock_class *class) 58388eddac3fSPeter Zijlstra { 583986cffb80SBart Van Assche struct lock_list *entry; 58408eddac3fSPeter Zijlstra int i; 58418eddac3fSPeter Zijlstra 5842a0b0fd53SBart Van Assche WARN_ON_ONCE(!class->key); 5843a0b0fd53SBart Van Assche 58448eddac3fSPeter Zijlstra /* 58458eddac3fSPeter Zijlstra * Remove all dependencies this lock is 58468eddac3fSPeter Zijlstra * involved in: 58478eddac3fSPeter Zijlstra */ 5848ace35a7aSBart Van Assche for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) { 5849ace35a7aSBart Van Assche entry = list_entries + i; 585086cffb80SBart Van Assche if (entry->class != class && entry->links_to != class) 585186cffb80SBart Van Assche continue; 5852ace35a7aSBart Van Assche __clear_bit(i, list_entries_in_use); 5853ace35a7aSBart Van Assche nr_list_entries--; 585486cffb80SBart Van Assche list_del_rcu(&entry->entry); 58558eddac3fSPeter Zijlstra } 5856a0b0fd53SBart Van Assche if (list_empty(&class->locks_after) && 5857a0b0fd53SBart Van Assche list_empty(&class->locks_before)) { 5858a0b0fd53SBart Van Assche list_move_tail(&class->lock_entry, &pf->zapped); 5859a63f38ccSAndrew Morton hlist_del_rcu(&class->hash_entry); 5860a0b0fd53SBart Van Assche WRITE_ONCE(class->key, NULL); 5861a0b0fd53SBart Van Assche WRITE_ONCE(class->name, NULL); 5862a0b0fd53SBart Van Assche nr_lock_classes--; 586301bb6f0aSYuyang Du __clear_bit(class - lock_classes, lock_classes_in_use); 5864a0b0fd53SBart Van Assche } else { 5865a0b0fd53SBart Van Assche WARN_ONCE(true, "%s() failed for class %s\n", __func__, 5866a0b0fd53SBart Van Assche class->name); 5867a0b0fd53SBart Van Assche } 58688eddac3fSPeter Zijlstra 5869de4643a7SBart Van Assche remove_class_from_lock_chains(pf, class); 58701d44bcb4SWaiman Long nr_zapped_classes++; 5871a0b0fd53SBart Van Assche } 5872a0b0fd53SBart Van Assche 5873a0b0fd53SBart Van Assche static void reinit_class(struct lock_class *class) 5874a0b0fd53SBart Van Assche { 5875a0b0fd53SBart Van Assche void *const p = class; 5876a0b0fd53SBart Van Assche const unsigned int offset = offsetof(struct lock_class, key); 5877a0b0fd53SBart Van Assche 5878a0b0fd53SBart Van Assche WARN_ON_ONCE(!class->lock_entry.next); 5879a0b0fd53SBart Van Assche WARN_ON_ONCE(!list_empty(&class->locks_after)); 5880a0b0fd53SBart Van Assche WARN_ON_ONCE(!list_empty(&class->locks_before)); 5881a0b0fd53SBart Van Assche memset(p + offset, 0, sizeof(*class) - offset); 5882a0b0fd53SBart Van Assche WARN_ON_ONCE(!class->lock_entry.next); 5883a0b0fd53SBart Van Assche 
WARN_ON_ONCE(!list_empty(&class->locks_after)); 5884a0b0fd53SBart Van Assche WARN_ON_ONCE(!list_empty(&class->locks_before)); 58858eddac3fSPeter Zijlstra } 58868eddac3fSPeter Zijlstra 58878eddac3fSPeter Zijlstra static inline int within(const void *addr, void *start, unsigned long size) 58888eddac3fSPeter Zijlstra { 58898eddac3fSPeter Zijlstra return addr >= start && addr < start + size; 58908eddac3fSPeter Zijlstra } 58918eddac3fSPeter Zijlstra 5892a0b0fd53SBart Van Assche static bool inside_selftest(void) 5893a0b0fd53SBart Van Assche { 5894a0b0fd53SBart Van Assche return current == lockdep_selftest_task_struct; 5895a0b0fd53SBart Van Assche } 5896a0b0fd53SBart Van Assche 5897a0b0fd53SBart Van Assche /* The caller must hold the graph lock. */ 5898a0b0fd53SBart Van Assche static struct pending_free *get_pending_free(void) 5899a0b0fd53SBart Van Assche { 5900a0b0fd53SBart Van Assche return delayed_free.pf + delayed_free.index; 5901a0b0fd53SBart Van Assche } 5902a0b0fd53SBart Van Assche 5903a0b0fd53SBart Van Assche static void free_zapped_rcu(struct rcu_head *cb); 5904a0b0fd53SBart Van Assche 5905a0b0fd53SBart Van Assche /* 5906a0b0fd53SBart Van Assche * Schedule an RCU callback if no RCU callback is pending. Must be called with 5907a0b0fd53SBart Van Assche * the graph lock held. 5908a0b0fd53SBart Van Assche */ 5909a0b0fd53SBart Van Assche static void call_rcu_zapped(struct pending_free *pf) 5910a0b0fd53SBart Van Assche { 5911a0b0fd53SBart Van Assche WARN_ON_ONCE(inside_selftest()); 5912a0b0fd53SBart Van Assche 5913a0b0fd53SBart Van Assche if (list_empty(&pf->zapped)) 5914a0b0fd53SBart Van Assche return; 5915a0b0fd53SBart Van Assche 5916a0b0fd53SBart Van Assche if (delayed_free.scheduled) 5917a0b0fd53SBart Van Assche return; 5918a0b0fd53SBart Van Assche 5919a0b0fd53SBart Van Assche delayed_free.scheduled = true; 5920a0b0fd53SBart Van Assche 5921a0b0fd53SBart Van Assche WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf); 5922a0b0fd53SBart Van Assche delayed_free.index ^= 1; 5923a0b0fd53SBart Van Assche 5924a0b0fd53SBart Van Assche call_rcu(&delayed_free.rcu_head, free_zapped_rcu); 5925a0b0fd53SBart Van Assche } 5926a0b0fd53SBart Van Assche 5927a0b0fd53SBart Van Assche /* The caller must hold the graph lock. May be called from RCU context. 
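Reinitializes the zapped classes and splices them back onto the free_lock_classes list; with CONFIG_PROVE_LOCKING it also releases the bitmap bits of the lock chains being freed.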
*/ 5928a0b0fd53SBart Van Assche static void __free_zapped_classes(struct pending_free *pf) 5929a0b0fd53SBart Van Assche { 5930a0b0fd53SBart Van Assche struct lock_class *class; 5931a0b0fd53SBart Van Assche 593272dcd505SPeter Zijlstra check_data_structures(); 5933b526b2e3SBart Van Assche 5934a0b0fd53SBart Van Assche list_for_each_entry(class, &pf->zapped, lock_entry) 5935a0b0fd53SBart Van Assche reinit_class(class); 5936a0b0fd53SBart Van Assche 5937a0b0fd53SBart Van Assche list_splice_init(&pf->zapped, &free_lock_classes); 5938de4643a7SBart Van Assche 5939de4643a7SBart Van Assche #ifdef CONFIG_PROVE_LOCKING 5940de4643a7SBart Van Assche bitmap_andnot(lock_chains_in_use, lock_chains_in_use, 5941de4643a7SBart Van Assche pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains)); 5942de4643a7SBart Van Assche bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains)); 5943de4643a7SBart Van Assche #endif 5944a0b0fd53SBart Van Assche } 5945a0b0fd53SBart Van Assche 5946a0b0fd53SBart Van Assche static void free_zapped_rcu(struct rcu_head *ch) 5947a0b0fd53SBart Van Assche { 5948a0b0fd53SBart Van Assche struct pending_free *pf; 5949a0b0fd53SBart Van Assche unsigned long flags; 5950a0b0fd53SBart Van Assche 5951a0b0fd53SBart Van Assche if (WARN_ON_ONCE(ch != &delayed_free.rcu_head)) 5952a0b0fd53SBart Van Assche return; 5953a0b0fd53SBart Van Assche 5954a0b0fd53SBart Van Assche raw_local_irq_save(flags); 5955248efb21SPeter Zijlstra lockdep_lock(); 5956a0b0fd53SBart Van Assche 5957a0b0fd53SBart Van Assche /* closed head */ 5958a0b0fd53SBart Van Assche pf = delayed_free.pf + (delayed_free.index ^ 1); 5959a0b0fd53SBart Van Assche __free_zapped_classes(pf); 5960a0b0fd53SBart Van Assche delayed_free.scheduled = false; 5961a0b0fd53SBart Van Assche 5962a0b0fd53SBart Van Assche /* 5963a0b0fd53SBart Van Assche * If there's anything on the open list, close and start a new callback. 5964a0b0fd53SBart Van Assche */ 5965a0b0fd53SBart Van Assche call_rcu_zapped(delayed_free.pf + delayed_free.index); 5966a0b0fd53SBart Van Assche 5967248efb21SPeter Zijlstra lockdep_unlock(); 5968a0b0fd53SBart Van Assche raw_local_irq_restore(flags); 5969a0b0fd53SBart Van Assche } 5970a0b0fd53SBart Van Assche 5971a0b0fd53SBart Van Assche /* 5972a0b0fd53SBart Van Assche * Remove all lock classes from the class hash table and from the 5973a0b0fd53SBart Van Assche * all_lock_classes list whose key or name is in the address range [start, 5974a0b0fd53SBart Van Assche * start + size). Move these lock classes to the zapped_classes list. Must 5975a0b0fd53SBart Van Assche * be called with the graph lock held. 5976a0b0fd53SBart Van Assche */ 5977a0b0fd53SBart Van Assche static void __lockdep_free_key_range(struct pending_free *pf, void *start, 5978a0b0fd53SBart Van Assche unsigned long size) 5979956f3563SBart Van Assche { 5980956f3563SBart Van Assche struct lock_class *class; 5981956f3563SBart Van Assche struct hlist_head *head; 5982956f3563SBart Van Assche int i; 5983956f3563SBart Van Assche 5984956f3563SBart Van Assche /* Unhash all classes that were created by a module. 
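A class belongs to the module when its key or its name pointer falls inside [start, start + size).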
*/ 5985956f3563SBart Van Assche for (i = 0; i < CLASSHASH_SIZE; i++) { 5986956f3563SBart Van Assche head = classhash_table + i; 5987956f3563SBart Van Assche hlist_for_each_entry_rcu(class, head, hash_entry) { 5988956f3563SBart Van Assche if (!within(class->key, start, size) && 5989956f3563SBart Van Assche !within(class->name, start, size)) 5990956f3563SBart Van Assche continue; 5991a0b0fd53SBart Van Assche zap_class(pf, class); 5992956f3563SBart Van Assche } 5993956f3563SBart Van Assche } 5994956f3563SBart Van Assche } 5995956f3563SBart Van Assche 599635a9393cSPeter Zijlstra /* 599735a9393cSPeter Zijlstra * Used in module.c to remove lock classes from memory that is going to be 599835a9393cSPeter Zijlstra * freed, and possibly re-used by other modules. 599935a9393cSPeter Zijlstra * 600029fc33fbSBart Van Assche * We will have had one synchronize_rcu() before getting here, so we're 600129fc33fbSBart Van Assche * guaranteed nobody will look up these exact classes -- they're properly dead 600229fc33fbSBart Van Assche * but still allocated. 600335a9393cSPeter Zijlstra */ 6004a0b0fd53SBart Van Assche static void lockdep_free_key_range_reg(void *start, unsigned long size) 60058eddac3fSPeter Zijlstra { 6006a0b0fd53SBart Van Assche struct pending_free *pf; 60078eddac3fSPeter Zijlstra unsigned long flags; 60088eddac3fSPeter Zijlstra 6009feb0a386SBart Van Assche init_data_structures_once(); 6010feb0a386SBart Van Assche 60118eddac3fSPeter Zijlstra raw_local_irq_save(flags); 6012248efb21SPeter Zijlstra lockdep_lock(); 6013a0b0fd53SBart Van Assche pf = get_pending_free(); 6014a0b0fd53SBart Van Assche __lockdep_free_key_range(pf, start, size); 6015a0b0fd53SBart Van Assche call_rcu_zapped(pf); 6016248efb21SPeter Zijlstra lockdep_unlock(); 60178eddac3fSPeter Zijlstra raw_local_irq_restore(flags); 601835a9393cSPeter Zijlstra 601935a9393cSPeter Zijlstra /* 602035a9393cSPeter Zijlstra * Wait for any possible iterators from look_up_lock_class() to pass 602135a9393cSPeter Zijlstra * before continuing to free the memory they refer to. 602235a9393cSPeter Zijlstra */ 602351959d85SPaul E. McKenney synchronize_rcu(); 6024a0b0fd53SBart Van Assche } 602535a9393cSPeter Zijlstra 602635a9393cSPeter Zijlstra /* 6027a0b0fd53SBart Van Assche * Free all lockdep keys in the range [start, start+size). Does not sleep. 6028a0b0fd53SBart Van Assche * Ignores debug_locks. Must only be used by the lockdep selftests.
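* Unlike lockdep_free_key_range_reg(), this frees the zapped data structures synchronously rather than deferring to an RCU callback.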
602935a9393cSPeter Zijlstra */ 6030a0b0fd53SBart Van Assche static void lockdep_free_key_range_imm(void *start, unsigned long size) 6031a0b0fd53SBart Van Assche { 6032a0b0fd53SBart Van Assche struct pending_free *pf = delayed_free.pf; 6033a0b0fd53SBart Van Assche unsigned long flags; 6034a0b0fd53SBart Van Assche 6035a0b0fd53SBart Van Assche init_data_structures_once(); 6036a0b0fd53SBart Van Assche 6037a0b0fd53SBart Van Assche raw_local_irq_save(flags); 6038248efb21SPeter Zijlstra lockdep_lock(); 6039a0b0fd53SBart Van Assche __lockdep_free_key_range(pf, start, size); 6040a0b0fd53SBart Van Assche __free_zapped_classes(pf); 6041248efb21SPeter Zijlstra lockdep_unlock(); 6042a0b0fd53SBart Van Assche raw_local_irq_restore(flags); 6043a0b0fd53SBart Van Assche } 6044a0b0fd53SBart Van Assche 6045a0b0fd53SBart Van Assche void lockdep_free_key_range(void *start, unsigned long size) 6046a0b0fd53SBart Van Assche { 6047a0b0fd53SBart Van Assche init_data_structures_once(); 6048a0b0fd53SBart Van Assche 6049a0b0fd53SBart Van Assche if (inside_selftest()) 6050a0b0fd53SBart Van Assche lockdep_free_key_range_imm(start, size); 6051a0b0fd53SBart Van Assche else 6052a0b0fd53SBart Van Assche lockdep_free_key_range_reg(start, size); 60538eddac3fSPeter Zijlstra } 60548eddac3fSPeter Zijlstra 60552904d9faSBart Van Assche /* 60562904d9faSBart Van Assche * Check whether any element of the @lock->class_cache[] array refers to a 60572904d9faSBart Van Assche * registered lock class. The caller must hold either the graph lock or the 60582904d9faSBart Van Assche * RCU read lock. 60592904d9faSBart Van Assche */ 60602904d9faSBart Van Assche static bool lock_class_cache_is_registered(struct lockdep_map *lock) 60618eddac3fSPeter Zijlstra { 606235a9393cSPeter Zijlstra struct lock_class *class; 6063a63f38ccSAndrew Morton struct hlist_head *head; 60648eddac3fSPeter Zijlstra int i, j; 60652904d9faSBart Van Assche 60662904d9faSBart Van Assche for (i = 0; i < CLASSHASH_SIZE; i++) { 60672904d9faSBart Van Assche head = classhash_table + i; 60682904d9faSBart Van Assche hlist_for_each_entry_rcu(class, head, hash_entry) { 60692904d9faSBart Van Assche for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) 60702904d9faSBart Van Assche if (lock->class_cache[j] == class) 60712904d9faSBart Van Assche return true; 60722904d9faSBart Van Assche } 60732904d9faSBart Van Assche } 60742904d9faSBart Van Assche return false; 60752904d9faSBart Van Assche } 60762904d9faSBart Van Assche 6077956f3563SBart Van Assche /* The caller must hold the graph lock. Does not sleep. */ 6078a0b0fd53SBart Van Assche static void __lockdep_reset_lock(struct pending_free *pf, 6079a0b0fd53SBart Van Assche struct lockdep_map *lock) 60802904d9faSBart Van Assche { 60812904d9faSBart Van Assche struct lock_class *class; 6082956f3563SBart Van Assche int j; 60838eddac3fSPeter Zijlstra 60848eddac3fSPeter Zijlstra /* 60858eddac3fSPeter Zijlstra * Remove all classes this lock might have: 60868eddac3fSPeter Zijlstra */ 60878eddac3fSPeter Zijlstra for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) { 60888eddac3fSPeter Zijlstra /* 60898eddac3fSPeter Zijlstra * If the class exists we look it up and zap it: 60908eddac3fSPeter Zijlstra */ 60918eddac3fSPeter Zijlstra class = look_up_lock_class(lock, j); 609264f29d1bSMatthew Wilcox if (class) 6093a0b0fd53SBart Van Assche zap_class(pf, class); 60948eddac3fSPeter Zijlstra } 60958eddac3fSPeter Zijlstra /* 60968eddac3fSPeter Zijlstra * Debug check: in the end all mapped classes should 60978eddac3fSPeter Zijlstra * be gone. 
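* A stale class_cache[] reference would let a freed class be looked up again, hence lockdep disables itself below if one is found.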
60988eddac3fSPeter Zijlstra */ 6099956f3563SBart Van Assche if (WARN_ON_ONCE(lock_class_cache_is_registered(lock))) 6100956f3563SBart Van Assche debug_locks_off(); 61018eddac3fSPeter Zijlstra } 6102956f3563SBart Van Assche 6103a0b0fd53SBart Van Assche /* 6104a0b0fd53SBart Van Assche * Remove all information lockdep has about a lock if debug_locks == 1. Free 6105a0b0fd53SBart Van Assche * released data structures from RCU context. 6106a0b0fd53SBart Van Assche */ 6107a0b0fd53SBart Van Assche static void lockdep_reset_lock_reg(struct lockdep_map *lock) 6108956f3563SBart Van Assche { 6109a0b0fd53SBart Van Assche struct pending_free *pf; 6110956f3563SBart Van Assche unsigned long flags; 6111956f3563SBart Van Assche int locked; 6112956f3563SBart Van Assche 6113956f3563SBart Van Assche raw_local_irq_save(flags); 6114956f3563SBart Van Assche locked = graph_lock(); 6115a0b0fd53SBart Van Assche if (!locked) 6116a0b0fd53SBart Van Assche goto out_irq; 6117a0b0fd53SBart Van Assche 6118a0b0fd53SBart Van Assche pf = get_pending_free(); 6119a0b0fd53SBart Van Assche __lockdep_reset_lock(pf, lock); 6120a0b0fd53SBart Van Assche call_rcu_zapped(pf); 6121a0b0fd53SBart Van Assche 61228eddac3fSPeter Zijlstra graph_unlock(); 6123a0b0fd53SBart Van Assche out_irq: 61248eddac3fSPeter Zijlstra raw_local_irq_restore(flags); 61258eddac3fSPeter Zijlstra } 61268eddac3fSPeter Zijlstra 6127a0b0fd53SBart Van Assche /* 6128a0b0fd53SBart Van Assche * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the 6129a0b0fd53SBart Van Assche * lockdep selftests. 6130a0b0fd53SBart Van Assche */ 6131a0b0fd53SBart Van Assche static void lockdep_reset_lock_imm(struct lockdep_map *lock) 6132a0b0fd53SBart Van Assche { 6133a0b0fd53SBart Van Assche struct pending_free *pf = delayed_free.pf; 6134a0b0fd53SBart Van Assche unsigned long flags; 6135a0b0fd53SBart Van Assche 6136a0b0fd53SBart Van Assche raw_local_irq_save(flags); 6137248efb21SPeter Zijlstra lockdep_lock(); 6138a0b0fd53SBart Van Assche __lockdep_reset_lock(pf, lock); 6139a0b0fd53SBart Van Assche __free_zapped_classes(pf); 6140248efb21SPeter Zijlstra lockdep_unlock(); 6141a0b0fd53SBart Van Assche raw_local_irq_restore(flags); 6142a0b0fd53SBart Van Assche } 6143a0b0fd53SBart Van Assche 6144a0b0fd53SBart Van Assche void lockdep_reset_lock(struct lockdep_map *lock) 6145a0b0fd53SBart Van Assche { 6146a0b0fd53SBart Van Assche init_data_structures_once(); 6147a0b0fd53SBart Van Assche 6148a0b0fd53SBart Van Assche if (inside_selftest()) 6149a0b0fd53SBart Van Assche lockdep_reset_lock_imm(lock); 6150a0b0fd53SBart Van Assche else 6151a0b0fd53SBart Van Assche lockdep_reset_lock_reg(lock); 6152a0b0fd53SBart Van Assche } 6153a0b0fd53SBart Van Assche 6154108c1485SBart Van Assche /* Unregister a dynamically allocated key. 
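Zaps every class registered under the key, then waits for an RCU grace period so that concurrent is_dynamic_key() lookups have finished; hence the might_sleep() below.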
*/ 6155108c1485SBart Van Assche void lockdep_unregister_key(struct lock_class_key *key) 6156108c1485SBart Van Assche { 6157108c1485SBart Van Assche struct hlist_head *hash_head = keyhashentry(key); 6158108c1485SBart Van Assche struct lock_class_key *k; 6159108c1485SBart Van Assche struct pending_free *pf; 6160108c1485SBart Van Assche unsigned long flags; 6161108c1485SBart Van Assche bool found = false; 6162108c1485SBart Van Assche 6163108c1485SBart Van Assche might_sleep(); 6164108c1485SBart Van Assche 6165108c1485SBart Van Assche if (WARN_ON_ONCE(static_obj(key))) 6166108c1485SBart Van Assche return; 6167108c1485SBart Van Assche 6168108c1485SBart Van Assche raw_local_irq_save(flags); 61698b39adbeSBart Van Assche if (!graph_lock()) 61708b39adbeSBart Van Assche goto out_irq; 61718b39adbeSBart Van Assche 6172108c1485SBart Van Assche pf = get_pending_free(); 6173108c1485SBart Van Assche hlist_for_each_entry_rcu(k, hash_head, hash_entry) { 6174108c1485SBart Van Assche if (k == key) { 6175108c1485SBart Van Assche hlist_del_rcu(&k->hash_entry); 6176108c1485SBart Van Assche found = true; 6177108c1485SBart Van Assche break; 6178108c1485SBart Van Assche } 6179108c1485SBart Van Assche } 6180108c1485SBart Van Assche WARN_ON_ONCE(!found); 6181108c1485SBart Van Assche __lockdep_free_key_range(pf, key, 1); 6182108c1485SBart Van Assche call_rcu_zapped(pf); 61838b39adbeSBart Van Assche graph_unlock(); 61848b39adbeSBart Van Assche out_irq: 6185108c1485SBart Van Assche raw_local_irq_restore(flags); 6186108c1485SBart Van Assche 6187108c1485SBart Van Assche /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */ 6188108c1485SBart Van Assche synchronize_rcu(); 6189108c1485SBart Van Assche } 6190108c1485SBart Van Assche EXPORT_SYMBOL_GPL(lockdep_unregister_key); 6191108c1485SBart Van Assche 6192c3bc8fd6SJoel Fernandes (Google) void __init lockdep_init(void) 61938eddac3fSPeter Zijlstra { 61948eddac3fSPeter Zijlstra printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n"); 61958eddac3fSPeter Zijlstra 61968eddac3fSPeter Zijlstra printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES); 61978eddac3fSPeter Zijlstra printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH); 61988eddac3fSPeter Zijlstra printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS); 61998eddac3fSPeter Zijlstra printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE); 62008eddac3fSPeter Zijlstra printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES); 62018eddac3fSPeter Zijlstra printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS); 62028eddac3fSPeter Zijlstra printk("... 
CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE); 62038eddac3fSPeter Zijlstra 620409d75ecbSBart Van Assche printk(" memory used by lock dependency info: %zu kB\n", 62057ff8517eSBart Van Assche (sizeof(lock_classes) + 620601bb6f0aSYuyang Du sizeof(lock_classes_in_use) + 62077ff8517eSBart Van Assche sizeof(classhash_table) + 62087ff8517eSBart Van Assche sizeof(list_entries) + 6209ace35a7aSBart Van Assche sizeof(list_entries_in_use) + 6210a0b0fd53SBart Van Assche sizeof(chainhash_table) + 6211a0b0fd53SBart Van Assche sizeof(delayed_free) 62128eddac3fSPeter Zijlstra #ifdef CONFIG_PROVE_LOCKING 62137ff8517eSBart Van Assche + sizeof(lock_cq) 621415ea86b5SBart Van Assche + sizeof(lock_chains) 6215de4643a7SBart Van Assche + sizeof(lock_chains_in_use) 621615ea86b5SBart Van Assche + sizeof(chain_hlocks) 62178eddac3fSPeter Zijlstra #endif 62188eddac3fSPeter Zijlstra ) / 1024 62198eddac3fSPeter Zijlstra ); 62208eddac3fSPeter Zijlstra 622112593b74SBart Van Assche #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) 622212593b74SBart Van Assche printk(" memory used for stack traces: %zu kB\n", 622312593b74SBart Van Assche (sizeof(stack_trace) + sizeof(stack_trace_hash)) / 1024 622412593b74SBart Van Assche ); 622512593b74SBart Van Assche #endif 622612593b74SBart Van Assche 622709d75ecbSBart Van Assche printk(" per task-struct memory footprint: %zu bytes\n", 62287ff8517eSBart Van Assche sizeof(((struct task_struct *)NULL)->held_locks)); 62298eddac3fSPeter Zijlstra } 62308eddac3fSPeter Zijlstra 62318eddac3fSPeter Zijlstra static void 62328eddac3fSPeter Zijlstra print_freed_lock_bug(struct task_struct *curr, const void *mem_from, 62338eddac3fSPeter Zijlstra const void *mem_to, struct held_lock *hlock) 62348eddac3fSPeter Zijlstra { 62358eddac3fSPeter Zijlstra if (!debug_locks_off()) 62368eddac3fSPeter Zijlstra return; 62378eddac3fSPeter Zijlstra if (debug_locks_silent) 62388eddac3fSPeter Zijlstra return; 62398eddac3fSPeter Zijlstra 6240681fbec8SPaul E. McKenney pr_warn("\n"); 6241a5dd63efSPaul E. McKenney pr_warn("=========================\n"); 6242a5dd63efSPaul E. McKenney pr_warn("WARNING: held lock freed!\n"); 62438eddac3fSPeter Zijlstra print_kernel_ident(); 6244a5dd63efSPaul E. McKenney pr_warn("-------------------------\n"); 624504860d48SBorislav Petkov pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n", 62468eddac3fSPeter Zijlstra curr->comm, task_pid_nr(curr), mem_from, mem_to-1); 62478eddac3fSPeter Zijlstra print_lock(hlock); 62488eddac3fSPeter Zijlstra lockdep_print_held_locks(curr); 62498eddac3fSPeter Zijlstra 6250681fbec8SPaul E. 
McKenney pr_warn("\nstack backtrace:\n"); 62518eddac3fSPeter Zijlstra dump_stack(); 62528eddac3fSPeter Zijlstra } 62538eddac3fSPeter Zijlstra 62548eddac3fSPeter Zijlstra static inline int not_in_range(const void* mem_from, unsigned long mem_len, 62558eddac3fSPeter Zijlstra const void* lock_from, unsigned long lock_len) 62568eddac3fSPeter Zijlstra { 62578eddac3fSPeter Zijlstra return lock_from + lock_len <= mem_from || 62588eddac3fSPeter Zijlstra mem_from + mem_len <= lock_from; 62598eddac3fSPeter Zijlstra } 62608eddac3fSPeter Zijlstra 62618eddac3fSPeter Zijlstra /* 62628eddac3fSPeter Zijlstra * Called when kernel memory is freed (or unmapped), or if a lock 62638eddac3fSPeter Zijlstra * is destroyed or reinitialized - this code checks whether there is 62648eddac3fSPeter Zijlstra * any held lock in the memory range of <from> to <to>: 62658eddac3fSPeter Zijlstra */ 62668eddac3fSPeter Zijlstra void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) 62678eddac3fSPeter Zijlstra { 62688eddac3fSPeter Zijlstra struct task_struct *curr = current; 62698eddac3fSPeter Zijlstra struct held_lock *hlock; 62708eddac3fSPeter Zijlstra unsigned long flags; 62718eddac3fSPeter Zijlstra int i; 62728eddac3fSPeter Zijlstra 62738eddac3fSPeter Zijlstra if (unlikely(!debug_locks)) 62748eddac3fSPeter Zijlstra return; 62758eddac3fSPeter Zijlstra 6276fcc784beSSteven Rostedt (VMware) raw_local_irq_save(flags); 62778eddac3fSPeter Zijlstra for (i = 0; i < curr->lockdep_depth; i++) { 62788eddac3fSPeter Zijlstra hlock = curr->held_locks + i; 62798eddac3fSPeter Zijlstra 62808eddac3fSPeter Zijlstra if (not_in_range(mem_from, mem_len, hlock->instance, 62818eddac3fSPeter Zijlstra sizeof(*hlock->instance))) 62828eddac3fSPeter Zijlstra continue; 62838eddac3fSPeter Zijlstra 62848eddac3fSPeter Zijlstra print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); 62858eddac3fSPeter Zijlstra break; 62868eddac3fSPeter Zijlstra } 6287fcc784beSSteven Rostedt (VMware) raw_local_irq_restore(flags); 62888eddac3fSPeter Zijlstra } 62898eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); 62908eddac3fSPeter Zijlstra 62918eddac3fSPeter Zijlstra static void print_held_locks_bug(void) 62928eddac3fSPeter Zijlstra { 62938eddac3fSPeter Zijlstra if (!debug_locks_off()) 62948eddac3fSPeter Zijlstra return; 62958eddac3fSPeter Zijlstra if (debug_locks_silent) 62968eddac3fSPeter Zijlstra return; 62978eddac3fSPeter Zijlstra 6298681fbec8SPaul E. McKenney pr_warn("\n"); 6299a5dd63efSPaul E. McKenney pr_warn("====================================\n"); 6300a5dd63efSPaul E. McKenney pr_warn("WARNING: %s/%d still has locks held!\n", 63018eddac3fSPeter Zijlstra current->comm, task_pid_nr(current)); 63028eddac3fSPeter Zijlstra print_kernel_ident(); 6303a5dd63efSPaul E. McKenney pr_warn("------------------------------------\n"); 63048eddac3fSPeter Zijlstra lockdep_print_held_locks(current); 6305681fbec8SPaul E. 
McKenney pr_warn("\nstack backtrace:\n"); 63068eddac3fSPeter Zijlstra dump_stack(); 63078eddac3fSPeter Zijlstra } 63088eddac3fSPeter Zijlstra 63098eddac3fSPeter Zijlstra void debug_check_no_locks_held(void) 63108eddac3fSPeter Zijlstra { 63118eddac3fSPeter Zijlstra if (unlikely(current->lockdep_depth > 0)) 63128eddac3fSPeter Zijlstra print_held_locks_bug(); 63138eddac3fSPeter Zijlstra } 63148eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(debug_check_no_locks_held); 63158eddac3fSPeter Zijlstra 63168dce7a9aSSasha Levin #ifdef __KERNEL__ 63178eddac3fSPeter Zijlstra void debug_show_all_locks(void) 63188eddac3fSPeter Zijlstra { 63198eddac3fSPeter Zijlstra struct task_struct *g, *p; 63208eddac3fSPeter Zijlstra 63218eddac3fSPeter Zijlstra if (unlikely(!debug_locks)) { 6322681fbec8SPaul E. McKenney pr_warn("INFO: lockdep is turned off.\n"); 63238eddac3fSPeter Zijlstra return; 63248eddac3fSPeter Zijlstra } 6325681fbec8SPaul E. McKenney pr_warn("\nShowing all locks held in the system:\n"); 63268eddac3fSPeter Zijlstra 63270f736a52STetsuo Handa rcu_read_lock(); 63280f736a52STetsuo Handa for_each_process_thread(g, p) { 63290f736a52STetsuo Handa if (!p->lockdep_depth) 63300f736a52STetsuo Handa continue; 63318eddac3fSPeter Zijlstra lockdep_print_held_locks(p); 633288f1c87dSTejun Heo touch_nmi_watchdog(); 63330f736a52STetsuo Handa touch_all_softlockup_watchdogs(); 63340f736a52STetsuo Handa } 63350f736a52STetsuo Handa rcu_read_unlock(); 63368eddac3fSPeter Zijlstra 6337681fbec8SPaul E. McKenney pr_warn("\n"); 6338a5dd63efSPaul E. McKenney pr_warn("=============================================\n\n"); 63398eddac3fSPeter Zijlstra } 63408eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(debug_show_all_locks); 63418dce7a9aSSasha Levin #endif 63428eddac3fSPeter Zijlstra 63438eddac3fSPeter Zijlstra /* 63448eddac3fSPeter Zijlstra * Careful: only use this function if you are sure that 63458eddac3fSPeter Zijlstra * the task cannot run in parallel! 63468eddac3fSPeter Zijlstra */ 63478eddac3fSPeter Zijlstra void debug_show_held_locks(struct task_struct *task) 63488eddac3fSPeter Zijlstra { 63498eddac3fSPeter Zijlstra if (unlikely(!debug_locks)) { 63508eddac3fSPeter Zijlstra printk("INFO: lockdep is turned off.\n"); 63518eddac3fSPeter Zijlstra return; 63528eddac3fSPeter Zijlstra } 63538eddac3fSPeter Zijlstra lockdep_print_held_locks(task); 63548eddac3fSPeter Zijlstra } 63558eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(debug_show_held_locks); 63568eddac3fSPeter Zijlstra 6357722a9f92SAndi Kleen asmlinkage __visible void lockdep_sys_exit(void) 63588eddac3fSPeter Zijlstra { 63598eddac3fSPeter Zijlstra struct task_struct *curr = current; 63608eddac3fSPeter Zijlstra 63618eddac3fSPeter Zijlstra if (unlikely(curr->lockdep_depth)) { 63628eddac3fSPeter Zijlstra if (!debug_locks_off()) 63638eddac3fSPeter Zijlstra return; 6364681fbec8SPaul E. McKenney pr_warn("\n"); 6365a5dd63efSPaul E. McKenney pr_warn("================================================\n"); 6366a5dd63efSPaul E. McKenney pr_warn("WARNING: lock held when returning to user space!\n"); 63678eddac3fSPeter Zijlstra print_kernel_ident(); 6368a5dd63efSPaul E. McKenney pr_warn("------------------------------------------------\n"); 6369681fbec8SPaul E. 
McKenney pr_warn("%s/%d is leaving the kernel with locks still held!\n", 63708eddac3fSPeter Zijlstra curr->comm, curr->pid); 63718eddac3fSPeter Zijlstra lockdep_print_held_locks(curr); 63728eddac3fSPeter Zijlstra } 6373b09be676SByungchul Park 6374b09be676SByungchul Park /* 6375b09be676SByungchul Park * The lock history for each syscall should be independent. So wipe the 6376b09be676SByungchul Park * slate clean on return to userspace. 6377b09be676SByungchul Park */ 6378f52be570SPeter Zijlstra lockdep_invariant_state(false); 63798eddac3fSPeter Zijlstra } 63808eddac3fSPeter Zijlstra 63818eddac3fSPeter Zijlstra void lockdep_rcu_suspicious(const char *file, const int line, const char *s) 63828eddac3fSPeter Zijlstra { 63838eddac3fSPeter Zijlstra struct task_struct *curr = current; 63848eddac3fSPeter Zijlstra 63858eddac3fSPeter Zijlstra /* Note: the following can be executed concurrently, so be careful. */ 6386681fbec8SPaul E. McKenney pr_warn("\n"); 6387a5dd63efSPaul E. McKenney pr_warn("=============================\n"); 6388a5dd63efSPaul E. McKenney pr_warn("WARNING: suspicious RCU usage\n"); 63898eddac3fSPeter Zijlstra print_kernel_ident(); 6390a5dd63efSPaul E. McKenney pr_warn("-----------------------------\n"); 6391681fbec8SPaul E. McKenney pr_warn("%s:%d %s!\n", file, line, s); 6392681fbec8SPaul E. McKenney pr_warn("\nother info that might help us debug this:\n\n"); 6393681fbec8SPaul E. McKenney pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n", 63948eddac3fSPeter Zijlstra !rcu_lockdep_current_cpu_online() 63958eddac3fSPeter Zijlstra ? "RCU used illegally from offline CPU!\n" 63968eddac3fSPeter Zijlstra : "", 63978eddac3fSPeter Zijlstra rcu_scheduler_active, debug_locks); 63988eddac3fSPeter Zijlstra 63998eddac3fSPeter Zijlstra /* 64008eddac3fSPeter Zijlstra * If a CPU is in the RCU-free window in idle (ie: in the section 64018eddac3fSPeter Zijlstra * between rcu_idle_enter() and rcu_idle_exit(), then RCU 64028eddac3fSPeter Zijlstra * considers that CPU to be in an "extended quiescent state", 64038eddac3fSPeter Zijlstra * which means that RCU will be completely ignoring that CPU. 64048eddac3fSPeter Zijlstra * Therefore, rcu_read_lock() and friends have absolutely no 64058eddac3fSPeter Zijlstra * effect on a CPU running in that state. In other words, even if 64068eddac3fSPeter Zijlstra * such an RCU-idle CPU has called rcu_read_lock(), RCU might well 64078eddac3fSPeter Zijlstra * delete data structures out from under it. RCU really has no 64088eddac3fSPeter Zijlstra * choice here: we need to keep an RCU-free window in idle where 64098eddac3fSPeter Zijlstra * the CPU may possibly enter into low power mode. This way we can 64108eddac3fSPeter Zijlstra * notice an extended quiescent state to other CPUs that started a grace 64118eddac3fSPeter Zijlstra * period. Otherwise we would delay any grace period as long as we run 64128eddac3fSPeter Zijlstra * in the idle task. 64138eddac3fSPeter Zijlstra * 64148eddac3fSPeter Zijlstra * So complain bitterly if someone does call rcu_read_lock(), 64158eddac3fSPeter Zijlstra * rcu_read_lock_bh() and so on from extended quiescent states. 64168eddac3fSPeter Zijlstra */ 64178eddac3fSPeter Zijlstra if (!rcu_is_watching()) 6418681fbec8SPaul E. McKenney pr_warn("RCU used illegally from extended quiescent state!\n"); 64198eddac3fSPeter Zijlstra 64208eddac3fSPeter Zijlstra lockdep_print_held_locks(curr); 6421681fbec8SPaul E. 
McKenney pr_warn("\nstack backtrace:\n"); 64228eddac3fSPeter Zijlstra dump_stack(); 64238eddac3fSPeter Zijlstra } 64248eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
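/*
 * Editorial sketch, not part of the original file: a typical path into
 * lockdep_rcu_suspicious(). rcu_dereference_check() expands to an
 * RCU_LOCKDEP_WARN() (see <linux/rcupdate.h>) which, when lockdep is
 * enabled and the stated protection condition is false, calls
 * lockdep_rcu_suspicious() to print the splat above. The example_data
 * and example_reader names below are hypothetical.
 */
#if 0	/* illustration only, not compiled */
struct example_data { int val; };
static struct example_data __rcu *example_ptr;

static int example_reader(void)
{
	struct example_data *p;
	int val;

	rcu_read_lock();
	/*
	 * OK: rcu_read_lock() is held, so the check passes. Doing this
	 * dereference outside the read-side critical section would
	 * trigger the "suspicious RCU usage" warning instead.
	 */
	p = rcu_dereference_check(example_ptr, rcu_read_lock_held());
	val = p ? p->val : 0;
	rcu_read_unlock();

	return val;
}
#endif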