xref: /linux/kernel/locking/lockdep.c (revision b11be024de164213f6338973d76ab9ab139120cd)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
28eddac3fSPeter Zijlstra /*
38eddac3fSPeter Zijlstra  * kernel/lockdep.c
48eddac3fSPeter Zijlstra  *
58eddac3fSPeter Zijlstra  * Runtime locking correctness validator
68eddac3fSPeter Zijlstra  *
78eddac3fSPeter Zijlstra  * Started by Ingo Molnar:
88eddac3fSPeter Zijlstra  *
98eddac3fSPeter Zijlstra  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
1090eec103SPeter Zijlstra  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
118eddac3fSPeter Zijlstra  *
128eddac3fSPeter Zijlstra  * this code maps all the lock dependencies as they occur in a live kernel
138eddac3fSPeter Zijlstra  * and will warn about the following classes of locking bugs:
148eddac3fSPeter Zijlstra  *
158eddac3fSPeter Zijlstra  * - lock inversion scenarios
168eddac3fSPeter Zijlstra  * - circular lock dependencies
178eddac3fSPeter Zijlstra  * - hardirq/softirq safe/unsafe locking bugs
188eddac3fSPeter Zijlstra  *
198eddac3fSPeter Zijlstra  * Bugs are reported even if the current locking scenario does not cause
208eddac3fSPeter Zijlstra  * any deadlock at this point.
218eddac3fSPeter Zijlstra  *
228eddac3fSPeter Zijlstra  * I.e. if at any time in the past two locks were taken in a different order,
238eddac3fSPeter Zijlstra  * even if it happened for another task, even if those were different
248eddac3fSPeter Zijlstra  * locks (but of the same class as this lock), this code will detect it.
258eddac3fSPeter Zijlstra  *
268eddac3fSPeter Zijlstra  * Thanks to Arjan van de Ven for coming up with the initial idea of
278eddac3fSPeter Zijlstra  * mapping lock dependencies at runtime.
288eddac3fSPeter Zijlstra  */
298eddac3fSPeter Zijlstra #define DISABLE_BRANCH_PROFILING
308eddac3fSPeter Zijlstra #include <linux/mutex.h>
318eddac3fSPeter Zijlstra #include <linux/sched.h>
32e6017571SIngo Molnar #include <linux/sched/clock.h>
3329930025SIngo Molnar #include <linux/sched/task.h>
346d7225f0SNikolay Borisov #include <linux/sched/mm.h>
358eddac3fSPeter Zijlstra #include <linux/delay.h>
368eddac3fSPeter Zijlstra #include <linux/module.h>
378eddac3fSPeter Zijlstra #include <linux/proc_fs.h>
388eddac3fSPeter Zijlstra #include <linux/seq_file.h>
398eddac3fSPeter Zijlstra #include <linux/spinlock.h>
408eddac3fSPeter Zijlstra #include <linux/kallsyms.h>
418eddac3fSPeter Zijlstra #include <linux/interrupt.h>
428eddac3fSPeter Zijlstra #include <linux/stacktrace.h>
438eddac3fSPeter Zijlstra #include <linux/debug_locks.h>
448eddac3fSPeter Zijlstra #include <linux/irqflags.h>
458eddac3fSPeter Zijlstra #include <linux/utsname.h>
468eddac3fSPeter Zijlstra #include <linux/hash.h>
478eddac3fSPeter Zijlstra #include <linux/ftrace.h>
488eddac3fSPeter Zijlstra #include <linux/stringify.h>
49ace35a7aSBart Van Assche #include <linux/bitmap.h>
508eddac3fSPeter Zijlstra #include <linux/bitops.h>
518eddac3fSPeter Zijlstra #include <linux/gfp.h>
52e7904a28SPeter Zijlstra #include <linux/random.h>
53dfaaf3faSPeter Zijlstra #include <linux/jhash.h>
5488f1c87dSTejun Heo #include <linux/nmi.h>
55a0b0fd53SBart Van Assche #include <linux/rcupdate.h>
562f43c602SMasami Hiramatsu #include <linux/kprobes.h>
578eddac3fSPeter Zijlstra 
588eddac3fSPeter Zijlstra #include <asm/sections.h>
598eddac3fSPeter Zijlstra 
608eddac3fSPeter Zijlstra #include "lockdep_internals.h"
618eddac3fSPeter Zijlstra 
628eddac3fSPeter Zijlstra #define CREATE_TRACE_POINTS
638eddac3fSPeter Zijlstra #include <trace/events/lock.h>
648eddac3fSPeter Zijlstra 
658eddac3fSPeter Zijlstra #ifdef CONFIG_PROVE_LOCKING
668eddac3fSPeter Zijlstra int prove_locking = 1;
678eddac3fSPeter Zijlstra module_param(prove_locking, int, 0644);
688eddac3fSPeter Zijlstra #else
698eddac3fSPeter Zijlstra #define prove_locking 0
708eddac3fSPeter Zijlstra #endif
718eddac3fSPeter Zijlstra 
728eddac3fSPeter Zijlstra #ifdef CONFIG_LOCK_STAT
738eddac3fSPeter Zijlstra int lock_stat = 1;
748eddac3fSPeter Zijlstra module_param(lock_stat, int, 0644);
758eddac3fSPeter Zijlstra #else
768eddac3fSPeter Zijlstra #define lock_stat 0
778eddac3fSPeter Zijlstra #endif
788eddac3fSPeter Zijlstra 
798eddac3fSPeter Zijlstra /*
808eddac3fSPeter Zijlstra  * lockdep_lock: protects the lockdep graph, the hashes and the
818eddac3fSPeter Zijlstra  *               class/list/hash allocators.
828eddac3fSPeter Zijlstra  *
838eddac3fSPeter Zijlstra  * This is one of the rare exceptions where it's justified
848eddac3fSPeter Zijlstra  * to use a raw spinlock - we really don't want the spinlock
858eddac3fSPeter Zijlstra  * code to recurse back into the lockdep code...
868eddac3fSPeter Zijlstra  */
87248efb21SPeter Zijlstra static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
88248efb21SPeter Zijlstra static struct task_struct *__owner;
89248efb21SPeter Zijlstra 
90248efb21SPeter Zijlstra static inline void lockdep_lock(void)
91248efb21SPeter Zijlstra {
92248efb21SPeter Zijlstra 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
93248efb21SPeter Zijlstra 
94248efb21SPeter Zijlstra 	arch_spin_lock(&__lock);
95248efb21SPeter Zijlstra 	__owner = current;
96248efb21SPeter Zijlstra 	current->lockdep_recursion++;
97248efb21SPeter Zijlstra }
98248efb21SPeter Zijlstra 
99248efb21SPeter Zijlstra static inline void lockdep_unlock(void)
100248efb21SPeter Zijlstra {
101248efb21SPeter Zijlstra 	if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
102248efb21SPeter Zijlstra 		return;
103248efb21SPeter Zijlstra 
104248efb21SPeter Zijlstra 	current->lockdep_recursion--;
105248efb21SPeter Zijlstra 	__owner = NULL;
106248efb21SPeter Zijlstra 	arch_spin_unlock(&__lock);
107248efb21SPeter Zijlstra }
108248efb21SPeter Zijlstra 
109248efb21SPeter Zijlstra static inline bool lockdep_assert_locked(void)
110248efb21SPeter Zijlstra {
111248efb21SPeter Zijlstra 	return DEBUG_LOCKS_WARN_ON(__owner != current);
112248efb21SPeter Zijlstra }
113248efb21SPeter Zijlstra 
114cdc84d79SBart Van Assche static struct task_struct *lockdep_selftest_task_struct;
1158eddac3fSPeter Zijlstra 
116248efb21SPeter Zijlstra 
1178eddac3fSPeter Zijlstra static int graph_lock(void)
1188eddac3fSPeter Zijlstra {
119248efb21SPeter Zijlstra 	lockdep_lock();
1208eddac3fSPeter Zijlstra 	/*
1218eddac3fSPeter Zijlstra 	 * Make sure that if another CPU detected a bug while
1228eddac3fSPeter Zijlstra 	 * walking the graph we don't change it (while the other
1238eddac3fSPeter Zijlstra 	 * CPU is busy printing out stuff with the graph lock
1248eddac3fSPeter Zijlstra 	 * dropped already)
1258eddac3fSPeter Zijlstra 	 */
1268eddac3fSPeter Zijlstra 	if (!debug_locks) {
127248efb21SPeter Zijlstra 		lockdep_unlock();
1288eddac3fSPeter Zijlstra 		return 0;
1298eddac3fSPeter Zijlstra 	}
1308eddac3fSPeter Zijlstra 	return 1;
1318eddac3fSPeter Zijlstra }
1328eddac3fSPeter Zijlstra 
133248efb21SPeter Zijlstra static inline void graph_unlock(void)
1348eddac3fSPeter Zijlstra {
135248efb21SPeter Zijlstra 	lockdep_unlock();
1368eddac3fSPeter Zijlstra }
1378eddac3fSPeter Zijlstra 
1388eddac3fSPeter Zijlstra /*
1398eddac3fSPeter Zijlstra  * Turn lock debugging off and return 0 if it was already off,
1408eddac3fSPeter Zijlstra  * and also release the graph lock:
1418eddac3fSPeter Zijlstra  */
1428eddac3fSPeter Zijlstra static inline int debug_locks_off_graph_unlock(void)
1438eddac3fSPeter Zijlstra {
1448eddac3fSPeter Zijlstra 	int ret = debug_locks_off();
1458eddac3fSPeter Zijlstra 
146248efb21SPeter Zijlstra 	lockdep_unlock();
1478eddac3fSPeter Zijlstra 
1488eddac3fSPeter Zijlstra 	return ret;
1498eddac3fSPeter Zijlstra }
1508eddac3fSPeter Zijlstra 
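/*
 * A minimal sketch of the usual calling pattern around the graph lock:
 * interrupts are disabled first (lockdep_lock() asserts irqs_disabled()),
 * graph_lock() is taken and the caller bails out if another CPU has already
 * turned debug_locks off, and the lock is dropped before interrupts are
 * restored. lockdep_register_key() further down follows this shape; the
 * label name below is only a placeholder.
 */
#if 0	/* illustrative only, never compiled */
	unsigned long flags;

	raw_local_irq_save(flags);
	if (!graph_lock())
		goto out_restore;	/* lockdep disabled itself, nothing to do */
	/* ... modify classes, chains or list entries here ... */
	graph_unlock();
out_restore:
	raw_local_irq_restore(flags);
#endif
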
1518eddac3fSPeter Zijlstra unsigned long nr_list_entries;
1528eddac3fSPeter Zijlstra static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
153ace35a7aSBart Van Assche static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
1548eddac3fSPeter Zijlstra 
1558eddac3fSPeter Zijlstra /*
1568eddac3fSPeter Zijlstra  * All data structures here are protected by the global lockdep_lock.
1578eddac3fSPeter Zijlstra  *
158a0b0fd53SBart Van Assche  * nr_lock_classes is the number of elements of lock_classes[] that are
159a0b0fd53SBart Van Assche  * in use.
1608eddac3fSPeter Zijlstra  */
161108c1485SBart Van Assche #define KEYHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
162108c1485SBart Van Assche #define KEYHASH_SIZE		(1UL << KEYHASH_BITS)
163108c1485SBart Van Assche static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
1648eddac3fSPeter Zijlstra unsigned long nr_lock_classes;
1651d44bcb4SWaiman Long unsigned long nr_zapped_classes;
1661431a5d2SBart Van Assche #ifndef CONFIG_DEBUG_LOCKDEP
1671431a5d2SBart Van Assche static
1681431a5d2SBart Van Assche #endif
1698ca2b56cSWaiman Long struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
17001bb6f0aSYuyang Du static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
1718eddac3fSPeter Zijlstra 
1728eddac3fSPeter Zijlstra static inline struct lock_class *hlock_class(struct held_lock *hlock)
1738eddac3fSPeter Zijlstra {
17401bb6f0aSYuyang Du 	unsigned int class_idx = hlock->class_idx;
17501bb6f0aSYuyang Du 
17601bb6f0aSYuyang Du 	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
17701bb6f0aSYuyang Du 	barrier();
17801bb6f0aSYuyang Du 
17901bb6f0aSYuyang Du 	if (!test_bit(class_idx, lock_classes_in_use)) {
1808eddac3fSPeter Zijlstra 		/*
1818eddac3fSPeter Zijlstra 		 * Someone passed in garbage, we give up.
1828eddac3fSPeter Zijlstra 		 */
1838eddac3fSPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(1);
1848eddac3fSPeter Zijlstra 		return NULL;
1858eddac3fSPeter Zijlstra 	}
18601bb6f0aSYuyang Du 
18701bb6f0aSYuyang Du 	/*
18801bb6f0aSYuyang Du 	 * At this point, if the passed hlock->class_idx is still garbage,
18901bb6f0aSYuyang Du 	 * we just have to live with it
19001bb6f0aSYuyang Du 	 */
19101bb6f0aSYuyang Du 	return lock_classes + class_idx;
1928eddac3fSPeter Zijlstra }
1938eddac3fSPeter Zijlstra 
1948eddac3fSPeter Zijlstra #ifdef CONFIG_LOCK_STAT
19525528213SPeter Zijlstra static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
1968eddac3fSPeter Zijlstra 
1978eddac3fSPeter Zijlstra static inline u64 lockstat_clock(void)
1988eddac3fSPeter Zijlstra {
1998eddac3fSPeter Zijlstra 	return local_clock();
2008eddac3fSPeter Zijlstra }
2018eddac3fSPeter Zijlstra 
2028eddac3fSPeter Zijlstra static int lock_point(unsigned long points[], unsigned long ip)
2038eddac3fSPeter Zijlstra {
2048eddac3fSPeter Zijlstra 	int i;
2058eddac3fSPeter Zijlstra 
2068eddac3fSPeter Zijlstra 	for (i = 0; i < LOCKSTAT_POINTS; i++) {
2078eddac3fSPeter Zijlstra 		if (points[i] == 0) {
2088eddac3fSPeter Zijlstra 			points[i] = ip;
2098eddac3fSPeter Zijlstra 			break;
2108eddac3fSPeter Zijlstra 		}
2118eddac3fSPeter Zijlstra 		if (points[i] == ip)
2128eddac3fSPeter Zijlstra 			break;
2138eddac3fSPeter Zijlstra 	}
2148eddac3fSPeter Zijlstra 
2158eddac3fSPeter Zijlstra 	return i;
2168eddac3fSPeter Zijlstra }
2178eddac3fSPeter Zijlstra 
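/*
 * A worked example of lock_point(): with points[] initially all zeroes, the
 * first call with ip == A stores A in slot 0 and returns 0, a later call
 * with ip == B fills slot 1 and returns 1, and any further call with ip == A
 * finds the existing slot and returns 0 again. When every LOCKSTAT_POINTS
 * slot already holds a different address, LOCKSTAT_POINTS is returned to
 * signal that no free or matching slot was found.
 */
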
2188eddac3fSPeter Zijlstra static void lock_time_inc(struct lock_time *lt, u64 time)
2198eddac3fSPeter Zijlstra {
2208eddac3fSPeter Zijlstra 	if (time > lt->max)
2218eddac3fSPeter Zijlstra 		lt->max = time;
2228eddac3fSPeter Zijlstra 
2238eddac3fSPeter Zijlstra 	if (time < lt->min || !lt->nr)
2248eddac3fSPeter Zijlstra 		lt->min = time;
2258eddac3fSPeter Zijlstra 
2268eddac3fSPeter Zijlstra 	lt->total += time;
2278eddac3fSPeter Zijlstra 	lt->nr++;
2288eddac3fSPeter Zijlstra }
2298eddac3fSPeter Zijlstra 
2308eddac3fSPeter Zijlstra static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
2318eddac3fSPeter Zijlstra {
2328eddac3fSPeter Zijlstra 	if (!src->nr)
2338eddac3fSPeter Zijlstra 		return;
2348eddac3fSPeter Zijlstra 
2358eddac3fSPeter Zijlstra 	if (src->max > dst->max)
2368eddac3fSPeter Zijlstra 		dst->max = src->max;
2378eddac3fSPeter Zijlstra 
2388eddac3fSPeter Zijlstra 	if (src->min < dst->min || !dst->nr)
2398eddac3fSPeter Zijlstra 		dst->min = src->min;
2408eddac3fSPeter Zijlstra 
2418eddac3fSPeter Zijlstra 	dst->total += src->total;
2428eddac3fSPeter Zijlstra 	dst->nr += src->nr;
2438eddac3fSPeter Zijlstra }
2448eddac3fSPeter Zijlstra 
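/*
 * For example, merging src = {min 2, max 10, total 30, nr 5} into an empty
 * dst yields exactly src, while merging it into dst = {min 4, max 7,
 * total 12, nr 3} yields dst = {min 2, max 10, total 42, nr 8}.
 */
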
2458eddac3fSPeter Zijlstra struct lock_class_stats lock_stats(struct lock_class *class)
2468eddac3fSPeter Zijlstra {
2478eddac3fSPeter Zijlstra 	struct lock_class_stats stats;
2488eddac3fSPeter Zijlstra 	int cpu, i;
2498eddac3fSPeter Zijlstra 
2508eddac3fSPeter Zijlstra 	memset(&stats, 0, sizeof(struct lock_class_stats));
2518eddac3fSPeter Zijlstra 	for_each_possible_cpu(cpu) {
2528eddac3fSPeter Zijlstra 		struct lock_class_stats *pcs =
2538eddac3fSPeter Zijlstra 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
2548eddac3fSPeter Zijlstra 
2558eddac3fSPeter Zijlstra 		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
2568eddac3fSPeter Zijlstra 			stats.contention_point[i] += pcs->contention_point[i];
2578eddac3fSPeter Zijlstra 
2588eddac3fSPeter Zijlstra 		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
2598eddac3fSPeter Zijlstra 			stats.contending_point[i] += pcs->contending_point[i];
2608eddac3fSPeter Zijlstra 
2618eddac3fSPeter Zijlstra 		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
2628eddac3fSPeter Zijlstra 		lock_time_add(&pcs->write_waittime, &stats.write_waittime);
2638eddac3fSPeter Zijlstra 
2648eddac3fSPeter Zijlstra 		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
2658eddac3fSPeter Zijlstra 		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
2668eddac3fSPeter Zijlstra 
2678eddac3fSPeter Zijlstra 		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
2688eddac3fSPeter Zijlstra 			stats.bounces[i] += pcs->bounces[i];
2698eddac3fSPeter Zijlstra 	}
2708eddac3fSPeter Zijlstra 
2718eddac3fSPeter Zijlstra 	return stats;
2728eddac3fSPeter Zijlstra }
2738eddac3fSPeter Zijlstra 
2748eddac3fSPeter Zijlstra void clear_lock_stats(struct lock_class *class)
2758eddac3fSPeter Zijlstra {
2768eddac3fSPeter Zijlstra 	int cpu;
2778eddac3fSPeter Zijlstra 
2788eddac3fSPeter Zijlstra 	for_each_possible_cpu(cpu) {
2798eddac3fSPeter Zijlstra 		struct lock_class_stats *cpu_stats =
2808eddac3fSPeter Zijlstra 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
2818eddac3fSPeter Zijlstra 
2828eddac3fSPeter Zijlstra 		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
2838eddac3fSPeter Zijlstra 	}
2848eddac3fSPeter Zijlstra 	memset(class->contention_point, 0, sizeof(class->contention_point));
2858eddac3fSPeter Zijlstra 	memset(class->contending_point, 0, sizeof(class->contending_point));
2868eddac3fSPeter Zijlstra }
2878eddac3fSPeter Zijlstra 
2888eddac3fSPeter Zijlstra static struct lock_class_stats *get_lock_stats(struct lock_class *class)
2898eddac3fSPeter Zijlstra {
29001f38497SJoel Fernandes (Google) 	return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
2918eddac3fSPeter Zijlstra }
2928eddac3fSPeter Zijlstra 
2938eddac3fSPeter Zijlstra static void lock_release_holdtime(struct held_lock *hlock)
2948eddac3fSPeter Zijlstra {
2958eddac3fSPeter Zijlstra 	struct lock_class_stats *stats;
2968eddac3fSPeter Zijlstra 	u64 holdtime;
2978eddac3fSPeter Zijlstra 
2988eddac3fSPeter Zijlstra 	if (!lock_stat)
2998eddac3fSPeter Zijlstra 		return;
3008eddac3fSPeter Zijlstra 
3018eddac3fSPeter Zijlstra 	holdtime = lockstat_clock() - hlock->holdtime_stamp;
3028eddac3fSPeter Zijlstra 
3038eddac3fSPeter Zijlstra 	stats = get_lock_stats(hlock_class(hlock));
3048eddac3fSPeter Zijlstra 	if (hlock->read)
3058eddac3fSPeter Zijlstra 		lock_time_inc(&stats->read_holdtime, holdtime);
3068eddac3fSPeter Zijlstra 	else
3078eddac3fSPeter Zijlstra 		lock_time_inc(&stats->write_holdtime, holdtime);
3088eddac3fSPeter Zijlstra }
3098eddac3fSPeter Zijlstra #else
3108eddac3fSPeter Zijlstra static inline void lock_release_holdtime(struct held_lock *hlock)
3118eddac3fSPeter Zijlstra {
3128eddac3fSPeter Zijlstra }
3138eddac3fSPeter Zijlstra #endif
3148eddac3fSPeter Zijlstra 
3158eddac3fSPeter Zijlstra /*
316a0b0fd53SBart Van Assche  * We keep a global list of all lock classes. The list is only accessed with
317a0b0fd53SBart Van Assche  * the lockdep spinlock held. free_lock_classes is a list of free
318a0b0fd53SBart Van Assche  * elements. These elements are linked together by the lock_entry member in
319a0b0fd53SBart Van Assche  * struct lock_class.
3208eddac3fSPeter Zijlstra  */
3218eddac3fSPeter Zijlstra LIST_HEAD(all_lock_classes);
322a0b0fd53SBart Van Assche static LIST_HEAD(free_lock_classes);
323a0b0fd53SBart Van Assche 
324a0b0fd53SBart Van Assche /**
325a0b0fd53SBart Van Assche  * struct pending_free - information about data structures about to be freed
326a0b0fd53SBart Van Assche  * @zapped: Head of a list with struct lock_class elements.
327de4643a7SBart Van Assche  * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
328de4643a7SBart Van Assche  *	are about to be freed.
329a0b0fd53SBart Van Assche  */
330a0b0fd53SBart Van Assche struct pending_free {
331a0b0fd53SBart Van Assche 	struct list_head zapped;
332de4643a7SBart Van Assche 	DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
333a0b0fd53SBart Van Assche };
334a0b0fd53SBart Van Assche 
335a0b0fd53SBart Van Assche /**
336a0b0fd53SBart Van Assche  * struct delayed_free - data structures used for delayed freeing
337a0b0fd53SBart Van Assche  *
338a0b0fd53SBart Van Assche  * A data structure for delayed freeing of data structures that may be
339a0b0fd53SBart Van Assche  * accessed by RCU readers at the time these were freed.
340a0b0fd53SBart Van Assche  *
341a0b0fd53SBart Van Assche  * @rcu_head:  Used to schedule an RCU callback for freeing data structures.
342a0b0fd53SBart Van Assche  * @index:     Index of @pf to which freed data structures are added.
343a0b0fd53SBart Van Assche  * @scheduled: Whether or not an RCU callback has been scheduled.
344a0b0fd53SBart Van Assche  * @pf:        Array with information about data structures about to be freed.
345a0b0fd53SBart Van Assche  */
346a0b0fd53SBart Van Assche static struct delayed_free {
347a0b0fd53SBart Van Assche 	struct rcu_head		rcu_head;
348a0b0fd53SBart Van Assche 	int			index;
349a0b0fd53SBart Van Assche 	int			scheduled;
350a0b0fd53SBart Van Assche 	struct pending_free	pf[2];
351a0b0fd53SBart Van Assche } delayed_free;
3528eddac3fSPeter Zijlstra 
3538eddac3fSPeter Zijlstra /*
3548eddac3fSPeter Zijlstra  * The lockdep classes are in a hash-table as well, for fast lookup:
3558eddac3fSPeter Zijlstra  */
3568eddac3fSPeter Zijlstra #define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
3578eddac3fSPeter Zijlstra #define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
3588eddac3fSPeter Zijlstra #define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
3598eddac3fSPeter Zijlstra #define classhashentry(key)	(classhash_table + __classhashfn((key)))
3608eddac3fSPeter Zijlstra 
361a63f38ccSAndrew Morton static struct hlist_head classhash_table[CLASSHASH_SIZE];
3628eddac3fSPeter Zijlstra 
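/*
 * A brief sketch of how a class is looked up in this hash: the per-subclass
 * key pointer selects one of the CLASSHASH_SIZE buckets and the bucket's
 * hlist is walked under RCU, as look_up_lock_class() does further down:
 */
#if 0	/* illustrative only, never compiled */
	struct lockdep_subclass_key *key = lock->key->subkeys + subclass;
	struct hlist_head *hash_head = classhashentry(key);
	struct lock_class *class;

	hlist_for_each_entry_rcu(class, hash_head, hash_entry)
		if (class->key == key)
			return class;
#endif
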
3638eddac3fSPeter Zijlstra /*
3648eddac3fSPeter Zijlstra  * We put the lock dependency chains into a hash-table as well, to cache
3658eddac3fSPeter Zijlstra  * their existence:
3668eddac3fSPeter Zijlstra  */
3678eddac3fSPeter Zijlstra #define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
3688eddac3fSPeter Zijlstra #define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
3698eddac3fSPeter Zijlstra #define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
3708eddac3fSPeter Zijlstra #define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))
3718eddac3fSPeter Zijlstra 
372a63f38ccSAndrew Morton static struct hlist_head chainhash_table[CHAINHASH_SIZE];
3738eddac3fSPeter Zijlstra 
3748eddac3fSPeter Zijlstra /*
3758eddac3fSPeter Zijlstra  * The hash key of the lock dependency chains is a hash itself too:
3768eddac3fSPeter Zijlstra  * it's a hash of all locks taken up to that lock, including that lock.
3778eddac3fSPeter Zijlstra  * It's a 64-bit hash, because it's important for the keys to be
3788eddac3fSPeter Zijlstra  * unique.
3798eddac3fSPeter Zijlstra  */
380dfaaf3faSPeter Zijlstra static inline u64 iterate_chain_key(u64 key, u32 idx)
381dfaaf3faSPeter Zijlstra {
382dfaaf3faSPeter Zijlstra 	u32 k0 = key, k1 = key >> 32;
383dfaaf3faSPeter Zijlstra 
384dfaaf3faSPeter Zijlstra 	__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
385dfaaf3faSPeter Zijlstra 
386dfaaf3faSPeter Zijlstra 	return k0 | (u64)k1 << 32;
387dfaaf3faSPeter Zijlstra }
3888eddac3fSPeter Zijlstra 
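/*
 * A minimal sketch of how a chain key is accumulated: starting from
 * INITIAL_CHAIN_KEY, the class index of each held lock is folded in, in
 * acquisition order, so identical lock sequences hash to the same chain.
 * check_lock_chain_key() further down recomputes a chain's key the same way
 * from chain_hlocks[]. The class_idx_* names below are only placeholders.
 */
#if 0	/* illustrative only, never compiled */
	u64 chain_key = INITIAL_CHAIN_KEY;

	chain_key = iterate_chain_key(chain_key, class_idx_first);
	chain_key = iterate_chain_key(chain_key, class_idx_second);
#endif
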
389e196e479SYuyang Du void lockdep_init_task(struct task_struct *task)
390e196e479SYuyang Du {
391e196e479SYuyang Du 	task->lockdep_depth = 0; /* no locks held yet */
392f6ec8829SYuyang Du 	task->curr_chain_key = INITIAL_CHAIN_KEY;
393e196e479SYuyang Du 	task->lockdep_recursion = 0;
394e196e479SYuyang Du }
395e196e479SYuyang Du 
3966eebad1aSPeter Zijlstra static __always_inline void lockdep_recursion_finish(void)
39710476e63SPeter Zijlstra {
398859d069eSPeter Zijlstra 	if (WARN_ON_ONCE((--current->lockdep_recursion) & LOCKDEP_RECURSION_MASK))
39910476e63SPeter Zijlstra 		current->lockdep_recursion = 0;
40010476e63SPeter Zijlstra }
40110476e63SPeter Zijlstra 
402cdc84d79SBart Van Assche void lockdep_set_selftest_task(struct task_struct *task)
403cdc84d79SBart Van Assche {
404cdc84d79SBart Van Assche 	lockdep_selftest_task_struct = task;
405cdc84d79SBart Van Assche }
406cdc84d79SBart Van Assche 
4078eddac3fSPeter Zijlstra /*
4088eddac3fSPeter Zijlstra  * Debugging switches:
4098eddac3fSPeter Zijlstra  */
4108eddac3fSPeter Zijlstra 
4118eddac3fSPeter Zijlstra #define VERBOSE			0
4128eddac3fSPeter Zijlstra #define VERY_VERBOSE		0
4138eddac3fSPeter Zijlstra 
4148eddac3fSPeter Zijlstra #if VERBOSE
4158eddac3fSPeter Zijlstra # define HARDIRQ_VERBOSE	1
4168eddac3fSPeter Zijlstra # define SOFTIRQ_VERBOSE	1
4178eddac3fSPeter Zijlstra #else
4188eddac3fSPeter Zijlstra # define HARDIRQ_VERBOSE	0
4198eddac3fSPeter Zijlstra # define SOFTIRQ_VERBOSE	0
4208eddac3fSPeter Zijlstra #endif
4218eddac3fSPeter Zijlstra 
422d92a8cfcSPeter Zijlstra #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
4238eddac3fSPeter Zijlstra /*
4248eddac3fSPeter Zijlstra  * Quick filtering for interesting events:
4258eddac3fSPeter Zijlstra  */
4268eddac3fSPeter Zijlstra static int class_filter(struct lock_class *class)
4278eddac3fSPeter Zijlstra {
4288eddac3fSPeter Zijlstra #if 0
4298eddac3fSPeter Zijlstra 	/* Example */
4308eddac3fSPeter Zijlstra 	if (class->name_version == 1 &&
4318eddac3fSPeter Zijlstra 			!strcmp(class->name, "lockname"))
4328eddac3fSPeter Zijlstra 		return 1;
4338eddac3fSPeter Zijlstra 	if (class->name_version == 1 &&
4348eddac3fSPeter Zijlstra 			!strcmp(class->name, "&struct->lockfield"))
4358eddac3fSPeter Zijlstra 		return 1;
4368eddac3fSPeter Zijlstra #endif
4378eddac3fSPeter Zijlstra 	/* Filter everything else. Returning 1 here would allow everything else. */
4388eddac3fSPeter Zijlstra 	return 0;
4398eddac3fSPeter Zijlstra }
4408eddac3fSPeter Zijlstra #endif
4418eddac3fSPeter Zijlstra 
4428eddac3fSPeter Zijlstra static int verbose(struct lock_class *class)
4438eddac3fSPeter Zijlstra {
4448eddac3fSPeter Zijlstra #if VERBOSE
4458eddac3fSPeter Zijlstra 	return class_filter(class);
4468eddac3fSPeter Zijlstra #endif
4478eddac3fSPeter Zijlstra 	return 0;
4488eddac3fSPeter Zijlstra }
4498eddac3fSPeter Zijlstra 
4508eddac3fSPeter Zijlstra static void print_lockdep_off(const char *bug_msg)
4518eddac3fSPeter Zijlstra {
4528eddac3fSPeter Zijlstra 	printk(KERN_DEBUG "%s\n", bug_msg);
4538eddac3fSPeter Zijlstra 	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
454acf59377SAndreas Gruenbacher #ifdef CONFIG_LOCK_STAT
4558eddac3fSPeter Zijlstra 	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
456acf59377SAndreas Gruenbacher #endif
4578eddac3fSPeter Zijlstra }
4588eddac3fSPeter Zijlstra 
459886532aeSArnd Bergmann unsigned long nr_stack_trace_entries;
460886532aeSArnd Bergmann 
46130a35f79SArnd Bergmann #ifdef CONFIG_PROVE_LOCKING
46212593b74SBart Van Assche /**
46312593b74SBart Van Assche  * struct lock_trace - single stack backtrace
46412593b74SBart Van Assche  * @hash_entry:	Entry in a stack_trace_hash[] list.
46512593b74SBart Van Assche  * @hash:	jhash() of @entries.
46612593b74SBart Van Assche  * @nr_entries:	Number of entries in @entries.
46712593b74SBart Van Assche  * @entries:	Actual stack backtrace.
46812593b74SBart Van Assche  */
46912593b74SBart Van Assche struct lock_trace {
47012593b74SBart Van Assche 	struct hlist_node	hash_entry;
47112593b74SBart Van Assche 	u32			hash;
47212593b74SBart Van Assche 	u32			nr_entries;
473db78538cSGustavo A. R. Silva 	unsigned long		entries[] __aligned(sizeof(unsigned long));
47412593b74SBart Van Assche };
47512593b74SBart Van Assche #define LOCK_TRACE_SIZE_IN_LONGS				\
47612593b74SBart Van Assche 	(sizeof(struct lock_trace) / sizeof(unsigned long))
477886532aeSArnd Bergmann /*
47812593b74SBart Van Assche  * Stack-trace: sequence of lock_trace structures. Protected by the graph_lock.
479886532aeSArnd Bergmann  */
480886532aeSArnd Bergmann static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
48112593b74SBart Van Assche static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE];
482886532aeSArnd Bergmann 
48312593b74SBart Van Assche static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2)
4848eddac3fSPeter Zijlstra {
48512593b74SBart Van Assche 	return t1->hash == t2->hash && t1->nr_entries == t2->nr_entries &&
48612593b74SBart Van Assche 		memcmp(t1->entries, t2->entries,
48712593b74SBart Van Assche 		       t1->nr_entries * sizeof(t1->entries[0])) == 0;
48812593b74SBart Van Assche }
48912593b74SBart Van Assche 
49012593b74SBart Van Assche static struct lock_trace *save_trace(void)
49112593b74SBart Van Assche {
49212593b74SBart Van Assche 	struct lock_trace *trace, *t2;
49312593b74SBart Van Assche 	struct hlist_head *hash_head;
49412593b74SBart Van Assche 	u32 hash;
495d91f3057SWaiman Long 	int max_entries;
4968eddac3fSPeter Zijlstra 
49712593b74SBart Van Assche 	BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
49812593b74SBart Van Assche 	BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);
4998eddac3fSPeter Zijlstra 
50012593b74SBart Van Assche 	trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
50112593b74SBart Van Assche 	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
50212593b74SBart Van Assche 		LOCK_TRACE_SIZE_IN_LONGS;
50312593b74SBart Van Assche 
504d91f3057SWaiman Long 	if (max_entries <= 0) {
5058eddac3fSPeter Zijlstra 		if (!debug_locks_off_graph_unlock())
50612593b74SBart Van Assche 			return NULL;
5078eddac3fSPeter Zijlstra 
5088eddac3fSPeter Zijlstra 		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
5098eddac3fSPeter Zijlstra 		dump_stack();
5108eddac3fSPeter Zijlstra 
51112593b74SBart Van Assche 		return NULL;
5128eddac3fSPeter Zijlstra 	}
513d91f3057SWaiman Long 	trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
5148eddac3fSPeter Zijlstra 
51512593b74SBart Van Assche 	hash = jhash(trace->entries, trace->nr_entries *
51612593b74SBart Van Assche 		     sizeof(trace->entries[0]), 0);
51712593b74SBart Van Assche 	trace->hash = hash;
51812593b74SBart Van Assche 	hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1));
51912593b74SBart Van Assche 	hlist_for_each_entry(t2, hash_head, hash_entry) {
52012593b74SBart Van Assche 		if (traces_identical(trace, t2))
52112593b74SBart Van Assche 			return t2;
52212593b74SBart Van Assche 	}
52312593b74SBart Van Assche 	nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries;
52412593b74SBart Van Assche 	hlist_add_head(&trace->hash_entry, hash_head);
52512593b74SBart Van Assche 
52612593b74SBart Van Assche 	return trace;
5278eddac3fSPeter Zijlstra }
5288c779229SBart Van Assche 
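/*
 * Layout note: stack_trace[] is consumed as a bump allocator of
 * variable-sized struct lock_trace records, each taking
 * LOCK_TRACE_SIZE_IN_LONGS longs of header plus nr_entries longs of
 * backtrace. Identical traces are deduplicated via stack_trace_hash[], so
 * repeated acquisitions from the same call path reuse one record instead of
 * consuming fresh entries.
 */
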
5298c779229SBart Van Assche /* Return the number of stack traces in the stack_trace[] array. */
5308c779229SBart Van Assche u64 lockdep_stack_trace_count(void)
5318c779229SBart Van Assche {
5328c779229SBart Van Assche 	struct lock_trace *trace;
5338c779229SBart Van Assche 	u64 c = 0;
5348c779229SBart Van Assche 	int i;
5358c779229SBart Van Assche 
5368c779229SBart Van Assche 	for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) {
5378c779229SBart Van Assche 		hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) {
5388c779229SBart Van Assche 			c++;
5398c779229SBart Van Assche 		}
5408c779229SBart Van Assche 	}
5418c779229SBart Van Assche 
5428c779229SBart Van Assche 	return c;
5438c779229SBart Van Assche }
5448c779229SBart Van Assche 
5458c779229SBart Van Assche /* Return the number of stack hash chains that have at least one stack trace. */
5468c779229SBart Van Assche u64 lockdep_stack_hash_count(void)
5478c779229SBart Van Assche {
5488c779229SBart Van Assche 	u64 c = 0;
5498c779229SBart Van Assche 	int i;
5508c779229SBart Van Assche 
5518c779229SBart Van Assche 	for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++)
5528c779229SBart Van Assche 		if (!hlist_empty(&stack_trace_hash[i]))
5538c779229SBart Van Assche 			c++;
5548c779229SBart Van Assche 
5558c779229SBart Van Assche 	return c;
5568eddac3fSPeter Zijlstra }
557886532aeSArnd Bergmann #endif
5588eddac3fSPeter Zijlstra 
5598eddac3fSPeter Zijlstra unsigned int nr_hardirq_chains;
5608eddac3fSPeter Zijlstra unsigned int nr_softirq_chains;
5618eddac3fSPeter Zijlstra unsigned int nr_process_chains;
5628eddac3fSPeter Zijlstra unsigned int max_lockdep_depth;
5638eddac3fSPeter Zijlstra 
5648eddac3fSPeter Zijlstra #ifdef CONFIG_DEBUG_LOCKDEP
5658eddac3fSPeter Zijlstra /*
5668eddac3fSPeter Zijlstra  * Various lockdep statistics:
5678eddac3fSPeter Zijlstra  */
5688eddac3fSPeter Zijlstra DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
5698eddac3fSPeter Zijlstra #endif
5708eddac3fSPeter Zijlstra 
57130a35f79SArnd Bergmann #ifdef CONFIG_PROVE_LOCKING
5728eddac3fSPeter Zijlstra /*
5738eddac3fSPeter Zijlstra  * Locking printouts:
5748eddac3fSPeter Zijlstra  */
5758eddac3fSPeter Zijlstra 
5768eddac3fSPeter Zijlstra #define __USAGE(__STATE)						\
5778eddac3fSPeter Zijlstra 	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
5788eddac3fSPeter Zijlstra 	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",		\
5798eddac3fSPeter Zijlstra 	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
5808eddac3fSPeter Zijlstra 	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
5818eddac3fSPeter Zijlstra 
5828eddac3fSPeter Zijlstra static const char *usage_str[] =
5838eddac3fSPeter Zijlstra {
5848eddac3fSPeter Zijlstra #define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
5858eddac3fSPeter Zijlstra #include "lockdep_states.h"
5868eddac3fSPeter Zijlstra #undef LOCKDEP_STATE
5878eddac3fSPeter Zijlstra 	[LOCK_USED] = "INITIAL USE",
588f6f48e18SPeter Zijlstra 	[LOCK_USAGE_STATES] = "IN-NMI",
5898eddac3fSPeter Zijlstra };
590886532aeSArnd Bergmann #endif
5918eddac3fSPeter Zijlstra 
592364f6afcSBart Van Assche const char *__get_key_name(const struct lockdep_subclass_key *key, char *str)
5938eddac3fSPeter Zijlstra {
5948eddac3fSPeter Zijlstra 	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
5958eddac3fSPeter Zijlstra }
5968eddac3fSPeter Zijlstra 
5978eddac3fSPeter Zijlstra static inline unsigned long lock_flag(enum lock_usage_bit bit)
5988eddac3fSPeter Zijlstra {
5998eddac3fSPeter Zijlstra 	return 1UL << bit;
6008eddac3fSPeter Zijlstra }
6018eddac3fSPeter Zijlstra 
6028eddac3fSPeter Zijlstra static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
6038eddac3fSPeter Zijlstra {
604c52478f4SYuyang Du 	/*
605c52478f4SYuyang Du 	 * The usage character defaults to '.' (i.e., irqs disabled and not in
606c52478f4SYuyang Du 	 * irq context), which is the safest usage category.
607c52478f4SYuyang Du 	 */
6088eddac3fSPeter Zijlstra 	char c = '.';
6098eddac3fSPeter Zijlstra 
610c52478f4SYuyang Du 	/*
611c52478f4SYuyang Du 	 * The order of the following usage checks matters, which will
612c52478f4SYuyang Du 	 * result in the outcome character as follows:
613c52478f4SYuyang Du 	 *
614c52478f4SYuyang Du 	 * - '+': irq is enabled and not in irq context
615c52478f4SYuyang Du 	 * - '-': in irq context and irq is disabled
616c52478f4SYuyang Du 	 * - '?': in irq context and irq is enabled
617c52478f4SYuyang Du 	 */
618c52478f4SYuyang Du 	if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
6198eddac3fSPeter Zijlstra 		c = '+';
620c52478f4SYuyang Du 		if (class->usage_mask & lock_flag(bit))
6218eddac3fSPeter Zijlstra 			c = '?';
622c52478f4SYuyang Du 	} else if (class->usage_mask & lock_flag(bit))
623c52478f4SYuyang Du 		c = '-';
6248eddac3fSPeter Zijlstra 
6258eddac3fSPeter Zijlstra 	return c;
6268eddac3fSPeter Zijlstra }
6278eddac3fSPeter Zijlstra 
6288eddac3fSPeter Zijlstra void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
6298eddac3fSPeter Zijlstra {
6308eddac3fSPeter Zijlstra 	int i = 0;
6318eddac3fSPeter Zijlstra 
6328eddac3fSPeter Zijlstra #define LOCKDEP_STATE(__STATE) 						\
6338eddac3fSPeter Zijlstra 	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
6348eddac3fSPeter Zijlstra 	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
6358eddac3fSPeter Zijlstra #include "lockdep_states.h"
6368eddac3fSPeter Zijlstra #undef LOCKDEP_STATE
6378eddac3fSPeter Zijlstra 
6388eddac3fSPeter Zijlstra 	usage[i] = '\0';
6398eddac3fSPeter Zijlstra }
6408eddac3fSPeter Zijlstra 
6418eddac3fSPeter Zijlstra static void __print_lock_name(struct lock_class *class)
6428eddac3fSPeter Zijlstra {
6438eddac3fSPeter Zijlstra 	char str[KSYM_NAME_LEN];
6448eddac3fSPeter Zijlstra 	const char *name;
6458eddac3fSPeter Zijlstra 
6468eddac3fSPeter Zijlstra 	name = class->name;
6478eddac3fSPeter Zijlstra 	if (!name) {
6488eddac3fSPeter Zijlstra 		name = __get_key_name(class->key, str);
649f943fe0fSDmitry Vyukov 		printk(KERN_CONT "%s", name);
6508eddac3fSPeter Zijlstra 	} else {
651f943fe0fSDmitry Vyukov 		printk(KERN_CONT "%s", name);
6528eddac3fSPeter Zijlstra 		if (class->name_version > 1)
653f943fe0fSDmitry Vyukov 			printk(KERN_CONT "#%d", class->name_version);
6548eddac3fSPeter Zijlstra 		if (class->subclass)
655f943fe0fSDmitry Vyukov 			printk(KERN_CONT "/%d", class->subclass);
6568eddac3fSPeter Zijlstra 	}
6578eddac3fSPeter Zijlstra }
6588eddac3fSPeter Zijlstra 
6598eddac3fSPeter Zijlstra static void print_lock_name(struct lock_class *class)
6608eddac3fSPeter Zijlstra {
6618eddac3fSPeter Zijlstra 	char usage[LOCK_USAGE_CHARS];
6628eddac3fSPeter Zijlstra 
6638eddac3fSPeter Zijlstra 	get_usage_chars(class, usage);
6648eddac3fSPeter Zijlstra 
665f943fe0fSDmitry Vyukov 	printk(KERN_CONT " (");
6668eddac3fSPeter Zijlstra 	__print_lock_name(class);
667de8f5e4fSPeter Zijlstra 	printk(KERN_CONT "){%s}-{%hd:%hd}", usage,
668de8f5e4fSPeter Zijlstra 			class->wait_type_outer ?: class->wait_type_inner,
669de8f5e4fSPeter Zijlstra 			class->wait_type_inner);
6708eddac3fSPeter Zijlstra }
6718eddac3fSPeter Zijlstra 
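/*
 * For illustration, print_lock_name() produces output of the form
 * " (&p->pi_lock){-.-.}-{2:2}": the class name (plus "#<version>" and
 * "/<subclass>" when applicable), the usage characters built by
 * get_usage_chars(), and the outer:inner wait types. The concrete name and
 * wait-type numbers above are just an example, not taken from this file.
 */
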
6728eddac3fSPeter Zijlstra static void print_lockdep_cache(struct lockdep_map *lock)
6738eddac3fSPeter Zijlstra {
6748eddac3fSPeter Zijlstra 	const char *name;
6758eddac3fSPeter Zijlstra 	char str[KSYM_NAME_LEN];
6768eddac3fSPeter Zijlstra 
6778eddac3fSPeter Zijlstra 	name = lock->name;
6788eddac3fSPeter Zijlstra 	if (!name)
6798eddac3fSPeter Zijlstra 		name = __get_key_name(lock->key->subkeys, str);
6808eddac3fSPeter Zijlstra 
681f943fe0fSDmitry Vyukov 	printk(KERN_CONT "%s", name);
6828eddac3fSPeter Zijlstra }
6838eddac3fSPeter Zijlstra 
6848eddac3fSPeter Zijlstra static void print_lock(struct held_lock *hlock)
6858eddac3fSPeter Zijlstra {
686d7bc3197SPeter Zijlstra 	/*
687d7bc3197SPeter Zijlstra 	 * We can be called locklessly through debug_show_all_locks() so be
688d7bc3197SPeter Zijlstra 	 * extra careful, the hlock might have been released and cleared.
68901bb6f0aSYuyang Du 	 *
69001bb6f0aSYuyang Du 	 * If this indeed happens, let's pretend it does not hurt to continue
69101bb6f0aSYuyang Du 	 * printing the lock, unless the hlock class_idx does not point to a
69201bb6f0aSYuyang Du 	 * registered class. The rationale here is: since we don't attempt to
69301bb6f0aSYuyang Du 	 * detect that situation, if it did just happen we can't count on
69401bb6f0aSYuyang Du 	 * class_idx to tell us either.
695d7bc3197SPeter Zijlstra 	 */
69601bb6f0aSYuyang Du 	struct lock_class *lock = hlock_class(hlock);
697d7bc3197SPeter Zijlstra 
69801bb6f0aSYuyang Du 	if (!lock) {
699f943fe0fSDmitry Vyukov 		printk(KERN_CONT "<RELEASED>\n");
700d7bc3197SPeter Zijlstra 		return;
701d7bc3197SPeter Zijlstra 	}
702d7bc3197SPeter Zijlstra 
703519248f3SPaul E. McKenney 	printk(KERN_CONT "%px", hlock->instance);
70401bb6f0aSYuyang Du 	print_lock_name(lock);
705b3c39758STetsuo Handa 	printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
7068eddac3fSPeter Zijlstra }
7078eddac3fSPeter Zijlstra 
7088cc05c71STetsuo Handa static void lockdep_print_held_locks(struct task_struct *p)
7098eddac3fSPeter Zijlstra {
7108cc05c71STetsuo Handa 	int i, depth = READ_ONCE(p->lockdep_depth);
7118eddac3fSPeter Zijlstra 
7128cc05c71STetsuo Handa 	if (!depth)
7138cc05c71STetsuo Handa 		printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
7148cc05c71STetsuo Handa 	else
7158cc05c71STetsuo Handa 		printk("%d lock%s held by %s/%d:\n", depth,
7168cc05c71STetsuo Handa 		       depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
7178cc05c71STetsuo Handa 	/*
7188cc05c71STetsuo Handa 	 * It's not reliable to print a task's held locks if it's not sleeping
7198cc05c71STetsuo Handa 	 * and it's not the current task.
7208cc05c71STetsuo Handa 	 */
7218cc05c71STetsuo Handa 	if (p->state == TASK_RUNNING && p != current)
7228eddac3fSPeter Zijlstra 		return;
7238eddac3fSPeter Zijlstra 	for (i = 0; i < depth; i++) {
7248eddac3fSPeter Zijlstra 		printk(" #%d: ", i);
7258cc05c71STetsuo Handa 		print_lock(p->held_locks + i);
7268eddac3fSPeter Zijlstra 	}
7278eddac3fSPeter Zijlstra }
7288eddac3fSPeter Zijlstra 
7298eddac3fSPeter Zijlstra static void print_kernel_ident(void)
7308eddac3fSPeter Zijlstra {
7318eddac3fSPeter Zijlstra 	printk("%s %.*s %s\n", init_utsname()->release,
7328eddac3fSPeter Zijlstra 		(int)strcspn(init_utsname()->version, " "),
7338eddac3fSPeter Zijlstra 		init_utsname()->version,
7348eddac3fSPeter Zijlstra 		print_tainted());
7358eddac3fSPeter Zijlstra }
7368eddac3fSPeter Zijlstra 
7378eddac3fSPeter Zijlstra static int very_verbose(struct lock_class *class)
7388eddac3fSPeter Zijlstra {
7398eddac3fSPeter Zijlstra #if VERY_VERBOSE
7408eddac3fSPeter Zijlstra 	return class_filter(class);
7418eddac3fSPeter Zijlstra #endif
7428eddac3fSPeter Zijlstra 	return 0;
7438eddac3fSPeter Zijlstra }
7448eddac3fSPeter Zijlstra 
7458eddac3fSPeter Zijlstra /*
7468eddac3fSPeter Zijlstra  * Is this the address of a static object:
7478eddac3fSPeter Zijlstra  */
7488dce7a9aSSasha Levin #ifdef __KERNEL__
749108c1485SBart Van Assche static int static_obj(const void *obj)
7508eddac3fSPeter Zijlstra {
7518eddac3fSPeter Zijlstra 	unsigned long start = (unsigned long) &_stext,
7528eddac3fSPeter Zijlstra 		      end   = (unsigned long) &_end,
7538eddac3fSPeter Zijlstra 		      addr  = (unsigned long) obj;
7548eddac3fSPeter Zijlstra 
7557a5da02dSGerald Schaefer 	if (arch_is_kernel_initmem_freed(addr))
7567a5da02dSGerald Schaefer 		return 0;
7577a5da02dSGerald Schaefer 
7588eddac3fSPeter Zijlstra 	/*
7598eddac3fSPeter Zijlstra 	 * static variable?
7608eddac3fSPeter Zijlstra 	 */
7618eddac3fSPeter Zijlstra 	if ((addr >= start) && (addr < end))
7628eddac3fSPeter Zijlstra 		return 1;
7638eddac3fSPeter Zijlstra 
7648eddac3fSPeter Zijlstra 	if (arch_is_kernel_data(addr))
7658eddac3fSPeter Zijlstra 		return 1;
7668eddac3fSPeter Zijlstra 
7678eddac3fSPeter Zijlstra 	/*
7688eddac3fSPeter Zijlstra 	 * in-kernel percpu var?
7698eddac3fSPeter Zijlstra 	 */
7708eddac3fSPeter Zijlstra 	if (is_kernel_percpu_address(addr))
7718eddac3fSPeter Zijlstra 		return 1;
7728eddac3fSPeter Zijlstra 
7738eddac3fSPeter Zijlstra 	/*
7748eddac3fSPeter Zijlstra 	 * module static or percpu var?
7758eddac3fSPeter Zijlstra 	 */
7768eddac3fSPeter Zijlstra 	return is_module_address(addr) || is_module_percpu_address(addr);
7778eddac3fSPeter Zijlstra }
7788dce7a9aSSasha Levin #endif
7798eddac3fSPeter Zijlstra 
7808eddac3fSPeter Zijlstra /*
7818eddac3fSPeter Zijlstra  * To make lock name printouts unique, we calculate a unique
782fe27b0deSBart Van Assche  * class->name_version generation counter. The caller must hold the graph
783fe27b0deSBart Van Assche  * lock.
7848eddac3fSPeter Zijlstra  */
7858eddac3fSPeter Zijlstra static int count_matching_names(struct lock_class *new_class)
7868eddac3fSPeter Zijlstra {
7878eddac3fSPeter Zijlstra 	struct lock_class *class;
7888eddac3fSPeter Zijlstra 	int count = 0;
7898eddac3fSPeter Zijlstra 
7908eddac3fSPeter Zijlstra 	if (!new_class->name)
7918eddac3fSPeter Zijlstra 		return 0;
7928eddac3fSPeter Zijlstra 
793fe27b0deSBart Van Assche 	list_for_each_entry(class, &all_lock_classes, lock_entry) {
7948eddac3fSPeter Zijlstra 		if (new_class->key - new_class->subclass == class->key)
7958eddac3fSPeter Zijlstra 			return class->name_version;
7968eddac3fSPeter Zijlstra 		if (class->name && !strcmp(class->name, new_class->name))
7978eddac3fSPeter Zijlstra 			count = max(count, class->name_version);
7988eddac3fSPeter Zijlstra 	}
7998eddac3fSPeter Zijlstra 
8008eddac3fSPeter Zijlstra 	return count + 1;
8018eddac3fSPeter Zijlstra }
8028eddac3fSPeter Zijlstra 
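/*
 * For example, if two distinct keys both register a class named
 * "&obj->lock" (an illustrative name), the first class gets name_version 1
 * and the second gets name_version 2, which __print_lock_name() above
 * renders as "&obj->lock" and "&obj->lock#2" respectively.
 */
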
803f6f48e18SPeter Zijlstra /* used from NMI context -- must be lockless */
8046eebad1aSPeter Zijlstra static __always_inline struct lock_class *
80508f36ff6SMatthew Wilcox look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
8068eddac3fSPeter Zijlstra {
8078eddac3fSPeter Zijlstra 	struct lockdep_subclass_key *key;
808a63f38ccSAndrew Morton 	struct hlist_head *hash_head;
8098eddac3fSPeter Zijlstra 	struct lock_class *class;
8108eddac3fSPeter Zijlstra 
8118eddac3fSPeter Zijlstra 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
8128eddac3fSPeter Zijlstra 		debug_locks_off();
8138eddac3fSPeter Zijlstra 		printk(KERN_ERR
8148eddac3fSPeter Zijlstra 			"BUG: looking up invalid subclass: %u\n", subclass);
8158eddac3fSPeter Zijlstra 		printk(KERN_ERR
8168eddac3fSPeter Zijlstra 			"turning off the locking correctness validator.\n");
8178eddac3fSPeter Zijlstra 		dump_stack();
8188eddac3fSPeter Zijlstra 		return NULL;
8198eddac3fSPeter Zijlstra 	}
8208eddac3fSPeter Zijlstra 
8218eddac3fSPeter Zijlstra 	/*
82264f29d1bSMatthew Wilcox 	 * If it is not initialised then it has never been locked,
82364f29d1bSMatthew Wilcox 	 * so it won't be present in the hash table.
8248eddac3fSPeter Zijlstra 	 */
82564f29d1bSMatthew Wilcox 	if (unlikely(!lock->key))
82664f29d1bSMatthew Wilcox 		return NULL;
8278eddac3fSPeter Zijlstra 
8288eddac3fSPeter Zijlstra 	/*
8298eddac3fSPeter Zijlstra 	 * NOTE: the class-key must be unique. For dynamic locks, a static
8308eddac3fSPeter Zijlstra 	 * lock_class_key variable is passed in through the mutex_init()
8318eddac3fSPeter Zijlstra 	 * (or spin_lock_init()) call - which acts as the key. For static
8328eddac3fSPeter Zijlstra 	 * locks we use the lock object itself as the key.
8338eddac3fSPeter Zijlstra 	 */
8348eddac3fSPeter Zijlstra 	BUILD_BUG_ON(sizeof(struct lock_class_key) >
8358eddac3fSPeter Zijlstra 			sizeof(struct lockdep_map));
8368eddac3fSPeter Zijlstra 
8378eddac3fSPeter Zijlstra 	key = lock->key->subkeys + subclass;
8388eddac3fSPeter Zijlstra 
8398eddac3fSPeter Zijlstra 	hash_head = classhashentry(key);
8408eddac3fSPeter Zijlstra 
8418eddac3fSPeter Zijlstra 	/*
84235a9393cSPeter Zijlstra 	 * We do an RCU walk of the hash, see lockdep_free_key_range().
8438eddac3fSPeter Zijlstra 	 */
84435a9393cSPeter Zijlstra 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
84535a9393cSPeter Zijlstra 		return NULL;
84635a9393cSPeter Zijlstra 
847a63f38ccSAndrew Morton 	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
8488eddac3fSPeter Zijlstra 		if (class->key == key) {
8498eddac3fSPeter Zijlstra 			/*
8508eddac3fSPeter Zijlstra 			 * Huh! same key, different name? Did someone trample
8518eddac3fSPeter Zijlstra 			 * on some memory? We're most confused.
8528eddac3fSPeter Zijlstra 			 */
85397831546SSebastian Andrzej Siewior 			WARN_ON_ONCE(class->name != lock->name &&
85497831546SSebastian Andrzej Siewior 				     lock->key != &__lockdep_no_validate__);
8558eddac3fSPeter Zijlstra 			return class;
8568eddac3fSPeter Zijlstra 		}
8578eddac3fSPeter Zijlstra 	}
8588eddac3fSPeter Zijlstra 
85964f29d1bSMatthew Wilcox 	return NULL;
86064f29d1bSMatthew Wilcox }
86164f29d1bSMatthew Wilcox 
86264f29d1bSMatthew Wilcox /*
86364f29d1bSMatthew Wilcox  * Static locks do not have their class-keys yet - for them the key is
86464f29d1bSMatthew Wilcox  * the lock object itself. If the lock is in the per cpu area, the
86564f29d1bSMatthew Wilcox  * canonical address of the lock (per cpu offset removed) is used.
86664f29d1bSMatthew Wilcox  */
86764f29d1bSMatthew Wilcox static bool assign_lock_key(struct lockdep_map *lock)
86864f29d1bSMatthew Wilcox {
86964f29d1bSMatthew Wilcox 	unsigned long can_addr, addr = (unsigned long)lock;
87064f29d1bSMatthew Wilcox 
8714bf50862SBart Van Assche #ifdef __KERNEL__
8724bf50862SBart Van Assche 	/*
8734bf50862SBart Van Assche 	 * lockdep_free_key_range() assumes that struct lock_class_key
8744bf50862SBart Van Assche 	 * objects do not overlap. Since we use the address of lock
8754bf50862SBart Van Assche 	 * objects as class key for static objects, check whether the
8764bf50862SBart Van Assche 	 * size of lock_class_key objects does not exceed the size of
8774bf50862SBart Van Assche 	 * the smallest lock object.
8784bf50862SBart Van Assche 	 */
8794bf50862SBart Van Assche 	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
8804bf50862SBart Van Assche #endif
8814bf50862SBart Van Assche 
88264f29d1bSMatthew Wilcox 	if (__is_kernel_percpu_address(addr, &can_addr))
88364f29d1bSMatthew Wilcox 		lock->key = (void *)can_addr;
88464f29d1bSMatthew Wilcox 	else if (__is_module_percpu_address(addr, &can_addr))
88564f29d1bSMatthew Wilcox 		lock->key = (void *)can_addr;
88664f29d1bSMatthew Wilcox 	else if (static_obj(lock))
88764f29d1bSMatthew Wilcox 		lock->key = (void *)lock;
88864f29d1bSMatthew Wilcox 	else {
88964f29d1bSMatthew Wilcox 		/* Debug-check: all keys must be persistent! */
89064f29d1bSMatthew Wilcox 		debug_locks_off();
89164f29d1bSMatthew Wilcox 		pr_err("INFO: trying to register non-static key.\n");
89264f29d1bSMatthew Wilcox 		pr_err("the code is fine but needs lockdep annotation.\n");
89364f29d1bSMatthew Wilcox 		pr_err("turning off the locking correctness validator.\n");
89464f29d1bSMatthew Wilcox 		dump_stack();
89564f29d1bSMatthew Wilcox 		return false;
89664f29d1bSMatthew Wilcox 	}
89764f29d1bSMatthew Wilcox 
89864f29d1bSMatthew Wilcox 	return true;
8998eddac3fSPeter Zijlstra }
9008eddac3fSPeter Zijlstra 
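/*
 * A short sketch of what assign_lock_key() means for lock users: a lock
 * object in static storage (or in a kernel/module percpu area) can use its
 * own address as the key, whereas a dynamically allocated lock must be
 * initialized through an API that supplies a static key, e.g.
 * spin_lock_init(), which declares a static struct lock_class_key at the
 * initialization site. Otherwise lockdep prints "trying to register
 * non-static key." and disables itself. struct foo below is a placeholder.
 */
#if 0	/* illustrative only, never compiled */
	static DEFINE_SPINLOCK(static_lock);	/* static: its own address becomes the key */

	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	spin_lock_init(&f->lock);	/* dynamic: key is the static one behind spin_lock_init() */
#endif
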
90172dcd505SPeter Zijlstra #ifdef CONFIG_DEBUG_LOCKDEP
90272dcd505SPeter Zijlstra 
903b526b2e3SBart Van Assche /* Check whether element @e occurs in list @h */
904b526b2e3SBart Van Assche static bool in_list(struct list_head *e, struct list_head *h)
905b526b2e3SBart Van Assche {
906b526b2e3SBart Van Assche 	struct list_head *f;
907b526b2e3SBart Van Assche 
908b526b2e3SBart Van Assche 	list_for_each(f, h) {
909b526b2e3SBart Van Assche 		if (e == f)
910b526b2e3SBart Van Assche 			return true;
911b526b2e3SBart Van Assche 	}
912b526b2e3SBart Van Assche 
913b526b2e3SBart Van Assche 	return false;
914b526b2e3SBart Van Assche }
915b526b2e3SBart Van Assche 
916b526b2e3SBart Van Assche /*
917b526b2e3SBart Van Assche  * Check whether entry @e occurs in any of the locks_after or locks_before
918b526b2e3SBart Van Assche  * lists.
919b526b2e3SBart Van Assche  */
920b526b2e3SBart Van Assche static bool in_any_class_list(struct list_head *e)
921b526b2e3SBart Van Assche {
922b526b2e3SBart Van Assche 	struct lock_class *class;
923b526b2e3SBart Van Assche 	int i;
924b526b2e3SBart Van Assche 
925b526b2e3SBart Van Assche 	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
926b526b2e3SBart Van Assche 		class = &lock_classes[i];
927b526b2e3SBart Van Assche 		if (in_list(e, &class->locks_after) ||
928b526b2e3SBart Van Assche 		    in_list(e, &class->locks_before))
929b526b2e3SBart Van Assche 			return true;
930b526b2e3SBart Van Assche 	}
931b526b2e3SBart Van Assche 	return false;
932b526b2e3SBart Van Assche }
933b526b2e3SBart Van Assche 
934b526b2e3SBart Van Assche static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
935b526b2e3SBart Van Assche {
936b526b2e3SBart Van Assche 	struct lock_list *e;
937b526b2e3SBart Van Assche 
938b526b2e3SBart Van Assche 	list_for_each_entry(e, h, entry) {
939b526b2e3SBart Van Assche 		if (e->links_to != c) {
940b526b2e3SBart Van Assche 			printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
941b526b2e3SBart Van Assche 			       c->name ? : "(?)",
942b526b2e3SBart Van Assche 			       (unsigned long)(e - list_entries),
943b526b2e3SBart Van Assche 			       e->links_to && e->links_to->name ?
944b526b2e3SBart Van Assche 			       e->links_to->name : "(?)",
945b526b2e3SBart Van Assche 			       e->class && e->class->name ? e->class->name :
946b526b2e3SBart Van Assche 			       "(?)");
947b526b2e3SBart Van Assche 			return false;
948b526b2e3SBart Van Assche 		}
949b526b2e3SBart Van Assche 	}
950b526b2e3SBart Van Assche 	return true;
951b526b2e3SBart Van Assche }
952b526b2e3SBart Van Assche 
9533fe7522fSArnd Bergmann #ifdef CONFIG_PROVE_LOCKING
9543fe7522fSArnd Bergmann static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
9553fe7522fSArnd Bergmann #endif
956b526b2e3SBart Van Assche 
957b526b2e3SBart Van Assche static bool check_lock_chain_key(struct lock_chain *chain)
958b526b2e3SBart Van Assche {
959b526b2e3SBart Van Assche #ifdef CONFIG_PROVE_LOCKING
960f6ec8829SYuyang Du 	u64 chain_key = INITIAL_CHAIN_KEY;
961b526b2e3SBart Van Assche 	int i;
962b526b2e3SBart Van Assche 
963b526b2e3SBart Van Assche 	for (i = chain->base; i < chain->base + chain->depth; i++)
96401bb6f0aSYuyang Du 		chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
965b526b2e3SBart Van Assche 	/*
966b526b2e3SBart Van Assche 	 * The 'unsigned long long' casts avoid a compiler warning being
967b526b2e3SBart Van Assche 	 * reported when building tools/lib/lockdep.
968b526b2e3SBart Van Assche 	 */
96972dcd505SPeter Zijlstra 	if (chain->chain_key != chain_key) {
970b526b2e3SBart Van Assche 		printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
971b526b2e3SBart Van Assche 		       (unsigned long long)(chain - lock_chains),
972b526b2e3SBart Van Assche 		       (unsigned long long)chain->chain_key,
973b526b2e3SBart Van Assche 		       (unsigned long long)chain_key);
97472dcd505SPeter Zijlstra 		return false;
97572dcd505SPeter Zijlstra 	}
976b526b2e3SBart Van Assche #endif
97772dcd505SPeter Zijlstra 	return true;
978b526b2e3SBart Van Assche }
979b526b2e3SBart Van Assche 
980b526b2e3SBart Van Assche static bool in_any_zapped_class_list(struct lock_class *class)
981b526b2e3SBart Van Assche {
982b526b2e3SBart Van Assche 	struct pending_free *pf;
983b526b2e3SBart Van Assche 	int i;
984b526b2e3SBart Van Assche 
98572dcd505SPeter Zijlstra 	for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
986b526b2e3SBart Van Assche 		if (in_list(&class->lock_entry, &pf->zapped))
987b526b2e3SBart Van Assche 			return true;
98872dcd505SPeter Zijlstra 	}
989b526b2e3SBart Van Assche 
990b526b2e3SBart Van Assche 	return false;
991b526b2e3SBart Van Assche }
992b526b2e3SBart Van Assche 
99372dcd505SPeter Zijlstra static bool __check_data_structures(void)
994b526b2e3SBart Van Assche {
995b526b2e3SBart Van Assche 	struct lock_class *class;
996b526b2e3SBart Van Assche 	struct lock_chain *chain;
997b526b2e3SBart Van Assche 	struct hlist_head *head;
998b526b2e3SBart Van Assche 	struct lock_list *e;
999b526b2e3SBart Van Assche 	int i;
1000b526b2e3SBart Van Assche 
1001b526b2e3SBart Van Assche 	/* Check whether all classes occur in a lock list. */
1002b526b2e3SBart Van Assche 	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1003b526b2e3SBart Van Assche 		class = &lock_classes[i];
1004b526b2e3SBart Van Assche 		if (!in_list(&class->lock_entry, &all_lock_classes) &&
1005b526b2e3SBart Van Assche 		    !in_list(&class->lock_entry, &free_lock_classes) &&
1006b526b2e3SBart Van Assche 		    !in_any_zapped_class_list(class)) {
1007b526b2e3SBart Van Assche 			printk(KERN_INFO "class %px/%s is not in any class list\n",
1008b526b2e3SBart Van Assche 			       class, class->name ? : "(?)");
1009b526b2e3SBart Van Assche 			return false;
1010b526b2e3SBart Van Assche 		}
1011b526b2e3SBart Van Assche 	}
1012b526b2e3SBart Van Assche 
1013b526b2e3SBart Van Assche 	/* Check whether all classes have valid lock lists. */
1014b526b2e3SBart Van Assche 	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1015b526b2e3SBart Van Assche 		class = &lock_classes[i];
1016b526b2e3SBart Van Assche 		if (!class_lock_list_valid(class, &class->locks_before))
1017b526b2e3SBart Van Assche 			return false;
1018b526b2e3SBart Van Assche 		if (!class_lock_list_valid(class, &class->locks_after))
1019b526b2e3SBart Van Assche 			return false;
1020b526b2e3SBart Van Assche 	}
1021b526b2e3SBart Van Assche 
1022b526b2e3SBart Van Assche 	/* Check the chain_key of all lock chains. */
1023b526b2e3SBart Van Assche 	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
1024b526b2e3SBart Van Assche 		head = chainhash_table + i;
1025b526b2e3SBart Van Assche 		hlist_for_each_entry_rcu(chain, head, entry) {
1026b526b2e3SBart Van Assche 			if (!check_lock_chain_key(chain))
1027b526b2e3SBart Van Assche 				return false;
1028b526b2e3SBart Van Assche 		}
1029b526b2e3SBart Van Assche 	}
1030b526b2e3SBart Van Assche 
1031b526b2e3SBart Van Assche 	/*
1032b526b2e3SBart Van Assche 	 * Check whether all list entries that are in use occur in a class
1033b526b2e3SBart Van Assche 	 * lock list.
1034b526b2e3SBart Van Assche 	 */
1035b526b2e3SBart Van Assche 	for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
1036b526b2e3SBart Van Assche 		e = list_entries + i;
1037b526b2e3SBart Van Assche 		if (!in_any_class_list(&e->entry)) {
1038b526b2e3SBart Van Assche 			printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
1039b526b2e3SBart Van Assche 			       (unsigned int)(e - list_entries),
1040b526b2e3SBart Van Assche 			       e->class->name ? : "(?)",
1041b526b2e3SBart Van Assche 			       e->links_to->name ? : "(?)");
1042b526b2e3SBart Van Assche 			return false;
1043b526b2e3SBart Van Assche 		}
1044b526b2e3SBart Van Assche 	}
1045b526b2e3SBart Van Assche 
1046b526b2e3SBart Van Assche 	/*
1047b526b2e3SBart Van Assche 	 * Check whether all list entries that are not in use do not occur in
1048b526b2e3SBart Van Assche 	 * a class lock list.
1049b526b2e3SBart Van Assche 	 */
1050b526b2e3SBart Van Assche 	for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
1051b526b2e3SBart Van Assche 		e = list_entries + i;
1052b526b2e3SBart Van Assche 		if (in_any_class_list(&e->entry)) {
1053b526b2e3SBart Van Assche 			printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
1054b526b2e3SBart Van Assche 			       (unsigned int)(e - list_entries),
1055b526b2e3SBart Van Assche 			       e->class && e->class->name ? e->class->name :
1056b526b2e3SBart Van Assche 			       "(?)",
1057b526b2e3SBart Van Assche 			       e->links_to && e->links_to->name ?
1058b526b2e3SBart Van Assche 			       e->links_to->name : "(?)");
1059b526b2e3SBart Van Assche 			return false;
1060b526b2e3SBart Van Assche 		}
1061b526b2e3SBart Van Assche 	}
1062b526b2e3SBart Van Assche 
1063b526b2e3SBart Van Assche 	return true;
1064b526b2e3SBart Van Assche }
1065b526b2e3SBart Van Assche 
106672dcd505SPeter Zijlstra int check_consistency = 0;
106772dcd505SPeter Zijlstra module_param(check_consistency, int, 0644);
106872dcd505SPeter Zijlstra 
106972dcd505SPeter Zijlstra static void check_data_structures(void)
107072dcd505SPeter Zijlstra {
107172dcd505SPeter Zijlstra 	static bool once = false;
107272dcd505SPeter Zijlstra 
107372dcd505SPeter Zijlstra 	if (check_consistency && !once) {
107472dcd505SPeter Zijlstra 		if (!__check_data_structures()) {
107572dcd505SPeter Zijlstra 			once = true;
107672dcd505SPeter Zijlstra 			WARN_ON(once);
107772dcd505SPeter Zijlstra 		}
107872dcd505SPeter Zijlstra 	}
107972dcd505SPeter Zijlstra }
108072dcd505SPeter Zijlstra 
108172dcd505SPeter Zijlstra #else /* CONFIG_DEBUG_LOCKDEP */
108272dcd505SPeter Zijlstra 
108372dcd505SPeter Zijlstra static inline void check_data_structures(void) { }
108472dcd505SPeter Zijlstra 
108572dcd505SPeter Zijlstra #endif /* CONFIG_DEBUG_LOCKDEP */
108672dcd505SPeter Zijlstra 
1087810507feSWaiman Long static void init_chain_block_buckets(void);
1088810507feSWaiman Long 
10898eddac3fSPeter Zijlstra /*
1090a0b0fd53SBart Van Assche  * Initialize the lock_classes[] array elements, the free_lock_classes list
1091a0b0fd53SBart Van Assche  * and also the delayed_free structure.
1092feb0a386SBart Van Assche  */
1093feb0a386SBart Van Assche static void init_data_structures_once(void)
1094feb0a386SBart Van Assche {
1095810507feSWaiman Long 	static bool __read_mostly ds_initialized, rcu_head_initialized;
1096feb0a386SBart Van Assche 	int i;
1097feb0a386SBart Van Assche 
10980126574fSBart Van Assche 	if (likely(rcu_head_initialized))
1099feb0a386SBart Van Assche 		return;
1100feb0a386SBart Van Assche 
11010126574fSBart Van Assche 	if (system_state >= SYSTEM_SCHEDULING) {
1102a0b0fd53SBart Van Assche 		init_rcu_head(&delayed_free.rcu_head);
11030126574fSBart Van Assche 		rcu_head_initialized = true;
11040126574fSBart Van Assche 	}
11050126574fSBart Van Assche 
11060126574fSBart Van Assche 	if (ds_initialized)
11070126574fSBart Van Assche 		return;
11080126574fSBart Van Assche 
11090126574fSBart Van Assche 	ds_initialized = true;
11100126574fSBart Van Assche 
1111a0b0fd53SBart Van Assche 	INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
1112a0b0fd53SBart Van Assche 	INIT_LIST_HEAD(&delayed_free.pf[1].zapped);
1113a0b0fd53SBart Van Assche 
1114feb0a386SBart Van Assche 	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1115a0b0fd53SBart Van Assche 		list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
1116feb0a386SBart Van Assche 		INIT_LIST_HEAD(&lock_classes[i].locks_after);
1117feb0a386SBart Van Assche 		INIT_LIST_HEAD(&lock_classes[i].locks_before);
1118feb0a386SBart Van Assche 	}
1119810507feSWaiman Long 	init_chain_block_buckets();
1120feb0a386SBart Van Assche }
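/*
 * Added note on the two flags above: ds_initialized guards the one-time
 * setup of the static lists and arrays, which is safe at any point during
 * boot, while rcu_head_initialized is only set once the scheduler is
 * running (system_state >= SYSTEM_SCHEDULING), presumably because
 * init_rcu_head() is not safe to call earlier; until then, each call
 * re-attempts the rcu_head initialization.
 */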
1121feb0a386SBart Van Assche 
1122108c1485SBart Van Assche static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
1123108c1485SBart Van Assche {
1124108c1485SBart Van Assche 	unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
1125108c1485SBart Van Assche 
1126108c1485SBart Van Assche 	return lock_keys_hash + hash;
1127108c1485SBart Van Assche }
1128108c1485SBart Van Assche 
1129108c1485SBart Van Assche /* Register a dynamically allocated key. */
1130108c1485SBart Van Assche void lockdep_register_key(struct lock_class_key *key)
1131108c1485SBart Van Assche {
1132108c1485SBart Van Assche 	struct hlist_head *hash_head;
1133108c1485SBart Van Assche 	struct lock_class_key *k;
1134108c1485SBart Van Assche 	unsigned long flags;
1135108c1485SBart Van Assche 
1136108c1485SBart Van Assche 	if (WARN_ON_ONCE(static_obj(key)))
1137108c1485SBart Van Assche 		return;
1138108c1485SBart Van Assche 	hash_head = keyhashentry(key);
1139108c1485SBart Van Assche 
1140108c1485SBart Van Assche 	raw_local_irq_save(flags);
1141108c1485SBart Van Assche 	if (!graph_lock())
1142108c1485SBart Van Assche 		goto restore_irqs;
1143108c1485SBart Van Assche 	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1144108c1485SBart Van Assche 		if (WARN_ON_ONCE(k == key))
1145108c1485SBart Van Assche 			goto out_unlock;
1146108c1485SBart Van Assche 	}
1147108c1485SBart Van Assche 	hlist_add_head_rcu(&key->hash_entry, hash_head);
1148108c1485SBart Van Assche out_unlock:
1149108c1485SBart Van Assche 	graph_unlock();
1150108c1485SBart Van Assche restore_irqs:
1151108c1485SBart Van Assche 	raw_local_irq_restore(flags);
1152108c1485SBart Van Assche }
1153108c1485SBart Van Assche EXPORT_SYMBOL_GPL(lockdep_register_key);
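/*
 * Example usage (illustrative sketch; the caller's struct, field and
 * function names are hypothetical): a subsystem embedding a lock_class_key
 * in dynamically allocated memory would pair this with
 * lockdep_unregister_key(), roughly:
 *
 *	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	lockdep_register_key(&obj->key);
 *	lockdep_init_map(&obj->dep_map, "my_obj_lock", &obj->key, 0);
 *	...
 *	lockdep_unregister_key(&obj->key);	(before kfree(obj))
 *
 * Note the WARN_ON_ONCE(static_obj(key)) above: statically allocated keys
 * must not be registered this way.
 */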
1154108c1485SBart Van Assche 
1155108c1485SBart Van Assche /* Check whether a key has been registered as a dynamic key. */
1156108c1485SBart Van Assche static bool is_dynamic_key(const struct lock_class_key *key)
1157108c1485SBart Van Assche {
1158108c1485SBart Van Assche 	struct hlist_head *hash_head;
1159108c1485SBart Van Assche 	struct lock_class_key *k;
1160108c1485SBart Van Assche 	bool found = false;
1161108c1485SBart Van Assche 
1162108c1485SBart Van Assche 	if (WARN_ON_ONCE(static_obj(key)))
1163108c1485SBart Van Assche 		return false;
1164108c1485SBart Van Assche 
1165108c1485SBart Van Assche 	/*
1166108c1485SBart Van Assche 	 * If lock debugging is disabled lock_keys_hash[] may contain
1167108c1485SBart Van Assche 	 * pointers to memory that has already been freed. Avoid triggering
1168108c1485SBart Van Assche 	 * a use-after-free in that case by returning early.
1169108c1485SBart Van Assche 	 */
1170108c1485SBart Van Assche 	if (!debug_locks)
1171108c1485SBart Van Assche 		return true;
1172108c1485SBart Van Assche 
1173108c1485SBart Van Assche 	hash_head = keyhashentry(key);
1174108c1485SBart Van Assche 
1175108c1485SBart Van Assche 	rcu_read_lock();
1176108c1485SBart Van Assche 	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1177108c1485SBart Van Assche 		if (k == key) {
1178108c1485SBart Van Assche 			found = true;
1179108c1485SBart Van Assche 			break;
1180108c1485SBart Van Assche 		}
1181108c1485SBart Van Assche 	}
1182108c1485SBart Van Assche 	rcu_read_unlock();
1183108c1485SBart Van Assche 
1184108c1485SBart Van Assche 	return found;
1185108c1485SBart Van Assche }
1186108c1485SBart Van Assche 
1187feb0a386SBart Van Assche /*
11888eddac3fSPeter Zijlstra  * Register a lock's class in the hash-table, if the class is not present
11898eddac3fSPeter Zijlstra  * yet. Otherwise we look it up. We cache the result in the lock object
11908eddac3fSPeter Zijlstra  * itself, so the actual hash lookup should happen only once per lock object.
11918eddac3fSPeter Zijlstra  */
1192c003ed92SDenys Vlasenko static struct lock_class *
11938eddac3fSPeter Zijlstra register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
11948eddac3fSPeter Zijlstra {
11958eddac3fSPeter Zijlstra 	struct lockdep_subclass_key *key;
1196a63f38ccSAndrew Morton 	struct hlist_head *hash_head;
11978eddac3fSPeter Zijlstra 	struct lock_class *class;
119835a9393cSPeter Zijlstra 
119935a9393cSPeter Zijlstra 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
12008eddac3fSPeter Zijlstra 
12018eddac3fSPeter Zijlstra 	class = look_up_lock_class(lock, subclass);
120264f29d1bSMatthew Wilcox 	if (likely(class))
12038eddac3fSPeter Zijlstra 		goto out_set_class_cache;
12048eddac3fSPeter Zijlstra 
120564f29d1bSMatthew Wilcox 	if (!lock->key) {
120664f29d1bSMatthew Wilcox 		if (!assign_lock_key(lock))
120764f29d1bSMatthew Wilcox 			return NULL;
1208108c1485SBart Van Assche 	} else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
12098eddac3fSPeter Zijlstra 		return NULL;
12108eddac3fSPeter Zijlstra 	}
12118eddac3fSPeter Zijlstra 
12128eddac3fSPeter Zijlstra 	key = lock->key->subkeys + subclass;
12138eddac3fSPeter Zijlstra 	hash_head = classhashentry(key);
12148eddac3fSPeter Zijlstra 
12158eddac3fSPeter Zijlstra 	if (!graph_lock()) {
12168eddac3fSPeter Zijlstra 		return NULL;
12178eddac3fSPeter Zijlstra 	}
12188eddac3fSPeter Zijlstra 	/*
12198eddac3fSPeter Zijlstra 	 * We have to do the hash-walk again, to avoid races
12208eddac3fSPeter Zijlstra 	 * with another CPU:
12218eddac3fSPeter Zijlstra 	 */
1222a63f38ccSAndrew Morton 	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
12238eddac3fSPeter Zijlstra 		if (class->key == key)
12248eddac3fSPeter Zijlstra 			goto out_unlock_set;
122535a9393cSPeter Zijlstra 	}
122635a9393cSPeter Zijlstra 
1227feb0a386SBart Van Assche 	init_data_structures_once();
1228feb0a386SBart Van Assche 
1229a0b0fd53SBart Van Assche 	/* Allocate a new lock class and add it to the hash. */
1230a0b0fd53SBart Van Assche 	class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
1231a0b0fd53SBart Van Assche 					 lock_entry);
1232a0b0fd53SBart Van Assche 	if (!class) {
12338eddac3fSPeter Zijlstra 		if (!debug_locks_off_graph_unlock()) {
12348eddac3fSPeter Zijlstra 			return NULL;
12358eddac3fSPeter Zijlstra 		}
12368eddac3fSPeter Zijlstra 
12378eddac3fSPeter Zijlstra 		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
12388eddac3fSPeter Zijlstra 		dump_stack();
12398eddac3fSPeter Zijlstra 		return NULL;
12408eddac3fSPeter Zijlstra 	}
1241a0b0fd53SBart Van Assche 	nr_lock_classes++;
124201bb6f0aSYuyang Du 	__set_bit(class - lock_classes, lock_classes_in_use);
12438eddac3fSPeter Zijlstra 	debug_atomic_inc(nr_unused_locks);
12448eddac3fSPeter Zijlstra 	class->key = key;
12458eddac3fSPeter Zijlstra 	class->name = lock->name;
12468eddac3fSPeter Zijlstra 	class->subclass = subclass;
1247feb0a386SBart Van Assche 	WARN_ON_ONCE(!list_empty(&class->locks_before));
1248feb0a386SBart Van Assche 	WARN_ON_ONCE(!list_empty(&class->locks_after));
12498eddac3fSPeter Zijlstra 	class->name_version = count_matching_names(class);
1250de8f5e4fSPeter Zijlstra 	class->wait_type_inner = lock->wait_type_inner;
1251de8f5e4fSPeter Zijlstra 	class->wait_type_outer = lock->wait_type_outer;
12528eddac3fSPeter Zijlstra 	/*
12538eddac3fSPeter Zijlstra 	 * We use RCU's safe list-add method to make
12548eddac3fSPeter Zijlstra 	 * parallel walking of the hash-list safe:
12558eddac3fSPeter Zijlstra 	 */
1256a63f38ccSAndrew Morton 	hlist_add_head_rcu(&class->hash_entry, hash_head);
12578eddac3fSPeter Zijlstra 	/*
1258a0b0fd53SBart Van Assche 	 * Remove the class from the free list and add it to the global list
1259a0b0fd53SBart Van Assche 	 * of classes.
12608eddac3fSPeter Zijlstra 	 */
1261a0b0fd53SBart Van Assche 	list_move_tail(&class->lock_entry, &all_lock_classes);
12628eddac3fSPeter Zijlstra 
12638eddac3fSPeter Zijlstra 	if (verbose(class)) {
12648eddac3fSPeter Zijlstra 		graph_unlock();
12658eddac3fSPeter Zijlstra 
126604860d48SBorislav Petkov 		printk("\nnew class %px: %s", class->key, class->name);
12678eddac3fSPeter Zijlstra 		if (class->name_version > 1)
1268f943fe0fSDmitry Vyukov 			printk(KERN_CONT "#%d", class->name_version);
1269f943fe0fSDmitry Vyukov 		printk(KERN_CONT "\n");
12708eddac3fSPeter Zijlstra 		dump_stack();
12718eddac3fSPeter Zijlstra 
12728eddac3fSPeter Zijlstra 		if (!graph_lock()) {
12738eddac3fSPeter Zijlstra 			return NULL;
12748eddac3fSPeter Zijlstra 		}
12758eddac3fSPeter Zijlstra 	}
12768eddac3fSPeter Zijlstra out_unlock_set:
12778eddac3fSPeter Zijlstra 	graph_unlock();
12788eddac3fSPeter Zijlstra 
12798eddac3fSPeter Zijlstra out_set_class_cache:
12808eddac3fSPeter Zijlstra 	if (!subclass || force)
12818eddac3fSPeter Zijlstra 		lock->class_cache[0] = class;
12828eddac3fSPeter Zijlstra 	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
12838eddac3fSPeter Zijlstra 		lock->class_cache[subclass] = class;
12848eddac3fSPeter Zijlstra 
12858eddac3fSPeter Zijlstra 	/*
12868eddac3fSPeter Zijlstra 	 * Hash collision, did we smoke some? We found a class with a matching
12878eddac3fSPeter Zijlstra 	 * hash but the subclass -- which is hashed in -- didn't match.
12888eddac3fSPeter Zijlstra 	 */
12898eddac3fSPeter Zijlstra 	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
12908eddac3fSPeter Zijlstra 		return NULL;
12918eddac3fSPeter Zijlstra 
12928eddac3fSPeter Zijlstra 	return class;
12938eddac3fSPeter Zijlstra }
12948eddac3fSPeter Zijlstra 
12958eddac3fSPeter Zijlstra #ifdef CONFIG_PROVE_LOCKING
12968eddac3fSPeter Zijlstra /*
12978eddac3fSPeter Zijlstra  * Allocate a lockdep entry. (assumes the graph_lock is held, returns
12988eddac3fSPeter Zijlstra  * with NULL on failure)
12998eddac3fSPeter Zijlstra  */
13008eddac3fSPeter Zijlstra static struct lock_list *alloc_list_entry(void)
13018eddac3fSPeter Zijlstra {
1302ace35a7aSBart Van Assche 	int idx = find_first_zero_bit(list_entries_in_use,
1303ace35a7aSBart Van Assche 				      ARRAY_SIZE(list_entries));
1304ace35a7aSBart Van Assche 
1305ace35a7aSBart Van Assche 	if (idx >= ARRAY_SIZE(list_entries)) {
13068eddac3fSPeter Zijlstra 		if (!debug_locks_off_graph_unlock())
13078eddac3fSPeter Zijlstra 			return NULL;
13088eddac3fSPeter Zijlstra 
13098eddac3fSPeter Zijlstra 		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
13108eddac3fSPeter Zijlstra 		dump_stack();
13118eddac3fSPeter Zijlstra 		return NULL;
13128eddac3fSPeter Zijlstra 	}
1313ace35a7aSBart Van Assche 	nr_list_entries++;
1314ace35a7aSBart Van Assche 	__set_bit(idx, list_entries_in_use);
1315ace35a7aSBart Van Assche 	return list_entries + idx;
13168eddac3fSPeter Zijlstra }
13178eddac3fSPeter Zijlstra 
13188eddac3fSPeter Zijlstra /*
13198eddac3fSPeter Zijlstra  * Add a new dependency to the head of the list:
13208eddac3fSPeter Zijlstra  */
132186cffb80SBart Van Assche static int add_lock_to_list(struct lock_class *this,
132286cffb80SBart Van Assche 			    struct lock_class *links_to, struct list_head *head,
132383f06168STahsin Erdogan 			    unsigned long ip, int distance,
132412593b74SBart Van Assche 			    const struct lock_trace *trace)
13258eddac3fSPeter Zijlstra {
13268eddac3fSPeter Zijlstra 	struct lock_list *entry;
13278eddac3fSPeter Zijlstra 	/*
13288eddac3fSPeter Zijlstra 	 * Lock not present yet - get a new dependency struct and
13298eddac3fSPeter Zijlstra 	 * add it to the list:
13308eddac3fSPeter Zijlstra 	 */
13318eddac3fSPeter Zijlstra 	entry = alloc_list_entry();
13328eddac3fSPeter Zijlstra 	if (!entry)
13338eddac3fSPeter Zijlstra 		return 0;
13348eddac3fSPeter Zijlstra 
13358eddac3fSPeter Zijlstra 	entry->class = this;
133686cffb80SBart Van Assche 	entry->links_to = links_to;
13378eddac3fSPeter Zijlstra 	entry->distance = distance;
133812593b74SBart Van Assche 	entry->trace = trace;
13398eddac3fSPeter Zijlstra 	/*
134035a9393cSPeter Zijlstra 	 * Both allocation and removal are done under the graph lock; but
134135a9393cSPeter Zijlstra 	 * iteration is under RCU-sched; see look_up_lock_class() and
134235a9393cSPeter Zijlstra 	 * lockdep_free_key_range().
13438eddac3fSPeter Zijlstra 	 */
13448eddac3fSPeter Zijlstra 	list_add_tail_rcu(&entry->entry, head);
13458eddac3fSPeter Zijlstra 
13468eddac3fSPeter Zijlstra 	return 1;
13478eddac3fSPeter Zijlstra }
13488eddac3fSPeter Zijlstra 
13498eddac3fSPeter Zijlstra /*
13508eddac3fSPeter Zijlstra  * We use a power-of-2 queue size so the modulo (wrap-around) arithmetic is a cheap bitwise AND
13518eddac3fSPeter Zijlstra  */
13528eddac3fSPeter Zijlstra #define MAX_CIRCULAR_QUEUE_SIZE		4096UL
13538eddac3fSPeter Zijlstra #define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)
13548eddac3fSPeter Zijlstra 
13558eddac3fSPeter Zijlstra /*
1356aa480771SYuyang Du  * The circular_queue and helpers are used to implement graph
1357aa480771SYuyang Du  * breadth-first search (BFS) algorithm, by which we can determine
1358aa480771SYuyang Du  * whether there is a path from a lock to another. In deadlock checks,
1359aa480771SYuyang Du  * a path from the next lock to be acquired to a previous held lock
1360aa480771SYuyang Du  * indicates that adding the <prev> -> <next> lock dependency will
1361aa480771SYuyang Du  * produce a circle in the graph. Breadth-first search instead of
1362aa480771SYuyang Du  * depth-first search is used in order to find the shortest (circular)
1363aa480771SYuyang Du  * path.
13648eddac3fSPeter Zijlstra  */
13658eddac3fSPeter Zijlstra struct circular_queue {
1366aa480771SYuyang Du 	struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
13678eddac3fSPeter Zijlstra 	unsigned int  front, rear;
13688eddac3fSPeter Zijlstra };
13698eddac3fSPeter Zijlstra 
13708eddac3fSPeter Zijlstra static struct circular_queue lock_cq;
13718eddac3fSPeter Zijlstra 
13728eddac3fSPeter Zijlstra unsigned int max_bfs_queue_depth;
13738eddac3fSPeter Zijlstra 
13748eddac3fSPeter Zijlstra static unsigned int lockdep_dependency_gen_id;
13758eddac3fSPeter Zijlstra 
13768eddac3fSPeter Zijlstra static inline void __cq_init(struct circular_queue *cq)
13778eddac3fSPeter Zijlstra {
13788eddac3fSPeter Zijlstra 	cq->front = cq->rear = 0;
13798eddac3fSPeter Zijlstra 	lockdep_dependency_gen_id++;
13808eddac3fSPeter Zijlstra }
13818eddac3fSPeter Zijlstra 
13828eddac3fSPeter Zijlstra static inline int __cq_empty(struct circular_queue *cq)
13838eddac3fSPeter Zijlstra {
13848eddac3fSPeter Zijlstra 	return (cq->front == cq->rear);
13858eddac3fSPeter Zijlstra }
13868eddac3fSPeter Zijlstra 
13878eddac3fSPeter Zijlstra static inline int __cq_full(struct circular_queue *cq)
13888eddac3fSPeter Zijlstra {
13898eddac3fSPeter Zijlstra 	return ((cq->rear + 1) & CQ_MASK) == cq->front;
13908eddac3fSPeter Zijlstra }
13918eddac3fSPeter Zijlstra 
1392aa480771SYuyang Du static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
13938eddac3fSPeter Zijlstra {
13948eddac3fSPeter Zijlstra 	if (__cq_full(cq))
13958eddac3fSPeter Zijlstra 		return -1;
13968eddac3fSPeter Zijlstra 
13978eddac3fSPeter Zijlstra 	cq->element[cq->rear] = elem;
13988eddac3fSPeter Zijlstra 	cq->rear = (cq->rear + 1) & CQ_MASK;
13998eddac3fSPeter Zijlstra 	return 0;
14008eddac3fSPeter Zijlstra }
14018eddac3fSPeter Zijlstra 
1402c1661325SYuyang Du /*
1403c1661325SYuyang Du  * Dequeue an element from the circular_queue, return a lock_list if
1404c1661325SYuyang Du  * the queue is not empty, or NULL otherwise.
1405c1661325SYuyang Du  */
1406c1661325SYuyang Du static inline struct lock_list *__cq_dequeue(struct circular_queue *cq)
14078eddac3fSPeter Zijlstra {
1408c1661325SYuyang Du 	struct lock_list *lock;
14098eddac3fSPeter Zijlstra 
1410c1661325SYuyang Du 	if (__cq_empty(cq))
1411c1661325SYuyang Du 		return NULL;
1412c1661325SYuyang Du 
1413c1661325SYuyang Du 	lock = cq->element[cq->front];
14148eddac3fSPeter Zijlstra 	cq->front = (cq->front + 1) & CQ_MASK;
1415c1661325SYuyang Du 
1416c1661325SYuyang Du 	return lock;
14178eddac3fSPeter Zijlstra }
14188eddac3fSPeter Zijlstra 
14198eddac3fSPeter Zijlstra static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
14208eddac3fSPeter Zijlstra {
14218eddac3fSPeter Zijlstra 	return (cq->rear - cq->front) & CQ_MASK;
14228eddac3fSPeter Zijlstra }
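/*
 * Worked example of the wrap-around arithmetic above (illustrative): with
 * MAX_CIRCULAR_QUEUE_SIZE = 4096, CQ_MASK = 0xfff. If front = 100 and
 * rear = 4095, an enqueue advances rear to (4095 + 1) & 0xfff = 0, and
 * __cq_get_elem_count() then returns (0 - 100) & 0xfff = 3996; the
 * power-of-2 size turns the modulo into a single bitwise AND.
 */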
14238eddac3fSPeter Zijlstra 
14248eddac3fSPeter Zijlstra static inline void mark_lock_accessed(struct lock_list *lock,
14258eddac3fSPeter Zijlstra 					struct lock_list *parent)
14268eddac3fSPeter Zijlstra {
14278eddac3fSPeter Zijlstra 	unsigned long nr;
14288eddac3fSPeter Zijlstra 
14298eddac3fSPeter Zijlstra 	nr = lock - list_entries;
1430ace35a7aSBart Van Assche 	WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
14318eddac3fSPeter Zijlstra 	lock->parent = parent;
14328eddac3fSPeter Zijlstra 	lock->class->dep_gen_id = lockdep_dependency_gen_id;
14338eddac3fSPeter Zijlstra }
14348eddac3fSPeter Zijlstra 
14358eddac3fSPeter Zijlstra static inline unsigned long lock_accessed(struct lock_list *lock)
14368eddac3fSPeter Zijlstra {
14378eddac3fSPeter Zijlstra 	unsigned long nr;
14388eddac3fSPeter Zijlstra 
14398eddac3fSPeter Zijlstra 	nr = lock - list_entries;
1440ace35a7aSBart Van Assche 	WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
14418eddac3fSPeter Zijlstra 	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
14428eddac3fSPeter Zijlstra }
14438eddac3fSPeter Zijlstra 
14448eddac3fSPeter Zijlstra static inline struct lock_list *get_lock_parent(struct lock_list *child)
14458eddac3fSPeter Zijlstra {
14468eddac3fSPeter Zijlstra 	return child->parent;
14478eddac3fSPeter Zijlstra }
14488eddac3fSPeter Zijlstra 
14498eddac3fSPeter Zijlstra static inline int get_lock_depth(struct lock_list *child)
14508eddac3fSPeter Zijlstra {
14518eddac3fSPeter Zijlstra 	int depth = 0;
14528eddac3fSPeter Zijlstra 	struct lock_list *parent;
14538eddac3fSPeter Zijlstra 
14548eddac3fSPeter Zijlstra 	while ((parent = get_lock_parent(child))) {
14558eddac3fSPeter Zijlstra 		child = parent;
14568eddac3fSPeter Zijlstra 		depth++;
14578eddac3fSPeter Zijlstra 	}
14588eddac3fSPeter Zijlstra 	return depth;
14598eddac3fSPeter Zijlstra }
14608eddac3fSPeter Zijlstra 
146177a80692SYuyang Du /*
146277a80692SYuyang Du  * Return the forward or backward dependency list.
146377a80692SYuyang Du  *
146477a80692SYuyang Du  * @lock:   the lock_list to get its class's dependency list
146577a80692SYuyang Du  * @offset: the offset to struct lock_class to determine whether it is
146677a80692SYuyang Du  *          locks_after or locks_before
146777a80692SYuyang Du  */
146877a80692SYuyang Du static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
146977a80692SYuyang Du {
147077a80692SYuyang Du 	void *lock_class = lock->class;
147177a80692SYuyang Du 
147277a80692SYuyang Du 	return lock_class + offset;
147377a80692SYuyang Du }
1474*b11be024SBoqun Feng /*
1475*b11be024SBoqun Feng  * Return values of a bfs search:
1476*b11be024SBoqun Feng  *
1477*b11be024SBoqun Feng  * BFS_E* indicates an error
1478*b11be024SBoqun Feng  * BFS_R* indicates a result (match or not)
1479*b11be024SBoqun Feng  *
1480*b11be024SBoqun Feng  * BFS_EINVALIDNODE: An invalid node was found in the graph.
1481*b11be024SBoqun Feng  *
1482*b11be024SBoqun Feng  * BFS_EQUEUEFULL: The queue became full during the bfs.
1483*b11be024SBoqun Feng  *
1484*b11be024SBoqun Feng  * BFS_RMATCH: The matching node was found in the graph, and that node is put
1485*b11be024SBoqun Feng  *             into *@target_entry.
1486*b11be024SBoqun Feng  *
1487*b11be024SBoqun Feng  * BFS_RNOMATCH: No matching node was found, and *@target_entry is kept
1488*b11be024SBoqun Feng  *               _unchanged_.
1489*b11be024SBoqun Feng  */
1490*b11be024SBoqun Feng enum bfs_result {
1491*b11be024SBoqun Feng 	BFS_EINVALIDNODE = -2,
1492*b11be024SBoqun Feng 	BFS_EQUEUEFULL = -1,
1493*b11be024SBoqun Feng 	BFS_RMATCH = 0,
1494*b11be024SBoqun Feng 	BFS_RNOMATCH = 1,
1495*b11be024SBoqun Feng };
1496*b11be024SBoqun Feng 
1497*b11be024SBoqun Feng /*
1498*b11be024SBoqun Feng  * bfs_result < 0 means error
1499*b11be024SBoqun Feng  */
1500*b11be024SBoqun Feng static inline bool bfs_error(enum bfs_result res)
1501*b11be024SBoqun Feng {
1502*b11be024SBoqun Feng 	return res < 0;
1503*b11be024SBoqun Feng }
150477a80692SYuyang Du 
1505154f185eSYuyang Du /*
1506154f185eSYuyang Du  * Forward- or backward-dependency search, used for both circular dependency
1507154f185eSYuyang Du  * checking and hardirq-unsafe/softirq-unsafe checking.
1508154f185eSYuyang Du  */
1509*b11be024SBoqun Feng static enum bfs_result __bfs(struct lock_list *source_entry,
15108eddac3fSPeter Zijlstra 			     void *data,
15118eddac3fSPeter Zijlstra 			     int (*match)(struct lock_list *entry, void *data),
15128eddac3fSPeter Zijlstra 			     struct lock_list **target_entry,
151377a80692SYuyang Du 			     int offset)
15148eddac3fSPeter Zijlstra {
15158eddac3fSPeter Zijlstra 	struct lock_list *entry;
1516c1661325SYuyang Du 	struct lock_list *lock;
15178eddac3fSPeter Zijlstra 	struct list_head *head;
15188eddac3fSPeter Zijlstra 	struct circular_queue *cq = &lock_cq;
1519*b11be024SBoqun Feng 	enum bfs_result ret = BFS_RNOMATCH;
15208eddac3fSPeter Zijlstra 
1521248efb21SPeter Zijlstra 	lockdep_assert_locked();
1522248efb21SPeter Zijlstra 
15238eddac3fSPeter Zijlstra 	if (match(source_entry, data)) {
15248eddac3fSPeter Zijlstra 		*target_entry = source_entry;
1525*b11be024SBoqun Feng 		ret = BFS_RMATCH;
15268eddac3fSPeter Zijlstra 		goto exit;
15278eddac3fSPeter Zijlstra 	}
15288eddac3fSPeter Zijlstra 
152977a80692SYuyang Du 	head = get_dep_list(source_entry, offset);
15308eddac3fSPeter Zijlstra 	if (list_empty(head))
15318eddac3fSPeter Zijlstra 		goto exit;
15328eddac3fSPeter Zijlstra 
15338eddac3fSPeter Zijlstra 	__cq_init(cq);
1534aa480771SYuyang Du 	__cq_enqueue(cq, source_entry);
15358eddac3fSPeter Zijlstra 
1536c1661325SYuyang Du 	while ((lock = __cq_dequeue(cq))) {
15378eddac3fSPeter Zijlstra 
15388eddac3fSPeter Zijlstra 		if (!lock->class) {
1539*b11be024SBoqun Feng 			ret = BFS_EINVALIDNODE;
15408eddac3fSPeter Zijlstra 			goto exit;
15418eddac3fSPeter Zijlstra 		}
15428eddac3fSPeter Zijlstra 
154377a80692SYuyang Du 		head = get_dep_list(lock, offset);
15448eddac3fSPeter Zijlstra 
154535a9393cSPeter Zijlstra 		list_for_each_entry_rcu(entry, head, entry) {
15468eddac3fSPeter Zijlstra 			if (!lock_accessed(entry)) {
15478eddac3fSPeter Zijlstra 				unsigned int cq_depth;
15488eddac3fSPeter Zijlstra 				mark_lock_accessed(entry, lock);
15498eddac3fSPeter Zijlstra 				if (match(entry, data)) {
15508eddac3fSPeter Zijlstra 					*target_entry = entry;
1551*b11be024SBoqun Feng 					ret = BFS_RMATCH;
15528eddac3fSPeter Zijlstra 					goto exit;
15538eddac3fSPeter Zijlstra 				}
15548eddac3fSPeter Zijlstra 
1555aa480771SYuyang Du 				if (__cq_enqueue(cq, entry)) {
1556*b11be024SBoqun Feng 					ret = BFS_EQUEUEFULL;
15578eddac3fSPeter Zijlstra 					goto exit;
15588eddac3fSPeter Zijlstra 				}
15598eddac3fSPeter Zijlstra 				cq_depth = __cq_get_elem_count(cq);
15608eddac3fSPeter Zijlstra 				if (max_bfs_queue_depth < cq_depth)
15618eddac3fSPeter Zijlstra 					max_bfs_queue_depth = cq_depth;
15628eddac3fSPeter Zijlstra 			}
15638eddac3fSPeter Zijlstra 		}
15648eddac3fSPeter Zijlstra 	}
15658eddac3fSPeter Zijlstra exit:
15668eddac3fSPeter Zijlstra 	return ret;
15678eddac3fSPeter Zijlstra }
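/*
 * Added note on the traversal above: the "visited" set is implemented by
 * mark_lock_accessed()/lock_accessed(), which stamp each lock class with the
 * current lockdep_dependency_gen_id; __cq_init() bumps that generation, so a
 * new search implicitly invalidates all previous visited marks without
 * touching every class. Because the search is breadth-first, the first match
 * found is also one reachable in the fewest dependency hops from the source.
 */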
15688eddac3fSPeter Zijlstra 
1569*b11be024SBoqun Feng static inline enum bfs_result
1570*b11be024SBoqun Feng __bfs_forwards(struct lock_list *src_entry,
15718eddac3fSPeter Zijlstra 	       void *data,
15728eddac3fSPeter Zijlstra 	       int (*match)(struct lock_list *entry, void *data),
15738eddac3fSPeter Zijlstra 	       struct lock_list **target_entry)
15748eddac3fSPeter Zijlstra {
157577a80692SYuyang Du 	return __bfs(src_entry, data, match, target_entry,
157677a80692SYuyang Du 		     offsetof(struct lock_class, locks_after));
15778eddac3fSPeter Zijlstra 
15788eddac3fSPeter Zijlstra }
15798eddac3fSPeter Zijlstra 
1580*b11be024SBoqun Feng static inline enum bfs_result
1581*b11be024SBoqun Feng __bfs_backwards(struct lock_list *src_entry,
15828eddac3fSPeter Zijlstra 		void *data,
15838eddac3fSPeter Zijlstra 		int (*match)(struct lock_list *entry, void *data),
15848eddac3fSPeter Zijlstra 		struct lock_list **target_entry)
15858eddac3fSPeter Zijlstra {
158677a80692SYuyang Du 	return __bfs(src_entry, data, match, target_entry,
158777a80692SYuyang Du 		     offsetof(struct lock_class, locks_before));
15888eddac3fSPeter Zijlstra 
15898eddac3fSPeter Zijlstra }
15908eddac3fSPeter Zijlstra 
159112593b74SBart Van Assche static void print_lock_trace(const struct lock_trace *trace,
159212593b74SBart Van Assche 			     unsigned int spaces)
1593c120bce7SThomas Gleixner {
159412593b74SBart Van Assche 	stack_trace_print(trace->entries, trace->nr_entries, spaces);
1595c120bce7SThomas Gleixner }
1596c120bce7SThomas Gleixner 
15978eddac3fSPeter Zijlstra /*
15988eddac3fSPeter Zijlstra  * Print a dependency chain entry (this is only done when a deadlock
15998eddac3fSPeter Zijlstra  * has been detected):
16008eddac3fSPeter Zijlstra  */
1601f7c1c6b3SYuyang Du static noinline void
16028eddac3fSPeter Zijlstra print_circular_bug_entry(struct lock_list *target, int depth)
16038eddac3fSPeter Zijlstra {
16048eddac3fSPeter Zijlstra 	if (debug_locks_silent)
1605f7c1c6b3SYuyang Du 		return;
16068eddac3fSPeter Zijlstra 	printk("\n-> #%u", depth);
16078eddac3fSPeter Zijlstra 	print_lock_name(target->class);
1608f943fe0fSDmitry Vyukov 	printk(KERN_CONT ":\n");
160912593b74SBart Van Assche 	print_lock_trace(target->trace, 6);
16108eddac3fSPeter Zijlstra }
16118eddac3fSPeter Zijlstra 
16128eddac3fSPeter Zijlstra static void
16138eddac3fSPeter Zijlstra print_circular_lock_scenario(struct held_lock *src,
16148eddac3fSPeter Zijlstra 			     struct held_lock *tgt,
16158eddac3fSPeter Zijlstra 			     struct lock_list *prt)
16168eddac3fSPeter Zijlstra {
16178eddac3fSPeter Zijlstra 	struct lock_class *source = hlock_class(src);
16188eddac3fSPeter Zijlstra 	struct lock_class *target = hlock_class(tgt);
16198eddac3fSPeter Zijlstra 	struct lock_class *parent = prt->class;
16208eddac3fSPeter Zijlstra 
16218eddac3fSPeter Zijlstra 	/*
16228eddac3fSPeter Zijlstra 	 * A direct locking problem where unsafe_class lock is taken
16238eddac3fSPeter Zijlstra 	 * directly by safe_class lock, then all we need to show
16248eddac3fSPeter Zijlstra 	 * is the deadlock scenario, as it is obvious that the
16258eddac3fSPeter Zijlstra 	 * unsafe lock is taken under the safe lock.
16268eddac3fSPeter Zijlstra 	 *
16278eddac3fSPeter Zijlstra 	 * But if there is a chain instead, where the safe lock takes
16288eddac3fSPeter Zijlstra 	 * an intermediate lock (middle_class) where this lock is
16298eddac3fSPeter Zijlstra 	 * not the same as the safe lock, then the lock chain is
16308eddac3fSPeter Zijlstra 	 * used to describe the problem. Otherwise we would need
16318eddac3fSPeter Zijlstra 	 * to show a different CPU case for each link in the chain
16328eddac3fSPeter Zijlstra 	 * from the safe_class lock to the unsafe_class lock.
16338eddac3fSPeter Zijlstra 	 */
16348eddac3fSPeter Zijlstra 	if (parent != source) {
16358eddac3fSPeter Zijlstra 		printk("Chain exists of:\n  ");
16368eddac3fSPeter Zijlstra 		__print_lock_name(source);
1637f943fe0fSDmitry Vyukov 		printk(KERN_CONT " --> ");
16388eddac3fSPeter Zijlstra 		__print_lock_name(parent);
1639f943fe0fSDmitry Vyukov 		printk(KERN_CONT " --> ");
16408eddac3fSPeter Zijlstra 		__print_lock_name(target);
1641f943fe0fSDmitry Vyukov 		printk(KERN_CONT "\n\n");
16428eddac3fSPeter Zijlstra 	}
16438eddac3fSPeter Zijlstra 
16448eddac3fSPeter Zijlstra 	printk(" Possible unsafe locking scenario:\n\n");
16458eddac3fSPeter Zijlstra 	printk("       CPU0                    CPU1\n");
16468eddac3fSPeter Zijlstra 	printk("       ----                    ----\n");
16478eddac3fSPeter Zijlstra 	printk("  lock(");
16488eddac3fSPeter Zijlstra 	__print_lock_name(target);
1649f943fe0fSDmitry Vyukov 	printk(KERN_CONT ");\n");
16508eddac3fSPeter Zijlstra 	printk("                               lock(");
16518eddac3fSPeter Zijlstra 	__print_lock_name(parent);
1652f943fe0fSDmitry Vyukov 	printk(KERN_CONT ");\n");
16538eddac3fSPeter Zijlstra 	printk("                               lock(");
16548eddac3fSPeter Zijlstra 	__print_lock_name(target);
1655f943fe0fSDmitry Vyukov 	printk(KERN_CONT ");\n");
16568eddac3fSPeter Zijlstra 	printk("  lock(");
16578eddac3fSPeter Zijlstra 	__print_lock_name(source);
1658f943fe0fSDmitry Vyukov 	printk(KERN_CONT ");\n");
16598eddac3fSPeter Zijlstra 	printk("\n *** DEADLOCK ***\n\n");
16608eddac3fSPeter Zijlstra }
16618eddac3fSPeter Zijlstra 
16628eddac3fSPeter Zijlstra /*
16638eddac3fSPeter Zijlstra  * When a circular dependency is detected, print the
16648eddac3fSPeter Zijlstra  * header first:
16658eddac3fSPeter Zijlstra  */
1666f7c1c6b3SYuyang Du static noinline void
16678eddac3fSPeter Zijlstra print_circular_bug_header(struct lock_list *entry, unsigned int depth,
16688eddac3fSPeter Zijlstra 			struct held_lock *check_src,
16698eddac3fSPeter Zijlstra 			struct held_lock *check_tgt)
16708eddac3fSPeter Zijlstra {
16718eddac3fSPeter Zijlstra 	struct task_struct *curr = current;
16728eddac3fSPeter Zijlstra 
16738eddac3fSPeter Zijlstra 	if (debug_locks_silent)
1674f7c1c6b3SYuyang Du 		return;
16758eddac3fSPeter Zijlstra 
1676681fbec8SPaul E. McKenney 	pr_warn("\n");
1677a5dd63efSPaul E. McKenney 	pr_warn("======================================================\n");
1678a5dd63efSPaul E. McKenney 	pr_warn("WARNING: possible circular locking dependency detected\n");
16798eddac3fSPeter Zijlstra 	print_kernel_ident();
1680a5dd63efSPaul E. McKenney 	pr_warn("------------------------------------------------------\n");
1681681fbec8SPaul E. McKenney 	pr_warn("%s/%d is trying to acquire lock:\n",
16828eddac3fSPeter Zijlstra 		curr->comm, task_pid_nr(curr));
16838eddac3fSPeter Zijlstra 	print_lock(check_src);
1684383a4bc8SByungchul Park 
1685681fbec8SPaul E. McKenney 	pr_warn("\nbut task is already holding lock:\n");
1686383a4bc8SByungchul Park 
16878eddac3fSPeter Zijlstra 	print_lock(check_tgt);
1688681fbec8SPaul E. McKenney 	pr_warn("\nwhich lock already depends on the new lock.\n\n");
1689681fbec8SPaul E. McKenney 	pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
16908eddac3fSPeter Zijlstra 
16918eddac3fSPeter Zijlstra 	print_circular_bug_entry(entry, depth);
16928eddac3fSPeter Zijlstra }
16938eddac3fSPeter Zijlstra 
16948eddac3fSPeter Zijlstra static inline int class_equal(struct lock_list *entry, void *data)
16958eddac3fSPeter Zijlstra {
16968eddac3fSPeter Zijlstra 	return entry->class == data;
16978eddac3fSPeter Zijlstra }
16988eddac3fSPeter Zijlstra 
1699f7c1c6b3SYuyang Du static noinline void print_circular_bug(struct lock_list *this,
17008eddac3fSPeter Zijlstra 					struct lock_list *target,
17018eddac3fSPeter Zijlstra 					struct held_lock *check_src,
1702b1abe462SThomas Gleixner 					struct held_lock *check_tgt)
17038eddac3fSPeter Zijlstra {
17048eddac3fSPeter Zijlstra 	struct task_struct *curr = current;
17058eddac3fSPeter Zijlstra 	struct lock_list *parent;
17068eddac3fSPeter Zijlstra 	struct lock_list *first_parent;
17078eddac3fSPeter Zijlstra 	int depth;
17088eddac3fSPeter Zijlstra 
17098eddac3fSPeter Zijlstra 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1710f7c1c6b3SYuyang Du 		return;
17118eddac3fSPeter Zijlstra 
171212593b74SBart Van Assche 	this->trace = save_trace();
171312593b74SBart Van Assche 	if (!this->trace)
1714f7c1c6b3SYuyang Du 		return;
17158eddac3fSPeter Zijlstra 
17168eddac3fSPeter Zijlstra 	depth = get_lock_depth(target);
17178eddac3fSPeter Zijlstra 
17188eddac3fSPeter Zijlstra 	print_circular_bug_header(target, depth, check_src, check_tgt);
17198eddac3fSPeter Zijlstra 
17208eddac3fSPeter Zijlstra 	parent = get_lock_parent(target);
17218eddac3fSPeter Zijlstra 	first_parent = parent;
17228eddac3fSPeter Zijlstra 
17238eddac3fSPeter Zijlstra 	while (parent) {
17248eddac3fSPeter Zijlstra 		print_circular_bug_entry(parent, --depth);
17258eddac3fSPeter Zijlstra 		parent = get_lock_parent(parent);
17268eddac3fSPeter Zijlstra 	}
17278eddac3fSPeter Zijlstra 
17288eddac3fSPeter Zijlstra 	printk("\nother info that might help us debug this:\n\n");
17298eddac3fSPeter Zijlstra 	print_circular_lock_scenario(check_src, check_tgt,
17308eddac3fSPeter Zijlstra 				     first_parent);
17318eddac3fSPeter Zijlstra 
17328eddac3fSPeter Zijlstra 	lockdep_print_held_locks(curr);
17338eddac3fSPeter Zijlstra 
17348eddac3fSPeter Zijlstra 	printk("\nstack backtrace:\n");
17358eddac3fSPeter Zijlstra 	dump_stack();
17368eddac3fSPeter Zijlstra }
17378eddac3fSPeter Zijlstra 
1738f7c1c6b3SYuyang Du static noinline void print_bfs_bug(int ret)
17398eddac3fSPeter Zijlstra {
17408eddac3fSPeter Zijlstra 	if (!debug_locks_off_graph_unlock())
1741f7c1c6b3SYuyang Du 		return;
17428eddac3fSPeter Zijlstra 
17438eddac3fSPeter Zijlstra 	/*
17448eddac3fSPeter Zijlstra 	 * Breadth-first-search failed, graph got corrupted?
17458eddac3fSPeter Zijlstra 	 */
17468eddac3fSPeter Zijlstra 	WARN(1, "lockdep bfs error:%d\n", ret);
17478eddac3fSPeter Zijlstra }
17488eddac3fSPeter Zijlstra 
17498eddac3fSPeter Zijlstra static int noop_count(struct lock_list *entry, void *data)
17508eddac3fSPeter Zijlstra {
17518eddac3fSPeter Zijlstra 	(*(unsigned long *)data)++;
17528eddac3fSPeter Zijlstra 	return 0;
17538eddac3fSPeter Zijlstra }
17548eddac3fSPeter Zijlstra 
17555216d530SFengguang Wu static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
17568eddac3fSPeter Zijlstra {
17578eddac3fSPeter Zijlstra 	unsigned long  count = 0;
17583f649ab7SKees Cook 	struct lock_list *target_entry;
17598eddac3fSPeter Zijlstra 
17608eddac3fSPeter Zijlstra 	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);
17618eddac3fSPeter Zijlstra 
17628eddac3fSPeter Zijlstra 	return count;
17638eddac3fSPeter Zijlstra }
17648eddac3fSPeter Zijlstra unsigned long lockdep_count_forward_deps(struct lock_class *class)
17658eddac3fSPeter Zijlstra {
17668eddac3fSPeter Zijlstra 	unsigned long ret, flags;
17678eddac3fSPeter Zijlstra 	struct lock_list this;
17688eddac3fSPeter Zijlstra 
17698eddac3fSPeter Zijlstra 	this.parent = NULL;
17708eddac3fSPeter Zijlstra 	this.class = class;
17718eddac3fSPeter Zijlstra 
1772fcc784beSSteven Rostedt (VMware) 	raw_local_irq_save(flags);
1773248efb21SPeter Zijlstra 	lockdep_lock();
17748eddac3fSPeter Zijlstra 	ret = __lockdep_count_forward_deps(&this);
1775248efb21SPeter Zijlstra 	lockdep_unlock();
1776fcc784beSSteven Rostedt (VMware) 	raw_local_irq_restore(flags);
17778eddac3fSPeter Zijlstra 
17788eddac3fSPeter Zijlstra 	return ret;
17798eddac3fSPeter Zijlstra }
17808eddac3fSPeter Zijlstra 
17815216d530SFengguang Wu static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
17828eddac3fSPeter Zijlstra {
17838eddac3fSPeter Zijlstra 	unsigned long  count = 0;
17843f649ab7SKees Cook 	struct lock_list *target_entry;
17858eddac3fSPeter Zijlstra 
17868eddac3fSPeter Zijlstra 	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);
17878eddac3fSPeter Zijlstra 
17888eddac3fSPeter Zijlstra 	return count;
17898eddac3fSPeter Zijlstra }
17908eddac3fSPeter Zijlstra 
17918eddac3fSPeter Zijlstra unsigned long lockdep_count_backward_deps(struct lock_class *class)
17928eddac3fSPeter Zijlstra {
17938eddac3fSPeter Zijlstra 	unsigned long ret, flags;
17948eddac3fSPeter Zijlstra 	struct lock_list this;
17958eddac3fSPeter Zijlstra 
17968eddac3fSPeter Zijlstra 	this.parent = NULL;
17978eddac3fSPeter Zijlstra 	this.class = class;
17988eddac3fSPeter Zijlstra 
1799fcc784beSSteven Rostedt (VMware) 	raw_local_irq_save(flags);
1800248efb21SPeter Zijlstra 	lockdep_lock();
18018eddac3fSPeter Zijlstra 	ret = __lockdep_count_backward_deps(&this);
1802248efb21SPeter Zijlstra 	lockdep_unlock();
1803fcc784beSSteven Rostedt (VMware) 	raw_local_irq_restore(flags);
18048eddac3fSPeter Zijlstra 
18058eddac3fSPeter Zijlstra 	return ret;
18068eddac3fSPeter Zijlstra }
18078eddac3fSPeter Zijlstra 
18088eddac3fSPeter Zijlstra /*
18098c2c2b44SYuyang Du  * Check whether the dependency graph starting at <src> can lead to
1810*b11be024SBoqun Feng  * <target> or not.
18118eddac3fSPeter Zijlstra  */
1812*b11be024SBoqun Feng static noinline enum bfs_result
18138c2c2b44SYuyang Du check_path(struct lock_class *target, struct lock_list *src_entry,
18148eddac3fSPeter Zijlstra 	   struct lock_list **target_entry)
18158eddac3fSPeter Zijlstra {
1816*b11be024SBoqun Feng 	enum bfs_result ret;
18178c2c2b44SYuyang Du 
18188c2c2b44SYuyang Du 	ret = __bfs_forwards(src_entry, (void *)target, class_equal,
18198c2c2b44SYuyang Du 			     target_entry);
18208c2c2b44SYuyang Du 
1821*b11be024SBoqun Feng 	if (unlikely(bfs_error(ret)))
18228c2c2b44SYuyang Du 		print_bfs_bug(ret);
18238c2c2b44SYuyang Du 
18248c2c2b44SYuyang Du 	return ret;
18258c2c2b44SYuyang Du }
18268c2c2b44SYuyang Du 
18278c2c2b44SYuyang Du /*
18288c2c2b44SYuyang Du  * Prove that the dependency graph starting at <src> can not
18298c2c2b44SYuyang Du  * lead to <target>. If it can, there is a circle when adding
18308c2c2b44SYuyang Du  * <target> -> <src> dependency.
18318c2c2b44SYuyang Du  *
1832*b11be024SBoqun Feng  * Print an error and return BFS_RMATCH if it does.
18338c2c2b44SYuyang Du  */
1834*b11be024SBoqun Feng static noinline enum bfs_result
18358c2c2b44SYuyang Du check_noncircular(struct held_lock *src, struct held_lock *target,
183612593b74SBart Van Assche 		  struct lock_trace **const trace)
18378c2c2b44SYuyang Du {
1838*b11be024SBoqun Feng 	enum bfs_result ret;
18393f649ab7SKees Cook 	struct lock_list *target_entry;
18408c2c2b44SYuyang Du 	struct lock_list src_entry = {
18418c2c2b44SYuyang Du 		.class = hlock_class(src),
18428c2c2b44SYuyang Du 		.parent = NULL,
18438c2c2b44SYuyang Du 	};
18448eddac3fSPeter Zijlstra 
18458eddac3fSPeter Zijlstra 	debug_atomic_inc(nr_cyclic_checks);
18468eddac3fSPeter Zijlstra 
18478c2c2b44SYuyang Du 	ret = check_path(hlock_class(target), &src_entry, &target_entry);
18488eddac3fSPeter Zijlstra 
1849*b11be024SBoqun Feng 	if (unlikely(ret == BFS_RMATCH)) {
185012593b74SBart Van Assche 		if (!*trace) {
18518c2c2b44SYuyang Du 			/*
18528c2c2b44SYuyang Du 			 * If save_trace fails here, the printing might
18538c2c2b44SYuyang Du 			 * trigger a WARN, but because of the !nr_entries it
18548c2c2b44SYuyang Du 			 * should not do anything harmful.
18558c2c2b44SYuyang Du 			 */
185612593b74SBart Van Assche 			*trace = save_trace();
18578eddac3fSPeter Zijlstra 		}
18588eddac3fSPeter Zijlstra 
18598c2c2b44SYuyang Du 		print_circular_bug(&src_entry, target_entry, src, target);
18608c2c2b44SYuyang Du 	}
18618c2c2b44SYuyang Du 
18628c2c2b44SYuyang Du 	return ret;
18638c2c2b44SYuyang Du }
18648c2c2b44SYuyang Du 
186568e9dc29SYuyang Du #ifdef CONFIG_LOCKDEP_SMALL
18668c2c2b44SYuyang Du /*
18678c2c2b44SYuyang Du  * Check whether the dependency graph starting at <src> can lead to
18688c2c2b44SYuyang Du  * <target> or not. If it can, the <src> -> <target> dependency is already
18698c2c2b44SYuyang Du  * in the graph.
1870*b11be024SBoqun Feng  *
1871*b11be024SBoqun Feng  * Return BFS_RMATCH if it does, BFS_RNOMATCH if it does not, and BFS_E* if
1872*b11be024SBoqun Feng  * any error appears in the bfs search.
18738c2c2b44SYuyang Du  */
1874*b11be024SBoqun Feng static noinline enum bfs_result
18758c2c2b44SYuyang Du check_redundant(struct held_lock *src, struct held_lock *target)
1876ae813308SPeter Zijlstra {
1877*b11be024SBoqun Feng 	enum bfs_result ret;
18783f649ab7SKees Cook 	struct lock_list *target_entry;
18798c2c2b44SYuyang Du 	struct lock_list src_entry = {
18808c2c2b44SYuyang Du 		.class = hlock_class(src),
18818c2c2b44SYuyang Du 		.parent = NULL,
18828c2c2b44SYuyang Du 	};
1883ae813308SPeter Zijlstra 
1884ae813308SPeter Zijlstra 	debug_atomic_inc(nr_redundant_checks);
1885ae813308SPeter Zijlstra 
18868c2c2b44SYuyang Du 	ret = check_path(hlock_class(target), &src_entry, &target_entry);
1887ae813308SPeter Zijlstra 
1888*b11be024SBoqun Feng 	if (ret == BFS_RMATCH)
18898c2c2b44SYuyang Du 		debug_atomic_inc(nr_redundant);
18908c2c2b44SYuyang Du 
18918c2c2b44SYuyang Du 	return ret;
1892ae813308SPeter Zijlstra }
189368e9dc29SYuyang Du #endif
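/*
 * Added note: the redundancy check above is compiled in only for
 * CONFIG_LOCKDEP_SMALL. When it reports BFS_RMATCH, the caller can skip
 * recording the <src> -> <target> edge, since the graph already implies it
 * transitively; on small configurations that extra BFS is presumably worth
 * it because the dependency tables (MAX_LOCKDEP_ENTRIES and friends) are
 * sized down.
 */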
1894ae813308SPeter Zijlstra 
1895e7a38f63SYuyang Du #ifdef CONFIG_TRACE_IRQFLAGS
1896948f8376SFrederic Weisbecker 
1897948f8376SFrederic Weisbecker static inline int usage_accumulate(struct lock_list *entry, void *mask)
1898948f8376SFrederic Weisbecker {
1899948f8376SFrederic Weisbecker 	*(unsigned long *)mask |= entry->class->usage_mask;
1900948f8376SFrederic Weisbecker 
1901948f8376SFrederic Weisbecker 	return 0;
1902948f8376SFrederic Weisbecker }
1903948f8376SFrederic Weisbecker 
19048eddac3fSPeter Zijlstra /*
19058eddac3fSPeter Zijlstra  * Forwards and backwards subgraph searching, for the purposes of
19068eddac3fSPeter Zijlstra  * proving that two subgraphs can be connected by a new dependency
19078eddac3fSPeter Zijlstra  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
19088eddac3fSPeter Zijlstra  */
19098eddac3fSPeter Zijlstra 
1910627f364dSFrederic Weisbecker static inline int usage_match(struct lock_list *entry, void *mask)
19118eddac3fSPeter Zijlstra {
1912627f364dSFrederic Weisbecker 	return entry->class->usage_mask & *(unsigned long *)mask;
19138eddac3fSPeter Zijlstra }
19148eddac3fSPeter Zijlstra 
19158eddac3fSPeter Zijlstra /*
19168eddac3fSPeter Zijlstra  * Find a node in the forwards-direction dependency sub-graph starting
19178eddac3fSPeter Zijlstra  * at @root->class that matches @bit.
19188eddac3fSPeter Zijlstra  *
1919*b11be024SBoqun Feng  * Return BFS_RMATCH if such a node exists in the subgraph, and put that node
19208eddac3fSPeter Zijlstra  * into *@target_entry.
19218eddac3fSPeter Zijlstra  */
1922*b11be024SBoqun Feng static enum bfs_result
1923627f364dSFrederic Weisbecker find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
19248eddac3fSPeter Zijlstra 			struct lock_list **target_entry)
19258eddac3fSPeter Zijlstra {
1926*b11be024SBoqun Feng 	enum bfs_result result;
19278eddac3fSPeter Zijlstra 
19288eddac3fSPeter Zijlstra 	debug_atomic_inc(nr_find_usage_forwards_checks);
19298eddac3fSPeter Zijlstra 
1930627f364dSFrederic Weisbecker 	result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
19318eddac3fSPeter Zijlstra 
19328eddac3fSPeter Zijlstra 	return result;
19338eddac3fSPeter Zijlstra }
19348eddac3fSPeter Zijlstra 
19358eddac3fSPeter Zijlstra /*
19368eddac3fSPeter Zijlstra  * Find a node in the backwards-direction dependency sub-graph starting
19378eddac3fSPeter Zijlstra  * at @root->class that matches @bit.
19388eddac3fSPeter Zijlstra  */
1939*b11be024SBoqun Feng static enum bfs_result
1940627f364dSFrederic Weisbecker find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
19418eddac3fSPeter Zijlstra 			struct lock_list **target_entry)
19428eddac3fSPeter Zijlstra {
1943*b11be024SBoqun Feng 	enum bfs_result result;
19448eddac3fSPeter Zijlstra 
19458eddac3fSPeter Zijlstra 	debug_atomic_inc(nr_find_usage_backwards_checks);
19468eddac3fSPeter Zijlstra 
1947627f364dSFrederic Weisbecker 	result = __bfs_backwards(root, &usage_mask, usage_match, target_entry);
19488eddac3fSPeter Zijlstra 
19498eddac3fSPeter Zijlstra 	return result;
19508eddac3fSPeter Zijlstra }
19518eddac3fSPeter Zijlstra 
19528eddac3fSPeter Zijlstra static void print_lock_class_header(struct lock_class *class, int depth)
19538eddac3fSPeter Zijlstra {
19548eddac3fSPeter Zijlstra 	int bit;
19558eddac3fSPeter Zijlstra 
19568eddac3fSPeter Zijlstra 	printk("%*s->", depth, "");
19578eddac3fSPeter Zijlstra 	print_lock_name(class);
19588ca2b56cSWaiman Long #ifdef CONFIG_DEBUG_LOCKDEP
19598ca2b56cSWaiman Long 	printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
19608ca2b56cSWaiman Long #endif
1961f943fe0fSDmitry Vyukov 	printk(KERN_CONT " {\n");
19628eddac3fSPeter Zijlstra 
19638eddac3fSPeter Zijlstra 	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
19648eddac3fSPeter Zijlstra 		if (class->usage_mask & (1 << bit)) {
19658eddac3fSPeter Zijlstra 			int len = depth;
19668eddac3fSPeter Zijlstra 
19678eddac3fSPeter Zijlstra 			len += printk("%*s   %s", depth, "", usage_str[bit]);
1968f943fe0fSDmitry Vyukov 			len += printk(KERN_CONT " at:\n");
196912593b74SBart Van Assche 			print_lock_trace(class->usage_traces[bit], len);
19708eddac3fSPeter Zijlstra 		}
19718eddac3fSPeter Zijlstra 	}
19728eddac3fSPeter Zijlstra 	printk("%*s }\n", depth, "");
19738eddac3fSPeter Zijlstra 
197404860d48SBorislav Petkov 	printk("%*s ... key      at: [<%px>] %pS\n",
1975f943fe0fSDmitry Vyukov 		depth, "", class->key, class->key);
19768eddac3fSPeter Zijlstra }
19778eddac3fSPeter Zijlstra 
19788eddac3fSPeter Zijlstra /*
19798eddac3fSPeter Zijlstra  * printk the shortest lock dependencies from @start to @end in reverse order:
19808eddac3fSPeter Zijlstra  */
19818eddac3fSPeter Zijlstra static void __used
19828eddac3fSPeter Zijlstra print_shortest_lock_dependencies(struct lock_list *leaf,
19838eddac3fSPeter Zijlstra 				 struct lock_list *root)
19848eddac3fSPeter Zijlstra {
19858eddac3fSPeter Zijlstra 	struct lock_list *entry = leaf;
19868eddac3fSPeter Zijlstra 	int depth;
19878eddac3fSPeter Zijlstra 
19888eddac3fSPeter Zijlstra 	/*compute depth from generated tree by BFS*/
19898eddac3fSPeter Zijlstra 	depth = get_lock_depth(leaf);
19908eddac3fSPeter Zijlstra 
19918eddac3fSPeter Zijlstra 	do {
19928eddac3fSPeter Zijlstra 		print_lock_class_header(entry->class, depth);
19938eddac3fSPeter Zijlstra 		printk("%*s ... acquired at:\n", depth, "");
199412593b74SBart Van Assche 		print_lock_trace(entry->trace, 2);
19958eddac3fSPeter Zijlstra 		printk("\n");
19968eddac3fSPeter Zijlstra 
19978eddac3fSPeter Zijlstra 		if (depth == 0 && (entry != root)) {
19988eddac3fSPeter Zijlstra 			printk("lockdep:%s bad path found in chain graph\n", __func__);
19998eddac3fSPeter Zijlstra 			break;
20008eddac3fSPeter Zijlstra 		}
20018eddac3fSPeter Zijlstra 
20028eddac3fSPeter Zijlstra 		entry = get_lock_parent(entry);
20038eddac3fSPeter Zijlstra 		depth--;
20048eddac3fSPeter Zijlstra 	} while (entry && (depth >= 0));
20058eddac3fSPeter Zijlstra }
20068eddac3fSPeter Zijlstra 
20078eddac3fSPeter Zijlstra static void
20088eddac3fSPeter Zijlstra print_irq_lock_scenario(struct lock_list *safe_entry,
20098eddac3fSPeter Zijlstra 			struct lock_list *unsafe_entry,
20108eddac3fSPeter Zijlstra 			struct lock_class *prev_class,
20118eddac3fSPeter Zijlstra 			struct lock_class *next_class)
20128eddac3fSPeter Zijlstra {
20138eddac3fSPeter Zijlstra 	struct lock_class *safe_class = safe_entry->class;
20148eddac3fSPeter Zijlstra 	struct lock_class *unsafe_class = unsafe_entry->class;
20158eddac3fSPeter Zijlstra 	struct lock_class *middle_class = prev_class;
20168eddac3fSPeter Zijlstra 
20178eddac3fSPeter Zijlstra 	if (middle_class == safe_class)
20188eddac3fSPeter Zijlstra 		middle_class = next_class;
20198eddac3fSPeter Zijlstra 
20208eddac3fSPeter Zijlstra 	/*
20218eddac3fSPeter Zijlstra 	 * A direct locking problem where unsafe_class lock is taken
20228eddac3fSPeter Zijlstra 	 * directly by safe_class lock, then all we need to show
20238eddac3fSPeter Zijlstra 	 * is the deadlock scenario, as it is obvious that the
20248eddac3fSPeter Zijlstra 	 * unsafe lock is taken under the safe lock.
20258eddac3fSPeter Zijlstra 	 *
20268eddac3fSPeter Zijlstra 	 * But if there is a chain instead, where the safe lock takes
20278eddac3fSPeter Zijlstra 	 * an intermediate lock (middle_class) where this lock is
20288eddac3fSPeter Zijlstra 	 * not the same as the safe lock, then the lock chain is
20298eddac3fSPeter Zijlstra 	 * used to describe the problem. Otherwise we would need
20308eddac3fSPeter Zijlstra 	 * to show a different CPU case for each link in the chain
20318eddac3fSPeter Zijlstra 	 * from the safe_class lock to the unsafe_class lock.
20328eddac3fSPeter Zijlstra 	 */
20338eddac3fSPeter Zijlstra 	if (middle_class != unsafe_class) {
20348eddac3fSPeter Zijlstra 		printk("Chain exists of:\n  ");
20358eddac3fSPeter Zijlstra 		__print_lock_name(safe_class);
2036f943fe0fSDmitry Vyukov 		printk(KERN_CONT " --> ");
20378eddac3fSPeter Zijlstra 		__print_lock_name(middle_class);
2038f943fe0fSDmitry Vyukov 		printk(KERN_CONT " --> ");
20398eddac3fSPeter Zijlstra 		__print_lock_name(unsafe_class);
2040f943fe0fSDmitry Vyukov 		printk(KERN_CONT "\n\n");
20418eddac3fSPeter Zijlstra 	}
20428eddac3fSPeter Zijlstra 
20438eddac3fSPeter Zijlstra 	printk(" Possible interrupt unsafe locking scenario:\n\n");
20448eddac3fSPeter Zijlstra 	printk("       CPU0                    CPU1\n");
20458eddac3fSPeter Zijlstra 	printk("       ----                    ----\n");
20468eddac3fSPeter Zijlstra 	printk("  lock(");
20478eddac3fSPeter Zijlstra 	__print_lock_name(unsafe_class);
2048f943fe0fSDmitry Vyukov 	printk(KERN_CONT ");\n");
20498eddac3fSPeter Zijlstra 	printk("                               local_irq_disable();\n");
20508eddac3fSPeter Zijlstra 	printk("                               lock(");
20518eddac3fSPeter Zijlstra 	__print_lock_name(safe_class);
2052f943fe0fSDmitry Vyukov 	printk(KERN_CONT ");\n");
20538eddac3fSPeter Zijlstra 	printk("                               lock(");
20548eddac3fSPeter Zijlstra 	__print_lock_name(middle_class);
2055f943fe0fSDmitry Vyukov 	printk(KERN_CONT ");\n");
20568eddac3fSPeter Zijlstra 	printk("  <Interrupt>\n");
20578eddac3fSPeter Zijlstra 	printk("    lock(");
20588eddac3fSPeter Zijlstra 	__print_lock_name(safe_class);
2059f943fe0fSDmitry Vyukov 	printk(KERN_CONT ");\n");
20608eddac3fSPeter Zijlstra 	printk("\n *** DEADLOCK ***\n\n");
20618eddac3fSPeter Zijlstra }
20628eddac3fSPeter Zijlstra 
2063f7c1c6b3SYuyang Du static void
20648eddac3fSPeter Zijlstra print_bad_irq_dependency(struct task_struct *curr,
20658eddac3fSPeter Zijlstra 			 struct lock_list *prev_root,
20668eddac3fSPeter Zijlstra 			 struct lock_list *next_root,
20678eddac3fSPeter Zijlstra 			 struct lock_list *backwards_entry,
20688eddac3fSPeter Zijlstra 			 struct lock_list *forwards_entry,
20698eddac3fSPeter Zijlstra 			 struct held_lock *prev,
20708eddac3fSPeter Zijlstra 			 struct held_lock *next,
20718eddac3fSPeter Zijlstra 			 enum lock_usage_bit bit1,
20728eddac3fSPeter Zijlstra 			 enum lock_usage_bit bit2,
20738eddac3fSPeter Zijlstra 			 const char *irqclass)
20748eddac3fSPeter Zijlstra {
20758eddac3fSPeter Zijlstra 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2076f7c1c6b3SYuyang Du 		return;
20778eddac3fSPeter Zijlstra 
2078681fbec8SPaul E. McKenney 	pr_warn("\n");
2079a5dd63efSPaul E. McKenney 	pr_warn("=====================================================\n");
2080a5dd63efSPaul E. McKenney 	pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
20818eddac3fSPeter Zijlstra 		irqclass, irqclass);
20828eddac3fSPeter Zijlstra 	print_kernel_ident();
2083a5dd63efSPaul E. McKenney 	pr_warn("-----------------------------------------------------\n");
2084681fbec8SPaul E. McKenney 	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
20858eddac3fSPeter Zijlstra 		curr->comm, task_pid_nr(curr),
2086f9ad4a5fSPeter Zijlstra 		lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
20878eddac3fSPeter Zijlstra 		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
2088f9ad4a5fSPeter Zijlstra 		lockdep_hardirqs_enabled(),
20898eddac3fSPeter Zijlstra 		curr->softirqs_enabled);
20908eddac3fSPeter Zijlstra 	print_lock(next);
20918eddac3fSPeter Zijlstra 
2092681fbec8SPaul E. McKenney 	pr_warn("\nand this task is already holding:\n");
20938eddac3fSPeter Zijlstra 	print_lock(prev);
2094681fbec8SPaul E. McKenney 	pr_warn("which would create a new lock dependency:\n");
20958eddac3fSPeter Zijlstra 	print_lock_name(hlock_class(prev));
2096681fbec8SPaul E. McKenney 	pr_cont(" ->");
20978eddac3fSPeter Zijlstra 	print_lock_name(hlock_class(next));
2098681fbec8SPaul E. McKenney 	pr_cont("\n");
20998eddac3fSPeter Zijlstra 
2100681fbec8SPaul E. McKenney 	pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
21018eddac3fSPeter Zijlstra 		irqclass);
21028eddac3fSPeter Zijlstra 	print_lock_name(backwards_entry->class);
2103681fbec8SPaul E. McKenney 	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
21048eddac3fSPeter Zijlstra 
210512593b74SBart Van Assche 	print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);
21068eddac3fSPeter Zijlstra 
2107681fbec8SPaul E. McKenney 	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
21088eddac3fSPeter Zijlstra 	print_lock_name(forwards_entry->class);
2109681fbec8SPaul E. McKenney 	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
2110681fbec8SPaul E. McKenney 	pr_warn("...");
21118eddac3fSPeter Zijlstra 
211212593b74SBart Van Assche 	print_lock_trace(forwards_entry->class->usage_traces[bit2], 1);
21138eddac3fSPeter Zijlstra 
2114681fbec8SPaul E. McKenney 	pr_warn("\nother info that might help us debug this:\n\n");
21158eddac3fSPeter Zijlstra 	print_irq_lock_scenario(backwards_entry, forwards_entry,
21168eddac3fSPeter Zijlstra 				hlock_class(prev), hlock_class(next));
21178eddac3fSPeter Zijlstra 
21188eddac3fSPeter Zijlstra 	lockdep_print_held_locks(curr);
21198eddac3fSPeter Zijlstra 
2120681fbec8SPaul E. McKenney 	pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
212112593b74SBart Van Assche 	prev_root->trace = save_trace();
212212593b74SBart Van Assche 	if (!prev_root->trace)
2123f7c1c6b3SYuyang Du 		return;
21248eddac3fSPeter Zijlstra 	print_shortest_lock_dependencies(backwards_entry, prev_root);
21258eddac3fSPeter Zijlstra 
2126681fbec8SPaul E. McKenney 	pr_warn("\nthe dependencies between the lock to be acquired");
2127681fbec8SPaul E. McKenney 	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
212812593b74SBart Van Assche 	next_root->trace = save_trace();
212912593b74SBart Van Assche 	if (!next_root->trace)
2130f7c1c6b3SYuyang Du 		return;
21318eddac3fSPeter Zijlstra 	print_shortest_lock_dependencies(forwards_entry, next_root);
21328eddac3fSPeter Zijlstra 
2133681fbec8SPaul E. McKenney 	pr_warn("\nstack backtrace:\n");
21348eddac3fSPeter Zijlstra 	dump_stack();
21358eddac3fSPeter Zijlstra }
21368eddac3fSPeter Zijlstra 
21378eddac3fSPeter Zijlstra static const char *state_names[] = {
21388eddac3fSPeter Zijlstra #define LOCKDEP_STATE(__STATE) \
21398eddac3fSPeter Zijlstra 	__stringify(__STATE),
21408eddac3fSPeter Zijlstra #include "lockdep_states.h"
21418eddac3fSPeter Zijlstra #undef LOCKDEP_STATE
21428eddac3fSPeter Zijlstra };
21438eddac3fSPeter Zijlstra 
21448eddac3fSPeter Zijlstra static const char *state_rnames[] = {
21458eddac3fSPeter Zijlstra #define LOCKDEP_STATE(__STATE) \
21468eddac3fSPeter Zijlstra 	__stringify(__STATE)"-READ",
21478eddac3fSPeter Zijlstra #include "lockdep_states.h"
21488eddac3fSPeter Zijlstra #undef LOCKDEP_STATE
21498eddac3fSPeter Zijlstra };
21508eddac3fSPeter Zijlstra 
21518eddac3fSPeter Zijlstra static inline const char *state_name(enum lock_usage_bit bit)
21528eddac3fSPeter Zijlstra {
2153c902a1e8SFrederic Weisbecker 	if (bit & LOCK_USAGE_READ_MASK)
2154c902a1e8SFrederic Weisbecker 		return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
2155c902a1e8SFrederic Weisbecker 	else
2156c902a1e8SFrederic Weisbecker 		return state_names[bit >> LOCK_USAGE_DIR_MASK];
21578eddac3fSPeter Zijlstra }
21588eddac3fSPeter Zijlstra 
2159948f8376SFrederic Weisbecker /*
2160948f8376SFrederic Weisbecker  * The bit number is encoded like:
2161948f8376SFrederic Weisbecker  *
2162948f8376SFrederic Weisbecker  *  bit0: 0 exclusive, 1 read lock
2163948f8376SFrederic Weisbecker  *  bit1: 0 used in irq, 1 irq enabled
2164948f8376SFrederic Weisbecker  *  bit2-n: state
2165948f8376SFrederic Weisbecker  */
21668eddac3fSPeter Zijlstra static int exclusive_bit(int new_bit)
21678eddac3fSPeter Zijlstra {
2168bba2a8f1SFrederic Weisbecker 	int state = new_bit & LOCK_USAGE_STATE_MASK;
2169bba2a8f1SFrederic Weisbecker 	int dir = new_bit & LOCK_USAGE_DIR_MASK;
21708eddac3fSPeter Zijlstra 
21718eddac3fSPeter Zijlstra 	/*
21728eddac3fSPeter Zijlstra 	 * keep state, bit flip the direction and strip read.
21738eddac3fSPeter Zijlstra 	 */
2174bba2a8f1SFrederic Weisbecker 	return state | (dir ^ LOCK_USAGE_DIR_MASK);
21758eddac3fSPeter Zijlstra }
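
/*
 * Worked example (a sketch under the same assumed encoding as above):
 *
 *   exclusive_bit(LOCK_USED_IN_HARDIRQ_READ)	   bit value 1
 *	state = 1 & LOCK_USAGE_STATE_MASK	== 0
 *	dir   = 1 & LOCK_USAGE_DIR_MASK		== 0
 *	result: 0 | (0 ^ 2) == 2 == LOCK_ENABLED_HARDIRQ
 *
 * i.e. the exclusive counterpart of "used in hardirq as a read lock" is
 * "acquired as a write lock with hardirqs enabled".
 */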
21768eddac3fSPeter Zijlstra 
2177948f8376SFrederic Weisbecker /*
2178948f8376SFrederic Weisbecker  * Observe that when given a bitmask where each bitnr is encoded as above, a
2179948f8376SFrederic Weisbecker  * right shift of the mask transforms the individual bitnrs by -1 and,
2180948f8376SFrederic Weisbecker  * conversely, a left shift transforms them by +1.
2181948f8376SFrederic Weisbecker  *
2182948f8376SFrederic Weisbecker  * So for all bits whose number have LOCK_ENABLED_* set (bitnr1 == 1), we can
2183948f8376SFrederic Weisbecker  * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0)
2184948f8376SFrederic Weisbecker  * instead by subtracting the bit number by 2, or shifting the mask right by 2.
2185948f8376SFrederic Weisbecker  *
2186948f8376SFrederic Weisbecker  * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2.
2187948f8376SFrederic Weisbecker  *
2188948f8376SFrederic Weisbecker  * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is
2189948f8376SFrederic Weisbecker  * all bits set) and recompose with bitnr1 flipped.
2190948f8376SFrederic Weisbecker  */
2191948f8376SFrederic Weisbecker static unsigned long invert_dir_mask(unsigned long mask)
21928eddac3fSPeter Zijlstra {
2193948f8376SFrederic Weisbecker 	unsigned long excl = 0;
21948eddac3fSPeter Zijlstra 
2195948f8376SFrederic Weisbecker 	/* Invert dir */
2196948f8376SFrederic Weisbecker 	excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
2197948f8376SFrederic Weisbecker 	excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
21988eddac3fSPeter Zijlstra 
2199948f8376SFrederic Weisbecker 	return excl;
22008eddac3fSPeter Zijlstra }
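
/*
 * For instance (assuming the usual LOCKF_* flags, where LOCKF_FOO is
 * (1 << LOCK_FOO)): a mask containing only LOCKF_USED_IN_SOFTIRQ (bit 4)
 * is shifted left by 2 and becomes LOCKF_ENABLED_SOFTIRQ (bit 6), while
 * LOCKF_ENABLED_HARDIRQ (bit 2) goes the other way and becomes
 * LOCKF_USED_IN_HARDIRQ (bit 0).
 */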
22018eddac3fSPeter Zijlstra 
2202948f8376SFrederic Weisbecker /*
2203948f8376SFrederic Weisbecker  * As above, we clear bitnr0 (LOCK_*_READ off) with bitmask ops. First, for all
2204948f8376SFrederic Weisbecker  * bits with bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*).
2205948f8376SFrederic Weisbecker  * And then mask out all bitnr0.
2206948f8376SFrederic Weisbecker  */
2207948f8376SFrederic Weisbecker static unsigned long exclusive_mask(unsigned long mask)
2208948f8376SFrederic Weisbecker {
2209948f8376SFrederic Weisbecker 	unsigned long excl = invert_dir_mask(mask);
2210948f8376SFrederic Weisbecker 
2211948f8376SFrederic Weisbecker 	/* Strip read */
2212948f8376SFrederic Weisbecker 	excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2213948f8376SFrederic Weisbecker 	excl &= ~LOCKF_IRQ_READ;
2214948f8376SFrederic Weisbecker 
2215948f8376SFrederic Weisbecker 	return excl;
2216948f8376SFrederic Weisbecker }
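
/*
 * E.g. (same assumed LOCKF_* layout): exclusive_mask(LOCKF_USED_IN_HARDIRQ_READ)
 * first becomes LOCKF_ENABLED_HARDIRQ_READ via invert_dir_mask() and is then
 * folded onto the non-read bit, leaving LOCKF_ENABLED_HARDIRQ - the usage we
 * must not find in the forwards subgraph in check_irq_usage() below.
 */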
2217948f8376SFrederic Weisbecker 
2218948f8376SFrederic Weisbecker /*
2219948f8376SFrederic Weisbecker  * Retrieve the _possible_ original mask to which @mask is
2220948f8376SFrederic Weisbecker  * exclusive. Ie: this is the opposite of exclusive_mask().
2221948f8376SFrederic Weisbecker  * exclusive. I.e. this is the opposite of exclusive_mask().
2222948f8376SFrederic Weisbecker  * bit: one has LOCK_USAGE_READ_MASK set, the other has it
2223948f8376SFrederic Weisbecker  * cleared. So both are returned for each exclusive bit.
2224948f8376SFrederic Weisbecker  */
2225948f8376SFrederic Weisbecker static unsigned long original_mask(unsigned long mask)
2226948f8376SFrederic Weisbecker {
2227948f8376SFrederic Weisbecker 	unsigned long excl = invert_dir_mask(mask);
2228948f8376SFrederic Weisbecker 
2229948f8376SFrederic Weisbecker 	/* Include read in existing usages */
2230948f8376SFrederic Weisbecker 	excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2231948f8376SFrederic Weisbecker 
2232948f8376SFrederic Weisbecker 	return excl;
2233948f8376SFrederic Weisbecker }
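
/*
 * E.g. (same assumed LOCKF_* layout): original_mask(LOCKF_ENABLED_HARDIRQ)
 * yields LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_HARDIRQ_READ, since either a
 * read or a write use in hardirq context could have produced that exclusive
 * bit.
 */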
2234948f8376SFrederic Weisbecker 
2235948f8376SFrederic Weisbecker /*
2236948f8376SFrederic Weisbecker  * Find the first pair of matching bits between an original
2237948f8376SFrederic Weisbecker  * usage mask and an exclusive usage mask.
2238948f8376SFrederic Weisbecker  */
2239948f8376SFrederic Weisbecker static int find_exclusive_match(unsigned long mask,
2240948f8376SFrederic Weisbecker 				unsigned long excl_mask,
2241948f8376SFrederic Weisbecker 				enum lock_usage_bit *bitp,
2242948f8376SFrederic Weisbecker 				enum lock_usage_bit *excl_bitp)
2243948f8376SFrederic Weisbecker {
2244948f8376SFrederic Weisbecker 	int bit, excl;
2245948f8376SFrederic Weisbecker 
2246948f8376SFrederic Weisbecker 	for_each_set_bit(bit, &mask, LOCK_USED) {
2247948f8376SFrederic Weisbecker 		excl = exclusive_bit(bit);
2248948f8376SFrederic Weisbecker 		if (excl_mask & lock_flag(excl)) {
2249948f8376SFrederic Weisbecker 			*bitp = bit;
2250948f8376SFrederic Weisbecker 			*excl_bitp = excl;
2251948f8376SFrederic Weisbecker 			return 0;
2252948f8376SFrederic Weisbecker 		}
2253948f8376SFrederic Weisbecker 	}
2254948f8376SFrederic Weisbecker 	return -1;
2255948f8376SFrederic Weisbecker }
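
/*
 * E.g. if @mask has the LOCK_USED_IN_SOFTIRQ bit set and @excl_mask has the
 * LOCK_ENABLED_SOFTIRQ bit set, the pair (LOCK_USED_IN_SOFTIRQ,
 * LOCK_ENABLED_SOFTIRQ) is returned through @bitp/@excl_bitp.
 */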
2256948f8376SFrederic Weisbecker 
2257948f8376SFrederic Weisbecker /*
2258948f8376SFrederic Weisbecker  * Prove that the new dependency does not connect a hardirq-safe(-read)
2259948f8376SFrederic Weisbecker  * lock with a hardirq-unsafe lock - to achieve this we search
2260948f8376SFrederic Weisbecker  * the backwards-subgraph starting at <prev>, and the
2261948f8376SFrederic Weisbecker  * forwards-subgraph starting at <next>:
2262948f8376SFrederic Weisbecker  */
2263948f8376SFrederic Weisbecker static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
22648eddac3fSPeter Zijlstra 			   struct held_lock *next)
22658eddac3fSPeter Zijlstra {
2266948f8376SFrederic Weisbecker 	unsigned long usage_mask = 0, forward_mask, backward_mask;
2267948f8376SFrederic Weisbecker 	enum lock_usage_bit forward_bit = 0, backward_bit = 0;
22683f649ab7SKees Cook 	struct lock_list *target_entry1;
22693f649ab7SKees Cook 	struct lock_list *target_entry;
2270948f8376SFrederic Weisbecker 	struct lock_list this, that;
2271*b11be024SBoqun Feng 	enum bfs_result ret;
22728eddac3fSPeter Zijlstra 
2273948f8376SFrederic Weisbecker 	/*
2274948f8376SFrederic Weisbecker 	 * Step 1: gather all hard/soft IRQs usages backward in an
2275948f8376SFrederic Weisbecker 	 * accumulated usage mask.
2276948f8376SFrederic Weisbecker 	 */
2277948f8376SFrederic Weisbecker 	this.parent = NULL;
2278948f8376SFrederic Weisbecker 	this.class = hlock_class(prev);
2279948f8376SFrederic Weisbecker 
2280948f8376SFrederic Weisbecker 	ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL);
2281*b11be024SBoqun Feng 	if (bfs_error(ret)) {
2282f7c1c6b3SYuyang Du 		print_bfs_bug(ret);
2283f7c1c6b3SYuyang Du 		return 0;
2284f7c1c6b3SYuyang Du 	}
2285948f8376SFrederic Weisbecker 
2286948f8376SFrederic Weisbecker 	usage_mask &= LOCKF_USED_IN_IRQ_ALL;
2287948f8376SFrederic Weisbecker 	if (!usage_mask)
22888eddac3fSPeter Zijlstra 		return 1;
2289948f8376SFrederic Weisbecker 
2290948f8376SFrederic Weisbecker 	/*
2291948f8376SFrederic Weisbecker 	 * Step 2: find exclusive uses forward that match the previous
2292948f8376SFrederic Weisbecker 	 * backward accumulated mask.
2293948f8376SFrederic Weisbecker 	 */
2294948f8376SFrederic Weisbecker 	forward_mask = exclusive_mask(usage_mask);
2295948f8376SFrederic Weisbecker 
2296948f8376SFrederic Weisbecker 	that.parent = NULL;
2297948f8376SFrederic Weisbecker 	that.class = hlock_class(next);
2298948f8376SFrederic Weisbecker 
2299948f8376SFrederic Weisbecker 	ret = find_usage_forwards(&that, forward_mask, &target_entry1);
2300*b11be024SBoqun Feng 	if (bfs_error(ret)) {
2301f7c1c6b3SYuyang Du 		print_bfs_bug(ret);
2302f7c1c6b3SYuyang Du 		return 0;
2303f7c1c6b3SYuyang Du 	}
2304*b11be024SBoqun Feng 	if (ret == BFS_RNOMATCH)
2305*b11be024SBoqun Feng 		return 1;
2306948f8376SFrederic Weisbecker 
2307948f8376SFrederic Weisbecker 	/*
2308948f8376SFrederic Weisbecker 	 * Step 3: we found a bad match! Now retrieve a lock from the backward
2309948f8376SFrederic Weisbecker 	 * list whose usage mask matches the exclusive usage mask from the
2310948f8376SFrederic Weisbecker 	 * lock found on the forward list.
2311948f8376SFrederic Weisbecker 	 */
2312948f8376SFrederic Weisbecker 	backward_mask = original_mask(target_entry1->class->usage_mask);
2313948f8376SFrederic Weisbecker 
2314948f8376SFrederic Weisbecker 	ret = find_usage_backwards(&this, backward_mask, &target_entry);
2315*b11be024SBoqun Feng 	if (bfs_error(ret)) {
2316f7c1c6b3SYuyang Du 		print_bfs_bug(ret);
2317f7c1c6b3SYuyang Du 		return 0;
2318f7c1c6b3SYuyang Du 	}
2319*b11be024SBoqun Feng 	if (DEBUG_LOCKS_WARN_ON(ret == BFS_RNOMATCH))
2320948f8376SFrederic Weisbecker 		return 1;
2321948f8376SFrederic Weisbecker 
2322948f8376SFrederic Weisbecker 	/*
2323948f8376SFrederic Weisbecker 	 * Step 4: narrow down to a pair of incompatible usage bits
2324948f8376SFrederic Weisbecker 	 * and report it.
2325948f8376SFrederic Weisbecker 	 */
2326948f8376SFrederic Weisbecker 	ret = find_exclusive_match(target_entry->class->usage_mask,
2327948f8376SFrederic Weisbecker 				   target_entry1->class->usage_mask,
2328948f8376SFrederic Weisbecker 				   &backward_bit, &forward_bit);
2329948f8376SFrederic Weisbecker 	if (DEBUG_LOCKS_WARN_ON(ret == -1))
2330948f8376SFrederic Weisbecker 		return 1;
2331948f8376SFrederic Weisbecker 
2332f7c1c6b3SYuyang Du 	print_bad_irq_dependency(curr, &this, &that,
2333948f8376SFrederic Weisbecker 				 target_entry, target_entry1,
2334948f8376SFrederic Weisbecker 				 prev, next,
2335948f8376SFrederic Weisbecker 				 backward_bit, forward_bit,
2336948f8376SFrederic Weisbecker 				 state_name(backward_bit));
2337f7c1c6b3SYuyang Du 
2338f7c1c6b3SYuyang Du 	return 0;
23398eddac3fSPeter Zijlstra }
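
/*
 * Illustrative scenario (a sketch, not taken from a real report): class A was
 * once acquired in a hardirq handler (LOCK_USED_IN_HARDIRQ) and class B is
 * acquired with hardirqs enabled (LOCK_ENABLED_HARDIRQ). If a new prev -> next
 * dependency makes A reachable backwards from prev and B reachable forwards
 * from next, step 1 accumulates LOCKF_USED_IN_HARDIRQ into usage_mask, step 2
 * finds B through the exclusive mask, step 3 re-finds A through the original
 * mask, and step 4 pins down the (LOCK_USED_IN_HARDIRQ, LOCK_ENABLED_HARDIRQ)
 * pair that print_bad_irq_dependency() then reports.
 */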
23408eddac3fSPeter Zijlstra 
23418eddac3fSPeter Zijlstra #else
23428eddac3fSPeter Zijlstra 
2343948f8376SFrederic Weisbecker static inline int check_irq_usage(struct task_struct *curr,
2344948f8376SFrederic Weisbecker 				  struct held_lock *prev, struct held_lock *next)
23458eddac3fSPeter Zijlstra {
23468eddac3fSPeter Zijlstra 	return 1;
23478eddac3fSPeter Zijlstra }
2348b3b9c187SWaiman Long #endif /* CONFIG_TRACE_IRQFLAGS */
23498eddac3fSPeter Zijlstra 
2350b3b9c187SWaiman Long static void inc_chains(int irq_context)
23518eddac3fSPeter Zijlstra {
2352b3b9c187SWaiman Long 	if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
2353b3b9c187SWaiman Long 		nr_hardirq_chains++;
2354b3b9c187SWaiman Long 	else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
2355b3b9c187SWaiman Long 		nr_softirq_chains++;
2356b3b9c187SWaiman Long 	else
23578eddac3fSPeter Zijlstra 		nr_process_chains++;
23588eddac3fSPeter Zijlstra }
23598eddac3fSPeter Zijlstra 
2360b3b9c187SWaiman Long static void dec_chains(int irq_context)
2361b3b9c187SWaiman Long {
2362b3b9c187SWaiman Long 	if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
2363b3b9c187SWaiman Long 		nr_hardirq_chains--;
2364b3b9c187SWaiman Long 	else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
2365b3b9c187SWaiman Long 		nr_softirq_chains--;
2366b3b9c187SWaiman Long 	else
2367b3b9c187SWaiman Long 		nr_process_chains--;
2368b3b9c187SWaiman Long }
23698eddac3fSPeter Zijlstra 
23708eddac3fSPeter Zijlstra static void
2371f7c1c6b3SYuyang Du print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
23728eddac3fSPeter Zijlstra {
23738eddac3fSPeter Zijlstra 	struct lock_class *next = hlock_class(nxt);
23748eddac3fSPeter Zijlstra 	struct lock_class *prev = hlock_class(prv);
23758eddac3fSPeter Zijlstra 
23768eddac3fSPeter Zijlstra 	printk(" Possible unsafe locking scenario:\n\n");
23778eddac3fSPeter Zijlstra 	printk("       CPU0\n");
23788eddac3fSPeter Zijlstra 	printk("       ----\n");
23798eddac3fSPeter Zijlstra 	printk("  lock(");
23808eddac3fSPeter Zijlstra 	__print_lock_name(prev);
2381f943fe0fSDmitry Vyukov 	printk(KERN_CONT ");\n");
23828eddac3fSPeter Zijlstra 	printk("  lock(");
23838eddac3fSPeter Zijlstra 	__print_lock_name(next);
2384f943fe0fSDmitry Vyukov 	printk(KERN_CONT ");\n");
23858eddac3fSPeter Zijlstra 	printk("\n *** DEADLOCK ***\n\n");
23868eddac3fSPeter Zijlstra 	printk(" May be due to missing lock nesting notation\n\n");
23878eddac3fSPeter Zijlstra }
23888eddac3fSPeter Zijlstra 
2389f7c1c6b3SYuyang Du static void
23908eddac3fSPeter Zijlstra print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
23918eddac3fSPeter Zijlstra 		   struct held_lock *next)
23928eddac3fSPeter Zijlstra {
23938eddac3fSPeter Zijlstra 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2394f7c1c6b3SYuyang Du 		return;
23958eddac3fSPeter Zijlstra 
2396681fbec8SPaul E. McKenney 	pr_warn("\n");
2397a5dd63efSPaul E. McKenney 	pr_warn("============================================\n");
2398a5dd63efSPaul E. McKenney 	pr_warn("WARNING: possible recursive locking detected\n");
23998eddac3fSPeter Zijlstra 	print_kernel_ident();
2400a5dd63efSPaul E. McKenney 	pr_warn("--------------------------------------------\n");
2401681fbec8SPaul E. McKenney 	pr_warn("%s/%d is trying to acquire lock:\n",
24028eddac3fSPeter Zijlstra 		curr->comm, task_pid_nr(curr));
24038eddac3fSPeter Zijlstra 	print_lock(next);
2404681fbec8SPaul E. McKenney 	pr_warn("\nbut task is already holding lock:\n");
24058eddac3fSPeter Zijlstra 	print_lock(prev);
24068eddac3fSPeter Zijlstra 
2407681fbec8SPaul E. McKenney 	pr_warn("\nother info that might help us debug this:\n");
24088eddac3fSPeter Zijlstra 	print_deadlock_scenario(next, prev);
24098eddac3fSPeter Zijlstra 	lockdep_print_held_locks(curr);
24108eddac3fSPeter Zijlstra 
2411681fbec8SPaul E. McKenney 	pr_warn("\nstack backtrace:\n");
24128eddac3fSPeter Zijlstra 	dump_stack();
24138eddac3fSPeter Zijlstra }
24148eddac3fSPeter Zijlstra 
24158eddac3fSPeter Zijlstra /*
24168eddac3fSPeter Zijlstra  * Check whether we are holding such a class already.
24178eddac3fSPeter Zijlstra  *
24188eddac3fSPeter Zijlstra  * (Note that this has to be done separately, because the graph cannot
24198eddac3fSPeter Zijlstra  * detect such classes of deadlocks.)
24208eddac3fSPeter Zijlstra  *
24218eddac3fSPeter Zijlstra  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
24228eddac3fSPeter Zijlstra  */
24238eddac3fSPeter Zijlstra static int
24244609c4f9SYuyang Du check_deadlock(struct task_struct *curr, struct held_lock *next)
24258eddac3fSPeter Zijlstra {
24268eddac3fSPeter Zijlstra 	struct held_lock *prev;
24278eddac3fSPeter Zijlstra 	struct held_lock *nest = NULL;
24288eddac3fSPeter Zijlstra 	int i;
24298eddac3fSPeter Zijlstra 
24308eddac3fSPeter Zijlstra 	for (i = 0; i < curr->lockdep_depth; i++) {
24318eddac3fSPeter Zijlstra 		prev = curr->held_locks + i;
24328eddac3fSPeter Zijlstra 
24338eddac3fSPeter Zijlstra 		if (prev->instance == next->nest_lock)
24348eddac3fSPeter Zijlstra 			nest = prev;
24358eddac3fSPeter Zijlstra 
24368eddac3fSPeter Zijlstra 		if (hlock_class(prev) != hlock_class(next))
24378eddac3fSPeter Zijlstra 			continue;
24388eddac3fSPeter Zijlstra 
24398eddac3fSPeter Zijlstra 		/*
24408eddac3fSPeter Zijlstra 		 * Allow read-after-read recursion of the same
24418eddac3fSPeter Zijlstra 		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
24428eddac3fSPeter Zijlstra 		 */
24434609c4f9SYuyang Du 		if ((next->read == 2) && prev->read)
24448eddac3fSPeter Zijlstra 			return 2;
24458eddac3fSPeter Zijlstra 
24468eddac3fSPeter Zijlstra 		/*
24478eddac3fSPeter Zijlstra 		 * We're holding the nest_lock, which serializes this lock's
24488eddac3fSPeter Zijlstra 		 * nesting behaviour.
24498eddac3fSPeter Zijlstra 		 */
24508eddac3fSPeter Zijlstra 		if (nest)
24518eddac3fSPeter Zijlstra 			return 2;
24528eddac3fSPeter Zijlstra 
2453f7c1c6b3SYuyang Du 		print_deadlock_bug(curr, prev, next);
2454f7c1c6b3SYuyang Du 		return 0;
24558eddac3fSPeter Zijlstra 	}
24568eddac3fSPeter Zijlstra 	return 1;
24578eddac3fSPeter Zijlstra }
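
/*
 * Informal examples of the cases above (a sketch, not tied to a particular
 * caller): read_lock(&lock) taken twice in a row on the same rwlock class
 * returns 2 and is allowed, whereas spin_lock(&lock) twice on the same
 * spinlock class - without a nest_lock annotation such as the one
 * mutex_lock_nest_lock() provides for mutexes - ends in print_deadlock_bug()
 * and returns 0.
 */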
24588eddac3fSPeter Zijlstra 
24598eddac3fSPeter Zijlstra /*
24608eddac3fSPeter Zijlstra  * There was a chain-cache miss, and we are about to add a new dependency
2461154f185eSYuyang Du  * to a previous lock. We validate the following rules:
24628eddac3fSPeter Zijlstra  *
24638eddac3fSPeter Zijlstra  *  - would the adding of the <prev> -> <next> dependency create a
24648eddac3fSPeter Zijlstra  *    circular dependency in the graph? [== circular deadlock]
24658eddac3fSPeter Zijlstra  *
24668eddac3fSPeter Zijlstra  *  - does the new prev->next dependency connect any hardirq-safe lock
24678eddac3fSPeter Zijlstra  *    (in the full backwards-subgraph starting at <prev>) with any
24688eddac3fSPeter Zijlstra  *    hardirq-unsafe lock (in the full forwards-subgraph starting at
24698eddac3fSPeter Zijlstra  *    <next>)? [== illegal lock inversion with hardirq contexts]
24708eddac3fSPeter Zijlstra  *
24718eddac3fSPeter Zijlstra  *  - does the new prev->next dependency connect any softirq-safe lock
24728eddac3fSPeter Zijlstra  *    (in the full backwards-subgraph starting at <prev>) with any
24738eddac3fSPeter Zijlstra  *    softirq-unsafe lock (in the full forwards-subgraph starting at
24748eddac3fSPeter Zijlstra  *    <next>)? [== illegal lock inversion with softirq contexts]
24758eddac3fSPeter Zijlstra  *
24768eddac3fSPeter Zijlstra  * any of these scenarios could lead to a deadlock.
24778eddac3fSPeter Zijlstra  *
24788eddac3fSPeter Zijlstra  * Then if all the validations pass, we add the forwards and backwards
24798eddac3fSPeter Zijlstra  * dependency.
24808eddac3fSPeter Zijlstra  */
24818eddac3fSPeter Zijlstra static int
24828eddac3fSPeter Zijlstra check_prev_add(struct task_struct *curr, struct held_lock *prev,
248312593b74SBart Van Assche 	       struct held_lock *next, int distance,
248412593b74SBart Van Assche 	       struct lock_trace **const trace)
24858eddac3fSPeter Zijlstra {
24868b405d5cSPeter Zijlstra 	struct lock_list *entry;
2487*b11be024SBoqun Feng 	enum bfs_result ret;
24888eddac3fSPeter Zijlstra 
2489a0b0fd53SBart Van Assche 	if (!hlock_class(prev)->key || !hlock_class(next)->key) {
2490a0b0fd53SBart Van Assche 		/*
2491a0b0fd53SBart Van Assche 		 * The warning statements below may trigger a use-after-free
2492a0b0fd53SBart Van Assche 		 * of the class name. It is better to trigger a use-after-free
2493a0b0fd53SBart Van Assche 		 * and to have the class name most of the time instead of not
2494a0b0fd53SBart Van Assche 		 * having the class name available.
2495a0b0fd53SBart Van Assche 		 */
2496a0b0fd53SBart Van Assche 		WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
2497a0b0fd53SBart Van Assche 			  "Detected use-after-free of lock class %px/%s\n",
2498a0b0fd53SBart Van Assche 			  hlock_class(prev),
2499a0b0fd53SBart Van Assche 			  hlock_class(prev)->name);
2500a0b0fd53SBart Van Assche 		WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key,
2501a0b0fd53SBart Van Assche 			  "Detected use-after-free of lock class %px/%s\n",
2502a0b0fd53SBart Van Assche 			  hlock_class(next),
2503a0b0fd53SBart Van Assche 			  hlock_class(next)->name);
2504a0b0fd53SBart Van Assche 		return 2;
2505a0b0fd53SBart Van Assche 	}
2506a0b0fd53SBart Van Assche 
25078eddac3fSPeter Zijlstra 	/*
25088eddac3fSPeter Zijlstra 	 * Prove that the new <prev> -> <next> dependency would not
25098eddac3fSPeter Zijlstra 	 * create a circular dependency in the graph. (We do this by
2510154f185eSYuyang Du 	 * a breadth-first search into the graph starting at <next>,
2511154f185eSYuyang Du 	 * and check whether we can reach <prev>.)
25128eddac3fSPeter Zijlstra 	 *
2513154f185eSYuyang Du 	 * The search is limited by the size of the circular queue (i.e.,
2514154f185eSYuyang Du 	 * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
2515154f185eSYuyang Du 	 * in the graph whose neighbours are to be checked.
25168eddac3fSPeter Zijlstra 	 */
25178c2c2b44SYuyang Du 	ret = check_noncircular(next, prev, trace);
2518*b11be024SBoqun Feng 	if (unlikely(bfs_error(ret) || ret == BFS_RMATCH))
2519f7c1c6b3SYuyang Du 		return 0;
25208eddac3fSPeter Zijlstra 
2521948f8376SFrederic Weisbecker 	if (!check_irq_usage(curr, prev, next))
25228eddac3fSPeter Zijlstra 		return 0;
25238eddac3fSPeter Zijlstra 
25248eddac3fSPeter Zijlstra 	/*
25258eddac3fSPeter Zijlstra 	 * For recursive read-locks we do all the dependency checks,
25268eddac3fSPeter Zijlstra 	 * but we don't store read-triggered dependencies (only
25278eddac3fSPeter Zijlstra 	 * write-triggered dependencies). This ensures that only the
25288eddac3fSPeter Zijlstra 	 * write-side dependencies matter, and that if for example a
25298eddac3fSPeter Zijlstra 	 * write-lock never takes any other locks, then the reads are
25308eddac3fSPeter Zijlstra 	 * equivalent to a NOP.
25318eddac3fSPeter Zijlstra 	 */
25328eddac3fSPeter Zijlstra 	if (next->read == 2 || prev->read == 2)
25338eddac3fSPeter Zijlstra 		return 1;
25348eddac3fSPeter Zijlstra 	/*
25358eddac3fSPeter Zijlstra 	 * Is the <prev> -> <next> dependency already present?
25368eddac3fSPeter Zijlstra 	 *
25378eddac3fSPeter Zijlstra 	 * (this may occur even though this is a new chain: consider
25388eddac3fSPeter Zijlstra 	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
25398eddac3fSPeter Zijlstra 	 *  chains - the second one will be new, but L1 already has
25408eddac3fSPeter Zijlstra 	 *  L2 added to its dependency list, due to the first chain.)
25418eddac3fSPeter Zijlstra 	 */
25428eddac3fSPeter Zijlstra 	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
25438eddac3fSPeter Zijlstra 		if (entry->class == hlock_class(next)) {
25448eddac3fSPeter Zijlstra 			if (distance == 1)
25458eddac3fSPeter Zijlstra 				entry->distance = 1;
254670911fdcSByungchul Park 			return 1;
25478eddac3fSPeter Zijlstra 		}
25488eddac3fSPeter Zijlstra 	}
25498eddac3fSPeter Zijlstra 
255068e9dc29SYuyang Du #ifdef CONFIG_LOCKDEP_SMALL
2551ae813308SPeter Zijlstra 	/*
2552ae813308SPeter Zijlstra 	 * Is the <prev> -> <next> link redundant?
2553ae813308SPeter Zijlstra 	 */
25548c2c2b44SYuyang Du 	ret = check_redundant(prev, next);
2555*b11be024SBoqun Feng 	if (bfs_error(ret))
2556*b11be024SBoqun Feng 		return 0;
2557*b11be024SBoqun Feng 	else if (ret == BFS_RMATCH)
2558*b11be024SBoqun Feng 		return 2;
255968e9dc29SYuyang Du #endif
2560ae813308SPeter Zijlstra 
256112593b74SBart Van Assche 	if (!*trace) {
256212593b74SBart Van Assche 		*trace = save_trace();
256312593b74SBart Van Assche 		if (!*trace)
25648eddac3fSPeter Zijlstra 			return 0;
256512593b74SBart Van Assche 	}
25668eddac3fSPeter Zijlstra 
25678eddac3fSPeter Zijlstra 	/*
25688eddac3fSPeter Zijlstra 	 * Ok, all validations passed, add the new lock
25698eddac3fSPeter Zijlstra 	 * to the previous lock's dependency list:
25708eddac3fSPeter Zijlstra 	 */
257186cffb80SBart Van Assche 	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
25728eddac3fSPeter Zijlstra 			       &hlock_class(prev)->locks_after,
257312593b74SBart Van Assche 			       next->acquire_ip, distance, *trace);
25748eddac3fSPeter Zijlstra 
25758eddac3fSPeter Zijlstra 	if (!ret)
25768eddac3fSPeter Zijlstra 		return 0;
25778eddac3fSPeter Zijlstra 
257886cffb80SBart Van Assche 	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
25798eddac3fSPeter Zijlstra 			       &hlock_class(next)->locks_before,
258012593b74SBart Van Assche 			       next->acquire_ip, distance, *trace);
25818eddac3fSPeter Zijlstra 	if (!ret)
25828eddac3fSPeter Zijlstra 		return 0;
25838eddac3fSPeter Zijlstra 
258470911fdcSByungchul Park 	return 2;
25858eddac3fSPeter Zijlstra }
25868eddac3fSPeter Zijlstra 
25878eddac3fSPeter Zijlstra /*
25888eddac3fSPeter Zijlstra  * Add the dependency to all directly-previous locks that are 'relevant'.
25898eddac3fSPeter Zijlstra  * The ones that are relevant are (in increasing distance from curr):
25908eddac3fSPeter Zijlstra  * all consecutive trylock entries and the final non-trylock entry - or
25918eddac3fSPeter Zijlstra  * the end of this context's lock-chain - whichever comes first.
25928eddac3fSPeter Zijlstra  */
25938eddac3fSPeter Zijlstra static int
25948eddac3fSPeter Zijlstra check_prevs_add(struct task_struct *curr, struct held_lock *next)
25958eddac3fSPeter Zijlstra {
259612593b74SBart Van Assche 	struct lock_trace *trace = NULL;
25978eddac3fSPeter Zijlstra 	int depth = curr->lockdep_depth;
25988eddac3fSPeter Zijlstra 	struct held_lock *hlock;
25998eddac3fSPeter Zijlstra 
26008eddac3fSPeter Zijlstra 	/*
26018eddac3fSPeter Zijlstra 	 * Debugging checks.
26028eddac3fSPeter Zijlstra 	 *
26038eddac3fSPeter Zijlstra 	 * Depth must not be zero for a non-head lock:
26048eddac3fSPeter Zijlstra 	 */
26058eddac3fSPeter Zijlstra 	if (!depth)
26068eddac3fSPeter Zijlstra 		goto out_bug;
26078eddac3fSPeter Zijlstra 	/*
26088eddac3fSPeter Zijlstra 	 * At least two relevant locks must exist for this
26098eddac3fSPeter Zijlstra 	 * to be a head:
26108eddac3fSPeter Zijlstra 	 */
26118eddac3fSPeter Zijlstra 	if (curr->held_locks[depth].irq_context !=
26128eddac3fSPeter Zijlstra 			curr->held_locks[depth-1].irq_context)
26138eddac3fSPeter Zijlstra 		goto out_bug;
26148eddac3fSPeter Zijlstra 
26158eddac3fSPeter Zijlstra 	for (;;) {
26168eddac3fSPeter Zijlstra 		int distance = curr->lockdep_depth - depth + 1;
26178eddac3fSPeter Zijlstra 		hlock = curr->held_locks + depth - 1;
2618e966eaeeSIngo Molnar 
2619b09be676SByungchul Park 		/*
26208eddac3fSPeter Zijlstra 		 * Only non-recursive-read entries get new dependencies
26218eddac3fSPeter Zijlstra 		 * added:
26228eddac3fSPeter Zijlstra 		 */
26231b5ff816SOleg Nesterov 		if (hlock->read != 2 && hlock->check) {
262476b14436SThomas Gleixner 			int ret = check_prev_add(curr, hlock, next, distance,
262576b14436SThomas Gleixner 						 &trace);
2626ce07a941SByungchul Park 			if (!ret)
26278eddac3fSPeter Zijlstra 				return 0;
2628ce07a941SByungchul Park 
2629ce07a941SByungchul Park 			/*
26308eddac3fSPeter Zijlstra 			 * Stop after the first non-trylock entry,
26318eddac3fSPeter Zijlstra 			 * as non-trylock entries have added their
26328eddac3fSPeter Zijlstra 			 * own direct dependencies already, so this
26338eddac3fSPeter Zijlstra 			 * lock is connected to them indirectly:
26348eddac3fSPeter Zijlstra 			 */
26358eddac3fSPeter Zijlstra 			if (!hlock->trylock)
26368eddac3fSPeter Zijlstra 				break;
26378eddac3fSPeter Zijlstra 		}
2638e966eaeeSIngo Molnar 
26398eddac3fSPeter Zijlstra 		depth--;
26408eddac3fSPeter Zijlstra 		/*
26418eddac3fSPeter Zijlstra 		 * End of lock-stack?
26428eddac3fSPeter Zijlstra 		 */
26438eddac3fSPeter Zijlstra 		if (!depth)
26448eddac3fSPeter Zijlstra 			break;
26458eddac3fSPeter Zijlstra 		/*
26468eddac3fSPeter Zijlstra 		 * Stop the search if we cross into another context:
26478eddac3fSPeter Zijlstra 		 */
26488eddac3fSPeter Zijlstra 		if (curr->held_locks[depth].irq_context !=
26498eddac3fSPeter Zijlstra 				curr->held_locks[depth-1].irq_context)
26508eddac3fSPeter Zijlstra 			break;
26518eddac3fSPeter Zijlstra 	}
26528eddac3fSPeter Zijlstra 	return 1;
26538eddac3fSPeter Zijlstra out_bug:
26548eddac3fSPeter Zijlstra 	if (!debug_locks_off_graph_unlock())
26558eddac3fSPeter Zijlstra 		return 0;
26568eddac3fSPeter Zijlstra 
26578eddac3fSPeter Zijlstra 	/*
26588eddac3fSPeter Zijlstra 	 * Clearly we all shouldn't be here, but since we made it we
26598eddac3fSPeter Zijlstra 	 * can reliably say we messed up our state. See the above two
26608eddac3fSPeter Zijlstra 	 * gotos for reasons why we could possibly end up here.
26618eddac3fSPeter Zijlstra 	 */
26628eddac3fSPeter Zijlstra 	WARN_ON(1);
26638eddac3fSPeter Zijlstra 
26648eddac3fSPeter Zijlstra 	return 0;
26658eddac3fSPeter Zijlstra }
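
/*
 * For example (a sketch): with held locks ... X, T1, T2, where T1 and T2 are
 * trylocks and X is not, acquiring N in the same irq context adds the direct
 * dependencies T2 -> N, T1 -> N and X -> N, then stops at X because X's own
 * earlier dependencies already connect everything before it to N indirectly.
 */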
26668eddac3fSPeter Zijlstra 
26678eddac3fSPeter Zijlstra struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
2668de4643a7SBart Van Assche static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
26698eddac3fSPeter Zijlstra static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
2670797b82ebSWaiman Long unsigned long nr_zapped_lock_chains;
2671810507feSWaiman Long unsigned int nr_free_chain_hlocks;	/* Free chain_hlocks in buckets */
2672810507feSWaiman Long unsigned int nr_lost_chain_hlocks;	/* Lost chain_hlocks */
2673810507feSWaiman Long unsigned int nr_large_chain_blocks;	/* size > MAX_CHAIN_BUCKETS */
2674810507feSWaiman Long 
2675810507feSWaiman Long /*
2676810507feSWaiman Long  * The first 2 chain_hlocks entries in the chain block in the bucket
2677810507feSWaiman Long  * list contain the following metadata:
2678810507feSWaiman Long  *
2679810507feSWaiman Long  *   entry[0]:
2680810507feSWaiman Long  *     Bit    15 - always set to 1 (it is not a class index)
2681810507feSWaiman Long  *     Bits 0-14 - upper 15 bits of the next block index
2682810507feSWaiman Long  *   entry[1]    - lower 16 bits of next block index
2683810507feSWaiman Long  *
2684810507feSWaiman Long  * A next block index of all 1 bits means it is the end of the list.
2685810507feSWaiman Long  *
2686810507feSWaiman Long  * On the variable-sized bucket (bucket-0), the 3rd and 4th entries contain
2687810507feSWaiman Long  * the chain block size:
2688810507feSWaiman Long  *
2689810507feSWaiman Long  *   entry[2] - upper 16 bits of the chain block size
2690810507feSWaiman Long  *   entry[3] - lower 16 bits of the chain block size
2691810507feSWaiman Long  */
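
/*
 * For example, a free block of 20 entries at offset 100 in the variable-sized
 * bucket, whose next block starts at offset 0x12345, would be encoded as
 * follows (a sketch of the scheme above, not a dump from a running kernel):
 *
 *   chain_hlocks[100] = CHAIN_BLK_FLAG | 0x1;	next >> 16
 *   chain_hlocks[101] = 0x2345;		(u16)next
 *   chain_hlocks[102] = 0;			size >> 16
 *   chain_hlocks[103] = 20;			(u16)size
 */
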
2692810507feSWaiman Long #define MAX_CHAIN_BUCKETS	16
2693810507feSWaiman Long #define CHAIN_BLK_FLAG		(1U << 15)
2694810507feSWaiman Long #define CHAIN_BLK_LIST_END	0xFFFFU
2695810507feSWaiman Long 
2696810507feSWaiman Long static int chain_block_buckets[MAX_CHAIN_BUCKETS];
2697810507feSWaiman Long 
2698810507feSWaiman Long static inline int size_to_bucket(int size)
2699810507feSWaiman Long {
2700810507feSWaiman Long 	if (size > MAX_CHAIN_BUCKETS)
2701810507feSWaiman Long 		return 0;
2702810507feSWaiman Long 
2703810507feSWaiman Long 	return size - 1;
2704810507feSWaiman Long }
2705810507feSWaiman Long 
2706810507feSWaiman Long /*
2707810507feSWaiman Long  * Iterate all the chain blocks in a bucket.
2708810507feSWaiman Long  */
2709810507feSWaiman Long #define for_each_chain_block(bucket, prev, curr)		\
2710810507feSWaiman Long 	for ((prev) = -1, (curr) = chain_block_buckets[bucket];	\
2711810507feSWaiman Long 	     (curr) >= 0;					\
2712810507feSWaiman Long 	     (prev) = (curr), (curr) = chain_block_next(curr))
2713810507feSWaiman Long 
2714810507feSWaiman Long /*
2715810507feSWaiman Long  * next block or -1
2716810507feSWaiman Long  */
2717810507feSWaiman Long static inline int chain_block_next(int offset)
2718810507feSWaiman Long {
2719810507feSWaiman Long 	int next = chain_hlocks[offset];
2720810507feSWaiman Long 
2721810507feSWaiman Long 	WARN_ON_ONCE(!(next & CHAIN_BLK_FLAG));
2722810507feSWaiman Long 
2723810507feSWaiman Long 	if (next == CHAIN_BLK_LIST_END)
2724810507feSWaiman Long 		return -1;
2725810507feSWaiman Long 
2726810507feSWaiman Long 	next &= ~CHAIN_BLK_FLAG;
2727810507feSWaiman Long 	next <<= 16;
2728810507feSWaiman Long 	next |= chain_hlocks[offset + 1];
2729810507feSWaiman Long 
2730810507feSWaiman Long 	return next;
2731810507feSWaiman Long }
2732810507feSWaiman Long 
2733810507feSWaiman Long /*
2734810507feSWaiman Long  * bucket-0 only
2735810507feSWaiman Long  */
2736810507feSWaiman Long static inline int chain_block_size(int offset)
2737810507feSWaiman Long {
2738810507feSWaiman Long 	return (chain_hlocks[offset + 2] << 16) | chain_hlocks[offset + 3];
2739810507feSWaiman Long }
2740810507feSWaiman Long 
2741810507feSWaiman Long static inline void init_chain_block(int offset, int next, int bucket, int size)
2742810507feSWaiman Long {
2743810507feSWaiman Long 	chain_hlocks[offset] = (next >> 16) | CHAIN_BLK_FLAG;
2744810507feSWaiman Long 	chain_hlocks[offset + 1] = (u16)next;
2745810507feSWaiman Long 
2746810507feSWaiman Long 	if (size && !bucket) {
2747810507feSWaiman Long 		chain_hlocks[offset + 2] = size >> 16;
2748810507feSWaiman Long 		chain_hlocks[offset + 3] = (u16)size;
2749810507feSWaiman Long 	}
2750810507feSWaiman Long }
2751810507feSWaiman Long 
2752810507feSWaiman Long static inline void add_chain_block(int offset, int size)
2753810507feSWaiman Long {
2754810507feSWaiman Long 	int bucket = size_to_bucket(size);
2755810507feSWaiman Long 	int next = chain_block_buckets[bucket];
2756810507feSWaiman Long 	int prev, curr;
2757810507feSWaiman Long 
2758810507feSWaiman Long 	if (unlikely(size < 2)) {
2759810507feSWaiman Long 		/*
2760810507feSWaiman Long 		 * We can't store single entries on the freelist. Leak them.
2761810507feSWaiman Long 		 *
2762810507feSWaiman Long 		 * One possible way out would be to uniquely mark them, other
2763810507feSWaiman Long 		 * than with CHAIN_BLK_FLAG, such that we can recover them when
2764810507feSWaiman Long 		 * the block before it is re-added.
2765810507feSWaiman Long 		 */
2766810507feSWaiman Long 		if (size)
2767810507feSWaiman Long 			nr_lost_chain_hlocks++;
2768810507feSWaiman Long 		return;
2769810507feSWaiman Long 	}
2770810507feSWaiman Long 
2771810507feSWaiman Long 	nr_free_chain_hlocks += size;
2772810507feSWaiman Long 	if (!bucket) {
2773810507feSWaiman Long 		nr_large_chain_blocks++;
2774810507feSWaiman Long 
2775810507feSWaiman Long 		/*
2776810507feSWaiman Long 		 * Variable sized, sort large to small.
2777810507feSWaiman Long 		 */
2778810507feSWaiman Long 		for_each_chain_block(0, prev, curr) {
2779810507feSWaiman Long 			if (size >= chain_block_size(curr))
2780810507feSWaiman Long 				break;
2781810507feSWaiman Long 		}
2782810507feSWaiman Long 		init_chain_block(offset, curr, 0, size);
2783810507feSWaiman Long 		if (prev < 0)
2784810507feSWaiman Long 			chain_block_buckets[0] = offset;
2785810507feSWaiman Long 		else
2786810507feSWaiman Long 			init_chain_block(prev, offset, 0, 0);
2787810507feSWaiman Long 		return;
2788810507feSWaiman Long 	}
2789810507feSWaiman Long 	/*
2790810507feSWaiman Long 	 * Fixed size, add to head.
2791810507feSWaiman Long 	 */
2792810507feSWaiman Long 	init_chain_block(offset, next, bucket, size);
2793810507feSWaiman Long 	chain_block_buckets[bucket] = offset;
2794810507feSWaiman Long }
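
/*
 * E.g. (a sketch): freeing a block of 5 entries puts it at the head of the
 * size-5 freelist (bucket 4), while a block of 40 entries goes to the
 * variable-sized bucket 0 and is linked in size-sorted position so that the
 * largest block stays at the head.
 */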
2795810507feSWaiman Long 
2796810507feSWaiman Long /*
2797810507feSWaiman Long  * Only the first block in the list can be deleted.
2798810507feSWaiman Long  *
2799810507feSWaiman Long  * For the variable size bucket[0], the first block (the largest one) is
2800810507feSWaiman Long  * returned, broken up and put back into the pool. So if a chain block of
2801810507feSWaiman Long  * length > MAX_CHAIN_BUCKETS is ever used and zapped, it will just be
2802810507feSWaiman Long  * queued up after the primordial chain block and never be used until the
2803810507feSWaiman Long  * hlock entries in the primordial chain block are almost used up. That
2804810507feSWaiman Long  * causes fragmentation and reduces allocation efficiency. That can be
2805810507feSWaiman Long  * monitored by looking at the "large chain blocks" number in lockdep_stats.
2806810507feSWaiman Long  */
2807810507feSWaiman Long static inline void del_chain_block(int bucket, int size, int next)
2808810507feSWaiman Long {
2809810507feSWaiman Long 	nr_free_chain_hlocks -= size;
2810810507feSWaiman Long 	chain_block_buckets[bucket] = next;
2811810507feSWaiman Long 
2812810507feSWaiman Long 	if (!bucket)
2813810507feSWaiman Long 		nr_large_chain_blocks--;
2814810507feSWaiman Long }
2815810507feSWaiman Long 
2816810507feSWaiman Long static void init_chain_block_buckets(void)
2817810507feSWaiman Long {
2818810507feSWaiman Long 	int i;
2819810507feSWaiman Long 
2820810507feSWaiman Long 	for (i = 0; i < MAX_CHAIN_BUCKETS; i++)
2821810507feSWaiman Long 		chain_block_buckets[i] = -1;
2822810507feSWaiman Long 
2823810507feSWaiman Long 	add_chain_block(0, ARRAY_SIZE(chain_hlocks));
2824810507feSWaiman Long }
2825810507feSWaiman Long 
2826810507feSWaiman Long /*
2827810507feSWaiman Long  * Return offset of a chain block of the right size or -1 if not found.
2828810507feSWaiman Long  *
2829810507feSWaiman Long  * Fairly simple worst-fit allocator with the addition of a number of
2830810507feSWaiman Long  * size-specific free lists.
2831810507feSWaiman Long  */
2832810507feSWaiman Long static int alloc_chain_hlocks(int req)
2833810507feSWaiman Long {
2834810507feSWaiman Long 	int bucket, curr, size;
2835810507feSWaiman Long 
2836810507feSWaiman Long 	/*
2837810507feSWaiman Long 	 * We rely on the MSB to act as an escape bit to denote freelist
2838810507feSWaiman Long 	 * pointers. Make sure this bit isn't set in 'normal' class_idx usage.
2839810507feSWaiman Long 	 */
2840810507feSWaiman Long 	BUILD_BUG_ON((MAX_LOCKDEP_KEYS-1) & CHAIN_BLK_FLAG);
2841810507feSWaiman Long 
2842810507feSWaiman Long 	init_data_structures_once();
2843810507feSWaiman Long 
2844810507feSWaiman Long 	if (nr_free_chain_hlocks < req)
2845810507feSWaiman Long 		return -1;
2846810507feSWaiman Long 
2847810507feSWaiman Long 	/*
2848810507feSWaiman Long 	 * We require a minimum of 2 (u16) entries to encode a freelist
2849810507feSWaiman Long 	 * 'pointer'.
2850810507feSWaiman Long 	 */
2851810507feSWaiman Long 	req = max(req, 2);
2852810507feSWaiman Long 	bucket = size_to_bucket(req);
2853810507feSWaiman Long 	curr = chain_block_buckets[bucket];
2854810507feSWaiman Long 
2855810507feSWaiman Long 	if (bucket) {
2856810507feSWaiman Long 		if (curr >= 0) {
2857810507feSWaiman Long 			del_chain_block(bucket, req, chain_block_next(curr));
2858810507feSWaiman Long 			return curr;
2859810507feSWaiman Long 		}
2860810507feSWaiman Long 		/* Try bucket 0 */
2861810507feSWaiman Long 		curr = chain_block_buckets[0];
2862810507feSWaiman Long 	}
2863810507feSWaiman Long 
2864810507feSWaiman Long 	/*
2865810507feSWaiman Long 	 * The variable sized freelist is sorted by size; the first entry is
2866810507feSWaiman Long 	 * the largest. Use it if it fits.
2867810507feSWaiman Long 	 */
2868810507feSWaiman Long 	if (curr >= 0) {
2869810507feSWaiman Long 		size = chain_block_size(curr);
2870810507feSWaiman Long 		if (likely(size >= req)) {
2871810507feSWaiman Long 			del_chain_block(0, size, chain_block_next(curr));
2872810507feSWaiman Long 			add_chain_block(curr + req, size - req);
2873810507feSWaiman Long 			return curr;
2874810507feSWaiman Long 		}
2875810507feSWaiman Long 	}
2876810507feSWaiman Long 
2877810507feSWaiman Long 	/*
2878810507feSWaiman Long 	 * Last resort, split a block in a larger sized bucket.
2879810507feSWaiman Long 	 */
2880810507feSWaiman Long 	for (size = MAX_CHAIN_BUCKETS; size > req; size--) {
2881810507feSWaiman Long 		bucket = size_to_bucket(size);
2882810507feSWaiman Long 		curr = chain_block_buckets[bucket];
2883810507feSWaiman Long 		if (curr < 0)
2884810507feSWaiman Long 			continue;
2885810507feSWaiman Long 
2886810507feSWaiman Long 		del_chain_block(bucket, size, chain_block_next(curr));
2887810507feSWaiman Long 		add_chain_block(curr + req, size - req);
2888810507feSWaiman Long 		return curr;
2889810507feSWaiman Long 	}
2890810507feSWaiman Long 
2891810507feSWaiman Long 	return -1;
2892810507feSWaiman Long }
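
/*
 * Rough walk-through (a sketch with made-up numbers): a request for 3 entries
 * first tries bucket 2 (the size-3 freelist); if that is empty it falls back
 * to the variable-sized bucket 0 and, when the largest block there is big
 * enough, carves 3 entries off its front and re-adds the remainder via
 * add_chain_block(); failing that, it walks the fixed-size buckets from
 * MAX_CHAIN_BUCKETS down to size 4 and splits the first free block it finds.
 */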
2893810507feSWaiman Long 
2894810507feSWaiman Long static inline void free_chain_hlocks(int base, int size)
2895810507feSWaiman Long {
2896810507feSWaiman Long 	add_chain_block(base, max(size, 2));
2897810507feSWaiman Long }
28988eddac3fSPeter Zijlstra 
28998eddac3fSPeter Zijlstra struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
29008eddac3fSPeter Zijlstra {
29018eddac3fSPeter Zijlstra 	return lock_classes + chain_hlocks[chain->base + i];
29028eddac3fSPeter Zijlstra }
29038eddac3fSPeter Zijlstra 
29048eddac3fSPeter Zijlstra /*
29059e4e7554SIngo Molnar  * Returns the index of the first held_lock of the current chain
29069e4e7554SIngo Molnar  */
29079e4e7554SIngo Molnar static inline int get_first_held_lock(struct task_struct *curr,
29089e4e7554SIngo Molnar 					struct held_lock *hlock)
29099e4e7554SIngo Molnar {
29109e4e7554SIngo Molnar 	int i;
29119e4e7554SIngo Molnar 	struct held_lock *hlock_curr;
29129e4e7554SIngo Molnar 
29139e4e7554SIngo Molnar 	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
29149e4e7554SIngo Molnar 		hlock_curr = curr->held_locks + i;
29159e4e7554SIngo Molnar 		if (hlock_curr->irq_context != hlock->irq_context)
29169e4e7554SIngo Molnar 			break;
29179e4e7554SIngo Molnar 
29199e4e7554SIngo Molnar 
29209e4e7554SIngo Molnar 	return ++i;
29219e4e7554SIngo Molnar }
29229e4e7554SIngo Molnar 
29235c8a010cSBorislav Petkov #ifdef CONFIG_DEBUG_LOCKDEP
29249e4e7554SIngo Molnar /*
292539e2e173SAlfredo Alvarez Fernandez  * Returns the next chain_key iteration
292639e2e173SAlfredo Alvarez Fernandez  */
292739e2e173SAlfredo Alvarez Fernandez static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
292839e2e173SAlfredo Alvarez Fernandez {
292939e2e173SAlfredo Alvarez Fernandez 	u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
293039e2e173SAlfredo Alvarez Fernandez 
293139e2e173SAlfredo Alvarez Fernandez 	printk(" class_idx:%d -> chain_key:%016Lx",
293239e2e173SAlfredo Alvarez Fernandez 		class_idx,
293339e2e173SAlfredo Alvarez Fernandez 		(unsigned long long)new_chain_key);
293439e2e173SAlfredo Alvarez Fernandez 	return new_chain_key;
293539e2e173SAlfredo Alvarez Fernandez }
293639e2e173SAlfredo Alvarez Fernandez 
293739e2e173SAlfredo Alvarez Fernandez static void
293839e2e173SAlfredo Alvarez Fernandez print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
293939e2e173SAlfredo Alvarez Fernandez {
294039e2e173SAlfredo Alvarez Fernandez 	struct held_lock *hlock;
2941f6ec8829SYuyang Du 	u64 chain_key = INITIAL_CHAIN_KEY;
294239e2e173SAlfredo Alvarez Fernandez 	int depth = curr->lockdep_depth;
2943834494b2SYuyang Du 	int i = get_first_held_lock(curr, hlock_next);
294439e2e173SAlfredo Alvarez Fernandez 
2945834494b2SYuyang Du 	printk("depth: %u (irq_context %u)\n", depth - i + 1,
2946834494b2SYuyang Du 		hlock_next->irq_context);
2947834494b2SYuyang Du 	for (; i < depth; i++) {
294839e2e173SAlfredo Alvarez Fernandez 		hlock = curr->held_locks + i;
294939e2e173SAlfredo Alvarez Fernandez 		chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
295039e2e173SAlfredo Alvarez Fernandez 
295139e2e173SAlfredo Alvarez Fernandez 		print_lock(hlock);
295239e2e173SAlfredo Alvarez Fernandez 	}
295339e2e173SAlfredo Alvarez Fernandez 
295439e2e173SAlfredo Alvarez Fernandez 	print_chain_key_iteration(hlock_next->class_idx, chain_key);
295539e2e173SAlfredo Alvarez Fernandez 	print_lock(hlock_next);
295639e2e173SAlfredo Alvarez Fernandez }
295739e2e173SAlfredo Alvarez Fernandez 
295839e2e173SAlfredo Alvarez Fernandez static void print_chain_keys_chain(struct lock_chain *chain)
295939e2e173SAlfredo Alvarez Fernandez {
296039e2e173SAlfredo Alvarez Fernandez 	int i;
2961f6ec8829SYuyang Du 	u64 chain_key = INITIAL_CHAIN_KEY;
296239e2e173SAlfredo Alvarez Fernandez 	int class_id;
296339e2e173SAlfredo Alvarez Fernandez 
296439e2e173SAlfredo Alvarez Fernandez 	printk("depth: %u\n", chain->depth);
296539e2e173SAlfredo Alvarez Fernandez 	for (i = 0; i < chain->depth; i++) {
296639e2e173SAlfredo Alvarez Fernandez 		class_id = chain_hlocks[chain->base + i];
296701bb6f0aSYuyang Du 		chain_key = print_chain_key_iteration(class_id, chain_key);
296839e2e173SAlfredo Alvarez Fernandez 
296939e2e173SAlfredo Alvarez Fernandez 		print_lock_name(lock_classes + class_id);
297039e2e173SAlfredo Alvarez Fernandez 		printk("\n");
297139e2e173SAlfredo Alvarez Fernandez 	}
297239e2e173SAlfredo Alvarez Fernandez }
297339e2e173SAlfredo Alvarez Fernandez 
297439e2e173SAlfredo Alvarez Fernandez static void print_collision(struct task_struct *curr,
297539e2e173SAlfredo Alvarez Fernandez 			struct held_lock *hlock_next,
297639e2e173SAlfredo Alvarez Fernandez 			struct lock_chain *chain)
297739e2e173SAlfredo Alvarez Fernandez {
2978681fbec8SPaul E. McKenney 	pr_warn("\n");
2979a5dd63efSPaul E. McKenney 	pr_warn("============================\n");
2980a5dd63efSPaul E. McKenney 	pr_warn("WARNING: chain_key collision\n");
298139e2e173SAlfredo Alvarez Fernandez 	print_kernel_ident();
2982a5dd63efSPaul E. McKenney 	pr_warn("----------------------------\n");
2983681fbec8SPaul E. McKenney 	pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
2984681fbec8SPaul E. McKenney 	pr_warn("Hash chain already cached but the contents don't match!\n");
298539e2e173SAlfredo Alvarez Fernandez 
2986681fbec8SPaul E. McKenney 	pr_warn("Held locks:");
298739e2e173SAlfredo Alvarez Fernandez 	print_chain_keys_held_locks(curr, hlock_next);
298839e2e173SAlfredo Alvarez Fernandez 
2989681fbec8SPaul E. McKenney 	pr_warn("Locks in cached chain:");
299039e2e173SAlfredo Alvarez Fernandez 	print_chain_keys_chain(chain);
299139e2e173SAlfredo Alvarez Fernandez 
2992681fbec8SPaul E. McKenney 	pr_warn("\nstack backtrace:\n");
299339e2e173SAlfredo Alvarez Fernandez 	dump_stack();
299439e2e173SAlfredo Alvarez Fernandez }
29955c8a010cSBorislav Petkov #endif
299639e2e173SAlfredo Alvarez Fernandez 
299739e2e173SAlfredo Alvarez Fernandez /*
29989e4e7554SIngo Molnar  * Checks whether the chain and the current held locks are consistent
29999e4e7554SIngo Molnar  * in depth and also in content. If they are not, it most likely means
30009e4e7554SIngo Molnar  * that there was a collision during the calculation of the chain_key.
30019e4e7554SIngo Molnar  * Returns: 0 not passed, 1 passed
30029e4e7554SIngo Molnar  */
30039e4e7554SIngo Molnar static int check_no_collision(struct task_struct *curr,
30049e4e7554SIngo Molnar 			struct held_lock *hlock,
30059e4e7554SIngo Molnar 			struct lock_chain *chain)
30069e4e7554SIngo Molnar {
30079e4e7554SIngo Molnar #ifdef CONFIG_DEBUG_LOCKDEP
30089e4e7554SIngo Molnar 	int i, j, id;
30099e4e7554SIngo Molnar 
30109e4e7554SIngo Molnar 	i = get_first_held_lock(curr, hlock);
30119e4e7554SIngo Molnar 
301239e2e173SAlfredo Alvarez Fernandez 	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
301339e2e173SAlfredo Alvarez Fernandez 		print_collision(curr, hlock, chain);
30149e4e7554SIngo Molnar 		return 0;
301539e2e173SAlfredo Alvarez Fernandez 	}
30169e4e7554SIngo Molnar 
30179e4e7554SIngo Molnar 	for (j = 0; j < chain->depth - 1; j++, i++) {
301801bb6f0aSYuyang Du 		id = curr->held_locks[i].class_idx;
30199e4e7554SIngo Molnar 
302039e2e173SAlfredo Alvarez Fernandez 		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
302139e2e173SAlfredo Alvarez Fernandez 			print_collision(curr, hlock, chain);
30229e4e7554SIngo Molnar 			return 0;
30239e4e7554SIngo Molnar 		}
302439e2e173SAlfredo Alvarez Fernandez 	}
30259e4e7554SIngo Molnar #endif
30269e4e7554SIngo Molnar 	return 1;
30279e4e7554SIngo Molnar }
30289e4e7554SIngo Molnar 
30299e4e7554SIngo Molnar /*
30302212684aSBart Van Assche  * Given an index that is >= -1, return the index of the next lock chain.
30312212684aSBart Van Assche  * Return -2 if there is no next lock chain.
30322212684aSBart Van Assche  */
30332212684aSBart Van Assche long lockdep_next_lockchain(long i)
30342212684aSBart Van Assche {
3035de4643a7SBart Van Assche 	i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
3036de4643a7SBart Van Assche 	return i < ARRAY_SIZE(lock_chains) ? i : -2;
30372212684aSBart Van Assche }
30382212684aSBart Van Assche 
30392212684aSBart Van Assche unsigned long lock_chain_count(void)
30402212684aSBart Van Assche {
3041de4643a7SBart Van Assche 	return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
3042de4643a7SBart Van Assche }
3043de4643a7SBart Van Assche 
3044de4643a7SBart Van Assche /* Must be called with the graph lock held. */
3045de4643a7SBart Van Assche static struct lock_chain *alloc_lock_chain(void)
3046de4643a7SBart Van Assche {
3047de4643a7SBart Van Assche 	int idx = find_first_zero_bit(lock_chains_in_use,
3048de4643a7SBart Van Assche 				      ARRAY_SIZE(lock_chains));
3049de4643a7SBart Van Assche 
3050de4643a7SBart Van Assche 	if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
3051de4643a7SBart Van Assche 		return NULL;
3052de4643a7SBart Van Assche 	__set_bit(idx, lock_chains_in_use);
3053de4643a7SBart Van Assche 	return lock_chains + idx;
30542212684aSBart Van Assche }
30552212684aSBart Van Assche 
30562212684aSBart Van Assche /*
3057545c23f2SByungchul Park  * Adds a dependency chain to the chain hashtable. Must be called with
3058545c23f2SByungchul Park  * graph_lock held.
3059545c23f2SByungchul Park  *
3060545c23f2SByungchul Park  * Returns 0 on failure, and graph_lock is released.
3061545c23f2SByungchul Park  * Returns 1 on success, with graph_lock held.
30628eddac3fSPeter Zijlstra  */
3063545c23f2SByungchul Park static inline int add_chain_cache(struct task_struct *curr,
30648eddac3fSPeter Zijlstra 				  struct held_lock *hlock,
30658eddac3fSPeter Zijlstra 				  u64 chain_key)
30668eddac3fSPeter Zijlstra {
30678eddac3fSPeter Zijlstra 	struct lock_class *class = hlock_class(hlock);
3068a63f38ccSAndrew Morton 	struct hlist_head *hash_head = chainhashentry(chain_key);
30698eddac3fSPeter Zijlstra 	struct lock_chain *chain;
30708eddac3fSPeter Zijlstra 	int i, j;
30718eddac3fSPeter Zijlstra 
30728eddac3fSPeter Zijlstra 	/*
3073527af3eaSBart Van Assche 	 * The caller must hold the graph lock and ensure we've got IRQs
30748eddac3fSPeter Zijlstra 	 * disabled, to make this an IRQ-safe lock. For recursion reasons
30758eddac3fSPeter Zijlstra 	 * lockdep won't complain about its own locking errors.
30768eddac3fSPeter Zijlstra 	 */
3077248efb21SPeter Zijlstra 	if (lockdep_assert_locked())
30788eddac3fSPeter Zijlstra 		return 0;
30799e4e7554SIngo Molnar 
3080de4643a7SBart Van Assche 	chain = alloc_lock_chain();
3081de4643a7SBart Van Assche 	if (!chain) {
30828eddac3fSPeter Zijlstra 		if (!debug_locks_off_graph_unlock())
30838eddac3fSPeter Zijlstra 			return 0;
30848eddac3fSPeter Zijlstra 
30858eddac3fSPeter Zijlstra 		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
30868eddac3fSPeter Zijlstra 		dump_stack();
30878eddac3fSPeter Zijlstra 		return 0;
30888eddac3fSPeter Zijlstra 	}
30898eddac3fSPeter Zijlstra 	chain->chain_key = chain_key;
30908eddac3fSPeter Zijlstra 	chain->irq_context = hlock->irq_context;
30919e4e7554SIngo Molnar 	i = get_first_held_lock(curr, hlock);
30928eddac3fSPeter Zijlstra 	chain->depth = curr->lockdep_depth + 1 - i;
309375dd602aSPeter Zijlstra 
309475dd602aSPeter Zijlstra 	BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
309575dd602aSPeter Zijlstra 	BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
309675dd602aSPeter Zijlstra 	BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
309775dd602aSPeter Zijlstra 
3098810507feSWaiman Long 	j = alloc_chain_hlocks(chain->depth);
3099810507feSWaiman Long 	if (j < 0) {
3100f9af456aSByungchul Park 		if (!debug_locks_off_graph_unlock())
310175dd602aSPeter Zijlstra 			return 0;
310275dd602aSPeter Zijlstra 
310375dd602aSPeter Zijlstra 		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
310475dd602aSPeter Zijlstra 		dump_stack();
310575dd602aSPeter Zijlstra 		return 0;
310675dd602aSPeter Zijlstra 	}
310775dd602aSPeter Zijlstra 
3108810507feSWaiman Long 	chain->base = j;
3109810507feSWaiman Long 	for (j = 0; j < chain->depth - 1; j++, i++) {
3110810507feSWaiman Long 		int lock_id = curr->held_locks[i].class_idx;
3111810507feSWaiman Long 
3112810507feSWaiman Long 		chain_hlocks[chain->base + j] = lock_id;
3113810507feSWaiman Long 	}
3114810507feSWaiman Long 	chain_hlocks[chain->base + j] = class - lock_classes;
3115a63f38ccSAndrew Morton 	hlist_add_head_rcu(&chain->entry, hash_head);
31168eddac3fSPeter Zijlstra 	debug_atomic_inc(chain_lookup_misses);
3117b3b9c187SWaiman Long 	inc_chains(chain->irq_context);
31188eddac3fSPeter Zijlstra 
31198eddac3fSPeter Zijlstra 	return 1;
31208eddac3fSPeter Zijlstra }
31218eddac3fSPeter Zijlstra 
3122545c23f2SByungchul Park /*
3123a0b0fd53SBart Van Assche  * Look up a dependency chain. Must be called with either the graph lock or
3124a0b0fd53SBart Van Assche  * the RCU read lock held.
3125545c23f2SByungchul Park  */
3126545c23f2SByungchul Park static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
3127545c23f2SByungchul Park {
3128545c23f2SByungchul Park 	struct hlist_head *hash_head = chainhashentry(chain_key);
3129545c23f2SByungchul Park 	struct lock_chain *chain;
3130545c23f2SByungchul Park 
3131545c23f2SByungchul Park 	hlist_for_each_entry_rcu(chain, hash_head, entry) {
3132a0b0fd53SBart Van Assche 		if (READ_ONCE(chain->chain_key) == chain_key) {
3133545c23f2SByungchul Park 			debug_atomic_inc(chain_lookup_hits);
3134545c23f2SByungchul Park 			return chain;
3135545c23f2SByungchul Park 		}
3136545c23f2SByungchul Park 	}
3137545c23f2SByungchul Park 	return NULL;
3138545c23f2SByungchul Park }
3139545c23f2SByungchul Park 
3140545c23f2SByungchul Park /*
3141545c23f2SByungchul Park  * If the key is not yet present in the dependency chain cache then
3142545c23f2SByungchul Park  * add it and return 1 - in this case the new dependency chain is
3143545c23f2SByungchul Park  * validated. If the key is already hashed, return 0.
3144545c23f2SByungchul Park  * (On return with 1 graph_lock is held.)
3145545c23f2SByungchul Park  */
3146545c23f2SByungchul Park static inline int lookup_chain_cache_add(struct task_struct *curr,
3147545c23f2SByungchul Park 					 struct held_lock *hlock,
3148545c23f2SByungchul Park 					 u64 chain_key)
3149545c23f2SByungchul Park {
3150545c23f2SByungchul Park 	struct lock_class *class = hlock_class(hlock);
3151545c23f2SByungchul Park 	struct lock_chain *chain = lookup_chain_cache(chain_key);
3152545c23f2SByungchul Park 
3153545c23f2SByungchul Park 	if (chain) {
3154545c23f2SByungchul Park cache_hit:
3155545c23f2SByungchul Park 		if (!check_no_collision(curr, hlock, chain))
3156545c23f2SByungchul Park 			return 0;
3157545c23f2SByungchul Park 
3158545c23f2SByungchul Park 		if (very_verbose(class)) {
3159545c23f2SByungchul Park 			printk("\nhash chain already cached, key: "
316004860d48SBorislav Petkov 					"%016Lx tail class: [%px] %s\n",
3161545c23f2SByungchul Park 					(unsigned long long)chain_key,
3162545c23f2SByungchul Park 					class->key, class->name);
3163545c23f2SByungchul Park 		}
3164545c23f2SByungchul Park 
3165545c23f2SByungchul Park 		return 0;
3166545c23f2SByungchul Park 	}
3167545c23f2SByungchul Park 
3168545c23f2SByungchul Park 	if (very_verbose(class)) {
316904860d48SBorislav Petkov 		printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
3170545c23f2SByungchul Park 			(unsigned long long)chain_key, class->key, class->name);
3171545c23f2SByungchul Park 	}
3172545c23f2SByungchul Park 
3173545c23f2SByungchul Park 	if (!graph_lock())
3174545c23f2SByungchul Park 		return 0;
3175545c23f2SByungchul Park 
3176545c23f2SByungchul Park 	/*
3177545c23f2SByungchul Park 	 * We have to walk the chain again locked - to avoid duplicates:
3178545c23f2SByungchul Park 	 */
3179545c23f2SByungchul Park 	chain = lookup_chain_cache(chain_key);
3180545c23f2SByungchul Park 	if (chain) {
3181545c23f2SByungchul Park 		graph_unlock();
3182545c23f2SByungchul Park 		goto cache_hit;
3183545c23f2SByungchul Park 	}
3184545c23f2SByungchul Park 
3185545c23f2SByungchul Park 	if (!add_chain_cache(curr, hlock, chain_key))
3186545c23f2SByungchul Park 		return 0;
3187545c23f2SByungchul Park 
3188545c23f2SByungchul Park 	return 1;
3189545c23f2SByungchul Park }
3190545c23f2SByungchul Park 
31910b9fc8ecSYuyang Du static int validate_chain(struct task_struct *curr,
31920b9fc8ecSYuyang Du 			  struct held_lock *hlock,
31930b9fc8ecSYuyang Du 			  int chain_head, u64 chain_key)
31948eddac3fSPeter Zijlstra {
31958eddac3fSPeter Zijlstra 	/*
31968eddac3fSPeter Zijlstra 	 * Trylock needs to maintain the stack of held locks, but it
31978eddac3fSPeter Zijlstra 	 * does not add new dependencies, because trylock can be done
31988eddac3fSPeter Zijlstra 	 * in any order.
31998eddac3fSPeter Zijlstra 	 *
32008eddac3fSPeter Zijlstra 	 * We look up the chain_key and do the O(N^2) check and update of
32018eddac3fSPeter Zijlstra 	 * the dependencies only if this is a new dependency chain.
3202545c23f2SByungchul Park 	 * (If lookup_chain_cache_add() returns 1, it acquires
32038eddac3fSPeter Zijlstra 	 * graph_lock for us)
32048eddac3fSPeter Zijlstra 	 */
3205fb9edbe9SOleg Nesterov 	if (!hlock->trylock && hlock->check &&
3206545c23f2SByungchul Park 	    lookup_chain_cache_add(curr, hlock, chain_key)) {
32078eddac3fSPeter Zijlstra 		/*
32088eddac3fSPeter Zijlstra 		 * Check whether last held lock:
32098eddac3fSPeter Zijlstra 		 *
32108eddac3fSPeter Zijlstra 		 * - is irq-safe, if this lock is irq-unsafe
32118eddac3fSPeter Zijlstra 		 * - is softirq-safe, if this lock is softirq-unsafe
32128eddac3fSPeter Zijlstra 		 *
32138eddac3fSPeter Zijlstra 		 * And check whether the new lock's dependency graph
321431a490e5SYuyang Du 		 * could lead back to the previous lock:
32158eddac3fSPeter Zijlstra 		 *
321631a490e5SYuyang Du 		 * - within the current held-lock stack
321731a490e5SYuyang Du 		 * - across our accumulated lock dependency records
321831a490e5SYuyang Du 		 *
321931a490e5SYuyang Du 		 * any of these scenarios could lead to a deadlock.
322031a490e5SYuyang Du 		 */
322131a490e5SYuyang Du 		/*
322231a490e5SYuyang Du 		 * The simple case: does the current task hold the same
322331a490e5SYuyang Du 		 * lock already?
32248eddac3fSPeter Zijlstra 		 */
32254609c4f9SYuyang Du 		int ret = check_deadlock(curr, hlock);
32268eddac3fSPeter Zijlstra 
32278eddac3fSPeter Zijlstra 		if (!ret)
32288eddac3fSPeter Zijlstra 			return 0;
32298eddac3fSPeter Zijlstra 		/*
32308eddac3fSPeter Zijlstra 		 * Mark recursive read, as we jump over it when
32318eddac3fSPeter Zijlstra 		 * building dependencies (just like we jump over
32328eddac3fSPeter Zijlstra 		 * trylock entries):
32338eddac3fSPeter Zijlstra 		 */
32348eddac3fSPeter Zijlstra 		if (ret == 2)
32358eddac3fSPeter Zijlstra 			hlock->read = 2;
32368eddac3fSPeter Zijlstra 		/*
32378eddac3fSPeter Zijlstra 		 * Add dependency only if this lock is not the head
32388eddac3fSPeter Zijlstra 		 * of the chain, and if it's not a secondary read-lock:
32398eddac3fSPeter Zijlstra 		 */
3240545c23f2SByungchul Park 		if (!chain_head && ret != 2) {
32418eddac3fSPeter Zijlstra 			if (!check_prevs_add(curr, hlock))
32428eddac3fSPeter Zijlstra 				return 0;
3243545c23f2SByungchul Park 		}
3244545c23f2SByungchul Park 
32458eddac3fSPeter Zijlstra 		graph_unlock();
3246545c23f2SByungchul Park 	} else {
3247545c23f2SByungchul Park 		/* after lookup_chain_cache_add(): */
32488eddac3fSPeter Zijlstra 		if (unlikely(!debug_locks))
32498eddac3fSPeter Zijlstra 			return 0;
3250545c23f2SByungchul Park 	}
32518eddac3fSPeter Zijlstra 
32528eddac3fSPeter Zijlstra 	return 1;
32538eddac3fSPeter Zijlstra }
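
/*
 * Informal sketch of the flow above: the first time a task builds up a
 * given held-lock stack in a given irq context, lookup_chain_cache_add()
 * misses, records the new chain and returns 1 with graph_lock held;
 * check_deadlock() and (where needed) check_prevs_add() then validate the
 * new dependencies before graph_unlock(). Any later acquisition of the
 * same sequence hashes to the same chain_key, hits the chain cache and
 * skips the expensive dependency work entirely.
 */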
32548eddac3fSPeter Zijlstra #else
32558eddac3fSPeter Zijlstra static inline int validate_chain(struct task_struct *curr,
32560b9fc8ecSYuyang Du 				 struct held_lock *hlock,
32578eddac3fSPeter Zijlstra 				 int chain_head, u64 chain_key)
32588eddac3fSPeter Zijlstra {
32598eddac3fSPeter Zijlstra 	return 1;
32608eddac3fSPeter Zijlstra }
3261810507feSWaiman Long 
3262810507feSWaiman Long static void init_chain_block_buckets(void)	{ }
3263e7a38f63SYuyang Du #endif /* CONFIG_PROVE_LOCKING */
32648eddac3fSPeter Zijlstra 
32658eddac3fSPeter Zijlstra /*
32668eddac3fSPeter Zijlstra  * We are building curr_chain_key incrementally, so double-check
32678eddac3fSPeter Zijlstra  * it from scratch, to make sure that it's done correctly:
32688eddac3fSPeter Zijlstra  */
32698eddac3fSPeter Zijlstra static void check_chain_key(struct task_struct *curr)
32708eddac3fSPeter Zijlstra {
32718eddac3fSPeter Zijlstra #ifdef CONFIG_DEBUG_LOCKDEP
32728eddac3fSPeter Zijlstra 	struct held_lock *hlock, *prev_hlock = NULL;
32735f18ab5cSAlfredo Alvarez Fernandez 	unsigned int i;
3274f6ec8829SYuyang Du 	u64 chain_key = INITIAL_CHAIN_KEY;
32758eddac3fSPeter Zijlstra 
32768eddac3fSPeter Zijlstra 	for (i = 0; i < curr->lockdep_depth; i++) {
32778eddac3fSPeter Zijlstra 		hlock = curr->held_locks + i;
32788eddac3fSPeter Zijlstra 		if (chain_key != hlock->prev_chain_key) {
32798eddac3fSPeter Zijlstra 			debug_locks_off();
32808eddac3fSPeter Zijlstra 			/*
32818eddac3fSPeter Zijlstra 			 * We got mighty confused, our chain keys don't match
32828eddac3fSPeter Zijlstra 			 * with what we expect, did someone trample on our task state?
32838eddac3fSPeter Zijlstra 			 */
32848eddac3fSPeter Zijlstra 			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
32858eddac3fSPeter Zijlstra 				curr->lockdep_depth, i,
32868eddac3fSPeter Zijlstra 				(unsigned long long)chain_key,
32878eddac3fSPeter Zijlstra 				(unsigned long long)hlock->prev_chain_key);
32888eddac3fSPeter Zijlstra 			return;
32898eddac3fSPeter Zijlstra 		}
329001bb6f0aSYuyang Du 
32918eddac3fSPeter Zijlstra 		/*
329201bb6f0aSYuyang Du 		 * hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is
329301bb6f0aSYuyang Du 		 * it a registered lock class index?
32948eddac3fSPeter Zijlstra 		 */
329501bb6f0aSYuyang Du 		if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use)))
32968eddac3fSPeter Zijlstra 			return;
32978eddac3fSPeter Zijlstra 
32988eddac3fSPeter Zijlstra 		if (prev_hlock && (prev_hlock->irq_context !=
32998eddac3fSPeter Zijlstra 							hlock->irq_context))
3300f6ec8829SYuyang Du 			chain_key = INITIAL_CHAIN_KEY;
33015f18ab5cSAlfredo Alvarez Fernandez 		chain_key = iterate_chain_key(chain_key, hlock->class_idx);
33028eddac3fSPeter Zijlstra 		prev_hlock = hlock;
33038eddac3fSPeter Zijlstra 	}
33048eddac3fSPeter Zijlstra 	if (chain_key != curr->curr_chain_key) {
33058eddac3fSPeter Zijlstra 		debug_locks_off();
33068eddac3fSPeter Zijlstra 		/*
33078eddac3fSPeter Zijlstra 		 * More smoking hash instead of calculating it, damn see these
33088eddac3fSPeter Zijlstra 		 * numbers float.. I bet that a pink elephant stepped on my memory.
33098eddac3fSPeter Zijlstra 		 */
33108eddac3fSPeter Zijlstra 		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
33118eddac3fSPeter Zijlstra 			curr->lockdep_depth, i,
33128eddac3fSPeter Zijlstra 			(unsigned long long)chain_key,
33138eddac3fSPeter Zijlstra 			(unsigned long long)curr->curr_chain_key);
33148eddac3fSPeter Zijlstra 	}
33158eddac3fSPeter Zijlstra #endif
33168eddac3fSPeter Zijlstra }
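
/*
 * Informal example: if the task holds L1 and L2, both acquired in the
 * same irq context, the value recomputed above is
 *
 *	iterate_chain_key(iterate_chain_key(INITIAL_CHAIN_KEY,
 *			  L1.class_idx), L2.class_idx)
 *
 * with the key reset to INITIAL_CHAIN_KEY at each irq-context switch;
 * any mismatch with curr->curr_chain_key means the incremental
 * bookkeeping or the held_locks stack got corrupted.  (L1/L2 are
 * hypothetical locks used only for illustration.)
 */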
33178eddac3fSPeter Zijlstra 
331830a35f79SArnd Bergmann #ifdef CONFIG_PROVE_LOCKING
33190d2cc3b3SFrederic Weisbecker static int mark_lock(struct task_struct *curr, struct held_lock *this,
33200d2cc3b3SFrederic Weisbecker 		     enum lock_usage_bit new_bit);
33210d2cc3b3SFrederic Weisbecker 
3322f7c1c6b3SYuyang Du static void print_usage_bug_scenario(struct held_lock *lock)
33238eddac3fSPeter Zijlstra {
33248eddac3fSPeter Zijlstra 	struct lock_class *class = hlock_class(lock);
33258eddac3fSPeter Zijlstra 
33268eddac3fSPeter Zijlstra 	printk(" Possible unsafe locking scenario:\n\n");
33278eddac3fSPeter Zijlstra 	printk("       CPU0\n");
33288eddac3fSPeter Zijlstra 	printk("       ----\n");
33298eddac3fSPeter Zijlstra 	printk("  lock(");
33308eddac3fSPeter Zijlstra 	__print_lock_name(class);
3331f943fe0fSDmitry Vyukov 	printk(KERN_CONT ");\n");
33328eddac3fSPeter Zijlstra 	printk("  <Interrupt>\n");
33338eddac3fSPeter Zijlstra 	printk("    lock(");
33348eddac3fSPeter Zijlstra 	__print_lock_name(class);
3335f943fe0fSDmitry Vyukov 	printk(KERN_CONT ");\n");
33368eddac3fSPeter Zijlstra 	printk("\n *** DEADLOCK ***\n\n");
33378eddac3fSPeter Zijlstra }
33388eddac3fSPeter Zijlstra 
3339f7c1c6b3SYuyang Du static void
33408eddac3fSPeter Zijlstra print_usage_bug(struct task_struct *curr, struct held_lock *this,
33418eddac3fSPeter Zijlstra 		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
33428eddac3fSPeter Zijlstra {
33438eddac3fSPeter Zijlstra 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
3344f7c1c6b3SYuyang Du 		return;
33458eddac3fSPeter Zijlstra 
3346681fbec8SPaul E. McKenney 	pr_warn("\n");
3347a5dd63efSPaul E. McKenney 	pr_warn("================================\n");
3348a5dd63efSPaul E. McKenney 	pr_warn("WARNING: inconsistent lock state\n");
33498eddac3fSPeter Zijlstra 	print_kernel_ident();
3350a5dd63efSPaul E. McKenney 	pr_warn("--------------------------------\n");
33518eddac3fSPeter Zijlstra 
3352681fbec8SPaul E. McKenney 	pr_warn("inconsistent {%s} -> {%s} usage.\n",
33538eddac3fSPeter Zijlstra 		usage_str[prev_bit], usage_str[new_bit]);
33548eddac3fSPeter Zijlstra 
3355681fbec8SPaul E. McKenney 	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
33568eddac3fSPeter Zijlstra 		curr->comm, task_pid_nr(curr),
3357f9ad4a5fSPeter Zijlstra 		lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
3358ef996916SPeter Zijlstra 		lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
3359f9ad4a5fSPeter Zijlstra 		lockdep_hardirqs_enabled(),
3360ef996916SPeter Zijlstra 		lockdep_softirqs_enabled(curr));
33618eddac3fSPeter Zijlstra 	print_lock(this);
33628eddac3fSPeter Zijlstra 
3363681fbec8SPaul E. McKenney 	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
336412593b74SBart Van Assche 	print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1);
33658eddac3fSPeter Zijlstra 
33668eddac3fSPeter Zijlstra 	print_irqtrace_events(curr);
3367681fbec8SPaul E. McKenney 	pr_warn("\nother info that might help us debug this:\n");
33688eddac3fSPeter Zijlstra 	print_usage_bug_scenario(this);
33698eddac3fSPeter Zijlstra 
33708eddac3fSPeter Zijlstra 	lockdep_print_held_locks(curr);
33718eddac3fSPeter Zijlstra 
3372681fbec8SPaul E. McKenney 	pr_warn("\nstack backtrace:\n");
33738eddac3fSPeter Zijlstra 	dump_stack();
33748eddac3fSPeter Zijlstra }
33758eddac3fSPeter Zijlstra 
33768eddac3fSPeter Zijlstra /*
33778eddac3fSPeter Zijlstra  * Print out an error if an invalid bit is set:
33788eddac3fSPeter Zijlstra  */
33798eddac3fSPeter Zijlstra static inline int
33808eddac3fSPeter Zijlstra valid_state(struct task_struct *curr, struct held_lock *this,
33818eddac3fSPeter Zijlstra 	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
33828eddac3fSPeter Zijlstra {
3383f7c1c6b3SYuyang Du 	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
3384f7c1c6b3SYuyang Du 		print_usage_bug(curr, this, bad_bit, new_bit);
3385f7c1c6b3SYuyang Du 		return 0;
3386f7c1c6b3SYuyang Du 	}
33878eddac3fSPeter Zijlstra 	return 1;
33888eddac3fSPeter Zijlstra }
33898eddac3fSPeter Zijlstra 
33908eddac3fSPeter Zijlstra 
33918eddac3fSPeter Zijlstra /*
33928eddac3fSPeter Zijlstra  * print irq inversion bug:
33938eddac3fSPeter Zijlstra  */
3394f7c1c6b3SYuyang Du static void
33958eddac3fSPeter Zijlstra print_irq_inversion_bug(struct task_struct *curr,
33968eddac3fSPeter Zijlstra 			struct lock_list *root, struct lock_list *other,
33978eddac3fSPeter Zijlstra 			struct held_lock *this, int forwards,
33988eddac3fSPeter Zijlstra 			const char *irqclass)
33998eddac3fSPeter Zijlstra {
34008eddac3fSPeter Zijlstra 	struct lock_list *entry = other;
34018eddac3fSPeter Zijlstra 	struct lock_list *middle = NULL;
34028eddac3fSPeter Zijlstra 	int depth;
34038eddac3fSPeter Zijlstra 
34048eddac3fSPeter Zijlstra 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
3405f7c1c6b3SYuyang Du 		return;
34068eddac3fSPeter Zijlstra 
3407681fbec8SPaul E. McKenney 	pr_warn("\n");
3408a5dd63efSPaul E. McKenney 	pr_warn("========================================================\n");
3409a5dd63efSPaul E. McKenney 	pr_warn("WARNING: possible irq lock inversion dependency detected\n");
34108eddac3fSPeter Zijlstra 	print_kernel_ident();
3411a5dd63efSPaul E. McKenney 	pr_warn("--------------------------------------------------------\n");
3412681fbec8SPaul E. McKenney 	pr_warn("%s/%d just changed the state of lock:\n",
34138eddac3fSPeter Zijlstra 		curr->comm, task_pid_nr(curr));
34148eddac3fSPeter Zijlstra 	print_lock(this);
34158eddac3fSPeter Zijlstra 	if (forwards)
3416681fbec8SPaul E. McKenney 		pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
34178eddac3fSPeter Zijlstra 	else
3418681fbec8SPaul E. McKenney 		pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
34198eddac3fSPeter Zijlstra 	print_lock_name(other->class);
3420681fbec8SPaul E. McKenney 	pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
34218eddac3fSPeter Zijlstra 
3422681fbec8SPaul E. McKenney 	pr_warn("\nother info that might help us debug this:\n");
34238eddac3fSPeter Zijlstra 
34248eddac3fSPeter Zijlstra 	/* Find a middle lock (if one exists) */
34258eddac3fSPeter Zijlstra 	depth = get_lock_depth(other);
34268eddac3fSPeter Zijlstra 	do {
34278eddac3fSPeter Zijlstra 		if (depth == 0 && (entry != root)) {
3428681fbec8SPaul E. McKenney 			pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
34298eddac3fSPeter Zijlstra 			break;
34308eddac3fSPeter Zijlstra 		}
34318eddac3fSPeter Zijlstra 		middle = entry;
34328eddac3fSPeter Zijlstra 		entry = get_lock_parent(entry);
34338eddac3fSPeter Zijlstra 		depth--;
34348eddac3fSPeter Zijlstra 	} while (entry && entry != root && (depth >= 0));
34358eddac3fSPeter Zijlstra 	if (forwards)
34368eddac3fSPeter Zijlstra 		print_irq_lock_scenario(root, other,
34378eddac3fSPeter Zijlstra 			middle ? middle->class : root->class, other->class);
34388eddac3fSPeter Zijlstra 	else
34398eddac3fSPeter Zijlstra 		print_irq_lock_scenario(other, root,
34408eddac3fSPeter Zijlstra 			middle ? middle->class : other->class, root->class);
34418eddac3fSPeter Zijlstra 
34428eddac3fSPeter Zijlstra 	lockdep_print_held_locks(curr);
34438eddac3fSPeter Zijlstra 
3444681fbec8SPaul E. McKenney 	pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
344512593b74SBart Van Assche 	root->trace = save_trace();
344612593b74SBart Van Assche 	if (!root->trace)
3447f7c1c6b3SYuyang Du 		return;
34488eddac3fSPeter Zijlstra 	print_shortest_lock_dependencies(other, root);
34498eddac3fSPeter Zijlstra 
3450681fbec8SPaul E. McKenney 	pr_warn("\nstack backtrace:\n");
34518eddac3fSPeter Zijlstra 	dump_stack();
34528eddac3fSPeter Zijlstra }
34538eddac3fSPeter Zijlstra 
34548eddac3fSPeter Zijlstra /*
34558eddac3fSPeter Zijlstra  * Prove that in the forwards-direction subgraph starting at <this>
34568eddac3fSPeter Zijlstra  * there is no lock matching <mask>:
34578eddac3fSPeter Zijlstra  */
34588eddac3fSPeter Zijlstra static int
34598eddac3fSPeter Zijlstra check_usage_forwards(struct task_struct *curr, struct held_lock *this,
34608eddac3fSPeter Zijlstra 		     enum lock_usage_bit bit, const char *irqclass)
34618eddac3fSPeter Zijlstra {
3462*b11be024SBoqun Feng 	enum bfs_result ret;
34638eddac3fSPeter Zijlstra 	struct lock_list root;
34643f649ab7SKees Cook 	struct lock_list *target_entry;
34658eddac3fSPeter Zijlstra 
34668eddac3fSPeter Zijlstra 	root.parent = NULL;
34678eddac3fSPeter Zijlstra 	root.class = hlock_class(this);
3468627f364dSFrederic Weisbecker 	ret = find_usage_forwards(&root, lock_flag(bit), &target_entry);
3469*b11be024SBoqun Feng 	if (bfs_error(ret)) {
3470f7c1c6b3SYuyang Du 		print_bfs_bug(ret);
3471f7c1c6b3SYuyang Du 		return 0;
3472f7c1c6b3SYuyang Du 	}
3473*b11be024SBoqun Feng 	if (ret == BFS_RNOMATCH)
3474*b11be024SBoqun Feng 		return 1;
34758eddac3fSPeter Zijlstra 
3476f7c1c6b3SYuyang Du 	print_irq_inversion_bug(curr, &root, target_entry,
34778eddac3fSPeter Zijlstra 				this, 1, irqclass);
3478f7c1c6b3SYuyang Du 	return 0;
34798eddac3fSPeter Zijlstra }
34808eddac3fSPeter Zijlstra 
34818eddac3fSPeter Zijlstra /*
34828eddac3fSPeter Zijlstra  * Prove that in the backwards-direction subgraph starting at <this>
34838eddac3fSPeter Zijlstra  * there is no lock matching <mask>:
34848eddac3fSPeter Zijlstra  */
34858eddac3fSPeter Zijlstra static int
34868eddac3fSPeter Zijlstra check_usage_backwards(struct task_struct *curr, struct held_lock *this,
34878eddac3fSPeter Zijlstra 		      enum lock_usage_bit bit, const char *irqclass)
34888eddac3fSPeter Zijlstra {
3489*b11be024SBoqun Feng 	enum bfs_result ret;
34908eddac3fSPeter Zijlstra 	struct lock_list root;
34913f649ab7SKees Cook 	struct lock_list *target_entry;
34928eddac3fSPeter Zijlstra 
34938eddac3fSPeter Zijlstra 	root.parent = NULL;
34948eddac3fSPeter Zijlstra 	root.class = hlock_class(this);
3495627f364dSFrederic Weisbecker 	ret = find_usage_backwards(&root, lock_flag(bit), &target_entry);
3496*b11be024SBoqun Feng 	if (bfs_error(ret)) {
3497f7c1c6b3SYuyang Du 		print_bfs_bug(ret);
3498f7c1c6b3SYuyang Du 		return 0;
3499f7c1c6b3SYuyang Du 	}
3500*b11be024SBoqun Feng 	if (ret == BFS_RNOMATCH)
3501*b11be024SBoqun Feng 		return 1;
35028eddac3fSPeter Zijlstra 
3503f7c1c6b3SYuyang Du 	print_irq_inversion_bug(curr, &root, target_entry,
35048eddac3fSPeter Zijlstra 				this, 0, irqclass);
3505f7c1c6b3SYuyang Du 	return 0;
35068eddac3fSPeter Zijlstra }
35078eddac3fSPeter Zijlstra 
35088eddac3fSPeter Zijlstra void print_irqtrace_events(struct task_struct *curr)
35098eddac3fSPeter Zijlstra {
35100584df9cSMarco Elver 	const struct irqtrace_events *trace = &curr->irqtrace;
35110584df9cSMarco Elver 
35120584df9cSMarco Elver 	printk("irq event stamp: %u\n", trace->irq_events);
351304860d48SBorislav Petkov 	printk("hardirqs last  enabled at (%u): [<%px>] %pS\n",
35140584df9cSMarco Elver 		trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
35150584df9cSMarco Elver 		(void *)trace->hardirq_enable_ip);
351604860d48SBorislav Petkov 	printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
35170584df9cSMarco Elver 		trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip,
35180584df9cSMarco Elver 		(void *)trace->hardirq_disable_ip);
351904860d48SBorislav Petkov 	printk("softirqs last  enabled at (%u): [<%px>] %pS\n",
35200584df9cSMarco Elver 		trace->softirq_enable_event, (void *)trace->softirq_enable_ip,
35210584df9cSMarco Elver 		(void *)trace->softirq_enable_ip);
352204860d48SBorislav Petkov 	printk("softirqs last disabled at (%u): [<%px>] %pS\n",
35230584df9cSMarco Elver 		trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
35240584df9cSMarco Elver 		(void *)trace->softirq_disable_ip);
35258eddac3fSPeter Zijlstra }
35268eddac3fSPeter Zijlstra 
35278eddac3fSPeter Zijlstra static int HARDIRQ_verbose(struct lock_class *class)
35288eddac3fSPeter Zijlstra {
35298eddac3fSPeter Zijlstra #if HARDIRQ_VERBOSE
35308eddac3fSPeter Zijlstra 	return class_filter(class);
35318eddac3fSPeter Zijlstra #endif
35328eddac3fSPeter Zijlstra 	return 0;
35338eddac3fSPeter Zijlstra }
35348eddac3fSPeter Zijlstra 
35358eddac3fSPeter Zijlstra static int SOFTIRQ_verbose(struct lock_class *class)
35368eddac3fSPeter Zijlstra {
35378eddac3fSPeter Zijlstra #if SOFTIRQ_VERBOSE
35388eddac3fSPeter Zijlstra 	return class_filter(class);
35398eddac3fSPeter Zijlstra #endif
35408eddac3fSPeter Zijlstra 	return 0;
35418eddac3fSPeter Zijlstra }
35428eddac3fSPeter Zijlstra 
35438eddac3fSPeter Zijlstra #define STRICT_READ_CHECKS	1
35448eddac3fSPeter Zijlstra 
35458eddac3fSPeter Zijlstra static int (*state_verbose_f[])(struct lock_class *class) = {
35468eddac3fSPeter Zijlstra #define LOCKDEP_STATE(__STATE) \
35478eddac3fSPeter Zijlstra 	__STATE##_verbose,
35488eddac3fSPeter Zijlstra #include "lockdep_states.h"
35498eddac3fSPeter Zijlstra #undef LOCKDEP_STATE
35508eddac3fSPeter Zijlstra };
35518eddac3fSPeter Zijlstra 
35528eddac3fSPeter Zijlstra static inline int state_verbose(enum lock_usage_bit bit,
35538eddac3fSPeter Zijlstra 				struct lock_class *class)
35548eddac3fSPeter Zijlstra {
3555c902a1e8SFrederic Weisbecker 	return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class);
35568eddac3fSPeter Zijlstra }
35578eddac3fSPeter Zijlstra 
35588eddac3fSPeter Zijlstra typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
35598eddac3fSPeter Zijlstra 			     enum lock_usage_bit bit, const char *name);
35608eddac3fSPeter Zijlstra 
35618eddac3fSPeter Zijlstra static int
35628eddac3fSPeter Zijlstra mark_lock_irq(struct task_struct *curr, struct held_lock *this,
35638eddac3fSPeter Zijlstra 		enum lock_usage_bit new_bit)
35648eddac3fSPeter Zijlstra {
35658eddac3fSPeter Zijlstra 	int excl_bit = exclusive_bit(new_bit);
3566bba2a8f1SFrederic Weisbecker 	int read = new_bit & LOCK_USAGE_READ_MASK;
3567bba2a8f1SFrederic Weisbecker 	int dir = new_bit & LOCK_USAGE_DIR_MASK;
35688eddac3fSPeter Zijlstra 
35698eddac3fSPeter Zijlstra 	/*
35708eddac3fSPeter Zijlstra 	 * mark USED_IN has to look forwards -- to ensure no dependency
35718eddac3fSPeter Zijlstra 	 * has ENABLED state, which would allow recursion deadlocks.
35728eddac3fSPeter Zijlstra 	 *
35738eddac3fSPeter Zijlstra 	 * mark ENABLED has to look backwards -- to ensure no dependee
35748eddac3fSPeter Zijlstra 	 * has USED_IN state, which, again, would allow recursion deadlocks.
35758eddac3fSPeter Zijlstra 	 */
35768eddac3fSPeter Zijlstra 	check_usage_f usage = dir ?
35778eddac3fSPeter Zijlstra 		check_usage_backwards : check_usage_forwards;
35788eddac3fSPeter Zijlstra 
35798eddac3fSPeter Zijlstra 	/*
35808eddac3fSPeter Zijlstra 	 * Validate that this particular lock does not have conflicting
35818eddac3fSPeter Zijlstra 	 * usage states.
35828eddac3fSPeter Zijlstra 	 */
35838eddac3fSPeter Zijlstra 	if (!valid_state(curr, this, new_bit, excl_bit))
35848eddac3fSPeter Zijlstra 		return 0;
35858eddac3fSPeter Zijlstra 
35868eddac3fSPeter Zijlstra 	/*
35878eddac3fSPeter Zijlstra 	 * Validate that the lock dependencies don't have conflicting usage
35888eddac3fSPeter Zijlstra 	 * states.
35898eddac3fSPeter Zijlstra 	 */
3590bf998b98SYuyang Du 	if ((!read || STRICT_READ_CHECKS) &&
3591bba2a8f1SFrederic Weisbecker 			!usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
35928eddac3fSPeter Zijlstra 		return 0;
35938eddac3fSPeter Zijlstra 
35948eddac3fSPeter Zijlstra 	/*
35958eddac3fSPeter Zijlstra 	 * Check for read in write conflicts
35968eddac3fSPeter Zijlstra 	 */
35978eddac3fSPeter Zijlstra 	if (!read) {
3598bba2a8f1SFrederic Weisbecker 		if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK))
35998eddac3fSPeter Zijlstra 			return 0;
36008eddac3fSPeter Zijlstra 
36018eddac3fSPeter Zijlstra 		if (STRICT_READ_CHECKS &&
3602bba2a8f1SFrederic Weisbecker 			!usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
3603bba2a8f1SFrederic Weisbecker 				state_name(new_bit + LOCK_USAGE_READ_MASK)))
36048eddac3fSPeter Zijlstra 			return 0;
36058eddac3fSPeter Zijlstra 	}
36068eddac3fSPeter Zijlstra 
36078eddac3fSPeter Zijlstra 	if (state_verbose(new_bit, hlock_class(this)))
36088eddac3fSPeter Zijlstra 		return 2;
36098eddac3fSPeter Zijlstra 
36108eddac3fSPeter Zijlstra 	return 1;
36118eddac3fSPeter Zijlstra }
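
/*
 * Informal example of the direction logic above, assuming the usual
 * usage-bit encoding (the READ bit in the low bit, the DIR bit next to
 * it) and exclusive_bit() flipping the direction while stripping READ:
 *
 *   new_bit == LOCK_USED_IN_HARDIRQ: dir is clear, excl_bit is
 *	LOCK_ENABLED_HARDIRQ, so we search forwards from this lock for
 *	anything already marked LOCK_ENABLED_HARDIRQ.
 *   new_bit == LOCK_ENABLED_HARDIRQ: dir is set, excl_bit is
 *	LOCK_USED_IN_HARDIRQ, so we search backwards for anything already
 *	marked LOCK_USED_IN_HARDIRQ.
 */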
36128eddac3fSPeter Zijlstra 
36138eddac3fSPeter Zijlstra /*
36148eddac3fSPeter Zijlstra  * Mark all held locks with a usage bit:
36158eddac3fSPeter Zijlstra  */
36168eddac3fSPeter Zijlstra static int
3617436a49aeSFrederic Weisbecker mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
36188eddac3fSPeter Zijlstra {
36198eddac3fSPeter Zijlstra 	struct held_lock *hlock;
36208eddac3fSPeter Zijlstra 	int i;
36218eddac3fSPeter Zijlstra 
36228eddac3fSPeter Zijlstra 	for (i = 0; i < curr->lockdep_depth; i++) {
3623436a49aeSFrederic Weisbecker 		enum lock_usage_bit hlock_bit = base_bit;
36248eddac3fSPeter Zijlstra 		hlock = curr->held_locks + i;
36258eddac3fSPeter Zijlstra 
36268eddac3fSPeter Zijlstra 		if (hlock->read)
3627bba2a8f1SFrederic Weisbecker 			hlock_bit += LOCK_USAGE_READ_MASK;
36288eddac3fSPeter Zijlstra 
3629436a49aeSFrederic Weisbecker 		BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
36308eddac3fSPeter Zijlstra 
363134d0ed5eSOleg Nesterov 		if (!hlock->check)
36328eddac3fSPeter Zijlstra 			continue;
36338eddac3fSPeter Zijlstra 
3634436a49aeSFrederic Weisbecker 		if (!mark_lock(curr, hlock, hlock_bit))
36358eddac3fSPeter Zijlstra 			return 0;
36368eddac3fSPeter Zijlstra 	}
36378eddac3fSPeter Zijlstra 
36388eddac3fSPeter Zijlstra 	return 1;
36398eddac3fSPeter Zijlstra }
36408eddac3fSPeter Zijlstra 
36418eddac3fSPeter Zijlstra /*
36428eddac3fSPeter Zijlstra  * Hardirqs will be enabled:
36438eddac3fSPeter Zijlstra  */
3644c86e9b98SPeter Zijlstra static void __trace_hardirqs_on_caller(void)
36458eddac3fSPeter Zijlstra {
36468eddac3fSPeter Zijlstra 	struct task_struct *curr = current;
36478eddac3fSPeter Zijlstra 
36488eddac3fSPeter Zijlstra 	/*
36498eddac3fSPeter Zijlstra 	 * We are going to turn hardirqs on, so set the
36508eddac3fSPeter Zijlstra 	 * usage bit for all held locks:
36518eddac3fSPeter Zijlstra 	 */
3652436a49aeSFrederic Weisbecker 	if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
36538eddac3fSPeter Zijlstra 		return;
36548eddac3fSPeter Zijlstra 	/*
36558eddac3fSPeter Zijlstra 	 * If we have softirqs enabled, then set the usage
36568eddac3fSPeter Zijlstra 	 * bit for all held locks. (disabled hardirqs prevented
36578eddac3fSPeter Zijlstra 	 * this bit from being set before)
36588eddac3fSPeter Zijlstra 	 */
36598eddac3fSPeter Zijlstra 	if (curr->softirqs_enabled)
3660c86e9b98SPeter Zijlstra 		mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
36618eddac3fSPeter Zijlstra }
36628eddac3fSPeter Zijlstra 
3663c86e9b98SPeter Zijlstra /**
3664c86e9b98SPeter Zijlstra  * lockdep_hardirqs_on_prepare - Prepare for enabling interrupts
3665c86e9b98SPeter Zijlstra  * @ip:		Caller address
3666c86e9b98SPeter Zijlstra  *
3667c86e9b98SPeter Zijlstra  * Invoked before a possible transition to RCU idle from exit to user or
3668c86e9b98SPeter Zijlstra  * guest mode. This ensures that all RCU operations are done before RCU
3669c86e9b98SPeter Zijlstra  * stops watching. After the RCU transition lockdep_hardirqs_on() has to be
3670c86e9b98SPeter Zijlstra  * invoked to set the final state.
3671c86e9b98SPeter Zijlstra  */
3672c86e9b98SPeter Zijlstra void lockdep_hardirqs_on_prepare(unsigned long ip)
36738eddac3fSPeter Zijlstra {
3674859d069eSPeter Zijlstra 	if (unlikely(!debug_locks))
36758eddac3fSPeter Zijlstra 		return;
36768eddac3fSPeter Zijlstra 
3677859d069eSPeter Zijlstra 	/*
3678859d069eSPeter Zijlstra 	 * NMIs do not (and cannot) track lock dependencies, nothing to do.
3679859d069eSPeter Zijlstra 	 */
3680859d069eSPeter Zijlstra 	if (unlikely(in_nmi()))
3681859d069eSPeter Zijlstra 		return;
3682859d069eSPeter Zijlstra 
3683859d069eSPeter Zijlstra 	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
36848eddac3fSPeter Zijlstra 		return;
36858eddac3fSPeter Zijlstra 
3686f9ad4a5fSPeter Zijlstra 	if (unlikely(lockdep_hardirqs_enabled())) {
36878eddac3fSPeter Zijlstra 		/*
36888eddac3fSPeter Zijlstra 		 * Neither irq nor preemption are disabled here
36898eddac3fSPeter Zijlstra 		 * so this is racy by nature but losing one hit
36908eddac3fSPeter Zijlstra 		 * in a stat is not a big deal.
36918eddac3fSPeter Zijlstra 		 */
36928eddac3fSPeter Zijlstra 		__debug_atomic_inc(redundant_hardirqs_on);
36938eddac3fSPeter Zijlstra 		return;
36948eddac3fSPeter Zijlstra 	}
36958eddac3fSPeter Zijlstra 
36968eddac3fSPeter Zijlstra 	/*
36978eddac3fSPeter Zijlstra 	 * We're enabling irqs and according to our state above irqs weren't
36988eddac3fSPeter Zijlstra 	 * already enabled, yet we find the hardware thinks they are in fact
36998eddac3fSPeter Zijlstra 	 * enabled.. someone messed up their IRQ state tracing.
37008eddac3fSPeter Zijlstra 	 */
37018eddac3fSPeter Zijlstra 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
37028eddac3fSPeter Zijlstra 		return;
37038eddac3fSPeter Zijlstra 
37048eddac3fSPeter Zijlstra 	/*
37058eddac3fSPeter Zijlstra 	 * See the fine text that goes along with this variable definition.
37068eddac3fSPeter Zijlstra 	 */
3707d671002bSzhengbin 	if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
37088eddac3fSPeter Zijlstra 		return;
37098eddac3fSPeter Zijlstra 
37108eddac3fSPeter Zijlstra 	/*
37118eddac3fSPeter Zijlstra 	 * Can't allow enabling interrupts while in an interrupt handler,
37128eddac3fSPeter Zijlstra 	 * that's general bad form and such. Recursion, limited stack etc..
37138eddac3fSPeter Zijlstra 	 */
3714f9ad4a5fSPeter Zijlstra 	if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
37158eddac3fSPeter Zijlstra 		return;
37168eddac3fSPeter Zijlstra 
3717c86e9b98SPeter Zijlstra 	current->hardirq_chain_key = current->curr_chain_key;
3718c86e9b98SPeter Zijlstra 
371910476e63SPeter Zijlstra 	current->lockdep_recursion++;
3720c86e9b98SPeter Zijlstra 	__trace_hardirqs_on_caller();
372110476e63SPeter Zijlstra 	lockdep_recursion_finish();
37228eddac3fSPeter Zijlstra }
3723c86e9b98SPeter Zijlstra EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
3724c86e9b98SPeter Zijlstra 
3725c86e9b98SPeter Zijlstra void noinstr lockdep_hardirqs_on(unsigned long ip)
3726c86e9b98SPeter Zijlstra {
37270584df9cSMarco Elver 	struct irqtrace_events *trace = &current->irqtrace;
3728c86e9b98SPeter Zijlstra 
3729859d069eSPeter Zijlstra 	if (unlikely(!debug_locks))
3730c86e9b98SPeter Zijlstra 		return;
3731c86e9b98SPeter Zijlstra 
3732859d069eSPeter Zijlstra 	/*
3733859d069eSPeter Zijlstra 	 * NMIs can happen in the middle of local_irq_{en,dis}able() where the
3734859d069eSPeter Zijlstra 	 * tracking state and hardware state are out of sync.
3735859d069eSPeter Zijlstra 	 *
3736859d069eSPeter Zijlstra 	 * NMIs must save lockdep_hardirqs_enabled() to restore IRQ state from,
3737859d069eSPeter Zijlstra 	 * and not rely on hardware state like normal interrupts.
3738859d069eSPeter Zijlstra 	 */
3739859d069eSPeter Zijlstra 	if (unlikely(in_nmi())) {
3740ed004953Speterz@infradead.org 		if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
3741ed004953Speterz@infradead.org 			return;
3742ed004953Speterz@infradead.org 
3743859d069eSPeter Zijlstra 		/*
3744859d069eSPeter Zijlstra 		 * Skip:
3745859d069eSPeter Zijlstra 		 *  - recursion check, because NMI can hit lockdep;
3746859d069eSPeter Zijlstra 		 *  - hardware state check, because above;
3747859d069eSPeter Zijlstra 		 *  - chain_key check, see lockdep_hardirqs_on_prepare().
3748859d069eSPeter Zijlstra 		 */
3749859d069eSPeter Zijlstra 		goto skip_checks;
3750859d069eSPeter Zijlstra 	}
3751859d069eSPeter Zijlstra 
3752859d069eSPeter Zijlstra 	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
3753c86e9b98SPeter Zijlstra 		return;
3754c86e9b98SPeter Zijlstra 
3755f9ad4a5fSPeter Zijlstra 	if (lockdep_hardirqs_enabled()) {
3756c86e9b98SPeter Zijlstra 		/*
3757c86e9b98SPeter Zijlstra 		 * Neither irq nor preemption are disabled here
3758c86e9b98SPeter Zijlstra 		 * so this is racy by nature but losing one hit
3759c86e9b98SPeter Zijlstra 		 * in a stat is not a big deal.
3760c86e9b98SPeter Zijlstra 		 */
3761c86e9b98SPeter Zijlstra 		__debug_atomic_inc(redundant_hardirqs_on);
3762c86e9b98SPeter Zijlstra 		return;
3763c86e9b98SPeter Zijlstra 	}
3764c86e9b98SPeter Zijlstra 
3765c86e9b98SPeter Zijlstra 	/*
3766c86e9b98SPeter Zijlstra 	 * We're enabling irqs and according to our state above irqs weren't
3767c86e9b98SPeter Zijlstra 	 * already enabled, yet we find the hardware thinks they are in fact
3768c86e9b98SPeter Zijlstra 	 * enabled.. someone messed up their IRQ state tracing.
3769c86e9b98SPeter Zijlstra 	 */
3770c86e9b98SPeter Zijlstra 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3771c86e9b98SPeter Zijlstra 		return;
3772c86e9b98SPeter Zijlstra 
3773c86e9b98SPeter Zijlstra 	/*
3774c86e9b98SPeter Zijlstra 	 * Ensure the lock stack remained unchanged between
3775c86e9b98SPeter Zijlstra 	 * lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on().
3776c86e9b98SPeter Zijlstra 	 */
3777c86e9b98SPeter Zijlstra 	DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
3778c86e9b98SPeter Zijlstra 			    current->curr_chain_key);
3779c86e9b98SPeter Zijlstra 
3780859d069eSPeter Zijlstra skip_checks:
3781c86e9b98SPeter Zijlstra 	/* we'll do an OFF -> ON transition: */
3782fddf9055SPeter Zijlstra 	__this_cpu_write(hardirqs_enabled, 1);
37830584df9cSMarco Elver 	trace->hardirq_enable_ip = ip;
37840584df9cSMarco Elver 	trace->hardirq_enable_event = ++trace->irq_events;
3785c86e9b98SPeter Zijlstra 	debug_atomic_inc(hardirqs_on_events);
3786c86e9b98SPeter Zijlstra }
3787c86e9b98SPeter Zijlstra EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
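
/*
 * Informal usage sketch (hypothetical caller): the two entry points above
 * are meant to be used as a pair around the actual transition, roughly
 *
 *	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
 *	...	(arch / RCU transition, e.g. towards user or guest mode)
 *	lockdep_hardirqs_on(CALLER_ADDR0);
 *
 * the prepare step does the work that may still need RCU and graph_lock
 * (marking the held locks), while lockdep_hardirqs_on() only flips the
 * recorded hardirq state and verifies the chain key did not change in
 * between.
 */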
37888eddac3fSPeter Zijlstra 
37898eddac3fSPeter Zijlstra /*
37908eddac3fSPeter Zijlstra  * Hardirqs were disabled:
37918eddac3fSPeter Zijlstra  */
3792c86e9b98SPeter Zijlstra void noinstr lockdep_hardirqs_off(unsigned long ip)
37938eddac3fSPeter Zijlstra {
3794859d069eSPeter Zijlstra 	if (unlikely(!debug_locks))
3795859d069eSPeter Zijlstra 		return;
37968eddac3fSPeter Zijlstra 
3797859d069eSPeter Zijlstra 	/*
3798859d069eSPeter Zijlstra 	 * Matching lockdep_hardirqs_on(), allow NMIs in the middle of lockdep;
3799859d069eSPeter Zijlstra 	 * they will restore the software state. This ensures the software
3800859d069eSPeter Zijlstra 	 * state is consistent inside NMIs as well.
3801859d069eSPeter Zijlstra 	 */
3802ed004953Speterz@infradead.org 	if (in_nmi()) {
3803ed004953Speterz@infradead.org 		if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
3804ed004953Speterz@infradead.org 			return;
3805ed004953Speterz@infradead.org 	} else if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)
38068eddac3fSPeter Zijlstra 		return;
38078eddac3fSPeter Zijlstra 
38088eddac3fSPeter Zijlstra 	/*
38098eddac3fSPeter Zijlstra 	 * So we're supposed to get called after you mask local IRQs, but for
38108eddac3fSPeter Zijlstra 	 * some reason the hardware doesn't quite think you did a proper job.
38118eddac3fSPeter Zijlstra 	 */
38128eddac3fSPeter Zijlstra 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
38138eddac3fSPeter Zijlstra 		return;
38148eddac3fSPeter Zijlstra 
3815f9ad4a5fSPeter Zijlstra 	if (lockdep_hardirqs_enabled()) {
38160584df9cSMarco Elver 		struct irqtrace_events *trace = &current->irqtrace;
38170584df9cSMarco Elver 
38188eddac3fSPeter Zijlstra 		/*
38198eddac3fSPeter Zijlstra 		 * We have done an ON -> OFF transition:
38208eddac3fSPeter Zijlstra 		 */
3821fddf9055SPeter Zijlstra 		__this_cpu_write(hardirqs_enabled, 0);
38220584df9cSMarco Elver 		trace->hardirq_disable_ip = ip;
38230584df9cSMarco Elver 		trace->hardirq_disable_event = ++trace->irq_events;
38248eddac3fSPeter Zijlstra 		debug_atomic_inc(hardirqs_off_events);
3825c86e9b98SPeter Zijlstra 	} else {
38268eddac3fSPeter Zijlstra 		debug_atomic_inc(redundant_hardirqs_off);
38278eddac3fSPeter Zijlstra 	}
3828c86e9b98SPeter Zijlstra }
3829c86e9b98SPeter Zijlstra EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
38308eddac3fSPeter Zijlstra 
38318eddac3fSPeter Zijlstra /*
38328eddac3fSPeter Zijlstra  * Softirqs will be enabled:
38338eddac3fSPeter Zijlstra  */
38340d38453cSPeter Zijlstra void lockdep_softirqs_on(unsigned long ip)
38358eddac3fSPeter Zijlstra {
38360584df9cSMarco Elver 	struct irqtrace_events *trace = &current->irqtrace;
38378eddac3fSPeter Zijlstra 
38388eddac3fSPeter Zijlstra 	if (unlikely(!debug_locks || current->lockdep_recursion))
38398eddac3fSPeter Zijlstra 		return;
38408eddac3fSPeter Zijlstra 
38418eddac3fSPeter Zijlstra 	/*
38428eddac3fSPeter Zijlstra 	 * We fancy IRQs being disabled here, see softirq.c; this avoids
38438eddac3fSPeter Zijlstra 	 * funny state and nesting things.
38448eddac3fSPeter Zijlstra 	 */
38458eddac3fSPeter Zijlstra 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
38468eddac3fSPeter Zijlstra 		return;
38478eddac3fSPeter Zijlstra 
38480584df9cSMarco Elver 	if (current->softirqs_enabled) {
38498eddac3fSPeter Zijlstra 		debug_atomic_inc(redundant_softirqs_on);
38508eddac3fSPeter Zijlstra 		return;
38518eddac3fSPeter Zijlstra 	}
38528eddac3fSPeter Zijlstra 
385310476e63SPeter Zijlstra 	current->lockdep_recursion++;
38548eddac3fSPeter Zijlstra 	/*
38558eddac3fSPeter Zijlstra 	 * We'll do an OFF -> ON transition:
38568eddac3fSPeter Zijlstra 	 */
38570584df9cSMarco Elver 	current->softirqs_enabled = 1;
38580584df9cSMarco Elver 	trace->softirq_enable_ip = ip;
38590584df9cSMarco Elver 	trace->softirq_enable_event = ++trace->irq_events;
38608eddac3fSPeter Zijlstra 	debug_atomic_inc(softirqs_on_events);
38618eddac3fSPeter Zijlstra 	/*
38628eddac3fSPeter Zijlstra 	 * We are going to turn softirqs on, so set the
38638eddac3fSPeter Zijlstra 	 * usage bit for all held locks, if hardirqs are
38648eddac3fSPeter Zijlstra 	 * enabled too:
38658eddac3fSPeter Zijlstra 	 */
3866f9ad4a5fSPeter Zijlstra 	if (lockdep_hardirqs_enabled())
38670584df9cSMarco Elver 		mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
386810476e63SPeter Zijlstra 	lockdep_recursion_finish();
38698eddac3fSPeter Zijlstra }
38708eddac3fSPeter Zijlstra 
38718eddac3fSPeter Zijlstra /*
38728eddac3fSPeter Zijlstra  * Softirqs were disabled:
38738eddac3fSPeter Zijlstra  */
38740d38453cSPeter Zijlstra void lockdep_softirqs_off(unsigned long ip)
38758eddac3fSPeter Zijlstra {
38768eddac3fSPeter Zijlstra 	if (unlikely(!debug_locks || current->lockdep_recursion))
38778eddac3fSPeter Zijlstra 		return;
38788eddac3fSPeter Zijlstra 
38798eddac3fSPeter Zijlstra 	/*
38808eddac3fSPeter Zijlstra 	 * We fancy IRQs being disabled here, see softirq.c
38818eddac3fSPeter Zijlstra 	 */
38828eddac3fSPeter Zijlstra 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
38838eddac3fSPeter Zijlstra 		return;
38848eddac3fSPeter Zijlstra 
38850584df9cSMarco Elver 	if (current->softirqs_enabled) {
38860584df9cSMarco Elver 		struct irqtrace_events *trace = &current->irqtrace;
38870584df9cSMarco Elver 
38888eddac3fSPeter Zijlstra 		/*
38898eddac3fSPeter Zijlstra 		 * We have done an ON -> OFF transition:
38908eddac3fSPeter Zijlstra 		 */
38910584df9cSMarco Elver 		current->softirqs_enabled = 0;
38920584df9cSMarco Elver 		trace->softirq_disable_ip = ip;
38930584df9cSMarco Elver 		trace->softirq_disable_event = ++trace->irq_events;
38948eddac3fSPeter Zijlstra 		debug_atomic_inc(softirqs_off_events);
38958eddac3fSPeter Zijlstra 		/*
38968eddac3fSPeter Zijlstra 		 * Whoops, we wanted softirqs off, so why aren't they?
38978eddac3fSPeter Zijlstra 		 */
38988eddac3fSPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(!softirq_count());
38998eddac3fSPeter Zijlstra 	} else
39008eddac3fSPeter Zijlstra 		debug_atomic_inc(redundant_softirqs_off);
39018eddac3fSPeter Zijlstra }
39028eddac3fSPeter Zijlstra 
390309180651SYuyang Du static int
390409180651SYuyang Du mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
39058eddac3fSPeter Zijlstra {
390609180651SYuyang Du 	if (!check)
390709180651SYuyang Du 		goto lock_used;
390809180651SYuyang Du 
39098eddac3fSPeter Zijlstra 	/*
39108eddac3fSPeter Zijlstra 	 * If non-trylock use in a hardirq or softirq context, then
39118eddac3fSPeter Zijlstra 	 * mark the lock as used in these contexts:
39128eddac3fSPeter Zijlstra 	 */
39138eddac3fSPeter Zijlstra 	if (!hlock->trylock) {
39148eddac3fSPeter Zijlstra 		if (hlock->read) {
3915f9ad4a5fSPeter Zijlstra 			if (lockdep_hardirq_context())
39168eddac3fSPeter Zijlstra 				if (!mark_lock(curr, hlock,
39178eddac3fSPeter Zijlstra 						LOCK_USED_IN_HARDIRQ_READ))
39188eddac3fSPeter Zijlstra 					return 0;
39198eddac3fSPeter Zijlstra 			if (curr->softirq_context)
39208eddac3fSPeter Zijlstra 				if (!mark_lock(curr, hlock,
39218eddac3fSPeter Zijlstra 						LOCK_USED_IN_SOFTIRQ_READ))
39228eddac3fSPeter Zijlstra 					return 0;
39238eddac3fSPeter Zijlstra 		} else {
3924f9ad4a5fSPeter Zijlstra 			if (lockdep_hardirq_context())
39258eddac3fSPeter Zijlstra 				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
39268eddac3fSPeter Zijlstra 					return 0;
39278eddac3fSPeter Zijlstra 			if (curr->softirq_context)
39288eddac3fSPeter Zijlstra 				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
39298eddac3fSPeter Zijlstra 					return 0;
39308eddac3fSPeter Zijlstra 		}
39318eddac3fSPeter Zijlstra 	}
39328eddac3fSPeter Zijlstra 	if (!hlock->hardirqs_off) {
39338eddac3fSPeter Zijlstra 		if (hlock->read) {
39348eddac3fSPeter Zijlstra 			if (!mark_lock(curr, hlock,
39358eddac3fSPeter Zijlstra 					LOCK_ENABLED_HARDIRQ_READ))
39368eddac3fSPeter Zijlstra 				return 0;
39378eddac3fSPeter Zijlstra 			if (curr->softirqs_enabled)
39388eddac3fSPeter Zijlstra 				if (!mark_lock(curr, hlock,
39398eddac3fSPeter Zijlstra 						LOCK_ENABLED_SOFTIRQ_READ))
39408eddac3fSPeter Zijlstra 					return 0;
39418eddac3fSPeter Zijlstra 		} else {
39428eddac3fSPeter Zijlstra 			if (!mark_lock(curr, hlock,
39438eddac3fSPeter Zijlstra 					LOCK_ENABLED_HARDIRQ))
39448eddac3fSPeter Zijlstra 				return 0;
39458eddac3fSPeter Zijlstra 			if (curr->softirqs_enabled)
39468eddac3fSPeter Zijlstra 				if (!mark_lock(curr, hlock,
39478eddac3fSPeter Zijlstra 						LOCK_ENABLED_SOFTIRQ))
39488eddac3fSPeter Zijlstra 					return 0;
39498eddac3fSPeter Zijlstra 		}
39508eddac3fSPeter Zijlstra 	}
39518eddac3fSPeter Zijlstra 
395209180651SYuyang Du lock_used:
395309180651SYuyang Du 	/* mark it as used: */
395409180651SYuyang Du 	if (!mark_lock(curr, hlock, LOCK_USED))
395509180651SYuyang Du 		return 0;
395609180651SYuyang Du 
39578eddac3fSPeter Zijlstra 	return 1;
39588eddac3fSPeter Zijlstra }
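
/*
 * Informal summary of mark_usage(): with check set, a non-trylock
 * acquisition in hardirq (softirq) context marks the class
 * LOCK_USED_IN_HARDIRQ(_READ) (resp. LOCK_USED_IN_SOFTIRQ(_READ)), and an
 * acquisition with hardirqs enabled marks LOCK_ENABLED_HARDIRQ(_READ)
 * plus, if softirqs are enabled as well, LOCK_ENABLED_SOFTIRQ(_READ).
 * Every acquisition, checked or not, ends up marking the class LOCK_USED.
 */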
39598eddac3fSPeter Zijlstra 
3960c2469756SBoqun Feng static inline unsigned int task_irq_context(struct task_struct *task)
3961c2469756SBoqun Feng {
3962f9ad4a5fSPeter Zijlstra 	return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() +
3963b3b9c187SWaiman Long 	       LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
3964c2469756SBoqun Feng }
3965c2469756SBoqun Feng 
39668eddac3fSPeter Zijlstra static int separate_irq_context(struct task_struct *curr,
39678eddac3fSPeter Zijlstra 		struct held_lock *hlock)
39688eddac3fSPeter Zijlstra {
39698eddac3fSPeter Zijlstra 	unsigned int depth = curr->lockdep_depth;
39708eddac3fSPeter Zijlstra 
39718eddac3fSPeter Zijlstra 	/*
39728eddac3fSPeter Zijlstra 	 * Keep track of points where we cross into an interrupt context:
39738eddac3fSPeter Zijlstra 	 */
39748eddac3fSPeter Zijlstra 	if (depth) {
39758eddac3fSPeter Zijlstra 		struct held_lock *prev_hlock;
39768eddac3fSPeter Zijlstra 
39778eddac3fSPeter Zijlstra 		prev_hlock = curr->held_locks + depth-1;
39788eddac3fSPeter Zijlstra 		/*
39798eddac3fSPeter Zijlstra 		 * If we cross into another context, reset the
39808eddac3fSPeter Zijlstra 		 * hash key (this also prevents the checking and the
39818eddac3fSPeter Zijlstra 		 * adding of the dependency to 'prev'):
39828eddac3fSPeter Zijlstra 		 */
39838eddac3fSPeter Zijlstra 		if (prev_hlock->irq_context != hlock->irq_context)
39848eddac3fSPeter Zijlstra 			return 1;
39858eddac3fSPeter Zijlstra 	}
39868eddac3fSPeter Zijlstra 	return 0;
39878eddac3fSPeter Zijlstra }
39888eddac3fSPeter Zijlstra 
39898eddac3fSPeter Zijlstra /*
39908eddac3fSPeter Zijlstra  * Mark a lock with a usage bit, and validate the state transition:
39918eddac3fSPeter Zijlstra  */
39928eddac3fSPeter Zijlstra static int mark_lock(struct task_struct *curr, struct held_lock *this,
39938eddac3fSPeter Zijlstra 			     enum lock_usage_bit new_bit)
39948eddac3fSPeter Zijlstra {
39958eddac3fSPeter Zijlstra 	unsigned int new_mask = 1 << new_bit, ret = 1;
39968eddac3fSPeter Zijlstra 
39974d56330dSYuyang Du 	if (new_bit >= LOCK_USAGE_STATES) {
39984d56330dSYuyang Du 		DEBUG_LOCKS_WARN_ON(1);
39994d56330dSYuyang Du 		return 0;
40004d56330dSYuyang Du 	}
40014d56330dSYuyang Du 
40028eddac3fSPeter Zijlstra 	/*
40038eddac3fSPeter Zijlstra 	 * If already set then do not dirty the cacheline,
40048eddac3fSPeter Zijlstra 	 * nor do any checks:
40058eddac3fSPeter Zijlstra 	 */
40068eddac3fSPeter Zijlstra 	if (likely(hlock_class(this)->usage_mask & new_mask))
40078eddac3fSPeter Zijlstra 		return 1;
40088eddac3fSPeter Zijlstra 
40098eddac3fSPeter Zijlstra 	if (!graph_lock())
40108eddac3fSPeter Zijlstra 		return 0;
40118eddac3fSPeter Zijlstra 	/*
40128eddac3fSPeter Zijlstra 	 * Make sure we didn't race:
40138eddac3fSPeter Zijlstra 	 */
40148eddac3fSPeter Zijlstra 	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
40158eddac3fSPeter Zijlstra 		graph_unlock();
40168eddac3fSPeter Zijlstra 		return 1;
40178eddac3fSPeter Zijlstra 	}
40188eddac3fSPeter Zijlstra 
40198eddac3fSPeter Zijlstra 	hlock_class(this)->usage_mask |= new_mask;
40208eddac3fSPeter Zijlstra 
402112593b74SBart Van Assche 	if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
40228eddac3fSPeter Zijlstra 		return 0;
40238eddac3fSPeter Zijlstra 
40248eddac3fSPeter Zijlstra 	switch (new_bit) {
40258eddac3fSPeter Zijlstra 	case LOCK_USED:
40268eddac3fSPeter Zijlstra 		debug_atomic_dec(nr_unused_locks);
40278eddac3fSPeter Zijlstra 		break;
40288eddac3fSPeter Zijlstra 	default:
40294d56330dSYuyang Du 		ret = mark_lock_irq(curr, this, new_bit);
40304d56330dSYuyang Du 		if (!ret)
40318eddac3fSPeter Zijlstra 			return 0;
40328eddac3fSPeter Zijlstra 	}
40338eddac3fSPeter Zijlstra 
40348eddac3fSPeter Zijlstra 	graph_unlock();
40358eddac3fSPeter Zijlstra 
40368eddac3fSPeter Zijlstra 	/*
40378eddac3fSPeter Zijlstra 	 * We must printk outside of the graph_lock:
40388eddac3fSPeter Zijlstra 	 */
40398eddac3fSPeter Zijlstra 	if (ret == 2) {
40408eddac3fSPeter Zijlstra 		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
40418eddac3fSPeter Zijlstra 		print_lock(this);
40428eddac3fSPeter Zijlstra 		print_irqtrace_events(curr);
40438eddac3fSPeter Zijlstra 		dump_stack();
40448eddac3fSPeter Zijlstra 	}
40458eddac3fSPeter Zijlstra 
40468eddac3fSPeter Zijlstra 	return ret;
40478eddac3fSPeter Zijlstra }
40488eddac3fSPeter Zijlstra 
40499a019db0SPeter Zijlstra static inline short task_wait_context(struct task_struct *curr)
40509a019db0SPeter Zijlstra {
40519a019db0SPeter Zijlstra 	/*
40529a019db0SPeter Zijlstra 	 * Set appropriate wait type for the context; for IRQs we have to take
40539a019db0SPeter Zijlstra 	 * into account force_irqthreads as that is implied by PREEMPT_RT.
40549a019db0SPeter Zijlstra 	 */
4055f9ad4a5fSPeter Zijlstra 	if (lockdep_hardirq_context()) {
40569a019db0SPeter Zijlstra 		/*
40579a019db0SPeter Zijlstra 		 * Check if force_irqthreads will run us threaded.
40589a019db0SPeter Zijlstra 		 */
40599a019db0SPeter Zijlstra 		if (curr->hardirq_threaded || curr->irq_config)
40609a019db0SPeter Zijlstra 			return LD_WAIT_CONFIG;
40619a019db0SPeter Zijlstra 
40629a019db0SPeter Zijlstra 		return LD_WAIT_SPIN;
40639a019db0SPeter Zijlstra 	} else if (curr->softirq_context) {
40649a019db0SPeter Zijlstra 		/*
40659a019db0SPeter Zijlstra 		 * Softirqs are always threaded.
40669a019db0SPeter Zijlstra 		 */
40679a019db0SPeter Zijlstra 		return LD_WAIT_CONFIG;
40689a019db0SPeter Zijlstra 	}
40699a019db0SPeter Zijlstra 
40709a019db0SPeter Zijlstra 	return LD_WAIT_MAX;
40719a019db0SPeter Zijlstra }
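
/*
 * Informal summary of task_wait_context(): hardirq context yields
 * LD_WAIT_SPIN unless the handler is (to be) threaded, in which case it
 * yields LD_WAIT_CONFIG; softirq context always yields LD_WAIT_CONFIG;
 * plain task context yields LD_WAIT_MAX, i.e. no restriction.
 */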
40729a019db0SPeter Zijlstra 
4073de8f5e4fSPeter Zijlstra static int
4074de8f5e4fSPeter Zijlstra print_lock_invalid_wait_context(struct task_struct *curr,
4075de8f5e4fSPeter Zijlstra 				struct held_lock *hlock)
4076de8f5e4fSPeter Zijlstra {
40779a019db0SPeter Zijlstra 	short curr_inner;
40789a019db0SPeter Zijlstra 
4079de8f5e4fSPeter Zijlstra 	if (!debug_locks_off())
4080de8f5e4fSPeter Zijlstra 		return 0;
4081de8f5e4fSPeter Zijlstra 	if (debug_locks_silent)
4082de8f5e4fSPeter Zijlstra 		return 0;
4083de8f5e4fSPeter Zijlstra 
4084de8f5e4fSPeter Zijlstra 	pr_warn("\n");
4085de8f5e4fSPeter Zijlstra 	pr_warn("=============================\n");
4086de8f5e4fSPeter Zijlstra 	pr_warn("[ BUG: Invalid wait context ]\n");
4087de8f5e4fSPeter Zijlstra 	print_kernel_ident();
4088de8f5e4fSPeter Zijlstra 	pr_warn("-----------------------------\n");
4089de8f5e4fSPeter Zijlstra 
4090de8f5e4fSPeter Zijlstra 	pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
4091de8f5e4fSPeter Zijlstra 	print_lock(hlock);
4092de8f5e4fSPeter Zijlstra 
4093de8f5e4fSPeter Zijlstra 	pr_warn("other info that might help us debug this:\n");
40949a019db0SPeter Zijlstra 
40959a019db0SPeter Zijlstra 	curr_inner = task_wait_context(curr);
40969a019db0SPeter Zijlstra 	pr_warn("context-{%d:%d}\n", curr_inner, curr_inner);
40979a019db0SPeter Zijlstra 
4098de8f5e4fSPeter Zijlstra 	lockdep_print_held_locks(curr);
4099de8f5e4fSPeter Zijlstra 
4100de8f5e4fSPeter Zijlstra 	pr_warn("stack backtrace:\n");
4101de8f5e4fSPeter Zijlstra 	dump_stack();
4102de8f5e4fSPeter Zijlstra 
4103de8f5e4fSPeter Zijlstra 	return 0;
4104de8f5e4fSPeter Zijlstra }
4105de8f5e4fSPeter Zijlstra 
4106de8f5e4fSPeter Zijlstra /*
4107de8f5e4fSPeter Zijlstra  * Verify the wait_type context.
4108de8f5e4fSPeter Zijlstra  *
4109de8f5e4fSPeter Zijlstra  * This check validates that we take locks in the right wait-type order; that is,
4110de8f5e4fSPeter Zijlstra  * it ensures that we do not take mutexes inside spinlocks and do not attempt to
4111de8f5e4fSPeter Zijlstra  * acquire spinlocks inside raw_spinlocks, and so on.
4112de8f5e4fSPeter Zijlstra  *
4113de8f5e4fSPeter Zijlstra  * The entire thing is slightly more complex because of RCU, RCU is a lock that
4114de8f5e4fSPeter Zijlstra  * can be taken from (pretty much) any context but also has constraints.
4115de8f5e4fSPeter Zijlstra  * However when taken in a stricter environment the RCU lock does not loosen
4116de8f5e4fSPeter Zijlstra  * the constraints.
4117de8f5e4fSPeter Zijlstra  *
4118de8f5e4fSPeter Zijlstra  * Therefore we must look for the strictest environment in the lock stack and
4119de8f5e4fSPeter Zijlstra  * compare that to the lock we're trying to acquire.
4120de8f5e4fSPeter Zijlstra  */
4121de8f5e4fSPeter Zijlstra static int check_wait_context(struct task_struct *curr, struct held_lock *next)
4122de8f5e4fSPeter Zijlstra {
4123de8f5e4fSPeter Zijlstra 	short next_inner = hlock_class(next)->wait_type_inner;
4124de8f5e4fSPeter Zijlstra 	short next_outer = hlock_class(next)->wait_type_outer;
4125de8f5e4fSPeter Zijlstra 	short curr_inner;
4126de8f5e4fSPeter Zijlstra 	int depth;
4127de8f5e4fSPeter Zijlstra 
4128de8f5e4fSPeter Zijlstra 	if (!curr->lockdep_depth || !next_inner || next->trylock)
4129de8f5e4fSPeter Zijlstra 		return 0;
4130de8f5e4fSPeter Zijlstra 
4131de8f5e4fSPeter Zijlstra 	if (!next_outer)
4132de8f5e4fSPeter Zijlstra 		next_outer = next_inner;
4133de8f5e4fSPeter Zijlstra 
4134de8f5e4fSPeter Zijlstra 	/*
4135de8f5e4fSPeter Zijlstra 	 * Find start of current irq_context..
4136de8f5e4fSPeter Zijlstra 	 */
4137de8f5e4fSPeter Zijlstra 	for (depth = curr->lockdep_depth - 1; depth >= 0; depth--) {
4138de8f5e4fSPeter Zijlstra 		struct held_lock *prev = curr->held_locks + depth;
4139de8f5e4fSPeter Zijlstra 		if (prev->irq_context != next->irq_context)
4140de8f5e4fSPeter Zijlstra 			break;
4141de8f5e4fSPeter Zijlstra 	}
4142de8f5e4fSPeter Zijlstra 	depth++;
4143de8f5e4fSPeter Zijlstra 
41449a019db0SPeter Zijlstra 	curr_inner = task_wait_context(curr);
4145de8f5e4fSPeter Zijlstra 
4146de8f5e4fSPeter Zijlstra 	for (; depth < curr->lockdep_depth; depth++) {
4147de8f5e4fSPeter Zijlstra 		struct held_lock *prev = curr->held_locks + depth;
4148de8f5e4fSPeter Zijlstra 		short prev_inner = hlock_class(prev)->wait_type_inner;
4149de8f5e4fSPeter Zijlstra 
4150de8f5e4fSPeter Zijlstra 		if (prev_inner) {
4151de8f5e4fSPeter Zijlstra 			/*
4152de8f5e4fSPeter Zijlstra 			 * We can have a bigger inner than a previous one
4153de8f5e4fSPeter Zijlstra 			 * when outer is smaller than inner, as with RCU.
4154de8f5e4fSPeter Zijlstra 			 *
4155de8f5e4fSPeter Zijlstra 			 * Also due to trylocks.
4156de8f5e4fSPeter Zijlstra 			 */
4157de8f5e4fSPeter Zijlstra 			curr_inner = min(curr_inner, prev_inner);
4158de8f5e4fSPeter Zijlstra 		}
4159de8f5e4fSPeter Zijlstra 	}
4160de8f5e4fSPeter Zijlstra 
4161de8f5e4fSPeter Zijlstra 	if (next_outer > curr_inner)
4162de8f5e4fSPeter Zijlstra 		return print_lock_invalid_wait_context(curr, next);
4163de8f5e4fSPeter Zijlstra 
4164de8f5e4fSPeter Zijlstra 	return 0;
4165de8f5e4fSPeter Zijlstra }
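
/*
 * Informal example, assuming the usual wait-type annotations
 * (raw_spinlock_t: LD_WAIT_SPIN, spinlock_t: LD_WAIT_CONFIG, mutex:
 * LD_WAIT_SLEEP): acquiring a spinlock_t while holding a raw_spinlock_t
 * gives next_outer == LD_WAIT_CONFIG > curr_inner == LD_WAIT_SPIN and
 * triggers the "Invalid wait context" report above, since a spinlock_t
 * becomes a sleeping lock on PREEMPT_RT.
 */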
4166de8f5e4fSPeter Zijlstra 
416730a35f79SArnd Bergmann #else /* CONFIG_PROVE_LOCKING */
4168886532aeSArnd Bergmann 
4169886532aeSArnd Bergmann static inline int
4170886532aeSArnd Bergmann mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
4171886532aeSArnd Bergmann {
4172886532aeSArnd Bergmann 	return 1;
4173886532aeSArnd Bergmann }
4174886532aeSArnd Bergmann 
4175886532aeSArnd Bergmann static inline unsigned int task_irq_context(struct task_struct *task)
4176886532aeSArnd Bergmann {
4177886532aeSArnd Bergmann 	return 0;
4178886532aeSArnd Bergmann }
4179886532aeSArnd Bergmann 
4180886532aeSArnd Bergmann static inline int separate_irq_context(struct task_struct *curr,
4181886532aeSArnd Bergmann 		struct held_lock *hlock)
4182886532aeSArnd Bergmann {
4183886532aeSArnd Bergmann 	return 0;
4184886532aeSArnd Bergmann }
4185886532aeSArnd Bergmann 
4186de8f5e4fSPeter Zijlstra static inline int check_wait_context(struct task_struct *curr,
4187de8f5e4fSPeter Zijlstra 				     struct held_lock *next)
4188de8f5e4fSPeter Zijlstra {
4189de8f5e4fSPeter Zijlstra 	return 0;
4190de8f5e4fSPeter Zijlstra }
4191de8f5e4fSPeter Zijlstra 
419230a35f79SArnd Bergmann #endif /* CONFIG_PROVE_LOCKING */
4193886532aeSArnd Bergmann 
41948eddac3fSPeter Zijlstra /*
41958eddac3fSPeter Zijlstra  * Initialize a lock instance's lock-class mapping info:
41968eddac3fSPeter Zijlstra  */
4197de8f5e4fSPeter Zijlstra void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
4198de8f5e4fSPeter Zijlstra 			    struct lock_class_key *key, int subclass,
4199de8f5e4fSPeter Zijlstra 			    short inner, short outer)
42008eddac3fSPeter Zijlstra {
42018eddac3fSPeter Zijlstra 	int i;
42028eddac3fSPeter Zijlstra 
42038eddac3fSPeter Zijlstra 	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
42048eddac3fSPeter Zijlstra 		lock->class_cache[i] = NULL;
42058eddac3fSPeter Zijlstra 
42068eddac3fSPeter Zijlstra #ifdef CONFIG_LOCK_STAT
42078eddac3fSPeter Zijlstra 	lock->cpu = raw_smp_processor_id();
42088eddac3fSPeter Zijlstra #endif
42098eddac3fSPeter Zijlstra 
42108eddac3fSPeter Zijlstra 	/*
42118eddac3fSPeter Zijlstra 	 * Can't be having no nameless bastards around this place!
42128eddac3fSPeter Zijlstra 	 */
42138eddac3fSPeter Zijlstra 	if (DEBUG_LOCKS_WARN_ON(!name)) {
42148eddac3fSPeter Zijlstra 		lock->name = "NULL";
42158eddac3fSPeter Zijlstra 		return;
42168eddac3fSPeter Zijlstra 	}
42178eddac3fSPeter Zijlstra 
42188eddac3fSPeter Zijlstra 	lock->name = name;
42198eddac3fSPeter Zijlstra 
4220de8f5e4fSPeter Zijlstra 	lock->wait_type_outer = outer;
4221de8f5e4fSPeter Zijlstra 	lock->wait_type_inner = inner;
4222de8f5e4fSPeter Zijlstra 
42238eddac3fSPeter Zijlstra 	/*
42248eddac3fSPeter Zijlstra 	 * No key, no joy, we need to hash something.
42258eddac3fSPeter Zijlstra 	 */
42268eddac3fSPeter Zijlstra 	if (DEBUG_LOCKS_WARN_ON(!key))
42278eddac3fSPeter Zijlstra 		return;
42288eddac3fSPeter Zijlstra 	/*
4229108c1485SBart Van Assche 	 * Sanity check, the lock-class key must either have been allocated
4230108c1485SBart Van Assche 	 * statically or must have been registered as a dynamic key.
42318eddac3fSPeter Zijlstra 	 */
4232108c1485SBart Van Assche 	if (!static_obj(key) && !is_dynamic_key(key)) {
4233108c1485SBart Van Assche 		if (debug_locks)
4234108c1485SBart Van Assche 			printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
42358eddac3fSPeter Zijlstra 		DEBUG_LOCKS_WARN_ON(1);
42368eddac3fSPeter Zijlstra 		return;
42378eddac3fSPeter Zijlstra 	}
42388eddac3fSPeter Zijlstra 	lock->key = key;
42398eddac3fSPeter Zijlstra 
42408eddac3fSPeter Zijlstra 	if (unlikely(!debug_locks))
42418eddac3fSPeter Zijlstra 		return;
42428eddac3fSPeter Zijlstra 
424335a9393cSPeter Zijlstra 	if (subclass) {
424435a9393cSPeter Zijlstra 		unsigned long flags;
424535a9393cSPeter Zijlstra 
424635a9393cSPeter Zijlstra 		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
424735a9393cSPeter Zijlstra 			return;
424835a9393cSPeter Zijlstra 
424935a9393cSPeter Zijlstra 		raw_local_irq_save(flags);
425010476e63SPeter Zijlstra 		current->lockdep_recursion++;
42518eddac3fSPeter Zijlstra 		register_lock_class(lock, subclass, 1);
425210476e63SPeter Zijlstra 		lockdep_recursion_finish();
425335a9393cSPeter Zijlstra 		raw_local_irq_restore(flags);
425435a9393cSPeter Zijlstra 	}
42558eddac3fSPeter Zijlstra }
4256de8f5e4fSPeter Zijlstra EXPORT_SYMBOL_GPL(lockdep_init_map_waits);
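
/*
 * Informal usage example (hypothetical structure and key names): a raw
 * spinlock style initialization would look roughly like
 *
 *	static struct lock_class_key foo_key;
 *
 *	lockdep_init_map_waits(&foo->dep_map, "foo_lock", &foo_key, 0,
 *			       LD_WAIT_SPIN, LD_WAIT_INV);
 *
 * i.e. inner wait type LD_WAIT_SPIN and no separate outer type
 * (LD_WAIT_INV).
 */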
42578eddac3fSPeter Zijlstra 
42588eddac3fSPeter Zijlstra struct lock_class_key __lockdep_no_validate__;
42598eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
42608eddac3fSPeter Zijlstra 
4261f7c1c6b3SYuyang Du static void
42628eddac3fSPeter Zijlstra print_lock_nested_lock_not_held(struct task_struct *curr,
42638eddac3fSPeter Zijlstra 				struct held_lock *hlock,
42648eddac3fSPeter Zijlstra 				unsigned long ip)
42658eddac3fSPeter Zijlstra {
42668eddac3fSPeter Zijlstra 	if (!debug_locks_off())
4267f7c1c6b3SYuyang Du 		return;
42688eddac3fSPeter Zijlstra 	if (debug_locks_silent)
4269f7c1c6b3SYuyang Du 		return;
42708eddac3fSPeter Zijlstra 
4271681fbec8SPaul E. McKenney 	pr_warn("\n");
4272a5dd63efSPaul E. McKenney 	pr_warn("==================================\n");
4273a5dd63efSPaul E. McKenney 	pr_warn("WARNING: Nested lock was not taken\n");
42748eddac3fSPeter Zijlstra 	print_kernel_ident();
4275a5dd63efSPaul E. McKenney 	pr_warn("----------------------------------\n");
42768eddac3fSPeter Zijlstra 
4277681fbec8SPaul E. McKenney 	pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
42788eddac3fSPeter Zijlstra 	print_lock(hlock);
42798eddac3fSPeter Zijlstra 
4280681fbec8SPaul E. McKenney 	pr_warn("\nbut this task is not holding:\n");
4281681fbec8SPaul E. McKenney 	pr_warn("%s\n", hlock->nest_lock->name);
42828eddac3fSPeter Zijlstra 
4283681fbec8SPaul E. McKenney 	pr_warn("\nstack backtrace:\n");
42848eddac3fSPeter Zijlstra 	dump_stack();
42858eddac3fSPeter Zijlstra 
4286681fbec8SPaul E. McKenney 	pr_warn("\nother info that might help us debug this:\n");
42878eddac3fSPeter Zijlstra 	lockdep_print_held_locks(curr);
42888eddac3fSPeter Zijlstra 
4289681fbec8SPaul E. McKenney 	pr_warn("\nstack backtrace:\n");
42908eddac3fSPeter Zijlstra 	dump_stack();
42918eddac3fSPeter Zijlstra }
42928eddac3fSPeter Zijlstra 
429308f36ff6SMatthew Wilcox static int __lock_is_held(const struct lockdep_map *lock, int read);
42948eddac3fSPeter Zijlstra 
42958eddac3fSPeter Zijlstra /*
42968eddac3fSPeter Zijlstra  * This gets called for every mutex_lock*()/spin_lock*() operation.
42978eddac3fSPeter Zijlstra  * We maintain the dependency maps and validate the locking attempt:
42988ee10862SWaiman Long  *
42998ee10862SWaiman Long  * The callers must make sure that IRQs are disabled before calling it,
43008ee10862SWaiman Long  * otherwise we could get an interrupt which would want to take locks,
43018ee10862SWaiman Long  * which would end up in lockdep again.
43028eddac3fSPeter Zijlstra  */
43038eddac3fSPeter Zijlstra static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
43048eddac3fSPeter Zijlstra 			  int trylock, int read, int check, int hardirqs_off,
43058eddac3fSPeter Zijlstra 			  struct lockdep_map *nest_lock, unsigned long ip,
430621199f27SPeter Zijlstra 			  int references, int pin_count)
43078eddac3fSPeter Zijlstra {
43088eddac3fSPeter Zijlstra 	struct task_struct *curr = current;
43098eddac3fSPeter Zijlstra 	struct lock_class *class = NULL;
43108eddac3fSPeter Zijlstra 	struct held_lock *hlock;
43115f18ab5cSAlfredo Alvarez Fernandez 	unsigned int depth;
43128eddac3fSPeter Zijlstra 	int chain_head = 0;
43138eddac3fSPeter Zijlstra 	int class_idx;
43148eddac3fSPeter Zijlstra 	u64 chain_key;
43158eddac3fSPeter Zijlstra 
43168eddac3fSPeter Zijlstra 	if (unlikely(!debug_locks))
43178eddac3fSPeter Zijlstra 		return 0;
43188eddac3fSPeter Zijlstra 
4319fb9edbe9SOleg Nesterov 	if (!prove_locking || lock->key == &__lockdep_no_validate__)
4320fb9edbe9SOleg Nesterov 		check = 0;
43218eddac3fSPeter Zijlstra 
43228eddac3fSPeter Zijlstra 	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
43238eddac3fSPeter Zijlstra 		class = lock->class_cache[subclass];
43248eddac3fSPeter Zijlstra 	/*
43258eddac3fSPeter Zijlstra 	 * Not cached?
43268eddac3fSPeter Zijlstra 	 */
43278eddac3fSPeter Zijlstra 	if (unlikely(!class)) {
43288eddac3fSPeter Zijlstra 		class = register_lock_class(lock, subclass, 0);
43298eddac3fSPeter Zijlstra 		if (!class)
43308eddac3fSPeter Zijlstra 			return 0;
43318eddac3fSPeter Zijlstra 	}
43328ca2b56cSWaiman Long 
43338ca2b56cSWaiman Long 	debug_class_ops_inc(class);
43348ca2b56cSWaiman Long 
43358eddac3fSPeter Zijlstra 	if (very_verbose(class)) {
433604860d48SBorislav Petkov 		printk("\nacquire class [%px] %s", class->key, class->name);
43378eddac3fSPeter Zijlstra 		if (class->name_version > 1)
4338f943fe0fSDmitry Vyukov 			printk(KERN_CONT "#%d", class->name_version);
4339f943fe0fSDmitry Vyukov 		printk(KERN_CONT "\n");
43408eddac3fSPeter Zijlstra 		dump_stack();
43418eddac3fSPeter Zijlstra 	}
43428eddac3fSPeter Zijlstra 
43438eddac3fSPeter Zijlstra 	/*
43448eddac3fSPeter Zijlstra 	 * Add the lock to the list of currently held locks.
43458eddac3fSPeter Zijlstra 	 * (we don't increase the depth just yet, up until the
43468eddac3fSPeter Zijlstra 	 * dependency checks are done)
43478eddac3fSPeter Zijlstra 	 */
43488eddac3fSPeter Zijlstra 	depth = curr->lockdep_depth;
43498eddac3fSPeter Zijlstra 	/*
43508eddac3fSPeter Zijlstra 	 * Ran out of static storage for our per-task lock stack again have we?
43518eddac3fSPeter Zijlstra 	 */
43528eddac3fSPeter Zijlstra 	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
43538eddac3fSPeter Zijlstra 		return 0;
43548eddac3fSPeter Zijlstra 
435501bb6f0aSYuyang Du 	class_idx = class - lock_classes;
43568eddac3fSPeter Zijlstra 
4357de8f5e4fSPeter Zijlstra 	if (depth) { /* we're holding locks */
43588eddac3fSPeter Zijlstra 		hlock = curr->held_locks + depth - 1;
43598eddac3fSPeter Zijlstra 		if (hlock->class_idx == class_idx && nest_lock) {
4360d9349850SImre Deak 			if (!references)
4361d9349850SImre Deak 				references++;
43627fb4a2ceSPeter Zijlstra 
4363d9349850SImre Deak 			if (!hlock->references)
43648eddac3fSPeter Zijlstra 				hlock->references++;
4365d9349850SImre Deak 
4366d9349850SImre Deak 			hlock->references += references;
4367d9349850SImre Deak 
4368d9349850SImre Deak 			/* Overflow */
4369d9349850SImre Deak 			if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
4370d9349850SImre Deak 				return 0;
43718eddac3fSPeter Zijlstra 
43728c8889d8SImre Deak 			return 2;
43738eddac3fSPeter Zijlstra 		}
43748eddac3fSPeter Zijlstra 	}
43758eddac3fSPeter Zijlstra 
43768eddac3fSPeter Zijlstra 	hlock = curr->held_locks + depth;
43778eddac3fSPeter Zijlstra 	/*
43788eddac3fSPeter Zijlstra 	 * Plain impossible, we just registered it and checked it weren't no
43798eddac3fSPeter Zijlstra 	 * NULL like.. I bet this mushroom I ate was good!
43808eddac3fSPeter Zijlstra 	 */
43818eddac3fSPeter Zijlstra 	if (DEBUG_LOCKS_WARN_ON(!class))
43828eddac3fSPeter Zijlstra 		return 0;
43838eddac3fSPeter Zijlstra 	hlock->class_idx = class_idx;
43848eddac3fSPeter Zijlstra 	hlock->acquire_ip = ip;
43858eddac3fSPeter Zijlstra 	hlock->instance = lock;
43868eddac3fSPeter Zijlstra 	hlock->nest_lock = nest_lock;
4387c2469756SBoqun Feng 	hlock->irq_context = task_irq_context(curr);
43888eddac3fSPeter Zijlstra 	hlock->trylock = trylock;
43898eddac3fSPeter Zijlstra 	hlock->read = read;
43908eddac3fSPeter Zijlstra 	hlock->check = check;
43918eddac3fSPeter Zijlstra 	hlock->hardirqs_off = !!hardirqs_off;
43928eddac3fSPeter Zijlstra 	hlock->references = references;
43938eddac3fSPeter Zijlstra #ifdef CONFIG_LOCK_STAT
43948eddac3fSPeter Zijlstra 	hlock->waittime_stamp = 0;
43958eddac3fSPeter Zijlstra 	hlock->holdtime_stamp = lockstat_clock();
43968eddac3fSPeter Zijlstra #endif
439721199f27SPeter Zijlstra 	hlock->pin_count = pin_count;
43988eddac3fSPeter Zijlstra 
4399de8f5e4fSPeter Zijlstra 	if (check_wait_context(curr, hlock))
4400de8f5e4fSPeter Zijlstra 		return 0;
4401de8f5e4fSPeter Zijlstra 
440209180651SYuyang Du 	/* Initialize the lock usage bit */
440309180651SYuyang Du 	if (!mark_usage(curr, hlock, check))
44048eddac3fSPeter Zijlstra 		return 0;
44058eddac3fSPeter Zijlstra 
44068eddac3fSPeter Zijlstra 	/*
44078eddac3fSPeter Zijlstra 	 * Calculate the chain hash: it's the combined hash of all the
44088eddac3fSPeter Zijlstra 	 * lock keys along the dependency chain. We save the hash value
44098eddac3fSPeter Zijlstra 	 * at every step so that we can get the current hash easily
44108eddac3fSPeter Zijlstra 	 * after unlock. The chain hash is then used to cache dependency
44118eddac3fSPeter Zijlstra 	 * results.
44128eddac3fSPeter Zijlstra 	 *
44138eddac3fSPeter Zijlstra 	 * The 'key ID' is the most compact key value to drive
44148eddac3fSPeter Zijlstra 	 * the hash, not class->key.
44158eddac3fSPeter Zijlstra 	 */
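	/*
	 * Editorial example (a sketch): for a task that takes A, then B, then
	 * C in the same irq context, the running key is built up as
	 *
	 *	key = iterate_chain_key(INITIAL_CHAIN_KEY, A_idx);
	 *	key = iterate_chain_key(key, B_idx);
	 *	key = iterate_chain_key(key, C_idx);
	 *
	 * and each held_lock's prev_chain_key snapshots the value from just
	 * before its own class index was mixed in.
	 */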
44168eddac3fSPeter Zijlstra 	/*
441701bb6f0aSYuyang Du 	 * Whoops, we did it again.. class_idx is invalid.
44188eddac3fSPeter Zijlstra 	 */
441901bb6f0aSYuyang Du 	if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lock_classes_in_use)))
44208eddac3fSPeter Zijlstra 		return 0;
44218eddac3fSPeter Zijlstra 
44228eddac3fSPeter Zijlstra 	chain_key = curr->curr_chain_key;
44238eddac3fSPeter Zijlstra 	if (!depth) {
44248eddac3fSPeter Zijlstra 		/*
44258eddac3fSPeter Zijlstra 		 * How can we have a chain hash when we ain't got no keys?!
44268eddac3fSPeter Zijlstra 		 */
4427f6ec8829SYuyang Du 		if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY))
44288eddac3fSPeter Zijlstra 			return 0;
44298eddac3fSPeter Zijlstra 		chain_head = 1;
44308eddac3fSPeter Zijlstra 	}
44318eddac3fSPeter Zijlstra 
44328eddac3fSPeter Zijlstra 	hlock->prev_chain_key = chain_key;
44338eddac3fSPeter Zijlstra 	if (separate_irq_context(curr, hlock)) {
4434f6ec8829SYuyang Du 		chain_key = INITIAL_CHAIN_KEY;
44358eddac3fSPeter Zijlstra 		chain_head = 1;
44368eddac3fSPeter Zijlstra 	}
44375f18ab5cSAlfredo Alvarez Fernandez 	chain_key = iterate_chain_key(chain_key, class_idx);
44388eddac3fSPeter Zijlstra 
4439f7c1c6b3SYuyang Du 	if (nest_lock && !__lock_is_held(nest_lock, -1)) {
4440f7c1c6b3SYuyang Du 		print_lock_nested_lock_not_held(curr, hlock, ip);
4441f7c1c6b3SYuyang Du 		return 0;
4442f7c1c6b3SYuyang Du 	}
44438eddac3fSPeter Zijlstra 
4444a0b0fd53SBart Van Assche 	if (!debug_locks_silent) {
4445a0b0fd53SBart Van Assche 		WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
4446a0b0fd53SBart Van Assche 		WARN_ON_ONCE(!hlock_class(hlock)->key);
4447a0b0fd53SBart Van Assche 	}
4448a0b0fd53SBart Van Assche 
44490b9fc8ecSYuyang Du 	if (!validate_chain(curr, hlock, chain_head, chain_key))
44508eddac3fSPeter Zijlstra 		return 0;
44518eddac3fSPeter Zijlstra 
44528eddac3fSPeter Zijlstra 	curr->curr_chain_key = chain_key;
44538eddac3fSPeter Zijlstra 	curr->lockdep_depth++;
44548eddac3fSPeter Zijlstra 	check_chain_key(curr);
44558eddac3fSPeter Zijlstra #ifdef CONFIG_DEBUG_LOCKDEP
44568eddac3fSPeter Zijlstra 	if (unlikely(!debug_locks))
44578eddac3fSPeter Zijlstra 		return 0;
44588eddac3fSPeter Zijlstra #endif
44598eddac3fSPeter Zijlstra 	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
44608eddac3fSPeter Zijlstra 		debug_locks_off();
44618eddac3fSPeter Zijlstra 		print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
44628eddac3fSPeter Zijlstra 		printk(KERN_DEBUG "depth: %i  max: %lu!\n",
44638eddac3fSPeter Zijlstra 		       curr->lockdep_depth, MAX_LOCK_DEPTH);
44648eddac3fSPeter Zijlstra 
44658eddac3fSPeter Zijlstra 		lockdep_print_held_locks(current);
44668eddac3fSPeter Zijlstra 		debug_show_all_locks();
44678eddac3fSPeter Zijlstra 		dump_stack();
44688eddac3fSPeter Zijlstra 
44698eddac3fSPeter Zijlstra 		return 0;
44708eddac3fSPeter Zijlstra 	}
44718eddac3fSPeter Zijlstra 
44728eddac3fSPeter Zijlstra 	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
44738eddac3fSPeter Zijlstra 		max_lockdep_depth = curr->lockdep_depth;
44748eddac3fSPeter Zijlstra 
44758eddac3fSPeter Zijlstra 	return 1;
44768eddac3fSPeter Zijlstra }
44778eddac3fSPeter Zijlstra 
4478f7c1c6b3SYuyang Du static void print_unlock_imbalance_bug(struct task_struct *curr,
4479f7c1c6b3SYuyang Du 				       struct lockdep_map *lock,
44808eddac3fSPeter Zijlstra 				       unsigned long ip)
44818eddac3fSPeter Zijlstra {
44828eddac3fSPeter Zijlstra 	if (!debug_locks_off())
4483f7c1c6b3SYuyang Du 		return;
44848eddac3fSPeter Zijlstra 	if (debug_locks_silent)
4485f7c1c6b3SYuyang Du 		return;
44868eddac3fSPeter Zijlstra 
4487681fbec8SPaul E. McKenney 	pr_warn("\n");
4488a5dd63efSPaul E. McKenney 	pr_warn("=====================================\n");
4489a5dd63efSPaul E. McKenney 	pr_warn("WARNING: bad unlock balance detected!\n");
44908eddac3fSPeter Zijlstra 	print_kernel_ident();
4491a5dd63efSPaul E. McKenney 	pr_warn("-------------------------------------\n");
4492681fbec8SPaul E. McKenney 	pr_warn("%s/%d is trying to release lock (",
44938eddac3fSPeter Zijlstra 		curr->comm, task_pid_nr(curr));
44948eddac3fSPeter Zijlstra 	print_lockdep_cache(lock);
4495681fbec8SPaul E. McKenney 	pr_cont(") at:\n");
44962062a4e8SDmitry Safonov 	print_ip_sym(KERN_WARNING, ip);
4497681fbec8SPaul E. McKenney 	pr_warn("but there are no more locks to release!\n");
4498681fbec8SPaul E. McKenney 	pr_warn("\nother info that might help us debug this:\n");
44998eddac3fSPeter Zijlstra 	lockdep_print_held_locks(curr);
45008eddac3fSPeter Zijlstra 
4501681fbec8SPaul E. McKenney 	pr_warn("\nstack backtrace:\n");
45028eddac3fSPeter Zijlstra 	dump_stack();
45038eddac3fSPeter Zijlstra }
45048eddac3fSPeter Zijlstra 
4505c86e9b98SPeter Zijlstra static noinstr int match_held_lock(const struct held_lock *hlock,
450608f36ff6SMatthew Wilcox 				   const struct lockdep_map *lock)
45078eddac3fSPeter Zijlstra {
45088eddac3fSPeter Zijlstra 	if (hlock->instance == lock)
45098eddac3fSPeter Zijlstra 		return 1;
45108eddac3fSPeter Zijlstra 
45118eddac3fSPeter Zijlstra 	if (hlock->references) {
451208f36ff6SMatthew Wilcox 		const struct lock_class *class = lock->class_cache[0];
45138eddac3fSPeter Zijlstra 
45148eddac3fSPeter Zijlstra 		if (!class)
45158eddac3fSPeter Zijlstra 			class = look_up_lock_class(lock, 0);
45168eddac3fSPeter Zijlstra 
45178eddac3fSPeter Zijlstra 		/*
45188eddac3fSPeter Zijlstra 		 * If look_up_lock_class() failed to find a class, we're trying
45198eddac3fSPeter Zijlstra 		 * to test if we hold a lock that has never yet been acquired.
45208eddac3fSPeter Zijlstra 		 * Clearly if the lock hasn't been acquired _ever_, we're not
45218eddac3fSPeter Zijlstra 		 * holding it either, so report failure.
45228eddac3fSPeter Zijlstra 		 */
452364f29d1bSMatthew Wilcox 		if (!class)
45248eddac3fSPeter Zijlstra 			return 0;
45258eddac3fSPeter Zijlstra 
45268eddac3fSPeter Zijlstra 		/*
45278eddac3fSPeter Zijlstra 		 * References, but not a lock we're actually ref-counting?
45288eddac3fSPeter Zijlstra 		 * State got messed up, follow the sites that change ->references
45298eddac3fSPeter Zijlstra 		 * and try to make sense of it.
45308eddac3fSPeter Zijlstra 		 */
45318eddac3fSPeter Zijlstra 		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
45328eddac3fSPeter Zijlstra 			return 0;
45338eddac3fSPeter Zijlstra 
453401bb6f0aSYuyang Du 		if (hlock->class_idx == class - lock_classes)
45358eddac3fSPeter Zijlstra 			return 1;
45368eddac3fSPeter Zijlstra 	}
45378eddac3fSPeter Zijlstra 
45388eddac3fSPeter Zijlstra 	return 0;
45398eddac3fSPeter Zijlstra }
45408eddac3fSPeter Zijlstra 
454141c2c5b8SJ. R. Okajima /* @depth must not be zero */
454241c2c5b8SJ. R. Okajima static struct held_lock *find_held_lock(struct task_struct *curr,
454341c2c5b8SJ. R. Okajima 					struct lockdep_map *lock,
454441c2c5b8SJ. R. Okajima 					unsigned int depth, int *idx)
454541c2c5b8SJ. R. Okajima {
454641c2c5b8SJ. R. Okajima 	struct held_lock *ret, *hlock, *prev_hlock;
454741c2c5b8SJ. R. Okajima 	int i;
454841c2c5b8SJ. R. Okajima 
454941c2c5b8SJ. R. Okajima 	i = depth - 1;
455041c2c5b8SJ. R. Okajima 	hlock = curr->held_locks + i;
455141c2c5b8SJ. R. Okajima 	ret = hlock;
455241c2c5b8SJ. R. Okajima 	if (match_held_lock(hlock, lock))
455341c2c5b8SJ. R. Okajima 		goto out;
455441c2c5b8SJ. R. Okajima 
455541c2c5b8SJ. R. Okajima 	ret = NULL;
455641c2c5b8SJ. R. Okajima 	for (i--, prev_hlock = hlock--;
455741c2c5b8SJ. R. Okajima 	     i >= 0;
455841c2c5b8SJ. R. Okajima 	     i--, prev_hlock = hlock--) {
455941c2c5b8SJ. R. Okajima 		/*
456041c2c5b8SJ. R. Okajima 		 * We must not cross into another context:
456141c2c5b8SJ. R. Okajima 		 */
456241c2c5b8SJ. R. Okajima 		if (prev_hlock->irq_context != hlock->irq_context) {
456341c2c5b8SJ. R. Okajima 			ret = NULL;
456441c2c5b8SJ. R. Okajima 			break;
456541c2c5b8SJ. R. Okajima 		}
456641c2c5b8SJ. R. Okajima 		if (match_held_lock(hlock, lock)) {
456741c2c5b8SJ. R. Okajima 			ret = hlock;
456841c2c5b8SJ. R. Okajima 			break;
456941c2c5b8SJ. R. Okajima 		}
457041c2c5b8SJ. R. Okajima 	}
457141c2c5b8SJ. R. Okajima 
457241c2c5b8SJ. R. Okajima out:
457341c2c5b8SJ. R. Okajima 	*idx = i;
457441c2c5b8SJ. R. Okajima 	return ret;
457541c2c5b8SJ. R. Okajima }
457641c2c5b8SJ. R. Okajima 
4577e969970bSJ. R. Okajima static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
45788c8889d8SImre Deak 				int idx, unsigned int *merged)
4579e969970bSJ. R. Okajima {
4580e969970bSJ. R. Okajima 	struct held_lock *hlock;
45818c8889d8SImre Deak 	int first_idx = idx;
4582e969970bSJ. R. Okajima 
45838ee10862SWaiman Long 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
45848ee10862SWaiman Long 		return 0;
45858ee10862SWaiman Long 
4586e969970bSJ. R. Okajima 	for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
45878c8889d8SImre Deak 		switch (__lock_acquire(hlock->instance,
4588e969970bSJ. R. Okajima 				    hlock_class(hlock)->subclass,
4589e969970bSJ. R. Okajima 				    hlock->trylock,
4590e969970bSJ. R. Okajima 				    hlock->read, hlock->check,
4591e969970bSJ. R. Okajima 				    hlock->hardirqs_off,
4592e969970bSJ. R. Okajima 				    hlock->nest_lock, hlock->acquire_ip,
45938c8889d8SImre Deak 				    hlock->references, hlock->pin_count)) {
45948c8889d8SImre Deak 		case 0:
4595e969970bSJ. R. Okajima 			return 1;
45968c8889d8SImre Deak 		case 1:
45978c8889d8SImre Deak 			break;
45988c8889d8SImre Deak 		case 2:
45998c8889d8SImre Deak 			*merged += (idx == first_idx);
46008c8889d8SImre Deak 			break;
46018c8889d8SImre Deak 		default:
46028c8889d8SImre Deak 			WARN_ON(1);
46038c8889d8SImre Deak 			return 0;
46048c8889d8SImre Deak 		}
4605e969970bSJ. R. Okajima 	}
4606e969970bSJ. R. Okajima 	return 0;
4607e969970bSJ. R. Okajima }
4608e969970bSJ. R. Okajima 
46098eddac3fSPeter Zijlstra static int
46108eddac3fSPeter Zijlstra __lock_set_class(struct lockdep_map *lock, const char *name,
46118eddac3fSPeter Zijlstra 		 struct lock_class_key *key, unsigned int subclass,
46128eddac3fSPeter Zijlstra 		 unsigned long ip)
46138eddac3fSPeter Zijlstra {
46148eddac3fSPeter Zijlstra 	struct task_struct *curr = current;
46158c8889d8SImre Deak 	unsigned int depth, merged = 0;
461641c2c5b8SJ. R. Okajima 	struct held_lock *hlock;
46178eddac3fSPeter Zijlstra 	struct lock_class *class;
46188eddac3fSPeter Zijlstra 	int i;
46198eddac3fSPeter Zijlstra 
4620513e1073SWaiman Long 	if (unlikely(!debug_locks))
4621513e1073SWaiman Long 		return 0;
4622513e1073SWaiman Long 
46238eddac3fSPeter Zijlstra 	depth = curr->lockdep_depth;
46248eddac3fSPeter Zijlstra 	/*
46258eddac3fSPeter Zijlstra 	 * This function is about (re)setting the class of a held lock,
46268eddac3fSPeter Zijlstra 	 * yet we're not actually holding any locks. Naughty user!
46278eddac3fSPeter Zijlstra 	 */
46288eddac3fSPeter Zijlstra 	if (DEBUG_LOCKS_WARN_ON(!depth))
46298eddac3fSPeter Zijlstra 		return 0;
46308eddac3fSPeter Zijlstra 
463141c2c5b8SJ. R. Okajima 	hlock = find_held_lock(curr, lock, depth, &i);
4632f7c1c6b3SYuyang Du 	if (!hlock) {
4633f7c1c6b3SYuyang Du 		print_unlock_imbalance_bug(curr, lock, ip);
4634f7c1c6b3SYuyang Du 		return 0;
4635f7c1c6b3SYuyang Du 	}
46368eddac3fSPeter Zijlstra 
4637de8f5e4fSPeter Zijlstra 	lockdep_init_map_waits(lock, name, key, 0,
4638de8f5e4fSPeter Zijlstra 			       lock->wait_type_inner,
4639de8f5e4fSPeter Zijlstra 			       lock->wait_type_outer);
46408eddac3fSPeter Zijlstra 	class = register_lock_class(lock, subclass, 0);
464101bb6f0aSYuyang Du 	hlock->class_idx = class - lock_classes;
46428eddac3fSPeter Zijlstra 
46438eddac3fSPeter Zijlstra 	curr->lockdep_depth = i;
46448eddac3fSPeter Zijlstra 	curr->curr_chain_key = hlock->prev_chain_key;
46458eddac3fSPeter Zijlstra 
46468c8889d8SImre Deak 	if (reacquire_held_locks(curr, depth, i, &merged))
46478eddac3fSPeter Zijlstra 		return 0;
46488eddac3fSPeter Zijlstra 
46498eddac3fSPeter Zijlstra 	/*
46508eddac3fSPeter Zijlstra 	 * I took it apart and put it back together again, except now I have
46518eddac3fSPeter Zijlstra 	 * these 'spare' parts.. where shall I put them.
46528eddac3fSPeter Zijlstra 	 */
46538c8889d8SImre Deak 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged))
46548eddac3fSPeter Zijlstra 		return 0;
46558eddac3fSPeter Zijlstra 	return 1;
46568eddac3fSPeter Zijlstra }
46578eddac3fSPeter Zijlstra 
46586419c4afSJ. R. Okajima static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
46596419c4afSJ. R. Okajima {
46606419c4afSJ. R. Okajima 	struct task_struct *curr = current;
46618c8889d8SImre Deak 	unsigned int depth, merged = 0;
46626419c4afSJ. R. Okajima 	struct held_lock *hlock;
46636419c4afSJ. R. Okajima 	int i;
46646419c4afSJ. R. Okajima 
466571492580SWaiman Long 	if (unlikely(!debug_locks))
466671492580SWaiman Long 		return 0;
466771492580SWaiman Long 
46686419c4afSJ. R. Okajima 	depth = curr->lockdep_depth;
46696419c4afSJ. R. Okajima 	/*
46706419c4afSJ. R. Okajima 	 * This function is about downgrading a held write lock to a read
46716419c4afSJ. R. Okajima 	 * lock, yet we're not actually holding any locks. Naughty user!
46726419c4afSJ. R. Okajima 	 */
46736419c4afSJ. R. Okajima 	if (DEBUG_LOCKS_WARN_ON(!depth))
46746419c4afSJ. R. Okajima 		return 0;
46756419c4afSJ. R. Okajima 
46766419c4afSJ. R. Okajima 	hlock = find_held_lock(curr, lock, depth, &i);
4677f7c1c6b3SYuyang Du 	if (!hlock) {
4678f7c1c6b3SYuyang Du 		print_unlock_imbalance_bug(curr, lock, ip);
4679f7c1c6b3SYuyang Du 		return 0;
4680f7c1c6b3SYuyang Du 	}
46816419c4afSJ. R. Okajima 
46826419c4afSJ. R. Okajima 	curr->lockdep_depth = i;
46836419c4afSJ. R. Okajima 	curr->curr_chain_key = hlock->prev_chain_key;
46846419c4afSJ. R. Okajima 
46856419c4afSJ. R. Okajima 	WARN(hlock->read, "downgrading a read lock");
46866419c4afSJ. R. Okajima 	hlock->read = 1;
46876419c4afSJ. R. Okajima 	hlock->acquire_ip = ip;
46886419c4afSJ. R. Okajima 
46898c8889d8SImre Deak 	if (reacquire_held_locks(curr, depth, i, &merged))
46908c8889d8SImre Deak 		return 0;
46918c8889d8SImre Deak 
46928c8889d8SImre Deak 	/* Merging can't happen with unchanged classes.. */
46938c8889d8SImre Deak 	if (DEBUG_LOCKS_WARN_ON(merged))
46946419c4afSJ. R. Okajima 		return 0;
46956419c4afSJ. R. Okajima 
46966419c4afSJ. R. Okajima 	/*
46976419c4afSJ. R. Okajima 	 * I took it apart and put it back together again, except now I have
46986419c4afSJ. R. Okajima 	 * these 'spare' parts.. where shall I put them.
46996419c4afSJ. R. Okajima 	 */
47006419c4afSJ. R. Okajima 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
47016419c4afSJ. R. Okajima 		return 0;
47028c8889d8SImre Deak 
47036419c4afSJ. R. Okajima 	return 1;
47046419c4afSJ. R. Okajima }
47056419c4afSJ. R. Okajima 
47068eddac3fSPeter Zijlstra /*
4707c759bc47SDan Carpenter  * Remove the lock from the list of currently held locks - this gets
4708e0f56fd7SPeter Zijlstra  * called on mutex_unlock()/spin_unlock*() (or on a failed
4709e0f56fd7SPeter Zijlstra  * mutex_lock_interruptible()).
47108eddac3fSPeter Zijlstra  */
47118eddac3fSPeter Zijlstra static int
4712b4adfe8eSYuyang Du __lock_release(struct lockdep_map *lock, unsigned long ip)
47138eddac3fSPeter Zijlstra {
4714e0f56fd7SPeter Zijlstra 	struct task_struct *curr = current;
47158c8889d8SImre Deak 	unsigned int depth, merged = 1;
471641c2c5b8SJ. R. Okajima 	struct held_lock *hlock;
4717e966eaeeSIngo Molnar 	int i;
47188eddac3fSPeter Zijlstra 
4719e0f56fd7SPeter Zijlstra 	if (unlikely(!debug_locks))
4720e0f56fd7SPeter Zijlstra 		return 0;
4721e0f56fd7SPeter Zijlstra 
47228eddac3fSPeter Zijlstra 	depth = curr->lockdep_depth;
47238eddac3fSPeter Zijlstra 	/*
47248eddac3fSPeter Zijlstra 	 * So we're all set to release this lock.. wait what lock? We don't
47258eddac3fSPeter Zijlstra 	 * own any locks, you've been drinking again?
47268eddac3fSPeter Zijlstra 	 */
4727dd471efeSKobe Wu 	if (depth <= 0) {
4728f7c1c6b3SYuyang Du 		print_unlock_imbalance_bug(curr, lock, ip);
4729f7c1c6b3SYuyang Du 		return 0;
4730f7c1c6b3SYuyang Du 	}
47318eddac3fSPeter Zijlstra 
4732e0f56fd7SPeter Zijlstra 	/*
4733e0f56fd7SPeter Zijlstra 	 * Check whether the lock exists in the current stack
4734e0f56fd7SPeter Zijlstra 	 * of held locks:
4735e0f56fd7SPeter Zijlstra 	 */
473641c2c5b8SJ. R. Okajima 	hlock = find_held_lock(curr, lock, depth, &i);
4737f7c1c6b3SYuyang Du 	if (!hlock) {
4738f7c1c6b3SYuyang Du 		print_unlock_imbalance_bug(curr, lock, ip);
4739f7c1c6b3SYuyang Du 		return 0;
4740f7c1c6b3SYuyang Du 	}
47418eddac3fSPeter Zijlstra 
47428eddac3fSPeter Zijlstra 	if (hlock->instance == lock)
47438eddac3fSPeter Zijlstra 		lock_release_holdtime(hlock);
47448eddac3fSPeter Zijlstra 
4745a24fc60dSPeter Zijlstra 	WARN(hlock->pin_count, "releasing a pinned lock\n");
4746a24fc60dSPeter Zijlstra 
47478eddac3fSPeter Zijlstra 	if (hlock->references) {
47488eddac3fSPeter Zijlstra 		hlock->references--;
47498eddac3fSPeter Zijlstra 		if (hlock->references) {
47508eddac3fSPeter Zijlstra 			/*
47518eddac3fSPeter Zijlstra 			 * We had, and after removing one, still have
47528eddac3fSPeter Zijlstra 			 * references, the current lock stack is still
47538eddac3fSPeter Zijlstra 			 * valid. We're done!
47548eddac3fSPeter Zijlstra 			 */
47558eddac3fSPeter Zijlstra 			return 1;
47568eddac3fSPeter Zijlstra 		}
47578eddac3fSPeter Zijlstra 	}
47588eddac3fSPeter Zijlstra 
47598eddac3fSPeter Zijlstra 	/*
47608eddac3fSPeter Zijlstra 	 * We have the right lock to unlock, 'hlock' points to it.
47618eddac3fSPeter Zijlstra 	 * Now we remove it from the stack, and add back the other
47628eddac3fSPeter Zijlstra 	 * entries (if any), recalculating the hash along the way:
47638eddac3fSPeter Zijlstra 	 */
47648eddac3fSPeter Zijlstra 
47658eddac3fSPeter Zijlstra 	curr->lockdep_depth = i;
47668eddac3fSPeter Zijlstra 	curr->curr_chain_key = hlock->prev_chain_key;
47678eddac3fSPeter Zijlstra 
4768ce52a18dSWaiman Long 	/*
4769ce52a18dSWaiman Long 	 * The most likely case is when the unlock is on the innermost
4770ce52a18dSWaiman Long 	 * lock. In this case, we are done!
4771ce52a18dSWaiman Long 	 */
4772ce52a18dSWaiman Long 	if (i == depth-1)
4773ce52a18dSWaiman Long 		return 1;
4774ce52a18dSWaiman Long 
47758c8889d8SImre Deak 	if (reacquire_held_locks(curr, depth, i + 1, &merged))
47768eddac3fSPeter Zijlstra 		return 0;
47778eddac3fSPeter Zijlstra 
47788eddac3fSPeter Zijlstra 	/*
47798eddac3fSPeter Zijlstra 	 * We had N bottles of beer on the wall, we drank one, but now
47808eddac3fSPeter Zijlstra 	 * there's not N-1 bottles of beer left on the wall...
47818c8889d8SImre Deak 	 * Pouring two of the bottles together is acceptable.
47828eddac3fSPeter Zijlstra 	 */
47838c8889d8SImre Deak 	DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged);
4784e0f56fd7SPeter Zijlstra 
4785ce52a18dSWaiman Long 	/*
4786ce52a18dSWaiman Long 	 * Since reacquire_held_locks() would have called check_chain_key()
4787ce52a18dSWaiman Long 	 * indirectly via __lock_acquire(), we don't need to do it again
4788ce52a18dSWaiman Long 	 * on return.
4789ce52a18dSWaiman Long 	 */
4790ce52a18dSWaiman Long 	return 0;
47918eddac3fSPeter Zijlstra }
47928eddac3fSPeter Zijlstra 
4793c86e9b98SPeter Zijlstra static __always_inline
47942f43c602SMasami Hiramatsu int __lock_is_held(const struct lockdep_map *lock, int read)
47958eddac3fSPeter Zijlstra {
47968eddac3fSPeter Zijlstra 	struct task_struct *curr = current;
47978eddac3fSPeter Zijlstra 	int i;
47988eddac3fSPeter Zijlstra 
47998eddac3fSPeter Zijlstra 	for (i = 0; i < curr->lockdep_depth; i++) {
48008eddac3fSPeter Zijlstra 		struct held_lock *hlock = curr->held_locks + i;
48018eddac3fSPeter Zijlstra 
4802f8319483SPeter Zijlstra 		if (match_held_lock(hlock, lock)) {
4803f8319483SPeter Zijlstra 			if (read == -1 || hlock->read == read)
48048eddac3fSPeter Zijlstra 				return 1;
4805f8319483SPeter Zijlstra 
4806f8319483SPeter Zijlstra 			return 0;
4807f8319483SPeter Zijlstra 		}
48088eddac3fSPeter Zijlstra 	}
48098eddac3fSPeter Zijlstra 
48108eddac3fSPeter Zijlstra 	return 0;
48118eddac3fSPeter Zijlstra }
48128eddac3fSPeter Zijlstra 
4813e7904a28SPeter Zijlstra static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
4814e7904a28SPeter Zijlstra {
4815e7904a28SPeter Zijlstra 	struct pin_cookie cookie = NIL_COOKIE;
4816e7904a28SPeter Zijlstra 	struct task_struct *curr = current;
4817e7904a28SPeter Zijlstra 	int i;
4818e7904a28SPeter Zijlstra 
4819e7904a28SPeter Zijlstra 	if (unlikely(!debug_locks))
4820e7904a28SPeter Zijlstra 		return cookie;
4821e7904a28SPeter Zijlstra 
4822e7904a28SPeter Zijlstra 	for (i = 0; i < curr->lockdep_depth; i++) {
4823e7904a28SPeter Zijlstra 		struct held_lock *hlock = curr->held_locks + i;
4824e7904a28SPeter Zijlstra 
4825e7904a28SPeter Zijlstra 		if (match_held_lock(hlock, lock)) {
4826e7904a28SPeter Zijlstra 			/*
4827e7904a28SPeter Zijlstra 			 * Grab 16bits of randomness; this is sufficient to not
4828e7904a28SPeter Zijlstra 			 * be guessable and still allows some pin nesting in
4829e7904a28SPeter Zijlstra 			 * our u32 pin_count.
4830e7904a28SPeter Zijlstra 			 */
4831e7904a28SPeter Zijlstra 			cookie.val = 1 + (prandom_u32() >> 16);
4832e7904a28SPeter Zijlstra 			hlock->pin_count += cookie.val;
4833e7904a28SPeter Zijlstra 			return cookie;
4834e7904a28SPeter Zijlstra 		}
4835e7904a28SPeter Zijlstra 	}
4836e7904a28SPeter Zijlstra 
4837e7904a28SPeter Zijlstra 	WARN(1, "pinning an unheld lock\n");
4838e7904a28SPeter Zijlstra 	return cookie;
4839e7904a28SPeter Zijlstra }
4840e7904a28SPeter Zijlstra 
4841e7904a28SPeter Zijlstra static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4842a24fc60dSPeter Zijlstra {
4843a24fc60dSPeter Zijlstra 	struct task_struct *curr = current;
4844a24fc60dSPeter Zijlstra 	int i;
4845a24fc60dSPeter Zijlstra 
4846a24fc60dSPeter Zijlstra 	if (unlikely(!debug_locks))
4847a24fc60dSPeter Zijlstra 		return;
4848a24fc60dSPeter Zijlstra 
4849a24fc60dSPeter Zijlstra 	for (i = 0; i < curr->lockdep_depth; i++) {
4850a24fc60dSPeter Zijlstra 		struct held_lock *hlock = curr->held_locks + i;
4851a24fc60dSPeter Zijlstra 
4852a24fc60dSPeter Zijlstra 		if (match_held_lock(hlock, lock)) {
4853e7904a28SPeter Zijlstra 			hlock->pin_count += cookie.val;
4854a24fc60dSPeter Zijlstra 			return;
4855a24fc60dSPeter Zijlstra 		}
4856a24fc60dSPeter Zijlstra 	}
4857a24fc60dSPeter Zijlstra 
4858a24fc60dSPeter Zijlstra 	WARN(1, "pinning an unheld lock\n");
4859a24fc60dSPeter Zijlstra }
4860a24fc60dSPeter Zijlstra 
4861e7904a28SPeter Zijlstra static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4862a24fc60dSPeter Zijlstra {
4863a24fc60dSPeter Zijlstra 	struct task_struct *curr = current;
4864a24fc60dSPeter Zijlstra 	int i;
4865a24fc60dSPeter Zijlstra 
4866a24fc60dSPeter Zijlstra 	if (unlikely(!debug_locks))
4867a24fc60dSPeter Zijlstra 		return;
4868a24fc60dSPeter Zijlstra 
4869a24fc60dSPeter Zijlstra 	for (i = 0; i < curr->lockdep_depth; i++) {
4870a24fc60dSPeter Zijlstra 		struct held_lock *hlock = curr->held_locks + i;
4871a24fc60dSPeter Zijlstra 
4872a24fc60dSPeter Zijlstra 		if (match_held_lock(hlock, lock)) {
4873a24fc60dSPeter Zijlstra 			if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
4874a24fc60dSPeter Zijlstra 				return;
4875a24fc60dSPeter Zijlstra 
4876e7904a28SPeter Zijlstra 			hlock->pin_count -= cookie.val;
4877e7904a28SPeter Zijlstra 
4878e7904a28SPeter Zijlstra 			if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
4879e7904a28SPeter Zijlstra 				hlock->pin_count = 0;
4880e7904a28SPeter Zijlstra 
4881a24fc60dSPeter Zijlstra 			return;
4882a24fc60dSPeter Zijlstra 		}
4883a24fc60dSPeter Zijlstra 	}
4884a24fc60dSPeter Zijlstra 
4885a24fc60dSPeter Zijlstra 	WARN(1, "unpinning an unheld lock\n");
4886a24fc60dSPeter Zijlstra }
4887a24fc60dSPeter Zijlstra 
48888eddac3fSPeter Zijlstra /*
48898eddac3fSPeter Zijlstra  * Check whether we follow the irq-flags state precisely:
48908eddac3fSPeter Zijlstra  */
48918eddac3fSPeter Zijlstra static void check_flags(unsigned long flags)
48928eddac3fSPeter Zijlstra {
489330a35f79SArnd Bergmann #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
48948eddac3fSPeter Zijlstra 	if (!debug_locks)
48958eddac3fSPeter Zijlstra 		return;
48968eddac3fSPeter Zijlstra 
48978eddac3fSPeter Zijlstra 	if (irqs_disabled_flags(flags)) {
4898f9ad4a5fSPeter Zijlstra 		if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
48998eddac3fSPeter Zijlstra 			printk("possible reason: unannotated irqs-off.\n");
49008eddac3fSPeter Zijlstra 		}
49018eddac3fSPeter Zijlstra 	} else {
4902f9ad4a5fSPeter Zijlstra 		if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) {
49038eddac3fSPeter Zijlstra 			printk("possible reason: unannotated irqs-on.\n");
49048eddac3fSPeter Zijlstra 		}
49058eddac3fSPeter Zijlstra 	}
49068eddac3fSPeter Zijlstra 
49078eddac3fSPeter Zijlstra 	/*
49088eddac3fSPeter Zijlstra 	 * We don't accurately track softirq state in e.g.
49098eddac3fSPeter Zijlstra 	 * hardirq contexts (such as on 4KSTACKS), so only
49108eddac3fSPeter Zijlstra 	 * check if not in hardirq contexts:
49118eddac3fSPeter Zijlstra 	 */
49128eddac3fSPeter Zijlstra 	if (!hardirq_count()) {
49138eddac3fSPeter Zijlstra 		if (softirq_count()) {
49148eddac3fSPeter Zijlstra 			/* like the above, but with softirqs */
49158eddac3fSPeter Zijlstra 			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
49168eddac3fSPeter Zijlstra 		} else {
49178eddac3fSPeter Zijlstra 			/* lick the above, does it taste good? */
49188eddac3fSPeter Zijlstra 			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
49198eddac3fSPeter Zijlstra 		}
49208eddac3fSPeter Zijlstra 	}
49218eddac3fSPeter Zijlstra 
49228eddac3fSPeter Zijlstra 	if (!debug_locks)
49238eddac3fSPeter Zijlstra 		print_irqtrace_events(current);
49248eddac3fSPeter Zijlstra #endif
49258eddac3fSPeter Zijlstra }
49268eddac3fSPeter Zijlstra 
49278eddac3fSPeter Zijlstra void lock_set_class(struct lockdep_map *lock, const char *name,
49288eddac3fSPeter Zijlstra 		    struct lock_class_key *key, unsigned int subclass,
49298eddac3fSPeter Zijlstra 		    unsigned long ip)
49308eddac3fSPeter Zijlstra {
49318eddac3fSPeter Zijlstra 	unsigned long flags;
49328eddac3fSPeter Zijlstra 
49338eddac3fSPeter Zijlstra 	if (unlikely(current->lockdep_recursion))
49348eddac3fSPeter Zijlstra 		return;
49358eddac3fSPeter Zijlstra 
49368eddac3fSPeter Zijlstra 	raw_local_irq_save(flags);
493710476e63SPeter Zijlstra 	current->lockdep_recursion++;
49388eddac3fSPeter Zijlstra 	check_flags(flags);
49398eddac3fSPeter Zijlstra 	if (__lock_set_class(lock, name, key, subclass, ip))
49408eddac3fSPeter Zijlstra 		check_chain_key(current);
494110476e63SPeter Zijlstra 	lockdep_recursion_finish();
49428eddac3fSPeter Zijlstra 	raw_local_irq_restore(flags);
49438eddac3fSPeter Zijlstra }
49448eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(lock_set_class);
49458eddac3fSPeter Zijlstra 
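
/*
 * Editorial example (a sketch): the common way into lock_set_class() is the
 * lock_set_subclass() wrapper from lockdep.h, which keeps the lock's name
 * and key but changes the subclass of a lock that is already held, e.g. to
 * drop a _nested annotation once the ordering constraint has passed:
 *
 *	raw_spin_lock_nested(&this->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	lock_set_subclass(&this->lock.dep_map, 0, _RET_IP_);
 *
 * (&this->lock stands in for whatever lock the caller actually holds.)
 */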
49466419c4afSJ. R. Okajima void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
49476419c4afSJ. R. Okajima {
49486419c4afSJ. R. Okajima 	unsigned long flags;
49496419c4afSJ. R. Okajima 
49506419c4afSJ. R. Okajima 	if (unlikely(current->lockdep_recursion))
49516419c4afSJ. R. Okajima 		return;
49526419c4afSJ. R. Okajima 
49536419c4afSJ. R. Okajima 	raw_local_irq_save(flags);
495410476e63SPeter Zijlstra 	current->lockdep_recursion++;
49556419c4afSJ. R. Okajima 	check_flags(flags);
49566419c4afSJ. R. Okajima 	if (__lock_downgrade(lock, ip))
49576419c4afSJ. R. Okajima 		check_chain_key(current);
495810476e63SPeter Zijlstra 	lockdep_recursion_finish();
49596419c4afSJ. R. Okajima 	raw_local_irq_restore(flags);
49606419c4afSJ. R. Okajima }
49616419c4afSJ. R. Okajima EXPORT_SYMBOL_GPL(lock_downgrade);
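
/*
 * Editorial example (a sketch): lock_downgrade() backs annotations such as
 * rwsem's downgrade_write(), which converts a held write lock into a read
 * lock without ever releasing it:
 *
 *	down_write(&sem);
 *	...
 *	downgrade_write(&sem);		hlock->read flips from 0 to 1 here
 *	...
 *	up_read(&sem);
 */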
49626419c4afSJ. R. Okajima 
4963f6f48e18SPeter Zijlstra /* NMI context !!! */
4964f6f48e18SPeter Zijlstra static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock, int subclass)
4965f6f48e18SPeter Zijlstra {
4966f6f48e18SPeter Zijlstra #ifdef CONFIG_PROVE_LOCKING
4967f6f48e18SPeter Zijlstra 	struct lock_class *class = look_up_lock_class(lock, subclass);
4968f6f48e18SPeter Zijlstra 
4969f6f48e18SPeter Zijlstra 	/* if it doesn't have a class (yet), it certainly hasn't been used yet */
4970f6f48e18SPeter Zijlstra 	if (!class)
4971f6f48e18SPeter Zijlstra 		return;
4972f6f48e18SPeter Zijlstra 
4973f6f48e18SPeter Zijlstra 	if (!(class->usage_mask & LOCK_USED))
4974f6f48e18SPeter Zijlstra 		return;
4975f6f48e18SPeter Zijlstra 
4976f6f48e18SPeter Zijlstra 	hlock->class_idx = class - lock_classes;
4977f6f48e18SPeter Zijlstra 
4978f6f48e18SPeter Zijlstra 	print_usage_bug(current, hlock, LOCK_USED, LOCK_USAGE_STATES);
4979f6f48e18SPeter Zijlstra #endif
4980f6f48e18SPeter Zijlstra }
4981f6f48e18SPeter Zijlstra 
4982f6f48e18SPeter Zijlstra static bool lockdep_nmi(void)
4983f6f48e18SPeter Zijlstra {
4984f6f48e18SPeter Zijlstra 	if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)
4985f6f48e18SPeter Zijlstra 		return false;
4986f6f48e18SPeter Zijlstra 
4987f6f48e18SPeter Zijlstra 	if (!in_nmi())
4988f6f48e18SPeter Zijlstra 		return false;
4989f6f48e18SPeter Zijlstra 
4990f6f48e18SPeter Zijlstra 	return true;
4991f6f48e18SPeter Zijlstra }
4992f6f48e18SPeter Zijlstra 
49938eddac3fSPeter Zijlstra /*
4994e9181886SBoqun Feng  * read_lock() is recursive if:
4995e9181886SBoqun Feng  * 1. We force lockdep to think this way in selftests, or
4996e9181886SBoqun Feng  * 2. The implementation is not a queued read/write lock, or
4997e9181886SBoqun Feng  * 3. The locker is in an in_interrupt() context.
4998e9181886SBoqun Feng  */
4999e9181886SBoqun Feng bool read_lock_is_recursive(void)
5000e9181886SBoqun Feng {
5001e9181886SBoqun Feng 	return force_read_lock_recursive ||
5002e9181886SBoqun Feng 	       !IS_ENABLED(CONFIG_QUEUED_RWLOCKS) ||
5003e9181886SBoqun Feng 	       in_interrupt();
5004e9181886SBoqun Feng }
5005e9181886SBoqun Feng EXPORT_SYMBOL_GPL(read_lock_is_recursive);
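
/*
 * Editorial example (a sketch): with a recursive reader the pattern below
 * is safe and must not be flagged, because the in_interrupt() reader is
 * granted the lock even if a writer is already queued behind the first
 * reader:
 *
 *	read_lock(&A);			task context
 *	    <interrupt>
 *	    read_lock(&A);		recursive, in_interrupt()
 *
 * With a queued (non-recursive) implementation the same nesting in plain
 * task context could deadlock against a pending writer, which is why the
 * three cases above matter.
 */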
5006e9181886SBoqun Feng 
5007e9181886SBoqun Feng /*
50088eddac3fSPeter Zijlstra  * We are not always called with irqs disabled - do that here,
50098eddac3fSPeter Zijlstra  * and also avoid lockdep recursion:
50108eddac3fSPeter Zijlstra  */
50118eddac3fSPeter Zijlstra void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
50128eddac3fSPeter Zijlstra 			  int trylock, int read, int check,
50138eddac3fSPeter Zijlstra 			  struct lockdep_map *nest_lock, unsigned long ip)
50148eddac3fSPeter Zijlstra {
50158eddac3fSPeter Zijlstra 	unsigned long flags;
50168eddac3fSPeter Zijlstra 
5017eb1f0023SPeter Zijlstra 	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
5018eb1f0023SPeter Zijlstra 
5019f6f48e18SPeter Zijlstra 	if (unlikely(current->lockdep_recursion)) {
5020f6f48e18SPeter Zijlstra 		/* XXX allow trylock from NMI ?!? */
5021f6f48e18SPeter Zijlstra 		if (lockdep_nmi() && !trylock) {
5022f6f48e18SPeter Zijlstra 			struct held_lock hlock;
5023f6f48e18SPeter Zijlstra 
5024f6f48e18SPeter Zijlstra 			hlock.acquire_ip = ip;
5025f6f48e18SPeter Zijlstra 			hlock.instance = lock;
5026f6f48e18SPeter Zijlstra 			hlock.nest_lock = nest_lock;
5027f6f48e18SPeter Zijlstra 			hlock.irq_context = 2; // XXX
5028f6f48e18SPeter Zijlstra 			hlock.trylock = trylock;
5029f6f48e18SPeter Zijlstra 			hlock.read = read;
5030f6f48e18SPeter Zijlstra 			hlock.check = check;
5031f6f48e18SPeter Zijlstra 			hlock.hardirqs_off = true;
5032f6f48e18SPeter Zijlstra 			hlock.references = 0;
5033f6f48e18SPeter Zijlstra 
5034f6f48e18SPeter Zijlstra 			verify_lock_unused(lock, &hlock, subclass);
5035f6f48e18SPeter Zijlstra 		}
50368eddac3fSPeter Zijlstra 		return;
5037f6f48e18SPeter Zijlstra 	}
50388eddac3fSPeter Zijlstra 
50398eddac3fSPeter Zijlstra 	raw_local_irq_save(flags);
50408eddac3fSPeter Zijlstra 	check_flags(flags);
50418eddac3fSPeter Zijlstra 
504210476e63SPeter Zijlstra 	current->lockdep_recursion++;
50438eddac3fSPeter Zijlstra 	__lock_acquire(lock, subclass, trylock, read, check,
504421199f27SPeter Zijlstra 		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
504510476e63SPeter Zijlstra 	lockdep_recursion_finish();
50468eddac3fSPeter Zijlstra 	raw_local_irq_restore(flags);
50478eddac3fSPeter Zijlstra }
50488eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(lock_acquire);
50498eddac3fSPeter Zijlstra 
50505facae4fSQian Cai void lock_release(struct lockdep_map *lock, unsigned long ip)
50518eddac3fSPeter Zijlstra {
50528eddac3fSPeter Zijlstra 	unsigned long flags;
50538eddac3fSPeter Zijlstra 
5054eb1f0023SPeter Zijlstra 	trace_lock_release(lock, ip);
5055eb1f0023SPeter Zijlstra 
50568eddac3fSPeter Zijlstra 	if (unlikely(current->lockdep_recursion))
50578eddac3fSPeter Zijlstra 		return;
50588eddac3fSPeter Zijlstra 
50598eddac3fSPeter Zijlstra 	raw_local_irq_save(flags);
50608eddac3fSPeter Zijlstra 	check_flags(flags);
5061eb1f0023SPeter Zijlstra 
506210476e63SPeter Zijlstra 	current->lockdep_recursion++;
5063b4adfe8eSYuyang Du 	if (__lock_release(lock, ip))
5064e0f56fd7SPeter Zijlstra 		check_chain_key(current);
506510476e63SPeter Zijlstra 	lockdep_recursion_finish();
50668eddac3fSPeter Zijlstra 	raw_local_irq_restore(flags);
50678eddac3fSPeter Zijlstra }
50688eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(lock_release);
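
/*
 * Editorial example (a sketch): the locking primitives reach these entry
 * points through the wrapper macros in lockdep.h; a plain spin_lock() /
 * spin_unlock() pair boils down to roughly:
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		subclass = 0, trylock = 0, read = 0, check = 1, no nest_lock
 *	...
 *	lock_release(&lock->dep_map, _RET_IP_);
 */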
50698eddac3fSPeter Zijlstra 
5070c86e9b98SPeter Zijlstra noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
50718eddac3fSPeter Zijlstra {
50728eddac3fSPeter Zijlstra 	unsigned long flags;
50738eddac3fSPeter Zijlstra 	int ret = 0;
50748eddac3fSPeter Zijlstra 
50758eddac3fSPeter Zijlstra 	if (unlikely(current->lockdep_recursion))
50768eddac3fSPeter Zijlstra 		return 1; /* avoid false negative lockdep_assert_held() */
50778eddac3fSPeter Zijlstra 
50788eddac3fSPeter Zijlstra 	raw_local_irq_save(flags);
50798eddac3fSPeter Zijlstra 	check_flags(flags);
50808eddac3fSPeter Zijlstra 
508110476e63SPeter Zijlstra 	current->lockdep_recursion++;
5082f8319483SPeter Zijlstra 	ret = __lock_is_held(lock, read);
508310476e63SPeter Zijlstra 	lockdep_recursion_finish();
50848eddac3fSPeter Zijlstra 	raw_local_irq_restore(flags);
50858eddac3fSPeter Zijlstra 
50868eddac3fSPeter Zijlstra 	return ret;
50878eddac3fSPeter Zijlstra }
5088f8319483SPeter Zijlstra EXPORT_SYMBOL_GPL(lock_is_held_type);
50892f43c602SMasami Hiramatsu NOKPROBE_SYMBOL(lock_is_held_type);
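
/*
 * Editorial example (a sketch): this is what the assertion helpers in
 * lockdep.h boil down to, roughly:
 *
 *	lockdep_assert_held(&foo->lock);
 *		-> WARN_ON(debug_locks && !lockdep_is_held(&foo->lock))
 *		-> lock_is_held_type(&foo->lock.dep_map, -1)
 *
 * read == -1 accepts a hold of either kind, read == 1 only a read hold and
 * read == 0 only a write hold, matching __lock_is_held() above.
 */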
50908eddac3fSPeter Zijlstra 
5091e7904a28SPeter Zijlstra struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
5092a24fc60dSPeter Zijlstra {
5093e7904a28SPeter Zijlstra 	struct pin_cookie cookie = NIL_COOKIE;
5094a24fc60dSPeter Zijlstra 	unsigned long flags;
5095a24fc60dSPeter Zijlstra 
5096a24fc60dSPeter Zijlstra 	if (unlikely(current->lockdep_recursion))
5097e7904a28SPeter Zijlstra 		return cookie;
5098a24fc60dSPeter Zijlstra 
5099a24fc60dSPeter Zijlstra 	raw_local_irq_save(flags);
5100a24fc60dSPeter Zijlstra 	check_flags(flags);
5101a24fc60dSPeter Zijlstra 
510210476e63SPeter Zijlstra 	current->lockdep_recursion++;
5103e7904a28SPeter Zijlstra 	cookie = __lock_pin_lock(lock);
510410476e63SPeter Zijlstra 	lockdep_recursion_finish();
5105a24fc60dSPeter Zijlstra 	raw_local_irq_restore(flags);
5106e7904a28SPeter Zijlstra 
5107e7904a28SPeter Zijlstra 	return cookie;
5108a24fc60dSPeter Zijlstra }
5109a24fc60dSPeter Zijlstra EXPORT_SYMBOL_GPL(lock_pin_lock);
5110a24fc60dSPeter Zijlstra 
5111e7904a28SPeter Zijlstra void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5112a24fc60dSPeter Zijlstra {
5113a24fc60dSPeter Zijlstra 	unsigned long flags;
5114a24fc60dSPeter Zijlstra 
5115a24fc60dSPeter Zijlstra 	if (unlikely(current->lockdep_recursion))
5116a24fc60dSPeter Zijlstra 		return;
5117a24fc60dSPeter Zijlstra 
5118a24fc60dSPeter Zijlstra 	raw_local_irq_save(flags);
5119a24fc60dSPeter Zijlstra 	check_flags(flags);
5120a24fc60dSPeter Zijlstra 
512110476e63SPeter Zijlstra 	current->lockdep_recursion++;
5122e7904a28SPeter Zijlstra 	__lock_repin_lock(lock, cookie);
512310476e63SPeter Zijlstra 	lockdep_recursion_finish();
5124e7904a28SPeter Zijlstra 	raw_local_irq_restore(flags);
5125e7904a28SPeter Zijlstra }
5126e7904a28SPeter Zijlstra EXPORT_SYMBOL_GPL(lock_repin_lock);
5127e7904a28SPeter Zijlstra 
5128e7904a28SPeter Zijlstra void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5129e7904a28SPeter Zijlstra {
5130e7904a28SPeter Zijlstra 	unsigned long flags;
5131e7904a28SPeter Zijlstra 
5132e7904a28SPeter Zijlstra 	if (unlikely(current->lockdep_recursion))
5133e7904a28SPeter Zijlstra 		return;
5134e7904a28SPeter Zijlstra 
5135e7904a28SPeter Zijlstra 	raw_local_irq_save(flags);
5136e7904a28SPeter Zijlstra 	check_flags(flags);
5137e7904a28SPeter Zijlstra 
513810476e63SPeter Zijlstra 	current->lockdep_recursion++;
5139e7904a28SPeter Zijlstra 	__lock_unpin_lock(lock, cookie);
514010476e63SPeter Zijlstra 	lockdep_recursion_finish();
5141a24fc60dSPeter Zijlstra 	raw_local_irq_restore(flags);
5142a24fc60dSPeter Zijlstra }
5143a24fc60dSPeter Zijlstra EXPORT_SYMBOL_GPL(lock_unpin_lock);
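
/*
 * Editorial example (a sketch): the pin API is for code that must keep a
 * lock held across calls which are not allowed to drop it; the scheduler
 * pins the runqueue lock this way around its callbacks:
 *
 *	cookie = lockdep_pin_lock(&rq->lock);
 *	...code that must not release rq->lock...
 *	lockdep_unpin_lock(&rq->lock, cookie);
 *
 * Releasing the lock while pinned trips the "releasing a pinned lock"
 * warning in __lock_release(), and unpinning with a stale or mismatched
 * cookie trips the pin count checks above.
 */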
5144a24fc60dSPeter Zijlstra 
51458eddac3fSPeter Zijlstra #ifdef CONFIG_LOCK_STAT
5146f7c1c6b3SYuyang Du static void print_lock_contention_bug(struct task_struct *curr,
5147f7c1c6b3SYuyang Du 				      struct lockdep_map *lock,
51488eddac3fSPeter Zijlstra 				      unsigned long ip)
51498eddac3fSPeter Zijlstra {
51508eddac3fSPeter Zijlstra 	if (!debug_locks_off())
5151f7c1c6b3SYuyang Du 		return;
51528eddac3fSPeter Zijlstra 	if (debug_locks_silent)
5153f7c1c6b3SYuyang Du 		return;
51548eddac3fSPeter Zijlstra 
5155681fbec8SPaul E. McKenney 	pr_warn("\n");
5156a5dd63efSPaul E. McKenney 	pr_warn("=================================\n");
5157a5dd63efSPaul E. McKenney 	pr_warn("WARNING: bad contention detected!\n");
51588eddac3fSPeter Zijlstra 	print_kernel_ident();
5159a5dd63efSPaul E. McKenney 	pr_warn("---------------------------------\n");
5160681fbec8SPaul E. McKenney 	pr_warn("%s/%d is trying to contend lock (",
51618eddac3fSPeter Zijlstra 		curr->comm, task_pid_nr(curr));
51628eddac3fSPeter Zijlstra 	print_lockdep_cache(lock);
5163681fbec8SPaul E. McKenney 	pr_cont(") at:\n");
51642062a4e8SDmitry Safonov 	print_ip_sym(KERN_WARNING, ip);
5165681fbec8SPaul E. McKenney 	pr_warn("but there are no locks held!\n");
5166681fbec8SPaul E. McKenney 	pr_warn("\nother info that might help us debug this:\n");
51678eddac3fSPeter Zijlstra 	lockdep_print_held_locks(curr);
51688eddac3fSPeter Zijlstra 
5169681fbec8SPaul E. McKenney 	pr_warn("\nstack backtrace:\n");
51708eddac3fSPeter Zijlstra 	dump_stack();
51718eddac3fSPeter Zijlstra }
51728eddac3fSPeter Zijlstra 
51738eddac3fSPeter Zijlstra static void
51748eddac3fSPeter Zijlstra __lock_contended(struct lockdep_map *lock, unsigned long ip)
51758eddac3fSPeter Zijlstra {
51768eddac3fSPeter Zijlstra 	struct task_struct *curr = current;
517741c2c5b8SJ. R. Okajima 	struct held_lock *hlock;
51788eddac3fSPeter Zijlstra 	struct lock_class_stats *stats;
51798eddac3fSPeter Zijlstra 	unsigned int depth;
51808eddac3fSPeter Zijlstra 	int i, contention_point, contending_point;
51818eddac3fSPeter Zijlstra 
51828eddac3fSPeter Zijlstra 	depth = curr->lockdep_depth;
51838eddac3fSPeter Zijlstra 	/*
51848eddac3fSPeter Zijlstra 	 * Whee, we contended on this lock, except it seems we're not
51858eddac3fSPeter Zijlstra 	 * actually trying to acquire anything much at all..
51868eddac3fSPeter Zijlstra 	 */
51878eddac3fSPeter Zijlstra 	if (DEBUG_LOCKS_WARN_ON(!depth))
51888eddac3fSPeter Zijlstra 		return;
51898eddac3fSPeter Zijlstra 
519041c2c5b8SJ. R. Okajima 	hlock = find_held_lock(curr, lock, depth, &i);
519141c2c5b8SJ. R. Okajima 	if (!hlock) {
51928eddac3fSPeter Zijlstra 		print_lock_contention_bug(curr, lock, ip);
51938eddac3fSPeter Zijlstra 		return;
519441c2c5b8SJ. R. Okajima 	}
51958eddac3fSPeter Zijlstra 
51968eddac3fSPeter Zijlstra 	if (hlock->instance != lock)
51978eddac3fSPeter Zijlstra 		return;
51988eddac3fSPeter Zijlstra 
51998eddac3fSPeter Zijlstra 	hlock->waittime_stamp = lockstat_clock();
52008eddac3fSPeter Zijlstra 
52018eddac3fSPeter Zijlstra 	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
52028eddac3fSPeter Zijlstra 	contending_point = lock_point(hlock_class(hlock)->contending_point,
52038eddac3fSPeter Zijlstra 				      lock->ip);
52048eddac3fSPeter Zijlstra 
52058eddac3fSPeter Zijlstra 	stats = get_lock_stats(hlock_class(hlock));
52068eddac3fSPeter Zijlstra 	if (contention_point < LOCKSTAT_POINTS)
52078eddac3fSPeter Zijlstra 		stats->contention_point[contention_point]++;
52088eddac3fSPeter Zijlstra 	if (contending_point < LOCKSTAT_POINTS)
52098eddac3fSPeter Zijlstra 		stats->contending_point[contending_point]++;
52108eddac3fSPeter Zijlstra 	if (lock->cpu != smp_processor_id())
52118eddac3fSPeter Zijlstra 		stats->bounces[bounce_contended + !!hlock->read]++;
52128eddac3fSPeter Zijlstra }
52138eddac3fSPeter Zijlstra 
52148eddac3fSPeter Zijlstra static void
52158eddac3fSPeter Zijlstra __lock_acquired(struct lockdep_map *lock, unsigned long ip)
52168eddac3fSPeter Zijlstra {
52178eddac3fSPeter Zijlstra 	struct task_struct *curr = current;
521841c2c5b8SJ. R. Okajima 	struct held_lock *hlock;
52198eddac3fSPeter Zijlstra 	struct lock_class_stats *stats;
52208eddac3fSPeter Zijlstra 	unsigned int depth;
52218eddac3fSPeter Zijlstra 	u64 now, waittime = 0;
52228eddac3fSPeter Zijlstra 	int i, cpu;
52238eddac3fSPeter Zijlstra 
52248eddac3fSPeter Zijlstra 	depth = curr->lockdep_depth;
52258eddac3fSPeter Zijlstra 	/*
52268eddac3fSPeter Zijlstra 	 * Yay, we acquired ownership of this lock we didn't try to
52278eddac3fSPeter Zijlstra 	 * acquire, how the heck did that happen?
52288eddac3fSPeter Zijlstra 	 */
52298eddac3fSPeter Zijlstra 	if (DEBUG_LOCKS_WARN_ON(!depth))
52308eddac3fSPeter Zijlstra 		return;
52318eddac3fSPeter Zijlstra 
523241c2c5b8SJ. R. Okajima 	hlock = find_held_lock(curr, lock, depth, &i);
523341c2c5b8SJ. R. Okajima 	if (!hlock) {
52348eddac3fSPeter Zijlstra 		print_lock_contention_bug(curr, lock, _RET_IP_);
52358eddac3fSPeter Zijlstra 		return;
523641c2c5b8SJ. R. Okajima 	}
52378eddac3fSPeter Zijlstra 
52388eddac3fSPeter Zijlstra 	if (hlock->instance != lock)
52398eddac3fSPeter Zijlstra 		return;
52408eddac3fSPeter Zijlstra 
52418eddac3fSPeter Zijlstra 	cpu = smp_processor_id();
52428eddac3fSPeter Zijlstra 	if (hlock->waittime_stamp) {
52438eddac3fSPeter Zijlstra 		now = lockstat_clock();
52448eddac3fSPeter Zijlstra 		waittime = now - hlock->waittime_stamp;
52458eddac3fSPeter Zijlstra 		hlock->holdtime_stamp = now;
52468eddac3fSPeter Zijlstra 	}
52478eddac3fSPeter Zijlstra 
52488eddac3fSPeter Zijlstra 	stats = get_lock_stats(hlock_class(hlock));
52498eddac3fSPeter Zijlstra 	if (waittime) {
52508eddac3fSPeter Zijlstra 		if (hlock->read)
52518eddac3fSPeter Zijlstra 			lock_time_inc(&stats->read_waittime, waittime);
52528eddac3fSPeter Zijlstra 		else
52538eddac3fSPeter Zijlstra 			lock_time_inc(&stats->write_waittime, waittime);
52548eddac3fSPeter Zijlstra 	}
52558eddac3fSPeter Zijlstra 	if (lock->cpu != cpu)
52568eddac3fSPeter Zijlstra 		stats->bounces[bounce_acquired + !!hlock->read]++;
52578eddac3fSPeter Zijlstra 
52588eddac3fSPeter Zijlstra 	lock->cpu = cpu;
52598eddac3fSPeter Zijlstra 	lock->ip = ip;
52608eddac3fSPeter Zijlstra }
52618eddac3fSPeter Zijlstra 
52628eddac3fSPeter Zijlstra void lock_contended(struct lockdep_map *lock, unsigned long ip)
52638eddac3fSPeter Zijlstra {
52648eddac3fSPeter Zijlstra 	unsigned long flags;
52658eddac3fSPeter Zijlstra 
5266eb1f0023SPeter Zijlstra 	trace_lock_contended(lock, ip);
5267eb1f0023SPeter Zijlstra 
52689506a742SWaiman Long 	if (unlikely(!lock_stat || !debug_locks))
52698eddac3fSPeter Zijlstra 		return;
52708eddac3fSPeter Zijlstra 
52718eddac3fSPeter Zijlstra 	if (unlikely(current->lockdep_recursion))
52728eddac3fSPeter Zijlstra 		return;
52738eddac3fSPeter Zijlstra 
52748eddac3fSPeter Zijlstra 	raw_local_irq_save(flags);
52758eddac3fSPeter Zijlstra 	check_flags(flags);
527610476e63SPeter Zijlstra 	current->lockdep_recursion++;
52778eddac3fSPeter Zijlstra 	__lock_contended(lock, ip);
527810476e63SPeter Zijlstra 	lockdep_recursion_finish();
52798eddac3fSPeter Zijlstra 	raw_local_irq_restore(flags);
52808eddac3fSPeter Zijlstra }
52818eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(lock_contended);
52828eddac3fSPeter Zijlstra 
52838eddac3fSPeter Zijlstra void lock_acquired(struct lockdep_map *lock, unsigned long ip)
52848eddac3fSPeter Zijlstra {
52858eddac3fSPeter Zijlstra 	unsigned long flags;
52868eddac3fSPeter Zijlstra 
5287eb1f0023SPeter Zijlstra 	trace_lock_acquired(lock, ip);
5288eb1f0023SPeter Zijlstra 
52899506a742SWaiman Long 	if (unlikely(!lock_stat || !debug_locks))
52908eddac3fSPeter Zijlstra 		return;
52918eddac3fSPeter Zijlstra 
52928eddac3fSPeter Zijlstra 	if (unlikely(current->lockdep_recursion))
52938eddac3fSPeter Zijlstra 		return;
52948eddac3fSPeter Zijlstra 
52958eddac3fSPeter Zijlstra 	raw_local_irq_save(flags);
52968eddac3fSPeter Zijlstra 	check_flags(flags);
529710476e63SPeter Zijlstra 	current->lockdep_recursion++;
52988eddac3fSPeter Zijlstra 	__lock_acquired(lock, ip);
529910476e63SPeter Zijlstra 	lockdep_recursion_finish();
53008eddac3fSPeter Zijlstra 	raw_local_irq_restore(flags);
53018eddac3fSPeter Zijlstra }
53028eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(lock_acquired);
53038eddac3fSPeter Zijlstra #endif
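
/*
 * Editorial example (a sketch): with CONFIG_LOCK_STAT, sleeping and
 * spinning primitives bracket their slow path with the two hooks above,
 * roughly:
 *
 *	lock_contended(&lock->dep_map, ip);	about to block or spin
 *	...wait for the lock...
 *	lock_acquired(&lock->dep_map, ip);	got it, wait time is recorded
 *
 * The time between the two calls feeds the read/write wait-time statistics
 * exported through /proc/lock_stat.
 */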
53048eddac3fSPeter Zijlstra 
53058eddac3fSPeter Zijlstra /*
53068eddac3fSPeter Zijlstra  * Used by the testsuite, sanitize the validator state
53078eddac3fSPeter Zijlstra  * after a simulated failure:
53088eddac3fSPeter Zijlstra  */
53098eddac3fSPeter Zijlstra 
53108eddac3fSPeter Zijlstra void lockdep_reset(void)
53118eddac3fSPeter Zijlstra {
53128eddac3fSPeter Zijlstra 	unsigned long flags;
53138eddac3fSPeter Zijlstra 	int i;
53148eddac3fSPeter Zijlstra 
53158eddac3fSPeter Zijlstra 	raw_local_irq_save(flags);
5316e196e479SYuyang Du 	lockdep_init_task(current);
53178eddac3fSPeter Zijlstra 	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
53188eddac3fSPeter Zijlstra 	nr_hardirq_chains = 0;
53198eddac3fSPeter Zijlstra 	nr_softirq_chains = 0;
53208eddac3fSPeter Zijlstra 	nr_process_chains = 0;
53218eddac3fSPeter Zijlstra 	debug_locks = 1;
53228eddac3fSPeter Zijlstra 	for (i = 0; i < CHAINHASH_SIZE; i++)
5323a63f38ccSAndrew Morton 		INIT_HLIST_HEAD(chainhash_table + i);
53248eddac3fSPeter Zijlstra 	raw_local_irq_restore(flags);
53258eddac3fSPeter Zijlstra }
53268eddac3fSPeter Zijlstra 
5327a0b0fd53SBart Van Assche /* Remove a class from a lock chain. Must be called with the graph lock held. */
5328de4643a7SBart Van Assche static void remove_class_from_lock_chain(struct pending_free *pf,
5329de4643a7SBart Van Assche 					 struct lock_chain *chain,
5330a0b0fd53SBart Van Assche 					 struct lock_class *class)
5331a0b0fd53SBart Van Assche {
5332a0b0fd53SBart Van Assche #ifdef CONFIG_PROVE_LOCKING
5333a0b0fd53SBart Van Assche 	int i;
5334a0b0fd53SBart Van Assche 
5335a0b0fd53SBart Van Assche 	for (i = chain->base; i < chain->base + chain->depth; i++) {
5336a0b0fd53SBart Van Assche 		if (chain_hlocks[i] != class - lock_classes)
5337a0b0fd53SBart Van Assche 			continue;
5338a0b0fd53SBart Van Assche 		/*
5339a0b0fd53SBart Van Assche 		 * Each lock class occurs at most once in a lock chain so once
5340a0b0fd53SBart Van Assche 		 * we found a match we can break out of this loop.
5341a0b0fd53SBart Van Assche 		 */
5342836bd74bSWaiman Long 		goto free_lock_chain;
5343a0b0fd53SBart Van Assche 	}
5344a0b0fd53SBart Van Assche 	/* Since the chain has not been modified, return. */
5345a0b0fd53SBart Van Assche 	return;
5346a0b0fd53SBart Van Assche 
5347836bd74bSWaiman Long free_lock_chain:
5348810507feSWaiman Long 	free_chain_hlocks(chain->base, chain->depth);
5349a0b0fd53SBart Van Assche 	/* Overwrite the chain key for concurrent RCU readers. */
5350836bd74bSWaiman Long 	WRITE_ONCE(chain->chain_key, INITIAL_CHAIN_KEY);
5351b3b9c187SWaiman Long 	dec_chains(chain->irq_context);
5352b3b9c187SWaiman Long 
5353a0b0fd53SBart Van Assche 	/*
5354a0b0fd53SBart Van Assche 	 * Note: calling hlist_del_rcu() from inside a
5355a0b0fd53SBart Van Assche 	 * hlist_for_each_entry_rcu() loop is safe.
5356a0b0fd53SBart Van Assche 	 */
5357a0b0fd53SBart Van Assche 	hlist_del_rcu(&chain->entry);
5358de4643a7SBart Van Assche 	__set_bit(chain - lock_chains, pf->lock_chains_being_freed);
5359797b82ebSWaiman Long 	nr_zapped_lock_chains++;
5360a0b0fd53SBart Van Assche #endif
5361a0b0fd53SBart Van Assche }
5362a0b0fd53SBart Van Assche 
5363a0b0fd53SBart Van Assche /* Must be called with the graph lock held. */
5364de4643a7SBart Van Assche static void remove_class_from_lock_chains(struct pending_free *pf,
5365de4643a7SBart Van Assche 					  struct lock_class *class)
5366a0b0fd53SBart Van Assche {
5367a0b0fd53SBart Van Assche 	struct lock_chain *chain;
5368a0b0fd53SBart Van Assche 	struct hlist_head *head;
5369a0b0fd53SBart Van Assche 	int i;
5370a0b0fd53SBart Van Assche 
5371a0b0fd53SBart Van Assche 	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
5372a0b0fd53SBart Van Assche 		head = chainhash_table + i;
5373a0b0fd53SBart Van Assche 		hlist_for_each_entry_rcu(chain, head, entry) {
5374de4643a7SBart Van Assche 			remove_class_from_lock_chain(pf, chain, class);
5375a0b0fd53SBart Van Assche 		}
5376a0b0fd53SBart Van Assche 	}
5377a0b0fd53SBart Van Assche }
5378a0b0fd53SBart Van Assche 
5379786fa29eSBart Van Assche /*
5380786fa29eSBart Van Assche  * Remove all references to a lock class. The caller must hold the graph lock.
5381786fa29eSBart Van Assche  */
5382a0b0fd53SBart Van Assche static void zap_class(struct pending_free *pf, struct lock_class *class)
53838eddac3fSPeter Zijlstra {
538486cffb80SBart Van Assche 	struct lock_list *entry;
53858eddac3fSPeter Zijlstra 	int i;
53868eddac3fSPeter Zijlstra 
5387a0b0fd53SBart Van Assche 	WARN_ON_ONCE(!class->key);
5388a0b0fd53SBart Van Assche 
53898eddac3fSPeter Zijlstra 	/*
53908eddac3fSPeter Zijlstra 	 * Remove all dependencies this lock is
53918eddac3fSPeter Zijlstra 	 * involved in:
53928eddac3fSPeter Zijlstra 	 */
5393ace35a7aSBart Van Assche 	for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
5394ace35a7aSBart Van Assche 		entry = list_entries + i;
539586cffb80SBart Van Assche 		if (entry->class != class && entry->links_to != class)
539686cffb80SBart Van Assche 			continue;
5397ace35a7aSBart Van Assche 		__clear_bit(i, list_entries_in_use);
5398ace35a7aSBart Van Assche 		nr_list_entries--;
539986cffb80SBart Van Assche 		list_del_rcu(&entry->entry);
54008eddac3fSPeter Zijlstra 	}
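	/*
	 * All list_entries that referenced this class were removed above, so
	 * both of its dependency lists should now be empty. If they are not,
	 * the lockdep data structures are corrupted and the class cannot be
	 * recycled.
	 */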
5401a0b0fd53SBart Van Assche 	if (list_empty(&class->locks_after) &&
5402a0b0fd53SBart Van Assche 	    list_empty(&class->locks_before)) {
5403a0b0fd53SBart Van Assche 		list_move_tail(&class->lock_entry, &pf->zapped);
5404a63f38ccSAndrew Morton 		hlist_del_rcu(&class->hash_entry);
5405a0b0fd53SBart Van Assche 		WRITE_ONCE(class->key, NULL);
5406a0b0fd53SBart Van Assche 		WRITE_ONCE(class->name, NULL);
5407a0b0fd53SBart Van Assche 		nr_lock_classes--;
540801bb6f0aSYuyang Du 		__clear_bit(class - lock_classes, lock_classes_in_use);
5409a0b0fd53SBart Van Assche 	} else {
5410a0b0fd53SBart Van Assche 		WARN_ONCE(true, "%s() failed for class %s\n", __func__,
5411a0b0fd53SBart Van Assche 			  class->name);
5412a0b0fd53SBart Van Assche 	}
54138eddac3fSPeter Zijlstra 
5414de4643a7SBart Van Assche 	remove_class_from_lock_chains(pf, class);
54151d44bcb4SWaiman Long 	nr_zapped_classes++;
5416a0b0fd53SBart Van Assche }
5417a0b0fd53SBart Van Assche 
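/*
 * Prepare a zapped lock class for reuse: zero everything from ->key onward.
 * The list heads that precede ->key (lock_entry, locks_after, locks_before)
 * are left untouched so the class stays linked on the zapped list and, later,
 * the free list; the WARN_ON_ONCE() checks verify this both before and after
 * the memset().
 */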
5418a0b0fd53SBart Van Assche static void reinit_class(struct lock_class *class)
5419a0b0fd53SBart Van Assche {
5420a0b0fd53SBart Van Assche 	void *const p = class;
5421a0b0fd53SBart Van Assche 	const unsigned int offset = offsetof(struct lock_class, key);
5422a0b0fd53SBart Van Assche 
5423a0b0fd53SBart Van Assche 	WARN_ON_ONCE(!class->lock_entry.next);
5424a0b0fd53SBart Van Assche 	WARN_ON_ONCE(!list_empty(&class->locks_after));
5425a0b0fd53SBart Van Assche 	WARN_ON_ONCE(!list_empty(&class->locks_before));
5426a0b0fd53SBart Van Assche 	memset(p + offset, 0, sizeof(*class) - offset);
5427a0b0fd53SBart Van Assche 	WARN_ON_ONCE(!class->lock_entry.next);
5428a0b0fd53SBart Van Assche 	WARN_ON_ONCE(!list_empty(&class->locks_after));
5429a0b0fd53SBart Van Assche 	WARN_ON_ONCE(!list_empty(&class->locks_before));
54308eddac3fSPeter Zijlstra }
54318eddac3fSPeter Zijlstra 
54328eddac3fSPeter Zijlstra static inline int within(const void *addr, void *start, unsigned long size)
54338eddac3fSPeter Zijlstra {
54348eddac3fSPeter Zijlstra 	return addr >= start && addr < start + size;
54358eddac3fSPeter Zijlstra }
54368eddac3fSPeter Zijlstra 
5437a0b0fd53SBart Van Assche static bool inside_selftest(void)
5438a0b0fd53SBart Van Assche {
5439a0b0fd53SBart Van Assche 	return current == lockdep_selftest_task_struct;
5440a0b0fd53SBart Van Assche }
5441a0b0fd53SBart Van Assche 
5442a0b0fd53SBart Van Assche /* The caller must hold the graph lock. */
5443a0b0fd53SBart Van Assche static struct pending_free *get_pending_free(void)
5444a0b0fd53SBart Van Assche {
5445a0b0fd53SBart Van Assche 	return delayed_free.pf + delayed_free.index;
5446a0b0fd53SBart Van Assche }
5447a0b0fd53SBart Van Assche 
5448a0b0fd53SBart Van Assche static void free_zapped_rcu(struct rcu_head *cb);
5449a0b0fd53SBart Van Assche 
5450a0b0fd53SBart Van Assche /*
5451a0b0fd53SBart Van Assche  * Schedule an RCU callback if no RCU callback is pending. Must be called with
5452a0b0fd53SBart Van Assche  * the graph lock held.
5453a0b0fd53SBart Van Assche  */
5454a0b0fd53SBart Van Assche static void call_rcu_zapped(struct pending_free *pf)
5455a0b0fd53SBart Van Assche {
5456a0b0fd53SBart Van Assche 	WARN_ON_ONCE(inside_selftest());
5457a0b0fd53SBart Van Assche 
5458a0b0fd53SBart Van Assche 	if (list_empty(&pf->zapped))
5459a0b0fd53SBart Van Assche 		return;
5460a0b0fd53SBart Van Assche 
5461a0b0fd53SBart Van Assche 	if (delayed_free.scheduled)
5462a0b0fd53SBart Van Assche 		return;
5463a0b0fd53SBart Van Assche 
5464a0b0fd53SBart Van Assche 	delayed_free.scheduled = true;
5465a0b0fd53SBart Van Assche 
5466a0b0fd53SBart Van Assche 	WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
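	/* Close this pending_free; new zapped entries will go to the other one. */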
5467a0b0fd53SBart Van Assche 	delayed_free.index ^= 1;
5468a0b0fd53SBart Van Assche 
5469a0b0fd53SBart Van Assche 	call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
5470a0b0fd53SBart Van Assche }
5471a0b0fd53SBart Van Assche 
5472a0b0fd53SBart Van Assche /* The caller must hold the graph lock. May be called from RCU context. */
5473a0b0fd53SBart Van Assche static void __free_zapped_classes(struct pending_free *pf)
5474a0b0fd53SBart Van Assche {
5475a0b0fd53SBart Van Assche 	struct lock_class *class;
5476a0b0fd53SBart Van Assche 
547772dcd505SPeter Zijlstra 	check_data_structures();
5478b526b2e3SBart Van Assche 
5479a0b0fd53SBart Van Assche 	list_for_each_entry(class, &pf->zapped, lock_entry)
5480a0b0fd53SBart Van Assche 		reinit_class(class);
5481a0b0fd53SBart Van Assche 
5482a0b0fd53SBart Van Assche 	list_splice_init(&pf->zapped, &free_lock_classes);
5483de4643a7SBart Van Assche 
5484de4643a7SBart Van Assche #ifdef CONFIG_PROVE_LOCKING
5485de4643a7SBart Van Assche 	bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
5486de4643a7SBart Van Assche 		      pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
5487de4643a7SBart Van Assche 	bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
5488de4643a7SBart Van Assche #endif
5489a0b0fd53SBart Van Assche }
5490a0b0fd53SBart Van Assche 
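/*
 * RCU callback scheduled by call_rcu_zapped(). delayed_free.pf[] holds two
 * pending_free structures used in a ping-pong fashion: one is open and
 * collects newly zapped classes while the closed one waits out a grace
 * period. This callback frees the closed structure and, if the open one
 * accumulated work in the meantime, closes that one and re-arms itself.
 */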
5491a0b0fd53SBart Van Assche static void free_zapped_rcu(struct rcu_head *ch)
5492a0b0fd53SBart Van Assche {
5493a0b0fd53SBart Van Assche 	struct pending_free *pf;
5494a0b0fd53SBart Van Assche 	unsigned long flags;
5495a0b0fd53SBart Van Assche 
5496a0b0fd53SBart Van Assche 	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
5497a0b0fd53SBart Van Assche 		return;
5498a0b0fd53SBart Van Assche 
5499a0b0fd53SBart Van Assche 	raw_local_irq_save(flags);
5500248efb21SPeter Zijlstra 	lockdep_lock();
5501a0b0fd53SBart Van Assche 
5502a0b0fd53SBart Van Assche 	/* Free the pending_free that was closed when this callback was scheduled. */
5503a0b0fd53SBart Van Assche 	pf = delayed_free.pf + (delayed_free.index ^ 1);
5504a0b0fd53SBart Van Assche 	__free_zapped_classes(pf);
5505a0b0fd53SBart Van Assche 	delayed_free.scheduled = false;
5506a0b0fd53SBart Van Assche 
5507a0b0fd53SBart Van Assche 	/*
5508a0b0fd53SBart Van Assche 	 * If there's anything on the open list, close and start a new callback.
5509a0b0fd53SBart Van Assche 	 */
5510a0b0fd53SBart Van Assche 	call_rcu_zapped(delayed_free.pf + delayed_free.index);
5511a0b0fd53SBart Van Assche 
5512248efb21SPeter Zijlstra 	lockdep_unlock();
5513a0b0fd53SBart Van Assche 	raw_local_irq_restore(flags);
5514a0b0fd53SBart Van Assche }
5515a0b0fd53SBart Van Assche 
5516a0b0fd53SBart Van Assche /*
5517a0b0fd53SBart Van Assche  * Remove all lock classes from the class hash table and from the
5518a0b0fd53SBart Van Assche  * all_lock_classes list whose key or name is in the address range [start,
5519a0b0fd53SBart Van Assche  * start + size). Move these lock classes to the pf->zapped list. Must
5520a0b0fd53SBart Van Assche  * be called with the graph lock held.
5521a0b0fd53SBart Van Assche  */
5522a0b0fd53SBart Van Assche static void __lockdep_free_key_range(struct pending_free *pf, void *start,
5523a0b0fd53SBart Van Assche 				     unsigned long size)
5524956f3563SBart Van Assche {
5525956f3563SBart Van Assche 	struct lock_class *class;
5526956f3563SBart Van Assche 	struct hlist_head *head;
5527956f3563SBart Van Assche 	int i;
5528956f3563SBart Van Assche 
5529956f3563SBart Van Assche 	/* Unhash all classes that were created by a module. */
5530956f3563SBart Van Assche 	for (i = 0; i < CLASSHASH_SIZE; i++) {
5531956f3563SBart Van Assche 		head = classhash_table + i;
5532956f3563SBart Van Assche 		hlist_for_each_entry_rcu(class, head, hash_entry) {
5533956f3563SBart Van Assche 			if (!within(class->key, start, size) &&
5534956f3563SBart Van Assche 			    !within(class->name, start, size))
5535956f3563SBart Van Assche 				continue;
5536a0b0fd53SBart Van Assche 			zap_class(pf, class);
5537956f3563SBart Van Assche 		}
5538956f3563SBart Van Assche 	}
5539956f3563SBart Van Assche }
5540956f3563SBart Van Assche 
554135a9393cSPeter Zijlstra /*
554235a9393cSPeter Zijlstra  * Used in module.c to remove lock classes from memory that is going to be
554335a9393cSPeter Zijlstra  * freed, and possibly re-used by other modules.
554435a9393cSPeter Zijlstra  *
554529fc33fbSBart Van Assche  * We will have had one synchronize_rcu() before getting here, so we're
554629fc33fbSBart Van Assche  * guaranteed nobody will look up these exact classes -- they're properly dead
554729fc33fbSBart Van Assche  * but still allocated.
554835a9393cSPeter Zijlstra  */
5549a0b0fd53SBart Van Assche static void lockdep_free_key_range_reg(void *start, unsigned long size)
55508eddac3fSPeter Zijlstra {
5551a0b0fd53SBart Van Assche 	struct pending_free *pf;
55528eddac3fSPeter Zijlstra 	unsigned long flags;
55538eddac3fSPeter Zijlstra 
5554feb0a386SBart Van Assche 	init_data_structures_once();
5555feb0a386SBart Van Assche 
55568eddac3fSPeter Zijlstra 	raw_local_irq_save(flags);
5557248efb21SPeter Zijlstra 	lockdep_lock();
5558a0b0fd53SBart Van Assche 	pf = get_pending_free();
5559a0b0fd53SBart Van Assche 	__lockdep_free_key_range(pf, start, size);
5560a0b0fd53SBart Van Assche 	call_rcu_zapped(pf);
5561248efb21SPeter Zijlstra 	lockdep_unlock();
55628eddac3fSPeter Zijlstra 	raw_local_irq_restore(flags);
556335a9393cSPeter Zijlstra 
556435a9393cSPeter Zijlstra 	/*
556535a9393cSPeter Zijlstra 	 * Wait for any possible iterators from look_up_lock_class() to pass
556635a9393cSPeter Zijlstra 	 * before continuing to free the memory they refer to.
556735a9393cSPeter Zijlstra 	 */
556851959d85SPaul E. McKenney 	synchronize_rcu();
5569a0b0fd53SBart Van Assche }
557035a9393cSPeter Zijlstra 
557135a9393cSPeter Zijlstra /*
5572a0b0fd53SBart Van Assche  * Free all lockdep keys in the range [start, start+size). Does not sleep.
5573a0b0fd53SBart Van Assche  * Ignores debug_locks. Must only be used by the lockdep selftests.
557435a9393cSPeter Zijlstra  */
5575a0b0fd53SBart Van Assche static void lockdep_free_key_range_imm(void *start, unsigned long size)
5576a0b0fd53SBart Van Assche {
5577a0b0fd53SBart Van Assche 	struct pending_free *pf = delayed_free.pf;
5578a0b0fd53SBart Van Assche 	unsigned long flags;
5579a0b0fd53SBart Van Assche 
5580a0b0fd53SBart Van Assche 	init_data_structures_once();
5581a0b0fd53SBart Van Assche 
5582a0b0fd53SBart Van Assche 	raw_local_irq_save(flags);
5583248efb21SPeter Zijlstra 	lockdep_lock();
5584a0b0fd53SBart Van Assche 	__lockdep_free_key_range(pf, start, size);
5585a0b0fd53SBart Van Assche 	__free_zapped_classes(pf);
5586248efb21SPeter Zijlstra 	lockdep_unlock();
5587a0b0fd53SBart Van Assche 	raw_local_irq_restore(flags);
5588a0b0fd53SBart Van Assche }
5589a0b0fd53SBart Van Assche 
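/*
 * Remove all lock classes whose key or name lies in [start, start + size).
 * The lockdep selftests cannot wait for an RCU grace period, so they use the
 * immediate variant; all other callers get the RCU-deferred variant.
 */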
5590a0b0fd53SBart Van Assche void lockdep_free_key_range(void *start, unsigned long size)
5591a0b0fd53SBart Van Assche {
5592a0b0fd53SBart Van Assche 	init_data_structures_once();
5593a0b0fd53SBart Van Assche 
5594a0b0fd53SBart Van Assche 	if (inside_selftest())
5595a0b0fd53SBart Van Assche 		lockdep_free_key_range_imm(start, size);
5596a0b0fd53SBart Van Assche 	else
5597a0b0fd53SBart Van Assche 		lockdep_free_key_range_reg(start, size);
55988eddac3fSPeter Zijlstra }
55998eddac3fSPeter Zijlstra 
56002904d9faSBart Van Assche /*
56012904d9faSBart Van Assche  * Check whether any element of the @lock->class_cache[] array refers to a
56022904d9faSBart Van Assche  * registered lock class. The caller must hold either the graph lock or the
56032904d9faSBart Van Assche  * RCU read lock.
56042904d9faSBart Van Assche  */
56052904d9faSBart Van Assche static bool lock_class_cache_is_registered(struct lockdep_map *lock)
56068eddac3fSPeter Zijlstra {
560735a9393cSPeter Zijlstra 	struct lock_class *class;
5608a63f38ccSAndrew Morton 	struct hlist_head *head;
56098eddac3fSPeter Zijlstra 	int i, j;
56102904d9faSBart Van Assche 
56112904d9faSBart Van Assche 	for (i = 0; i < CLASSHASH_SIZE; i++) {
56122904d9faSBart Van Assche 		head = classhash_table + i;
56132904d9faSBart Van Assche 		hlist_for_each_entry_rcu(class, head, hash_entry) {
56142904d9faSBart Van Assche 			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
56152904d9faSBart Van Assche 				if (lock->class_cache[j] == class)
56162904d9faSBart Van Assche 					return true;
56172904d9faSBart Van Assche 		}
56182904d9faSBart Van Assche 	}
56192904d9faSBart Van Assche 	return false;
56202904d9faSBart Van Assche }
56212904d9faSBart Van Assche 
5622956f3563SBart Van Assche /* The caller must hold the graph lock. Does not sleep. */
5623a0b0fd53SBart Van Assche static void __lockdep_reset_lock(struct pending_free *pf,
5624a0b0fd53SBart Van Assche 				 struct lockdep_map *lock)
56252904d9faSBart Van Assche {
56262904d9faSBart Van Assche 	struct lock_class *class;
5627956f3563SBart Van Assche 	int j;
56288eddac3fSPeter Zijlstra 
56298eddac3fSPeter Zijlstra 	/*
56308eddac3fSPeter Zijlstra 	 * Remove all classes this lock might have:
56318eddac3fSPeter Zijlstra 	 */
56328eddac3fSPeter Zijlstra 	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
56338eddac3fSPeter Zijlstra 		/*
56348eddac3fSPeter Zijlstra 		 * If the class exists we look it up and zap it:
56358eddac3fSPeter Zijlstra 		 */
56368eddac3fSPeter Zijlstra 		class = look_up_lock_class(lock, j);
563764f29d1bSMatthew Wilcox 		if (class)
5638a0b0fd53SBart Van Assche 			zap_class(pf, class);
56398eddac3fSPeter Zijlstra 	}
56408eddac3fSPeter Zijlstra 	/*
56418eddac3fSPeter Zijlstra 	 * Debug check: in the end all mapped classes should
56428eddac3fSPeter Zijlstra 	 * be gone.
56438eddac3fSPeter Zijlstra 	 */
5644956f3563SBart Van Assche 	if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
5645956f3563SBart Van Assche 		debug_locks_off();
56468eddac3fSPeter Zijlstra }
5647956f3563SBart Van Assche 
5648a0b0fd53SBart Van Assche /*
5649a0b0fd53SBart Van Assche  * Remove all information lockdep has about a lock if debug_locks == 1. Free
5650a0b0fd53SBart Van Assche  * released data structures from RCU context.
5651a0b0fd53SBart Van Assche  */
5652a0b0fd53SBart Van Assche static void lockdep_reset_lock_reg(struct lockdep_map *lock)
5653956f3563SBart Van Assche {
5654a0b0fd53SBart Van Assche 	struct pending_free *pf;
5655956f3563SBart Van Assche 	unsigned long flags;
5656956f3563SBart Van Assche 	int locked;
5657956f3563SBart Van Assche 
5658956f3563SBart Van Assche 	raw_local_irq_save(flags);
5659956f3563SBart Van Assche 	locked = graph_lock();
5660a0b0fd53SBart Van Assche 	if (!locked)
5661a0b0fd53SBart Van Assche 		goto out_irq;
5662a0b0fd53SBart Van Assche 
5663a0b0fd53SBart Van Assche 	pf = get_pending_free();
5664a0b0fd53SBart Van Assche 	__lockdep_reset_lock(pf, lock);
5665a0b0fd53SBart Van Assche 	call_rcu_zapped(pf);
5666a0b0fd53SBart Van Assche 
56678eddac3fSPeter Zijlstra 	graph_unlock();
5668a0b0fd53SBart Van Assche out_irq:
56698eddac3fSPeter Zijlstra 	raw_local_irq_restore(flags);
56708eddac3fSPeter Zijlstra }
56718eddac3fSPeter Zijlstra 
5672a0b0fd53SBart Van Assche /*
5673a0b0fd53SBart Van Assche  * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
5674a0b0fd53SBart Van Assche  * lockdep selftests.
5675a0b0fd53SBart Van Assche  */
5676a0b0fd53SBart Van Assche static void lockdep_reset_lock_imm(struct lockdep_map *lock)
5677a0b0fd53SBart Van Assche {
5678a0b0fd53SBart Van Assche 	struct pending_free *pf = delayed_free.pf;
5679a0b0fd53SBart Van Assche 	unsigned long flags;
5680a0b0fd53SBart Van Assche 
5681a0b0fd53SBart Van Assche 	raw_local_irq_save(flags);
5682248efb21SPeter Zijlstra 	lockdep_lock();
5683a0b0fd53SBart Van Assche 	__lockdep_reset_lock(pf, lock);
5684a0b0fd53SBart Van Assche 	__free_zapped_classes(pf);
5685248efb21SPeter Zijlstra 	lockdep_unlock();
5686a0b0fd53SBart Van Assche 	raw_local_irq_restore(flags);
5687a0b0fd53SBart Van Assche }
5688a0b0fd53SBart Van Assche 
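/*
 * Remove all state lockdep keeps for @lock, e.g. because the lock is about
 * to be freed or re-initialized. As above, the selftests use the immediate
 * variant and all other callers the RCU-deferred one.
 */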
5689a0b0fd53SBart Van Assche void lockdep_reset_lock(struct lockdep_map *lock)
5690a0b0fd53SBart Van Assche {
5691a0b0fd53SBart Van Assche 	init_data_structures_once();
5692a0b0fd53SBart Van Assche 
5693a0b0fd53SBart Van Assche 	if (inside_selftest())
5694a0b0fd53SBart Van Assche 		lockdep_reset_lock_imm(lock);
5695a0b0fd53SBart Van Assche 	else
5696a0b0fd53SBart Van Assche 		lockdep_reset_lock_reg(lock);
5697a0b0fd53SBart Van Assche }
5698a0b0fd53SBart Van Assche 
5699108c1485SBart Van Assche /* Unregister a dynamically allocated key. */
5700108c1485SBart Van Assche void lockdep_unregister_key(struct lock_class_key *key)
5701108c1485SBart Van Assche {
5702108c1485SBart Van Assche 	struct hlist_head *hash_head = keyhashentry(key);
5703108c1485SBart Van Assche 	struct lock_class_key *k;
5704108c1485SBart Van Assche 	struct pending_free *pf;
5705108c1485SBart Van Assche 	unsigned long flags;
5706108c1485SBart Van Assche 	bool found = false;
5707108c1485SBart Van Assche 
5708108c1485SBart Van Assche 	might_sleep();
5709108c1485SBart Van Assche 
5710108c1485SBart Van Assche 	if (WARN_ON_ONCE(static_obj(key)))
5711108c1485SBart Van Assche 		return;
5712108c1485SBart Van Assche 
5713108c1485SBart Van Assche 	raw_local_irq_save(flags);
57148b39adbeSBart Van Assche 	if (!graph_lock())
57158b39adbeSBart Van Assche 		goto out_irq;
57168b39adbeSBart Van Assche 
5717108c1485SBart Van Assche 	pf = get_pending_free();
5718108c1485SBart Van Assche 	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
5719108c1485SBart Van Assche 		if (k == key) {
5720108c1485SBart Van Assche 			hlist_del_rcu(&k->hash_entry);
5721108c1485SBart Van Assche 			found = true;
5722108c1485SBart Van Assche 			break;
5723108c1485SBart Van Assche 		}
5724108c1485SBart Van Assche 	}
5725108c1485SBart Van Assche 	WARN_ON_ONCE(!found);
5726108c1485SBart Van Assche 	__lockdep_free_key_range(pf, key, 1);
5727108c1485SBart Van Assche 	call_rcu_zapped(pf);
57288b39adbeSBart Van Assche 	graph_unlock();
57298b39adbeSBart Van Assche out_irq:
5730108c1485SBart Van Assche 	raw_local_irq_restore(flags);
5731108c1485SBart Van Assche 
5732108c1485SBart Van Assche 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
5733108c1485SBart Van Assche 	synchronize_rcu();
5734108c1485SBart Van Assche }
5735108c1485SBart Van Assche EXPORT_SYMBOL_GPL(lockdep_unregister_key);
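
/*
 * Illustrative pairing of lockdep_register_key()/lockdep_unregister_key()
 * for a lock embedded in a dynamically allocated object ("struct foo" is a
 * made-up example, not from this file):
 *
 *	struct foo {
 *		struct lock_class_key key;
 *		spinlock_t lock;
 *	};
 *
 *	lockdep_register_key(&foo->key);
 *	spin_lock_init(&foo->lock);
 *	lockdep_set_class(&foo->lock, &foo->key);
 *	...
 *	lockdep_unregister_key(&foo->key);	(may sleep)
 *	kfree(foo);
 */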
5736108c1485SBart Van Assche 
5737c3bc8fd6SJoel Fernandes (Google) void __init lockdep_init(void)
57388eddac3fSPeter Zijlstra {
57398eddac3fSPeter Zijlstra 	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
57408eddac3fSPeter Zijlstra 
57418eddac3fSPeter Zijlstra 	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
57428eddac3fSPeter Zijlstra 	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
57438eddac3fSPeter Zijlstra 	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
57448eddac3fSPeter Zijlstra 	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
57458eddac3fSPeter Zijlstra 	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
57468eddac3fSPeter Zijlstra 	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
57478eddac3fSPeter Zijlstra 	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
57488eddac3fSPeter Zijlstra 
574909d75ecbSBart Van Assche 	printk(" memory used by lock dependency info: %zu kB\n",
57507ff8517eSBart Van Assche 	       (sizeof(lock_classes) +
575101bb6f0aSYuyang Du 		sizeof(lock_classes_in_use) +
57527ff8517eSBart Van Assche 		sizeof(classhash_table) +
57537ff8517eSBart Van Assche 		sizeof(list_entries) +
5754ace35a7aSBart Van Assche 		sizeof(list_entries_in_use) +
5755a0b0fd53SBart Van Assche 		sizeof(chainhash_table) +
5756a0b0fd53SBart Van Assche 		sizeof(delayed_free)
57578eddac3fSPeter Zijlstra #ifdef CONFIG_PROVE_LOCKING
57587ff8517eSBart Van Assche 		+ sizeof(lock_cq)
575915ea86b5SBart Van Assche 		+ sizeof(lock_chains)
5760de4643a7SBart Van Assche 		+ sizeof(lock_chains_in_use)
576115ea86b5SBart Van Assche 		+ sizeof(chain_hlocks)
57628eddac3fSPeter Zijlstra #endif
57638eddac3fSPeter Zijlstra 		) / 1024
57648eddac3fSPeter Zijlstra 		);
57658eddac3fSPeter Zijlstra 
576612593b74SBart Van Assche #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
576712593b74SBart Van Assche 	printk(" memory used for stack traces: %zu kB\n",
576812593b74SBart Van Assche 	       (sizeof(stack_trace) + sizeof(stack_trace_hash)) / 1024
576912593b74SBart Van Assche 	       );
577012593b74SBart Van Assche #endif
577112593b74SBart Van Assche 
577209d75ecbSBart Van Assche 	printk(" per task-struct memory footprint: %zu bytes\n",
57737ff8517eSBart Van Assche 	       sizeof(((struct task_struct *)NULL)->held_locks));
57748eddac3fSPeter Zijlstra }
57758eddac3fSPeter Zijlstra 
57768eddac3fSPeter Zijlstra static void
57778eddac3fSPeter Zijlstra print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
57788eddac3fSPeter Zijlstra 		     const void *mem_to, struct held_lock *hlock)
57798eddac3fSPeter Zijlstra {
57808eddac3fSPeter Zijlstra 	if (!debug_locks_off())
57818eddac3fSPeter Zijlstra 		return;
57828eddac3fSPeter Zijlstra 	if (debug_locks_silent)
57838eddac3fSPeter Zijlstra 		return;
57848eddac3fSPeter Zijlstra 
5785681fbec8SPaul E. McKenney 	pr_warn("\n");
5786a5dd63efSPaul E. McKenney 	pr_warn("=========================\n");
5787a5dd63efSPaul E. McKenney 	pr_warn("WARNING: held lock freed!\n");
57888eddac3fSPeter Zijlstra 	print_kernel_ident();
5789a5dd63efSPaul E. McKenney 	pr_warn("-------------------------\n");
579004860d48SBorislav Petkov 	pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
57918eddac3fSPeter Zijlstra 		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
57928eddac3fSPeter Zijlstra 	print_lock(hlock);
57938eddac3fSPeter Zijlstra 	lockdep_print_held_locks(curr);
57948eddac3fSPeter Zijlstra 
5795681fbec8SPaul E. McKenney 	pr_warn("\nstack backtrace:\n");
57968eddac3fSPeter Zijlstra 	dump_stack();
57978eddac3fSPeter Zijlstra }
57988eddac3fSPeter Zijlstra 
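/*
 * Return non-zero if the ranges [mem_from, mem_from + mem_len) and
 * [lock_from, lock_from + lock_len) do not overlap.
 */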
57998eddac3fSPeter Zijlstra static inline int not_in_range(const void *mem_from, unsigned long mem_len,
58008eddac3fSPeter Zijlstra 				const void *lock_from, unsigned long lock_len)
58018eddac3fSPeter Zijlstra {
58028eddac3fSPeter Zijlstra 	return lock_from + lock_len <= mem_from ||
58038eddac3fSPeter Zijlstra 		mem_from + mem_len <= lock_from;
58048eddac3fSPeter Zijlstra }
58058eddac3fSPeter Zijlstra 
58068eddac3fSPeter Zijlstra /*
58078eddac3fSPeter Zijlstra  * Called when kernel memory is freed (or unmapped), or if a lock
58088eddac3fSPeter Zijlstra  * is destroyed or reinitialized - this code checks whether there is
58098eddac3fSPeter Zijlstra  * any held lock in the memory range [mem_from, mem_from + mem_len):
58108eddac3fSPeter Zijlstra  */
58118eddac3fSPeter Zijlstra void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
58128eddac3fSPeter Zijlstra {
58138eddac3fSPeter Zijlstra 	struct task_struct *curr = current;
58148eddac3fSPeter Zijlstra 	struct held_lock *hlock;
58158eddac3fSPeter Zijlstra 	unsigned long flags;
58168eddac3fSPeter Zijlstra 	int i;
58178eddac3fSPeter Zijlstra 
58188eddac3fSPeter Zijlstra 	if (unlikely(!debug_locks))
58198eddac3fSPeter Zijlstra 		return;
58208eddac3fSPeter Zijlstra 
5821fcc784beSSteven Rostedt (VMware) 	raw_local_irq_save(flags);
58228eddac3fSPeter Zijlstra 	for (i = 0; i < curr->lockdep_depth; i++) {
58238eddac3fSPeter Zijlstra 		hlock = curr->held_locks + i;
58248eddac3fSPeter Zijlstra 
58258eddac3fSPeter Zijlstra 		if (not_in_range(mem_from, mem_len, hlock->instance,
58268eddac3fSPeter Zijlstra 					sizeof(*hlock->instance)))
58278eddac3fSPeter Zijlstra 			continue;
58288eddac3fSPeter Zijlstra 
58298eddac3fSPeter Zijlstra 		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
58308eddac3fSPeter Zijlstra 		break;
58318eddac3fSPeter Zijlstra 	}
5832fcc784beSSteven Rostedt (VMware) 	raw_local_irq_restore(flags);
58338eddac3fSPeter Zijlstra }
58348eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
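
/*
 * Illustrative caller sketch (not from this file): memory allocators check a
 * region right before handing it back, along the lines of
 *
 *	debug_check_no_locks_freed(addr, size);
 *	... actually release the memory ...
 */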
58358eddac3fSPeter Zijlstra 
58368eddac3fSPeter Zijlstra static void print_held_locks_bug(void)
58378eddac3fSPeter Zijlstra {
58388eddac3fSPeter Zijlstra 	if (!debug_locks_off())
58398eddac3fSPeter Zijlstra 		return;
58408eddac3fSPeter Zijlstra 	if (debug_locks_silent)
58418eddac3fSPeter Zijlstra 		return;
58428eddac3fSPeter Zijlstra 
5843681fbec8SPaul E. McKenney 	pr_warn("\n");
5844a5dd63efSPaul E. McKenney 	pr_warn("====================================\n");
5845a5dd63efSPaul E. McKenney 	pr_warn("WARNING: %s/%d still has locks held!\n",
58468eddac3fSPeter Zijlstra 	       current->comm, task_pid_nr(current));
58478eddac3fSPeter Zijlstra 	print_kernel_ident();
5848a5dd63efSPaul E. McKenney 	pr_warn("------------------------------------\n");
58498eddac3fSPeter Zijlstra 	lockdep_print_held_locks(current);
5850681fbec8SPaul E. McKenney 	pr_warn("\nstack backtrace:\n");
58518eddac3fSPeter Zijlstra 	dump_stack();
58528eddac3fSPeter Zijlstra }
58538eddac3fSPeter Zijlstra 
58548eddac3fSPeter Zijlstra void debug_check_no_locks_held(void)
58558eddac3fSPeter Zijlstra {
58568eddac3fSPeter Zijlstra 	if (unlikely(current->lockdep_depth > 0))
58578eddac3fSPeter Zijlstra 		print_held_locks_bug();
58588eddac3fSPeter Zijlstra }
58598eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
58608eddac3fSPeter Zijlstra 
58618dce7a9aSSasha Levin #ifdef __KERNEL__
58628eddac3fSPeter Zijlstra void debug_show_all_locks(void)
58638eddac3fSPeter Zijlstra {
58648eddac3fSPeter Zijlstra 	struct task_struct *g, *p;
58658eddac3fSPeter Zijlstra 
58668eddac3fSPeter Zijlstra 	if (unlikely(!debug_locks)) {
5867681fbec8SPaul E. McKenney 		pr_warn("INFO: lockdep is turned off.\n");
58688eddac3fSPeter Zijlstra 		return;
58698eddac3fSPeter Zijlstra 	}
5870681fbec8SPaul E. McKenney 	pr_warn("\nShowing all locks held in the system:\n");
58718eddac3fSPeter Zijlstra 
58720f736a52STetsuo Handa 	rcu_read_lock();
58730f736a52STetsuo Handa 	for_each_process_thread(g, p) {
58740f736a52STetsuo Handa 		if (!p->lockdep_depth)
58750f736a52STetsuo Handa 			continue;
58768eddac3fSPeter Zijlstra 		lockdep_print_held_locks(p);
587788f1c87dSTejun Heo 		touch_nmi_watchdog();
58780f736a52STetsuo Handa 		touch_all_softlockup_watchdogs();
58790f736a52STetsuo Handa 	}
58800f736a52STetsuo Handa 	rcu_read_unlock();
58818eddac3fSPeter Zijlstra 
5882681fbec8SPaul E. McKenney 	pr_warn("\n");
5883a5dd63efSPaul E. McKenney 	pr_warn("=============================================\n\n");
58848eddac3fSPeter Zijlstra }
58858eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(debug_show_all_locks);
58868dce7a9aSSasha Levin #endif
58878eddac3fSPeter Zijlstra 
58888eddac3fSPeter Zijlstra /*
58898eddac3fSPeter Zijlstra  * Careful: only use this function if you are sure that
58908eddac3fSPeter Zijlstra  * the task cannot run in parallel!
58918eddac3fSPeter Zijlstra  */
58928eddac3fSPeter Zijlstra void debug_show_held_locks(struct task_struct *task)
58938eddac3fSPeter Zijlstra {
58948eddac3fSPeter Zijlstra 	if (unlikely(!debug_locks)) {
58958eddac3fSPeter Zijlstra 		printk("INFO: lockdep is turned off.\n");
58968eddac3fSPeter Zijlstra 		return;
58978eddac3fSPeter Zijlstra 	}
58988eddac3fSPeter Zijlstra 	lockdep_print_held_locks(task);
58998eddac3fSPeter Zijlstra }
59008eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(debug_show_held_locks);
59018eddac3fSPeter Zijlstra 
5902722a9f92SAndi Kleen asmlinkage __visible void lockdep_sys_exit(void)
59038eddac3fSPeter Zijlstra {
59048eddac3fSPeter Zijlstra 	struct task_struct *curr = current;
59058eddac3fSPeter Zijlstra 
59068eddac3fSPeter Zijlstra 	if (unlikely(curr->lockdep_depth)) {
59078eddac3fSPeter Zijlstra 		if (!debug_locks_off())
59088eddac3fSPeter Zijlstra 			return;
5909681fbec8SPaul E. McKenney 		pr_warn("\n");
5910a5dd63efSPaul E. McKenney 		pr_warn("================================================\n");
5911a5dd63efSPaul E. McKenney 		pr_warn("WARNING: lock held when returning to user space!\n");
59128eddac3fSPeter Zijlstra 		print_kernel_ident();
5913a5dd63efSPaul E. McKenney 		pr_warn("------------------------------------------------\n");
5914681fbec8SPaul E. McKenney 		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
59158eddac3fSPeter Zijlstra 				curr->comm, curr->pid);
59168eddac3fSPeter Zijlstra 		lockdep_print_held_locks(curr);
59178eddac3fSPeter Zijlstra 	}
5918b09be676SByungchul Park 
5919b09be676SByungchul Park 	/*
5920b09be676SByungchul Park 	 * The lock history for each syscall should be independent. So wipe the
5921b09be676SByungchul Park 	 * slate clean on return to userspace.
5922b09be676SByungchul Park 	 */
5923f52be570SPeter Zijlstra 	lockdep_invariant_state(false);
59248eddac3fSPeter Zijlstra }
59258eddac3fSPeter Zijlstra 
59268eddac3fSPeter Zijlstra void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
59278eddac3fSPeter Zijlstra {
59288eddac3fSPeter Zijlstra 	struct task_struct *curr = current;
59298eddac3fSPeter Zijlstra 
59308eddac3fSPeter Zijlstra 	/* Note: the following can be executed concurrently, so be careful. */
5931681fbec8SPaul E. McKenney 	pr_warn("\n");
5932a5dd63efSPaul E. McKenney 	pr_warn("=============================\n");
5933a5dd63efSPaul E. McKenney 	pr_warn("WARNING: suspicious RCU usage\n");
59348eddac3fSPeter Zijlstra 	print_kernel_ident();
5935a5dd63efSPaul E. McKenney 	pr_warn("-----------------------------\n");
5936681fbec8SPaul E. McKenney 	pr_warn("%s:%d %s!\n", file, line, s);
5937681fbec8SPaul E. McKenney 	pr_warn("\nother info that might help us debug this:\n\n");
5938681fbec8SPaul E. McKenney 	pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
59398eddac3fSPeter Zijlstra 	       !rcu_lockdep_current_cpu_online()
59408eddac3fSPeter Zijlstra 			? "RCU used illegally from offline CPU!\n"
59418eddac3fSPeter Zijlstra 			: "",
59428eddac3fSPeter Zijlstra 	       rcu_scheduler_active, debug_locks);
59438eddac3fSPeter Zijlstra 
59448eddac3fSPeter Zijlstra 	/*
59458eddac3fSPeter Zijlstra 	 * If a CPU is in the RCU-free window in idle (i.e. in the section
59468eddac3fSPeter Zijlstra 	 * between rcu_idle_enter() and rcu_idle_exit()), then RCU
59478eddac3fSPeter Zijlstra 	 * considers that CPU to be in an "extended quiescent state",
59488eddac3fSPeter Zijlstra 	 * which means that RCU will be completely ignoring that CPU.
59498eddac3fSPeter Zijlstra 	 * Therefore, rcu_read_lock() and friends have absolutely no
59508eddac3fSPeter Zijlstra 	 * effect on a CPU running in that state. In other words, even if
59518eddac3fSPeter Zijlstra 	 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
59528eddac3fSPeter Zijlstra 	 * delete data structures out from under it.  RCU really has no
59538eddac3fSPeter Zijlstra 	 * choice here: we need to keep an RCU-free window in idle where
59548eddac3fSPeter Zijlstra 	 * the CPU may possibly enter into low power mode. This way we can
59558eddac3fSPeter Zijlstra 	 * report an extended quiescent state to other CPUs that started a grace
59568eddac3fSPeter Zijlstra 	 * period. Otherwise we would delay any grace period as long as we run
59578eddac3fSPeter Zijlstra 	 * in the idle task.
59588eddac3fSPeter Zijlstra 	 *
59598eddac3fSPeter Zijlstra 	 * So complain bitterly if someone does call rcu_read_lock(),
59608eddac3fSPeter Zijlstra 	 * rcu_read_lock_bh() and so on from extended quiescent states.
59618eddac3fSPeter Zijlstra 	 */
59628eddac3fSPeter Zijlstra 	if (!rcu_is_watching())
5963681fbec8SPaul E. McKenney 		pr_warn("RCU used illegally from extended quiescent state!\n");
59648eddac3fSPeter Zijlstra 
59658eddac3fSPeter Zijlstra 	lockdep_print_held_locks(curr);
5966681fbec8SPaul E. McKenney 	pr_warn("\nstack backtrace:\n");
59678eddac3fSPeter Zijlstra 	dump_stack();
59688eddac3fSPeter Zijlstra }
59698eddac3fSPeter Zijlstra EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
5970