/*
 * kernel/locking/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};

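/*
 * For illustration: assuming lockdep_states.h lists HARDIRQ, SOFTIRQ
 * and RECLAIM_FS (the set of states in this tree), the enum above
 * expands to
 *
 *	LOCK_USED_IN_HARDIRQ,
 *	LOCK_USED_IN_HARDIRQ_READ,
 *	LOCK_ENABLED_HARDIRQ,
 *	LOCK_ENABLED_HARDIRQ_READ,
 *	...likewise for SOFTIRQ and RECLAIM_FS...
 *	LOCK_USED,
 *	LOCK_USAGE_STATES
 *
 * i.e. four bits per state, plus LOCK_USED, with LOCK_USAGE_STATES
 * serving as the total bit count rather than a real usage bit.
 */
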
/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};

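/*
 * Each LOCKF_* constant is just the corresponding usage bit turned
 * into a mask, e.g. (assuming HARDIRQ is the first state listed in
 * lockdep_states.h):
 *
 *	LOCKF_USED_IN_HARDIRQ      = (1 << LOCK_USED_IN_HARDIRQ)      = 0x01
 *	LOCKF_USED_IN_HARDIRQ_READ = (1 << LOCK_USED_IN_HARDIRQ_READ) = 0x02
 *
 * so a class's usage_mask can be tested against combinations of them.
 */
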
#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

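/*
 * A minimal sketch of how the combined masks get used (hypothetical
 * helper, not part of lockdep proper): a class that was ever acquired
 * in hard/soft IRQ context *and* was ever held with IRQs enabled is a
 * candidate for an IRQ-safe vs. IRQ-unsafe inversion.
 */
static inline int lock_class_irq_inversion_candidate(struct lock_class *class)
{
	return (class->usage_mask & LOCKF_USED_IN_IRQ) &&
	       (class->usage_mask & LOCKF_ENABLED_IRQ);
}
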
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency table (if it's not there yet), and we check them for
 * lock order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	32768UL

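/*
 * For scale: lockdep.c backs this with a static array along the lines
 * of (a sketch, not necessarily the exact declaration)
 *
 *	static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 *
 * so raising the limit costs static memory, not runtime allocations.
 */
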
#define MAX_LOCKDEP_CHAINS_BITS	16
#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)

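/*
 * With the values above that is 1 << 16 = 65536 lock chains and
 * 65536 * 5 = 327680 chain_hlocks entries, i.e. room for an average
 * depth of five held locks per cached chain.
 */
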
/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL

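/*
 * lockdep.c keeps these in one flat static array (again a sketch, not
 * the exact declaration):
 *
 *	static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
 *
 * with each saved backtrace occupying a contiguous slice of it.
 */
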
extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);

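/*
 * get_usage_chars() emits two characters per state (write usage, then
 * read usage) plus a terminating NUL, which is what LOCK_USAGE_CHARS
 * above accounts for. Per Documentation/locking/lockdep-design.txt:
 * '-' means the class was ever acquired in that IRQ context, '+' that
 * it was ever acquired with that IRQ enabled, '?' both, '.' neither.
 */
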
extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;

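/*
 * Most of the counters above are bumped from the validator paths in
 * lockdep.c and reported by lockdep_proc.c through /proc/lockdep_stats.
 */
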
#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

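/*
 * These walk the dependency graph via BFS; lockdep_proc.c uses them
 * for the per-class forward/backward dependency counts (the FD:/BD:
 * columns) in /proc/lockdep. The !PROVE_LOCKING stubs above simply
 * report zero.
 */
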
#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-CPU, as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr)

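/*
 * __debug_atomic_inc() uses the IRQ- and preemption-safe
 * this_cpu_inc() and may be called from any context; the variants
 * below assert that IRQs are already off, which makes the cheaper
 * non-atomic __this_cpu_*() accessors safe to use.
 */
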
#define debug_atomic_inc(ptr)			do {		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
} while (0)

#define debug_atomic_dec(ptr)			do {		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
} while (0)

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif
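
/*
 * A minimal usage sketch (illustrative, not code from this file): the
 * chain-cache lookup path bumps a counter with IRQs already disabled,
 * and the /proc side sums it over all possible CPUs:
 *
 *	debug_atomic_inc(chain_lookup_hits);
 *	seq_printf(m, " chain lookup hits: %11llu\n",
 *		   debug_atomic_read(chain_lookup_hits));
 */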