/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/locking/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USED_READ,
	LOCK_USAGE_STATES,
};
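
/*
 * Illustration, not part of the build: with the two IRQ states
 * currently listed in lockdep_states.h (HARDIRQ and SOFTIRQ), the
 * x-macro block above expands to:
 *
 *	LOCK_USED_IN_HARDIRQ = 0,	LOCK_USED_IN_HARDIRQ_READ = 1,
 *	LOCK_ENABLED_HARDIRQ = 2,	LOCK_ENABLED_HARDIRQ_READ = 3,
 *	LOCK_USED_IN_SOFTIRQ = 4,	LOCK_USED_IN_SOFTIRQ_READ = 5,
 *	LOCK_ENABLED_SOFTIRQ = 6,	LOCK_ENABLED_SOFTIRQ_READ = 7,
 *	LOCK_USED = 8, LOCK_USED_READ = 9, LOCK_USAGE_STATES = 10
 */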

/* states after LOCK_USED_READ are not traced and printed */
static_assert(LOCK_TRACE_STATES == LOCK_USAGE_STATES);

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
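
/*
 * Illustration, not part of the build: given the enum layout above,
 * bit 0 of a usage bit selects the _READ variant and bit 1 selects
 * the direction, ENABLED_ vs. USED_IN_, e.g.:
 *
 *	LOCK_USED_IN_HARDIRQ_READ == LOCK_USED_IN_HARDIRQ | LOCK_USAGE_READ_MASK
 *	LOCK_ENABLED_HARDIRQ	  == LOCK_USED_IN_HARDIRQ | LOCK_USAGE_DIR_MASK
 */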

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
	__LOCKF(USED_READ)
};
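
/*
 * Illustration, not part of the build: each LOCKF_* constant is the
 * corresponding usage bit turned into a mask, e.g. with the default
 * two IRQ states:
 *
 *	LOCKF_USED_IN_HARDIRQ == (1 << LOCK_USED_IN_HARDIRQ) == 0x001
 *	LOCKF_ENABLED_SOFTIRQ == (1 << LOCK_ENABLED_SOFTIRQ) == 0x040
 *	LOCKF_USED	      == (1 << LOCK_USED)	      == 0x100
 */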

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE
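
/*
 * Illustration, not part of the build: re-including lockdep_states.h
 * with LOCKDEP_STATE() defined as "LOCKF_xxx_##__STATE |" turns each
 * initializer above into an OR over all states, terminated by the
 * literal 0, e.g.:
 *
 *	static const unsigned long LOCKF_ENABLED_IRQ =
 *		LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ | 0;
 */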

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc, where .text, .data and
 * .bss must fit within the kernel's 32MB size limit. With CONFIG_LOCKDEP
 * we could exceed that limit and break system boot-up, so reduce the
 * static allocations for lockdep-related structures until everything
 * fits within it.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency table (if it's not there yet), and we check them for
 * lock order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#define STACK_TRACE_HASH_SIZE	8192
#else
#define MAX_LOCKDEP_ENTRIES	(1UL << CONFIG_LOCKDEP_BITS)

#define MAX_LOCKDEP_CHAINS_BITS	CONFIG_LOCKDEP_CHAINS_BITS

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	(1UL << CONFIG_LOCKDEP_STACK_TRACE_BITS)
#define STACK_TRACE_HASH_SIZE	(1 << CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS)
#endif
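
/*
 * For orientation only, actual values depend on the .config: with the
 * Kconfig defaults (CONFIG_LOCKDEP_BITS=15, CONFIG_LOCKDEP_CHAINS_BITS=16,
 * CONFIG_LOCKDEP_STACK_TRACE_BITS=19,
 * CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS=14) the non-SMALL case gives
 * 32768 dependency entries, 65536 chains, 524288 stack-trace entries
 * and a 16384-bucket stack-trace hash.
 */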

/*
 * Bit definitions for lock_chain.irq_context
 */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT	(1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT	(1 << 1)

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define AVG_LOCKDEP_CHAIN_DEPTH		5
#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS * AVG_LOCKDEP_CHAIN_DEPTH)
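
/*
 * Illustration: all chains share one pool of held-lock slots, sized
 * for an average chain depth of 5; e.g. with MAX_LOCKDEP_CHAINS_BITS=16
 * that is 65536 * 5 == 327680 slots.
 */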

extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
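
/*
 * get_usage_chars() emits two characters per IRQ state (for the
 * USED_IN and USED_IN_READ usage of the class), so LOCK_USAGE_CHARS
 * is 2 chars * XXX_LOCK_USAGE_STATES states + 1 trailing NUL
 * (5 with the default two states).
 */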

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
				  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_zapped_classes;
extern unsigned long nr_zapped_lock_chains;
extern unsigned long nr_list_entries;
extern unsigned long nr_dynamic_keys;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int nr_free_chain_hlocks;
extern unsigned int nr_lost_chain_hlocks;
extern unsigned int nr_large_chain_blocks;

extern unsigned int max_lockdep_depth;
extern unsigned int max_bfs_queue_depth;
extern unsigned long max_lock_class_idx;

extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
/* Bitmap with one bit per lock_classes[] slot, set while it is in use */
extern unsigned long lock_classes_in_use[];

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics. We keep them per-CPU, as they are often
 * updated in fast paths, and we want to avoid cache-line bouncing.
 */
struct lockdep_stats {
	unsigned long  chain_lookup_hits;
	unsigned int   chain_lookup_misses;
	unsigned long  hardirqs_on_events;
	unsigned long  hardirqs_off_events;
	unsigned long  redundant_hardirqs_on;
	unsigned long  redundant_hardirqs_off;
	unsigned long  softirqs_on_events;
	unsigned long  softirqs_off_events;
	unsigned long  redundant_softirqs_on;
	unsigned long  redundant_softirqs_off;
	int            nr_unused_locks;
	unsigned int   nr_redundant_checks;
	unsigned int   nr_redundant;
	unsigned int   nr_cyclic_checks;
	unsigned int   nr_find_usage_forwards_checks;
	unsigned int   nr_find_usage_backwards_checks;

	/*
	 * Per-lock-class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

/*
 * debug_atomic_inc/dec() must run with irqs disabled (hence the
 * WARN_ON_ONCE()), which makes the cheaper, non-irq-safe __this_cpu_*
 * ops safe to use; __debug_atomic_inc() uses the irq-safe
 * this_cpu_inc() and may be called with irqs enabled.
 */
#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr)

#define debug_atomic_inc(ptr)			do {		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
} while (0)

#define debug_atomic_dec(ptr)			do {		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
} while (0)

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
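
/*
 * Usage sketch (see kernel/locking/lockdep.c and lockdep_proc.c): hot
 * paths bump a counter with e.g. debug_atomic_inc(chain_lookup_hits)
 * while irqs are disabled, and /proc/lockdep_stats later sums the
 * per-CPU values with debug_atomic_read(chain_lookup_hits).
 */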

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	/* Index of this class within the static lock_classes[] array */
	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif
264