/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_TYPES_H
#define __LINUX_LOCKDEP_TYPES_H

#include <linux/types.h>

#define MAX_LOCKDEP_SUBCLASSES		8UL

enum lockdep_wait_type {
	LD_WAIT_INV = 0,	/* not checked, catch all */

	LD_WAIT_FREE,		/* wait free, rcu etc.. */
	LD_WAIT_SPIN,		/* spin loops, raw_spinlock_t etc.. */

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
	LD_WAIT_CONFIG,		/* preemptible in PREEMPT_RT, spinlock_t etc.. */
#else
	LD_WAIT_CONFIG = LD_WAIT_SPIN,
#endif
	LD_WAIT_SLEEP,		/* sleeping locks, struct mutex etc.. */

	LD_WAIT_MAX,		/* must be last */
};
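
/*
 * Example (illustrative only, hypothetical lock names): with
 * CONFIG_PROVE_RAW_LOCK_NESTING the validator flags acquiring a lock
 * whose inner wait type is "heavier" than the context it is taken in,
 * since on PREEMPT_RT that would mean sleeping inside a non-sleeping
 * lock:
 *
 *	raw_spin_lock(&raw_lock);	context is LD_WAIT_SPIN
 *	spin_lock(&lock);		LD_WAIT_CONFIG inside LD_WAIT_SPIN:
 *					invalid, spinlock_t can sleep on RT
 */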

enum lockdep_lock_type {
	LD_LOCK_NORMAL = 0,	/* normal, catch all */
	LD_LOCK_PERCPU,		/* percpu */
	LD_LOCK_WAIT_OVERRIDE,	/* annotation */
	LD_LOCK_MAX,
};

#ifdef CONFIG_LOCKDEP

/*
 * We'd rather not expose kernel/lockdep_states.h this widely, but we do
 * need the total number of states... :-(
 *
 * XXX_LOCK_USAGE_STATES is the number of lines in lockdep_states.h; for
 * each of those we generate 4 states. Additionally we report on USED and
 * USED_READ.
 */
#define XXX_LOCK_USAGE_STATES		2
#define LOCK_TRACE_STATES		(XXX_LOCK_USAGE_STATES*4 + 2)
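
/*
 * Worked out (a note added here for clarity): lockdep_states.h currently
 * lists two states, HARDIRQ and SOFTIRQ, and each expands to four usage
 * bits (USED_IN, USED_IN_READ, ENABLED, ENABLED_READ), so:
 *
 *	LOCK_TRACE_STATES = 2 * 4 + 2 = 10
 */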

/*
 * NR_LOCKDEP_CACHING_CLASSES is the number of classes that can be
 * cached in an instance of lockdep_map.
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock with a
 * single-depth subclass annotation.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2
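
/*
 * Example (a simplified sketch of the scheduler's double_rq_lock();
 * rq1/rq2 are hypothetical here): both cache slots are hit when two
 * runqueue locks of the same class are taken with a subclass annotation:
 *
 *	raw_spin_lock(&rq1->lock);				  subclass 0
 *	raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);  subclass 1
 */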

/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
	union {
		struct hlist_node		hash_entry;
		struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
	};
};
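
/*
 * Example (a minimal sketch; the struct and function names are
 * hypothetical): a dynamically allocated object carrying a dynamically
 * allocated key for its lock. The key is registered before first use and
 * unregistered before its memory is freed, as required above:
 *
 *	struct my_object {
 *		spinlock_t lock;
 *		struct lock_class_key key;
 *	};
 *
 *	struct my_object *my_object_create(void)
 *	{
 *		struct my_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *		if (!obj)
 *			return NULL;
 *		lockdep_register_key(&obj->key);
 *		spin_lock_init(&obj->lock);
 *		lockdep_set_class(&obj->lock, &obj->key);
 *		return obj;
 *	}
 *
 *	void my_object_destroy(struct my_object *obj)
 *	{
 *		lockdep_unregister_key(&obj->key);
 *		kfree(obj);
 *	}
 */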

extern struct lock_class_key __lockdep_no_validate__;
extern struct lock_class_key __lockdep_no_track__;

struct lock_trace;

#define LOCKSTAT_POINTS		4

struct lockdep_map;
typedef int (*lock_cmp_fn)(const struct lockdep_map *a,
			   const struct lockdep_map *b);
typedef void (*lock_print_fn)(const struct lockdep_map *map);
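
/*
 * Example (a minimal sketch): a cmp_fn roughly defines a legal ordering
 * among held locks of the same class, so that taking B while already
 * holding same-class A is accepted when the comparison says A orders
 * before B; print_fn, if given, describes the instances when that order
 * is violated. At this revision such functions are installed with the
 * lock_set_cmp_fn() helper from <linux/lockdep.h>. Ordering same-class
 * instances by map address:
 *
 *	static int my_cmp_fn(const struct lockdep_map *a,
 *			     const struct lockdep_map *b)
 *	{
 *		return a < b ? -1 : a > b ? 1 : 0;
 *	}
 *
 *	lock_set_cmp_fn(&obj->lock, my_cmp_fn, NULL);
 */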

/*
 * The lock-class itself. The order of the structure members matters.
 * reinit_class() zeroes the key member and all subsequent members.
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
	 * when not in use. Instances that are being freed are on one of the
	 * zapped_classes lists.
	 */
	struct list_head		lock_entry;

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	const struct lockdep_subclass_key *key;
	lock_cmp_fn			cmp_fn;
	lock_print_fn			print_fn;

	unsigned int			subclass;
	/*
	 * Generation counter, used during certain kinds of graph walking
	 * to ensure that we visit each node only once:
	 */
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	const struct lock_trace		*usage_traces[LOCK_TRACE_STATES];

	const char			*name;
	/*
	 * Version counter: distinguishes lock classes that share the same
	 * name (reports print them as "name", "name#2", ...):
	 */
	int				name_version;

	u8				wait_type_inner;
	u8				wait_type_outer;
	u8				lock_type;
	/* u8				hole; */

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
	u8				wait_type_outer; /* can be taken in this context */
	u8				wait_type_inner; /* represents this context */
	u8				lock_type;
	/* u8				hole; */
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};
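
/*
 * Example (a minimal sketch; my_lock and its helpers are hypothetical):
 * a custom locking primitive embeds a lockdep_map and drives the
 * validator with lock_acquire()/lock_release() from <linux/lockdep.h>.
 * The lock_acquire() arguments below mean: subclass 0, not a trylock,
 * exclusive (not a read lock), full checks, no nest_lock:
 *
 *	struct my_lock {
 *		atomic_t		state;
 *		struct lockdep_map	dep_map;
 *	};
 *
 *	#define my_lock_init(l)						\
 *	do {								\
 *		static struct lock_class_key __key;			\
 *									\
 *		atomic_set(&(l)->state, 0);				\
 *		lockdep_init_map(&(l)->dep_map, #l, &__key, 0);		\
 *	} while (0)
 *
 *	void my_lock_acquire(struct my_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		... take the lock itself ...
 *	}
 *
 *	void my_lock_release(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, _RET_IP_);
 *		... drop the lock itself ...
 *	}
 */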

struct pin_cookie { unsigned int val; };
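
/*
 * Example (illustrative): pin cookies back the lockdep_pin_lock() family
 * in <linux/lockdep.h>, which lets code assert that a lock stays held
 * across a region even though the region never touches it directly:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
 *	... code that must not drop rq->lock ...
 *	lockdep_unpin_lock(&rq->lock, cookie);
 */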

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * to INITIAL_CHAIN_KEY), here we store the previous hash value
	 * (a sketch of the computation follows this structure):
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int sync:1;
	unsigned int references:11;					/* 32 bits */
	unsigned int pin_count;
};
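
/*
 * A simplified sketch of the chain-key computation referred to above
 * (the real mixing function lives in kernel/locking/lockdep.c):
 *
 *	u64 chain_key = INITIAL_CHAIN_KEY;
 *
 *	for each entry hlock on the lock stack, outermost first:
 *		hlock->prev_chain_key = chain_key;
 *		chain_key = mix(chain_key, id of hlock's class + read state);
 *
 * Every prefix of the lock stack thus gets its own key, and a cache hit
 * on the final key lets the validator skip re-walking the dependencies.
 */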

#else /* !CONFIG_LOCKDEP */

/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

struct pin_cookie { };

#endif /* !CONFIG_LOCKDEP */

#endif /* __LINUX_LOCKDEP_TYPES_H */