xref: /freebsd/sys/kern/subr_witness.c (revision 529a53abe2287eae08a3af62749273df775254e9)
1 /*-
2  * Copyright (c) 2008 Isilon Systems, Inc.
3  * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
4  * Copyright (c) 1998 Berkeley Software Design, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Berkeley Software Design Inc's name may not be used to endorse or
16  *    promote products derived from this software without specific prior
17  *    written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
32  *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
33  */
34 
35 /*
36  * Implementation of the `witness' lock verifier.  Originally implemented for
37  * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
38  * classes in FreeBSD.
39  */
40 
41 /*
42  *	Main Entry: witness
43  *	Pronunciation: 'wit-n&s
44  *	Function: noun
45  *	Etymology: Middle English witnesse, from Old English witnes knowledge,
46  *	    testimony, witness, from 2wit
47  *	Date: before 12th century
48  *	1 : attestation of a fact or event : TESTIMONY
49  *	2 : one that gives evidence; specifically : one who testifies in
50  *	    a cause or before a judicial tribunal
51  *	3 : one asked to be present at a transaction so as to be able to
52  *	    testify to its having taken place
53  *	4 : one who has personal knowledge of something
54  *	5 a : something serving as evidence or proof : SIGN
55  *	  b : public affirmation by word or example of usually
56  *	      religious faith or conviction <the heroic witness to divine
57  *	      life -- Pilot>
58  *	6 capitalized : a member of the Jehovah's Witnesses
59  */
60 
61 /*
62  * Special rules concerning Giant and lock orders:
63  *
64  * 1) Giant must be acquired before any other mutexes.  Stated another way,
65  *    no other mutex may be held when Giant is acquired.
66  *
67  * 2) Giant must be released when blocking on a sleepable lock.
68  *
69  * This rule is less obvious, but is a result of Giant providing the same
70  * semantics as spl().  Basically, when a thread sleeps, it must release
71  * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
72  * 2).
73  *
74  * 3) Giant may be acquired before or after sleepable locks.
75  *
76  * This rule is also not quite as obvious.  Giant may be acquired after
77  * a sleepable lock because it is a non-sleepable lock and non-sleepable
78  * locks may always be acquired while holding a sleepable lock.  The second
79  * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
80  * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
81  * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
82  * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
83  * execute.  Thus, acquiring Giant both before and after a sleepable lock
84  * will not result in a lock order reversal.
85  */
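/*
 * Illustrative sketch (not compiled): under rule 3 above, a thread may take
 * Giant on either side of a sleepable lock.  The sx lock "example_sx" is a
 * hypothetical lock used only for illustration.
 */
#if 0
static struct sx example_sx;

static void
giant_order_example(void)
{

	/* Giant after a sleepable lock: always fine (rule 3, first case). */
	sx_xlock(&example_sx);
	mtx_lock(&Giant);
	mtx_unlock(&Giant);
	sx_xunlock(&example_sx);

	/*
	 * Giant before a sleepable lock: also fine, because blocking on
	 * example_sx drops Giant (rule 2), so no deadlock can result.
	 */
	mtx_lock(&Giant);
	sx_xlock(&example_sx);
	sx_xunlock(&example_sx);
	mtx_unlock(&Giant);
}
#endif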
86 
87 #include <sys/cdefs.h>
88 __FBSDID("$FreeBSD$");
89 
90 #include "opt_ddb.h"
91 #include "opt_hwpmc_hooks.h"
92 #include "opt_stack.h"
93 #include "opt_witness.h"
94 
95 #include <sys/param.h>
96 #include <sys/bus.h>
97 #include <sys/kdb.h>
98 #include <sys/kernel.h>
99 #include <sys/ktr.h>
100 #include <sys/lock.h>
101 #include <sys/malloc.h>
102 #include <sys/mutex.h>
103 #include <sys/priv.h>
104 #include <sys/proc.h>
105 #include <sys/sbuf.h>
106 #include <sys/sched.h>
107 #include <sys/stack.h>
108 #include <sys/sysctl.h>
109 #include <sys/systm.h>
110 
111 #ifdef DDB
112 #include <ddb/ddb.h>
113 #endif
114 
115 #include <machine/stdarg.h>
116 
117 #if !defined(DDB) && !defined(STACK)
118 #error "DDB or STACK options are required for WITNESS"
119 #endif
120 
121 /* Note that these traces do not work with KTR_ALQ. */
122 #if 0
123 #define	KTR_WITNESS	KTR_SUBSYS
124 #else
125 #define	KTR_WITNESS	0
126 #endif
127 
128 #define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
129 #define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
130 #define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */
131 
132 /* Define this to check for blessed mutexes */
133 #undef BLESSING
134 
135 #define	WITNESS_COUNT 		1536
136 #define	WITNESS_CHILDCOUNT 	(WITNESS_COUNT * 4)
137 #define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
138 #define	WITNESS_PENDLIST	1024
139 
140 /* Allocate 256 KB of stack data space */
141 #define	WITNESS_LO_DATA_COUNT	2048
142 
143 /* Prime, gives load factor of ~2 at full load */
144 #define	WITNESS_LO_HASH_SIZE	1021
145 
146 /*
147  * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
148  * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
149  * probably be safe for the most part, but it's still a SWAG.
150  */
151 #define	LOCK_NCHILDREN	5
152 #define	LOCK_CHILDCOUNT	2048
153 
154 #define	MAX_W_NAME	64
155 
156 #define	BADSTACK_SBUF_SIZE	(256 * WITNESS_COUNT)
157 #define	FULLGRAPH_SBUF_SIZE	512
158 
159 /*
160  * These flags go in the witness relationship matrix and describe the
161  * relationship between any two struct witness objects.
162  */
163 #define	WITNESS_UNRELATED        0x00    /* No lock order relation. */
164 #define	WITNESS_PARENT           0x01    /* Parent, aka direct ancestor. */
165 #define	WITNESS_ANCESTOR         0x02    /* Direct or indirect ancestor. */
166 #define	WITNESS_CHILD            0x04    /* Child, aka direct descendant. */
167 #define	WITNESS_DESCENDANT       0x08    /* Direct or indirect descendant. */
168 #define	WITNESS_ANCESTOR_MASK    (WITNESS_PARENT | WITNESS_ANCESTOR)
169 #define	WITNESS_DESCENDANT_MASK  (WITNESS_CHILD | WITNESS_DESCENDANT)
170 #define	WITNESS_RELATED_MASK						\
171 	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
172 #define	WITNESS_REVERSAL         0x10    /* A lock order reversal has been
173 					  * observed. */
174 #define	WITNESS_RESERVED1        0x20    /* Unused flag, reserved. */
175 #define	WITNESS_RESERVED2        0x40    /* Unused flag, reserved. */
176 #define	WITNESS_LOCK_ORDER_KNOWN 0x80    /* This lock order is known. */
177 
178 /* Descendant to ancestor flags */
179 #define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)
180 
181 /* Ancestor to descendant flags */
182 #define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)
183 
184 #define	WITNESS_INDEX_ASSERT(i)						\
185 	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT)
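/*
 * Illustrative sketch (not compiled): WITNESS_ATOD() turns parent/ancestor
 * bits into the matching child/descendant bits and WITNESS_DTOA() does the
 * reverse, so the two directions stored in the relationship matrix stay in
 * sync.
 */
#if 0
static void
witness_flag_shift_example(void)
{

	MPASS(WITNESS_ATOD(WITNESS_PARENT | WITNESS_ANCESTOR) ==
	    (WITNESS_CHILD | WITNESS_DESCENDANT));
	MPASS(WITNESS_DTOA(WITNESS_CHILD | WITNESS_DESCENDANT) ==
	    (WITNESS_PARENT | WITNESS_ANCESTOR));
}
#endif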
186 
187 static MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");
188 
189 /*
190  * Lock instances.  A lock instance is the data associated with a lock while
191  * it is held by witness.  For example, a lock instance will hold the
192  * recursion count of a lock.  Lock instances are held in lists.  Spin locks
193  * are held in a per-cpu list while sleep locks are held in a per-thread list.
194  */
195 struct lock_instance {
196 	struct lock_object	*li_lock;
197 	const char		*li_file;
198 	int			li_line;
199 	u_int			li_flags;
200 };
201 
202 /*
203  * A simple list type used to build the list of locks held by a thread
204  * or CPU.  We can't simply embed the list in struct lock_object since a
205  * lock may be held by more than one thread if it is a shared lock.  Locks
206  * are added to the head of the list, so we fill up each list entry from
207  * "the back" logically.  To ease some of the arithmetic, we actually fill
208  * in each list entry the normal way (children[0] then children[1], etc.) but
209  * when we traverse the list we read children[count-1] as the first entry
210  * down to children[0] as the final entry.
211  */
212 struct lock_list_entry {
213 	struct lock_list_entry	*ll_next;
214 	struct lock_instance	ll_children[LOCK_NCHILDREN];
215 	u_int			ll_count;
216 };
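/*
 * Illustrative sketch (not compiled): walking a lock list from the most
 * recently acquired instance to the oldest, as described above.  The
 * "handle_instance" callback is hypothetical and used only for illustration.
 */
#if 0
static void
lock_list_walk_example(struct lock_list_entry *lle,
    void (*handle_instance)(struct lock_instance *))
{
	int i;

	for (; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--)
			handle_instance(&lle->ll_children[i]);
}
#endif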
217 
218 /*
219  * The main witness structure. One of these per named lock type in the system
220  * (for example, "vnode interlock").
221  */
222 struct witness {
223 	char  			w_name[MAX_W_NAME];
224 	uint32_t 		w_index;  /* Index in the relationship matrix */
225 	struct lock_class	*w_class;
226 	STAILQ_ENTRY(witness) 	w_list;		/* List of all witnesses. */
227 	STAILQ_ENTRY(witness) 	w_typelist;	/* Witnesses of a type. */
228 	struct witness		*w_hash_next; /* Linked list in hash buckets. */
229 	const char		*w_file; /* File where last acquired */
230 	uint32_t 		w_line; /* Line where last acquired */
231 	uint32_t 		w_refcount;
232 	uint16_t 		w_num_ancestors; /* direct/indirect
233 						  * ancestor count */
234 	uint16_t 		w_num_descendants; /* direct/indirect
235 						    * descendant count */
236 	int16_t 		w_ddb_level;
237 	unsigned		w_displayed:1;
238 	unsigned		w_reversed:1;
239 };
240 
241 STAILQ_HEAD(witness_list, witness);
242 
243 /*
244  * The witness hash table. Keys are witness names (const char *), elements are
245  * witness objects (struct witness *).
246  */
247 struct witness_hash {
248 	struct witness	*wh_array[WITNESS_HASH_SIZE];
249 	uint32_t	wh_size;
250 	uint32_t	wh_count;
251 };
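/*
 * Illustrative sketch (not compiled), assuming the classic djb2 algorithm
 * that witness_hash_djb2() is named after: hash = hash * 33 + c, reduced
 * modulo the table size to pick a bucket.  The real routine may differ in
 * detail.
 */
#if 0
static uint32_t
djb2_example(const char *key)
{
	uint32_t hash = 5381;

	for (; *key != '\0'; key++)
		hash = ((hash << 5) + hash) + (uint8_t)*key;
	return (hash % WITNESS_HASH_SIZE);
}
#endif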
252 
253 /*
254  * Key type for the lock order data hash table.
255  */
256 struct witness_lock_order_key {
257 	uint16_t	from;
258 	uint16_t	to;
259 };
260 
261 struct witness_lock_order_data {
262 	struct stack			wlod_stack;
263 	struct witness_lock_order_key	wlod_key;
264 	struct witness_lock_order_data	*wlod_next;
265 };
266 
267 /*
268  * The witness lock order data hash table. Keys are witness index tuples
269  * (struct witness_lock_order_key), elements are lock order data objects
270  * (struct witness_lock_order_data).
271  */
272 struct witness_lock_order_hash {
273 	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
274 	u_int	wloh_size;
275 	u_int	wloh_count;
276 };
277 
278 #ifdef BLESSING
279 struct witness_blessed {
280 	const char	*b_lock1;
281 	const char	*b_lock2;
282 };
283 #endif
284 
285 struct witness_pendhelp {
286 	const char		*wh_type;
287 	struct lock_object	*wh_lock;
288 };
289 
290 struct witness_order_list_entry {
291 	const char		*w_name;
292 	struct lock_class	*w_class;
293 };
294 
295 /*
296  * Returns 0 if one of the locks is a spin lock and the other is not.
297  * Returns 1 otherwise.
298  */
299 static __inline int
300 witness_lock_type_equal(struct witness *w1, struct witness *w2)
301 {
302 
303 	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
304 		(w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
305 }
306 
307 static __inline int
308 witness_lock_order_key_equal(const struct witness_lock_order_key *a,
309     const struct witness_lock_order_key *b)
310 {
311 
312 	return (a->from == b->from && a->to == b->to);
313 }
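/*
 * Illustrative sketch (not compiled): looking up recorded data for a
 * (parent, child) lock order using the key above.  The real lookup lives in
 * witness_lock_order_get(); this is only a simplified outline of the idea,
 * and the exact hashing details are an assumption.
 */
#if 0
static struct witness_lock_order_data *
lock_order_lookup_example(struct witness *parent, struct witness *child)
{
	struct witness_lock_order_data *data;
	struct witness_lock_order_key key;
	uint32_t hash;

	key.from = parent->w_index;
	key.to = child->w_index;
	hash = witness_hash_djb2((const uint8_t *)&key, sizeof(key)) %
	    w_lohash.wloh_size;
	for (data = w_lohash.wloh_array[hash]; data != NULL;
	    data = data->wlod_next)
		if (witness_lock_order_key_equal(&key, &data->wlod_key))
			return (data);
	return (NULL);
}
#endif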
314 
315 static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
316 		    const char *fname);
317 #ifdef KDB
318 static void	_witness_debugger(int cond, const char *msg);
319 #endif
320 static void	adopt(struct witness *parent, struct witness *child);
321 #ifdef BLESSING
322 static int	blessed(struct witness *, struct witness *);
323 #endif
324 static void	depart(struct witness *w);
325 static struct witness	*enroll(const char *description,
326 			    struct lock_class *lock_class);
327 static struct lock_instance	*find_instance(struct lock_list_entry *list,
328 				    const struct lock_object *lock);
329 static int	isitmychild(struct witness *parent, struct witness *child);
330 static int	isitmydescendant(struct witness *parent, struct witness *child);
331 static void	itismychild(struct witness *parent, struct witness *child);
332 static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
333 static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
334 static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
335 static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
336 #ifdef DDB
337 static void	witness_ddb_compute_levels(void);
338 static void	witness_ddb_display(int(*)(const char *fmt, ...));
339 static void	witness_ddb_display_descendants(int(*)(const char *fmt, ...),
340 		    struct witness *, int indent);
341 static void	witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
342 		    struct witness_list *list);
343 static void	witness_ddb_level_descendants(struct witness *parent, int l);
344 static void	witness_ddb_list(struct thread *td);
345 #endif
346 static void	witness_free(struct witness *m);
347 static struct witness	*witness_get(void);
348 static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
349 static struct witness	*witness_hash_get(const char *key);
350 static void	witness_hash_put(struct witness *w);
351 static void	witness_init_hash_tables(void);
352 static void	witness_increment_graph_generation(void);
353 static void	witness_lock_list_free(struct lock_list_entry *lle);
354 static struct lock_list_entry	*witness_lock_list_get(void);
355 static int	witness_lock_order_add(struct witness *parent,
356 		    struct witness *child);
357 static int	witness_lock_order_check(struct witness *parent,
358 		    struct witness *child);
359 static struct witness_lock_order_data	*witness_lock_order_get(
360 					    struct witness *parent,
361 					    struct witness *child);
362 static void	witness_list_lock(struct lock_instance *instance,
363 		    int (*prnt)(const char *fmt, ...));
364 static void	witness_setflag(struct lock_object *lock, int flag, int set);
365 
366 #ifdef KDB
367 #define	witness_debugger(c)	_witness_debugger(c, __func__)
368 #else
369 #define	witness_debugger(c)
370 #endif
371 
372 static SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL,
373     "Witness Locking");
374 
375 /*
376  * If set to 0, lock order checking is disabled.  If set to -1,
377  * witness is completely disabled.  Otherwise witness performs full
378  * lock order checking for all locks.  At runtime, lock order checking
379  * may be toggled.  However, witness cannot be reenabled once it is
380  * completely disabled.
381  */
382 static int witness_watch = 1;
383 TUNABLE_INT("debug.witness.watch", &witness_watch);
384 SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
385     sysctl_debug_witness_watch, "I", "witness is watching lock operations");
386 
387 #ifdef KDB
388 /*
389  * When KDB is enabled and witness_kdb is 1, it will cause the system
390  * to drop into the kernel debugger when:
391  *	- a lock hierarchy violation occurs
392  *	- locks are held when going to sleep.
393  */
394 #ifdef WITNESS_KDB
395 int	witness_kdb = 1;
396 #else
397 int	witness_kdb = 0;
398 #endif
399 TUNABLE_INT("debug.witness.kdb", &witness_kdb);
400 SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");
401 
402 /*
403  * When KDB is enabled and witness_trace is 1, it will cause the system
404  * to print a stack trace when:
405  *	- a lock hierarchy violation occurs
406  *	- locks are held when going to sleep.
407  */
408 int	witness_trace = 1;
409 TUNABLE_INT("debug.witness.trace", &witness_trace);
410 SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
411 #endif /* KDB */
412 
413 #ifdef WITNESS_SKIPSPIN
414 int	witness_skipspin = 1;
415 #else
416 int	witness_skipspin = 0;
417 #endif
418 TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
419 SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
420     0, "");
421 
422 /*
423  * Call this to print out the relations between locks.
424  */
425 SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
426     NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");
427 
428 /*
429  * Call this to print out the faulty stacks recorded by witness.
430  */
431 SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
432     NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");
433 
434 static struct mtx w_mtx;
435 
436 /* w_list */
437 static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
438 static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
439 
440 /* w_typelist */
441 static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
442 static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
443 
444 /* lock list */
445 static struct lock_list_entry *w_lock_list_free = NULL;
446 static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
447 static u_int pending_cnt;
448 
449 static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
450 SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
451 SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
452 SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
453     "");
454 
455 static struct witness *w_data;
456 static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1];
457 static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
458 static struct witness_hash w_hash;	/* The witness hash table. */
459 
460 /* The lock order data hash */
461 static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
462 static struct witness_lock_order_data *w_lofree = NULL;
463 static struct witness_lock_order_hash w_lohash;
464 static int w_max_used_index = 0;
465 static unsigned int w_generation = 0;
466 static const char w_notrunning[] = "Witness not running\n";
467 static const char w_stillcold[] = "Witness is still cold\n";
468 
469 
470 static struct witness_order_list_entry order_lists[] = {
471 	/*
472 	 * sx locks
473 	 */
474 	{ "proctree", &lock_class_sx },
475 	{ "allproc", &lock_class_sx },
476 	{ "allprison", &lock_class_sx },
477 	{ NULL, NULL },
478 	/*
479 	 * Various mutexes
480 	 */
481 	{ "Giant", &lock_class_mtx_sleep },
482 	{ "pipe mutex", &lock_class_mtx_sleep },
483 	{ "sigio lock", &lock_class_mtx_sleep },
484 	{ "process group", &lock_class_mtx_sleep },
485 	{ "process lock", &lock_class_mtx_sleep },
486 	{ "session", &lock_class_mtx_sleep },
487 	{ "uidinfo hash", &lock_class_rw },
488 #ifdef	HWPMC_HOOKS
489 	{ "pmc-sleep", &lock_class_mtx_sleep },
490 #endif
491 	{ "time lock", &lock_class_mtx_sleep },
492 	{ NULL, NULL },
493 	/*
494 	 * Sockets
495 	 */
496 	{ "accept", &lock_class_mtx_sleep },
497 	{ "so_snd", &lock_class_mtx_sleep },
498 	{ "so_rcv", &lock_class_mtx_sleep },
499 	{ "sellck", &lock_class_mtx_sleep },
500 	{ NULL, NULL },
501 	/*
502 	 * Routing
503 	 */
504 	{ "so_rcv", &lock_class_mtx_sleep },
505 	{ "radix node head", &lock_class_rw },
506 	{ "rtentry", &lock_class_mtx_sleep },
507 	{ "ifaddr", &lock_class_mtx_sleep },
508 	{ NULL, NULL },
509 	/*
510 	 * IPv4 multicast:
511 	 * protocol locks before interface locks, after UDP locks.
512 	 */
513 	{ "udpinp", &lock_class_rw },
514 	{ "in_multi_mtx", &lock_class_mtx_sleep },
515 	{ "igmp_mtx", &lock_class_mtx_sleep },
516 	{ "if_addr_lock", &lock_class_rw },
517 	{ NULL, NULL },
518 	/*
519 	 * IPv6 multicast:
520 	 * protocol locks before interface locks, after UDP locks.
521 	 */
522 	{ "udpinp", &lock_class_rw },
523 	{ "in6_multi_mtx", &lock_class_mtx_sleep },
524 	{ "mld_mtx", &lock_class_mtx_sleep },
525 	{ "if_addr_lock", &lock_class_rw },
526 	{ NULL, NULL },
527 	/*
528 	 * UNIX Domain Sockets
529 	 */
530 	{ "unp_global_rwlock", &lock_class_rw },
531 	{ "unp_list_lock", &lock_class_mtx_sleep },
532 	{ "unp", &lock_class_mtx_sleep },
533 	{ "so_snd", &lock_class_mtx_sleep },
534 	{ NULL, NULL },
535 	/*
536 	 * UDP/IP
537 	 */
538 	{ "udp", &lock_class_rw },
539 	{ "udpinp", &lock_class_rw },
540 	{ "so_snd", &lock_class_mtx_sleep },
541 	{ NULL, NULL },
542 	/*
543 	 * TCP/IP
544 	 */
545 	{ "tcp", &lock_class_rw },
546 	{ "tcpinp", &lock_class_rw },
547 	{ "so_snd", &lock_class_mtx_sleep },
548 	{ NULL, NULL },
549 	/*
550 	 * netatalk
551 	 */
552 	{ "ddp_list_mtx", &lock_class_mtx_sleep },
553 	{ "ddp_mtx", &lock_class_mtx_sleep },
554 	{ NULL, NULL },
555 	/*
556 	 * BPF
557 	 */
558 	{ "bpf global lock", &lock_class_mtx_sleep },
559 	{ "bpf interface lock", &lock_class_rw },
560 	{ "bpf cdev lock", &lock_class_mtx_sleep },
561 	{ NULL, NULL },
562 	/*
563 	 * NFS server
564 	 */
565 	{ "nfsd_mtx", &lock_class_mtx_sleep },
566 	{ "so_snd", &lock_class_mtx_sleep },
567 	{ NULL, NULL },
568 
569 	/*
570 	 * IEEE 802.11
571 	 */
572 	{ "802.11 com lock", &lock_class_mtx_sleep},
573 	{ NULL, NULL },
574 	/*
575 	 * Network drivers
576 	 */
577 	{ "network driver", &lock_class_mtx_sleep},
578 	{ NULL, NULL },
579 
580 	/*
581 	 * Netgraph
582 	 */
583 	{ "ng_node", &lock_class_mtx_sleep },
584 	{ "ng_worklist", &lock_class_mtx_sleep },
585 	{ NULL, NULL },
586 	/*
587 	 * CDEV
588 	 */
589 	{ "vm map (system)", &lock_class_mtx_sleep },
590 	{ "vm page queue", &lock_class_mtx_sleep },
591 	{ "vnode interlock", &lock_class_mtx_sleep },
592 	{ "cdev", &lock_class_mtx_sleep },
593 	{ NULL, NULL },
594 	/*
595 	 * VM
596 	 */
597 	{ "vm map (user)", &lock_class_sx },
598 	{ "vm object", &lock_class_rw },
599 	{ "vm page", &lock_class_mtx_sleep },
600 	{ "vm page queue", &lock_class_mtx_sleep },
601 	{ "pmap pv global", &lock_class_rw },
602 	{ "pmap", &lock_class_mtx_sleep },
603 	{ "pmap pv list", &lock_class_rw },
604 	{ "vm page free queue", &lock_class_mtx_sleep },
605 	{ NULL, NULL },
606 	/*
607 	 * kqueue/VFS interaction
608 	 */
609 	{ "kqueue", &lock_class_mtx_sleep },
610 	{ "struct mount mtx", &lock_class_mtx_sleep },
611 	{ "vnode interlock", &lock_class_mtx_sleep },
612 	{ NULL, NULL },
613 	/*
614 	 * ZFS locking
615 	 */
616 	{ "dn->dn_mtx", &lock_class_sx },
617 	{ "dr->dt.di.dr_mtx", &lock_class_sx },
618 	{ "db->db_mtx", &lock_class_sx },
619 	{ NULL, NULL },
620 	/*
621 	 * spin locks
622 	 */
623 #ifdef SMP
624 	{ "ap boot", &lock_class_mtx_spin },
625 #endif
626 	{ "rm.mutex_mtx", &lock_class_mtx_spin },
627 	{ "sio", &lock_class_mtx_spin },
628 	{ "scrlock", &lock_class_mtx_spin },
629 #ifdef __i386__
630 	{ "cy", &lock_class_mtx_spin },
631 #endif
632 #ifdef __sparc64__
633 	{ "pcib_mtx", &lock_class_mtx_spin },
634 	{ "rtc_mtx", &lock_class_mtx_spin },
635 #endif
636 	{ "scc_hwmtx", &lock_class_mtx_spin },
637 	{ "uart_hwmtx", &lock_class_mtx_spin },
638 	{ "fast_taskqueue", &lock_class_mtx_spin },
639 	{ "intr table", &lock_class_mtx_spin },
640 #ifdef	HWPMC_HOOKS
641 	{ "pmc-per-proc", &lock_class_mtx_spin },
642 #endif
643 	{ "process slock", &lock_class_mtx_spin },
644 	{ "sleepq chain", &lock_class_mtx_spin },
645 	{ "umtx lock", &lock_class_mtx_spin },
646 	{ "rm_spinlock", &lock_class_mtx_spin },
647 	{ "turnstile chain", &lock_class_mtx_spin },
648 	{ "turnstile lock", &lock_class_mtx_spin },
649 	{ "sched lock", &lock_class_mtx_spin },
650 	{ "td_contested", &lock_class_mtx_spin },
651 	{ "callout", &lock_class_mtx_spin },
652 	{ "entropy harvest mutex", &lock_class_mtx_spin },
653 	{ "syscons video lock", &lock_class_mtx_spin },
654 #ifdef SMP
655 	{ "smp rendezvous", &lock_class_mtx_spin },
656 #endif
657 #ifdef __powerpc__
658 	{ "tlb0", &lock_class_mtx_spin },
659 #endif
660 	/*
661 	 * leaf locks
662 	 */
663 	{ "intrcnt", &lock_class_mtx_spin },
664 	{ "icu", &lock_class_mtx_spin },
665 #ifdef __i386__
666 	{ "allpmaps", &lock_class_mtx_spin },
667 	{ "descriptor tables", &lock_class_mtx_spin },
668 #endif
669 	{ "clk", &lock_class_mtx_spin },
670 	{ "cpuset", &lock_class_mtx_spin },
671 	{ "mprof lock", &lock_class_mtx_spin },
672 	{ "zombie lock", &lock_class_mtx_spin },
673 	{ "ALD Queue", &lock_class_mtx_spin },
674 #ifdef __ia64__
675 	{ "MCA spin lock", &lock_class_mtx_spin },
676 #endif
677 #if defined(__i386__) || defined(__amd64__)
678 	{ "pcicfg", &lock_class_mtx_spin },
679 	{ "NDIS thread lock", &lock_class_mtx_spin },
680 #endif
681 	{ "tw_osl_io_lock", &lock_class_mtx_spin },
682 	{ "tw_osl_q_lock", &lock_class_mtx_spin },
683 	{ "tw_cl_io_lock", &lock_class_mtx_spin },
684 	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
685 	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
686 #ifdef	HWPMC_HOOKS
687 	{ "pmc-leaf", &lock_class_mtx_spin },
688 #endif
689 	{ "blocked lock", &lock_class_mtx_spin },
690 	{ NULL, NULL },
691 	{ NULL, NULL }
692 };
693 
694 #ifdef BLESSING
695 /*
696  * Pairs of locks which have been blessed
697  * Don't complain about order problems with blessed locks
698  */
699 static struct witness_blessed blessed_list[] = {
700 };
701 static int blessed_count =
702 	sizeof(blessed_list) / sizeof(struct witness_blessed);
703 #endif
704 
705 /*
706  * This global is set to 0 once it becomes safe to use the witness code.
707  */
708 static int witness_cold = 1;
709 
710 /*
711  * This global is set to 1 once the static lock orders have been enrolled
712  * so that a warning can be issued for any spin locks enrolled later.
713  */
714 static int witness_spin_warn = 0;
715 
716 /* Trim useless garbage from filenames. */
717 static const char *
718 fixup_filename(const char *file)
719 {
720 
721 	if (file == NULL)
722 		return (NULL);
723 	while (strncmp(file, "../", 3) == 0)
724 		file += 3;
725 	return (file);
726 }
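/*
 * Illustrative sketch (not compiled): fixup_filename() only strips leading
 * "../" components, leaving everything else untouched.  The paths below are
 * examples only.
 */
#if 0
static void
fixup_filename_example(void)
{

	MPASS(strcmp(fixup_filename("../../kern/kern_mutex.c"),
	    "kern/kern_mutex.c") == 0);
	MPASS(strcmp(fixup_filename("/usr/src/sys/kern/kern_mutex.c"),
	    "/usr/src/sys/kern/kern_mutex.c") == 0);
	MPASS(fixup_filename(NULL) == NULL);
}
#endif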
727 
728 /*
729  * The WITNESS-enabled diagnostic code.  Note that the witness code does
730  * assume that the early boot is single-threaded at least until after this
731  * routine is completed.
732  */
733 static void
734 witness_initialize(void *dummy __unused)
735 {
736 	struct lock_object *lock;
737 	struct witness_order_list_entry *order;
738 	struct witness *w, *w1;
739 	int i;
740 
741 	w_data = malloc(sizeof (struct witness) * WITNESS_COUNT, M_WITNESS,
742 	    M_NOWAIT | M_ZERO);
743 
744 	/*
745 	 * We have to release Giant before initializing its witness
746 	 * structure so that WITNESS doesn't get confused.
747 	 */
748 	mtx_unlock(&Giant);
749 	mtx_assert(&Giant, MA_NOTOWNED);
750 
751 	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
752 	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
753 	    MTX_NOWITNESS | MTX_NOPROFILE);
754 	for (i = WITNESS_COUNT - 1; i >= 0; i--) {
755 		w = &w_data[i];
756 		memset(w, 0, sizeof(*w));
757 		w_data[i].w_index = i;	/* Witness index never changes. */
758 		witness_free(w);
759 	}
760 	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
761 	    ("%s: Invalid list of free witness objects", __func__));
762 
763 	/* The witness with index 0 is left unused, to aid in debugging. */
764 	STAILQ_REMOVE_HEAD(&w_free, w_list);
765 	w_free_cnt--;
766 
767 	memset(w_rmatrix, 0,
768 	    (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));
769 
770 	for (i = 0; i < LOCK_CHILDCOUNT; i++)
771 		witness_lock_list_free(&w_locklistdata[i]);
772 	witness_init_hash_tables();
773 
774 	/* First add in all the specified order lists. */
775 	for (order = order_lists; order->w_name != NULL; order++) {
776 		w = enroll(order->w_name, order->w_class);
777 		if (w == NULL)
778 			continue;
779 		w->w_file = "order list";
780 		for (order++; order->w_name != NULL; order++) {
781 			w1 = enroll(order->w_name, order->w_class);
782 			if (w1 == NULL)
783 				continue;
784 			w1->w_file = "order list";
785 			itismychild(w, w1);
786 			w = w1;
787 		}
788 	}
789 	witness_spin_warn = 1;
790 
791 	/* Iterate through all locks and add them to witness. */
792 	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
793 		lock = pending_locks[i].wh_lock;
794 		KASSERT(lock->lo_flags & LO_WITNESS,
795 		    ("%s: lock %s is on pending list but not LO_WITNESS",
796 		    __func__, lock->lo_name));
797 		lock->lo_witness = enroll(pending_locks[i].wh_type,
798 		    LOCK_CLASS(lock));
799 	}
800 
801 	/* Mark the witness code as being ready for use. */
802 	witness_cold = 0;
803 
804 	mtx_lock(&Giant);
805 }
806 SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
807     NULL);
808 
809 void
810 witness_init(struct lock_object *lock, const char *type)
811 {
812 	struct lock_class *class;
813 
814 	/* Various sanity checks. */
815 	class = LOCK_CLASS(lock);
816 	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
817 	    (class->lc_flags & LC_RECURSABLE) == 0)
818 		kassert_panic("%s: lock (%s) %s can not be recursable",
819 		    __func__, class->lc_name, lock->lo_name);
820 	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
821 	    (class->lc_flags & LC_SLEEPABLE) == 0)
822 		kassert_panic("%s: lock (%s) %s can not be sleepable",
823 		    __func__, class->lc_name, lock->lo_name);
824 	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
825 	    (class->lc_flags & LC_UPGRADABLE) == 0)
826 		kassert_panic("%s: lock (%s) %s can not be upgradable",
827 		    __func__, class->lc_name, lock->lo_name);
828 
829 	/*
830 	 * If we shouldn't watch this lock, then just clear lo_witness.
831 	 * Otherwise, if witness_cold is set, then it is too early to
832 	 * enroll this lock, so defer it to witness_initialize() by adding
833 	 * it to the pending_locks list.  If it is not too early, then enroll
834 	 * the lock now.
835 	 */
836 	if (witness_watch < 1 || panicstr != NULL ||
837 	    (lock->lo_flags & LO_WITNESS) == 0)
838 		lock->lo_witness = NULL;
839 	else if (witness_cold) {
840 		pending_locks[pending_cnt].wh_lock = lock;
841 		pending_locks[pending_cnt++].wh_type = type;
842 		if (pending_cnt > WITNESS_PENDLIST)
843 			panic("%s: pending locks list is too small, "
844 			    "increase WITNESS_PENDLIST\n",
845 			    __func__);
846 	} else
847 		lock->lo_witness = enroll(type, class);
848 }
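/*
 * Illustrative sketch (not compiled): locks normally arrive at witness_init()
 * through their class initialization routines.  A mutex initialized as below
 * is enrolled under the type name "example driver"; adding MTX_NOWITNESS
 * would leave LO_WITNESS clear and make witness ignore it.  "example_mtx"
 * is a hypothetical lock used only for illustration.
 */
#if 0
static struct mtx example_mtx;

static void
witness_enroll_example(void)
{

	mtx_init(&example_mtx, "example driver", NULL, MTX_DEF);
	mtx_destroy(&example_mtx);
}
#endif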
849 
850 void
851 witness_destroy(struct lock_object *lock)
852 {
853 	struct lock_class *class;
854 	struct witness *w;
855 
856 	class = LOCK_CLASS(lock);
857 
858 	if (witness_cold)
859 		panic("lock (%s) %s destroyed while witness_cold",
860 		    class->lc_name, lock->lo_name);
861 
862 	/* XXX: need to verify that no one holds the lock */
863 	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
864 		return;
865 	w = lock->lo_witness;
866 
867 	mtx_lock_spin(&w_mtx);
868 	MPASS(w->w_refcount > 0);
869 	w->w_refcount--;
870 
871 	if (w->w_refcount == 0)
872 		depart(w);
873 	mtx_unlock_spin(&w_mtx);
874 }
875 
876 #ifdef DDB
877 static void
878 witness_ddb_compute_levels(void)
879 {
880 	struct witness *w;
881 
882 	/*
883 	 * First clear all levels.
884 	 */
885 	STAILQ_FOREACH(w, &w_all, w_list)
886 		w->w_ddb_level = -1;
887 
888 	/*
889 	 * Look for locks with no parents and level all their descendants.
890 	 */
891 	STAILQ_FOREACH(w, &w_all, w_list) {
892 
893 		/* If the witness has ancestors (is not a root), skip it. */
894 		if (w->w_num_ancestors > 0)
895 			continue;
896 		witness_ddb_level_descendants(w, 0);
897 	}
898 }
899 
900 static void
901 witness_ddb_level_descendants(struct witness *w, int l)
902 {
903 	int i;
904 
905 	if (w->w_ddb_level >= l)
906 		return;
907 
908 	w->w_ddb_level = l;
909 	l++;
910 
911 	for (i = 1; i <= w_max_used_index; i++) {
912 		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
913 			witness_ddb_level_descendants(&w_data[i], l);
914 	}
915 }
916 
917 static void
918 witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
919     struct witness *w, int indent)
920 {
921 	int i;
922 
923 	for (i = 0; i < indent; i++)
924 		prnt(" ");
925 	prnt("%s (type: %s, depth: %d, active refs: %d)",
926 	     w->w_name, w->w_class->lc_name,
927 	     w->w_ddb_level, w->w_refcount);
928 	if (w->w_displayed) {
929 		prnt(" -- (already displayed)\n");
930 		return;
931 	}
932 	w->w_displayed = 1;
933 	if (w->w_file != NULL && w->w_line != 0)
934 		prnt(" -- last acquired @ %s:%d\n", fixup_filename(w->w_file),
935 		    w->w_line);
936 	else
937 		prnt(" -- never acquired\n");
938 	indent++;
939 	WITNESS_INDEX_ASSERT(w->w_index);
940 	for (i = 1; i <= w_max_used_index; i++) {
941 		if (db_pager_quit)
942 			return;
943 		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
944 			witness_ddb_display_descendants(prnt, &w_data[i],
945 			    indent);
946 	}
947 }
948 
949 static void
950 witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
951     struct witness_list *list)
952 {
953 	struct witness *w;
954 
955 	STAILQ_FOREACH(w, list, w_typelist) {
956 		if (w->w_file == NULL || w->w_ddb_level > 0)
957 			continue;
958 
959 		/* This lock has no ancestors - display its descendants. */
960 		witness_ddb_display_descendants(prnt, w, 0);
961 		if (db_pager_quit)
962 			return;
963 	}
964 }
965 
966 static void
967 witness_ddb_display(int(*prnt)(const char *fmt, ...))
968 {
969 	struct witness *w;
970 
971 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
972 	witness_ddb_compute_levels();
973 
974 	/* Clear all the displayed flags. */
975 	STAILQ_FOREACH(w, &w_all, w_list)
976 		w->w_displayed = 0;
977 
978 	/*
979 	 * First, handle sleep locks which have been acquired at least
980 	 * once.
981 	 */
982 	prnt("Sleep locks:\n");
983 	witness_ddb_display_list(prnt, &w_sleep);
984 	if (db_pager_quit)
985 		return;
986 
987 	/*
988 	 * Now do spin locks which have been acquired at least once.
989 	 */
990 	prnt("\nSpin locks:\n");
991 	witness_ddb_display_list(prnt, &w_spin);
992 	if (db_pager_quit)
993 		return;
994 
995 	/*
996 	 * Finally, any locks which have not been acquired yet.
997 	 */
998 	prnt("\nLocks which were never acquired:\n");
999 	STAILQ_FOREACH(w, &w_all, w_list) {
1000 		if (w->w_file != NULL || w->w_refcount == 0)
1001 			continue;
1002 		prnt("%s (type: %s, depth: %d)\n", w->w_name,
1003 		    w->w_class->lc_name, w->w_ddb_level);
1004 		if (db_pager_quit)
1005 			return;
1006 	}
1007 }
1008 #endif /* DDB */
1009 
1010 int
1011 witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
1012 {
1013 
1014 	if (witness_watch == -1 || panicstr != NULL)
1015 		return (0);
1016 
1017 	/* Require locks that witness knows about. */
1018 	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
1019 	    lock2->lo_witness == NULL)
1020 		return (EINVAL);
1021 
1022 	mtx_assert(&w_mtx, MA_NOTOWNED);
1023 	mtx_lock_spin(&w_mtx);
1024 
1025 	/*
1026 	 * If we already have either an explicit or implied lock order that
1027 	 * is the other way around, then return an error.
1028 	 */
1029 	if (witness_watch &&
1030 	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
1031 		mtx_unlock_spin(&w_mtx);
1032 		return (EDOOFUS);
1033 	}
1034 
1035 	/* Try to add the new order. */
1036 	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1037 	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
1038 	itismychild(lock1->lo_witness, lock2->lo_witness);
1039 	mtx_unlock_spin(&w_mtx);
1040 	return (0);
1041 }
1042 
1043 void
1044 witness_checkorder(struct lock_object *lock, int flags, const char *file,
1045     int line, struct lock_object *interlock)
1046 {
1047 	struct lock_list_entry *lock_list, *lle;
1048 	struct lock_instance *lock1, *lock2, *plock;
1049 	struct lock_class *class, *iclass;
1050 	struct witness *w, *w1;
1051 	struct thread *td;
1052 	int i, j;
1053 
1054 	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
1055 	    panicstr != NULL)
1056 		return;
1057 
1058 	w = lock->lo_witness;
1059 	class = LOCK_CLASS(lock);
1060 	td = curthread;
1061 
1062 	if (class->lc_flags & LC_SLEEPLOCK) {
1063 
1064 		/*
1065 		 * Since spin locks include a critical section, this check
1066 		 * implicitly enforces a lock order of all sleep locks before
1067 		 * all spin locks.
1068 		 */
1069 		if (td->td_critnest != 0 && !kdb_active)
1070 			kassert_panic("acquiring blockable sleep lock with "
1071 			    "spinlock or critical section held (%s) %s @ %s:%d",
1072 			    class->lc_name, lock->lo_name,
1073 			    fixup_filename(file), line);
1074 
1075 		/*
1076 		 * If this is the first lock acquired then just return as
1077 		 * no order checking is needed.
1078 		 */
1079 		lock_list = td->td_sleeplocks;
1080 		if (lock_list == NULL || lock_list->ll_count == 0)
1081 			return;
1082 	} else {
1083 
1084 		/*
1085 		 * If this is the first lock, just return as no order
1086 		 * checking is needed.  Avoid problems with thread
1087 		 * migration by pinning the thread while checking if
1088 		 * spinlocks are held.  If at least one spinlock is held
1089 		 * the thread is on a safe path and it is allowed to be
1090 		 * unpinned.
1091 		 */
1092 		sched_pin();
1093 		lock_list = PCPU_GET(spinlocks);
1094 		if (lock_list == NULL || lock_list->ll_count == 0) {
1095 			sched_unpin();
1096 			return;
1097 		}
1098 		sched_unpin();
1099 	}
1100 
1101 	/*
1102 	 * Check to see if we are recursing on a lock we already own.  If
1103 	 * so, make sure that we don't mismatch exclusive and shared lock
1104 	 * acquires.
1105 	 */
1106 	lock1 = find_instance(lock_list, lock);
1107 	if (lock1 != NULL) {
1108 		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
1109 		    (flags & LOP_EXCLUSIVE) == 0) {
1110 			printf("shared lock of (%s) %s @ %s:%d\n",
1111 			    class->lc_name, lock->lo_name,
1112 			    fixup_filename(file), line);
1113 			printf("while exclusively locked from %s:%d\n",
1114 			    fixup_filename(lock1->li_file), lock1->li_line);
1115 			kassert_panic("excl->share");
1116 		}
1117 		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
1118 		    (flags & LOP_EXCLUSIVE) != 0) {
1119 			printf("exclusive lock of (%s) %s @ %s:%d\n",
1120 			    class->lc_name, lock->lo_name,
1121 			    fixup_filename(file), line);
1122 			printf("while share locked from %s:%d\n",
1123 			    fixup_filename(lock1->li_file), lock1->li_line);
1124 			kassert_panic("share->excl");
1125 		}
1126 		return;
1127 	}
1128 
1129 	/* Warn if the interlock is not locked exactly once. */
1130 	if (interlock != NULL) {
1131 		iclass = LOCK_CLASS(interlock);
1132 		lock1 = find_instance(lock_list, interlock);
1133 		if (lock1 == NULL)
1134 			kassert_panic("interlock (%s) %s not locked @ %s:%d",
1135 			    iclass->lc_name, interlock->lo_name,
1136 			    fixup_filename(file), line);
1137 		else if ((lock1->li_flags & LI_RECURSEMASK) != 0)
1138 			kassert_panic("interlock (%s) %s recursed @ %s:%d",
1139 			    iclass->lc_name, interlock->lo_name,
1140 			    fixup_filename(file), line);
1141 	}
1142 
1143 	/*
1144 	 * Find the previously acquired lock, but ignore interlocks.
1145 	 */
1146 	plock = &lock_list->ll_children[lock_list->ll_count - 1];
1147 	if (interlock != NULL && plock->li_lock == interlock) {
1148 		if (lock_list->ll_count > 1)
1149 			plock =
1150 			    &lock_list->ll_children[lock_list->ll_count - 2];
1151 		else {
1152 			lle = lock_list->ll_next;
1153 
1154 			/*
1155 			 * The interlock is the only lock we hold, so
1156 			 * simply return.
1157 			 */
1158 			if (lle == NULL)
1159 				return;
1160 			plock = &lle->ll_children[lle->ll_count - 1];
1161 		}
1162 	}
1163 
1164 	/*
1165 	 * Try to perform most checks without a lock.  If this succeeds we
1166 	 * can skip acquiring the lock and return success.
1167 	 */
1168 	w1 = plock->li_lock->lo_witness;
1169 	if (witness_lock_order_check(w1, w))
1170 		return;
1171 
1172 	/*
1173 	 * Check for duplicate locks of the same type.  Note that we only
1174 	 * have to check for this on the last lock we just acquired.  Any
1175 	 * other cases will be caught as lock order violations.
1176 	 */
1177 	mtx_lock_spin(&w_mtx);
1178 	witness_lock_order_add(w1, w);
1179 	if (w1 == w) {
1180 		i = w->w_index;
1181 		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
1182 		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
1183 			w_rmatrix[i][i] |= WITNESS_REVERSAL;
1184 			w->w_reversed = 1;
1185 			mtx_unlock_spin(&w_mtx);
1186 			printf(
1187 			    "acquiring duplicate lock of same type: \"%s\"\n",
1188 			    w->w_name);
1189 			printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
1190 			    fixup_filename(plock->li_file), plock->li_line);
1191 			printf(" 2nd %s @ %s:%d\n", lock->lo_name,
1192 			    fixup_filename(file), line);
1193 			witness_debugger(1);
1194 		} else
1195 			mtx_unlock_spin(&w_mtx);
1196 		return;
1197 	}
1198 	mtx_assert(&w_mtx, MA_OWNED);
1199 
1200 	/*
1201 	 * If we know that the lock we are acquiring comes after
1202 	 * the lock we most recently acquired in the lock order tree,
1203 	 * then there is no need for any further checks.
1204 	 */
1205 	if (isitmychild(w1, w))
1206 		goto out;
1207 
1208 	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
1209 		for (i = lle->ll_count - 1; i >= 0; i--, j++) {
1210 
1211 			MPASS(j < WITNESS_COUNT);
1212 			lock1 = &lle->ll_children[i];
1213 
1214 			/*
1215 			 * Ignore the interlock.
1216 			 */
1217 			if (interlock == lock1->li_lock)
1218 				continue;
1219 
1220 			/*
1221 			 * If this lock doesn't undergo witness checking,
1222 			 * then skip it.
1223 			 */
1224 			w1 = lock1->li_lock->lo_witness;
1225 			if (w1 == NULL) {
1226 				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
1227 				    ("lock missing witness structure"));
1228 				continue;
1229 			}
1230 
1231 			/*
1232 			 * If we are locking Giant and this is a sleepable
1233 			 * lock, then skip it.
1234 			 */
1235 			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
1236 			    lock == &Giant.lock_object)
1237 				continue;
1238 
1239 			/*
1240 			 * If we are locking a sleepable lock and this lock
1241 			 * is Giant, then skip it.
1242 			 */
1243 			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1244 			    lock1->li_lock == &Giant.lock_object)
1245 				continue;
1246 
1247 			/*
1248 			 * If we are locking a sleepable lock and this lock
1249 			 * isn't sleepable, we want to treat it as a lock
1250 			 * order violation to enforce a general lock order of
1251 			 * sleepable locks before non-sleepable locks.
1252 			 */
1253 			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1254 			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1255 				goto reversal;
1256 
1257 			/*
1258 			 * If we are locking Giant and this is a non-sleepable
1259 			 * lock, then treat it as a reversal.
1260 			 */
1261 			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
1262 			    lock == &Giant.lock_object)
1263 				goto reversal;
1264 
1265 			/*
1266 			 * Check the lock order hierarchy for a reversal.
1267 			 */
1268 			if (!isitmydescendant(w, w1))
1269 				continue;
1270 		reversal:
1271 
1272 			/*
1273 			 * We have a lock order violation, check to see if it
1274 			 * is allowed or has already been yelled about.
1275 			 */
1276 #ifdef BLESSING
1277 
1278 			/*
1279 			 * If the lock order is blessed, just bail.  We don't
1280 			 * look for other lock order violations though, which
1281 			 * may be a bug.
1282 			 */
1283 			if (blessed(w, w1))
1284 				goto out;
1285 #endif
1286 
1287 			/* Bail if this violation is known */
1288 			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
1289 				goto out;
1290 
1291 			/* Record this as a violation */
1292 			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
1293 			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
1294 			w->w_reversed = w1->w_reversed = 1;
1295 			witness_increment_graph_generation();
1296 			mtx_unlock_spin(&w_mtx);
1297 
1298 #ifdef WITNESS_NO_VNODE
1299 			/*
1300 			 * There are known LORs between VNODE locks. They are
1301 			 * not an indication of a bug. VNODE locks are flagged
1302 			 * as such (LO_IS_VNODE) and we don't yell if the LOR
1303 			 * is between 2 VNODE locks.
1304 			 */
1305 			if ((lock->lo_flags & LO_IS_VNODE) != 0 &&
1306 			    (lock1->li_lock->lo_flags & LO_IS_VNODE) != 0)
1307 				return;
1308 #endif
1309 
1310 			/*
1311 			 * Ok, yell about it.
1312 			 */
1313 			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1314 			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1315 				printf(
1316 		"lock order reversal: (sleepable after non-sleepable)\n");
1317 			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
1318 			    && lock == &Giant.lock_object)
1319 				printf(
1320 		"lock order reversal: (Giant after non-sleepable)\n");
1321 			else
1322 				printf("lock order reversal:\n");
1323 
1324 			/*
1325 			 * Try to locate an earlier lock with
1326 			 * witness w in our list.
1327 			 */
1328 			do {
1329 				lock2 = &lle->ll_children[i];
1330 				MPASS(lock2->li_lock != NULL);
1331 				if (lock2->li_lock->lo_witness == w)
1332 					break;
1333 				if (i == 0 && lle->ll_next != NULL) {
1334 					lle = lle->ll_next;
1335 					i = lle->ll_count - 1;
1336 					MPASS(i >= 0 && i < LOCK_NCHILDREN);
1337 				} else
1338 					i--;
1339 			} while (i >= 0);
1340 			if (i < 0) {
1341 				printf(" 1st %p %s (%s) @ %s:%d\n",
1342 				    lock1->li_lock, lock1->li_lock->lo_name,
1343 				    w1->w_name, fixup_filename(lock1->li_file),
1344 				    lock1->li_line);
1345 				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
1346 				    lock->lo_name, w->w_name,
1347 				    fixup_filename(file), line);
1348 			} else {
1349 				printf(" 1st %p %s (%s) @ %s:%d\n",
1350 				    lock2->li_lock, lock2->li_lock->lo_name,
1351 				    lock2->li_lock->lo_witness->w_name,
1352 				    fixup_filename(lock2->li_file),
1353 				    lock2->li_line);
1354 				printf(" 2nd %p %s (%s) @ %s:%d\n",
1355 				    lock1->li_lock, lock1->li_lock->lo_name,
1356 				    w1->w_name, fixup_filename(lock1->li_file),
1357 				    lock1->li_line);
1358 				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
1359 				    lock->lo_name, w->w_name,
1360 				    fixup_filename(file), line);
1361 			}
1362 			witness_debugger(1);
1363 			return;
1364 		}
1365 	}
1366 
1367 	/*
1368 	 * If requested, build a new lock order.  However, don't build a new
1369 	 * relationship between a sleepable lock and Giant if it is in the
1370 	 * wrong direction.  The correct lock order is that sleepable locks
1371 	 * always come before Giant.
1372 	 */
1373 	if (flags & LOP_NEWORDER &&
1374 	    !(plock->li_lock == &Giant.lock_object &&
1375 	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
1376 		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1377 		    w->w_name, plock->li_lock->lo_witness->w_name);
1378 		itismychild(plock->li_lock->lo_witness, w);
1379 	}
1380 out:
1381 	mtx_unlock_spin(&w_mtx);
1382 }
1383 
1384 void
1385 witness_lock(struct lock_object *lock, int flags, const char *file, int line)
1386 {
1387 	struct lock_list_entry **lock_list, *lle;
1388 	struct lock_instance *instance;
1389 	struct witness *w;
1390 	struct thread *td;
1391 
1392 	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
1393 	    panicstr != NULL)
1394 		return;
1395 	w = lock->lo_witness;
1396 	td = curthread;
1397 
1398 	/* Determine lock list for this lock. */
1399 	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
1400 		lock_list = &td->td_sleeplocks;
1401 	else
1402 		lock_list = PCPU_PTR(spinlocks);
1403 
1404 	/* Check to see if we are recursing on a lock we already own. */
1405 	instance = find_instance(*lock_list, lock);
1406 	if (instance != NULL) {
1407 		instance->li_flags++;
1408 		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
1409 		    td->td_proc->p_pid, lock->lo_name,
1410 		    instance->li_flags & LI_RECURSEMASK);
1411 		instance->li_file = file;
1412 		instance->li_line = line;
1413 		return;
1414 	}
1415 
1416 	/* Update per-witness last file and line acquire. */
1417 	w->w_file = file;
1418 	w->w_line = line;
1419 
1420 	/* Find the next open lock instance in the list and fill it. */
1421 	lle = *lock_list;
1422 	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
1423 		lle = witness_lock_list_get();
1424 		if (lle == NULL)
1425 			return;
1426 		lle->ll_next = *lock_list;
1427 		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
1428 		    td->td_proc->p_pid, lle);
1429 		*lock_list = lle;
1430 	}
1431 	instance = &lle->ll_children[lle->ll_count++];
1432 	instance->li_lock = lock;
1433 	instance->li_line = line;
1434 	instance->li_file = file;
1435 	if ((flags & LOP_EXCLUSIVE) != 0)
1436 		instance->li_flags = LI_EXCLUSIVE;
1437 	else
1438 		instance->li_flags = 0;
1439 	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
1440 	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
1441 }
1442 
1443 void
1444 witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
1445 {
1446 	struct lock_instance *instance;
1447 	struct lock_class *class;
1448 
1449 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1450 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1451 		return;
1452 	class = LOCK_CLASS(lock);
1453 	if (witness_watch) {
1454 		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1455 			kassert_panic(
1456 			    "upgrade of non-upgradable lock (%s) %s @ %s:%d",
1457 			    class->lc_name, lock->lo_name,
1458 			    fixup_filename(file), line);
1459 		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1460 			kassert_panic(
1461 			    "upgrade of non-sleep lock (%s) %s @ %s:%d",
1462 			    class->lc_name, lock->lo_name,
1463 			    fixup_filename(file), line);
1464 	}
1465 	instance = find_instance(curthread->td_sleeplocks, lock);
1466 	if (instance == NULL) {
1467 		kassert_panic("upgrade of unlocked lock (%s) %s @ %s:%d",
1468 		    class->lc_name, lock->lo_name,
1469 		    fixup_filename(file), line);
1470 		return;
1471 	}
1472 	if (witness_watch) {
1473 		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
1474 			kassert_panic(
1475 			    "upgrade of exclusive lock (%s) %s @ %s:%d",
1476 			    class->lc_name, lock->lo_name,
1477 			    fixup_filename(file), line);
1478 		if ((instance->li_flags & LI_RECURSEMASK) != 0)
1479 			kassert_panic(
1480 			    "upgrade of recursed lock (%s) %s r=%d @ %s:%d",
1481 			    class->lc_name, lock->lo_name,
1482 			    instance->li_flags & LI_RECURSEMASK,
1483 			    fixup_filename(file), line);
1484 	}
1485 	instance->li_flags |= LI_EXCLUSIVE;
1486 }
1487 
1488 void
1489 witness_downgrade(struct lock_object *lock, int flags, const char *file,
1490     int line)
1491 {
1492 	struct lock_instance *instance;
1493 	struct lock_class *class;
1494 
1495 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1496 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1497 		return;
1498 	class = LOCK_CLASS(lock);
1499 	if (witness_watch) {
1500 		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1501 			kassert_panic(
1502 			    "downgrade of non-upgradable lock (%s) %s @ %s:%d",
1503 			    class->lc_name, lock->lo_name,
1504 			    fixup_filename(file), line);
1505 		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1506 			kassert_panic(
1507 			    "downgrade of non-sleep lock (%s) %s @ %s:%d",
1508 			    class->lc_name, lock->lo_name,
1509 			    fixup_filename(file), line);
1510 	}
1511 	instance = find_instance(curthread->td_sleeplocks, lock);
1512 	if (instance == NULL) {
1513 		kassert_panic("downgrade of unlocked lock (%s) %s @ %s:%d",
1514 		    class->lc_name, lock->lo_name,
1515 		    fixup_filename(file), line);
1516 		return;
1517 	}
1518 	if (witness_watch) {
1519 		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
1520 			kassert_panic(
1521 			    "downgrade of shared lock (%s) %s @ %s:%d",
1522 			    class->lc_name, lock->lo_name,
1523 			    fixup_filename(file), line);
1524 		if ((instance->li_flags & LI_RECURSEMASK) != 0)
1525 			kassert_panic(
1526 			    "downgrade of recursed lock (%s) %s r=%d @ %s:%d",
1527 			    class->lc_name, lock->lo_name,
1528 			    instance->li_flags & LI_RECURSEMASK,
1529 			    fixup_filename(file), line);
1530 	}
1531 	instance->li_flags &= ~LI_EXCLUSIVE;
1532 }
1533 
1534 void
1535 witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
1536 {
1537 	struct lock_list_entry **lock_list, *lle;
1538 	struct lock_instance *instance;
1539 	struct lock_class *class;
1540 	struct thread *td;
1541 	register_t s;
1542 	int i, j;
1543 
1544 	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
1545 		return;
1546 	td = curthread;
1547 	class = LOCK_CLASS(lock);
1548 
1549 	/* Find lock instance associated with this lock. */
1550 	if (class->lc_flags & LC_SLEEPLOCK)
1551 		lock_list = &td->td_sleeplocks;
1552 	else
1553 		lock_list = PCPU_PTR(spinlocks);
1554 	lle = *lock_list;
1555 	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
1556 		for (i = 0; i < (*lock_list)->ll_count; i++) {
1557 			instance = &(*lock_list)->ll_children[i];
1558 			if (instance->li_lock == lock)
1559 				goto found;
1560 		}
1561 
1562 	/*
1563 	 * When disabling WITNESS through witness_watch we could end up with
1564 	 * locks still registered in the td_sleeplocks queue.
1565 	 * We have to make sure we flush these queues, so just search for
1566 	 * any such leftover registered locks and remove them.
1567 	 */
1568 	if (witness_watch > 0) {
1569 		kassert_panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
1570 		    lock->lo_name, fixup_filename(file), line);
1571 		return;
1572 	} else {
1573 		return;
1574 	}
1575 found:
1576 
1577 	/* First, check for shared/exclusive mismatches. */
1578 	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
1579 	    (flags & LOP_EXCLUSIVE) == 0) {
1580 		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
1581 		    lock->lo_name, fixup_filename(file), line);
1582 		printf("while exclusively locked from %s:%d\n",
1583 		    fixup_filename(instance->li_file), instance->li_line);
1584 		kassert_panic("excl->ushare");
1585 	}
1586 	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
1587 	    (flags & LOP_EXCLUSIVE) != 0) {
1588 		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
1589 		    lock->lo_name, fixup_filename(file), line);
1590 		printf("while share locked from %s:%d\n",
1591 		    fixup_filename(instance->li_file),
1592 		    instance->li_line);
1593 		kassert_panic("share->uexcl");
1594 	}
1595 	/* If we are recursed, unrecurse. */
1596 	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
1597 		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
1598 		    td->td_proc->p_pid, instance->li_lock->lo_name,
1599 		    instance->li_flags);
1600 		instance->li_flags--;
1601 		return;
1602 	}
1603 	/* The lock is now being dropped, check for NORELEASE flag */
1604 	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
1605 		printf("forbidden unlock of (%s) %s @ %s:%d\n", class->lc_name,
1606 		    lock->lo_name, fixup_filename(file), line);
1607 		kassert_panic("lock marked norelease");
1608 	}
1609 
1610 	/* Otherwise, remove this item from the list. */
1611 	s = intr_disable();
1612 	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
1613 	    td->td_proc->p_pid, instance->li_lock->lo_name,
1614 	    (*lock_list)->ll_count - 1);
1615 	for (j = i; j < (*lock_list)->ll_count - 1; j++)
1616 		(*lock_list)->ll_children[j] =
1617 		    (*lock_list)->ll_children[j + 1];
1618 	(*lock_list)->ll_count--;
1619 	intr_restore(s);
1620 
1621 	/*
1622 	 * In order to reduce contention on w_mtx, we want to always keep a
1623 	 * head object in the list so that frequent allocation from the
1624 	 * free witness pool (and the subsequent locking) is avoided.
1625 	 * To keep the code simple, an emptied head object is retained only
1626 	 * when it is the last object in the list; otherwise ownership of
1627 	 * the list is handed over to the next object and the emptied entry
1628 	 * is freed.
1629 	 */
1630 	if ((*lock_list)->ll_count == 0) {
1631 		if (*lock_list == lle) {
1632 			if (lle->ll_next == NULL)
1633 				return;
1634 		} else
1635 			lle = *lock_list;
1636 		*lock_list = lle->ll_next;
1637 		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
1638 		    td->td_proc->p_pid, lle);
1639 		witness_lock_list_free(lle);
1640 	}
1641 }
1642 
1643 void
1644 witness_thread_exit(struct thread *td)
1645 {
1646 	struct lock_list_entry *lle;
1647 	int i, n;
1648 
1649 	lle = td->td_sleeplocks;
1650 	if (lle == NULL || panicstr != NULL)
1651 		return;
1652 	if (lle->ll_count != 0) {
1653 		for (n = 0; lle != NULL; lle = lle->ll_next)
1654 			for (i = lle->ll_count - 1; i >= 0; i--) {
1655 				if (n == 0)
1656 		printf("Thread %p exiting with the following locks held:\n",
1657 					    td);
1658 				n++;
1659 				witness_list_lock(&lle->ll_children[i], printf);
1660 
1661 			}
1662 		kassert_panic(
1663 		    "Thread %p cannot exit while holding sleeplocks\n", td);
1664 	}
1665 	witness_lock_list_free(lle);
1666 }
1667 
1668 /*
1669  * Warn if any locks other than 'lock' are held.  Flags can be passed in to
1670  * exempt Giant and sleepable locks from the checks as well.  If any
1671  * non-exempt locks are held, then a supplied message is printed to the
1672  * console along with a list of the offending locks.  If indicated in the
1673  * flags then a failure results in a panic as well.
1674  */
1675 int
1676 witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1677 {
1678 	struct lock_list_entry *lock_list, *lle;
1679 	struct lock_instance *lock1;
1680 	struct thread *td;
1681 	va_list ap;
1682 	int i, n;
1683 
1684 	if (witness_cold || witness_watch < 1 || panicstr != NULL)
1685 		return (0);
1686 	n = 0;
1687 	td = curthread;
1688 	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1689 		for (i = lle->ll_count - 1; i >= 0; i--) {
1690 			lock1 = &lle->ll_children[i];
1691 			if (lock1->li_lock == lock)
1692 				continue;
1693 			if (flags & WARN_GIANTOK &&
1694 			    lock1->li_lock == &Giant.lock_object)
1695 				continue;
1696 			if (flags & WARN_SLEEPOK &&
1697 			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1698 				continue;
1699 			if (n == 0) {
1700 				va_start(ap, fmt);
1701 				vprintf(fmt, ap);
1702 				va_end(ap);
1703 				printf(" with the following");
1704 				if (flags & WARN_SLEEPOK)
1705 					printf(" non-sleepable");
1706 				printf(" locks held:\n");
1707 			}
1708 			n++;
1709 			witness_list_lock(lock1, printf);
1710 		}
1711 
1712 	/*
1713 	 * Pin the thread in order to avoid problems with thread migration.
1714 	 * Once all the checks of spin lock ownership have passed, the
1715 	 * thread is on a safe path and can be unpinned.
1716 	 */
1717 	sched_pin();
1718 	lock_list = PCPU_GET(spinlocks);
1719 	if (lock_list != NULL && lock_list->ll_count != 0) {
1720 		sched_unpin();
1721 
1722 		/*
1723 		 * We should only have one spin lock and, as long as
1724 		 * the flags cannot match for this lock's class,
1725 		 * check whether the first spin lock is the one
1726 		 * curthread should be holding.
1727 		 */
1728 		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
1729 		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
1730 		    lock1->li_lock == lock && n == 0)
1731 			return (0);
1732 
1733 		va_start(ap, fmt);
1734 		vprintf(fmt, ap);
1735 		va_end(ap);
1736 		printf(" with the following");
1737 		if (flags & WARN_SLEEPOK)
1738 			printf(" non-sleepable");
1739 		printf(" locks held:\n");
1740 		n += witness_list_locks(&lock_list, printf);
1741 	} else
1742 		sched_unpin();
1743 	if (flags & WARN_PANIC && n)
1744 		kassert_panic("%s", __func__);
1745 	else
1746 		witness_debugger(n);
1747 	return (n);
1748 }
1749 
1750 const char *
1751 witness_file(struct lock_object *lock)
1752 {
1753 	struct witness *w;
1754 
1755 	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1756 		return ("?");
1757 	w = lock->lo_witness;
1758 	return (w->w_file);
1759 }
1760 
1761 int
1762 witness_line(struct lock_object *lock)
1763 {
1764 	struct witness *w;
1765 
1766 	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1767 		return (0);
1768 	w = lock->lo_witness;
1769 	return (w->w_line);
1770 }
1771 
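/*
 * Look up the witness for a lock with the given description, allocating
 * and enrolling a new one if it does not exist yet.  Returns NULL if
 * witness is disabled, spin locks are being skipped, or no more witness
 * objects are available.
 */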
1772 static struct witness *
1773 enroll(const char *description, struct lock_class *lock_class)
1774 {
1775 	struct witness *w;
1776 	struct witness_list *typelist;
1777 
1778 	MPASS(description != NULL);
1779 
1780 	if (witness_watch == -1 || panicstr != NULL)
1781 		return (NULL);
1782 	if ((lock_class->lc_flags & LC_SPINLOCK)) {
1783 		if (witness_skipspin)
1784 			return (NULL);
1785 		else
1786 			typelist = &w_spin;
1787 	} else if ((lock_class->lc_flags & LC_SLEEPLOCK)) {
1788 		typelist = &w_sleep;
1789 	} else {
1790 		kassert_panic("lock class %s is not sleep or spin",
1791 		    lock_class->lc_name);
1792 		return (NULL);
1793 	}
1794 
1795 	mtx_lock_spin(&w_mtx);
1796 	w = witness_hash_get(description);
1797 	if (w)
1798 		goto found;
1799 	if ((w = witness_get()) == NULL)
1800 		return (NULL);
1801 	MPASS(strlen(description) < MAX_W_NAME);
1802 	strcpy(w->w_name, description);
1803 	w->w_class = lock_class;
1804 	w->w_refcount = 1;
1805 	STAILQ_INSERT_HEAD(&w_all, w, w_list);
1806 	if (lock_class->lc_flags & LC_SPINLOCK) {
1807 		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1808 		w_spin_cnt++;
1809 	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1810 		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1811 		w_sleep_cnt++;
1812 	}
1813 
1814 	/* Insert new witness into the hash */
1815 	witness_hash_put(w);
1816 	witness_increment_graph_generation();
1817 	mtx_unlock_spin(&w_mtx);
1818 	return (w);
1819 found:
1820 	w->w_refcount++;
1821 	mtx_unlock_spin(&w_mtx);
1822 	if (lock_class != w->w_class)
1823 		kassert_panic(
1824 			"lock (%s) %s does not match earlier (%s) lock",
1825 			description, lock_class->lc_name,
1826 			w->w_class->lc_name);
1827 	return (w);
1828 }
1829 
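/*
 * Called once a witness's reference count has dropped to zero.  Update
 * the per-type counters and clear the saved file/line, which may point
 * into a loadable module that is going away.
 */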
1830 static void
1831 depart(struct witness *w)
1832 {
1833 	struct witness_list *list;
1834 
1835 	MPASS(w->w_refcount == 0);
1836 	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1837 		list = &w_sleep;
1838 		w_sleep_cnt--;
1839 	} else {
1840 		list = &w_spin;
1841 		w_spin_cnt--;
1842 	}
1843 	/*
1844 	 * Set file to NULL as it may point into a loadable module.
1845 	 */
1846 	w->w_file = NULL;
1847 	w->w_line = 0;
1848 	witness_increment_graph_generation();
1849 }
1850 
1851 
1852 static void
1853 adopt(struct witness *parent, struct witness *child)
1854 {
1855 	int pi, ci, i, j;
1856 
1857 	if (witness_cold == 0)
1858 		mtx_assert(&w_mtx, MA_OWNED);
1859 
1860 	/* If the relationship is already known, there's no work to be done. */
1861 	if (isitmychild(parent, child))
1862 		return;
1863 
1864 	/* When the structure of the graph changes, bump up the generation. */
1865 	witness_increment_graph_generation();
1866 
1867 	/*
1868 	 * The hard part ... create the direct relationship, then propagate all
1869 	 * indirect relationships.
1870 	 */
1871 	pi = parent->w_index;
1872 	ci = child->w_index;
1873 	WITNESS_INDEX_ASSERT(pi);
1874 	WITNESS_INDEX_ASSERT(ci);
1875 	MPASS(pi != ci);
1876 	w_rmatrix[pi][ci] |= WITNESS_PARENT;
1877 	w_rmatrix[ci][pi] |= WITNESS_CHILD;
1878 
1879 	/*
1880 	 * If parent was not already an ancestor of child,
1881 	 * then we increment the descendant and ancestor counters.
1882 	 */
1883 	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
1884 		parent->w_num_descendants++;
1885 		child->w_num_ancestors++;
1886 	}
1887 
1888 	/*
1889 	 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
1890 	 * an ancestor of 'pi' during this loop.
1891 	 */
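	/*
	 * That is, for each witness 'i' that is 'pi' or an ancestor of
	 * 'pi', and each witness 'j' that is 'ci' or a descendant of
	 * 'ci', record that 'i' is an ancestor of 'j' (and 'j' a
	 * descendant of 'i').
	 */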
1892 	for (i = 1; i <= w_max_used_index; i++) {
1893 		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
1894 		    (i != pi))
1895 			continue;
1896 
1897 		/* Find each descendant of 'i' and mark it as a descendant. */
1898 		for (j = 1; j <= w_max_used_index; j++) {
1899 
1900 			/*
1901 			 * Skip children that are already marked as
1902 			 * descendants of 'i'.
1903 			 */
1904 			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
1905 				continue;
1906 
1907 			/*
1908 			 * We are only interested in descendants of 'ci'. Note
1909 			 * that 'ci' itself is counted as a descendant of 'ci'.
1910 			 */
1911 			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
1912 			    (j != ci))
1913 				continue;
1914 			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
1915 			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
1916 			w_data[i].w_num_descendants++;
1917 			w_data[j].w_num_ancestors++;
1918 
1919 			/*
1920 			 * Make sure we aren't marking a node as both an
1921 			 * ancestor and descendant. We should have caught
1922 			 * this as a lock order reversal earlier.
1923 			 */
1924 			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
1925 			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
1926 				printf("witness rmatrix paradox! [%d][%d]=%d "
1927 				    "both ancestor and descendant\n",
1928 				    i, j, w_rmatrix[i][j]);
1929 				kdb_backtrace();
1930 				printf("Witness disabled.\n");
1931 				witness_watch = -1;
1932 			}
1933 			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
1934 			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
1935 				printf("witness rmatrix paradox! [%d][%d]=%d "
1936 				    "both ancestor and descendant\n",
1937 				    j, i, w_rmatrix[j][i]);
1938 				kdb_backtrace();
1939 				printf("Witness disabled.\n");
1940 				witness_watch = -1;
1941 			}
1942 		}
1943 	}
1944 }
1945 
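/*
 * Establish the parent -> child relationship between two witnesses,
 * panicking first if the two are not of the same lock type.
 */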
1946 static void
1947 itismychild(struct witness *parent, struct witness *child)
1948 {
1949 	int unlocked;
1950 
1951 	MPASS(child != NULL && parent != NULL);
1952 	if (witness_cold == 0)
1953 		mtx_assert(&w_mtx, MA_OWNED);
1954 
1955 	if (!witness_lock_type_equal(parent, child)) {
1956 		if (witness_cold == 0) {
1957 			unlocked = 1;
1958 			mtx_unlock_spin(&w_mtx);
1959 		} else {
1960 			unlocked = 0;
1961 		}
1962 		kassert_panic(
1963 		    "%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
1964 		    "the same lock type", __func__, parent->w_name,
1965 		    parent->w_class->lc_name, child->w_name,
1966 		    child->w_class->lc_name);
1967 		if (unlocked)
1968 			mtx_lock_spin(&w_mtx);
1969 	}
1970 	adopt(parent, child);
1971 }
1972 
1973 /*
1974  * Generic code for the isitmy*() functions. The rmask parameter is the
1975  * expected relationship of w1 to w2.
1976  */
1977 static int
1978 _isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
1979 {
1980 	unsigned char r1, r2;
1981 	int i1, i2;
1982 
1983 	i1 = w1->w_index;
1984 	i2 = w2->w_index;
1985 	WITNESS_INDEX_ASSERT(i1);
1986 	WITNESS_INDEX_ASSERT(i2);
1987 	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
1988 	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;
1989 
1990 	/* The flags on one better be the inverse of the flags on the other */
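	/*
	 * That is, if w1 is recorded as an ancestor of w2, then w2 must be
	 * recorded as the matching descendant of w1 (and vice versa);
	 * anything else means w_rmatrix is inconsistent.
	 */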
1991 	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
1992 		(WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
1993 		printf("%s: rmatrix mismatch between %s (index %d) and %s "
1994 		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
1995 		    "w_rmatrix[%d][%d] == %hhx\n",
1996 		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
1997 		    i2, i1, r2);
1998 		kdb_backtrace();
1999 		printf("Witness disabled.\n");
2000 		witness_watch = -1;
2001 	}
2002 	return (r1 & rmask);
2003 }
2004 
2005 /*
2006  * Checks if @child is a direct child of @parent.
2007  */
2008 static int
2009 isitmychild(struct witness *parent, struct witness *child)
2010 {
2011 
2012 	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
2013 }
2014 
2015 /*
2016  * Checks if @descendant is a direct or indirect descendant of @ancestor.
2017  */
2018 static int
2019 isitmydescendant(struct witness *ancestor, struct witness *descendant)
2020 {
2021 
2022 	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
2023 	    __func__));
2024 }
2025 
2026 #ifdef BLESSING
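/*
 * Returns non-zero if the pair (w1, w2) appears in blessed_list, in
 * either order.  Lock order reversals between blessed pairs are
 * deliberately ignored.
 */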
2027 static int
2028 blessed(struct witness *w1, struct witness *w2)
2029 {
2030 	int i;
2031 	struct witness_blessed *b;
2032 
2033 	for (i = 0; i < blessed_count; i++) {
2034 		b = &blessed_list[i];
2035 		if (strcmp(w1->w_name, b->b_lock1) == 0) {
2036 			if (strcmp(w2->w_name, b->b_lock2) == 0)
2037 				return (1);
2038 			continue;
2039 		}
2040 		if (strcmp(w1->w_name, b->b_lock2) == 0)
2041 			if (strcmp(w2->w_name, b->b_lock1) == 0)
2042 				return (1);
2043 	}
2044 	return (0);
2045 }
2046 #endif
2047 
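/*
 * Allocate a witness object from the pre-allocated static pool.  If the
 * pool is empty, witness is disabled (witness_watch is set to -1), w_mtx
 * is dropped and NULL is returned.
 */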
2048 static struct witness *
2049 witness_get(void)
2050 {
2051 	struct witness *w;
2052 	int index;
2053 
2054 	if (witness_cold == 0)
2055 		mtx_assert(&w_mtx, MA_OWNED);
2056 
2057 	if (witness_watch == -1) {
2058 		mtx_unlock_spin(&w_mtx);
2059 		return (NULL);
2060 	}
2061 	if (STAILQ_EMPTY(&w_free)) {
2062 		witness_watch = -1;
2063 		mtx_unlock_spin(&w_mtx);
2064 		printf("WITNESS: unable to allocate a new witness object\n");
2065 		return (NULL);
2066 	}
2067 	w = STAILQ_FIRST(&w_free);
2068 	STAILQ_REMOVE_HEAD(&w_free, w_list);
2069 	w_free_cnt--;
2070 	index = w->w_index;
2071 	MPASS(index > 0 && index == w_max_used_index+1 &&
2072 	    index < WITNESS_COUNT);
2073 	bzero(w, sizeof(*w));
2074 	w->w_index = index;
2075 	if (index > w_max_used_index)
2076 		w_max_used_index = index;
2077 	return (w);
2078 }
2079 
2080 static void
2081 witness_free(struct witness *w)
2082 {
2083 
2084 	STAILQ_INSERT_HEAD(&w_free, w, w_list);
2085 	w_free_cnt++;
2086 }
2087 
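/*
 * Allocate a lock list entry from the static free list, disabling witness
 * if the free list has been exhausted.  Returns a zeroed entry or NULL.
 */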
2088 static struct lock_list_entry *
2089 witness_lock_list_get(void)
2090 {
2091 	struct lock_list_entry *lle;
2092 
2093 	if (witness_watch == -1)
2094 		return (NULL);
2095 	mtx_lock_spin(&w_mtx);
2096 	lle = w_lock_list_free;
2097 	if (lle == NULL) {
2098 		witness_watch = -1;
2099 		mtx_unlock_spin(&w_mtx);
2100 		printf("%s: witness exhausted\n", __func__);
2101 		return (NULL);
2102 	}
2103 	w_lock_list_free = lle->ll_next;
2104 	mtx_unlock_spin(&w_mtx);
2105 	bzero(lle, sizeof(*lle));
2106 	return (lle);
2107 }
2108 
2109 static void
2110 witness_lock_list_free(struct lock_list_entry *lle)
2111 {
2112 
2113 	mtx_lock_spin(&w_mtx);
2114 	lle->ll_next = w_lock_list_free;
2115 	w_lock_list_free = lle;
2116 	mtx_unlock_spin(&w_mtx);
2117 }
2118 
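/*
 * Walk a lock list looking for an instance of the given lock.  Returns a
 * pointer to the instance, or NULL if the lock is not on the list.
 */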
2119 static struct lock_instance *
2120 find_instance(struct lock_list_entry *list, const struct lock_object *lock)
2121 {
2122 	struct lock_list_entry *lle;
2123 	struct lock_instance *instance;
2124 	int i;
2125 
2126 	for (lle = list; lle != NULL; lle = lle->ll_next)
2127 		for (i = lle->ll_count - 1; i >= 0; i--) {
2128 			instance = &lle->ll_children[i];
2129 			if (instance->li_lock == lock)
2130 				return (instance);
2131 		}
2132 	return (NULL);
2133 }
2134 
2135 static void
2136 witness_list_lock(struct lock_instance *instance,
2137     int (*prnt)(const char *fmt, ...))
2138 {
2139 	struct lock_object *lock;
2140 
2141 	lock = instance->li_lock;
2142 	prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2143 	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2144 	if (lock->lo_witness->w_name != lock->lo_name)
2145 		prnt(" (%s)", lock->lo_witness->w_name);
2146 	prnt(" r = %d (%p) locked @ %s:%d\n",
2147 	    instance->li_flags & LI_RECURSEMASK, lock,
2148 	    fixup_filename(instance->li_file), instance->li_line);
2149 }
2150 
2151 #ifdef DDB
2152 static int
2153 witness_thread_has_locks(struct thread *td)
2154 {
2155 
2156 	if (td->td_sleeplocks == NULL)
2157 		return (0);
2158 	return (td->td_sleeplocks->ll_count != 0);
2159 }
2160 
2161 static int
2162 witness_proc_has_locks(struct proc *p)
2163 {
2164 	struct thread *td;
2165 
2166 	FOREACH_THREAD_IN_PROC(p, td) {
2167 		if (witness_thread_has_locks(td))
2168 			return (1);
2169 	}
2170 	return (0);
2171 }
2172 #endif
2173 
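/*
 * Print every lock instance on the given lock list via 'prnt' and return
 * the number of locks listed.
 */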
2174 int
2175 witness_list_locks(struct lock_list_entry **lock_list,
2176     int (*prnt)(const char *fmt, ...))
2177 {
2178 	struct lock_list_entry *lle;
2179 	int i, nheld;
2180 
2181 	nheld = 0;
2182 	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2183 		for (i = lle->ll_count - 1; i >= 0; i--) {
2184 			witness_list_lock(&lle->ll_children[i], prnt);
2185 			nheld++;
2186 		}
2187 	return (nheld);
2188 }
2189 
2190 /*
2191  * This is a bit risky at best.  We call this function when we have timed
2192  * out acquiring a spin lock, and we assume that the other CPU is stuck
2193  * with this lock held.  So, we go groveling around in the other CPU's
2194  * per-cpu data to try to find the lock instance for this spin lock to
2195  * see when it was last acquired.
2196  */
2197 void
2198 witness_display_spinlock(struct lock_object *lock, struct thread *owner,
2199     int (*prnt)(const char *fmt, ...))
2200 {
2201 	struct lock_instance *instance;
2202 	struct pcpu *pc;
2203 
2204 	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2205 		return;
2206 	pc = pcpu_find(owner->td_oncpu);
2207 	instance = find_instance(pc->pc_spinlocks, lock);
2208 	if (instance != NULL)
2209 		witness_list_lock(instance, prnt);
2210 }
2211 
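/*
 * Save the file and line at which the given lock was last acquired so
 * that they can later be reinstated with witness_restore(), e.g. by code
 * that must temporarily drop and re-acquire the lock.
 */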
2212 void
2213 witness_save(struct lock_object *lock, const char **filep, int *linep)
2214 {
2215 	struct lock_list_entry *lock_list;
2216 	struct lock_instance *instance;
2217 	struct lock_class *class;
2218 
2219 	/*
2220 	 * This function is used independently in locking code to deal with
2221 	 * Giant; the SCHEDULER_STOPPED() check can be removed here after
2222 	 * Giant is gone.
2223 	 */
2224 	if (SCHEDULER_STOPPED())
2225 		return;
2226 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2227 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2228 		return;
2229 	class = LOCK_CLASS(lock);
2230 	if (class->lc_flags & LC_SLEEPLOCK)
2231 		lock_list = curthread->td_sleeplocks;
2232 	else {
2233 		if (witness_skipspin)
2234 			return;
2235 		lock_list = PCPU_GET(spinlocks);
2236 	}
2237 	instance = find_instance(lock_list, lock);
2238 	if (instance == NULL) {
2239 		kassert_panic("%s: lock (%s) %s not locked", __func__,
2240 		    class->lc_name, lock->lo_name);
2241 		return;
2242 	}
2243 	*filep = instance->li_file;
2244 	*linep = instance->li_line;
2245 }
2246 
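/*
 * Restore previously saved file/line acquisition data for the given lock,
 * updating both the witness and the current lock instance.
 */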
2247 void
2248 witness_restore(struct lock_object *lock, const char *file, int line)
2249 {
2250 	struct lock_list_entry *lock_list;
2251 	struct lock_instance *instance;
2252 	struct lock_class *class;
2253 
2254 	/*
2255 	 * This function is used independently in locking code to deal with
2256 	 * Giant; the SCHEDULER_STOPPED() check can be removed here after
2257 	 * Giant is gone.
2258 	 */
2259 	if (SCHEDULER_STOPPED())
2260 		return;
2261 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2262 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2263 		return;
2264 	class = LOCK_CLASS(lock);
2265 	if (class->lc_flags & LC_SLEEPLOCK)
2266 		lock_list = curthread->td_sleeplocks;
2267 	else {
2268 		if (witness_skipspin)
2269 			return;
2270 		lock_list = PCPU_GET(spinlocks);
2271 	}
2272 	instance = find_instance(lock_list, lock);
2273 	if (instance == NULL)
2274 		kassert_panic("%s: lock (%s) %s not locked", __func__,
2275 		    class->lc_name, lock->lo_name);
2276 	lock->lo_witness->w_file = file;
2277 	lock->lo_witness->w_line = line;
2278 	if (instance == NULL)
2279 		return;
2280 	instance->li_file = file;
2281 	instance->li_line = line;
2282 }
2283 
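/*
 * Assert that the given lock is in the state described by 'flags'
 * (LA_UNLOCKED, LA_LOCKED, LA_SLOCKED, LA_XLOCKED, optionally combined
 * with LA_RECURSED or LA_NOTRECURSED) and panic on a mismatch.  This only
 * does anything when INVARIANT_SUPPORT is configured.
 */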
2284 void
2285 witness_assert(const struct lock_object *lock, int flags, const char *file,
2286     int line)
2287 {
2288 #ifdef INVARIANT_SUPPORT
2289 	struct lock_instance *instance;
2290 	struct lock_class *class;
2291 
2292 	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
2293 		return;
2294 	class = LOCK_CLASS(lock);
2295 	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
2296 		instance = find_instance(curthread->td_sleeplocks, lock);
2297 	else if ((class->lc_flags & LC_SPINLOCK) != 0)
2298 		instance = find_instance(PCPU_GET(spinlocks), lock);
2299 	else {
2300 		kassert_panic("Lock (%s) %s is not sleep or spin!",
2301 		    class->lc_name, lock->lo_name);
2302 		return;
2303 	}
2304 	switch (flags) {
2305 	case LA_UNLOCKED:
2306 		if (instance != NULL)
2307 			kassert_panic("Lock (%s) %s locked @ %s:%d.",
2308 			    class->lc_name, lock->lo_name,
2309 			    fixup_filename(file), line);
2310 		break;
2311 	case LA_LOCKED:
2312 	case LA_LOCKED | LA_RECURSED:
2313 	case LA_LOCKED | LA_NOTRECURSED:
2314 	case LA_SLOCKED:
2315 	case LA_SLOCKED | LA_RECURSED:
2316 	case LA_SLOCKED | LA_NOTRECURSED:
2317 	case LA_XLOCKED:
2318 	case LA_XLOCKED | LA_RECURSED:
2319 	case LA_XLOCKED | LA_NOTRECURSED:
2320 		if (instance == NULL) {
2321 			kassert_panic("Lock (%s) %s not locked @ %s:%d.",
2322 			    class->lc_name, lock->lo_name,
2323 			    fixup_filename(file), line);
2324 			break;
2325 		}
2326 		if ((flags & LA_XLOCKED) != 0 &&
2327 		    (instance->li_flags & LI_EXCLUSIVE) == 0)
2328 			kassert_panic(
2329 			    "Lock (%s) %s not exclusively locked @ %s:%d.",
2330 			    class->lc_name, lock->lo_name,
2331 			    fixup_filename(file), line);
2332 		if ((flags & LA_SLOCKED) != 0 &&
2333 		    (instance->li_flags & LI_EXCLUSIVE) != 0)
2334 			kassert_panic(
2335 			    "Lock (%s) %s exclusively locked @ %s:%d.",
2336 			    class->lc_name, lock->lo_name,
2337 			    fixup_filename(file), line);
2338 		if ((flags & LA_RECURSED) != 0 &&
2339 		    (instance->li_flags & LI_RECURSEMASK) == 0)
2340 			kassert_panic("Lock (%s) %s not recursed @ %s:%d.",
2341 			    class->lc_name, lock->lo_name,
2342 			    fixup_filename(file), line);
2343 		if ((flags & LA_NOTRECURSED) != 0 &&
2344 		    (instance->li_flags & LI_RECURSEMASK) != 0)
2345 			kassert_panic("Lock (%s) %s recursed @ %s:%d.",
2346 			    class->lc_name, lock->lo_name,
2347 			    fixup_filename(file), line);
2348 		break;
2349 	default:
2350 		kassert_panic("Invalid lock assertion at %s:%d.",
2351 		    fixup_filename(file), line);
2352 
2353 	}
2354 #endif	/* INVARIANT_SUPPORT */
2355 }
2356 
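/*
 * Set or clear a flag on the current thread's (or CPU's) instance of the
 * given lock.  This is the backend for witness_norelease() and
 * witness_releaseok().
 */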
2357 static void
2358 witness_setflag(struct lock_object *lock, int flag, int set)
2359 {
2360 	struct lock_list_entry *lock_list;
2361 	struct lock_instance *instance;
2362 	struct lock_class *class;
2363 
2364 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2365 		return;
2366 	class = LOCK_CLASS(lock);
2367 	if (class->lc_flags & LC_SLEEPLOCK)
2368 		lock_list = curthread->td_sleeplocks;
2369 	else {
2370 		if (witness_skipspin)
2371 			return;
2372 		lock_list = PCPU_GET(spinlocks);
2373 	}
2374 	instance = find_instance(lock_list, lock);
2375 	if (instance == NULL) {
2376 		kassert_panic("%s: lock (%s) %s not locked", __func__,
2377 		    class->lc_name, lock->lo_name);
2378 		return;
2379 	}
2380 
2381 	if (set)
2382 		instance->li_flags |= flag;
2383 	else
2384 		instance->li_flags &= ~flag;
2385 }
2386 
2387 void
2388 witness_norelease(struct lock_object *lock)
2389 {
2390 
2391 	witness_setflag(lock, LI_NORELEASE, 1);
2392 }
2393 
2394 void
2395 witness_releaseok(struct lock_object *lock)
2396 {
2397 
2398 	witness_setflag(lock, LI_NORELEASE, 0);
2399 }
2400 
2401 #ifdef DDB
2402 static void
2403 witness_ddb_list(struct thread *td)
2404 {
2405 
2406 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2407 	KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2408 
2409 	if (witness_watch < 1)
2410 		return;
2411 
2412 	witness_list_locks(&td->td_sleeplocks, db_printf);
2413 
2414 	/*
2415 	 * We only handle spinlocks if td == curthread.  This is somewhat broken
2416 	 * if td is currently executing on some other CPU and holds spin locks
2417 	 * as we won't display those locks.  If we had an MI way of getting
2418 	 * the per-cpu data for a given cpu then we could use
2419 	 * td->td_oncpu to get the list of spinlocks for this thread
2420 	 * and "fix" this.
2421 	 *
2422 	 * That still wouldn't really fix this unless we locked the scheduler
2423 	 * lock or stopped the other CPU to make sure it wasn't changing the
2424 	 * list out from under us.  It is probably best to just not try to
2425 	 * handle threads on other CPUs for now.
2426 	 */
2427 	if (td == curthread && PCPU_GET(spinlocks) != NULL)
2428 		witness_list_locks(PCPU_PTR(spinlocks), db_printf);
2429 }
2430 
2431 DB_SHOW_COMMAND(locks, db_witness_list)
2432 {
2433 	struct thread *td;
2434 
2435 	if (have_addr)
2436 		td = db_lookup_thread(addr, TRUE);
2437 	else
2438 		td = kdb_thread;
2439 	witness_ddb_list(td);
2440 }
2441 
2442 DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
2443 {
2444 	struct thread *td;
2445 	struct proc *p;
2446 
2447 	/*
2448 	 * It would be nice to list only threads and processes that actually
2449 	 * hold sleep locks, but that information is currently not exported
2450 	 * by WITNESS.
2451 	 */
2452 	FOREACH_PROC_IN_SYSTEM(p) {
2453 		if (!witness_proc_has_locks(p))
2454 			continue;
2455 		FOREACH_THREAD_IN_PROC(p, td) {
2456 			if (!witness_thread_has_locks(td))
2457 				continue;
2458 			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2459 			    p->p_comm, td, td->td_tid);
2460 			witness_ddb_list(td);
2461 			if (db_pager_quit)
2462 				return;
2463 		}
2464 	}
2465 }
2466 DB_SHOW_ALIAS(alllocks, db_witness_list_all)
2467 
2468 DB_SHOW_COMMAND(witness, db_witness_display)
2469 {
2470 
2471 	witness_ddb_display(db_printf);
2472 }
2473 #endif
2474 
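/*
 * Sysctl handler that reports every recorded lock order reversal along
 * with the stack traces at which each of the two orderings was first
 * seen.
 */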
2475 static int
2476 sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
2477 {
2478 	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
2479 	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
2480 	struct sbuf *sb;
2481 	u_int w_rmatrix1, w_rmatrix2;
2482 	int error, generation, i, j;
2483 
2484 	tmp_data1 = NULL;
2485 	tmp_data2 = NULL;
2486 	tmp_w1 = NULL;
2487 	tmp_w2 = NULL;
2488 	if (witness_watch < 1) {
2489 		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2490 		return (error);
2491 	}
2492 	if (witness_cold) {
2493 		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2494 		return (error);
2495 	}
2496 	error = 0;
2497 	sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND);
2498 	if (sb == NULL)
2499 		return (ENOMEM);
2500 
2501 	/* Allocate and init temporary storage space. */
2502 	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2503 	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2504 	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2505 	    M_WAITOK | M_ZERO);
2506 	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2507 	    M_WAITOK | M_ZERO);
2508 	stack_zero(&tmp_data1->wlod_stack);
2509 	stack_zero(&tmp_data2->wlod_stack);
2510 
2511 restart:
2512 	mtx_lock_spin(&w_mtx);
2513 	generation = w_generation;
2514 	mtx_unlock_spin(&w_mtx);
2515 	sbuf_printf(sb, "Number of known direct relationships is %d\n",
2516 	    w_lohash.wloh_count);
2517 	for (i = 1; i <= w_max_used_index; i++) {
2518 		mtx_lock_spin(&w_mtx);
2519 		if (generation != w_generation) {
2520 			mtx_unlock_spin(&w_mtx);
2521 
2522 			/* The graph has changed, try again. */
2523 			req->oldidx = 0;
2524 			sbuf_clear(sb);
2525 			goto restart;
2526 		}
2527 
2528 		w1 = &w_data[i];
2529 		if (w1->w_reversed == 0) {
2530 			mtx_unlock_spin(&w_mtx);
2531 			continue;
2532 		}
2533 
2534 		/* Copy w1 locally so we can release the spin lock. */
2535 		*tmp_w1 = *w1;
2536 		mtx_unlock_spin(&w_mtx);
2537 
2538 		if (tmp_w1->w_reversed == 0)
2539 			continue;
2540 		for (j = 1; j <= w_max_used_index; j++) {
2541 			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
2542 				continue;
2543 
2544 			mtx_lock_spin(&w_mtx);
2545 			if (generation != w_generation) {
2546 				mtx_unlock_spin(&w_mtx);
2547 
2548 				/* The graph has changed, try again. */
2549 				req->oldidx = 0;
2550 				sbuf_clear(sb);
2551 				goto restart;
2552 			}
2553 
2554 			w2 = &w_data[j];
2555 			data1 = witness_lock_order_get(w1, w2);
2556 			data2 = witness_lock_order_get(w2, w1);
2557 
2558 			/*
2559 			 * Copy information locally so we can release the
2560 			 * spin lock.
2561 			 */
2562 			*tmp_w2 = *w2;
2563 			w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
2564 			w_rmatrix2 = (unsigned int)w_rmatrix[j][i];
2565 
2566 			if (data1) {
2567 				stack_zero(&tmp_data1->wlod_stack);
2568 				stack_copy(&data1->wlod_stack,
2569 				    &tmp_data1->wlod_stack);
2570 			}
2571 			if (data2 && data2 != data1) {
2572 				stack_zero(&tmp_data2->wlod_stack);
2573 				stack_copy(&data2->wlod_stack,
2574 				    &tmp_data2->wlod_stack);
2575 			}
2576 			mtx_unlock_spin(&w_mtx);
2577 
2578 			sbuf_printf(sb,
2579 	    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
2580 			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2581 			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2582 #if 0
2583  			sbuf_printf(sb,
2584 			"w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
2585  			    tmp_w1->name, tmp_w2->w_name, w_rmatrix1,
2586  			    tmp_w2->name, tmp_w1->w_name, w_rmatrix2);
2587 #endif
2588 			if (data1) {
2589 				sbuf_printf(sb,
2590 			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2591 				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2592 				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2593 				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
2594 				sbuf_printf(sb, "\n");
2595 			}
2596 			if (data2 && data2 != data1) {
2597 				sbuf_printf(sb,
2598 			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2599 				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
2600 				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
2601 				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
2602 				sbuf_printf(sb, "\n");
2603 			}
2604 		}
2605 	}
2606 	mtx_lock_spin(&w_mtx);
2607 	if (generation != w_generation) {
2608 		mtx_unlock_spin(&w_mtx);
2609 
2610 		/*
2611 		 * The graph changed while we were printing stack data,
2612 		 * try again.
2613 		 */
2614 		req->oldidx = 0;
2615 		sbuf_clear(sb);
2616 		goto restart;
2617 	}
2618 	mtx_unlock_spin(&w_mtx);
2619 
2620 	/* Free temporary storage space. */
2621 	free(tmp_data1, M_TEMP);
2622 	free(tmp_data2, M_TEMP);
2623 	free(tmp_w1, M_TEMP);
2624 	free(tmp_w2, M_TEMP);
2625 
2626 	sbuf_finish(sb);
2627 	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2628 	sbuf_delete(sb);
2629 
2630 	return (error);
2631 }
2632 
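/*
 * Sysctl handler that dumps the entire lock order graph as a list of
 * "parent","child" witness name pairs.
 */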
2633 static int
2634 sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
2635 {
2636 	struct witness *w;
2637 	struct sbuf *sb;
2638 	int error;
2639 
2640 	if (witness_watch < 1) {
2641 		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2642 		return (error);
2643 	}
2644 	if (witness_cold) {
2645 		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2646 		return (error);
2647 	}
2648 	error = 0;
2649 
2650 	error = sysctl_wire_old_buffer(req, 0);
2651 	if (error != 0)
2652 		return (error);
2653 	sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req);
2654 	if (sb == NULL)
2655 		return (ENOMEM);
2656 	sbuf_printf(sb, "\n");
2657 
2658 	mtx_lock_spin(&w_mtx);
2659 	STAILQ_FOREACH(w, &w_all, w_list)
2660 		w->w_displayed = 0;
2661 	STAILQ_FOREACH(w, &w_all, w_list)
2662 		witness_add_fullgraph(sb, w);
2663 	mtx_unlock_spin(&w_mtx);
2664 
2665 	/*
2666 	 * Close the sbuf and return to userland.
2667 	 */
2668 	error = sbuf_finish(sb);
2669 	sbuf_delete(sb);
2670 
2671 	return (error);
2672 }
2673 
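/*
 * Sysctl handler for the witness_watch knob.  Values outside [-1, 1] are
 * rejected, and once witness has been disabled with -1 it cannot be
 * turned back on.
 */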
2674 static int
2675 sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
2676 {
2677 	int error, value;
2678 
2679 	value = witness_watch;
2680 	error = sysctl_handle_int(oidp, &value, 0, req);
2681 	if (error != 0 || req->newptr == NULL)
2682 		return (error);
2683 	if (value > 1 || value < -1 ||
2684 	    (witness_watch == -1 && value != witness_watch))
2685 		return (EINVAL);
2686 	witness_watch = value;
2687 	return (0);
2688 }
2689 
2690 static void
2691 witness_add_fullgraph(struct sbuf *sb, struct witness *w)
2692 {
2693 	int i;
2694 
2695 	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
2696 		return;
2697 	w->w_displayed = 1;
2698 
2699 	WITNESS_INDEX_ASSERT(w->w_index);
2700 	for (i = 1; i <= w_max_used_index; i++) {
2701 		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
2702 			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
2703 			    w_data[i].w_name);
2704 			witness_add_fullgraph(sb, &w_data[i]);
2705 		}
2706 	}
2707 }
2708 
2709 /*
2710  * A simple hash function. Takes a key pointer and a key size. If size == 0,
2711  * interprets the key as a string and reads until the null
2712  * terminator. Otherwise, reads the first size bytes. Returns an unsigned 32-bit
2713  * hash value computed from the key.
2714  */
2715 static uint32_t
2716 witness_hash_djb2(const uint8_t *key, uint32_t size)
2717 {
2718 	unsigned int hash = 5381;
2719 	int i;
2720 
2721 	/* hash = hash * 33 + key[i] */
2722 	if (size)
2723 		for (i = 0; i < size; i++)
2724 			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2725 	else
2726 		for (i = 0; key[i] != 0; i++)
2727 			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2728 
2729 	return (hash);
2730 }
2731 
2732 
2733 /*
2734  * Initializes the two witness hash tables. Called exactly once from
2735  * witness_initialize().
2736  */
2737 static void
2738 witness_init_hash_tables(void)
2739 {
2740 	int i;
2741 
2742 	MPASS(witness_cold);
2743 
2744 	/* Initialize the hash tables. */
2745 	for (i = 0; i < WITNESS_HASH_SIZE; i++)
2746 		w_hash.wh_array[i] = NULL;
2747 
2748 	w_hash.wh_size = WITNESS_HASH_SIZE;
2749 	w_hash.wh_count = 0;
2750 
2751 	/* Initialize the lock order data hash. */
2752 	w_lofree = NULL;
2753 	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
2754 		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
2755 		w_lodata[i].wlod_next = w_lofree;
2756 		w_lofree = &w_lodata[i];
2757 	}
2758 	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
2759 	w_lohash.wloh_count = 0;
2760 	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
2761 		w_lohash.wloh_array[i] = NULL;
2762 }
2763 
2764 static struct witness *
2765 witness_hash_get(const char *key)
2766 {
2767 	struct witness *w;
2768 	uint32_t hash;
2769 
2770 	MPASS(key != NULL);
2771 	if (witness_cold == 0)
2772 		mtx_assert(&w_mtx, MA_OWNED);
2773 	hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
2774 	w = w_hash.wh_array[hash];
2775 	while (w != NULL) {
2776 		if (strcmp(w->w_name, key) == 0)
2777 			goto out;
2778 		w = w->w_hash_next;
2779 	}
2780 
2781 out:
2782 	return (w);
2783 }
2784 
2785 static void
2786 witness_hash_put(struct witness *w)
2787 {
2788 	uint32_t hash;
2789 
2790 	MPASS(w != NULL);
2791 	MPASS(w->w_name != NULL);
2792 	if (witness_cold == 0)
2793 		mtx_assert(&w_mtx, MA_OWNED);
2794 	KASSERT(witness_hash_get(w->w_name) == NULL,
2795 	    ("%s: trying to add a hash entry that already exists!", __func__));
2796 	KASSERT(w->w_hash_next == NULL,
2797 	    ("%s: w->w_hash_next != NULL", __func__));
2798 
2799 	hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
2800 	w->w_hash_next = w_hash.wh_array[hash];
2801 	w_hash.wh_array[hash] = w;
2802 	w_hash.wh_count++;
2803 }
2804 
2805 
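/*
 * Look up the lock order data recorded when the parent -> child order was
 * first witnessed.  Returns NULL if that order is not known or no data
 * was saved for it.
 */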
2806 static struct witness_lock_order_data *
2807 witness_lock_order_get(struct witness *parent, struct witness *child)
2808 {
2809 	struct witness_lock_order_data *data = NULL;
2810 	struct witness_lock_order_key key;
2811 	unsigned int hash;
2812 
2813 	MPASS(parent != NULL && child != NULL);
2814 	key.from = parent->w_index;
2815 	key.to = child->w_index;
2816 	WITNESS_INDEX_ASSERT(key.from);
2817 	WITNESS_INDEX_ASSERT(key.to);
2818 	if ((w_rmatrix[parent->w_index][child->w_index]
2819 	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
2820 		goto out;
2821 
2822 	hash = witness_hash_djb2((const char*)&key,
2823 	    sizeof(key)) % w_lohash.wloh_size;
2824 	data = w_lohash.wloh_array[hash];
2825 	while (data != NULL) {
2826 		if (witness_lock_order_key_equal(&data->wlod_key, &key))
2827 			break;
2828 		data = data->wlod_next;
2829 	}
2830 
2831 out:
2832 	return (data);
2833 }
2834 
2835 /*
2836  * Verify that parent and child have a known relationship, are not the same,
2837  * and child is actually a child of parent.  This is done without w_mtx
2838  * to avoid contention in the common case.
2839  */
2840 static int
2841 witness_lock_order_check(struct witness *parent, struct witness *child)
2842 {
2843 
2844 	if (parent != child &&
2845 	    w_rmatrix[parent->w_index][child->w_index]
2846 	    & WITNESS_LOCK_ORDER_KNOWN &&
2847 	    isitmychild(parent, child))
2848 		return (1);
2849 
2850 	return (0);
2851 }
2852 
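/*
 * Record that the parent -> child lock order has been witnessed, saving
 * the current stack trace for later reporting.  Returns 1 if the order
 * was already known or has been recorded, or 0 if the pool of lock order
 * data entries is exhausted.
 */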
2853 static int
2854 witness_lock_order_add(struct witness *parent, struct witness *child)
2855 {
2856 	struct witness_lock_order_data *data = NULL;
2857 	struct witness_lock_order_key key;
2858 	unsigned int hash;
2859 
2860 	MPASS(parent != NULL && child != NULL);
2861 	key.from = parent->w_index;
2862 	key.to = child->w_index;
2863 	WITNESS_INDEX_ASSERT(key.from);
2864 	WITNESS_INDEX_ASSERT(key.to);
2865 	if (w_rmatrix[parent->w_index][child->w_index]
2866 	    & WITNESS_LOCK_ORDER_KNOWN)
2867 		return (1);
2868 
2869 	hash = witness_hash_djb2((const char*)&key,
2870 	    sizeof(key)) % w_lohash.wloh_size;
2871 	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
2872 	data = w_lofree;
2873 	if (data == NULL)
2874 		return (0);
2875 	w_lofree = data->wlod_next;
2876 	data->wlod_next = w_lohash.wloh_array[hash];
2877 	data->wlod_key = key;
2878 	w_lohash.wloh_array[hash] = data;
2879 	w_lohash.wloh_count++;
2880 	stack_zero(&data->wlod_stack);
2881 	stack_save(&data->wlod_stack);
2882 	return (1);
2883 }
2884 
2885 /* Call this whenever the structure of the witness graph changes. */
2886 static void
2887 witness_increment_graph_generation(void)
2888 {
2889 
2890 	if (witness_cold == 0)
2891 		mtx_assert(&w_mtx, MA_OWNED);
2892 	w_generation++;
2893 }
2894 
2895 #ifdef KDB
2896 static void
2897 _witness_debugger(int cond, const char *msg)
2898 {
2899 
2900 	if (witness_trace && cond)
2901 		kdb_backtrace();
2902 	if (witness_kdb && cond)
2903 		kdb_enter(KDB_WHY_WITNESS, msg);
2904 }
2905 #endif
2906