/*-
 * Copyright (c) 2008 Isilon Systems, Inc.
 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
 * Copyright (c) 1998 Berkeley Software Design, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 *	Main Entry: witness
 *	Pronunciation: 'wit-n&s
 *	Function: noun
 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
 *	    testimony, witness, from 2wit
 *	Date: before 12th century
 *	1 : attestation of a fact or event : TESTIMONY
 *	2 : one that gives evidence; specifically : one who testifies in
 *	    a cause or before a judicial tribunal
 *	3 : one asked to be present at a transaction so as to be able to
 *	    testify to its having taken place
 *	4 : one who has personal knowledge of something
 *	5 a : something serving as evidence or proof : SIGN
 *	  b : public affirmation by word or example of usually
 *	      religious faith or conviction <the heroic witness to divine
 *	      life -- Pilot>
 *	6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */
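
/*
 * A minimal sketch of what the rules above permit (illustrative only and
 * not compiled; "example_sx" is a hypothetical sleepable sx lock).  Both
 * of the following sequences are legal under rules 2) and 3):
 *
 *	sx_xlock(&example_sx);		sleepable lock first
 *	mtx_lock(&Giant);		Giant after a sleepable lock is fine
 *	mtx_unlock(&Giant);
 *	sx_xunlock(&example_sx);
 *
 *	mtx_lock(&Giant);		Giant first also works, because
 *	sx_xlock(&example_sx);		blocking here drops Giant (rule 2)
 *	sx_xunlock(&example_sx);
 *	mtx_unlock(&Giant);
 */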

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_stack.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/stdarg.h>

#if !defined(DDB) && !defined(STACK)
#error "DDB or STACK options are required for WITNESS"
#endif

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif

#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT		1024
#define	WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
#define	WITNESS_PENDLIST	512

/* Allocate 256 KB of stack data space */
#define	WITNESS_LO_DATA_COUNT	2048

/* Prime, gives load factor of ~2 at full load */
#define	WITNESS_LO_HASH_SIZE	1021

/*
 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
 * will hold LOCK_NCHILDREN locks.  We handle failure gracefully, and we
 * should probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_NCHILDREN	5
#define	LOCK_CHILDCOUNT	2048

#define	MAX_W_NAME	64

#define	BADSTACK_SBUF_SIZE	(256 * WITNESS_COUNT)
#define	CYCLEGRAPH_SBUF_SIZE	8192
#define	FULLGRAPH_SBUF_SIZE	32768

/*
 * These flags go in the witness relationship matrix and describe the
 * relationship between any two struct witness objects.
 */
#define	WITNESS_UNRELATED        0x00    /* No lock order relation. */
#define	WITNESS_PARENT           0x01    /* Parent, aka direct ancestor. */
#define	WITNESS_ANCESTOR         0x02    /* Direct or indirect ancestor. */
#define	WITNESS_CHILD            0x04    /* Child, aka direct descendant. */
#define	WITNESS_DESCENDANT       0x08    /* Direct or indirect descendant. */
#define	WITNESS_ANCESTOR_MASK    (WITNESS_PARENT | WITNESS_ANCESTOR)
#define	WITNESS_DESCENDANT_MASK  (WITNESS_CHILD | WITNESS_DESCENDANT)
#define	WITNESS_RELATED_MASK						\
	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
#define	WITNESS_REVERSAL         0x10    /* A lock order reversal has been
					  * observed. */
#define	WITNESS_RESERVED1        0x20    /* Unused flag, reserved. */
#define	WITNESS_RESERVED2        0x40    /* Unused flag, reserved. */
#define	WITNESS_LOCK_ORDER_KNOWN 0x80    /* This lock order is known. */

/* Descendant to ancestor flags */
#define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)

/* Ancestor to descendant flags */
#define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)
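
/*
 * For example, WITNESS_ATOD(WITNESS_PARENT | WITNESS_ANCESTOR) yields
 * (WITNESS_CHILD | WITNESS_DESCENDANT): the same relationship as seen from
 * the other end of the edge.  This is how the flags in w_rmatrix[i][j] and
 * w_rmatrix[j][i] are kept as mirror images of each other.
 */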

#define	WITNESS_INDEX_ASSERT(i)						\
	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT)

MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");

/*
 * Lock instances.  A lock instance is the data associated with a lock while
 * it is held, as tracked by witness.  For example, a lock instance will hold
 * the recursion count of a lock.  Lock instances are held in lists.  Spin
 * locks are held in a per-CPU list while sleep locks are held in a
 * per-thread list.
 */
struct lock_instance {
	struct lock_object	*li_lock;
	const char		*li_file;
	int			li_line;
	u_int			li_flags;
};

/*
 * A simple list type used to build the list of locks held by a thread
 * or CPU.  We can't simply embed the list in struct lock_object since a
 * lock may be held by more than one thread if it is a shared lock.  Locks
 * are added to the head of the list, so we fill up each list entry from
 * "the back" logically.  To ease some of the arithmetic, we actually fill
 * in each list entry the normal way (children[0] then children[1], etc.) but
 * when we traverse the list we read children[count-1] as the first entry
 * down to children[0] as the final entry.
 */
struct lock_list_entry {
	struct lock_list_entry	*ll_next;
	struct lock_instance	ll_children[LOCK_NCHILDREN];
	u_int			ll_count;
};
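
/*
 * A minimal sketch (not compiled; "td" stands in for a struct thread
 * pointer) of the traversal described above: walking a thread's sleep lock
 * list from the most recently acquired lock down to the oldest.
 */
#if 0
	struct lock_list_entry *lle;
	int i;

	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--)
			witness_list_lock(&lle->ll_children[i]);
#endif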

/*
 * The main witness structure. One of these per named lock type in the system
 * (for example, "vnode interlock").
 */
struct witness {
	char  			w_name[MAX_W_NAME];
	uint32_t 		w_index;  /* Index in the relationship matrix */
	struct lock_class	*w_class;
	STAILQ_ENTRY(witness) 	w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness) 	w_typelist;	/* Witnesses of a type. */
	struct witness		*w_hash_next; /* Linked list in hash buckets. */
	const char		*w_file; /* File where last acquired */
	uint32_t 		w_line; /* Line where last acquired */
	uint32_t 		w_refcount;
	uint16_t 		w_num_ancestors; /* direct/indirect
						  * ancestor count */
	uint16_t 		w_num_descendants; /* direct/indirect
						    * descendant count */
	int16_t 		w_ddb_level;
	int 			w_displayed:1;
	int 			w_reversed:1;
};

STAILQ_HEAD(witness_list, witness);

/*
 * The witness hash table. Keys are witness names (const char *), elements are
 * witness objects (struct witness *).
 */
struct witness_hash {
	struct witness	*wh_array[WITNESS_HASH_SIZE];
	uint32_t	wh_size;
	uint32_t	wh_count;
};
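
/*
 * Name lookups hash the key with witness_hash_djb2(), which, as the name
 * suggests, follows the classic djb2 recurrence (hash = hash * 33 + byte,
 * seeded with 5381); callers reduce the result modulo wh_size to pick a
 * bucket.
 */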

/*
 * Key type for the lock order data hash table.
 */
struct witness_lock_order_key {
	uint16_t	from;
	uint16_t	to;
};

struct witness_lock_order_data {
	struct stack			wlod_stack;
	struct witness_lock_order_key	wlod_key;
	struct witness_lock_order_data	*wlod_next;
};

/*
 * The witness lock order data hash table. Keys are witness index tuples
 * (struct witness_lock_order_key), elements are lock order data objects
 * (struct witness_lock_order_data).
 */
struct witness_lock_order_hash {
	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
	u_int	wloh_size;
	u_int	wloh_count;
};

#ifdef BLESSING
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

struct witness_pendhelp {
	const char		*wh_type;
	struct lock_object	*wh_lock;
};

struct witness_order_list_entry {
	const char		*w_name;
	struct lock_class	*w_class;
};

/*
 * Returns 0 if one of the locks is a spin lock and the other is not.
 * Returns 1 otherwise.
 */
static __inline int
witness_lock_type_equal(struct witness *w1, struct witness *w2)
{

	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
		(w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
}

static __inline int
witness_lock_order_key_empty(const struct witness_lock_order_key *key)
{

	return (key->from == 0 && key->to == 0);
}

static __inline int
witness_lock_order_key_equal(const struct witness_lock_order_key *a,
    const struct witness_lock_order_key *b)
{

	return (a->from == b->from && a->to == b->to);
}

static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
		    const char *fname);
#ifdef KDB
static void	_witness_debugger(int cond, const char *msg);
#endif
static void	adopt(struct witness *parent, struct witness *child);
#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static void	depart(struct witness *w);
static struct witness	*enroll(const char *description,
			    struct lock_class *lock_class);
static struct lock_instance	*find_instance(struct lock_list_entry *list,
				    struct lock_object *lock);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static void	itismychild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
#ifdef DDB
static void	witness_ddb_compute_levels(void);
static void	witness_ddb_display(void(*)(const char *fmt, ...));
static void	witness_ddb_display_descendants(void(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_ddb_display_list(void(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_ddb_level_descendants(struct witness *parent, int l);
static void	witness_ddb_list(struct thread *td);
#endif
static void	witness_free(struct witness *m);
static struct witness	*witness_get(void);
static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
static struct witness	*witness_hash_get(const char *key);
static void	witness_hash_put(struct witness *w);
static void	witness_init_hash_tables(void);
static void	witness_increment_graph_generation(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_list_entry	*witness_lock_list_get(void);
static int	witness_lock_order_add(struct witness *parent,
		    struct witness *child);
static int	witness_lock_order_check(struct witness *parent,
		    struct witness *child);
static struct witness_lock_order_data	*witness_lock_order_get(
					    struct witness *parent,
					    struct witness *child);
static void	witness_list_lock(struct lock_instance *instance);

#ifdef KDB
#define	witness_debugger(c)	_witness_debugger(c, __func__)
#else
#define	witness_debugger(c)
#endif

SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");

/*
 * If set to 0, lock order checking is disabled.  If set to -1,
 * witness is completely disabled.  Otherwise witness performs full
 * lock order checking for all locks.  At runtime, lock order checking
 * may be toggled.  However, witness cannot be reenabled once it is
 * completely disabled.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
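
/*
 * For example, from userland on a running WITNESS kernel:
 *	sysctl debug.witness.watch=0	disables lock order checking
 *	sysctl debug.witness.watch=-1	disables witness permanently
 */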

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is 1, it will cause the system
 * to drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
    0, "");

/*
 * Call this to print out the relations between locks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");

/*
 * Call this to print out the stacks recorded for observed lock order
 * violations.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");

static struct mtx w_mtx;

/* w_list */
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);

/* w_typelist */
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);

/* lock list */
static struct lock_list_entry *w_lock_list_free = NULL;
static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
static u_int pending_cnt;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");

static struct witness *w_data;
static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
static struct witness_hash w_hash;	/* The witness hash table. */

/* The lock order data hash */
static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
static struct witness_lock_order_data *w_lofree = NULL;
static struct witness_lock_order_hash w_lohash;
static int w_max_used_index = 0;
static unsigned int w_generation = 0;
static const char *w_notrunning = "Witness not running\n";
static const char *w_stillcold = "Witness is still cold\n";

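/*
 * Each run of entries up to a { NULL, NULL } separator describes one
 * ordering chain: a lock named earlier in the run must be acquired before
 * any lock named later in it.  A second consecutive { NULL, NULL } entry
 * terminates the table.
 */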
static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "allprison", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_rw },
#ifdef	HWPMC_HOOKS
	{ "pmc-sleep", &lock_class_mtx_sleep },
#endif
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_rw },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Multicast - protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_rw },
	{ "udpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_rw },
	{ "tcpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * SLIP
	 */
	{ "slip_mtx", &lock_class_mtx_sleep },
	{ "slip sc_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_mtx_sleep },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },

	/*
	 * IEEE 802.11
	 */
	{ "802.11 com lock", &lock_class_mtx_sleep},
	{ NULL, NULL },
	/*
	 * Network drivers
	 */
	{ "network driver", &lock_class_mtx_sleep},
	{ NULL, NULL },

	/*
	 * Netgraph
	 */
	{ "ng_node", &lock_class_mtx_sleep },
	{ "ng_worklist", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "system map", &lock_class_mtx_sleep },
	{ "vm page queue mutex", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * kqueue/VFS interaction
	 */
	{ "kqueue", &lock_class_mtx_sleep },
	{ "struct mount mtx", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
	{ "scrlock", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "pcib_mtx", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
#endif
	{ "scc_hwmtx", &lock_class_mtx_spin },
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "fast_taskqueue", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-per-proc", &lock_class_mtx_spin },
#endif
	{ "process slock", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "umtx lock", &lock_class_mtx_spin },
	{ "rm_spinlock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "turnstile lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	{ "syscons video lock", &lock_class_mtx_spin },
	{ "time lock", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#endif
#ifdef __powerpc__
	{ "tlb0", &lock_class_mtx_spin },
#endif
	/*
	 * leaf locks
	 */
	{ "intrcnt", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#if defined(SMP) && defined(__sparc64__)
	{ "ipi", &lock_class_mtx_spin },
#endif
#ifdef __i386__
	{ "allpmaps", &lock_class_mtx_spin },
	{ "descriptor tables", &lock_class_mtx_spin },
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "cpuset", &lock_class_mtx_spin },
	{ "mprof lock", &lock_class_mtx_spin },
	{ "zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-leaf", &lock_class_mtx_spin },
#endif
	{ "blocked lock", &lock_class_mtx_spin },
	{ NULL, NULL },
	{ NULL, NULL }
};

#ifdef BLESSING
/*
 * Pairs of locks which have been blessed
 * Don't complain about order problems with blessed locks
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code assumes
 * that early boot is single-threaded, at least until after this routine
 * has completed.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	w_data = malloc(sizeof(struct witness) * WITNESS_COUNT, M_WITNESS,
	    M_NOWAIT | M_ZERO);
	if (w_data == NULL)
		panic("%s: cannot allocate witness data", __func__);

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = WITNESS_COUNT - 1; i >= 0; i--) {
		w = &w_data[i];
		memset(w, 0, sizeof(*w));
		w_data[i].w_index = i;	/* Witness index never changes. */
		witness_free(w);
	}
	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
	    ("%s: Invalid list of free witness objects", __func__));

	/* The witness at index 0 is deliberately left unused as a debugging aid. */
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;

	memset(w_rmatrix, 0,
	    (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));

	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);
	witness_init_hash_tables();

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			itismychild(w, w1);
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
		lock = pending_locks[i].wh_lock;
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(pending_locks[i].wh_type,
		    LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
    NULL);

void
witness_init(struct lock_object *lock, const char *type)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_initialize() by adding
	 * it to the pending_locks list.  If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch < 1 || panicstr != NULL ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		if (pending_cnt == WITNESS_PENDLIST)
			panic("%s: pending locks list is too small, bump it\n",
			    __func__);
		pending_locks[pending_cnt].wh_lock = lock;
		pending_locks[pending_cnt++].wh_type = type;
	} else
		lock->lo_witness = enroll(type, class);
}

void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
		return;
	w = lock->lo_witness;

	mtx_lock_spin(&w_mtx);
	MPASS(w->w_refcount > 0);
	w->w_refcount--;

	if (w->w_refcount == 0)
		depart(w);
	mtx_unlock_spin(&w_mtx);
}

#ifdef DDB
static void
witness_ddb_compute_levels(void)
{
	struct witness *w;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_ddb_level = -1;

	/*
	 * Look for locks with no parents and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {

		/* If the witness has ancestors (is not a root), skip it. */
		if (w->w_num_ancestors > 0)
			continue;
		witness_ddb_level_descendants(w, 0);
	}
}

static void
witness_ddb_level_descendants(struct witness *w, int l)
{
	int i;

	if (w->w_ddb_level >= l)
		return;

	w->w_ddb_level = l;
	l++;

	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_level_descendants(&w_data[i], l);
	}
}

static void
witness_ddb_display_descendants(void(*prnt)(const char *fmt, ...),
    struct witness *w, int indent)
{
	int i;

	for (i = 0; i < indent; i++)
		prnt(" ");
	prnt("%s (type: %s, depth: %d, active refs: %d)",
	     w->w_name, w->w_class->lc_name,
	     w->w_ddb_level, w->w_refcount);
	if (w->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	w->w_displayed = 1;
	if (w->w_file != NULL && w->w_line != 0)
		prnt(" -- last acquired @ %s:%d\n", w->w_file,
		    w->w_line);
	else
		prnt(" -- never acquired\n");
	indent++;
	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_display_descendants(prnt, &w_data[i],
			    indent);
	}
}

static void
witness_ddb_display_list(void(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_ddb_level > 0)
			continue;

		/* This lock has no ancestors - display its descendants. */
		witness_ddb_display_descendants(prnt, w, 0);
	}
}

static void
witness_ddb_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	witness_ddb_compute_levels();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_ddb_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_ddb_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s (type: %s, depth: %d)\n", w->w_name,
		    w->w_class->lc_name, w->w_ddb_level);
	}
}
#endif /* DDB */

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == -1 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	mtx_assert(&w_mtx, MA_NOTOWNED);
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (witness_watch &&
	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
	itismychild(lock1->lo_witness, lock2->lo_witness);
	mtx_unlock_spin(&w_mtx);
	return (0);
}
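
/*
 * A minimal usage sketch (not compiled; parent_mtx and child_mtx are
 * hypothetical struct mtx instances): a subsystem can pre-declare an
 * ordering instead of waiting for witness to infer it from acquisition
 * history.
 */
#if 0
	int error;

	error = witness_defineorder(&parent_mtx.lock_object,
	    &child_mtx.lock_object);
	if (error == EDOOFUS)
		printf("requested order contradicts an existing one\n");
#endif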

void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line, struct lock_object *interlock)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1, *lock2, *plock;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	w = lock->lo_witness;
	class = LOCK_CLASS(lock);
	td = curthread;
	file = fixup_filename(file);

	if (class->lc_flags & LC_SLEEPLOCK) {

		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		lock_list = td->td_sleeplocks;
		if (lock_list == NULL || lock_list->ll_count == 0)
			return;
	} else {

		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  Pin the thread to avoid problems
		 * with migration while checking whether any spinlocks are
		 * held.  If at least one spinlock is held, the thread is
		 * on a safe path and may be unpinned.
		 */
		sched_pin();
		lock_list = PCPU_GET(spinlocks);
		if (lock_list == NULL || lock_list->ll_count == 0) {
			sched_unpin();
			return;
		}
		sched_unpin();
	}

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Find the previously acquired lock, but ignore interlocks.
	 */
	plock = &lock_list->ll_children[lock_list->ll_count - 1];
	if (interlock != NULL && plock->li_lock == interlock) {
		if (lock_list->ll_count > 1)
			plock =
			    &lock_list->ll_children[lock_list->ll_count - 2];
		else {
			lle = lock_list->ll_next;

			/*
			 * The interlock is the only lock we hold, so
			 * simply return.
			 */
			if (lle == NULL)
				return;
			plock = &lle->ll_children[lle->ll_count - 1];
		}
	}

	/*
	 * Try to perform most checks without a lock.  If this succeeds we
	 * can skip acquiring the lock and return success.
	 */
	w1 = plock->li_lock->lo_witness;
	if (witness_lock_order_check(w1, w))
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	mtx_lock_spin(&w_mtx);
	witness_lock_order_add(w1, w);
	if (w1 == w) {
		i = w->w_index;
		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
			w_rmatrix[i][i] |= WITNESS_REVERSAL;
			w->w_reversed = 1;
			mtx_unlock_spin(&w_mtx);
			printf(
			    "acquiring duplicate lock of same type: \"%s\"\n",
			    w->w_name);
			printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
			    plock->li_file, plock->li_line);
			printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
			witness_debugger(1);
		} else
			mtx_unlock_spin(&w_mtx);
		return;
	}
	mtx_assert(&w_mtx, MA_OWNED);

	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w))
		goto out;

	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];

			/*
			 * Ignore the interlock the first time we see it.
			 */
			if (interlock != NULL && interlock == lock1->li_lock) {
				interlock = NULL;
				continue;
			}

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			w1 = lock1->li_lock->lo_witness;
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}

			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;

			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.lock_object)
				goto reversal;

			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:

			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
#ifdef BLESSING

			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				goto out;
#endif

			/* Bail if this violation is known */
			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
				goto out;

			/* Record this as a violation */
			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
			w->w_reversed = w1->w_reversed = 1;
			witness_increment_graph_generation();
			mtx_unlock_spin(&w_mtx);

			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.lock_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");

			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, lock1->li_file, lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name, file, line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_witness->w_name,
				    lock2->li_file, lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, lock1->li_file, lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name, file, line);
			}
			witness_debugger(1);
			return;
		}
	}

	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(plock->li_lock == &Giant.lock_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    w->w_name, plock->li_lock->lo_witness->w_name);
		itismychild(plock->li_lock->lo_witness, w);
	}
out:
	mtx_unlock_spin(&w_mtx);
}

void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;
	file = fixup_filename(file);

	/* Determine lock list for this lock. */
	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
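		/*
		 * The recursion count lives in the low bits of li_flags
		 * (LI_RECURSEMASK), so a plain increment bumps the count.
		 */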
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("upgrade of exclusive lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK, file, line);
	}
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("downgrade of shared lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK, file, line);
	}
	instance->li_flags &= ~LI_EXCLUSIVE;
}

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
		return;
	td = curthread;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	lle = *lock_list;
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}

	/*
	 * When WITNESS is disabled via witness_watch, locks can remain
	 * registered in the td_sleeplocks queue.  Make sure such queues
	 * get flushed by searching for any leftover registered locks and
	 * removing them.
	 */
	if (witness_watch > 0)
		panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
		    lock->lo_name, file, line);
	else
		return;
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while exclusively locked from %s:%d\n",
		    instance->li_file, instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while share locked from %s:%d\n", instance->li_file,
		    instance->li_line);
		panic("share->uexcl");
	}

	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/*
	 * To reduce contention on w_mtx, we always try to keep a head
	 * object in each list so that frequent allocation from the free
	 * witness pool (and the subsequent locking) is avoided.  To keep
	 * the code simple, an empty head object also implies that there
	 * are no further objects in the list, so list ownership must be
	 * handed over to another object whenever the current head is to
	 * be freed.
	 */
	if ((*lock_list)->ll_count == 0) {
		if (*lock_list == lle) {
			if (lle->ll_next == NULL)
				return;
		} else
			lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

void
witness_thread_exit(struct thread *td)
{
	struct lock_list_entry *lle;
	int i, n;

	lle = td->td_sleeplocks;
	if (lle == NULL || panicstr != NULL)
		return;
	if (lle->ll_count != 0) {
		for (n = 0; lle != NULL; lle = lle->ll_next)
			for (i = lle->ll_count - 1; i >= 0; i--) {
				if (n == 0)
		printf("Thread %p exiting with the following locks held:\n",
					    td);
				n++;
				witness_list_lock(&lle->ll_children[i]);
			}
		panic("Thread %p cannot exit while holding sleeplocks\n", td);
	}
	witness_lock_list_free(lle);
}

/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch < 1 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.lock_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1);
		}

	/*
	 * Pin the thread to avoid problems with thread migration.  Once
	 * all checks of spinlock ownership have passed, the thread is on
	 * a safe path and can be unpinned.
	 */
	sched_pin();
	lock_list = PCPU_GET(spinlocks);
	if (lock_list != NULL && lock_list->ll_count != 0) {
		sched_unpin();

		/*
		 * We should only have one spinlock and, as long as the
		 * flags cannot match for this lock's class, check whether
		 * the first spinlock is the one curthread should hold.
		 */
1639 		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
1640 		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
1641 		    lock1->li_lock == lock && n == 0)
1642 			return (0);
1643 
1644 		va_start(ap, fmt);
1645 		vprintf(fmt, ap);
1646 		va_end(ap);
1647 		printf(" with the following");
1648 		if (flags & WARN_SLEEPOK)
1649 			printf(" non-sleepable");
1650 		printf(" locks held:\n");
1651 		n += witness_list_locks(&lock_list);
1652 	} else
1653 		sched_unpin();
1654 	if (flags & WARN_PANIC && n)
1655 		panic("%s", __func__);
1656 	else
1657 		witness_debugger(n);
1658 	return (n);
1659 }
1660 
1661 const char *
1662 witness_file(struct lock_object *lock)
1663 {
1664 	struct witness *w;
1665 
1666 	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1667 		return ("?");
1668 	w = lock->lo_witness;
1669 	return (w->w_file);
1670 }
1671 
1672 int
1673 witness_line(struct lock_object *lock)
1674 {
1675 	struct witness *w;
1676 
1677 	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1678 		return (0);
1679 	w = lock->lo_witness;
1680 	return (w->w_line);
1681 }
1682 
1683 static struct witness *
1684 enroll(const char *description, struct lock_class *lock_class)
1685 {
1686 	struct witness *w;
1687 	struct witness_list *typelist;
1688 
1689 	MPASS(description != NULL);
1690 
1691 	if (witness_watch == -1 || panicstr != NULL)
1692 		return (NULL);
1693 	if ((lock_class->lc_flags & LC_SPINLOCK)) {
1694 		if (witness_skipspin)
1695 			return (NULL);
1696 		else
1697 			typelist = &w_spin;
1698 	} else if ((lock_class->lc_flags & LC_SLEEPLOCK))
1699 		typelist = &w_sleep;
1700 	else
1701 		panic("lock class %s is not sleep or spin",
1702 		    lock_class->lc_name);
1703 
1704 	mtx_lock_spin(&w_mtx);
1705 	w = witness_hash_get(description);
1706 	if (w)
1707 		goto found;
1708 	if ((w = witness_get()) == NULL)
1709 		return (NULL);
1710 	MPASS(strlen(description) < MAX_W_NAME);
1711 	strcpy(w->w_name, description);
1712 	w->w_class = lock_class;
1713 	w->w_refcount = 1;
1714 	STAILQ_INSERT_HEAD(&w_all, w, w_list);
1715 	if (lock_class->lc_flags & LC_SPINLOCK) {
1716 		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1717 		w_spin_cnt++;
1718 	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1719 		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1720 		w_sleep_cnt++;
1721 	}
1722 
1723 	/* Insert new witness into the hash */
1724 	witness_hash_put(w);
1725 	witness_increment_graph_generation();
1726 	mtx_unlock_spin(&w_mtx);
1727 	return (w);
1728 found:
1729 	w->w_refcount++;
1730 	mtx_unlock_spin(&w_mtx);
1731 	if (lock_class != w->w_class)
1732 		panic(
1733 			"lock (%s) %s does not match earlier (%s) lock",
1734 			description, lock_class->lc_name,
1735 			w->w_class->lc_name);
1736 	return (w);
1737 }
1738 
1739 static void
1740 depart(struct witness *w)
1741 {
1742 	struct witness_list *list;
1743 
1744 	MPASS(w->w_refcount == 0);
1745 	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1746 		list = &w_sleep;
1747 		w_sleep_cnt--;
1748 	} else {
1749 		list = &w_spin;
1750 		w_spin_cnt--;
1751 	}
1752 	/*
1753 	 * Set file to NULL as it may point into a loadable module.
1754 	 */
1755 	w->w_file = NULL;
1756 	w->w_line = 0;
1757 	witness_increment_graph_generation();
1758 }
1759 
1761 static void
1762 adopt(struct witness *parent, struct witness *child)
1763 {
1764 	int pi, ci, i, j;
1765 
1766 	if (witness_cold == 0)
1767 		mtx_assert(&w_mtx, MA_OWNED);
1768 
1769 	/* If the relationship is already known, there's no work to be done. */
1770 	if (isitmychild(parent, child))
1771 		return;
1772 
1773 	/* When the structure of the graph changes, bump up the generation. */
1774 	witness_increment_graph_generation();
1775 
1776 	/*
1777 	 * The hard part ... create the direct relationship, then propagate all
1778 	 * indirect relationships.
1779 	 */
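	/*
	 * Sketch of the propagation with hypothetical witnesses: if A is
	 * already an ancestor of 'parent' and D is already a descendant of
	 * 'child', then after adopt(parent, child) the matrix also records
	 * A as an ancestor of 'child' and of D, and D as a descendant of
	 * 'parent' and of A.
	 */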
1780 	pi = parent->w_index;
1781 	ci = child->w_index;
1782 	WITNESS_INDEX_ASSERT(pi);
1783 	WITNESS_INDEX_ASSERT(ci);
1784 	MPASS(pi != ci);
1785 	w_rmatrix[pi][ci] |= WITNESS_PARENT;
1786 	w_rmatrix[ci][pi] |= WITNESS_CHILD;
1787 
1788 	/*
1789 	 * If parent was not already an ancestor of child,
1790 	 * then we increment the descendant and ancestor counters.
1791 	 */
1792 	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
1793 		parent->w_num_descendants++;
1794 		child->w_num_ancestors++;
1795 	}
1796 
1797 	/*
1798 	 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
1799 	 * an ancestor of 'pi' during this loop.
1800 	 */
1801 	for (i = 1; i <= w_max_used_index; i++) {
1802 		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
1803 		    (i != pi))
1804 			continue;
1805 
1806 		/* Find each descendant of 'i' and mark it as a descendant. */
1807 		for (j = 1; j <= w_max_used_index; j++) {
1808 
1809 			/*
1810 			 * Skip children that are already marked as
1811 			 * descendants of 'i'.
1812 			 */
1813 			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
1814 				continue;
1815 
1816 			/*
1817 			 * We are only interested in descendants of 'ci'. Note
1818 			 * that 'ci' itself is counted as a descendant of 'ci'.
1819 			 */
1820 			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
1821 			    (j != ci))
1822 				continue;
1823 			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
1824 			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
1825 			w_data[i].w_num_descendants++;
1826 			w_data[j].w_num_ancestors++;
1827 
1828 			/*
1829 			 * Make sure we aren't marking a node as both an
1830 			 * ancestor and descendant. We should have caught
1831 			 * this as a lock order reversal earlier.
1832 			 */
1833 			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
1834 			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
1835 				printf("witness rmatrix paradox! [%d][%d]=%d "
1836 				    "both ancestor and descendant\n",
1837 				    i, j, w_rmatrix[i][j]);
1838 				kdb_backtrace();
1839 				printf("Witness disabled.\n");
1840 				witness_watch = -1;
1841 			}
1842 			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
1843 			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
1844 				printf("witness rmatrix paradox! [%d][%d]=%d "
1845 				    "both ancestor and descendant\n",
1846 				    j, i, w_rmatrix[j][i]);
1847 				kdb_backtrace();
1848 				printf("Witness disabled.\n");
1849 				witness_watch = -1;
1850 			}
1851 		}
1852 	}
1853 }
1854 
1855 static void
1856 itismychild(struct witness *parent, struct witness *child)
1857 {
1858 
1859 	MPASS(child != NULL && parent != NULL);
1860 	if (witness_cold == 0)
1861 		mtx_assert(&w_mtx, MA_OWNED);
1862 
1863 	if (!witness_lock_type_equal(parent, child)) {
1864 		if (witness_cold == 0)
1865 			mtx_unlock_spin(&w_mtx);
1866 		panic("%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
1867 		    "the same lock type", __func__, parent->w_name,
1868 		    parent->w_class->lc_name, child->w_name,
1869 		    child->w_class->lc_name);
1870 	}
1871 	adopt(parent, child);
1872 }
1873 
1874 /*
1875  * Generic code for the isitmy*() functions. The rmask parameter is the
1876  * expected relationship of w1 to w2.
1877  */
1878 static int
1879 _isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
1880 {
1881 	unsigned char r1, r2;
1882 	int i1, i2;
1883 
1884 	i1 = w1->w_index;
1885 	i2 = w2->w_index;
1886 	WITNESS_INDEX_ASSERT(i1);
1887 	WITNESS_INDEX_ASSERT(i2);
1888 	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
1889 	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;
1890 
1891 	/* The flags on one must be the inverse of the flags on the other. */
1892 	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
1893 		(WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
1894 		printf("%s: rmatrix mismatch between %s (index %d) and %s "
1895 		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
1896 		    "w_rmatrix[%d][%d] == %hhx\n",
1897 		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
1898 		    i2, i1, r2);
1899 		kdb_backtrace();
1900 		printf("Witness disabled.\n");
1901 		witness_watch = -1;
1902 	}
1903 	return (r1 & rmask);
1904 }
1905 
1906 /*
1907  * Checks if @child is a direct child of @parent.
1908  */
1909 static int
1910 isitmychild(struct witness *parent, struct witness *child)
1911 {
1912 
1913 	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
1914 }
1915 
1916 /*
1917  * Checks if @descendant is a direct or indirect descendant of @ancestor.
1918  */
1919 static int
1920 isitmydescendant(struct witness *ancestor, struct witness *descendant)
1921 {
1922 
1923 	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
1924 	    __func__));
1925 }
1926 
1927 #ifdef BLESSING
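/*
 * A blessed pair names two locks that may be acquired in either order
 * without being reported as a reversal.  A hypothetical blessed_list entry
 * would look like:
 *
 *	static struct witness_blessed blessed_list[] = {
 *		{ "lockA", "lockB" },
 *	};
 */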
1928 static int
1929 blessed(struct witness *w1, struct witness *w2)
1930 {
1931 	int i;
1932 	struct witness_blessed *b;
1933 
1934 	for (i = 0; i < blessed_count; i++) {
1935 		b = &blessed_list[i];
1936 		if (strcmp(w1->w_name, b->b_lock1) == 0) {
1937 			if (strcmp(w2->w_name, b->b_lock2) == 0)
1938 				return (1);
1939 			continue;
1940 		}
1941 		if (strcmp(w1->w_name, b->b_lock2) == 0)
1942 			if (strcmp(w2->w_name, b->b_lock1) == 0)
1943 				return (1);
1944 	}
1945 	return (0);
1946 }
1947 #endif
1948 
1949 static struct witness *
1950 witness_get(void)
1951 {
1952 	struct witness *w;
1953 	int index;
1954 
1955 	if (witness_cold == 0)
1956 		mtx_assert(&w_mtx, MA_OWNED);
1957 
1958 	if (witness_watch == -1) {
1959 		mtx_unlock_spin(&w_mtx);
1960 		return (NULL);
1961 	}
1962 	if (STAILQ_EMPTY(&w_free)) {
1963 		witness_watch = -1;
1964 		mtx_unlock_spin(&w_mtx);
1965 		printf("WITNESS: unable to allocate a new witness object\n");
1966 		return (NULL);
1967 	}
1968 	w = STAILQ_FIRST(&w_free);
1969 	STAILQ_REMOVE_HEAD(&w_free, w_list);
1970 	w_free_cnt--;
1971 	index = w->w_index;
1972 	MPASS(index > 0 && index == w_max_used_index+1 &&
1973 	    index < WITNESS_COUNT);
1974 	bzero(w, sizeof(*w));
1975 	w->w_index = index;
1976 	if (index > w_max_used_index)
1977 		w_max_used_index = index;
1978 	return (w);
1979 }
1980 
1981 static void
1982 witness_free(struct witness *w)
1983 {
1984 
1985 	STAILQ_INSERT_HEAD(&w_free, w, w_list);
1986 	w_free_cnt++;
1987 }
1988 
1989 static struct lock_list_entry *
1990 witness_lock_list_get(void)
1991 {
1992 	struct lock_list_entry *lle;
1993 
1994 	if (witness_watch == -1)
1995 		return (NULL);
1996 	mtx_lock_spin(&w_mtx);
1997 	lle = w_lock_list_free;
1998 	if (lle == NULL) {
1999 		witness_watch = -1;
2000 		mtx_unlock_spin(&w_mtx);
2001 		printf("%s: witness exhausted\n", __func__);
2002 		return (NULL);
2003 	}
2004 	w_lock_list_free = lle->ll_next;
2005 	mtx_unlock_spin(&w_mtx);
2006 	bzero(lle, sizeof(*lle));
2007 	return (lle);
2008 }
2009 
2010 static void
2011 witness_lock_list_free(struct lock_list_entry *lle)
2012 {
2013 
2014 	mtx_lock_spin(&w_mtx);
2015 	lle->ll_next = w_lock_list_free;
2016 	w_lock_list_free = lle;
2017 	mtx_unlock_spin(&w_mtx);
2018 }
2019 
2020 static struct lock_instance *
2021 find_instance(struct lock_list_entry *list, struct lock_object *lock)
2022 {
2023 	struct lock_list_entry *lle;
2024 	struct lock_instance *instance;
2025 	int i;
2026 
2027 	for (lle = list; lle != NULL; lle = lle->ll_next)
2028 		for (i = lle->ll_count - 1; i >= 0; i--) {
2029 			instance = &lle->ll_children[i];
2030 			if (instance->li_lock == lock)
2031 				return (instance);
2032 		}
2033 	return (NULL);
2034 }
2035 
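/*
 * Prints a one-line description of a held lock instance, roughly of the
 * form (values illustrative):
 *
 *	exclusive sleep mutex Giant (Giant) r = 0 (0xc10a7280) locked @
 *	    kern/kern_module.c:110
 */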
2036 static void
2037 witness_list_lock(struct lock_instance *instance)
2038 {
2039 	struct lock_object *lock;
2040 
2041 	lock = instance->li_lock;
2042 	printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2043 	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2044 	if (lock->lo_witness->w_name != lock->lo_name)
2045 		printf(" (%s)", lock->lo_witness->w_name);
2046 	printf(" r = %d (%p) locked @ %s:%d\n",
2047 	    instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
2048 	    instance->li_line);
2049 }
2050 
2051 #ifdef DDB
2052 static int
2053 witness_thread_has_locks(struct thread *td)
2054 {
2055 
2056 	if (td->td_sleeplocks == NULL)
2057 		return (0);
2058 	return (td->td_sleeplocks->ll_count != 0);
2059 }
2060 
2061 static int
2062 witness_proc_has_locks(struct proc *p)
2063 {
2064 	struct thread *td;
2065 
2066 	FOREACH_THREAD_IN_PROC(p, td) {
2067 		if (witness_thread_has_locks(td))
2068 			return (1);
2069 	}
2070 	return (0);
2071 }
2072 #endif
2073 
2074 int
2075 witness_list_locks(struct lock_list_entry **lock_list)
2076 {
2077 	struct lock_list_entry *lle;
2078 	int i, nheld;
2079 
2080 	nheld = 0;
2081 	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2082 		for (i = lle->ll_count - 1; i >= 0; i--) {
2083 			witness_list_lock(&lle->ll_children[i]);
2084 			nheld++;
2085 		}
2086 	return (nheld);
2087 }
2088 
2089 /*
2090  * This is a bit risky at best.  We call this function when we have timed
2091  * out acquiring a spin lock, and we assume that the other CPU is stuck
2092  * with this lock held.  So, we go groveling around in the other CPU's
2093  * per-cpu data to try to find the lock instance for this spin lock to
2094  * see when it was last acquired.
2095  */
2096 void
2097 witness_display_spinlock(struct lock_object *lock, struct thread *owner)
2098 {
2099 	struct lock_instance *instance;
2100 	struct pcpu *pc;
2101 
2102 	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2103 		return;
2104 	pc = pcpu_find(owner->td_oncpu);
2105 	instance = find_instance(pc->pc_spinlocks, lock);
2106 	if (instance != NULL)
2107 		witness_list_lock(instance);
2108 }
2109 
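/*
 * witness_save() and witness_restore() are normally used through the
 * WITNESS_SAVE_DECL()/WITNESS_SAVE()/WITNESS_RESTORE() macros to preserve
 * the recorded file and line across a region that drops and reacquires a
 * lock, for example (sketch):
 *
 *	WITNESS_SAVE_DECL(Giant);
 *	WITNESS_SAVE(&Giant.lock_object, Giant);
 *	...release the lock, sleep, reacquire it...
 *	WITNESS_RESTORE(&Giant.lock_object, Giant);
 */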
2110 void
2111 witness_save(struct lock_object *lock, const char **filep, int *linep)
2112 {
2113 	struct lock_list_entry *lock_list;
2114 	struct lock_instance *instance;
2115 	struct lock_class *class;
2116 
2117 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2118 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2119 		return;
2120 	class = LOCK_CLASS(lock);
2121 	if (class->lc_flags & LC_SLEEPLOCK)
2122 		lock_list = curthread->td_sleeplocks;
2123 	else {
2124 		if (witness_skipspin)
2125 			return;
2126 		lock_list = PCPU_GET(spinlocks);
2127 	}
2128 	instance = find_instance(lock_list, lock);
2129 	if (instance == NULL)
2130 		panic("%s: lock (%s) %s not locked", __func__,
2131 		    class->lc_name, lock->lo_name);
2132 	*filep = instance->li_file;
2133 	*linep = instance->li_line;
2134 }
2135 
2136 void
2137 witness_restore(struct lock_object *lock, const char *file, int line)
2138 {
2139 	struct lock_list_entry *lock_list;
2140 	struct lock_instance *instance;
2141 	struct lock_class *class;
2142 
2143 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2144 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2145 		return;
2146 	class = LOCK_CLASS(lock);
2147 	if (class->lc_flags & LC_SLEEPLOCK)
2148 		lock_list = curthread->td_sleeplocks;
2149 	else {
2150 		if (witness_skipspin)
2151 			return;
2152 		lock_list = PCPU_GET(spinlocks);
2153 	}
2154 	instance = find_instance(lock_list, lock);
2155 	if (instance == NULL)
2156 		panic("%s: lock (%s) %s not locked", __func__,
2157 		    class->lc_name, lock->lo_name);
2158 	lock->lo_witness->w_file = file;
2159 	lock->lo_witness->w_line = line;
2160 	instance->li_file = file;
2161 	instance->li_line = line;
2162 }
2163 
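/*
 * Typically reached from a lock class assertion that cannot verify shared
 * ownership on its own, e.g. (illustrative) rw_assert(&rw, RA_LOCKED) with
 * WITNESS enabled.
 */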
2164 void
2165 witness_assert(struct lock_object *lock, int flags, const char *file, int line)
2166 {
2167 #ifdef INVARIANT_SUPPORT
2168 	struct lock_instance *instance;
2169 	struct lock_class *class;
2170 
2171 	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
2172 		return;
2173 	class = LOCK_CLASS(lock);
2174 	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
2175 		instance = find_instance(curthread->td_sleeplocks, lock);
2176 	else if ((class->lc_flags & LC_SPINLOCK) != 0)
2177 		instance = find_instance(PCPU_GET(spinlocks), lock);
2178 	else {
2179 		panic("Lock (%s) %s is not sleep or spin!",
2180 		    class->lc_name, lock->lo_name);
2181 	}
2182 	file = fixup_filename(file);
2183 	switch (flags) {
2184 	case LA_UNLOCKED:
2185 		if (instance != NULL)
2186 			panic("Lock (%s) %s locked @ %s:%d.",
2187 			    class->lc_name, lock->lo_name, file, line);
2188 		break;
2189 	case LA_LOCKED:
2190 	case LA_LOCKED | LA_RECURSED:
2191 	case LA_LOCKED | LA_NOTRECURSED:
2192 	case LA_SLOCKED:
2193 	case LA_SLOCKED | LA_RECURSED:
2194 	case LA_SLOCKED | LA_NOTRECURSED:
2195 	case LA_XLOCKED:
2196 	case LA_XLOCKED | LA_RECURSED:
2197 	case LA_XLOCKED | LA_NOTRECURSED:
2198 		if (instance == NULL) {
2199 			panic("Lock (%s) %s not locked @ %s:%d.",
2200 			    class->lc_name, lock->lo_name, file, line);
2201 			break;
2202 		}
2203 		if ((flags & LA_XLOCKED) != 0 &&
2204 		    (instance->li_flags & LI_EXCLUSIVE) == 0)
2205 			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
2206 			    class->lc_name, lock->lo_name, file, line);
2207 		if ((flags & LA_SLOCKED) != 0 &&
2208 		    (instance->li_flags & LI_EXCLUSIVE) != 0)
2209 			panic("Lock (%s) %s exclusively locked @ %s:%d.",
2210 			    class->lc_name, lock->lo_name, file, line);
2211 		if ((flags & LA_RECURSED) != 0 &&
2212 		    (instance->li_flags & LI_RECURSEMASK) == 0)
2213 			panic("Lock (%s) %s not recursed @ %s:%d.",
2214 			    class->lc_name, lock->lo_name, file, line);
2215 		if ((flags & LA_NOTRECURSED) != 0 &&
2216 		    (instance->li_flags & LI_RECURSEMASK) != 0)
2217 			panic("Lock (%s) %s recursed @ %s:%d.",
2218 			    class->lc_name, lock->lo_name, file, line);
2219 		break;
2220 	default:
2221 		panic("Invalid lock assertion at %s:%d.", file, line);
2223 	}
2224 #endif	/* INVARIANT_SUPPORT */
2225 }
2226 
2227 #ifdef DDB
2228 static void
2229 witness_ddb_list(struct thread *td)
2230 {
2231 
2232 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2233 	KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2234 
2235 	if (witness_watch < 1)
2236 		return;
2237 
2238 	witness_list_locks(&td->td_sleeplocks);
2239 
2240 	/*
2241 	 * We only handle spinlocks if td == curthread.  This is somewhat broken
2242 	 * if td is currently executing on some other CPU and holds spin locks,
2243 	 * as we won't display those locks.  If we had an MI way of getting
2244 	 * the per-cpu data for a given cpu then we could use
2245 	 * td->td_oncpu to get the list of spinlocks for this thread
2246 	 * and "fix" this.
2247 	 *
2248 	 * That still wouldn't really fix this unless we locked the scheduler
2249 	 * lock or stopped the other CPU to make sure it wasn't changing the
2250 	 * list out from under us.  It is probably best to just not try to
2251 	 * handle threads on other CPUs for now.
2252 	 */
2253 	if (td == curthread && PCPU_GET(spinlocks) != NULL)
2254 		witness_list_locks(PCPU_PTR(spinlocks));
2255 }
2256 
2257 DB_SHOW_COMMAND(locks, db_witness_list)
2258 {
2259 	struct thread *td;
2260 
2261 	if (have_addr)
2262 		td = db_lookup_thread(addr, TRUE);
2263 	else
2264 		td = kdb_thread;
2265 	witness_ddb_list(td);
2266 }
2267 
2268 DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
2269 {
2270 	struct thread *td;
2271 	struct proc *p;
2272 
2273 	/*
2274 	 * It would be nice to list only threads and processes that actually
2275 	 * hold sleep locks, but that information is currently not exported
2276 	 * by WITNESS.
2277 	 */
2278 	FOREACH_PROC_IN_SYSTEM(p) {
2279 		if (!witness_proc_has_locks(p))
2280 			continue;
2281 		FOREACH_THREAD_IN_PROC(p, td) {
2282 			if (!witness_thread_has_locks(td))
2283 				continue;
2284 			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2285 			    p->p_comm, td, td->td_tid);
2286 			witness_ddb_list(td);
2287 		}
2288 	}
2289 }
2290 DB_SHOW_ALIAS(alllocks, db_witness_list_all)
2291 
2292 DB_SHOW_COMMAND(witness, db_witness_display)
2293 {
2294 
2295 	witness_ddb_display(db_printf);
2296 }
2297 #endif
2298 
2299 static int
2300 sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
2301 {
2302 	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
2303 	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
2304 	struct sbuf *sb;
2305 	u_int w_rmatrix1, w_rmatrix2;
2306 	int error, generation, i, j;
2307 
2308 	tmp_data1 = NULL;
2309 	tmp_data2 = NULL;
2310 	tmp_w1 = NULL;
2311 	tmp_w2 = NULL;
2312 	if (witness_watch < 1) {
2313 		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2314 		return (error);
2315 	}
2316 	if (witness_cold) {
2317 		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2318 		return (error);
2319 	}
2320 	error = 0;
2321 	sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND);
2322 	if (sb == NULL)
2323 		return (ENOMEM);
2324 
2325 	/* Allocate and init temporary storage space. */
2326 	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2327 	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2328 	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2329 	    M_WAITOK | M_ZERO);
2330 	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2331 	    M_WAITOK | M_ZERO);
2332 	stack_zero(&tmp_data1->wlod_stack);
2333 	stack_zero(&tmp_data2->wlod_stack);
2334 
2335 restart:
2336 	mtx_lock_spin(&w_mtx);
2337 	generation = w_generation;
2338 	mtx_unlock_spin(&w_mtx);
2339 	sbuf_printf(sb, "Number of known direct relationships is %d\n",
2340 	    w_lohash.wloh_count);
2341 	for (i = 1; i < w_max_used_index; i++) {
2342 		mtx_lock_spin(&w_mtx);
2343 		if (generation != w_generation) {
2344 			mtx_unlock_spin(&w_mtx);
2345 
2346 			/* The graph has changed, try again. */
2347 			req->oldidx = 0;
2348 			sbuf_clear(sb);
2349 			goto restart;
2350 		}
2351 
2352 		w1 = &w_data[i];
2353 		if (w1->w_reversed == 0) {
2354 			mtx_unlock_spin(&w_mtx);
2355 			continue;
2356 		}
2357 
2358 		/* Copy w1 locally so we can release the spin lock. */
2359 		*tmp_w1 = *w1;
2360 		mtx_unlock_spin(&w_mtx);
2361 
2362 		if (tmp_w1->w_reversed == 0)
2363 			continue;
2364 		for (j = 1; j < w_max_used_index; j++) {
2365 			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
2366 				continue;
2367 
2368 			mtx_lock_spin(&w_mtx);
2369 			if (generation != w_generation) {
2370 				mtx_unlock_spin(&w_mtx);
2371 
2372 				/* The graph has changed, try again. */
2373 				req->oldidx = 0;
2374 				sbuf_clear(sb);
2375 				goto restart;
2376 			}
2377 
2378 			w2 = &w_data[j];
2379 			data1 = witness_lock_order_get(w1, w2);
2380 			data2 = witness_lock_order_get(w2, w1);
2381 
2382 			/*
2383 			 * Copy information locally so we can release the
2384 			 * spin lock.
2385 			 */
2386 			*tmp_w2 = *w2;
2387 			w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
2388 			w_rmatrix2 = (unsigned int)w_rmatrix[j][i];
2389 
2390 			if (data1) {
2391 				stack_zero(&tmp_data1->wlod_stack);
2392 				stack_copy(&data1->wlod_stack,
2393 				    &tmp_data1->wlod_stack);
2394 			}
2395 			if (data2 && data2 != data1) {
2396 				stack_zero(&tmp_data2->wlod_stack);
2397 				stack_copy(&data2->wlod_stack,
2398 				    &tmp_data2->wlod_stack);
2399 			}
2400 			mtx_unlock_spin(&w_mtx);
2401 
2402 			sbuf_printf(sb,
2403 	    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
2404 			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2405 			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2406 #if 0
2407 			sbuf_printf(sb,
2408 			"w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
2409 			    tmp_w1->w_name, tmp_w2->w_name, w_rmatrix1,
2410 			    tmp_w2->w_name, tmp_w1->w_name, w_rmatrix2);
2411 #endif
2412 			if (data1) {
2413 				sbuf_printf(sb,
2414 			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2415 				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2416 				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2417 				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
2418 				sbuf_printf(sb, "\n");
2419 			}
2420 			if (data2 && data2 != data1) {
2421 				sbuf_printf(sb,
2422 			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2423 				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
2424 				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
2425 				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
2426 				sbuf_printf(sb, "\n");
2427 			}
2428 		}
2429 	}
2430 	mtx_lock_spin(&w_mtx);
2431 	if (generation != w_generation) {
2432 		mtx_unlock_spin(&w_mtx);
2433 
2434 		/*
2435 		 * The graph changed while we were printing stack data,
2436 		 * try again.
2437 		 */
2438 		req->oldidx = 0;
2439 		sbuf_clear(sb);
2440 		goto restart;
2441 	}
2442 	mtx_unlock_spin(&w_mtx);
2443 
2444 	/* Free temporary storage space. */
2445 	free(tmp_data1, M_TEMP);
2446 	free(tmp_data2, M_TEMP);
2447 	free(tmp_w1, M_TEMP);
2448 	free(tmp_w2, M_TEMP);
2449 
2450 	sbuf_finish(sb);
2451 	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2452 	sbuf_delete(sb);
2453 
2454 	return (error);
2455 }
2456 
2457 static int
2458 sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
2459 {
2460 	struct witness *w;
2461 	struct sbuf *sb;
2462 	int error;
2463 
2464 	if (witness_watch < 1) {
2465 		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2466 		return (error);
2467 	}
2468 	if (witness_cold) {
2469 		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2470 		return (error);
2471 	}
2472 	error = 0;
2473 	sb = sbuf_new(NULL, NULL, FULLGRAPH_SBUF_SIZE, SBUF_FIXEDLEN);
2474 	if (sb == NULL)
2475 		return (ENOMEM);
2476 	sbuf_printf(sb, "\n");
2477 
2478 	mtx_lock_spin(&w_mtx);
2479 	STAILQ_FOREACH(w, &w_all, w_list)
2480 		w->w_displayed = 0;
2481 	STAILQ_FOREACH(w, &w_all, w_list)
2482 		witness_add_fullgraph(sb, w);
2483 	mtx_unlock_spin(&w_mtx);
2484 
2485 	/*
2486 	 * While using SBUF_FIXEDLEN, check if the sbuf overflowed.
2487 	 */
2488 	if (sbuf_overflowed(sb)) {
2489 		sbuf_delete(sb);
2490 		panic("%s: sbuf overflowed, bump FULLGRAPH_SBUF_SIZE value\n",
2491 		    __func__);
2492 	}
2493 
2494 	/*
2495 	 * Close the sbuf and return to userland.
2496 	 */
2497 	sbuf_finish(sb);
2498 	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2499 	sbuf_delete(sb);
2500 
2501 	return (error);
2502 }
2503 
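/*
 * Handler for the debug.witness.watch sysctl.  Accepts values in [-1, 1];
 * once witness has been disabled with -1 (e.g. "sysctl debug.witness.watch=-1"
 * from userland), it cannot be re-enabled.
 */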
2504 static int
2505 sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
2506 {
2507 	int error, value;
2508 
2509 	value = witness_watch;
2510 	error = sysctl_handle_int(oidp, &value, 0, req);
2511 	if (error != 0 || req->newptr == NULL)
2512 		return (error);
2513 	if (value > 1 || value < -1 ||
2514 	    (witness_watch == -1 && value != witness_watch))
2515 		return (EINVAL);
2516 	witness_watch = value;
2517 	return (0);
2518 }
2519 
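/*
 * Emits one "parent","child" line per direct relationship of 'w' and then
 * recurses into each child, producing lines such as (names illustrative)
 * "Giant","process lock".
 */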
2520 static void
2521 witness_add_fullgraph(struct sbuf *sb, struct witness *w)
2522 {
2523 	int i;
2524 
2525 	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
2526 		return;
2527 	w->w_displayed = 1;
2528 
2529 	WITNESS_INDEX_ASSERT(w->w_index);
2530 	for (i = 1; i <= w_max_used_index; i++) {
2531 		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
2532 			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
2533 			    w_data[i].w_name);
2534 			witness_add_fullgraph(sb, &w_data[i]);
2535 		}
2536 	}
2537 }
2538 
2539 /*
2540  * A simple hash function. Takes a key pointer and a key size. If size == 0,
2541  * interprets the key as a string and reads until the null
2542  * terminator. Otherwise, reads the first size bytes. Returns an unsigned 32-bit
2543  * hash value computed from the key.
2544  */
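/*
 * Worked example: hashing the one-byte string "a" (0x61) yields
 * hash = 5381 * 33 + 97 = 177670.
 */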
2545 static uint32_t
2546 witness_hash_djb2(const uint8_t *key, uint32_t size)
2547 {
2548 	unsigned int hash = 5381;
2549 	int i;
2550 
2551 	/* hash = hash * 33 + key[i] */
2552 	if (size)
2553 		for (i = 0; i < size; i++)
2554 			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2555 	else
2556 		for (i = 0; key[i] != 0; i++)
2557 			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2558 
2559 	return (hash);
2560 }
2561 
2563 /*
2564  * Initializes the two witness hash tables. Called exactly once from
2565  * witness_initialize().
2566  */
2567 static void
2568 witness_init_hash_tables(void)
2569 {
2570 	int i;
2571 
2572 	MPASS(witness_cold);
2573 
2574 	/* Initialize the hash tables. */
2575 	for (i = 0; i < WITNESS_HASH_SIZE; i++)
2576 		w_hash.wh_array[i] = NULL;
2577 
2578 	w_hash.wh_size = WITNESS_HASH_SIZE;
2579 	w_hash.wh_count = 0;
2580 
2581 	/* Initialize the lock order data hash. */
2582 	w_lofree = NULL;
2583 	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
2584 		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
2585 		w_lodata[i].wlod_next = w_lofree;
2586 		w_lofree = &w_lodata[i];
2587 	}
2588 	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
2589 	w_lohash.wloh_count = 0;
2590 	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
2591 		w_lohash.wloh_array[i] = NULL;
2592 }
2593 
2594 static struct witness *
2595 witness_hash_get(const char *key)
2596 {
2597 	struct witness *w;
2598 	uint32_t hash;
2599 
2600 	MPASS(key != NULL);
2601 	if (witness_cold == 0)
2602 		mtx_assert(&w_mtx, MA_OWNED);
2603 	hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
2604 	w = w_hash.wh_array[hash];
2605 	while (w != NULL) {
2606 		if (strcmp(w->w_name, key) == 0)
2607 			goto out;
2608 		w = w->w_hash_next;
2609 	}
2610 
2611 out:
2612 	return (w);
2613 }
2614 
2615 static void
2616 witness_hash_put(struct witness *w)
2617 {
2618 	uint32_t hash;
2619 
2620 	MPASS(w != NULL);
2621 	MPASS(w->w_name != NULL);
2622 	if (witness_cold == 0)
2623 		mtx_assert(&w_mtx, MA_OWNED);
2624 	KASSERT(witness_hash_get(w->w_name) == NULL,
2625 	    ("%s: trying to add a hash entry that already exists!", __func__));
2626 	KASSERT(w->w_hash_next == NULL,
2627 	    ("%s: w->w_hash_next != NULL", __func__));
2628 
2629 	hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
2630 	w->w_hash_next = w_hash.wh_array[hash];
2631 	w_hash.wh_array[hash] = w;
2632 	w_hash.wh_count++;
2633 }
2634 
2636 static struct witness_lock_order_data *
2637 witness_lock_order_get(struct witness *parent, struct witness *child)
2638 {
2639 	struct witness_lock_order_data *data = NULL;
2640 	struct witness_lock_order_key key;
2641 	unsigned int hash;
2642 
2643 	MPASS(parent != NULL && child != NULL);
2644 	key.from = parent->w_index;
2645 	key.to = child->w_index;
2646 	WITNESS_INDEX_ASSERT(key.from);
2647 	WITNESS_INDEX_ASSERT(key.to);
2648 	if ((w_rmatrix[parent->w_index][child->w_index]
2649 	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
2650 		goto out;
2651 
2652 	hash = witness_hash_djb2((const char*)&key,
2653 	    sizeof(key)) % w_lohash.wloh_size;
2654 	data = w_lohash.wloh_array[hash];
2655 	while (data != NULL) {
2656 		if (witness_lock_order_key_equal(&data->wlod_key, &key))
2657 			break;
2658 		data = data->wlod_next;
2659 	}
2660 
2661 out:
2662 	return (data);
2663 }
2664 
2665 /*
2666  * Verify that parent and child have a known relationship, are not the same,
2667  * and child is actually a child of parent.  This is done without w_mtx
2668  * to avoid contention in the common case.
2669  */
2670 static int
2671 witness_lock_order_check(struct witness *parent, struct witness *child)
2672 {
2673 
2674 	if (parent != child &&
2675 	    w_rmatrix[parent->w_index][child->w_index]
2676 	    & WITNESS_LOCK_ORDER_KNOWN &&
2677 	    isitmychild(parent, child))
2678 		return (1);
2679 
2680 	return (0);
2681 }
2682 
2683 static int
2684 witness_lock_order_add(struct witness *parent, struct witness *child)
2685 {
2686 	struct witness_lock_order_data *data = NULL;
2687 	struct witness_lock_order_key key;
2688 	unsigned int hash;
2689 
2690 	MPASS(parent != NULL && child != NULL);
2691 	key.from = parent->w_index;
2692 	key.to = child->w_index;
2693 	WITNESS_INDEX_ASSERT(key.from);
2694 	WITNESS_INDEX_ASSERT(key.to);
2695 	if (w_rmatrix[parent->w_index][child->w_index]
2696 	    & WITNESS_LOCK_ORDER_KNOWN)
2697 		return (1);
2698 
2699 	hash = witness_hash_djb2((const char*)&key,
2700 	    sizeof(key)) % w_lohash.wloh_size;
2701 	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
2702 	data = w_lofree;
2703 	if (data == NULL)
2704 		return (0);
2705 	w_lofree = data->wlod_next;
2706 	data->wlod_next = w_lohash.wloh_array[hash];
2707 	data->wlod_key = key;
2708 	w_lohash.wloh_array[hash] = data;
2709 	w_lohash.wloh_count++;
2710 	stack_zero(&data->wlod_stack);
2711 	stack_save(&data->wlod_stack);
2712 	return (1);
2713 }
2714 
2715 /* Call this whenever the structure of the witness graph changes. */
2716 static void
2717 witness_increment_graph_generation(void)
2718 {
2719 
2720 	if (witness_cold == 0)
2721 		mtx_assert(&w_mtx, MA_OWNED);
2722 	w_generation++;
2723 }
2724 
2725 #ifdef KDB
2726 static void
2727 _witness_debugger(int cond, const char *msg)
2728 {
2729 
2730 	if (witness_trace && cond)
2731 		kdb_backtrace();
2732 	if (witness_kdb && cond)
2733 		kdb_enter(KDB_WHY_WITNESS, msg);
2734 }
2735 #endif
2736