xref: /freebsd/sys/kern/subr_witness.c (revision aa64588d28258aef88cc33b8043112e8856948d0)
1 /*-
2  * Copyright (c) 2008 Isilon Systems, Inc.
3  * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
4  * Copyright (c) 1998 Berkeley Software Design, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Berkeley Software Design Inc's name may not be used to endorse or
16  *    promote products derived from this software without specific prior
17  *    written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
32  *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
33  */
34 
35 /*
36  * Implementation of the `witness' lock verifier.  Originally implemented for
37  * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
38  * classes in FreeBSD.
39  */
40 
41 /*
42  *	Main Entry: witness
43  *	Pronunciation: 'wit-n&s
44  *	Function: noun
45  *	Etymology: Middle English witnesse, from Old English witnes knowledge,
46  *	    testimony, witness, from 2wit
47  *	Date: before 12th century
48  *	1 : attestation of a fact or event : TESTIMONY
49  *	2 : one that gives evidence; specifically : one who testifies in
50  *	    a cause or before a judicial tribunal
51  *	3 : one asked to be present at a transaction so as to be able to
52  *	    testify to its having taken place
53  *	4 : one who has personal knowledge of something
54  *	5 a : something serving as evidence or proof : SIGN
55  *	  b : public affirmation by word or example of usually
56  *	      religious faith or conviction <the heroic witness to divine
57  *	      life -- Pilot>
58  *	6 capitalized : a member of the Jehovah's Witnesses
59  */
60 
61 /*
62  * Special rules concerning Giant and lock orders:
63  *
64  * 1) Giant must be acquired before any other mutexes.  Stated another way,
65  *    no other mutex may be held when Giant is acquired.
66  *
67  * 2) Giant must be released when blocking on a sleepable lock.
68  *
69  * This rule is less obvious, but is a result of Giant providing the same
70  * semantics as spl().  Basically, when a thread sleeps, it must release
71  * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
72  * 2).
73  *
74  * 3) Giant may be acquired before or after sleepable locks.
75  *
76  * This rule is also not quite as obvious.  Giant may be acquired after
77  * a sleepable lock because it is a non-sleepable lock and non-sleepable
78  * locks may always be acquired while holding a sleepable lock.  The second
79  * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
80  * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
81  * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
82  * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
83  * execute.  Thus, acquiring Giant both before and after a sleepable lock
84  * will not result in a lock order reversal.
85  */
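/*
 * An illustrative sketch of rule 3) (names hypothetical, block not
 * compiled).  Both acquisition orders below are acceptable to witness:
 */
#if 0
	/* Giant after a sleepable lock: fine, since Giant is non-sleepable. */
	sx_xlock(&example_sx);
	mtx_lock(&Giant);
	mtx_unlock(&Giant);
	sx_xunlock(&example_sx);

	/*
	 * Giant before a sleepable lock: safe because rule 2) guarantees
	 * that Giant is released while blocking on the sx lock, so no
	 * deadlock can result.
	 */
	mtx_lock(&Giant);
	sx_xlock(&example_sx);
	sx_xunlock(&example_sx);
	mtx_unlock(&Giant);
#endif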
86 
87 #include <sys/cdefs.h>
88 __FBSDID("$FreeBSD$");
89 
90 #include "opt_ddb.h"
91 #include "opt_hwpmc_hooks.h"
92 #include "opt_stack.h"
93 #include "opt_witness.h"
94 
95 #include <sys/param.h>
96 #include <sys/bus.h>
97 #include <sys/kdb.h>
98 #include <sys/kernel.h>
99 #include <sys/ktr.h>
100 #include <sys/lock.h>
101 #include <sys/malloc.h>
102 #include <sys/mutex.h>
103 #include <sys/priv.h>
104 #include <sys/proc.h>
105 #include <sys/sbuf.h>
106 #include <sys/sched.h>
107 #include <sys/stack.h>
108 #include <sys/sysctl.h>
109 #include <sys/systm.h>
110 
111 #ifdef DDB
112 #include <ddb/ddb.h>
113 #endif
114 
115 #include <machine/stdarg.h>
116 
117 #if !defined(DDB) && !defined(STACK)
118 #error "DDB or STACK options are required for WITNESS"
119 #endif
120 
121 /* Note that these traces do not work with KTR_ALQ. */
122 #if 0
123 #define	KTR_WITNESS	KTR_SUBSYS
124 #else
125 #define	KTR_WITNESS	0
126 #endif
127 
128 #define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
129 #define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
130 #define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */
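/*
 * A small sketch (not compiled) of how these bits pack into li_flags: the
 * low 16 bits count recursion, while LI_EXCLUSIVE and LI_NORELEASE sit
 * above them.  This matches the li_flags++/li_flags-- idiom used by
 * witness_lock() and witness_unlock() below.
 */
#if 0
	u_int flags = LI_EXCLUSIVE;	/* exclusive, recursion depth 0 */

	flags++;			/* recursed: depth is now 1 */
	MPASS((flags & LI_RECURSEMASK) == 1);
	MPASS((flags & LI_EXCLUSIVE) != 0);
	flags--;			/* unrecursed: depth back to 0 */
#endif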
131 
132 /* Define this to check for blessed mutexes */
133 #undef BLESSING
134 
135 #define	WITNESS_COUNT 		1024
136 #define	WITNESS_CHILDCOUNT 	(WITNESS_COUNT * 4)
137 #define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
138 #define	WITNESS_PENDLIST	512
139 
140 /* Allocate 256 KB of stack data space */
141 #define	WITNESS_LO_DATA_COUNT	2048
142 
143 /* Prime, gives load factor of ~2 at full load */
144 #define	WITNESS_LO_HASH_SIZE	1021
145 
146 /*
147  * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
148  * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
149  * probably be safe for the most part, but it's still a SWAG.
150  */
151 #define	LOCK_NCHILDREN	5
152 #define	LOCK_CHILDCOUNT	2048
153 
154 #define	MAX_W_NAME	64
155 
156 #define	BADSTACK_SBUF_SIZE	(256 * WITNESS_COUNT)
157 #define	CYCLEGRAPH_SBUF_SIZE	8192
158 #define	FULLGRAPH_SBUF_SIZE	32768
159 
160 /*
161  * These flags go in the witness relationship matrix and describe the
162  * relationship between any two struct witness objects.
163  */
164 #define	WITNESS_UNRELATED        0x00    /* No lock order relation. */
165 #define	WITNESS_PARENT           0x01    /* Parent, aka direct ancestor. */
166 #define	WITNESS_ANCESTOR         0x02    /* Direct or indirect ancestor. */
167 #define	WITNESS_CHILD            0x04    /* Child, aka direct descendant. */
168 #define	WITNESS_DESCENDANT       0x08    /* Direct or indirect descendant. */
169 #define	WITNESS_ANCESTOR_MASK    (WITNESS_PARENT | WITNESS_ANCESTOR)
170 #define	WITNESS_DESCENDANT_MASK  (WITNESS_CHILD | WITNESS_DESCENDANT)
171 #define	WITNESS_RELATED_MASK						\
172 	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
173 #define	WITNESS_REVERSAL         0x10    /* A lock order reversal has been
174 					  * observed. */
175 #define	WITNESS_RESERVED1        0x20    /* Unused flag, reserved. */
176 #define	WITNESS_RESERVED2        0x40    /* Unused flag, reserved. */
177 #define	WITNESS_LOCK_ORDER_KNOWN 0x80    /* This lock order is known. */
178 
179 /* Descendant to ancestor flags */
180 #define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)
181 
182 /* Ancestor to descendant flags */
183 #define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)
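/*
 * A worked example of the two macros above (not compiled): the ancestor
 * bits sit exactly two positions below the matching descendant bits, so
 * shifting by 2 reverses the direction of a recorded relationship.
 */
#if 0
	MPASS(WITNESS_ATOD(WITNESS_PARENT) == WITNESS_CHILD);
	MPASS(WITNESS_ATOD(WITNESS_ANCESTOR) == WITNESS_DESCENDANT);
	MPASS(WITNESS_DTOA(WITNESS_CHILD) == WITNESS_PARENT);
	MPASS(WITNESS_DTOA(WITNESS_DESCENDANT) == WITNESS_ANCESTOR);
#endif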
184 
185 #define	WITNESS_INDEX_ASSERT(i)						\
186 	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT)
187 
188 MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");
189 
190 /*
191  * Lock instances.  A lock instance is the data associated with a lock while
192  * it is held by witness.  For example, a lock instance will hold the
193  * recursion count of a lock.  Lock instances are held in lists.  Spin locks
194  * are held in a per-cpu list while sleep locks are held in a per-thread list.
195  */
196 struct lock_instance {
197 	struct lock_object	*li_lock;
198 	const char		*li_file;
199 	int			li_line;
200 	u_int			li_flags;
201 };
202 
203 /*
204  * A simple list type used to build the list of locks held by a thread
205  * or CPU.  We can't simply embed the list in struct lock_object since a
206  * lock may be held by more than one thread if it is a shared lock.  Locks
207  * are added to the head of the list, so we fill up each list entry from
208  * "the back" logically.  To ease some of the arithmetic, we actually fill
209  * in each list entry the normal way (children[0] then children[1], etc.) but
210  * when we traverse the list we read children[count-1] as the first entry
211  * down to children[0] as the final entry.
212  */
213 struct lock_list_entry {
214 	struct lock_list_entry	*ll_next;
215 	struct lock_instance	ll_children[LOCK_NCHILDREN];
216 	u_int			ll_count;
217 };
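/*
 * A sketch of the traversal idiom described above (not compiled): entries
 * are filled in at ll_children[0], ll_children[1], ... but are walked from
 * ll_children[ll_count - 1] down to ll_children[0], newest lock first.
 * This mirrors the loops in witness_warn() and witness_thread_exit().
 */
#if 0
	struct lock_list_entry *lle;
	int i;

	for (lle = curthread->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--)
			witness_list_lock(&lle->ll_children[i], printf);
#endif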
218 
219 /*
220  * The main witness structure. One of these per named lock type in the system
221  * (for example, "vnode interlock").
222  */
223 struct witness {
224 	char  			w_name[MAX_W_NAME];
225 	uint32_t 		w_index;  /* Index in the relationship matrix */
226 	struct lock_class	*w_class;
227 	STAILQ_ENTRY(witness) 	w_list;		/* List of all witnesses. */
228 	STAILQ_ENTRY(witness) 	w_typelist;	/* Witnesses of a type. */
229 	struct witness		*w_hash_next; /* Linked list in hash buckets. */
230 	const char		*w_file; /* File where last acquired */
231 	uint32_t 		w_line; /* Line where last acquired */
232 	uint32_t 		w_refcount;
233 	uint16_t 		w_num_ancestors; /* direct/indirect
234 						  * ancestor count */
235 	uint16_t 		w_num_descendants; /* direct/indirect
236 						    * descendant count */
237 	int16_t 		w_ddb_level;
238 	unsigned		w_displayed:1;
239 	unsigned		w_reversed:1;
240 };
241 
242 STAILQ_HEAD(witness_list, witness);
243 
244 /*
245  * The witness hash table. Keys are witness names (const char *), elements are
246  * witness objects (struct witness *).
247  */
248 struct witness_hash {
249 	struct witness	*wh_array[WITNESS_HASH_SIZE];
250 	uint32_t	wh_size;
251 	uint32_t	wh_count;
252 };
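/*
 * The table is keyed with witness_hash_djb2() (declared below); a minimal
 * sketch of the classic djb2 recurrence it is named after, assuming the
 * usual hash * 33 + c form (illustrative only, not compiled):
 */
#if 0
static uint32_t
djb2_sketch(const char *key)
{
	uint32_t hash = 5381;

	for (; *key != '\0'; key++)
		hash = ((hash << 5) + hash) + (uint8_t)*key; /* hash * 33 + c */
	return (hash % WITNESS_HASH_SIZE);
}
#endif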
253 
254 /*
255  * Key type for the lock order data hash table.
256  */
257 struct witness_lock_order_key {
258 	uint16_t	from;
259 	uint16_t	to;
260 };
261 
262 struct witness_lock_order_data {
263 	struct stack			wlod_stack;
264 	struct witness_lock_order_key	wlod_key;
265 	struct witness_lock_order_data	*wlod_next;
266 };
267 
268 /*
269  * The witness lock order data hash table. Keys are witness index tuples
270  * (struct witness_lock_order_key), elements are lock order data objects
271  * (struct witness_lock_order_data).
272  */
273 struct witness_lock_order_hash {
274 	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
275 	u_int	wloh_size;
276 	u_int	wloh_count;
277 };
278 
279 #ifdef BLESSING
280 struct witness_blessed {
281 	const char	*b_lock1;
282 	const char	*b_lock2;
283 };
284 #endif
285 
286 struct witness_pendhelp {
287 	const char		*wh_type;
288 	struct lock_object	*wh_lock;
289 };
290 
291 struct witness_order_list_entry {
292 	const char		*w_name;
293 	struct lock_class	*w_class;
294 };
295 
296 /*
297  * Returns 0 if one of the locks is a spin lock and the other is not.
298  * Returns 1 otherwise.
299  */
300 static __inline int
301 witness_lock_type_equal(struct witness *w1, struct witness *w2)
302 {
303 
304 	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
305 		(w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
306 }
307 
308 static __inline int
309 witness_lock_order_key_empty(const struct witness_lock_order_key *key)
310 {
311 
312 	return (key->from == 0 && key->to == 0);
313 }
314 
315 static __inline int
316 witness_lock_order_key_equal(const struct witness_lock_order_key *a,
317     const struct witness_lock_order_key *b)
318 {
319 
320 	return (a->from == b->from && a->to == b->to);
321 }
322 
323 static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
324 		    const char *fname);
325 #ifdef KDB
326 static void	_witness_debugger(int cond, const char *msg);
327 #endif
328 static void	adopt(struct witness *parent, struct witness *child);
329 #ifdef BLESSING
330 static int	blessed(struct witness *, struct witness *);
331 #endif
332 static void	depart(struct witness *w);
333 static struct witness	*enroll(const char *description,
334 			    struct lock_class *lock_class);
335 static struct lock_instance	*find_instance(struct lock_list_entry *list,
336 				    struct lock_object *lock);
337 static int	isitmychild(struct witness *parent, struct witness *child);
338 static int	isitmydescendant(struct witness *parent, struct witness *child);
339 static void	itismychild(struct witness *parent, struct witness *child);
340 static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
341 static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
342 static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
343 static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
344 #ifdef DDB
345 static void	witness_ddb_compute_levels(void);
346 static void	witness_ddb_display(int(*)(const char *fmt, ...));
347 static void	witness_ddb_display_descendants(int(*)(const char *fmt, ...),
348 		    struct witness *, int indent);
349 static void	witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
350 		    struct witness_list *list);
351 static void	witness_ddb_level_descendants(struct witness *parent, int l);
352 static void	witness_ddb_list(struct thread *td);
353 #endif
354 static void	witness_free(struct witness *m);
355 static struct witness	*witness_get(void);
356 static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
357 static struct witness	*witness_hash_get(const char *key);
358 static void	witness_hash_put(struct witness *w);
359 static void	witness_init_hash_tables(void);
360 static void	witness_increment_graph_generation(void);
361 static void	witness_lock_list_free(struct lock_list_entry *lle);
362 static struct lock_list_entry	*witness_lock_list_get(void);
363 static int	witness_lock_order_add(struct witness *parent,
364 		    struct witness *child);
365 static int	witness_lock_order_check(struct witness *parent,
366 		    struct witness *child);
367 static struct witness_lock_order_data	*witness_lock_order_get(
368 					    struct witness *parent,
369 					    struct witness *child);
370 static void	witness_list_lock(struct lock_instance *instance,
371 		    int (*prnt)(const char *fmt, ...));
372 static void	witness_setflag(struct lock_object *lock, int flag, int set);
373 
374 #ifdef KDB
375 #define	witness_debugger(c)	_witness_debugger(c, __func__)
376 #else
377 #define	witness_debugger(c)
378 #endif
379 
380 SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL, "Witness Locking");
381 
382 /*
383  * If set to 0, lock order checking is disabled.  If set to -1,
384  * witness is completely disabled.  Otherwise witness performs full
385  * lock order checking for all locks.  At runtime, lock order checking
386  * may be toggled.  However, witness cannot be reenabled once it is
387  * completely disabled.
388  */
389 static int witness_watch = 1;
390 TUNABLE_INT("debug.witness.watch", &witness_watch);
391 SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
392     sysctl_debug_witness_watch, "I", "witness is watching lock operations");
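/*
 * For example (illustrative): "sysctl debug.witness.watch=0" turns off
 * lock order checking at runtime, while setting debug.witness.watch=-1
 * from the loader disables witness entirely; per the comment above, the
 * latter cannot be undone at runtime.
 */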
393 
394 #ifdef KDB
395 /*
396  * When KDB is enabled and witness_kdb is 1, it will cause the system
397  * to drop into the kernel debugger when:
398  *	- a lock hierarchy violation occurs
399  *	- locks are held when going to sleep.
400  */
401 #ifdef WITNESS_KDB
402 int	witness_kdb = 1;
403 #else
404 int	witness_kdb = 0;
405 #endif
406 TUNABLE_INT("debug.witness.kdb", &witness_kdb);
407 SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");
408 
409 /*
410  * When KDB is enabled and witness_trace is 1, it will cause the system
411  * to print a stack trace:
412  *	- a lock hierarchy violation occurs
413  *	- locks are held when going to sleep.
414  */
415 int	witness_trace = 1;
416 TUNABLE_INT("debug.witness.trace", &witness_trace);
417 SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
418 #endif /* KDB */
419 
420 #ifdef WITNESS_SKIPSPIN
421 int	witness_skipspin = 1;
422 #else
423 int	witness_skipspin = 0;
424 #endif
425 TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
426 SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
427     0, "");
428 
429 /*
430  * Call this to print out the relations between locks.
431  */
432 SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
433     NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");
434 
435 /*
436  * Call this to print out the stacks recorded for bad lock orders.
437  */
438 SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
439     NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");
440 
441 static struct mtx w_mtx;
442 
443 /* w_list */
444 static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
445 static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
446 
447 /* w_typelist */
448 static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
449 static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
450 
451 /* lock list */
452 static struct lock_list_entry *w_lock_list_free = NULL;
453 static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
454 static u_int pending_cnt;
455 
456 static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
457 SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
458 SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
459 SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
460     "");
461 
462 static struct witness *w_data;
463 static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1];
464 static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
465 static struct witness_hash w_hash;	/* The witness hash table. */
466 
467 /* The lock order data hash */
468 static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
469 static struct witness_lock_order_data *w_lofree = NULL;
470 static struct witness_lock_order_hash w_lohash;
471 static int w_max_used_index = 0;
472 static unsigned int w_generation = 0;
473 static const char w_notrunning[] = "Witness not running\n";
474 static const char w_stillcold[] = "Witness is still cold\n";
475 
476 
477 static struct witness_order_list_entry order_lists[] = {
478 	/*
479 	 * sx locks
480 	 */
481 	{ "proctree", &lock_class_sx },
482 	{ "allproc", &lock_class_sx },
483 	{ "allprison", &lock_class_sx },
484 	{ NULL, NULL },
485 	/*
486 	 * Various mutexes
487 	 */
488 	{ "Giant", &lock_class_mtx_sleep },
489 	{ "pipe mutex", &lock_class_mtx_sleep },
490 	{ "sigio lock", &lock_class_mtx_sleep },
491 	{ "process group", &lock_class_mtx_sleep },
492 	{ "process lock", &lock_class_mtx_sleep },
493 	{ "session", &lock_class_mtx_sleep },
494 	{ "uidinfo hash", &lock_class_rw },
495 #ifdef	HWPMC_HOOKS
496 	{ "pmc-sleep", &lock_class_mtx_sleep },
497 #endif
498 	{ NULL, NULL },
499 	/*
500 	 * Sockets
501 	 */
502 	{ "accept", &lock_class_mtx_sleep },
503 	{ "so_snd", &lock_class_mtx_sleep },
504 	{ "so_rcv", &lock_class_mtx_sleep },
505 	{ "sellck", &lock_class_mtx_sleep },
506 	{ NULL, NULL },
507 	/*
508 	 * Routing
509 	 */
510 	{ "so_rcv", &lock_class_mtx_sleep },
511 	{ "radix node head", &lock_class_rw },
512 	{ "rtentry", &lock_class_mtx_sleep },
513 	{ "ifaddr", &lock_class_mtx_sleep },
514 	{ NULL, NULL },
515 	/*
516 	 * IPv4 multicast:
517 	 * protocol locks before interface locks, after UDP locks.
518 	 */
519 	{ "udpinp", &lock_class_rw },
520 	{ "in_multi_mtx", &lock_class_mtx_sleep },
521 	{ "igmp_mtx", &lock_class_mtx_sleep },
522 	{ "if_addr_mtx", &lock_class_mtx_sleep },
523 	{ NULL, NULL },
524 	/*
525 	 * IPv6 multicast:
526 	 * protocol locks before interface locks, after UDP locks.
527 	 */
528 	{ "udpinp", &lock_class_rw },
529 	{ "in6_multi_mtx", &lock_class_mtx_sleep },
530 	{ "mld_mtx", &lock_class_mtx_sleep },
531 	{ "if_addr_mtx", &lock_class_mtx_sleep },
532 	{ NULL, NULL },
533 	/*
534 	 * UNIX Domain Sockets
535 	 */
536 	{ "unp_global_rwlock", &lock_class_rw },
537 	{ "unp_list_lock", &lock_class_mtx_sleep },
538 	{ "unp", &lock_class_mtx_sleep },
539 	{ "so_snd", &lock_class_mtx_sleep },
540 	{ NULL, NULL },
541 	/*
542 	 * UDP/IP
543 	 */
544 	{ "udp", &lock_class_rw },
545 	{ "udpinp", &lock_class_rw },
546 	{ "so_snd", &lock_class_mtx_sleep },
547 	{ NULL, NULL },
548 	/*
549 	 * TCP/IP
550 	 */
551 	{ "tcp", &lock_class_rw },
552 	{ "tcpinp", &lock_class_rw },
553 	{ "so_snd", &lock_class_mtx_sleep },
554 	{ NULL, NULL },
555 	/*
556 	 * netatalk
557 	 */
558 	{ "ddp_list_mtx", &lock_class_mtx_sleep },
559 	{ "ddp_mtx", &lock_class_mtx_sleep },
560 	{ NULL, NULL },
561 	/*
562 	 * BPF
563 	 */
564 	{ "bpf global lock", &lock_class_mtx_sleep },
565 	{ "bpf interface lock", &lock_class_mtx_sleep },
566 	{ "bpf cdev lock", &lock_class_mtx_sleep },
567 	{ NULL, NULL },
568 	/*
569 	 * NFS server
570 	 */
571 	{ "nfsd_mtx", &lock_class_mtx_sleep },
572 	{ "so_snd", &lock_class_mtx_sleep },
573 	{ NULL, NULL },
574 
575 	/*
576 	 * IEEE 802.11
577 	 */
578 	{ "802.11 com lock", &lock_class_mtx_sleep},
579 	{ NULL, NULL },
580 	/*
581 	 * Network drivers
582 	 */
583 	{ "network driver", &lock_class_mtx_sleep},
584 	{ NULL, NULL },
585 
586 	/*
587 	 * Netgraph
588 	 */
589 	{ "ng_node", &lock_class_mtx_sleep },
590 	{ "ng_worklist", &lock_class_mtx_sleep },
591 	{ NULL, NULL },
592 	/*
593 	 * CDEV
594 	 */
595 	{ "system map", &lock_class_mtx_sleep },
596 	{ "vm page queue mutex", &lock_class_mtx_sleep },
597 	{ "vnode interlock", &lock_class_mtx_sleep },
598 	{ "cdev", &lock_class_mtx_sleep },
599 	{ NULL, NULL },
600 	/*
601 	 * VM
602 	 *
603 	 */
604 	{ "vm object", &lock_class_mtx_sleep },
605 	{ "page lock", &lock_class_mtx_sleep },
606 	{ "vm page queue mutex", &lock_class_mtx_sleep },
607 	{ "pmap", &lock_class_mtx_sleep },
608 	{ NULL, NULL },
609 	/*
610 	 * kqueue/VFS interaction
611 	 */
612 	{ "kqueue", &lock_class_mtx_sleep },
613 	{ "struct mount mtx", &lock_class_mtx_sleep },
614 	{ "vnode interlock", &lock_class_mtx_sleep },
615 	{ NULL, NULL },
616 	/*
617 	 * ZFS locking
618 	 */
619 	{ "dn->dn_mtx", &lock_class_sx },
620 	{ "dr->dt.di.dr_mtx", &lock_class_sx },
621 	{ "db->db_mtx", &lock_class_sx },
622 	{ NULL, NULL },
623 	/*
624 	 * spin locks
625 	 */
626 #ifdef SMP
627 	{ "ap boot", &lock_class_mtx_spin },
628 #endif
629 	{ "rm.mutex_mtx", &lock_class_mtx_spin },
630 	{ "sio", &lock_class_mtx_spin },
631 	{ "scrlock", &lock_class_mtx_spin },
632 #ifdef __i386__
633 	{ "cy", &lock_class_mtx_spin },
634 #endif
635 #ifdef __sparc64__
636 	{ "pcib_mtx", &lock_class_mtx_spin },
637 	{ "rtc_mtx", &lock_class_mtx_spin },
638 #endif
639 	{ "scc_hwmtx", &lock_class_mtx_spin },
640 	{ "uart_hwmtx", &lock_class_mtx_spin },
641 	{ "fast_taskqueue", &lock_class_mtx_spin },
642 	{ "intr table", &lock_class_mtx_spin },
643 #ifdef	HWPMC_HOOKS
644 	{ "pmc-per-proc", &lock_class_mtx_spin },
645 #endif
646 	{ "process slock", &lock_class_mtx_spin },
647 	{ "sleepq chain", &lock_class_mtx_spin },
648 	{ "umtx lock", &lock_class_mtx_spin },
649 	{ "rm_spinlock", &lock_class_mtx_spin },
650 	{ "turnstile chain", &lock_class_mtx_spin },
651 	{ "turnstile lock", &lock_class_mtx_spin },
652 	{ "sched lock", &lock_class_mtx_spin },
653 	{ "td_contested", &lock_class_mtx_spin },
654 	{ "callout", &lock_class_mtx_spin },
655 	{ "entropy harvest mutex", &lock_class_mtx_spin },
656 	{ "syscons video lock", &lock_class_mtx_spin },
657 	{ "time lock", &lock_class_mtx_spin },
658 #ifdef SMP
659 	{ "smp rendezvous", &lock_class_mtx_spin },
660 #endif
661 #ifdef __powerpc__
662 	{ "tlb0", &lock_class_mtx_spin },
663 #endif
664 	/*
665 	 * leaf locks
666 	 */
667 	{ "intrcnt", &lock_class_mtx_spin },
668 	{ "icu", &lock_class_mtx_spin },
669 #if defined(SMP) && defined(__sparc64__)
670 	{ "ipi", &lock_class_mtx_spin },
671 #endif
672 #ifdef __i386__
673 	{ "allpmaps", &lock_class_mtx_spin },
674 	{ "descriptor tables", &lock_class_mtx_spin },
675 #endif
676 	{ "clk", &lock_class_mtx_spin },
677 	{ "cpuset", &lock_class_mtx_spin },
678 	{ "mprof lock", &lock_class_mtx_spin },
679 	{ "zombie lock", &lock_class_mtx_spin },
680 	{ "ALD Queue", &lock_class_mtx_spin },
681 #ifdef __ia64__
682 	{ "MCA spin lock", &lock_class_mtx_spin },
683 #endif
684 #if defined(__i386__) || defined(__amd64__)
685 	{ "pcicfg", &lock_class_mtx_spin },
686 	{ "NDIS thread lock", &lock_class_mtx_spin },
687 #endif
688 	{ "tw_osl_io_lock", &lock_class_mtx_spin },
689 	{ "tw_osl_q_lock", &lock_class_mtx_spin },
690 	{ "tw_cl_io_lock", &lock_class_mtx_spin },
691 	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
692 	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
693 #ifdef	HWPMC_HOOKS
694 	{ "pmc-leaf", &lock_class_mtx_spin },
695 #endif
696 	{ "blocked lock", &lock_class_mtx_spin },
697 	{ NULL, NULL },
698 	{ NULL, NULL }
699 };
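/*
 * Shape of the table above, in miniature (hypothetical names, not
 * compiled): each NULL-terminated run is one chain in which every entry
 * is ordered before the entry that follows it, and a second consecutive
 * NULL terminates the whole table (see witness_initialize()).
 */
#if 0
	{ "A", &lock_class_mtx_sleep },		/* "A" before "B" */
	{ "B", &lock_class_mtx_sleep },
	{ NULL, NULL },				/* end of chain */
	{ NULL, NULL }				/* end of table */
#endif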
700 
701 #ifdef BLESSING
702 /*
703  * Pairs of locks which have been blessed
704  * Don't complain about order problems with blessed locks
705  */
706 static struct witness_blessed blessed_list[] = {
707 };
708 static int blessed_count =
709 	sizeof(blessed_list) / sizeof(struct witness_blessed);
710 #endif
711 
712 /*
713  * This global is set to 0 once it becomes safe to use the witness code.
714  */
715 static int witness_cold = 1;
716 
717 /*
718  * This global is set to 1 once the static lock orders have been enrolled
719  * so that a warning can be issued for any spin locks enrolled later.
720  */
721 static int witness_spin_warn = 0;
722 
723 /*
724  * The WITNESS-enabled diagnostic code.  Note that the witness code does
725  * assume that the early boot is single-threaded at least until after this
726  * routine is completed.
727  */
728 static void
729 witness_initialize(void *dummy __unused)
730 {
731 	struct lock_object *lock;
732 	struct witness_order_list_entry *order;
733 	struct witness *w, *w1;
734 	int i;
735 
736 	w_data = malloc(sizeof (struct witness) * WITNESS_COUNT, M_WITNESS,
737 	    M_NOWAIT | M_ZERO);
738 
739 	/*
740 	 * We have to release Giant before initializing its witness
741 	 * structure so that WITNESS doesn't get confused.
742 	 */
743 	mtx_unlock(&Giant);
744 	mtx_assert(&Giant, MA_NOTOWNED);
745 
746 	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
747 	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
748 	    MTX_NOWITNESS | MTX_NOPROFILE);
749 	for (i = WITNESS_COUNT - 1; i >= 0; i--) {
750 		w = &w_data[i];
751 		memset(w, 0, sizeof(*w));
752 		w_data[i].w_index = i;	/* Witness index never changes. */
753 		witness_free(w);
754 	}
755 	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
756 	    ("%s: Invalid list of free witness objects", __func__));
757 
758 	/* Witness with index 0 is left unused, to aid in debugging. */
759 	STAILQ_REMOVE_HEAD(&w_free, w_list);
760 	w_free_cnt--;
761 
762 	memset(w_rmatrix, 0,
763 	    (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));
764 
765 	for (i = 0; i < LOCK_CHILDCOUNT; i++)
766 		witness_lock_list_free(&w_locklistdata[i]);
767 	witness_init_hash_tables();
768 
769 	/* First add in all the specified order lists. */
770 	for (order = order_lists; order->w_name != NULL; order++) {
771 		w = enroll(order->w_name, order->w_class);
772 		if (w == NULL)
773 			continue;
774 		w->w_file = "order list";
775 		for (order++; order->w_name != NULL; order++) {
776 			w1 = enroll(order->w_name, order->w_class);
777 			if (w1 == NULL)
778 				continue;
779 			w1->w_file = "order list";
780 			itismychild(w, w1);
781 			w = w1;
782 		}
783 	}
784 	witness_spin_warn = 1;
785 
786 	/* Iterate through all locks and add them to witness. */
787 	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
788 		lock = pending_locks[i].wh_lock;
789 		KASSERT(lock->lo_flags & LO_WITNESS,
790 		    ("%s: lock %s is on pending list but not LO_WITNESS",
791 		    __func__, lock->lo_name));
792 		lock->lo_witness = enroll(pending_locks[i].wh_type,
793 		    LOCK_CLASS(lock));
794 	}
795 
796 	/* Mark the witness code as being ready for use. */
797 	witness_cold = 0;
798 
799 	mtx_lock(&Giant);
800 }
801 SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
802     NULL);
803 
804 void
805 witness_init(struct lock_object *lock, const char *type)
806 {
807 	struct lock_class *class;
808 
809 	/* Various sanity checks. */
810 	class = LOCK_CLASS(lock);
811 	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
812 	    (class->lc_flags & LC_RECURSABLE) == 0)
813 		panic("%s: lock (%s) %s can not be recursable", __func__,
814 		    class->lc_name, lock->lo_name);
815 	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
816 	    (class->lc_flags & LC_SLEEPABLE) == 0)
817 		panic("%s: lock (%s) %s can not be sleepable", __func__,
818 		    class->lc_name, lock->lo_name);
819 	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
820 	    (class->lc_flags & LC_UPGRADABLE) == 0)
821 		panic("%s: lock (%s) %s can not be upgradable", __func__,
822 		    class->lc_name, lock->lo_name);
823 
824 	/*
825 	 * If we shouldn't watch this lock, then just clear lo_witness.
826 	 * Otherwise, if witness_cold is set, then it is too early to
827 	 * enroll this lock, so defer it to witness_initialize() by adding
828 	 * it to the pending_locks list.  If it is not too early, then enroll
829 	 * the lock now.
830 	 */
831 	if (witness_watch < 1 || panicstr != NULL ||
832 	    (lock->lo_flags & LO_WITNESS) == 0)
833 		lock->lo_witness = NULL;
834 	else if (witness_cold) {
835 		if (pending_cnt >= WITNESS_PENDLIST)
836 			panic("%s: pending locks list is too small, bump it\n",
837 			    __func__);
838 		pending_locks[pending_cnt].wh_lock = lock;
839 		pending_locks[pending_cnt++].wh_type = type;
840 	} else
841 		lock->lo_witness = enroll(type, class);
842 }
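/*
 * Illustrative context (names hypothetical, block not compiled):
 * witness_init() is not normally called directly; it is reached from lock
 * initialization, e.g. a typical driver mutex:
 */
#if 0
	struct mtx example_mtx;

	/* mtx_init() -> lock_init() -> witness_init(lo, "example lock") */
	mtx_init(&example_mtx, "example lock", NULL, MTX_DEF);
#endif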
843 
844 void
845 witness_destroy(struct lock_object *lock)
846 {
847 	struct lock_class *class;
848 	struct witness *w;
849 
850 	class = LOCK_CLASS(lock);
851 
852 	if (witness_cold)
853 		panic("lock (%s) %s destroyed while witness_cold",
854 		    class->lc_name, lock->lo_name);
855 
856 	/* XXX: need to verify that no one holds the lock */
857 	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
858 		return;
859 	w = lock->lo_witness;
860 
861 	mtx_lock_spin(&w_mtx);
862 	MPASS(w->w_refcount > 0);
863 	w->w_refcount--;
864 
865 	if (w->w_refcount == 0)
866 		depart(w);
867 	mtx_unlock_spin(&w_mtx);
868 }
869 
870 #ifdef DDB
871 static void
872 witness_ddb_compute_levels(void)
873 {
874 	struct witness *w;
875 
876 	/*
877 	 * First clear all levels.
878 	 */
879 	STAILQ_FOREACH(w, &w_all, w_list)
880 		w->w_ddb_level = -1;
881 
882 	/*
883 	 * Look for locks with no parents and level all their descendants.
884 	 */
885 	STAILQ_FOREACH(w, &w_all, w_list) {
886 
887 		/* If the witness has ancestors (is not a root), skip it. */
888 		if (w->w_num_ancestors > 0)
889 			continue;
890 		witness_ddb_level_descendants(w, 0);
891 	}
892 }
893 
894 static void
895 witness_ddb_level_descendants(struct witness *w, int l)
896 {
897 	int i;
898 
899 	if (w->w_ddb_level >= l)
900 		return;
901 
902 	w->w_ddb_level = l;
903 	l++;
904 
905 	for (i = 1; i <= w_max_used_index; i++) {
906 		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
907 			witness_ddb_level_descendants(&w_data[i], l);
908 	}
909 }
910 
911 static void
912 witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
913     struct witness *w, int indent)
914 {
915 	int i;
916 
917  	for (i = 0; i < indent; i++)
918  		prnt(" ");
919 	prnt("%s (type: %s, depth: %d, active refs: %d)",
920 	     w->w_name, w->w_class->lc_name,
921 	     w->w_ddb_level, w->w_refcount);
922  	if (w->w_displayed) {
923  		prnt(" -- (already displayed)\n");
924  		return;
925  	}
926  	w->w_displayed = 1;
927 	if (w->w_file != NULL && w->w_line != 0)
928 		prnt(" -- last acquired @ %s:%d\n", w->w_file,
929 		    w->w_line);
930 	else
931 		prnt(" -- never acquired\n");
932 	indent++;
933 	WITNESS_INDEX_ASSERT(w->w_index);
934 	for (i = 1; i <= w_max_used_index; i++) {
935 		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
936 			witness_ddb_display_descendants(prnt, &w_data[i],
937 			    indent);
938 	}
939 }
940 
941 static void
942 witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
943     struct witness_list *list)
944 {
945 	struct witness *w;
946 
947 	STAILQ_FOREACH(w, list, w_typelist) {
948 		if (w->w_file == NULL || w->w_ddb_level > 0)
949 			continue;
950 
951 		/* This lock has no ancestors - display its descendants. */
952 		witness_ddb_display_descendants(prnt, w, 0);
953 	}
954 }
955 
956 static void
957 witness_ddb_display(int(*prnt)(const char *fmt, ...))
958 {
959 	struct witness *w;
960 
961 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
962 	witness_ddb_compute_levels();
963 
964 	/* Clear all the displayed flags. */
965 	STAILQ_FOREACH(w, &w_all, w_list)
966 		w->w_displayed = 0;
967 
968 	/*
969 	 * First, handle sleep locks which have been acquired at least
970 	 * once.
971 	 */
972 	prnt("Sleep locks:\n");
973 	witness_ddb_display_list(prnt, &w_sleep);
974 
975 	/*
976 	 * Now do spin locks which have been acquired at least once.
977 	 */
978 	prnt("\nSpin locks:\n");
979 	witness_ddb_display_list(prnt, &w_spin);
980 
981 	/*
982 	 * Finally, any locks which have not been acquired yet.
983 	 */
984 	prnt("\nLocks which were never acquired:\n");
985 	STAILQ_FOREACH(w, &w_all, w_list) {
986 		if (w->w_file != NULL || w->w_refcount == 0)
987 			continue;
988 		prnt("%s (type: %s, depth: %d)\n", w->w_name,
989 		    w->w_class->lc_name, w->w_ddb_level);
990 	}
991 }
992 #endif /* DDB */
993 
994 /* Trim useless garbage from filenames. */
995 static const char *
996 fixup_filename(const char *file)
997 {
998 
999 	if (file == NULL)
1000 		return (NULL);
1001 	while (strncmp(file, "../", 3) == 0)
1002 		file += 3;
1003 	return (file);
1004 }
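/*
 * For instance (an illustrative check, not compiled): every leading "../"
 * is stripped in turn, so a relative build path collapses to the
 * kernel-source-relative one.
 */
#if 0
	MPASS(strcmp(fixup_filename("../../fs/nfs/nfs_vnops.c"),
	    "fs/nfs/nfs_vnops.c") == 0);
#endif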
1005 
1006 int
1007 witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
1008 {
1009 
1010 	if (witness_watch == -1 || panicstr != NULL)
1011 		return (0);
1012 
1013 	/* Require locks that witness knows about. */
1014 	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
1015 	    lock2->lo_witness == NULL)
1016 		return (EINVAL);
1017 
1018 	mtx_assert(&w_mtx, MA_NOTOWNED);
1019 	mtx_lock_spin(&w_mtx);
1020 
1021 	/*
1022 	 * If we already have either an explicit or implied lock order that
1023 	 * is the other way around, then return an error.
1024 	 */
1025 	if (witness_watch &&
1026 	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
1027 		mtx_unlock_spin(&w_mtx);
1028 		return (EDOOFUS);
1029 	}
1030 
1031 	/* Try to add the new order. */
1032 	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1033 	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
1034 	itismychild(lock1->lo_witness, lock2->lo_witness);
1035 	mtx_unlock_spin(&w_mtx);
1036 	return (0);
1037 }
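/*
 * An illustrative use (hypothetical locks, block not compiled): explicitly
 * teach witness that example_a is always acquired before example_b.
 */
#if 0
	error = witness_defineorder(&example_a.lock_object,
	    &example_b.lock_object);
	if (error == EDOOFUS)
		printf("witness already knows the opposite order\n");
#endif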
1038 
1039 void
1040 witness_checkorder(struct lock_object *lock, int flags, const char *file,
1041     int line, struct lock_object *interlock)
1042 {
1043 	struct lock_list_entry *lock_list, *lle;
1044 	struct lock_instance *lock1, *lock2, *plock;
1045 	struct lock_class *class;
1046 	struct witness *w, *w1;
1047 	struct thread *td;
1048 	int i, j;
1049 
1050 	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
1051 	    panicstr != NULL)
1052 		return;
1053 
1054 	w = lock->lo_witness;
1055 	class = LOCK_CLASS(lock);
1056 	td = curthread;
1057 	file = fixup_filename(file);
1058 
1059 	if (class->lc_flags & LC_SLEEPLOCK) {
1060 
1061 		/*
1062 		 * Since spin locks include a critical section, this check
1063 		 * implicitly enforces a lock order of all sleep locks before
1064 		 * all spin locks.
1065 		 */
1066 		if (td->td_critnest != 0 && !kdb_active)
1067 			panic("blockable sleep lock (%s) %s @ %s:%d",
1068 			    class->lc_name, lock->lo_name, file, line);
1069 
1070 		/*
1071 		 * If this is the first lock acquired then just return as
1072 		 * no order checking is needed.
1073 		 */
1074 		lock_list = td->td_sleeplocks;
1075 		if (lock_list == NULL || lock_list->ll_count == 0)
1076 			return;
1077 	} else {
1078 
1079 		/*
1080 		 * If this is the first lock, just return as no order
1081 		 * checking is needed.  Avoid problems with thread
1082 		 * migration pinning the thread while checking if
1083 		 * spinlocks are held.  If at least one spinlock is held,
1084 		 * the thread is on a safe path and it is allowed to
1085 		 * unpin itself.
1086 		 */
1087 		sched_pin();
1088 		lock_list = PCPU_GET(spinlocks);
1089 		if (lock_list == NULL || lock_list->ll_count == 0) {
1090 			sched_unpin();
1091 			return;
1092 		}
1093 		sched_unpin();
1094 	}
1095 
1096 	/*
1097 	 * Check to see if we are recursing on a lock we already own.  If
1098 	 * so, make sure that we don't mismatch exclusive and shared lock
1099 	 * acquires.
1100 	 */
1101 	lock1 = find_instance(lock_list, lock);
1102 	if (lock1 != NULL) {
1103 		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
1104 		    (flags & LOP_EXCLUSIVE) == 0) {
1105 			printf("shared lock of (%s) %s @ %s:%d\n",
1106 			    class->lc_name, lock->lo_name, file, line);
1107 			printf("while exclusively locked from %s:%d\n",
1108 			    lock1->li_file, lock1->li_line);
1109 			panic("share->excl");
1110 		}
1111 		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
1112 		    (flags & LOP_EXCLUSIVE) != 0) {
1113 			printf("exclusive lock of (%s) %s @ %s:%d\n",
1114 			    class->lc_name, lock->lo_name, file, line);
1115 			printf("while share locked from %s:%d\n",
1116 			    lock1->li_file, lock1->li_line);
1117 			panic("excl->share");
1118 		}
1119 		return;
1120 	}
1121 
1122 	/*
1123 	 * Find the previously acquired lock, but ignore interlocks.
1124 	 */
1125 	plock = &lock_list->ll_children[lock_list->ll_count - 1];
1126 	if (interlock != NULL && plock->li_lock == interlock) {
1127 		if (lock_list->ll_count > 1)
1128 			plock =
1129 			    &lock_list->ll_children[lock_list->ll_count - 2];
1130 		else {
1131 			lle = lock_list->ll_next;
1132 
1133 			/*
1134 			 * The interlock is the only lock we hold, so
1135 			 * simply return.
1136 			 */
1137 			if (lle == NULL)
1138 				return;
1139 			plock = &lle->ll_children[lle->ll_count - 1];
1140 		}
1141 	}
1142 
1143 	/*
1144 	 * Try to perform most checks without a lock.  If this succeeds we
1145 	 * can skip acquiring the lock and return success.
1146 	 */
1147 	w1 = plock->li_lock->lo_witness;
1148 	if (witness_lock_order_check(w1, w))
1149 		return;
1150 
1151 	/*
1152 	 * Check for duplicate locks of the same type.  Note that we only
1153 	 * have to check for this on the last lock we just acquired.  Any
1154 	 * other cases will be caught as lock order violations.
1155 	 */
1156 	mtx_lock_spin(&w_mtx);
1157 	witness_lock_order_add(w1, w);
1158 	if (w1 == w) {
1159 		i = w->w_index;
1160 		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
1161 		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
1162 			w_rmatrix[i][i] |= WITNESS_REVERSAL;
1163 			w->w_reversed = 1;
1164 			mtx_unlock_spin(&w_mtx);
1165 			printf(
1166 			    "acquiring duplicate lock of same type: \"%s\"\n",
1167 			    w->w_name);
1168 			printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
1169 			       plock->li_file, plock->li_line);
1170 			printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
1171 			witness_debugger(1);
1172 		} else
1173 			mtx_unlock_spin(&w_mtx);
1174 		return;
1175 	}
1176 	mtx_assert(&w_mtx, MA_OWNED);
1177 
1178 	/*
1179 	 * If we know that the lock we are acquiring comes after
1180 	 * the lock we most recently acquired in the lock order tree,
1181 	 * then there is no need for any further checks.
1182 	 */
1183 	if (isitmychild(w1, w))
1184 		goto out;
1185 
1186 	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
1187 		for (i = lle->ll_count - 1; i >= 0; i--, j++) {
1188 
1189 			MPASS(j < WITNESS_COUNT);
1190 			lock1 = &lle->ll_children[i];
1191 
1192 			/*
1193 			 * Ignore the interlock the first time we see it.
1194 			 */
1195 			if (interlock != NULL && interlock == lock1->li_lock) {
1196 				interlock = NULL;
1197 				continue;
1198 			}
1199 
1200 			/*
1201 			 * If this lock doesn't undergo witness checking,
1202 			 * then skip it.
1203 			 */
1204 			w1 = lock1->li_lock->lo_witness;
1205 			if (w1 == NULL) {
1206 				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
1207 				    ("lock missing witness structure"));
1208 				continue;
1209 			}
1210 
1211 			/*
1212 			 * If we are locking Giant and this is a sleepable
1213 			 * lock, then skip it.
1214 			 */
1215 			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
1216 			    lock == &Giant.lock_object)
1217 				continue;
1218 
1219 			/*
1220 			 * If we are locking a sleepable lock and this lock
1221 			 * is Giant, then skip it.
1222 			 */
1223 			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1224 			    lock1->li_lock == &Giant.lock_object)
1225 				continue;
1226 
1227 			/*
1228 			 * If we are locking a sleepable lock and this lock
1229 			 * isn't sleepable, we want to treat it as a lock
1230 			 * order violation to enforce a general lock order of
1231 			 * sleepable locks before non-sleepable locks.
1232 			 */
1233 			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1234 			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1235 				goto reversal;
1236 
1237 			/*
1238 			 * If we are locking Giant and this is a non-sleepable
1239 			 * lock, then treat it as a reversal.
1240 			 */
1241 			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
1242 			    lock == &Giant.lock_object)
1243 				goto reversal;
1244 
1245 			/*
1246 			 * Check the lock order hierarchy for a reversal.
1247 			 */
1248 			if (!isitmydescendant(w, w1))
1249 				continue;
1250 		reversal:
1251 
1252 			/*
1253 			 * We have a lock order violation, check to see if it
1254 			 * is allowed or has already been yelled about.
1255 			 */
1256 #ifdef BLESSING
1257 
1258 			/*
1259 			 * If the lock order is blessed, just bail.  We don't
1260 			 * look for other lock order violations though, which
1261 			 * may be a bug.
1262 			 */
1263 			if (blessed(w, w1))
1264 				goto out;
1265 #endif
1266 
1267 			/* Bail if this violation is known */
1268 			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
1269 				goto out;
1270 
1271 			/* Record this as a violation */
1272 			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
1273 			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
1274 			w->w_reversed = w1->w_reversed = 1;
1275 			witness_increment_graph_generation();
1276 			mtx_unlock_spin(&w_mtx);
1277 
1278 			/*
1279 			 * Ok, yell about it.
1280 			 */
1281 			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1282 			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1283 				printf(
1284 		"lock order reversal: (sleepable after non-sleepable)\n");
1285 			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
1286 			    && lock == &Giant.lock_object)
1287 				printf(
1288 		"lock order reversal: (Giant after non-sleepable)\n");
1289 			else
1290 				printf("lock order reversal:\n");
1291 
1292 			/*
1293 			 * Try to locate an earlier lock with
1294 			 * witness w in our list.
1295 			 */
1296 			do {
1297 				lock2 = &lle->ll_children[i];
1298 				MPASS(lock2->li_lock != NULL);
1299 				if (lock2->li_lock->lo_witness == w)
1300 					break;
1301 				if (i == 0 && lle->ll_next != NULL) {
1302 					lle = lle->ll_next;
1303 					i = lle->ll_count - 1;
1304 					MPASS(i >= 0 && i < LOCK_NCHILDREN);
1305 				} else
1306 					i--;
1307 			} while (i >= 0);
1308 			if (i < 0) {
1309 				printf(" 1st %p %s (%s) @ %s:%d\n",
1310 				    lock1->li_lock, lock1->li_lock->lo_name,
1311 				    w1->w_name, lock1->li_file, lock1->li_line);
1312 				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
1313 				    lock->lo_name, w->w_name, file, line);
1314 			} else {
1315 				printf(" 1st %p %s (%s) @ %s:%d\n",
1316 				    lock2->li_lock, lock2->li_lock->lo_name,
1317 				    lock2->li_lock->lo_witness->w_name,
1318 				    lock2->li_file, lock2->li_line);
1319 				printf(" 2nd %p %s (%s) @ %s:%d\n",
1320 				    lock1->li_lock, lock1->li_lock->lo_name,
1321 				    w1->w_name, lock1->li_file, lock1->li_line);
1322 				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
1323 				    lock->lo_name, w->w_name, file, line);
1324 			}
1325 			witness_debugger(1);
1326 			return;
1327 		}
1328 	}
1329 
1330 	/*
1331 	 * If requested, build a new lock order.  However, don't build a new
1332 	 * relationship between a sleepable lock and Giant if it is in the
1333 	 * wrong direction.  The correct lock order is that sleepable locks
1334 	 * always come before Giant.
1335 	 */
1336 	if (flags & LOP_NEWORDER &&
1337 	    !(plock->li_lock == &Giant.lock_object &&
1338 	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
1339 		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1340 		    w->w_name, plock->li_lock->lo_witness->w_name);
1341 		itismychild(plock->li_lock->lo_witness, w);
1342 	}
1343 out:
1344 	mtx_unlock_spin(&w_mtx);
1345 }
1346 
1347 void
1348 witness_lock(struct lock_object *lock, int flags, const char *file, int line)
1349 {
1350 	struct lock_list_entry **lock_list, *lle;
1351 	struct lock_instance *instance;
1352 	struct witness *w;
1353 	struct thread *td;
1354 
1355 	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
1356 	    panicstr != NULL)
1357 		return;
1358 	w = lock->lo_witness;
1359 	td = curthread;
1360 	file = fixup_filename(file);
1361 
1362 	/* Determine lock list for this lock. */
1363 	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
1364 		lock_list = &td->td_sleeplocks;
1365 	else
1366 		lock_list = PCPU_PTR(spinlocks);
1367 
1368 	/* Check to see if we are recursing on a lock we already own. */
1369 	instance = find_instance(*lock_list, lock);
1370 	if (instance != NULL) {
1371 		instance->li_flags++;
1372 		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
1373 		    td->td_proc->p_pid, lock->lo_name,
1374 		    instance->li_flags & LI_RECURSEMASK);
1375 		instance->li_file = file;
1376 		instance->li_line = line;
1377 		return;
1378 	}
1379 
1380 	/* Update the per-witness file and line of the last acquire. */
1381 	w->w_file = file;
1382 	w->w_line = line;
1383 
1384 	/* Find the next open lock instance in the list and fill it. */
1385 	lle = *lock_list;
1386 	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
1387 		lle = witness_lock_list_get();
1388 		if (lle == NULL)
1389 			return;
1390 		lle->ll_next = *lock_list;
1391 		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
1392 		    td->td_proc->p_pid, lle);
1393 		*lock_list = lle;
1394 	}
1395 	instance = &lle->ll_children[lle->ll_count++];
1396 	instance->li_lock = lock;
1397 	instance->li_line = line;
1398 	instance->li_file = file;
1399 	if ((flags & LOP_EXCLUSIVE) != 0)
1400 		instance->li_flags = LI_EXCLUSIVE;
1401 	else
1402 		instance->li_flags = 0;
1403 	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
1404 	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
1405 }
1406 
1407 void
1408 witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
1409 {
1410 	struct lock_instance *instance;
1411 	struct lock_class *class;
1412 
1413 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1414 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1415 		return;
1416 	class = LOCK_CLASS(lock);
1417 	file = fixup_filename(file);
1418 	if (witness_watch) {
1419 		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1420 			panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
1421 			    class->lc_name, lock->lo_name, file, line);
1422 		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1423 			panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
1424 			    class->lc_name, lock->lo_name, file, line);
1425 	}
1426 	instance = find_instance(curthread->td_sleeplocks, lock);
1427 	if (instance == NULL)
1428 		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
1429 		    class->lc_name, lock->lo_name, file, line);
1430 	if (witness_watch) {
1431 		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
1432 			panic("upgrade of exclusive lock (%s) %s @ %s:%d",
1433 			    class->lc_name, lock->lo_name, file, line);
1434 		if ((instance->li_flags & LI_RECURSEMASK) != 0)
1435 			panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
1436 			    class->lc_name, lock->lo_name,
1437 			    instance->li_flags & LI_RECURSEMASK, file, line);
1438 	}
1439 	instance->li_flags |= LI_EXCLUSIVE;
1440 }
1441 
1442 void
1443 witness_downgrade(struct lock_object *lock, int flags, const char *file,
1444     int line)
1445 {
1446 	struct lock_instance *instance;
1447 	struct lock_class *class;
1448 
1449 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1450 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1451 		return;
1452 	class = LOCK_CLASS(lock);
1453 	file = fixup_filename(file);
1454 	if (witness_watch) {
1455 		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1456 		panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
1457 			    class->lc_name, lock->lo_name, file, line);
1458 		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1459 			panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
1460 			    class->lc_name, lock->lo_name, file, line);
1461 	}
1462 	instance = find_instance(curthread->td_sleeplocks, lock);
1463 	if (instance == NULL)
1464 		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
1465 		    class->lc_name, lock->lo_name, file, line);
1466 	if (witness_watch) {
1467 		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
1468 			panic("downgrade of shared lock (%s) %s @ %s:%d",
1469 			    class->lc_name, lock->lo_name, file, line);
1470 		if ((instance->li_flags & LI_RECURSEMASK) != 0)
1471 			panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
1472 			    class->lc_name, lock->lo_name,
1473 			    instance->li_flags & LI_RECURSEMASK, file, line);
1474 	}
1475 	instance->li_flags &= ~LI_EXCLUSIVE;
1476 }
1477 
1478 void
1479 witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
1480 {
1481 	struct lock_list_entry **lock_list, *lle;
1482 	struct lock_instance *instance;
1483 	struct lock_class *class;
1484 	struct thread *td;
1485 	register_t s;
1486 	int i, j;
1487 
1488 	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
1489 		return;
1490 	td = curthread;
1491 	class = LOCK_CLASS(lock);
1492 	file = fixup_filename(file);
1493 
1494 	/* Find lock instance associated with this lock. */
1495 	if (class->lc_flags & LC_SLEEPLOCK)
1496 		lock_list = &td->td_sleeplocks;
1497 	else
1498 		lock_list = PCPU_PTR(spinlocks);
1499 	lle = *lock_list;
1500 	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
1501 		for (i = 0; i < (*lock_list)->ll_count; i++) {
1502 			instance = &(*lock_list)->ll_children[i];
1503 			if (instance->li_lock == lock)
1504 				goto found;
1505 		}
1506 
1507 	/*
1508 	 * When disabling WITNESS through witness_watch, we may end up with
1509 	 * locks still registered in the td_sleeplocks queue.
1510 	 * We have to make sure we flush these queues, so just search for
1511 	 * any such registered locks and remove them.
1512 	 */
1513 	if (witness_watch > 0)
1514 		panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
1515 		    lock->lo_name, file, line);
1516 	else
1517 		return;
1518 found:
1519 
1520 	/* First, check for shared/exclusive mismatches. */
1521 	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
1522 	    (flags & LOP_EXCLUSIVE) == 0) {
1523 		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
1524 		    lock->lo_name, file, line);
1525 		printf("while exclusively locked from %s:%d\n",
1526 		    instance->li_file, instance->li_line);
1527 		panic("excl->ushare");
1528 	}
1529 	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
1530 	    (flags & LOP_EXCLUSIVE) != 0) {
1531 		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
1532 		    lock->lo_name, file, line);
1533 		printf("while share locked from %s:%d\n", instance->li_file,
1534 		    instance->li_line);
1535 		panic("share->uexcl");
1536 	}
1537 	/* If we are recursed, unrecurse. */
1538 	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
1539 		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
1540 		    td->td_proc->p_pid, instance->li_lock->lo_name,
1541 		    instance->li_flags);
1542 		instance->li_flags--;
1543 		return;
1544 	}
1545 	/* The lock is now being dropped, check for NORELEASE flag */
1546 	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
1547 		printf("forbidden unlock of (%s) %s @ %s:%d\n", class->lc_name,
1548 		    lock->lo_name, file, line);
1549 		panic("lock marked norelease");
1550 	}
1551 
1552 	/* Otherwise, remove this item from the list. */
1553 	s = intr_disable();
1554 	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
1555 	    td->td_proc->p_pid, instance->li_lock->lo_name,
1556 	    (*lock_list)->ll_count - 1);
1557 	for (j = i; j < (*lock_list)->ll_count - 1; j++)
1558 		(*lock_list)->ll_children[j] =
1559 		    (*lock_list)->ll_children[j + 1];
1560 	(*lock_list)->ll_count--;
1561 	intr_restore(s);
1562 
1563 	/*
1564 	 * In order to reduce contention on w_mtx, we want to always keep a
1565 	 * head object in each list so that frequent allocation from the
1566 	 * free witness pool (and the subsequent locking) is avoided.
1567 	 * To keep the code simple, an emptied head object also implies
1568 	 * that there are no further objects in the list, so list ownership
1569 	 * must be handed over to the next entry when the current head
1570 	 * needs to be freed.
1571 	 */
1572 	if ((*lock_list)->ll_count == 0) {
1573 		if (*lock_list == lle) {
1574 			if (lle->ll_next == NULL)
1575 				return;
1576 		} else
1577 			lle = *lock_list;
1578 		*lock_list = lle->ll_next;
1579 		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
1580 		    td->td_proc->p_pid, lle);
1581 		witness_lock_list_free(lle);
1582 	}
1583 }
1584 
1585 void
1586 witness_thread_exit(struct thread *td)
1587 {
1588 	struct lock_list_entry *lle;
1589 	int i, n;
1590 
1591 	lle = td->td_sleeplocks;
1592 	if (lle == NULL || panicstr != NULL)
1593 		return;
1594 	if (lle->ll_count != 0) {
1595 		for (n = 0; lle != NULL; lle = lle->ll_next)
1596 			for (i = lle->ll_count - 1; i >= 0; i--) {
1597 				if (n == 0)
1598 		printf("Thread %p exiting with the following locks held:\n",
1599 					    td);
1600 				n++;
1601 				witness_list_lock(&lle->ll_children[i], printf);
1602 
1603 			}
1604 		panic("Thread %p cannot exit while holding sleeplocks\n", td);
1605 	}
1606 	witness_lock_list_free(lle);
1607 }
1608 
1609 /*
1610  * Warn if any locks other than 'lock' are held.  Flags can be passed in to
1611  * exempt Giant and sleepable locks from the checks as well.  If any
1612  * non-exempt locks are held, then a supplied message is printed to the
1613  * console along with a list of the offending locks.  If indicated in the
1614  * flags then a failure results in a panic as well.
1615  */
1616 int
1617 witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1618 {
1619 	struct lock_list_entry *lock_list, *lle;
1620 	struct lock_instance *lock1;
1621 	struct thread *td;
1622 	va_list ap;
1623 	int i, n;
1624 
1625 	if (witness_cold || witness_watch < 1 || panicstr != NULL)
1626 		return (0);
1627 	n = 0;
1628 	td = curthread;
1629 	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1630 		for (i = lle->ll_count - 1; i >= 0; i--) {
1631 			lock1 = &lle->ll_children[i];
1632 			if (lock1->li_lock == lock)
1633 				continue;
1634 			if (flags & WARN_GIANTOK &&
1635 			    lock1->li_lock == &Giant.lock_object)
1636 				continue;
1637 			if (flags & WARN_SLEEPOK &&
1638 			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1639 				continue;
1640 			if (n == 0) {
1641 				va_start(ap, fmt);
1642 				vprintf(fmt, ap);
1643 				va_end(ap);
1644 				printf(" with the following");
1645 				if (flags & WARN_SLEEPOK)
1646 					printf(" non-sleepable");
1647 				printf(" locks held:\n");
1648 			}
1649 			n++;
1650 			witness_list_lock(lock1, printf);
1651 		}
1652 
1653 	/*
1654 	 * Pin the thread in order to avoid problems with thread migration.
1655 	 * Once all checks of spin lock ownership have passed, the thread
1656 	 * is on a safe path and can be unpinned.
1657 	 */
1658 	sched_pin();
1659 	lock_list = PCPU_GET(spinlocks);
1660 	if (lock_list != NULL && lock_list->ll_count != 0) {
1661 		sched_unpin();
1662 
1663 		/*
1664 		 * We should only have one spin lock and, since the
1665 		 * exemption flags cannot apply to this lock class,
1666 		 * check whether the only spin lock held is the one
1667 		 * curthread is expected to hold.
1668 		 */
1669 		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
1670 		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
1671 		    lock1->li_lock == lock && n == 0)
1672 			return (0);
1673 
1674 		va_start(ap, fmt);
1675 		vprintf(fmt, ap);
1676 		va_end(ap);
1677 		printf(" with the following");
1678 		if (flags & WARN_SLEEPOK)
1679 			printf(" non-sleepable");
1680 		printf(" locks held:\n");
1681 		n += witness_list_locks(&lock_list, printf);
1682 	} else
1683 		sched_unpin();
1684 	if (flags & WARN_PANIC && n)
1685 		panic("%s", __func__);
1686 	else
1687 		witness_debugger(n);
1688 	return (n);
1689 }
1690 
1691 const char *
1692 witness_file(struct lock_object *lock)
1693 {
1694 	struct witness *w;
1695 
1696 	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1697 		return ("?");
1698 	w = lock->lo_witness;
1699 	return (w->w_file);
1700 }
1701 
1702 int
1703 witness_line(struct lock_object *lock)
1704 {
1705 	struct witness *w;
1706 
1707 	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1708 		return (0);
1709 	w = lock->lo_witness;
1710 	return (w->w_line);
1711 }
1712 
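/*
 * Find the witness associated with the given lock description, enrolling
 * a new one if this is the first lock with that name.  Returns NULL when
 * witness is disabled or the lock class is exempt from checking.
 */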
1713 static struct witness *
1714 enroll(const char *description, struct lock_class *lock_class)
1715 {
1716 	struct witness *w;
1717 	struct witness_list *typelist;
1718 
1719 	MPASS(description != NULL);
1720 
1721 	if (witness_watch == -1 || panicstr != NULL)
1722 		return (NULL);
1723 	if ((lock_class->lc_flags & LC_SPINLOCK)) {
1724 		if (witness_skipspin)
1725 			return (NULL);
1726 		else
1727 			typelist = &w_spin;
1728 	} else if ((lock_class->lc_flags & LC_SLEEPLOCK))
1729 		typelist = &w_sleep;
1730 	else
1731 		panic("lock class %s is not sleep or spin",
1732 		    lock_class->lc_name);
1733 
1734 	mtx_lock_spin(&w_mtx);
1735 	w = witness_hash_get(description);
1736 	if (w)
1737 		goto found;
1738 	if ((w = witness_get()) == NULL)
1739 		return (NULL);
1740 	MPASS(strlen(description) < MAX_W_NAME);
1741 	strcpy(w->w_name, description);
1742 	w->w_class = lock_class;
1743 	w->w_refcount = 1;
1744 	STAILQ_INSERT_HEAD(&w_all, w, w_list);
1745 	if (lock_class->lc_flags & LC_SPINLOCK) {
1746 		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1747 		w_spin_cnt++;
1748 	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1749 		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1750 		w_sleep_cnt++;
1751 	}
1752 
1753 	/* Insert new witness into the hash */
1754 	witness_hash_put(w);
1755 	witness_increment_graph_generation();
1756 	mtx_unlock_spin(&w_mtx);
1757 	return (w);
1758 found:
1759 	w->w_refcount++;
1760 	mtx_unlock_spin(&w_mtx);
1761 	if (lock_class != w->w_class)
1762 		panic(
1763 			"lock (%s) %s does not match earlier (%s) lock",
1764 			description, lock_class->lc_name,
1765 			w->w_class->lc_name);
1766 	return (w);
1767 }
1768 
1769 static void
1770 depart(struct witness *w)
1771 {
1772 
1773 	MPASS(w->w_refcount == 0);
1774 	if (w->w_class->lc_flags & LC_SLEEPLOCK)
1775 		w_sleep_cnt--;
1776 	else
1777 		w_spin_cnt--;
1782 	/*
1783 	 * Set file to NULL as it may point into a loadable module.
1784 	 */
1785 	w->w_file = NULL;
1786 	w->w_line = 0;
1787 	witness_increment_graph_generation();
1788 }
1789 
1791 static void
1792 adopt(struct witness *parent, struct witness *child)
1793 {
1794 	int pi, ci, i, j;
1795 
1796 	if (witness_cold == 0)
1797 		mtx_assert(&w_mtx, MA_OWNED);
1798 
1799 	/* If the relationship is already known, there's no work to be done. */
1800 	if (isitmychild(parent, child))
1801 		return;
1802 
1803 	/* When the structure of the graph changes, bump up the generation. */
1804 	witness_increment_graph_generation();
1805 
1806 	/*
1807 	 * The hard part ... create the direct relationship, then propagate all
1808 	 * indirect relationships.
1809 	 */
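	/*
	 * For example (illustrative): if the matrix already records
	 * A -> parent and child -> B, then once the direct edge
	 * parent -> child is added, the loops below also record
	 * A -> child, parent -> B and A -> B, keeping the
	 * ancestor/descendant closure complete.
	 */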
1810 	pi = parent->w_index;
1811 	ci = child->w_index;
1812 	WITNESS_INDEX_ASSERT(pi);
1813 	WITNESS_INDEX_ASSERT(ci);
1814 	MPASS(pi != ci);
1815 	w_rmatrix[pi][ci] |= WITNESS_PARENT;
1816 	w_rmatrix[ci][pi] |= WITNESS_CHILD;
1817 
1818 	/*
1819 	 * If parent was not already an ancestor of child,
1820 	 * then we increment the descendant and ancestor counters.
1821 	 */
1822 	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
1823 		parent->w_num_descendants++;
1824 		child->w_num_ancestors++;
1825 	}
1826 
1827 	/*
1828 	 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
1829 	 * an ancestor of 'pi' during this loop.
1830 	 */
1831 	for (i = 1; i <= w_max_used_index; i++) {
1832 		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
1833 		    (i != pi))
1834 			continue;
1835 
1836 		/* Find each descendant of 'i' and mark it as a descendant. */
1837 		for (j = 1; j <= w_max_used_index; j++) {
1838 
1839 			/*
1840 			 * Skip children that are already marked as
1841 			 * descendants of 'i'.
1842 			 */
1843 			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
1844 				continue;
1845 
1846 			/*
1847 			 * We are only interested in descendants of 'ci'. Note
1848 			 * that 'ci' itself is counted as a descendant of 'ci'.
1849 			 */
1850 			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
1851 			    (j != ci))
1852 				continue;
1853 			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
1854 			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
1855 			w_data[i].w_num_descendants++;
1856 			w_data[j].w_num_ancestors++;
1857 
1858 			/*
1859 			 * Make sure we aren't marking a node as both an
1860 			 * ancestor and descendant. We should have caught
1861 			 * this as a lock order reversal earlier.
1862 			 */
1863 			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
1864 			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
1865 				printf("witness rmatrix paradox! [%d][%d]=%d "
1866 				    "both ancestor and descendant\n",
1867 				    i, j, w_rmatrix[i][j]);
1868 				kdb_backtrace();
1869 				printf("Witness disabled.\n");
1870 				witness_watch = -1;
1871 			}
1872 			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
1873 			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
1874 				printf("witness rmatrix paradox! [%d][%d]=%d "
1875 				    "both ancestor and descendant\n",
1876 				    j, i, w_rmatrix[j][i]);
1877 				kdb_backtrace();
1878 				printf("Witness disabled.\n");
1879 				witness_watch = -1;
1880 			}
1881 		}
1882 	}
1883 }
1884 
1885 static void
1886 itismychild(struct witness *parent, struct witness *child)
1887 {
1888 
1889 	MPASS(child != NULL && parent != NULL);
1890 	if (witness_cold == 0)
1891 		mtx_assert(&w_mtx, MA_OWNED);
1892 
1893 	if (!witness_lock_type_equal(parent, child)) {
1894 		if (witness_cold == 0)
1895 			mtx_unlock_spin(&w_mtx);
1896 		panic("%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
1897 		    "the same lock type", __func__, parent->w_name,
1898 		    parent->w_class->lc_name, child->w_name,
1899 		    child->w_class->lc_name);
1900 	}
1901 	adopt(parent, child);
1902 }
1903 
1904 /*
1905  * Generic code for the isitmy*() functions. The rmask parameter is the
1906  * expected relationship of w1 to w2.
1907  */
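/*
 * For instance, if w_rmatrix[i1][i2] has WITNESS_PARENT set, then
 * w_rmatrix[i2][i1] is expected to have WITNESS_CHILD set; the
 * WITNESS_ATOD()/WITNESS_DTOA() macros convert between the ancestor-side
 * and descendant-side encodings so the sanity check below can compare the
 * two entries.
 */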
1908 static int
1909 _isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
1910 {
1911 	unsigned char r1, r2;
1912 	int i1, i2;
1913 
1914 	i1 = w1->w_index;
1915 	i2 = w2->w_index;
1916 	WITNESS_INDEX_ASSERT(i1);
1917 	WITNESS_INDEX_ASSERT(i2);
1918 	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
1919 	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;
1920 
1921 	/* The flags on one better be the inverse of the flags on the other */
1922 	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
1923 		(WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
1924 		printf("%s: rmatrix mismatch between %s (index %d) and %s "
1925 		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
1926 		    "w_rmatrix[%d][%d] == %hhx\n",
1927 		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
1928 		    i2, i1, r2);
1929 		kdb_backtrace();
1930 		printf("Witness disabled.\n");
1931 		witness_watch = -1;
1932 	}
1933 	return (r1 & rmask);
1934 }
1935 
1936 /*
1937  * Checks if @child is a direct child of @parent.
1938  */
1939 static int
1940 isitmychild(struct witness *parent, struct witness *child)
1941 {
1942 
1943 	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
1944 }
1945 
1946 /*
1947  * Checks if @descendant is a direct or indirect descendant of @ancestor.
1948  */
1949 static int
1950 isitmydescendant(struct witness *ancestor, struct witness *descendant)
1951 {
1952 
1953 	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
1954 	    __func__));
1955 }
1956 
1957 #ifdef BLESSING
1958 static int
1959 blessed(struct witness *w1, struct witness *w2)
1960 {
1961 	int i;
1962 	struct witness_blessed *b;
1963 
1964 	for (i = 0; i < blessed_count; i++) {
1965 		b = &blessed_list[i];
1966 		if (strcmp(w1->w_name, b->b_lock1) == 0) {
1967 			if (strcmp(w2->w_name, b->b_lock2) == 0)
1968 				return (1);
1969 			continue;
1970 		}
1971 		if (strcmp(w1->w_name, b->b_lock2) == 0)
1972 			if (strcmp(w2->w_name, b->b_lock1) == 0)
1973 				return (1);
1974 	}
1975 	return (0);
1976 }
1977 #endif
1978 
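/*
 * A sketch of how a blessed pair might be declared (the lock names here
 * are hypothetical; the real blessed_list table is defined earlier in
 * this file under BLESSING):
 *
 *	static struct witness_blessed blessed_list[] = {
 *		{ "lock A", "lock B" },
 *	};
 */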
1979 static struct witness *
1980 witness_get(void)
1981 {
1982 	struct witness *w;
1983 	int index;
1984 
1985 	if (witness_cold == 0)
1986 		mtx_assert(&w_mtx, MA_OWNED);
1987 
1988 	if (witness_watch == -1) {
1989 		mtx_unlock_spin(&w_mtx);
1990 		return (NULL);
1991 	}
1992 	if (STAILQ_EMPTY(&w_free)) {
1993 		witness_watch = -1;
1994 		mtx_unlock_spin(&w_mtx);
1995 		printf("WITNESS: unable to allocate a new witness object\n");
1996 		return (NULL);
1997 	}
1998 	w = STAILQ_FIRST(&w_free);
1999 	STAILQ_REMOVE_HEAD(&w_free, w_list);
2000 	w_free_cnt--;
2001 	index = w->w_index;
2002 	MPASS(index > 0 && index == w_max_used_index + 1 &&
2003 	    index < WITNESS_COUNT);
2004 	bzero(w, sizeof(*w));
2005 	w->w_index = index;
2006 	if (index > w_max_used_index)
2007 		w_max_used_index = index;
2008 	return (w);
2009 }
2010 
2011 static void
2012 witness_free(struct witness *w)
2013 {
2014 
2015 	STAILQ_INSERT_HEAD(&w_free, w, w_list);
2016 	w_free_cnt++;
2017 }
2018 
2019 static struct lock_list_entry *
2020 witness_lock_list_get(void)
2021 {
2022 	struct lock_list_entry *lle;
2023 
2024 	if (witness_watch == -1)
2025 		return (NULL);
2026 	mtx_lock_spin(&w_mtx);
2027 	lle = w_lock_list_free;
2028 	if (lle == NULL) {
2029 		witness_watch = -1;
2030 		mtx_unlock_spin(&w_mtx);
2031 		printf("%s: witness exhausted\n", __func__);
2032 		return (NULL);
2033 	}
2034 	w_lock_list_free = lle->ll_next;
2035 	mtx_unlock_spin(&w_mtx);
2036 	bzero(lle, sizeof(*lle));
2037 	return (lle);
2038 }
2039 
2040 static void
2041 witness_lock_list_free(struct lock_list_entry *lle)
2042 {
2043 
2044 	mtx_lock_spin(&w_mtx);
2045 	lle->ll_next = w_lock_list_free;
2046 	w_lock_list_free = lle;
2047 	mtx_unlock_spin(&w_mtx);
2048 }
2049 
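/*
 * Walk a lock list and its chained entries looking for an instance of
 * the given lock; returns NULL if the lock is not held in that list.
 */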
2050 static struct lock_instance *
2051 find_instance(struct lock_list_entry *list, struct lock_object *lock)
2052 {
2053 	struct lock_list_entry *lle;
2054 	struct lock_instance *instance;
2055 	int i;
2056 
2057 	for (lle = list; lle != NULL; lle = lle->ll_next)
2058 		for (i = lle->ll_count - 1; i >= 0; i--) {
2059 			instance = &lle->ll_children[i];
2060 			if (instance->li_lock == lock)
2061 				return (instance);
2062 		}
2063 	return (NULL);
2064 }
2065 
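/*
 * Print a single held lock instance via the supplied printf-like routine.
 * Output sketch (one line; the parenthesized witness name appears only
 * when it differs from the lock's own name, and the pointer is invented):
 *
 *	exclusive sleep mutex foo (foo witness) r = 0 (0xdeadc0de)
 *	    locked @ kern/foo.c:42
 */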
2066 static void
2067 witness_list_lock(struct lock_instance *instance,
2068     int (*prnt)(const char *fmt, ...))
2069 {
2070 	struct lock_object *lock;
2071 
2072 	lock = instance->li_lock;
2073 	prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2074 	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2075 	if (lock->lo_witness->w_name != lock->lo_name)
2076 		prnt(" (%s)", lock->lo_witness->w_name);
2077 	prnt(" r = %d (%p) locked @ %s:%d\n",
2078 	    instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
2079 	    instance->li_line);
2080 }
2081 
2082 #ifdef DDB
2083 static int
2084 witness_thread_has_locks(struct thread *td)
2085 {
2086 
2087 	if (td->td_sleeplocks == NULL)
2088 		return (0);
2089 	return (td->td_sleeplocks->ll_count != 0);
2090 }
2091 
2092 static int
2093 witness_proc_has_locks(struct proc *p)
2094 {
2095 	struct thread *td;
2096 
2097 	FOREACH_THREAD_IN_PROC(p, td) {
2098 		if (witness_thread_has_locks(td))
2099 			return (1);
2100 	}
2101 	return (0);
2102 }
2103 #endif
2104 
2105 int
2106 witness_list_locks(struct lock_list_entry **lock_list,
2107     int (*prnt)(const char *fmt, ...))
2108 {
2109 	struct lock_list_entry *lle;
2110 	int i, nheld;
2111 
2112 	nheld = 0;
2113 	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2114 		for (i = lle->ll_count - 1; i >= 0; i--) {
2115 			witness_list_lock(&lle->ll_children[i], prnt);
2116 			nheld++;
2117 		}
2118 	return (nheld);
2119 }
2120 
2121 /*
2122  * This is a bit risky at best.  We call this function when we have timed
2123  * out acquiring a spin lock, and we assume that the other CPU is stuck
2124  * with this lock held.  So, we go groveling around in the other CPU's
2125  * per-cpu data to try to find the lock instance for this spin lock to
2126  * see when it was last acquired.
2127  */
2128 void
2129 witness_display_spinlock(struct lock_object *lock, struct thread *owner,
2130     int (*prnt)(const char *fmt, ...))
2131 {
2132 	struct lock_instance *instance;
2133 	struct pcpu *pc;
2134 
2135 	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2136 		return;
2137 	pc = pcpu_find(owner->td_oncpu);
2138 	instance = find_instance(pc->pc_spinlocks, lock);
2139 	if (instance != NULL)
2140 		witness_list_lock(instance, prnt);
2141 }
2142 
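/*
 * Hypothetical usage sketch: witness_save() and witness_restore() are
 * normally reached through the WITNESS_SAVE()/WITNESS_RESTORE() macros
 * to preserve a lock's file/line state across a temporary release:
 *
 *	WITNESS_SAVE_DECL(foo);
 *	WITNESS_SAVE(&foo_lock.lock_object, foo);
 *	... drop and reacquire foo_lock ...
 *	WITNESS_RESTORE(&foo_lock.lock_object, foo);
 */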
2143 void
2144 witness_save(struct lock_object *lock, const char **filep, int *linep)
2145 {
2146 	struct lock_list_entry *lock_list;
2147 	struct lock_instance *instance;
2148 	struct lock_class *class;
2149 
2150 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2151 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2152 		return;
2153 	class = LOCK_CLASS(lock);
2154 	if (class->lc_flags & LC_SLEEPLOCK)
2155 		lock_list = curthread->td_sleeplocks;
2156 	else {
2157 		if (witness_skipspin)
2158 			return;
2159 		lock_list = PCPU_GET(spinlocks);
2160 	}
2161 	instance = find_instance(lock_list, lock);
2162 	if (instance == NULL)
2163 		panic("%s: lock (%s) %s not locked", __func__,
2164 		    class->lc_name, lock->lo_name);
2165 	*filep = instance->li_file;
2166 	*linep = instance->li_line;
2167 }
2168 
2169 void
2170 witness_restore(struct lock_object *lock, const char *file, int line)
2171 {
2172 	struct lock_list_entry *lock_list;
2173 	struct lock_instance *instance;
2174 	struct lock_class *class;
2175 
2176 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2177 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2178 		return;
2179 	class = LOCK_CLASS(lock);
2180 	if (class->lc_flags & LC_SLEEPLOCK)
2181 		lock_list = curthread->td_sleeplocks;
2182 	else {
2183 		if (witness_skipspin)
2184 			return;
2185 		lock_list = PCPU_GET(spinlocks);
2186 	}
2187 	instance = find_instance(lock_list, lock);
2188 	if (instance == NULL)
2189 		panic("%s: lock (%s) %s not locked", __func__,
2190 		    class->lc_name, lock->lo_name);
2191 	lock->lo_witness->w_file = file;
2192 	lock->lo_witness->w_line = line;
2193 	instance->li_file = file;
2194 	instance->li_line = line;
2195 }
2196 
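/*
 * Illustrative only: a lock implementation's assertion path might ask
 * WITNESS to verify that curthread holds the lock exclusively, e.g.:
 *
 *	witness_assert(&sx->lock_object, LA_XLOCKED, file, line);
 */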
2197 void
2198 witness_assert(struct lock_object *lock, int flags, const char *file, int line)
2199 {
2200 #ifdef INVARIANT_SUPPORT
2201 	struct lock_instance *instance;
2202 	struct lock_class *class;
2203 
2204 	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
2205 		return;
2206 	class = LOCK_CLASS(lock);
2207 	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
2208 		instance = find_instance(curthread->td_sleeplocks, lock);
2209 	else if ((class->lc_flags & LC_SPINLOCK) != 0)
2210 		instance = find_instance(PCPU_GET(spinlocks), lock);
2211 	else {
2212 		panic("Lock (%s) %s is not sleep or spin!",
2213 		    class->lc_name, lock->lo_name);
2214 	}
2215 	file = fixup_filename(file);
2216 	switch (flags) {
2217 	case LA_UNLOCKED:
2218 		if (instance != NULL)
2219 			panic("Lock (%s) %s locked @ %s:%d.",
2220 			    class->lc_name, lock->lo_name, file, line);
2221 		break;
2222 	case LA_LOCKED:
2223 	case LA_LOCKED | LA_RECURSED:
2224 	case LA_LOCKED | LA_NOTRECURSED:
2225 	case LA_SLOCKED:
2226 	case LA_SLOCKED | LA_RECURSED:
2227 	case LA_SLOCKED | LA_NOTRECURSED:
2228 	case LA_XLOCKED:
2229 	case LA_XLOCKED | LA_RECURSED:
2230 	case LA_XLOCKED | LA_NOTRECURSED:
2231 		if (instance == NULL) {
2232 			panic("Lock (%s) %s not locked @ %s:%d.",
2233 			    class->lc_name, lock->lo_name, file, line);
2234 			break;
2235 		}
2236 		if ((flags & LA_XLOCKED) != 0 &&
2237 		    (instance->li_flags & LI_EXCLUSIVE) == 0)
2238 			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
2239 			    class->lc_name, lock->lo_name, file, line);
2240 		if ((flags & LA_SLOCKED) != 0 &&
2241 		    (instance->li_flags & LI_EXCLUSIVE) != 0)
2242 			panic("Lock (%s) %s exclusively locked @ %s:%d.",
2243 			    class->lc_name, lock->lo_name, file, line);
2244 		if ((flags & LA_RECURSED) != 0 &&
2245 		    (instance->li_flags & LI_RECURSEMASK) == 0)
2246 			panic("Lock (%s) %s not recursed @ %s:%d.",
2247 			    class->lc_name, lock->lo_name, file, line);
2248 		if ((flags & LA_NOTRECURSED) != 0 &&
2249 		    (instance->li_flags & LI_RECURSEMASK) != 0)
2250 			panic("Lock (%s) %s recursed @ %s:%d.",
2251 			    class->lc_name, lock->lo_name, file, line);
2252 		break;
2253 	default:
2254 		panic("Invalid lock assertion at %s:%d.", file, line);
2256 	}
2257 #endif	/* INVARIANT_SUPPORT */
2258 }
2259 
2260 static void
2261 witness_setflag(struct lock_object *lock, int flag, int set)
2262 {
2263 	struct lock_list_entry *lock_list;
2264 	struct lock_instance *instance;
2265 	struct lock_class *class;
2266 
2267 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2268 		return;
2269 	class = LOCK_CLASS(lock);
2270 	if (class->lc_flags & LC_SLEEPLOCK)
2271 		lock_list = curthread->td_sleeplocks;
2272 	else {
2273 		if (witness_skipspin)
2274 			return;
2275 		lock_list = PCPU_GET(spinlocks);
2276 	}
2277 	instance = find_instance(lock_list, lock);
2278 	if (instance == NULL)
2279 		panic("%s: lock (%s) %s not locked", __func__,
2280 		    class->lc_name, lock->lo_name);
2281 
2282 	if (set)
2283 		instance->li_flags |= flag;
2284 	else
2285 		instance->li_flags &= ~flag;
2286 }
2287 
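/*
 * Usage sketch (hypothetical mutex 'm'): bracket a region in which the
 * lock must stay held; an unlock inside the bracket trips the "forbidden
 * unlock" check in witness_unlock():
 *
 *	witness_norelease(&m->lock_object);
 *	... code that must not drop the mutex ...
 *	witness_releaseok(&m->lock_object);
 */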
2288 void
2289 witness_norelease(struct lock_object *lock)
2290 {
2291 
2292 	witness_setflag(lock, LI_NORELEASE, 1);
2293 }
2294 
2295 void
2296 witness_releaseok(struct lock_object *lock)
2297 {
2298 
2299 	witness_setflag(lock, LI_NORELEASE, 0);
2300 }
2301 
2302 #ifdef DDB
2303 static void
2304 witness_ddb_list(struct thread *td)
2305 {
2306 
2307 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2308 	KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2309 
2310 	if (witness_watch < 1)
2311 		return;
2312 
2313 	witness_list_locks(&td->td_sleeplocks, db_printf);
2314 
2315 	/*
2316 	 * We only handle spinlocks if td == curthread.  This is somewhat broken
2317 	 * if td is currently executing on some other CPU and holds spin locks
2318 	 * as we won't display those locks.  If we had a MI way of getting
2319 	 * the per-cpu data for a given cpu then we could use
2320 	 * td->td_oncpu to get the list of spinlocks for this thread
2321 	 * and "fix" this.
2322 	 *
2323 	 * That still wouldn't really fix this unless we locked the scheduler
2324 	 * lock or stopped the other CPU to make sure it wasn't changing the
2325 	 * list out from under us.  It is probably best to just not try to
2326 	 * handle threads on other CPUs for now.
2327 	 */
2328 	if (td == curthread && PCPU_GET(spinlocks) != NULL)
2329 		witness_list_locks(PCPU_PTR(spinlocks), db_printf);
2330 }
2331 
2332 DB_SHOW_COMMAND(locks, db_witness_list)
2333 {
2334 	struct thread *td;
2335 
2336 	if (have_addr)
2337 		td = db_lookup_thread(addr, TRUE);
2338 	else
2339 		td = kdb_thread;
2340 	witness_ddb_list(td);
2341 }
2342 
2343 DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
2344 {
2345 	struct thread *td;
2346 	struct proc *p;
2347 
2348 	/*
2349 	 * It would be nice to list only threads and processes that actually
2350 	 * hold sleep locks, but that information is currently not exported
2351 	 * by WITNESS.
2352 	 */
2353 	FOREACH_PROC_IN_SYSTEM(p) {
2354 		if (!witness_proc_has_locks(p))
2355 			continue;
2356 		FOREACH_THREAD_IN_PROC(p, td) {
2357 			if (!witness_thread_has_locks(td))
2358 				continue;
2359 			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2360 			    p->p_comm, td, td->td_tid);
2361 			witness_ddb_list(td);
2362 		}
2363 	}
2364 }
2365 DB_SHOW_ALIAS(alllocks, db_witness_list_all)
2366 
2367 DB_SHOW_COMMAND(witness, db_witness_display)
2368 {
2369 
2370 	witness_ddb_display(db_printf);
2371 }
2372 #endif
2373 
2374 static int
2375 sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
2376 {
2377 	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
2378 	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
2379 	struct sbuf *sb;
2380 	u_int w_rmatrix1, w_rmatrix2;
2381 	int error, generation, i, j;
2382 
2383 	tmp_data1 = NULL;
2384 	tmp_data2 = NULL;
2385 	tmp_w1 = NULL;
2386 	tmp_w2 = NULL;
2387 	if (witness_watch < 1) {
2388 		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2389 		return (error);
2390 	}
2391 	if (witness_cold) {
2392 		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2393 		return (error);
2394 	}
2395 	error = 0;
2396 	sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND);
2397 	if (sb == NULL)
2398 		return (ENOMEM);
2399 
2400 	/* Allocate and init temporary storage space. */
2401 	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2402 	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2403 	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2404 	    M_WAITOK | M_ZERO);
2405 	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2406 	    M_WAITOK | M_ZERO);
2407 	stack_zero(&tmp_data1->wlod_stack);
2408 	stack_zero(&tmp_data2->wlod_stack);
2409 
2410 restart:
2411 	mtx_lock_spin(&w_mtx);
2412 	generation = w_generation;
2413 	mtx_unlock_spin(&w_mtx);
2414 	sbuf_printf(sb, "Number of known direct relationships is %d\n",
2415 	    w_lohash.wloh_count);
2416 	for (i = 1; i < w_max_used_index; i++) {
2417 		mtx_lock_spin(&w_mtx);
2418 		if (generation != w_generation) {
2419 			mtx_unlock_spin(&w_mtx);
2420 
2421 			/* The graph has changed, try again. */
2422 			req->oldidx = 0;
2423 			sbuf_clear(sb);
2424 			goto restart;
2425 		}
2426 
2427 		w1 = &w_data[i];
2428 		if (w1->w_reversed == 0) {
2429 			mtx_unlock_spin(&w_mtx);
2430 			continue;
2431 		}
2432 
2433 		/* Copy w1 locally so we can release the spin lock. */
2434 		*tmp_w1 = *w1;
2435 		mtx_unlock_spin(&w_mtx);
2436 
2437 		if (tmp_w1->w_reversed == 0)
2438 			continue;
2439 		for (j = 1; j < w_max_used_index; j++) {
2440 			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
2441 				continue;
2442 
2443 			mtx_lock_spin(&w_mtx);
2444 			if (generation != w_generation) {
2445 				mtx_unlock_spin(&w_mtx);
2446 
2447 				/* The graph has changed, try again. */
2448 				req->oldidx = 0;
2449 				sbuf_clear(sb);
2450 				goto restart;
2451 			}
2452 
2453 			w2 = &w_data[j];
2454 			data1 = witness_lock_order_get(w1, w2);
2455 			data2 = witness_lock_order_get(w2, w1);
2456 
2457 			/*
2458 			 * Copy information locally so we can release the
2459 			 * spin lock.
2460 			 */
2461 			*tmp_w2 = *w2;
2462 			w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
2463 			w_rmatrix2 = (unsigned int)w_rmatrix[j][i];
2464 
2465 			if (data1) {
2466 				stack_zero(&tmp_data1->wlod_stack);
2467 				stack_copy(&data1->wlod_stack,
2468 				    &tmp_data1->wlod_stack);
2469 			}
2470 			if (data2 && data2 != data1) {
2471 				stack_zero(&tmp_data2->wlod_stack);
2472 				stack_copy(&data2->wlod_stack,
2473 				    &tmp_data2->wlod_stack);
2474 			}
2475 			mtx_unlock_spin(&w_mtx);
2476 
2477 			sbuf_printf(sb,
2478 	    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
2479 			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2480 			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2481 #if 0
2482 			sbuf_printf(sb,
2483 			"w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
2484 			    tmp_w1->w_name, tmp_w2->w_name, w_rmatrix1,
2485 			    tmp_w2->w_name, tmp_w1->w_name, w_rmatrix2);
2486 #endif
2487 			if (data1) {
2488 				sbuf_printf(sb,
2489 			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2490 				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2491 				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2492 				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
2493 				sbuf_printf(sb, "\n");
2494 			}
2495 			if (data2 && data2 != data1) {
2496 				sbuf_printf(sb,
2497 			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2498 				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
2499 				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
2500 				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
2501 				sbuf_printf(sb, "\n");
2502 			}
2503 		}
2504 	}
2505 	mtx_lock_spin(&w_mtx);
2506 	if (generation != w_generation) {
2507 		mtx_unlock_spin(&w_mtx);
2508 
2509 		/*
2510 		 * The graph changed while we were printing stack data,
2511 		 * try again.
2512 		 */
2513 		req->oldidx = 0;
2514 		sbuf_clear(sb);
2515 		goto restart;
2516 	}
2517 	mtx_unlock_spin(&w_mtx);
2518 
2519 	/* Free temporary storage space. */
2520 	free(tmp_data1, M_TEMP);
2521 	free(tmp_data2, M_TEMP);
2522 	free(tmp_w1, M_TEMP);
2523 	free(tmp_w2, M_TEMP);
2524 
2525 	sbuf_finish(sb);
2526 	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2527 	sbuf_delete(sb);
2528 
2529 	return (error);
2530 }
2531 
2532 static int
2533 sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
2534 {
2535 	struct witness *w;
2536 	struct sbuf *sb;
2537 	int error;
2538 
2539 	if (witness_watch < 1) {
2540 		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2541 		return (error);
2542 	}
2543 	if (witness_cold) {
2544 		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2545 		return (error);
2546 	}
2547 	error = 0;
2548 	sb = sbuf_new(NULL, NULL, FULLGRAPH_SBUF_SIZE, SBUF_FIXEDLEN);
2549 	if (sb == NULL)
2550 		return (ENOMEM);
2551 	sbuf_printf(sb, "\n");
2552 
2553 	mtx_lock_spin(&w_mtx);
2554 	STAILQ_FOREACH(w, &w_all, w_list)
2555 		w->w_displayed = 0;
2556 	STAILQ_FOREACH(w, &w_all, w_list)
2557 		witness_add_fullgraph(sb, w);
2558 	mtx_unlock_spin(&w_mtx);
2559 
2560 	/*
2561 	 * While using SBUF_FIXEDLEN, check if the sbuf overflowed.
2562 	 */
2563 	if (sbuf_overflowed(sb)) {
2564 		sbuf_delete(sb);
2565 		panic("%s: sbuf overflowed, bump FULLGRAPH_SBUF_SIZE value\n",
2566 		    __func__);
2567 	}
2568 
2569 	/*
2570 	 * Close the sbuf and return to userland.
2571 	 */
2572 	sbuf_finish(sb);
2573 	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2574 	sbuf_delete(sb);
2575 
2576 	return (error);
2577 }
2578 
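/*
 * Sysctl handler for debug.witness.watch.  Accepted values are 1 (witness
 * enabled), 0 (disabled at run time) and -1 (disabled permanently); once
 * witness has been shut down with -1 it cannot be re-enabled, which the
 * range check below enforces.
 */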
2579 static int
2580 sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
2581 {
2582 	int error, value;
2583 
2584 	value = witness_watch;
2585 	error = sysctl_handle_int(oidp, &value, 0, req);
2586 	if (error != 0 || req->newptr == NULL)
2587 		return (error);
2588 	if (value > 1 || value < -1 ||
2589 	    (witness_watch == -1 && value != witness_watch))
2590 		return (EINVAL);
2591 	witness_watch = value;
2592 	return (0);
2593 }
2594 
2595 static void
2596 witness_add_fullgraph(struct sbuf *sb, struct witness *w)
2597 {
2598 	int i;
2599 
2600 	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
2601 		return;
2602 	w->w_displayed = 1;
2603 
2604 	WITNESS_INDEX_ASSERT(w->w_index);
2605 	for (i = 1; i <= w_max_used_index; i++) {
2606 		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
2607 			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
2608 			    w_data[i].w_name);
2609 			witness_add_fullgraph(sb, &w_data[i]);
2610 		}
2611 	}
2612 }
2613 
2614 /*
2615  * A simple hash function. Takes a key pointer and a key size. If size == 0,
2616  * interprets the key as a string and reads until the null
2617  * terminator. Otherwise, reads the first size bytes. Returns an unsigned 32-bit
2618  * hash value computed from the key.
2619  */
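/*
 * Worked example (illustrative): for the two-byte key "ab",
 *
 *	hash = 5381
 *	hash = 5381 * 33 + 'a' (97) = 177670
 *	hash = 177670 * 33 + 'b' (98) = 5863208
 */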
2620 static uint32_t
2621 witness_hash_djb2(const uint8_t *key, uint32_t size)
2622 {
2623 	unsigned int hash = 5381;
2624 	int i;
2625 
2626 	/* hash = hash * 33 + key[i] */
2627 	if (size)
2628 		for (i = 0; i < size; i++)
2629 			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2630 	else
2631 		for (i = 0; key[i] != 0; i++)
2632 			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2633 
2634 	return (hash);
2635 }
2636 
2638 /*
2639  * Initializes the two witness hash tables. Called exactly once from
2640  * witness_initialize().
2641  */
2642 static void
2643 witness_init_hash_tables(void)
2644 {
2645 	int i;
2646 
2647 	MPASS(witness_cold);
2648 
2649 	/* Initialize the hash tables. */
2650 	for (i = 0; i < WITNESS_HASH_SIZE; i++)
2651 		w_hash.wh_array[i] = NULL;
2652 
2653 	w_hash.wh_size = WITNESS_HASH_SIZE;
2654 	w_hash.wh_count = 0;
2655 
2656 	/* Initialize the lock order data hash. */
2657 	w_lofree = NULL;
2658 	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
2659 		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
2660 		w_lodata[i].wlod_next = w_lofree;
2661 		w_lofree = &w_lodata[i];
2662 	}
2663 	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
2664 	w_lohash.wloh_count = 0;
2665 	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
2666 		w_lohash.wloh_array[i] = NULL;
2667 }
2668 
2669 static struct witness *
2670 witness_hash_get(const char *key)
2671 {
2672 	struct witness *w;
2673 	uint32_t hash;
2674 
2675 	MPASS(key != NULL);
2676 	if (witness_cold == 0)
2677 		mtx_assert(&w_mtx, MA_OWNED);
2678 	hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
2679 	w = w_hash.wh_array[hash];
2680 	while (w != NULL) {
2681 		if (strcmp(w->w_name, key) == 0)
2682 			goto out;
2683 		w = w->w_hash_next;
2684 	}
2685 
2686 out:
2687 	return (w);
2688 }
2689 
2690 static void
2691 witness_hash_put(struct witness *w)
2692 {
2693 	uint32_t hash;
2694 
2695 	MPASS(w != NULL);
2696 	MPASS(w->w_name != NULL);
2697 	if (witness_cold == 0)
2698 		mtx_assert(&w_mtx, MA_OWNED);
2699 	KASSERT(witness_hash_get(w->w_name) == NULL,
2700 	    ("%s: trying to add a hash entry that already exists!", __func__));
2701 	KASSERT(w->w_hash_next == NULL,
2702 	    ("%s: w->w_hash_next != NULL", __func__));
2703 
2704 	hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
2705 	w->w_hash_next = w_hash.wh_array[hash];
2706 	w_hash.wh_array[hash] = w;
2707 	w_hash.wh_count++;
2708 }
2710 
2711 static struct witness_lock_order_data *
2712 witness_lock_order_get(struct witness *parent, struct witness *child)
2713 {
2714 	struct witness_lock_order_data *data = NULL;
2715 	struct witness_lock_order_key key;
2716 	unsigned int hash;
2717 
2718 	MPASS(parent != NULL && child != NULL);
2719 	key.from = parent->w_index;
2720 	key.to = child->w_index;
2721 	WITNESS_INDEX_ASSERT(key.from);
2722 	WITNESS_INDEX_ASSERT(key.to);
2723 	if ((w_rmatrix[parent->w_index][child->w_index]
2724 	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
2725 		goto out;
2726 
2727 	hash = witness_hash_djb2((const char*)&key,
2728 	    sizeof(key)) % w_lohash.wloh_size;
2729 	data = w_lohash.wloh_array[hash];
2730 	while (data != NULL) {
2731 		if (witness_lock_order_key_equal(&data->wlod_key, &key))
2732 			break;
2733 		data = data->wlod_next;
2734 	}
2735 
2736 out:
2737 	return (data);
2738 }
2739 
2740 /*
2741  * Verify that parent and child have a known relationship, are not the same,
2742  * and child is actually a child of parent.  This is done without w_mtx
2743  * to avoid contention in the common case.
2744  */
2745 static int
2746 witness_lock_order_check(struct witness *parent, struct witness *child)
2747 {
2748 
2749 	if (parent != child &&
2750 	    w_rmatrix[parent->w_index][child->w_index]
2751 	    & WITNESS_LOCK_ORDER_KNOWN &&
2752 	    isitmychild(parent, child))
2753 		return (1);
2754 
2755 	return (0);
2756 }
2757 
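/*
 * Record the first time the order parent -> child is seen, saving the
 * current stack trace in the lock order data hash so that it can be
 * reported later (e.g. via the debug.witness.badstacks handler above).
 * Returns 1 on success or if the order was already known, and 0 when the
 * free pool of lock order data entries has been exhausted.
 */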
2758 static int
2759 witness_lock_order_add(struct witness *parent, struct witness *child)
2760 {
2761 	struct witness_lock_order_data *data = NULL;
2762 	struct witness_lock_order_key key;
2763 	unsigned int hash;
2764 
2765 	MPASS(parent != NULL && child != NULL);
2766 	key.from = parent->w_index;
2767 	key.to = child->w_index;
2768 	WITNESS_INDEX_ASSERT(key.from);
2769 	WITNESS_INDEX_ASSERT(key.to);
2770 	if (w_rmatrix[parent->w_index][child->w_index]
2771 	    & WITNESS_LOCK_ORDER_KNOWN)
2772 		return (1);
2773 
2774 	hash = witness_hash_djb2((const char*)&key,
2775 	    sizeof(key)) % w_lohash.wloh_size;
2776 	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
2777 	data = w_lofree;
2778 	if (data == NULL)
2779 		return (0);
2780 	w_lofree = data->wlod_next;
2781 	data->wlod_next = w_lohash.wloh_array[hash];
2782 	data->wlod_key = key;
2783 	w_lohash.wloh_array[hash] = data;
2784 	w_lohash.wloh_count++;
2785 	stack_zero(&data->wlod_stack);
2786 	stack_save(&data->wlod_stack);
2787 	return (1);
2788 }
2789 
2790 /* Call this whenever the structure of the witness graph changes. */
2791 static void
2792 witness_increment_graph_generation(void)
2793 {
2794 
2795 	if (witness_cold == 0)
2796 		mtx_assert(&w_mtx, MA_OWNED);
2797 	w_generation++;
2798 }
2799 
2800 #ifdef KDB
2801 static void
2802 _witness_debugger(int cond, const char *msg)
2803 {
2804 
2805 	if (witness_trace && cond)
2806 		kdb_backtrace();
2807 	if (witness_kdb && cond)
2808 		kdb_enter(KDB_WHY_WITNESS, msg);
2809 }
2810 #endif
2811