xref: /freebsd/sys/kern/subr_witness.c (revision 20bd59416dcacbd2b776fe49dfa193900f303287)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2008 Isilon Systems, Inc.
5  * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
6  * Copyright (c) 1998 Berkeley Software Design, Inc.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. Berkeley Software Design Inc's name may not be used to endorse or
18  *    promote products derived from this software without specific prior
19  *    written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
34  *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
35  */
36 
37 /*
38  * Implementation of the `witness' lock verifier.  Originally implemented for
39  * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
40  * classes in FreeBSD.
41  */
42 
43 /*
44  *	Main Entry: witness
45  *	Pronunciation: 'wit-n&s
46  *	Function: noun
47  *	Etymology: Middle English witnesse, from Old English witnes knowledge,
48  *	    testimony, witness, from 2wit
49  *	Date: before 12th century
50  *	1 : attestation of a fact or event : TESTIMONY
51  *	2 : one that gives evidence; specifically : one who testifies in
52  *	    a cause or before a judicial tribunal
53  *	3 : one asked to be present at a transaction so as to be able to
54  *	    testify to its having taken place
55  *	4 : one who has personal knowledge of something
56  *	5 a : something serving as evidence or proof : SIGN
57  *	  b : public affirmation by word or example of usually
58  *	      religious faith or conviction <the heroic witness to divine
59  *	      life -- Pilot>
60  *	6 capitalized : a member of the Jehovah's Witnesses
61  */
62 
63 /*
64  * Special rules concerning Giant and lock orders:
65  *
66  * 1) Giant must be acquired before any other mutexes.  Stated another way,
67  *    no other mutex may be held when Giant is acquired.
68  *
69  * 2) Giant must be released when blocking on a sleepable lock.
70  *
71  * This rule is less obvious, but is a result of Giant providing the same
72  * semantics as spl().  Basically, when a thread sleeps, it must release
73  * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
74  * 2).
75  *
76  * 3) Giant may be acquired before or after sleepable locks.
77  *
78  * This rule is also not quite as obvious.  Giant may be acquired after
79  * a sleepable lock because it is a non-sleepable lock and non-sleepable
80  * locks may always be acquired while holding a sleepable lock.  The second
81  * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
82  * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
83  * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
84  * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
85  * execute.  Thus, acquiring Giant both before and after a sleepable lock
86  * will not result in a lock order reversal.
87  */
88 
89 #include <sys/cdefs.h>
90 __FBSDID("$FreeBSD$");
91 
92 #include "opt_ddb.h"
93 #include "opt_hwpmc_hooks.h"
94 #include "opt_stack.h"
95 #include "opt_witness.h"
96 
97 #include <sys/param.h>
98 #include <sys/bus.h>
99 #include <sys/kdb.h>
100 #include <sys/kernel.h>
101 #include <sys/ktr.h>
102 #include <sys/lock.h>
103 #include <sys/malloc.h>
104 #include <sys/mutex.h>
105 #include <sys/priv.h>
106 #include <sys/proc.h>
107 #include <sys/sbuf.h>
108 #include <sys/sched.h>
109 #include <sys/stack.h>
110 #include <sys/sysctl.h>
111 #include <sys/syslog.h>
112 #include <sys/systm.h>
113 
114 #ifdef DDB
115 #include <ddb/ddb.h>
116 #endif
117 
118 #include <machine/stdarg.h>
119 
120 #if !defined(DDB) && !defined(STACK)
121 #error "DDB or STACK options are required for WITNESS"
122 #endif
123 
124 /* Note that these traces do not work with KTR_ALQ. */
125 #if 0
126 #define	KTR_WITNESS	KTR_SUBSYS
127 #else
128 #define	KTR_WITNESS	0
129 #endif
130 
131 #define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
132 #define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
133 #define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */
134 
135 #ifndef WITNESS_COUNT
136 #define	WITNESS_COUNT 		1536
137 #endif
138 #define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
139 #define	WITNESS_PENDLIST	(512 + (MAXCPU * 4))
140 
141 /* Allocate 256 KB of stack data space */
142 #define	WITNESS_LO_DATA_COUNT	2048
143 
144 /* Prime, gives load factor of ~2 at full load */
145 #define	WITNESS_LO_HASH_SIZE	1021
146 
147 /*
148  * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
149  * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
150  * probably be safe for the most part, but it's still a SWAG.
151  */
152 #define	LOCK_NCHILDREN	5
153 #define	LOCK_CHILDCOUNT	2048
154 
155 #define	MAX_W_NAME	64
156 
157 #define	FULLGRAPH_SBUF_SIZE	512
158 
159 /*
160  * These flags go in the witness relationship matrix and describe the
161  * relationship between any two struct witness objects.
162  */
163 #define	WITNESS_UNRELATED        0x00    /* No lock order relation. */
164 #define	WITNESS_PARENT           0x01    /* Parent, aka direct ancestor. */
165 #define	WITNESS_ANCESTOR         0x02    /* Direct or indirect ancestor. */
166 #define	WITNESS_CHILD            0x04    /* Child, aka direct descendant. */
167 #define	WITNESS_DESCENDANT       0x08    /* Direct or indirect descendant. */
168 #define	WITNESS_ANCESTOR_MASK    (WITNESS_PARENT | WITNESS_ANCESTOR)
169 #define	WITNESS_DESCENDANT_MASK  (WITNESS_CHILD | WITNESS_DESCENDANT)
170 #define	WITNESS_RELATED_MASK						\
171 	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
172 #define	WITNESS_REVERSAL         0x10    /* A lock order reversal has been
173 					  * observed. */
174 #define	WITNESS_RESERVED1        0x20    /* Unused flag, reserved. */
175 #define	WITNESS_RESERVED2        0x40    /* Unused flag, reserved. */
176 #define	WITNESS_LOCK_ORDER_KNOWN 0x80    /* This lock order is known. */
177 
178 /* Descendant to ancestor flags */
179 #define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)
180 
181 /* Ancestor to descendant flags */
182 #define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)
183 
184 #define	WITNESS_INDEX_ASSERT(i)						\
185 	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < witness_count)
186 
187 static MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");
188 
189 /*
190  * Lock instances.  A lock instance is the data associated with a lock while
191  * it is held by witness.  For example, a lock instance will hold the
192  * recursion count of a lock.  Lock instances are held in lists.  Spin locks
193  * are held in a per-cpu list while sleep locks are held in per-thread list.
194  */
195 struct lock_instance {
196 	struct lock_object	*li_lock;
197 	const char		*li_file;
198 	int			li_line;
199 	u_int			li_flags;
200 };
201 
202 /*
203  * A simple list type used to build the list of locks held by a thread
204  * or CPU.  We can't simply embed the list in struct lock_object since a
205  * lock may be held by more than one thread if it is a shared lock.  Locks
206  * are added to the head of the list, so we fill up each list entry from
207  * "the back" logically.  To ease some of the arithmetic, we actually fill
208  * in each list entry the normal way (children[0] then children[1], etc.) but
209  * when we traverse the list we read children[count-1] as the first entry
210  * down to children[0] as the final entry.
211  */
212 struct lock_list_entry {
213 	struct lock_list_entry	*ll_next;
214 	struct lock_instance	ll_children[LOCK_NCHILDREN];
215 	u_int			ll_count;
216 };
217 
218 /*
219  * The main witness structure. One of these per named lock type in the system
220  * (for example, "vnode interlock").
221  */
222 struct witness {
223 	char  			w_name[MAX_W_NAME];
224 	uint32_t 		w_index;  /* Index in the relationship matrix */
225 	struct lock_class	*w_class;
226 	STAILQ_ENTRY(witness) 	w_list;		/* List of all witnesses. */
227 	STAILQ_ENTRY(witness) 	w_typelist;	/* Witnesses of a type. */
228 	struct witness		*w_hash_next; /* Linked list in hash buckets. */
229 	const char		*w_file; /* File where last acquired */
230 	uint32_t 		w_line; /* Line where last acquired */
231 	uint32_t 		w_refcount;
232 	uint16_t 		w_num_ancestors; /* direct/indirect
233 						  * ancestor count */
234 	uint16_t 		w_num_descendants; /* direct/indirect
235 						    * descendant count */
236 	int16_t 		w_ddb_level;
237 	unsigned		w_displayed:1;
238 	unsigned		w_reversed:1;
239 };
240 
241 STAILQ_HEAD(witness_list, witness);
242 
243 /*
244  * The witness hash table. Keys are witness names (const char *), elements are
245  * witness objects (struct witness *).
246  */
247 struct witness_hash {
248 	struct witness	*wh_array[WITNESS_HASH_SIZE];
249 	uint32_t	wh_size;
250 	uint32_t	wh_count;
251 };
252 
253 /*
254  * Key type for the lock order data hash table.
255  */
256 struct witness_lock_order_key {
257 	uint16_t	from;
258 	uint16_t	to;
259 };
260 
261 struct witness_lock_order_data {
262 	struct stack			wlod_stack;
263 	struct witness_lock_order_key	wlod_key;
264 	struct witness_lock_order_data	*wlod_next;
265 };
266 
267 /*
268  * The witness lock order data hash table. Keys are witness index tuples
269  * (struct witness_lock_order_key), elements are lock order data objects
270  * (struct witness_lock_order_data).
271  */
272 struct witness_lock_order_hash {
273 	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
274 	u_int	wloh_size;
275 	u_int	wloh_count;
276 };
277 
278 struct witness_blessed {
279 	const char	*b_lock1;
280 	const char	*b_lock2;
281 };
282 
283 struct witness_pendhelp {
284 	const char		*wh_type;
285 	struct lock_object	*wh_lock;
286 };
287 
288 struct witness_order_list_entry {
289 	const char		*w_name;
290 	struct lock_class	*w_class;
291 };
292 
293 /*
294  * Returns 0 if one of the locks is a spin lock and the other is not.
295  * Returns 1 otherwise.
296  */
297 static __inline int
298 witness_lock_type_equal(struct witness *w1, struct witness *w2)
299 {
300 
301 	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
302 		(w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
303 }
304 
305 static __inline int
306 witness_lock_order_key_equal(const struct witness_lock_order_key *a,
307     const struct witness_lock_order_key *b)
308 {
309 
310 	return (a->from == b->from && a->to == b->to);
311 }
312 
313 static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
314 		    const char *fname);
315 static void	adopt(struct witness *parent, struct witness *child);
316 static int	blessed(struct witness *, struct witness *);
317 static void	depart(struct witness *w);
318 static struct witness	*enroll(const char *description,
319 			    struct lock_class *lock_class);
320 static struct lock_instance	*find_instance(struct lock_list_entry *list,
321 				    const struct lock_object *lock);
322 static int	isitmychild(struct witness *parent, struct witness *child);
323 static int	isitmydescendant(struct witness *parent, struct witness *child);
324 static void	itismychild(struct witness *parent, struct witness *child);
325 static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
326 static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
327 static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
328 static int	sysctl_debug_witness_channel(SYSCTL_HANDLER_ARGS);
329 static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
330 #ifdef DDB
331 static void	witness_ddb_compute_levels(void);
332 static void	witness_ddb_display(int(*)(const char *fmt, ...));
333 static void	witness_ddb_display_descendants(int(*)(const char *fmt, ...),
334 		    struct witness *, int indent);
335 static void	witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
336 		    struct witness_list *list);
337 static void	witness_ddb_level_descendants(struct witness *parent, int l);
338 static void	witness_ddb_list(struct thread *td);
339 #endif
340 static void	witness_debugger(int cond, const char *msg);
341 static void	witness_free(struct witness *m);
342 static struct witness	*witness_get(void);
343 static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
344 static struct witness	*witness_hash_get(const char *key);
345 static void	witness_hash_put(struct witness *w);
346 static void	witness_init_hash_tables(void);
347 static void	witness_increment_graph_generation(void);
348 static void	witness_lock_list_free(struct lock_list_entry *lle);
349 static struct lock_list_entry	*witness_lock_list_get(void);
350 static int	witness_lock_order_add(struct witness *parent,
351 		    struct witness *child);
352 static int	witness_lock_order_check(struct witness *parent,
353 		    struct witness *child);
354 static struct witness_lock_order_data	*witness_lock_order_get(
355 					    struct witness *parent,
356 					    struct witness *child);
357 static void	witness_list_lock(struct lock_instance *instance,
358 		    int (*prnt)(const char *fmt, ...));
359 static int	witness_output(const char *fmt, ...) __printflike(1, 2);
360 static int	witness_voutput(const char *fmt, va_list ap) __printflike(1, 0);
361 static void	witness_setflag(struct lock_object *lock, int flag, int set);
362 
363 static SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL,
364     "Witness Locking");
365 
366 /*
367  * If set to 0, lock order checking is disabled.  If set to -1,
368  * witness is completely disabled.  Otherwise witness performs full
369  * lock order checking for all locks.  At runtime, lock order checking
370  * may be toggled.  However, witness cannot be reenabled once it is
371  * completely disabled.
372  */
373 static int witness_watch = 1;
374 SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RWTUN | CTLTYPE_INT, NULL, 0,
375     sysctl_debug_witness_watch, "I", "witness is watching lock operations");
376 
377 #ifdef KDB
378 /*
379  * When KDB is enabled and witness_kdb is 1, it will cause the system
380  * to drop into kdebug() when:
381  *	- a lock hierarchy violation occurs
382  *	- locks are held when going to sleep.
383  */
384 #ifdef WITNESS_KDB
385 int	witness_kdb = 1;
386 #else
387 int	witness_kdb = 0;
388 #endif
389 SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RWTUN, &witness_kdb, 0, "");
390 #endif /* KDB */
391 
392 #if defined(DDB) || defined(KDB)
393 /*
394  * When DDB or KDB is enabled and witness_trace is 1, it will cause the system
395  * to print a stack trace:
396  *	- a lock hierarchy violation occurs
397  *	- locks are held when going to sleep.
398  */
399 int	witness_trace = 1;
400 SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RWTUN, &witness_trace, 0, "");
401 #endif /* DDB || KDB */
402 
403 #ifdef WITNESS_SKIPSPIN
404 int	witness_skipspin = 1;
405 #else
406 int	witness_skipspin = 0;
407 #endif
408 SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin, 0, "");
409 
410 int badstack_sbuf_size;
411 
412 int witness_count = WITNESS_COUNT;
413 SYSCTL_INT(_debug_witness, OID_AUTO, witness_count, CTLFLAG_RDTUN,
414     &witness_count, 0, "");
415 
416 /*
417  * Output channel for witness messages.  By default we print to the console.
418  */
419 enum witness_channel {
420 	WITNESS_CONSOLE,
421 	WITNESS_LOG,
422 	WITNESS_NONE,
423 };
424 
425 static enum witness_channel witness_channel = WITNESS_CONSOLE;
426 SYSCTL_PROC(_debug_witness, OID_AUTO, output_channel, CTLTYPE_STRING |
427     CTLFLAG_RWTUN, NULL, 0, sysctl_debug_witness_channel, "A",
428     "Output channel for warnings");
429 
430 /*
431  * Call this to print out the relations between locks.
432  */
433 SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
434     NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");
435 
436 /*
437  * Call this to print out the witness faulty stacks.
438  */
439 SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
440     NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");
441 
442 static struct mtx w_mtx;
443 
444 /* w_list */
445 static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
446 static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
447 
448 /* w_typelist */
449 static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
450 static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
451 
452 /* lock list */
453 static struct lock_list_entry *w_lock_list_free = NULL;
454 static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
455 static u_int pending_cnt;
456 
457 static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
458 SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
459 SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
460 SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
461     "");
462 
463 static struct witness *w_data;
464 static uint8_t **w_rmatrix;
465 static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
466 static struct witness_hash w_hash;	/* The witness hash table. */
467 
468 /* The lock order data hash */
469 static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
470 static struct witness_lock_order_data *w_lofree = NULL;
471 static struct witness_lock_order_hash w_lohash;
472 static int w_max_used_index = 0;
473 static unsigned int w_generation = 0;
474 static const char w_notrunning[] = "Witness not running\n";
475 static const char w_stillcold[] = "Witness is still cold\n";
476 #ifdef __i386__
477 static const char w_notallowed[] = "The sysctl is disabled on the arch\n";
478 #endif
479 
480 static struct witness_order_list_entry order_lists[] = {
481 	/*
482 	 * sx locks
483 	 */
484 	{ "proctree", &lock_class_sx },
485 	{ "allproc", &lock_class_sx },
486 	{ "allprison", &lock_class_sx },
487 	{ NULL, NULL },
488 	/*
489 	 * Various mutexes
490 	 */
491 	{ "Giant", &lock_class_mtx_sleep },
492 	{ "pipe mutex", &lock_class_mtx_sleep },
493 	{ "sigio lock", &lock_class_mtx_sleep },
494 	{ "process group", &lock_class_mtx_sleep },
495 #ifdef	HWPMC_HOOKS
496 	{ "pmc-sleep", &lock_class_mtx_sleep },
497 #endif
498 	{ "process lock", &lock_class_mtx_sleep },
499 	{ "session", &lock_class_mtx_sleep },
500 	{ "uidinfo hash", &lock_class_rw },
501 	{ "time lock", &lock_class_mtx_sleep },
502 	{ NULL, NULL },
503 	/*
504 	 * umtx
505 	 */
506 	{ "umtx lock", &lock_class_mtx_sleep },
507 	{ NULL, NULL },
508 	/*
509 	 * Sockets
510 	 */
511 	{ "accept", &lock_class_mtx_sleep },
512 	{ "so_snd", &lock_class_mtx_sleep },
513 	{ "so_rcv", &lock_class_mtx_sleep },
514 	{ "sellck", &lock_class_mtx_sleep },
515 	{ NULL, NULL },
516 	/*
517 	 * Routing
518 	 */
519 	{ "so_rcv", &lock_class_mtx_sleep },
520 	{ "radix node head", &lock_class_rm },
521 	{ "rtentry", &lock_class_mtx_sleep },
522 	{ "ifaddr", &lock_class_mtx_sleep },
523 	{ NULL, NULL },
524 	/*
525 	 * IPv4 multicast:
526 	 * protocol locks before interface locks, after UDP locks.
527 	 */
528 	{ "in_multi_sx", &lock_class_sx },
529 	{ "udpinp", &lock_class_rw },
530 	{ "in_multi_list_mtx", &lock_class_mtx_sleep },
531 	{ "igmp_mtx", &lock_class_mtx_sleep },
532 	{ "ifnet_rw", &lock_class_rw },
533 	{ "if_addr_lock", &lock_class_mtx_sleep },
534 	{ NULL, NULL },
535 	/*
536 	 * IPv6 multicast:
537 	 * protocol locks before interface locks, after UDP locks.
538 	 */
539 	{ "in6_multi_sx", &lock_class_sx },
540 	{ "udpinp", &lock_class_rw },
541 	{ "in6_multi_list_mtx", &lock_class_mtx_sleep },
542 	{ "mld_mtx", &lock_class_mtx_sleep },
543 	{ "ifnet_rw", &lock_class_rw },
544 	{ "if_addr_lock", &lock_class_mtx_sleep },
545 	{ NULL, NULL },
546 	/*
547 	 * UNIX Domain Sockets
548 	 */
549 	{ "unp_link_rwlock", &lock_class_rw },
550 	{ "unp_list_lock", &lock_class_mtx_sleep },
551 	{ "unp", &lock_class_mtx_sleep },
552 	{ "so_snd", &lock_class_mtx_sleep },
553 	{ NULL, NULL },
554 	/*
555 	 * UDP/IP
556 	 */
557 	{ "udp", &lock_class_mtx_sleep },
558 	{ "udpinp", &lock_class_rw },
559 	{ "so_snd", &lock_class_mtx_sleep },
560 	{ NULL, NULL },
561 	/*
562 	 * TCP/IP
563 	 */
564 	{ "tcp", &lock_class_mtx_sleep },
565 	{ "tcpinp", &lock_class_rw },
566 	{ "so_snd", &lock_class_mtx_sleep },
567 	{ NULL, NULL },
568 	/*
569 	 * BPF
570 	 */
571 	{ "bpf global lock", &lock_class_sx },
572 	{ "bpf cdev lock", &lock_class_mtx_sleep },
573 	{ NULL, NULL },
574 	/*
575 	 * NFS server
576 	 */
577 	{ "nfsd_mtx", &lock_class_mtx_sleep },
578 	{ "so_snd", &lock_class_mtx_sleep },
579 	{ NULL, NULL },
580 
581 	/*
582 	 * IEEE 802.11
583 	 */
584 	{ "802.11 com lock", &lock_class_mtx_sleep},
585 	{ NULL, NULL },
586 	/*
587 	 * Network drivers
588 	 */
589 	{ "network driver", &lock_class_mtx_sleep},
590 	{ NULL, NULL },
591 
592 	/*
593 	 * Netgraph
594 	 */
595 	{ "ng_node", &lock_class_mtx_sleep },
596 	{ "ng_worklist", &lock_class_mtx_sleep },
597 	{ NULL, NULL },
598 	/*
599 	 * CDEV
600 	 */
601 	{ "vm map (system)", &lock_class_mtx_sleep },
602 	{ "vnode interlock", &lock_class_mtx_sleep },
603 	{ "cdev", &lock_class_mtx_sleep },
604 	{ NULL, NULL },
605 	/*
606 	 * VM
607 	 */
608 	{ "vm map (user)", &lock_class_sx },
609 	{ "vm object", &lock_class_rw },
610 	{ "vm page", &lock_class_mtx_sleep },
611 	{ "pmap pv global", &lock_class_rw },
612 	{ "pmap", &lock_class_mtx_sleep },
613 	{ "pmap pv list", &lock_class_rw },
614 	{ "vm page free queue", &lock_class_mtx_sleep },
615 	{ "vm pagequeue", &lock_class_mtx_sleep },
616 	{ NULL, NULL },
617 	/*
618 	 * kqueue/VFS interaction
619 	 */
620 	{ "kqueue", &lock_class_mtx_sleep },
621 	{ "struct mount mtx", &lock_class_mtx_sleep },
622 	{ "vnode interlock", &lock_class_mtx_sleep },
623 	{ NULL, NULL },
624 	/*
625 	 * VFS namecache
626 	 */
627 	{ "ncvn", &lock_class_mtx_sleep },
628 	{ "ncbuc", &lock_class_rw },
629 	{ "vnode interlock", &lock_class_mtx_sleep },
630 	{ "ncneg", &lock_class_mtx_sleep },
631 	{ NULL, NULL },
632 	/*
633 	 * ZFS locking
634 	 */
635 	{ "dn->dn_mtx", &lock_class_sx },
636 	{ "dr->dt.di.dr_mtx", &lock_class_sx },
637 	{ "db->db_mtx", &lock_class_sx },
638 	{ NULL, NULL },
639 	/*
640 	 * TCP log locks
641 	 */
642 	{ "TCP ID tree", &lock_class_rw },
643 	{ "tcp log id bucket", &lock_class_mtx_sleep },
644 	{ "tcpinp", &lock_class_rw },
645 	{ "TCP log expireq", &lock_class_mtx_sleep },
646 	{ NULL, NULL },
647 	/*
648 	 * spin locks
649 	 */
650 #ifdef SMP
651 	{ "ap boot", &lock_class_mtx_spin },
652 #endif
653 	{ "rm.mutex_mtx", &lock_class_mtx_spin },
654 	{ "sio", &lock_class_mtx_spin },
655 #ifdef __i386__
656 	{ "cy", &lock_class_mtx_spin },
657 #endif
658 #ifdef __sparc64__
659 	{ "pcib_mtx", &lock_class_mtx_spin },
660 	{ "rtc_mtx", &lock_class_mtx_spin },
661 #endif
662 	{ "scc_hwmtx", &lock_class_mtx_spin },
663 	{ "uart_hwmtx", &lock_class_mtx_spin },
664 	{ "fast_taskqueue", &lock_class_mtx_spin },
665 	{ "intr table", &lock_class_mtx_spin },
666 	{ "process slock", &lock_class_mtx_spin },
667 	{ "syscons video lock", &lock_class_mtx_spin },
668 	{ "sleepq chain", &lock_class_mtx_spin },
669 	{ "rm_spinlock", &lock_class_mtx_spin },
670 	{ "turnstile chain", &lock_class_mtx_spin },
671 	{ "turnstile lock", &lock_class_mtx_spin },
672 	{ "sched lock", &lock_class_mtx_spin },
673 	{ "td_contested", &lock_class_mtx_spin },
674 	{ "callout", &lock_class_mtx_spin },
675 	{ "entropy harvest mutex", &lock_class_mtx_spin },
676 #ifdef SMP
677 	{ "smp rendezvous", &lock_class_mtx_spin },
678 #endif
679 #ifdef __powerpc__
680 	{ "tlb0", &lock_class_mtx_spin },
681 #endif
682 	{ NULL, NULL },
683 	{ "sched lock", &lock_class_mtx_spin },
684 #ifdef	HWPMC_HOOKS
685 	{ "pmc-per-proc", &lock_class_mtx_spin },
686 #endif
687 	{ NULL, NULL },
688 	/*
689 	 * leaf locks
690 	 */
691 	{ "intrcnt", &lock_class_mtx_spin },
692 	{ "icu", &lock_class_mtx_spin },
693 #if defined(SMP) && defined(__sparc64__)
694 	{ "ipi", &lock_class_mtx_spin },
695 #endif
696 #ifdef __i386__
697 	{ "allpmaps", &lock_class_mtx_spin },
698 	{ "descriptor tables", &lock_class_mtx_spin },
699 #endif
700 	{ "clk", &lock_class_mtx_spin },
701 	{ "cpuset", &lock_class_mtx_spin },
702 	{ "mprof lock", &lock_class_mtx_spin },
703 	{ "zombie lock", &lock_class_mtx_spin },
704 	{ "ALD Queue", &lock_class_mtx_spin },
705 #if defined(__i386__) || defined(__amd64__)
706 	{ "pcicfg", &lock_class_mtx_spin },
707 	{ "NDIS thread lock", &lock_class_mtx_spin },
708 #endif
709 	{ "tw_osl_io_lock", &lock_class_mtx_spin },
710 	{ "tw_osl_q_lock", &lock_class_mtx_spin },
711 	{ "tw_cl_io_lock", &lock_class_mtx_spin },
712 	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
713 	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
714 #ifdef	HWPMC_HOOKS
715 	{ "pmc-leaf", &lock_class_mtx_spin },
716 #endif
717 	{ "blocked lock", &lock_class_mtx_spin },
718 	{ NULL, NULL },
719 	{ NULL, NULL }
720 };
721 
722 /*
723  * Pairs of locks which have been blessed.  Witness does not complain about
724  * order problems with blessed lock pairs.  Please do not add an entry to the
725  * table without an explanatory comment.
726  */
727 static struct witness_blessed blessed_list[] = {
728 	/*
729 	 * See the comment in ufs_dirhash.c.  Basically, a vnode lock serializes
730 	 * both lock orders, so a deadlock cannot happen as a result of this
731 	 * LOR.
732 	 */
733 	{ "dirhash",	"bufwait" },
734 
735 	/*
736 	 * A UFS vnode may be locked in vget() while a buffer belonging to the
737 	 * parent directory vnode is locked.
738 	 */
739 	{ "ufs",	"bufwait" },
740 };
741 
742 /*
743  * This global is set to 0 once it becomes safe to use the witness code.
744  */
745 static int witness_cold = 1;
746 
747 /*
748  * This global is set to 1 once the static lock orders have been enrolled
749  * so that a warning can be issued for any spin locks enrolled later.
750  */
751 static int witness_spin_warn = 0;
752 
753 /* Trim useless garbage from filenames. */
754 static const char *
755 fixup_filename(const char *file)
756 {
757 
758 	if (file == NULL)
759 		return (NULL);
760 	while (strncmp(file, "../", 3) == 0)
761 		file += 3;
762 	return (file);
763 }
764 
765 /*
766  * Calculate the size of early witness structures.
767  */
768 int
769 witness_startup_count(void)
770 {
771 	int sz;
772 
773 	sz = sizeof(struct witness) * witness_count;
774 	sz += sizeof(*w_rmatrix) * (witness_count + 1);
775 	sz += sizeof(*w_rmatrix[0]) * (witness_count + 1) *
776 	    (witness_count + 1);
777 
778 	return (sz);
779 }
780 
781 /*
782  * The WITNESS-enabled diagnostic code.  Note that the witness code does
783  * assume that the early boot is single-threaded at least until after this
784  * routine is completed.
785  */
786 void
787 witness_startup(void *mem)
788 {
789 	struct lock_object *lock;
790 	struct witness_order_list_entry *order;
791 	struct witness *w, *w1;
792 	uintptr_t p;
793 	int i;
794 
795 	p = (uintptr_t)mem;
796 	w_data = (void *)p;
797 	p += sizeof(struct witness) * witness_count;
798 
799 	w_rmatrix = (void *)p;
800 	p += sizeof(*w_rmatrix) * (witness_count + 1);
801 
802 	for (i = 0; i < witness_count + 1; i++) {
803 		w_rmatrix[i] = (void *)p;
804 		p += sizeof(*w_rmatrix[i]) * (witness_count + 1);
805 	}
806 	badstack_sbuf_size = witness_count * 256;
807 
808 	/*
809 	 * We have to release Giant before initializing its witness
810 	 * structure so that WITNESS doesn't get confused.
811 	 */
812 	mtx_unlock(&Giant);
813 	mtx_assert(&Giant, MA_NOTOWNED);
814 
815 	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
816 	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
817 	    MTX_NOWITNESS | MTX_NOPROFILE);
818 	for (i = witness_count - 1; i >= 0; i--) {
819 		w = &w_data[i];
820 		memset(w, 0, sizeof(*w));
821 		w_data[i].w_index = i;	/* Witness index never changes. */
822 		witness_free(w);
823 	}
824 	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
825 	    ("%s: Invalid list of free witness objects", __func__));
826 
827 	/* Witness with index 0 is not used to aid in debugging. */
828 	STAILQ_REMOVE_HEAD(&w_free, w_list);
829 	w_free_cnt--;
830 
831 	for (i = 0; i < witness_count; i++) {
832 		memset(w_rmatrix[i], 0, sizeof(*w_rmatrix[i]) *
833 		    (witness_count + 1));
834 	}
835 
836 	for (i = 0; i < LOCK_CHILDCOUNT; i++)
837 		witness_lock_list_free(&w_locklistdata[i]);
838 	witness_init_hash_tables();
839 
840 	/* First add in all the specified order lists. */
841 	for (order = order_lists; order->w_name != NULL; order++) {
842 		w = enroll(order->w_name, order->w_class);
843 		if (w == NULL)
844 			continue;
845 		w->w_file = "order list";
846 		for (order++; order->w_name != NULL; order++) {
847 			w1 = enroll(order->w_name, order->w_class);
848 			if (w1 == NULL)
849 				continue;
850 			w1->w_file = "order list";
851 			itismychild(w, w1);
852 			w = w1;
853 		}
854 	}
855 	witness_spin_warn = 1;
856 
857 	/* Iterate through all locks and add them to witness. */
858 	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
859 		lock = pending_locks[i].wh_lock;
860 		KASSERT(lock->lo_flags & LO_WITNESS,
861 		    ("%s: lock %s is on pending list but not LO_WITNESS",
862 		    __func__, lock->lo_name));
863 		lock->lo_witness = enroll(pending_locks[i].wh_type,
864 		    LOCK_CLASS(lock));
865 	}
866 
867 	/* Mark the witness code as being ready for use. */
868 	witness_cold = 0;
869 
870 	mtx_lock(&Giant);
871 }
872 
873 void
874 witness_init(struct lock_object *lock, const char *type)
875 {
876 	struct lock_class *class;
877 
878 	/* Various sanity checks. */
879 	class = LOCK_CLASS(lock);
880 	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
881 	    (class->lc_flags & LC_RECURSABLE) == 0)
882 		kassert_panic("%s: lock (%s) %s can not be recursable",
883 		    __func__, class->lc_name, lock->lo_name);
884 	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
885 	    (class->lc_flags & LC_SLEEPABLE) == 0)
886 		kassert_panic("%s: lock (%s) %s can not be sleepable",
887 		    __func__, class->lc_name, lock->lo_name);
888 	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
889 	    (class->lc_flags & LC_UPGRADABLE) == 0)
890 		kassert_panic("%s: lock (%s) %s can not be upgradable",
891 		    __func__, class->lc_name, lock->lo_name);
892 
893 	/*
894 	 * If we shouldn't watch this lock, then just clear lo_witness.
895 	 * Otherwise, if witness_cold is set, then it is too early to
896 	 * enroll this lock, so defer it to witness_initialize() by adding
897 	 * it to the pending_locks list.  If it is not too early, then enroll
898 	 * the lock now.
899 	 */
900 	if (witness_watch < 1 || panicstr != NULL ||
901 	    (lock->lo_flags & LO_WITNESS) == 0)
902 		lock->lo_witness = NULL;
903 	else if (witness_cold) {
904 		pending_locks[pending_cnt].wh_lock = lock;
905 		pending_locks[pending_cnt++].wh_type = type;
906 		if (pending_cnt > WITNESS_PENDLIST)
907 			panic("%s: pending locks list is too small, "
908 			    "increase WITNESS_PENDLIST\n",
909 			    __func__);
910 	} else
911 		lock->lo_witness = enroll(type, class);
912 }
913 
914 void
915 witness_destroy(struct lock_object *lock)
916 {
917 	struct lock_class *class;
918 	struct witness *w;
919 
920 	class = LOCK_CLASS(lock);
921 
922 	if (witness_cold)
923 		panic("lock (%s) %s destroyed while witness_cold",
924 		    class->lc_name, lock->lo_name);
925 
926 	/* XXX: need to verify that no one holds the lock */
927 	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
928 		return;
929 	w = lock->lo_witness;
930 
931 	mtx_lock_spin(&w_mtx);
932 	MPASS(w->w_refcount > 0);
933 	w->w_refcount--;
934 
935 	if (w->w_refcount == 0)
936 		depart(w);
937 	mtx_unlock_spin(&w_mtx);
938 }
939 
940 #ifdef DDB
941 static void
942 witness_ddb_compute_levels(void)
943 {
944 	struct witness *w;
945 
946 	/*
947 	 * First clear all levels.
948 	 */
949 	STAILQ_FOREACH(w, &w_all, w_list)
950 		w->w_ddb_level = -1;
951 
952 	/*
953 	 * Look for locks with no parents and level all their descendants.
954 	 */
955 	STAILQ_FOREACH(w, &w_all, w_list) {
956 
957 		/* If the witness has ancestors (is not a root), skip it. */
958 		if (w->w_num_ancestors > 0)
959 			continue;
960 		witness_ddb_level_descendants(w, 0);
961 	}
962 }
963 
964 static void
965 witness_ddb_level_descendants(struct witness *w, int l)
966 {
967 	int i;
968 
969 	if (w->w_ddb_level >= l)
970 		return;
971 
972 	w->w_ddb_level = l;
973 	l++;
974 
975 	for (i = 1; i <= w_max_used_index; i++) {
976 		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
977 			witness_ddb_level_descendants(&w_data[i], l);
978 	}
979 }
980 
981 static void
982 witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
983     struct witness *w, int indent)
984 {
985 	int i;
986 
987  	for (i = 0; i < indent; i++)
988  		prnt(" ");
989 	prnt("%s (type: %s, depth: %d, active refs: %d)",
990 	     w->w_name, w->w_class->lc_name,
991 	     w->w_ddb_level, w->w_refcount);
992  	if (w->w_displayed) {
993  		prnt(" -- (already displayed)\n");
994  		return;
995  	}
996  	w->w_displayed = 1;
997 	if (w->w_file != NULL && w->w_line != 0)
998 		prnt(" -- last acquired @ %s:%d\n", fixup_filename(w->w_file),
999 		    w->w_line);
1000 	else
1001 		prnt(" -- never acquired\n");
1002 	indent++;
1003 	WITNESS_INDEX_ASSERT(w->w_index);
1004 	for (i = 1; i <= w_max_used_index; i++) {
1005 		if (db_pager_quit)
1006 			return;
1007 		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
1008 			witness_ddb_display_descendants(prnt, &w_data[i],
1009 			    indent);
1010 	}
1011 }
1012 
1013 static void
1014 witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
1015     struct witness_list *list)
1016 {
1017 	struct witness *w;
1018 
1019 	STAILQ_FOREACH(w, list, w_typelist) {
1020 		if (w->w_file == NULL || w->w_ddb_level > 0)
1021 			continue;
1022 
1023 		/* This lock has no anscestors - display its descendants. */
1024 		witness_ddb_display_descendants(prnt, w, 0);
1025 		if (db_pager_quit)
1026 			return;
1027 	}
1028 }
1029 
1030 static void
1031 witness_ddb_display(int(*prnt)(const char *fmt, ...))
1032 {
1033 	struct witness *w;
1034 
1035 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1036 	witness_ddb_compute_levels();
1037 
1038 	/* Clear all the displayed flags. */
1039 	STAILQ_FOREACH(w, &w_all, w_list)
1040 		w->w_displayed = 0;
1041 
1042 	/*
1043 	 * First, handle sleep locks which have been acquired at least
1044 	 * once.
1045 	 */
1046 	prnt("Sleep locks:\n");
1047 	witness_ddb_display_list(prnt, &w_sleep);
1048 	if (db_pager_quit)
1049 		return;
1050 
1051 	/*
1052 	 * Now do spin locks which have been acquired at least once.
1053 	 */
1054 	prnt("\nSpin locks:\n");
1055 	witness_ddb_display_list(prnt, &w_spin);
1056 	if (db_pager_quit)
1057 		return;
1058 
1059 	/*
1060 	 * Finally, any locks which have not been acquired yet.
1061 	 */
1062 	prnt("\nLocks which were never acquired:\n");
1063 	STAILQ_FOREACH(w, &w_all, w_list) {
1064 		if (w->w_file != NULL || w->w_refcount == 0)
1065 			continue;
1066 		prnt("%s (type: %s, depth: %d)\n", w->w_name,
1067 		    w->w_class->lc_name, w->w_ddb_level);
1068 		if (db_pager_quit)
1069 			return;
1070 	}
1071 }
1072 #endif /* DDB */
1073 
1074 int
1075 witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
1076 {
1077 
1078 	if (witness_watch == -1 || panicstr != NULL)
1079 		return (0);
1080 
1081 	/* Require locks that witness knows about. */
1082 	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
1083 	    lock2->lo_witness == NULL)
1084 		return (EINVAL);
1085 
1086 	mtx_assert(&w_mtx, MA_NOTOWNED);
1087 	mtx_lock_spin(&w_mtx);
1088 
1089 	/*
1090 	 * If we already have either an explicit or implied lock order that
1091 	 * is the other way around, then return an error.
1092 	 */
1093 	if (witness_watch &&
1094 	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
1095 		mtx_unlock_spin(&w_mtx);
1096 		return (EDOOFUS);
1097 	}
1098 
1099 	/* Try to add the new order. */
1100 	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1101 	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
1102 	itismychild(lock1->lo_witness, lock2->lo_witness);
1103 	mtx_unlock_spin(&w_mtx);
1104 	return (0);
1105 }
1106 
1107 void
1108 witness_checkorder(struct lock_object *lock, int flags, const char *file,
1109     int line, struct lock_object *interlock)
1110 {
1111 	struct lock_list_entry *lock_list, *lle;
1112 	struct lock_instance *lock1, *lock2, *plock;
1113 	struct lock_class *class, *iclass;
1114 	struct witness *w, *w1;
1115 	struct thread *td;
1116 	int i, j;
1117 
1118 	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
1119 	    panicstr != NULL)
1120 		return;
1121 
1122 	w = lock->lo_witness;
1123 	class = LOCK_CLASS(lock);
1124 	td = curthread;
1125 
1126 	if (class->lc_flags & LC_SLEEPLOCK) {
1127 
1128 		/*
1129 		 * Since spin locks include a critical section, this check
1130 		 * implicitly enforces a lock order of all sleep locks before
1131 		 * all spin locks.
1132 		 */
1133 		if (td->td_critnest != 0 && !kdb_active)
1134 			kassert_panic("acquiring blockable sleep lock with "
1135 			    "spinlock or critical section held (%s) %s @ %s:%d",
1136 			    class->lc_name, lock->lo_name,
1137 			    fixup_filename(file), line);
1138 
1139 		/*
1140 		 * If this is the first lock acquired then just return as
1141 		 * no order checking is needed.
1142 		 */
1143 		lock_list = td->td_sleeplocks;
1144 		if (lock_list == NULL || lock_list->ll_count == 0)
1145 			return;
1146 	} else {
1147 
1148 		/*
1149 		 * If this is the first lock, just return as no order
1150 		 * checking is needed.  Avoid problems with thread
1151 		 * migration pinning the thread while checking if
1152 		 * spinlocks are held.  If at least one spinlock is held
1153 		 * the thread is in a safe path and it is allowed to
1154 		 * unpin it.
1155 		 */
1156 		sched_pin();
1157 		lock_list = PCPU_GET(spinlocks);
1158 		if (lock_list == NULL || lock_list->ll_count == 0) {
1159 			sched_unpin();
1160 			return;
1161 		}
1162 		sched_unpin();
1163 	}
1164 
1165 	/*
1166 	 * Check to see if we are recursing on a lock we already own.  If
1167 	 * so, make sure that we don't mismatch exclusive and shared lock
1168 	 * acquires.
1169 	 */
1170 	lock1 = find_instance(lock_list, lock);
1171 	if (lock1 != NULL) {
1172 		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
1173 		    (flags & LOP_EXCLUSIVE) == 0) {
1174 			witness_output("shared lock of (%s) %s @ %s:%d\n",
1175 			    class->lc_name, lock->lo_name,
1176 			    fixup_filename(file), line);
1177 			witness_output("while exclusively locked from %s:%d\n",
1178 			    fixup_filename(lock1->li_file), lock1->li_line);
1179 			kassert_panic("excl->share");
1180 		}
1181 		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
1182 		    (flags & LOP_EXCLUSIVE) != 0) {
1183 			witness_output("exclusive lock of (%s) %s @ %s:%d\n",
1184 			    class->lc_name, lock->lo_name,
1185 			    fixup_filename(file), line);
1186 			witness_output("while share locked from %s:%d\n",
1187 			    fixup_filename(lock1->li_file), lock1->li_line);
1188 			kassert_panic("share->excl");
1189 		}
1190 		return;
1191 	}
1192 
1193 	/* Warn if the interlock is not locked exactly once. */
1194 	if (interlock != NULL) {
1195 		iclass = LOCK_CLASS(interlock);
1196 		lock1 = find_instance(lock_list, interlock);
1197 		if (lock1 == NULL)
1198 			kassert_panic("interlock (%s) %s not locked @ %s:%d",
1199 			    iclass->lc_name, interlock->lo_name,
1200 			    fixup_filename(file), line);
1201 		else if ((lock1->li_flags & LI_RECURSEMASK) != 0)
1202 			kassert_panic("interlock (%s) %s recursed @ %s:%d",
1203 			    iclass->lc_name, interlock->lo_name,
1204 			    fixup_filename(file), line);
1205 	}
1206 
1207 	/*
1208 	 * Find the previously acquired lock, but ignore interlocks.
1209 	 */
1210 	plock = &lock_list->ll_children[lock_list->ll_count - 1];
1211 	if (interlock != NULL && plock->li_lock == interlock) {
1212 		if (lock_list->ll_count > 1)
1213 			plock =
1214 			    &lock_list->ll_children[lock_list->ll_count - 2];
1215 		else {
1216 			lle = lock_list->ll_next;
1217 
1218 			/*
1219 			 * The interlock is the only lock we hold, so
1220 			 * simply return.
1221 			 */
1222 			if (lle == NULL)
1223 				return;
1224 			plock = &lle->ll_children[lle->ll_count - 1];
1225 		}
1226 	}
1227 
1228 	/*
1229 	 * Try to perform most checks without a lock.  If this succeeds we
1230 	 * can skip acquiring the lock and return success.  Otherwise we redo
1231 	 * the check with the lock held to handle races with concurrent updates.
1232 	 */
1233 	w1 = plock->li_lock->lo_witness;
1234 	if (witness_lock_order_check(w1, w))
1235 		return;
1236 
1237 	mtx_lock_spin(&w_mtx);
1238 	if (witness_lock_order_check(w1, w)) {
1239 		mtx_unlock_spin(&w_mtx);
1240 		return;
1241 	}
1242 	witness_lock_order_add(w1, w);
1243 
1244 	/*
1245 	 * Check for duplicate locks of the same type.  Note that we only
1246 	 * have to check for this on the last lock we just acquired.  Any
1247 	 * other cases will be caught as lock order violations.
1248 	 */
1249 	if (w1 == w) {
1250 		i = w->w_index;
1251 		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
1252 		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
1253 		    w_rmatrix[i][i] |= WITNESS_REVERSAL;
1254 			w->w_reversed = 1;
1255 			mtx_unlock_spin(&w_mtx);
1256 			witness_output(
1257 			    "acquiring duplicate lock of same type: \"%s\"\n",
1258 			    w->w_name);
1259 			witness_output(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
1260 			    fixup_filename(plock->li_file), plock->li_line);
1261 			witness_output(" 2nd %s @ %s:%d\n", lock->lo_name,
1262 			    fixup_filename(file), line);
1263 			witness_debugger(1, __func__);
1264 		} else
1265 			mtx_unlock_spin(&w_mtx);
1266 		return;
1267 	}
1268 	mtx_assert(&w_mtx, MA_OWNED);
1269 
1270 	/*
1271 	 * If we know that the lock we are acquiring comes after
1272 	 * the lock we most recently acquired in the lock order tree,
1273 	 * then there is no need for any further checks.
1274 	 */
1275 	if (isitmychild(w1, w))
1276 		goto out;
1277 
1278 	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
1279 		for (i = lle->ll_count - 1; i >= 0; i--, j++) {
1280 
1281 			MPASS(j < LOCK_CHILDCOUNT * LOCK_NCHILDREN);
1282 			lock1 = &lle->ll_children[i];
1283 
1284 			/*
1285 			 * Ignore the interlock.
1286 			 */
1287 			if (interlock == lock1->li_lock)
1288 				continue;
1289 
1290 			/*
1291 			 * If this lock doesn't undergo witness checking,
1292 			 * then skip it.
1293 			 */
1294 			w1 = lock1->li_lock->lo_witness;
1295 			if (w1 == NULL) {
1296 				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
1297 				    ("lock missing witness structure"));
1298 				continue;
1299 			}
1300 
1301 			/*
1302 			 * If we are locking Giant and this is a sleepable
1303 			 * lock, then skip it.
1304 			 */
1305 			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
1306 			    lock == &Giant.lock_object)
1307 				continue;
1308 
1309 			/*
1310 			 * If we are locking a sleepable lock and this lock
1311 			 * is Giant, then skip it.
1312 			 */
1313 			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1314 			    lock1->li_lock == &Giant.lock_object)
1315 				continue;
1316 
1317 			/*
1318 			 * If we are locking a sleepable lock and this lock
1319 			 * isn't sleepable, we want to treat it as a lock
1320 			 * order violation to enfore a general lock order of
1321 			 * sleepable locks before non-sleepable locks.
1322 			 */
1323 			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1324 			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1325 				goto reversal;
1326 
1327 			/*
1328 			 * If we are locking Giant and this is a non-sleepable
1329 			 * lock, then treat it as a reversal.
1330 			 */
1331 			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
1332 			    lock == &Giant.lock_object)
1333 				goto reversal;
1334 
1335 			/*
1336 			 * Check the lock order hierarchy for a reveresal.
1337 			 */
1338 			if (!isitmydescendant(w, w1))
1339 				continue;
1340 		reversal:
1341 
1342 			/*
1343 			 * We have a lock order violation, check to see if it
1344 			 * is allowed or has already been yelled about.
1345 			 */
1346 
1347 			/* Bail if this violation is known */
1348 			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
1349 				goto out;
1350 
1351 			/* Record this as a violation */
1352 			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
1353 			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
1354 			w->w_reversed = w1->w_reversed = 1;
1355 			witness_increment_graph_generation();
1356 
1357 			/*
1358 			 * If the lock order is blessed, bail before logging
1359 			 * anything.  We don't look for other lock order
1360 			 * violations though, which may be a bug.
1361 			 */
1362 			if (blessed(w, w1))
1363 				goto out;
1364 			mtx_unlock_spin(&w_mtx);
1365 
1366 #ifdef WITNESS_NO_VNODE
1367 			/*
1368 			 * There are known LORs between VNODE locks. They are
1369 			 * not an indication of a bug. VNODE locks are flagged
1370 			 * as such (LO_IS_VNODE) and we don't yell if the LOR
1371 			 * is between 2 VNODE locks.
1372 			 */
1373 			if ((lock->lo_flags & LO_IS_VNODE) != 0 &&
1374 			    (lock1->li_lock->lo_flags & LO_IS_VNODE) != 0)
1375 				return;
1376 #endif
1377 
1378 			/*
1379 			 * Ok, yell about it.
1380 			 */
1381 			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1382 			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1383 				witness_output(
1384 		"lock order reversal: (sleepable after non-sleepable)\n");
1385 			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
1386 			    && lock == &Giant.lock_object)
1387 				witness_output(
1388 		"lock order reversal: (Giant after non-sleepable)\n");
1389 			else
1390 				witness_output("lock order reversal:\n");
1391 
1392 			/*
1393 			 * Try to locate an earlier lock with
1394 			 * witness w in our list.
1395 			 */
1396 			do {
1397 				lock2 = &lle->ll_children[i];
1398 				MPASS(lock2->li_lock != NULL);
1399 				if (lock2->li_lock->lo_witness == w)
1400 					break;
1401 				if (i == 0 && lle->ll_next != NULL) {
1402 					lle = lle->ll_next;
1403 					i = lle->ll_count - 1;
1404 					MPASS(i >= 0 && i < LOCK_NCHILDREN);
1405 				} else
1406 					i--;
1407 			} while (i >= 0);
1408 			if (i < 0) {
1409 				witness_output(" 1st %p %s (%s) @ %s:%d\n",
1410 				    lock1->li_lock, lock1->li_lock->lo_name,
1411 				    w1->w_name, fixup_filename(lock1->li_file),
1412 				    lock1->li_line);
1413 				witness_output(" 2nd %p %s (%s) @ %s:%d\n", lock,
1414 				    lock->lo_name, w->w_name,
1415 				    fixup_filename(file), line);
1416 			} else {
1417 				witness_output(" 1st %p %s (%s) @ %s:%d\n",
1418 				    lock2->li_lock, lock2->li_lock->lo_name,
1419 				    lock2->li_lock->lo_witness->w_name,
1420 				    fixup_filename(lock2->li_file),
1421 				    lock2->li_line);
1422 				witness_output(" 2nd %p %s (%s) @ %s:%d\n",
1423 				    lock1->li_lock, lock1->li_lock->lo_name,
1424 				    w1->w_name, fixup_filename(lock1->li_file),
1425 				    lock1->li_line);
1426 				witness_output(" 3rd %p %s (%s) @ %s:%d\n", lock,
1427 				    lock->lo_name, w->w_name,
1428 				    fixup_filename(file), line);
1429 			}
1430 			witness_debugger(1, __func__);
1431 			return;
1432 		}
1433 	}
1434 
1435 	/*
1436 	 * If requested, build a new lock order.  However, don't build a new
1437 	 * relationship between a sleepable lock and Giant if it is in the
1438 	 * wrong direction.  The correct lock order is that sleepable locks
1439 	 * always come before Giant.
1440 	 */
1441 	if (flags & LOP_NEWORDER &&
1442 	    !(plock->li_lock == &Giant.lock_object &&
1443 	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
1444 		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1445 		    w->w_name, plock->li_lock->lo_witness->w_name);
1446 		itismychild(plock->li_lock->lo_witness, w);
1447 	}
1448 out:
1449 	mtx_unlock_spin(&w_mtx);
1450 }
1451 
1452 void
1453 witness_lock(struct lock_object *lock, int flags, const char *file, int line)
1454 {
1455 	struct lock_list_entry **lock_list, *lle;
1456 	struct lock_instance *instance;
1457 	struct witness *w;
1458 	struct thread *td;
1459 
1460 	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
1461 	    panicstr != NULL)
1462 		return;
1463 	w = lock->lo_witness;
1464 	td = curthread;
1465 
1466 	/* Determine lock list for this lock. */
1467 	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
1468 		lock_list = &td->td_sleeplocks;
1469 	else
1470 		lock_list = PCPU_PTR(spinlocks);
1471 
1472 	/* Check to see if we are recursing on a lock we already own. */
1473 	instance = find_instance(*lock_list, lock);
1474 	if (instance != NULL) {
1475 		instance->li_flags++;
1476 		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
1477 		    td->td_proc->p_pid, lock->lo_name,
1478 		    instance->li_flags & LI_RECURSEMASK);
1479 		instance->li_file = file;
1480 		instance->li_line = line;
1481 		return;
1482 	}
1483 
1484 	/* Update per-witness last file and line acquire. */
1485 	w->w_file = file;
1486 	w->w_line = line;
1487 
1488 	/* Find the next open lock instance in the list and fill it. */
1489 	lle = *lock_list;
1490 	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
1491 		lle = witness_lock_list_get();
1492 		if (lle == NULL)
1493 			return;
1494 		lle->ll_next = *lock_list;
1495 		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
1496 		    td->td_proc->p_pid, lle);
1497 		*lock_list = lle;
1498 	}
1499 	instance = &lle->ll_children[lle->ll_count++];
1500 	instance->li_lock = lock;
1501 	instance->li_line = line;
1502 	instance->li_file = file;
1503 	if ((flags & LOP_EXCLUSIVE) != 0)
1504 		instance->li_flags = LI_EXCLUSIVE;
1505 	else
1506 		instance->li_flags = 0;
1507 	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
1508 	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
1509 }
1510 
1511 void
1512 witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
1513 {
1514 	struct lock_instance *instance;
1515 	struct lock_class *class;
1516 
1517 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1518 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1519 		return;
1520 	class = LOCK_CLASS(lock);
1521 	if (witness_watch) {
1522 		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1523 			kassert_panic(
1524 			    "upgrade of non-upgradable lock (%s) %s @ %s:%d",
1525 			    class->lc_name, lock->lo_name,
1526 			    fixup_filename(file), line);
1527 		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1528 			kassert_panic(
1529 			    "upgrade of non-sleep lock (%s) %s @ %s:%d",
1530 			    class->lc_name, lock->lo_name,
1531 			    fixup_filename(file), line);
1532 	}
1533 	instance = find_instance(curthread->td_sleeplocks, lock);
1534 	if (instance == NULL) {
1535 		kassert_panic("upgrade of unlocked lock (%s) %s @ %s:%d",
1536 		    class->lc_name, lock->lo_name,
1537 		    fixup_filename(file), line);
1538 		return;
1539 	}
1540 	if (witness_watch) {
1541 		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
1542 			kassert_panic(
1543 			    "upgrade of exclusive lock (%s) %s @ %s:%d",
1544 			    class->lc_name, lock->lo_name,
1545 			    fixup_filename(file), line);
1546 		if ((instance->li_flags & LI_RECURSEMASK) != 0)
1547 			kassert_panic(
1548 			    "upgrade of recursed lock (%s) %s r=%d @ %s:%d",
1549 			    class->lc_name, lock->lo_name,
1550 			    instance->li_flags & LI_RECURSEMASK,
1551 			    fixup_filename(file), line);
1552 	}
1553 	instance->li_flags |= LI_EXCLUSIVE;
1554 }
1555 
1556 void
1557 witness_downgrade(struct lock_object *lock, int flags, const char *file,
1558     int line)
1559 {
1560 	struct lock_instance *instance;
1561 	struct lock_class *class;
1562 
1563 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1564 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1565 		return;
1566 	class = LOCK_CLASS(lock);
1567 	if (witness_watch) {
1568 		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1569 			kassert_panic(
1570 			    "downgrade of non-upgradable lock (%s) %s @ %s:%d",
1571 			    class->lc_name, lock->lo_name,
1572 			    fixup_filename(file), line);
1573 		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1574 			kassert_panic(
1575 			    "downgrade of non-sleep lock (%s) %s @ %s:%d",
1576 			    class->lc_name, lock->lo_name,
1577 			    fixup_filename(file), line);
1578 	}
1579 	instance = find_instance(curthread->td_sleeplocks, lock);
1580 	if (instance == NULL) {
1581 		kassert_panic("downgrade of unlocked lock (%s) %s @ %s:%d",
1582 		    class->lc_name, lock->lo_name,
1583 		    fixup_filename(file), line);
1584 		return;
1585 	}
1586 	if (witness_watch) {
1587 		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
1588 			kassert_panic(
1589 			    "downgrade of shared lock (%s) %s @ %s:%d",
1590 			    class->lc_name, lock->lo_name,
1591 			    fixup_filename(file), line);
1592 		if ((instance->li_flags & LI_RECURSEMASK) != 0)
1593 			kassert_panic(
1594 			    "downgrade of recursed lock (%s) %s r=%d @ %s:%d",
1595 			    class->lc_name, lock->lo_name,
1596 			    instance->li_flags & LI_RECURSEMASK,
1597 			    fixup_filename(file), line);
1598 	}
1599 	instance->li_flags &= ~LI_EXCLUSIVE;
1600 }
1601 
1602 void
1603 witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
1604 {
1605 	struct lock_list_entry **lock_list, *lle;
1606 	struct lock_instance *instance;
1607 	struct lock_class *class;
1608 	struct thread *td;
1609 	register_t s;
1610 	int i, j;
1611 
1612 	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
1613 		return;
1614 	td = curthread;
1615 	class = LOCK_CLASS(lock);
1616 
1617 	/* Find lock instance associated with this lock. */
1618 	if (class->lc_flags & LC_SLEEPLOCK)
1619 		lock_list = &td->td_sleeplocks;
1620 	else
1621 		lock_list = PCPU_PTR(spinlocks);
1622 	lle = *lock_list;
1623 	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
1624 		for (i = 0; i < (*lock_list)->ll_count; i++) {
1625 			instance = &(*lock_list)->ll_children[i];
1626 			if (instance->li_lock == lock)
1627 				goto found;
1628 		}
1629 
1630 	/*
1631 	 * When disabling WITNESS through witness_watch we could end up in
1632 	 * having registered locks in the td_sleeplocks queue.
1633 	 * We have to make sure we flush these queues, so just search for
1634 	 * eventual register locks and remove them.
1635 	 */
1636 	if (witness_watch > 0) {
1637 		kassert_panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
1638 		    lock->lo_name, fixup_filename(file), line);
1639 		return;
1640 	} else {
1641 		return;
1642 	}
1643 found:
1644 
1645 	/* First, check for shared/exclusive mismatches. */
1646 	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
1647 	    (flags & LOP_EXCLUSIVE) == 0) {
1648 		witness_output("shared unlock of (%s) %s @ %s:%d\n",
1649 		    class->lc_name, lock->lo_name, fixup_filename(file), line);
1650 		witness_output("while exclusively locked from %s:%d\n",
1651 		    fixup_filename(instance->li_file), instance->li_line);
1652 		kassert_panic("excl->ushare");
1653 	}
1654 	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
1655 	    (flags & LOP_EXCLUSIVE) != 0) {
1656 		witness_output("exclusive unlock of (%s) %s @ %s:%d\n",
1657 		    class->lc_name, lock->lo_name, fixup_filename(file), line);
1658 		witness_output("while share locked from %s:%d\n",
1659 		    fixup_filename(instance->li_file),
1660 		    instance->li_line);
1661 		kassert_panic("share->uexcl");
1662 	}
1663 	/* If we are recursed, unrecurse. */
1664 	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
1665 		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
1666 		    td->td_proc->p_pid, instance->li_lock->lo_name,
1667 		    instance->li_flags);
1668 		instance->li_flags--;
1669 		return;
1670 	}
1671 	/* The lock is now being dropped, check for NORELEASE flag */
1672 	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
1673 		witness_output("forbidden unlock of (%s) %s @ %s:%d\n",
1674 		    class->lc_name, lock->lo_name, fixup_filename(file), line);
1675 		kassert_panic("lock marked norelease");
1676 	}
1677 
1678 	/* Otherwise, remove this item from the list. */
1679 	s = intr_disable();
1680 	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
1681 	    td->td_proc->p_pid, instance->li_lock->lo_name,
1682 	    (*lock_list)->ll_count - 1);
1683 	for (j = i; j < (*lock_list)->ll_count - 1; j++)
1684 		(*lock_list)->ll_children[j] =
1685 		    (*lock_list)->ll_children[j + 1];
1686 	(*lock_list)->ll_count--;
1687 	intr_restore(s);
1688 
1689 	/*
1690 	 * In order to reduce contention on w_mtx, we want to keep always an
1691 	 * head object into lists so that frequent allocation from the
1692 	 * free witness pool (and subsequent locking) is avoided.
1693 	 * In order to maintain the current code simple, when the head
1694 	 * object is totally unloaded it means also that we do not have
1695 	 * further objects in the list, so the list ownership needs to be
1696 	 * hand over to another object if the current head needs to be freed.
1697 	 */
1698 	if ((*lock_list)->ll_count == 0) {
1699 		if (*lock_list == lle) {
1700 			if (lle->ll_next == NULL)
1701 				return;
1702 		} else
1703 			lle = *lock_list;
1704 		*lock_list = lle->ll_next;
1705 		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
1706 		    td->td_proc->p_pid, lle);
1707 		witness_lock_list_free(lle);
1708 	}
1709 }
1710 
1711 void
1712 witness_thread_exit(struct thread *td)
1713 {
1714 	struct lock_list_entry *lle;
1715 	int i, n;
1716 
1717 	lle = td->td_sleeplocks;
1718 	if (lle == NULL || panicstr != NULL)
1719 		return;
1720 	if (lle->ll_count != 0) {
1721 		for (n = 0; lle != NULL; lle = lle->ll_next)
1722 			for (i = lle->ll_count - 1; i >= 0; i--) {
1723 				if (n == 0)
1724 					witness_output(
1725 		    "Thread %p exiting with the following locks held:\n", td);
1726 				n++;
1727 				witness_list_lock(&lle->ll_children[i],
1728 				    witness_output);
1729 
1730 			}
1731 		kassert_panic(
1732 		    "Thread %p cannot exit while holding sleeplocks\n", td);
1733 	}
1734 	witness_lock_list_free(lle);
1735 }
1736 
1737 /*
1738  * Warn if any locks other than 'lock' are held.  Flags can be passed in to
1739  * exempt Giant and sleepable locks from the checks as well.  If any
1740  * non-exempt locks are held, then a supplied message is printed to the
1741  * output channel along with a list of the offending locks.  If indicated in the
1742  * flags then a failure results in a panic as well.
1743  */
1744 int
1745 witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1746 {
1747 	struct lock_list_entry *lock_list, *lle;
1748 	struct lock_instance *lock1;
1749 	struct thread *td;
1750 	va_list ap;
1751 	int i, n;
1752 
1753 	if (witness_cold || witness_watch < 1 || panicstr != NULL)
1754 		return (0);
1755 	n = 0;
1756 	td = curthread;
1757 	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1758 		for (i = lle->ll_count - 1; i >= 0; i--) {
1759 			lock1 = &lle->ll_children[i];
1760 			if (lock1->li_lock == lock)
1761 				continue;
1762 			if (flags & WARN_GIANTOK &&
1763 			    lock1->li_lock == &Giant.lock_object)
1764 				continue;
1765 			if (flags & WARN_SLEEPOK &&
1766 			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1767 				continue;
1768 			if (n == 0) {
1769 				va_start(ap, fmt);
1770 				vprintf(fmt, ap);
1771 				va_end(ap);
1772 				printf(" with the following %slocks held:\n",
1773 				    (flags & WARN_SLEEPOK) != 0 ?
1774 				    "non-sleepable " : "");
1775 			}
1776 			n++;
1777 			witness_list_lock(lock1, printf);
1778 		}
1779 
1780 	/*
1781 	 * Pin the thread in order to avoid problems with thread migration.
1782 	 * Once that all verifies are passed about spinlocks ownership,
1783 	 * the thread is in a safe path and it can be unpinned.
1784 	 */
1785 	sched_pin();
1786 	lock_list = PCPU_GET(spinlocks);
1787 	if (lock_list != NULL && lock_list->ll_count != 0) {
1788 		sched_unpin();
1789 
1790 		/*
1791 		 * We should only have one spinlock and as long as
1792 		 * the flags cannot match for this locks class,
1793 		 * check if the first spinlock is the one curthread
1794 		 * should hold.
1795 		 */
1796 		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
1797 		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
1798 		    lock1->li_lock == lock && n == 0)
1799 			return (0);
1800 
1801 		va_start(ap, fmt);
1802 		vprintf(fmt, ap);
1803 		va_end(ap);
1804 		printf(" with the following %slocks held:\n",
1805 		    (flags & WARN_SLEEPOK) != 0 ?  "non-sleepable " : "");
1806 		n += witness_list_locks(&lock_list, printf);
1807 	} else
1808 		sched_unpin();
1809 	if (flags & WARN_PANIC && n)
1810 		kassert_panic("%s", __func__);
1811 	else
1812 		witness_debugger(n, __func__);
1813 	return (n);
1814 }
1815 
1816 const char *
1817 witness_file(struct lock_object *lock)
1818 {
1819 	struct witness *w;
1820 
1821 	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1822 		return ("?");
1823 	w = lock->lo_witness;
1824 	return (w->w_file);
1825 }
1826 
1827 int
1828 witness_line(struct lock_object *lock)
1829 {
1830 	struct witness *w;
1831 
1832 	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1833 		return (0);
1834 	w = lock->lo_witness;
1835 	return (w->w_line);
1836 }
1837 
1838 static struct witness *
1839 enroll(const char *description, struct lock_class *lock_class)
1840 {
1841 	struct witness *w;
1842 
1843 	MPASS(description != NULL);
1844 
1845 	if (witness_watch == -1 || panicstr != NULL)
1846 		return (NULL);
1847 	if ((lock_class->lc_flags & LC_SPINLOCK)) {
1848 		if (witness_skipspin)
1849 			return (NULL);
1850 	} else if ((lock_class->lc_flags & LC_SLEEPLOCK) == 0) {
1851 		kassert_panic("lock class %s is not sleep or spin",
1852 		    lock_class->lc_name);
1853 		return (NULL);
1854 	}
1855 
1856 	mtx_lock_spin(&w_mtx);
1857 	w = witness_hash_get(description);
1858 	if (w)
1859 		goto found;
1860 	if ((w = witness_get()) == NULL)
1861 		return (NULL);
1862 	MPASS(strlen(description) < MAX_W_NAME);
1863 	strcpy(w->w_name, description);
1864 	w->w_class = lock_class;
1865 	w->w_refcount = 1;
1866 	STAILQ_INSERT_HEAD(&w_all, w, w_list);
1867 	if (lock_class->lc_flags & LC_SPINLOCK) {
1868 		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1869 		w_spin_cnt++;
1870 	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1871 		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1872 		w_sleep_cnt++;
1873 	}
1874 
1875 	/* Insert new witness into the hash */
1876 	witness_hash_put(w);
1877 	witness_increment_graph_generation();
1878 	mtx_unlock_spin(&w_mtx);
1879 	return (w);
1880 found:
1881 	w->w_refcount++;
1882 	if (w->w_refcount == 1)
1883 		w->w_class = lock_class;
1884 	mtx_unlock_spin(&w_mtx);
1885 	if (lock_class != w->w_class)
1886 		kassert_panic(
1887 		    "lock (%s) %s does not match earlier (%s) lock",
1888 		    description, lock_class->lc_name,
1889 		    w->w_class->lc_name);
1890 	return (w);
1891 }
1892 
1893 static void
1894 depart(struct witness *w)
1895 {
1896 
1897 	MPASS(w->w_refcount == 0);
1898 	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1899 		w_sleep_cnt--;
1900 	} else {
1901 		w_spin_cnt--;
1902 	}
1903 	/*
1904 	 * Set file to NULL as it may point into a loadable module.
1905 	 */
1906 	w->w_file = NULL;
1907 	w->w_line = 0;
1908 	witness_increment_graph_generation();
1909 }
1910 
1911 
1912 static void
1913 adopt(struct witness *parent, struct witness *child)
1914 {
1915 	int pi, ci, i, j;
1916 
1917 	if (witness_cold == 0)
1918 		mtx_assert(&w_mtx, MA_OWNED);
1919 
1920 	/* If the relationship is already known, there's no work to be done. */
1921 	if (isitmychild(parent, child))
1922 		return;
1923 
1924 	/* When the structure of the graph changes, bump up the generation. */
1925 	witness_increment_graph_generation();
1926 
1927 	/*
1928 	 * The hard part ... create the direct relationship, then propagate all
1929 	 * indirect relationships.
1930 	 */
1931 	pi = parent->w_index;
1932 	ci = child->w_index;
1933 	WITNESS_INDEX_ASSERT(pi);
1934 	WITNESS_INDEX_ASSERT(ci);
1935 	MPASS(pi != ci);
1936 	w_rmatrix[pi][ci] |= WITNESS_PARENT;
1937 	w_rmatrix[ci][pi] |= WITNESS_CHILD;
1938 
1939 	/*
1940 	 * If parent was not already an ancestor of child,
1941 	 * then we increment the descendant and ancestor counters.
1942 	 */
1943 	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
1944 		parent->w_num_descendants++;
1945 		child->w_num_ancestors++;
1946 	}
1947 
1948 	/*
1949 	 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
1950 	 * an ancestor of 'pi' during this loop.
1951 	 */
1952 	for (i = 1; i <= w_max_used_index; i++) {
1953 		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
1954 		    (i != pi))
1955 			continue;
1956 
1957 		/* Find each descendant of 'i' and mark it as a descendant. */
1958 		for (j = 1; j <= w_max_used_index; j++) {
1959 
1960 			/*
1961 			 * Skip children that are already marked as
1962 			 * descendants of 'i'.
1963 			 */
1964 			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
1965 				continue;
1966 
1967 			/*
1968 			 * We are only interested in descendants of 'ci'. Note
1969 			 * that 'ci' itself is counted as a descendant of 'ci'.
1970 			 */
1971 			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
1972 			    (j != ci))
1973 				continue;
1974 			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
1975 			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
1976 			w_data[i].w_num_descendants++;
1977 			w_data[j].w_num_ancestors++;
1978 
1979 			/*
1980 			 * Make sure we aren't marking a node as both an
1981 			 * ancestor and descendant. We should have caught
1982 			 * this as a lock order reversal earlier.
1983 			 */
1984 			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
1985 			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
1986 				printf("witness rmatrix paradox! [%d][%d]=%d "
1987 				    "both ancestor and descendant\n",
1988 				    i, j, w_rmatrix[i][j]);
1989 				kdb_backtrace();
1990 				printf("Witness disabled.\n");
1991 				witness_watch = -1;
1992 			}
1993 			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
1994 			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
1995 				printf("witness rmatrix paradox! [%d][%d]=%d "
1996 				    "both ancestor and descendant\n",
1997 				    j, i, w_rmatrix[j][i]);
1998 				kdb_backtrace();
1999 				printf("Witness disabled.\n");
2000 				witness_watch = -1;
2001 			}
2002 		}
2003 	}
2004 }
2005 
2006 static void
2007 itismychild(struct witness *parent, struct witness *child)
2008 {
2009 	int unlocked;
2010 
2011 	MPASS(child != NULL && parent != NULL);
2012 	if (witness_cold == 0)
2013 		mtx_assert(&w_mtx, MA_OWNED);
2014 
2015 	if (!witness_lock_type_equal(parent, child)) {
2016 		if (witness_cold == 0) {
2017 			unlocked = 1;
2018 			mtx_unlock_spin(&w_mtx);
2019 		} else {
2020 			unlocked = 0;
2021 		}
2022 		kassert_panic(
2023 		    "%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
2024 		    "the same lock type", __func__, parent->w_name,
2025 		    parent->w_class->lc_name, child->w_name,
2026 		    child->w_class->lc_name);
2027 		if (unlocked)
2028 			mtx_lock_spin(&w_mtx);
2029 	}
2030 	adopt(parent, child);
2031 }
2032 
2033 /*
2034  * Generic code for the isitmy*() functions. The rmask parameter is the
2035  * expected relationship of w1 to w2.
2036  */
2037 static int
2038 _isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
2039 {
2040 	unsigned char r1, r2;
2041 	int i1, i2;
2042 
2043 	i1 = w1->w_index;
2044 	i2 = w2->w_index;
2045 	WITNESS_INDEX_ASSERT(i1);
2046 	WITNESS_INDEX_ASSERT(i2);
2047 	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
2048 	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;
2049 
2050 	/* The flags on one better be the inverse of the flags on the other */
2051 	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
2052 	    (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
2053 		/* Don't squawk if we're potentially racing with an update. */
2054 		if (!mtx_owned(&w_mtx))
2055 			return (0);
2056 		printf("%s: rmatrix mismatch between %s (index %d) and %s "
2057 		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
2058 		    "w_rmatrix[%d][%d] == %hhx\n",
2059 		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
2060 		    i2, i1, r2);
2061 		kdb_backtrace();
2062 		printf("Witness disabled.\n");
2063 		witness_watch = -1;
2064 	}
2065 	return (r1 & rmask);
2066 }
2067 
2068 /*
2069  * Checks if @child is a direct child of @parent.
2070  */
2071 static int
2072 isitmychild(struct witness *parent, struct witness *child)
2073 {
2074 
2075 	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
2076 }
2077 
2078 /*
2079  * Checks if @descendant is a direct or inderect descendant of @ancestor.
2080  */
2081 static int
2082 isitmydescendant(struct witness *ancestor, struct witness *descendant)
2083 {
2084 
2085 	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
2086 	    __func__));
2087 }
2088 
2089 static int
2090 blessed(struct witness *w1, struct witness *w2)
2091 {
2092 	int i;
2093 	struct witness_blessed *b;
2094 
2095 	for (i = 0; i < nitems(blessed_list); i++) {
2096 		b = &blessed_list[i];
2097 		if (strcmp(w1->w_name, b->b_lock1) == 0) {
2098 			if (strcmp(w2->w_name, b->b_lock2) == 0)
2099 				return (1);
2100 			continue;
2101 		}
2102 		if (strcmp(w1->w_name, b->b_lock2) == 0)
2103 			if (strcmp(w2->w_name, b->b_lock1) == 0)
2104 				return (1);
2105 	}
2106 	return (0);
2107 }
2108 
2109 static struct witness *
2110 witness_get(void)
2111 {
2112 	struct witness *w;
2113 	int index;
2114 
2115 	if (witness_cold == 0)
2116 		mtx_assert(&w_mtx, MA_OWNED);
2117 
2118 	if (witness_watch == -1) {
2119 		mtx_unlock_spin(&w_mtx);
2120 		return (NULL);
2121 	}
2122 	if (STAILQ_EMPTY(&w_free)) {
2123 		witness_watch = -1;
2124 		mtx_unlock_spin(&w_mtx);
2125 		printf("WITNESS: unable to allocate a new witness object\n");
2126 		return (NULL);
2127 	}
2128 	w = STAILQ_FIRST(&w_free);
2129 	STAILQ_REMOVE_HEAD(&w_free, w_list);
2130 	w_free_cnt--;
2131 	index = w->w_index;
2132 	MPASS(index > 0 && index == w_max_used_index+1 &&
2133 	    index < witness_count);
2134 	bzero(w, sizeof(*w));
2135 	w->w_index = index;
2136 	if (index > w_max_used_index)
2137 		w_max_used_index = index;
2138 	return (w);
2139 }
2140 
2141 static void
2142 witness_free(struct witness *w)
2143 {
2144 
2145 	STAILQ_INSERT_HEAD(&w_free, w, w_list);
2146 	w_free_cnt++;
2147 }
2148 
2149 static struct lock_list_entry *
2150 witness_lock_list_get(void)
2151 {
2152 	struct lock_list_entry *lle;
2153 
2154 	if (witness_watch == -1)
2155 		return (NULL);
2156 	mtx_lock_spin(&w_mtx);
2157 	lle = w_lock_list_free;
2158 	if (lle == NULL) {
2159 		witness_watch = -1;
2160 		mtx_unlock_spin(&w_mtx);
2161 		printf("%s: witness exhausted\n", __func__);
2162 		return (NULL);
2163 	}
2164 	w_lock_list_free = lle->ll_next;
2165 	mtx_unlock_spin(&w_mtx);
2166 	bzero(lle, sizeof(*lle));
2167 	return (lle);
2168 }
2169 
2170 static void
2171 witness_lock_list_free(struct lock_list_entry *lle)
2172 {
2173 
2174 	mtx_lock_spin(&w_mtx);
2175 	lle->ll_next = w_lock_list_free;
2176 	w_lock_list_free = lle;
2177 	mtx_unlock_spin(&w_mtx);
2178 }
2179 
2180 static struct lock_instance *
2181 find_instance(struct lock_list_entry *list, const struct lock_object *lock)
2182 {
2183 	struct lock_list_entry *lle;
2184 	struct lock_instance *instance;
2185 	int i;
2186 
2187 	for (lle = list; lle != NULL; lle = lle->ll_next)
2188 		for (i = lle->ll_count - 1; i >= 0; i--) {
2189 			instance = &lle->ll_children[i];
2190 			if (instance->li_lock == lock)
2191 				return (instance);
2192 		}
2193 	return (NULL);
2194 }
2195 
2196 static void
2197 witness_list_lock(struct lock_instance *instance,
2198     int (*prnt)(const char *fmt, ...))
2199 {
2200 	struct lock_object *lock;
2201 
2202 	lock = instance->li_lock;
2203 	prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2204 	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2205 	if (lock->lo_witness->w_name != lock->lo_name)
2206 		prnt(" (%s)", lock->lo_witness->w_name);
2207 	prnt(" r = %d (%p) locked @ %s:%d\n",
2208 	    instance->li_flags & LI_RECURSEMASK, lock,
2209 	    fixup_filename(instance->li_file), instance->li_line);
2210 }
2211 
2212 static int
2213 witness_output(const char *fmt, ...)
2214 {
2215 	va_list ap;
2216 	int ret;
2217 
2218 	va_start(ap, fmt);
2219 	ret = witness_voutput(fmt, ap);
2220 	va_end(ap);
2221 	return (ret);
2222 }
2223 
2224 static int
2225 witness_voutput(const char *fmt, va_list ap)
2226 {
2227 	int ret;
2228 
2229 	ret = 0;
2230 	switch (witness_channel) {
2231 	case WITNESS_CONSOLE:
2232 		ret = vprintf(fmt, ap);
2233 		break;
2234 	case WITNESS_LOG:
2235 		vlog(LOG_NOTICE, fmt, ap);
2236 		break;
2237 	case WITNESS_NONE:
2238 		break;
2239 	}
2240 	return (ret);
2241 }
2242 
2243 #ifdef DDB
2244 static int
2245 witness_thread_has_locks(struct thread *td)
2246 {
2247 
2248 	if (td->td_sleeplocks == NULL)
2249 		return (0);
2250 	return (td->td_sleeplocks->ll_count != 0);
2251 }
2252 
2253 static int
2254 witness_proc_has_locks(struct proc *p)
2255 {
2256 	struct thread *td;
2257 
2258 	FOREACH_THREAD_IN_PROC(p, td) {
2259 		if (witness_thread_has_locks(td))
2260 			return (1);
2261 	}
2262 	return (0);
2263 }
2264 #endif
2265 
2266 int
2267 witness_list_locks(struct lock_list_entry **lock_list,
2268     int (*prnt)(const char *fmt, ...))
2269 {
2270 	struct lock_list_entry *lle;
2271 	int i, nheld;
2272 
2273 	nheld = 0;
2274 	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2275 		for (i = lle->ll_count - 1; i >= 0; i--) {
2276 			witness_list_lock(&lle->ll_children[i], prnt);
2277 			nheld++;
2278 		}
2279 	return (nheld);
2280 }
2281 
2282 /*
2283  * This is a bit risky at best.  We call this function when we have timed
2284  * out acquiring a spin lock, and we assume that the other CPU is stuck
2285  * with this lock held.  So, we go groveling around in the other CPU's
2286  * per-cpu data to try to find the lock instance for this spin lock to
2287  * see when it was last acquired.
2288  */
2289 void
2290 witness_display_spinlock(struct lock_object *lock, struct thread *owner,
2291     int (*prnt)(const char *fmt, ...))
2292 {
2293 	struct lock_instance *instance;
2294 	struct pcpu *pc;
2295 
2296 	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2297 		return;
2298 	pc = pcpu_find(owner->td_oncpu);
2299 	instance = find_instance(pc->pc_spinlocks, lock);
2300 	if (instance != NULL)
2301 		witness_list_lock(instance, prnt);
2302 }
2303 
2304 void
2305 witness_save(struct lock_object *lock, const char **filep, int *linep)
2306 {
2307 	struct lock_list_entry *lock_list;
2308 	struct lock_instance *instance;
2309 	struct lock_class *class;
2310 
2311 	/*
2312 	 * This function is used independently in locking code to deal with
2313 	 * Giant, SCHEDULER_STOPPED() check can be removed here after Giant
2314 	 * is gone.
2315 	 */
2316 	if (SCHEDULER_STOPPED())
2317 		return;
2318 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2319 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2320 		return;
2321 	class = LOCK_CLASS(lock);
2322 	if (class->lc_flags & LC_SLEEPLOCK)
2323 		lock_list = curthread->td_sleeplocks;
2324 	else {
2325 		if (witness_skipspin)
2326 			return;
2327 		lock_list = PCPU_GET(spinlocks);
2328 	}
2329 	instance = find_instance(lock_list, lock);
2330 	if (instance == NULL) {
2331 		kassert_panic("%s: lock (%s) %s not locked", __func__,
2332 		    class->lc_name, lock->lo_name);
2333 		return;
2334 	}
2335 	*filep = instance->li_file;
2336 	*linep = instance->li_line;
2337 }
2338 
2339 void
2340 witness_restore(struct lock_object *lock, const char *file, int line)
2341 {
2342 	struct lock_list_entry *lock_list;
2343 	struct lock_instance *instance;
2344 	struct lock_class *class;
2345 
2346 	/*
2347 	 * This function is used independently in locking code to deal with
2348 	 * Giant, SCHEDULER_STOPPED() check can be removed here after Giant
2349 	 * is gone.
2350 	 */
2351 	if (SCHEDULER_STOPPED())
2352 		return;
2353 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2354 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2355 		return;
2356 	class = LOCK_CLASS(lock);
2357 	if (class->lc_flags & LC_SLEEPLOCK)
2358 		lock_list = curthread->td_sleeplocks;
2359 	else {
2360 		if (witness_skipspin)
2361 			return;
2362 		lock_list = PCPU_GET(spinlocks);
2363 	}
2364 	instance = find_instance(lock_list, lock);
2365 	if (instance == NULL)
2366 		kassert_panic("%s: lock (%s) %s not locked", __func__,
2367 		    class->lc_name, lock->lo_name);
2368 	lock->lo_witness->w_file = file;
2369 	lock->lo_witness->w_line = line;
2370 	if (instance == NULL)
2371 		return;
2372 	instance->li_file = file;
2373 	instance->li_line = line;
2374 }
2375 
2376 void
2377 witness_assert(const struct lock_object *lock, int flags, const char *file,
2378     int line)
2379 {
2380 #ifdef INVARIANT_SUPPORT
2381 	struct lock_instance *instance;
2382 	struct lock_class *class;
2383 
2384 	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
2385 		return;
2386 	class = LOCK_CLASS(lock);
2387 	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
2388 		instance = find_instance(curthread->td_sleeplocks, lock);
2389 	else if ((class->lc_flags & LC_SPINLOCK) != 0)
2390 		instance = find_instance(PCPU_GET(spinlocks), lock);
2391 	else {
2392 		kassert_panic("Lock (%s) %s is not sleep or spin!",
2393 		    class->lc_name, lock->lo_name);
2394 		return;
2395 	}
2396 	switch (flags) {
2397 	case LA_UNLOCKED:
2398 		if (instance != NULL)
2399 			kassert_panic("Lock (%s) %s locked @ %s:%d.",
2400 			    class->lc_name, lock->lo_name,
2401 			    fixup_filename(file), line);
2402 		break;
2403 	case LA_LOCKED:
2404 	case LA_LOCKED | LA_RECURSED:
2405 	case LA_LOCKED | LA_NOTRECURSED:
2406 	case LA_SLOCKED:
2407 	case LA_SLOCKED | LA_RECURSED:
2408 	case LA_SLOCKED | LA_NOTRECURSED:
2409 	case LA_XLOCKED:
2410 	case LA_XLOCKED | LA_RECURSED:
2411 	case LA_XLOCKED | LA_NOTRECURSED:
2412 		if (instance == NULL) {
2413 			kassert_panic("Lock (%s) %s not locked @ %s:%d.",
2414 			    class->lc_name, lock->lo_name,
2415 			    fixup_filename(file), line);
2416 			break;
2417 		}
2418 		if ((flags & LA_XLOCKED) != 0 &&
2419 		    (instance->li_flags & LI_EXCLUSIVE) == 0)
2420 			kassert_panic(
2421 			    "Lock (%s) %s not exclusively locked @ %s:%d.",
2422 			    class->lc_name, lock->lo_name,
2423 			    fixup_filename(file), line);
2424 		if ((flags & LA_SLOCKED) != 0 &&
2425 		    (instance->li_flags & LI_EXCLUSIVE) != 0)
2426 			kassert_panic(
2427 			    "Lock (%s) %s exclusively locked @ %s:%d.",
2428 			    class->lc_name, lock->lo_name,
2429 			    fixup_filename(file), line);
2430 		if ((flags & LA_RECURSED) != 0 &&
2431 		    (instance->li_flags & LI_RECURSEMASK) == 0)
2432 			kassert_panic("Lock (%s) %s not recursed @ %s:%d.",
2433 			    class->lc_name, lock->lo_name,
2434 			    fixup_filename(file), line);
2435 		if ((flags & LA_NOTRECURSED) != 0 &&
2436 		    (instance->li_flags & LI_RECURSEMASK) != 0)
2437 			kassert_panic("Lock (%s) %s recursed @ %s:%d.",
2438 			    class->lc_name, lock->lo_name,
2439 			    fixup_filename(file), line);
2440 		break;
2441 	default:
2442 		kassert_panic("Invalid lock assertion at %s:%d.",
2443 		    fixup_filename(file), line);
2444 
2445 	}
2446 #endif	/* INVARIANT_SUPPORT */
2447 }
2448 
2449 static void
2450 witness_setflag(struct lock_object *lock, int flag, int set)
2451 {
2452 	struct lock_list_entry *lock_list;
2453 	struct lock_instance *instance;
2454 	struct lock_class *class;
2455 
2456 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2457 		return;
2458 	class = LOCK_CLASS(lock);
2459 	if (class->lc_flags & LC_SLEEPLOCK)
2460 		lock_list = curthread->td_sleeplocks;
2461 	else {
2462 		if (witness_skipspin)
2463 			return;
2464 		lock_list = PCPU_GET(spinlocks);
2465 	}
2466 	instance = find_instance(lock_list, lock);
2467 	if (instance == NULL) {
2468 		kassert_panic("%s: lock (%s) %s not locked", __func__,
2469 		    class->lc_name, lock->lo_name);
2470 		return;
2471 	}
2472 
2473 	if (set)
2474 		instance->li_flags |= flag;
2475 	else
2476 		instance->li_flags &= ~flag;
2477 }
2478 
2479 void
2480 witness_norelease(struct lock_object *lock)
2481 {
2482 
2483 	witness_setflag(lock, LI_NORELEASE, 1);
2484 }
2485 
2486 void
2487 witness_releaseok(struct lock_object *lock)
2488 {
2489 
2490 	witness_setflag(lock, LI_NORELEASE, 0);
2491 }
2492 
2493 #ifdef DDB
2494 static void
2495 witness_ddb_list(struct thread *td)
2496 {
2497 
2498 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2499 	KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2500 
2501 	if (witness_watch < 1)
2502 		return;
2503 
2504 	witness_list_locks(&td->td_sleeplocks, db_printf);
2505 
2506 	/*
2507 	 * We only handle spinlocks if td == curthread.  This is somewhat broken
2508 	 * if td is currently executing on some other CPU and holds spin locks
2509 	 * as we won't display those locks.  If we had a MI way of getting
2510 	 * the per-cpu data for a given cpu then we could use
2511 	 * td->td_oncpu to get the list of spinlocks for this thread
2512 	 * and "fix" this.
2513 	 *
2514 	 * That still wouldn't really fix this unless we locked the scheduler
2515 	 * lock or stopped the other CPU to make sure it wasn't changing the
2516 	 * list out from under us.  It is probably best to just not try to
2517 	 * handle threads on other CPU's for now.
2518 	 */
2519 	if (td == curthread && PCPU_GET(spinlocks) != NULL)
2520 		witness_list_locks(PCPU_PTR(spinlocks), db_printf);
2521 }
2522 
2523 DB_SHOW_COMMAND(locks, db_witness_list)
2524 {
2525 	struct thread *td;
2526 
2527 	if (have_addr)
2528 		td = db_lookup_thread(addr, true);
2529 	else
2530 		td = kdb_thread;
2531 	witness_ddb_list(td);
2532 }
2533 
2534 DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
2535 {
2536 	struct thread *td;
2537 	struct proc *p;
2538 
2539 	/*
2540 	 * It would be nice to list only threads and processes that actually
2541 	 * held sleep locks, but that information is currently not exported
2542 	 * by WITNESS.
2543 	 */
2544 	FOREACH_PROC_IN_SYSTEM(p) {
2545 		if (!witness_proc_has_locks(p))
2546 			continue;
2547 		FOREACH_THREAD_IN_PROC(p, td) {
2548 			if (!witness_thread_has_locks(td))
2549 				continue;
2550 			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2551 			    p->p_comm, td, td->td_tid);
2552 			witness_ddb_list(td);
2553 			if (db_pager_quit)
2554 				return;
2555 		}
2556 	}
2557 }
2558 DB_SHOW_ALIAS(alllocks, db_witness_list_all)
2559 
2560 DB_SHOW_COMMAND(witness, db_witness_display)
2561 {
2562 
2563 	witness_ddb_display(db_printf);
2564 }
2565 #endif
2566 
2567 static void
2568 sbuf_print_witness_badstacks(struct sbuf *sb, size_t *oldidx)
2569 {
2570 	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
2571 	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
2572 	int generation, i, j;
2573 
2574 	tmp_data1 = NULL;
2575 	tmp_data2 = NULL;
2576 	tmp_w1 = NULL;
2577 	tmp_w2 = NULL;
2578 
2579 	/* Allocate and init temporary storage space. */
2580 	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2581 	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2582 	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2583 	    M_WAITOK | M_ZERO);
2584 	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2585 	    M_WAITOK | M_ZERO);
2586 	stack_zero(&tmp_data1->wlod_stack);
2587 	stack_zero(&tmp_data2->wlod_stack);
2588 
2589 restart:
2590 	mtx_lock_spin(&w_mtx);
2591 	generation = w_generation;
2592 	mtx_unlock_spin(&w_mtx);
2593 	sbuf_printf(sb, "Number of known direct relationships is %d\n",
2594 	    w_lohash.wloh_count);
2595 	for (i = 1; i < w_max_used_index; i++) {
2596 		mtx_lock_spin(&w_mtx);
2597 		if (generation != w_generation) {
2598 			mtx_unlock_spin(&w_mtx);
2599 
2600 			/* The graph has changed, try again. */
2601 			*oldidx = 0;
2602 			sbuf_clear(sb);
2603 			goto restart;
2604 		}
2605 
2606 		w1 = &w_data[i];
2607 		if (w1->w_reversed == 0) {
2608 			mtx_unlock_spin(&w_mtx);
2609 			continue;
2610 		}
2611 
2612 		/* Copy w1 locally so we can release the spin lock. */
2613 		*tmp_w1 = *w1;
2614 		mtx_unlock_spin(&w_mtx);
2615 
2616 		if (tmp_w1->w_reversed == 0)
2617 			continue;
2618 		for (j = 1; j < w_max_used_index; j++) {
2619 			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
2620 				continue;
2621 
2622 			mtx_lock_spin(&w_mtx);
2623 			if (generation != w_generation) {
2624 				mtx_unlock_spin(&w_mtx);
2625 
2626 				/* The graph has changed, try again. */
2627 				*oldidx = 0;
2628 				sbuf_clear(sb);
2629 				goto restart;
2630 			}
2631 
2632 			w2 = &w_data[j];
2633 			data1 = witness_lock_order_get(w1, w2);
2634 			data2 = witness_lock_order_get(w2, w1);
2635 
2636 			/*
2637 			 * Copy information locally so we can release the
2638 			 * spin lock.
2639 			 */
2640 			*tmp_w2 = *w2;
2641 
2642 			if (data1) {
2643 				stack_zero(&tmp_data1->wlod_stack);
2644 				stack_copy(&data1->wlod_stack,
2645 				    &tmp_data1->wlod_stack);
2646 			}
2647 			if (data2 && data2 != data1) {
2648 				stack_zero(&tmp_data2->wlod_stack);
2649 				stack_copy(&data2->wlod_stack,
2650 				    &tmp_data2->wlod_stack);
2651 			}
2652 			mtx_unlock_spin(&w_mtx);
2653 
2654 			if (blessed(tmp_w1, tmp_w2))
2655 				continue;
2656 
2657 			sbuf_printf(sb,
2658 	    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
2659 			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2660 			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2661 			if (data1) {
2662 				sbuf_printf(sb,
2663 			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2664 				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2665 				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2666 				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
2667 				sbuf_printf(sb, "\n");
2668 			}
2669 			if (data2 && data2 != data1) {
2670 				sbuf_printf(sb,
2671 			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2672 				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
2673 				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
2674 				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
2675 				sbuf_printf(sb, "\n");
2676 			}
2677 		}
2678 	}
2679 	mtx_lock_spin(&w_mtx);
2680 	if (generation != w_generation) {
2681 		mtx_unlock_spin(&w_mtx);
2682 
2683 		/*
2684 		 * The graph changed while we were printing stack data,
2685 		 * try again.
2686 		 */
2687 		*oldidx = 0;
2688 		sbuf_clear(sb);
2689 		goto restart;
2690 	}
2691 	mtx_unlock_spin(&w_mtx);
2692 
2693 	/* Free temporary storage space. */
2694 	free(tmp_data1, M_TEMP);
2695 	free(tmp_data2, M_TEMP);
2696 	free(tmp_w1, M_TEMP);
2697 	free(tmp_w2, M_TEMP);
2698 }
2699 
2700 static int
2701 sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
2702 {
2703 	struct sbuf *sb;
2704 	int error;
2705 
2706 	if (witness_watch < 1) {
2707 		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2708 		return (error);
2709 	}
2710 	if (witness_cold) {
2711 		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2712 		return (error);
2713 	}
2714 	error = 0;
2715 	sb = sbuf_new(NULL, NULL, badstack_sbuf_size, SBUF_AUTOEXTEND);
2716 	if (sb == NULL)
2717 		return (ENOMEM);
2718 
2719 	sbuf_print_witness_badstacks(sb, &req->oldidx);
2720 
2721 	sbuf_finish(sb);
2722 	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2723 	sbuf_delete(sb);
2724 
2725 	return (error);
2726 }
2727 
2728 #ifdef DDB
2729 static int
2730 sbuf_db_printf_drain(void *arg __unused, const char *data, int len)
2731 {
2732 
2733 	return (db_printf("%.*s", len, data));
2734 }
2735 
2736 DB_SHOW_COMMAND(badstacks, db_witness_badstacks)
2737 {
2738 	struct sbuf sb;
2739 	char buffer[128];
2740 	size_t dummy;
2741 
2742 	sbuf_new(&sb, buffer, sizeof(buffer), SBUF_FIXEDLEN);
2743 	sbuf_set_drain(&sb, sbuf_db_printf_drain, NULL);
2744 	sbuf_print_witness_badstacks(&sb, &dummy);
2745 	sbuf_finish(&sb);
2746 }
2747 #endif
2748 
2749 static int
2750 sysctl_debug_witness_channel(SYSCTL_HANDLER_ARGS)
2751 {
2752 	static const struct {
2753 		enum witness_channel channel;
2754 		const char *name;
2755 	} channels[] = {
2756 		{ WITNESS_CONSOLE, "console" },
2757 		{ WITNESS_LOG, "log" },
2758 		{ WITNESS_NONE, "none" },
2759 	};
2760 	char buf[16];
2761 	u_int i;
2762 	int error;
2763 
2764 	buf[0] = '\0';
2765 	for (i = 0; i < nitems(channels); i++)
2766 		if (witness_channel == channels[i].channel) {
2767 			snprintf(buf, sizeof(buf), "%s", channels[i].name);
2768 			break;
2769 		}
2770 
2771 	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
2772 	if (error != 0 || req->newptr == NULL)
2773 		return (error);
2774 
2775 	error = EINVAL;
2776 	for (i = 0; i < nitems(channels); i++)
2777 		if (strcmp(channels[i].name, buf) == 0) {
2778 			witness_channel = channels[i].channel;
2779 			error = 0;
2780 			break;
2781 		}
2782 	return (error);
2783 }
2784 
2785 static int
2786 sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
2787 {
2788 	struct witness *w;
2789 	struct sbuf *sb;
2790 	int error;
2791 
2792 #ifdef __i386__
2793 	error = SYSCTL_OUT(req, w_notallowed, sizeof(w_notallowed));
2794 	return (error);
2795 #endif
2796 
2797 	if (witness_watch < 1) {
2798 		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2799 		return (error);
2800 	}
2801 	if (witness_cold) {
2802 		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2803 		return (error);
2804 	}
2805 	error = 0;
2806 
2807 	error = sysctl_wire_old_buffer(req, 0);
2808 	if (error != 0)
2809 		return (error);
2810 	sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req);
2811 	if (sb == NULL)
2812 		return (ENOMEM);
2813 	sbuf_printf(sb, "\n");
2814 
2815 	mtx_lock_spin(&w_mtx);
2816 	STAILQ_FOREACH(w, &w_all, w_list)
2817 		w->w_displayed = 0;
2818 	STAILQ_FOREACH(w, &w_all, w_list)
2819 		witness_add_fullgraph(sb, w);
2820 	mtx_unlock_spin(&w_mtx);
2821 
2822 	/*
2823 	 * Close the sbuf and return to userland.
2824 	 */
2825 	error = sbuf_finish(sb);
2826 	sbuf_delete(sb);
2827 
2828 	return (error);
2829 }
2830 
2831 static int
2832 sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
2833 {
2834 	int error, value;
2835 
2836 	value = witness_watch;
2837 	error = sysctl_handle_int(oidp, &value, 0, req);
2838 	if (error != 0 || req->newptr == NULL)
2839 		return (error);
2840 	if (value > 1 || value < -1 ||
2841 	    (witness_watch == -1 && value != witness_watch))
2842 		return (EINVAL);
2843 	witness_watch = value;
2844 	return (0);
2845 }
2846 
2847 static void
2848 witness_add_fullgraph(struct sbuf *sb, struct witness *w)
2849 {
2850 	int i;
2851 
2852 	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
2853 		return;
2854 	w->w_displayed = 1;
2855 
2856 	WITNESS_INDEX_ASSERT(w->w_index);
2857 	for (i = 1; i <= w_max_used_index; i++) {
2858 		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
2859 			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
2860 			    w_data[i].w_name);
2861 			witness_add_fullgraph(sb, &w_data[i]);
2862 		}
2863 	}
2864 }
2865 
2866 /*
2867  * A simple hash function. Takes a key pointer and a key size. If size == 0,
2868  * interprets the key as a string and reads until the null
2869  * terminator. Otherwise, reads the first size bytes. Returns an unsigned 32-bit
2870  * hash value computed from the key.
2871  */
2872 static uint32_t
2873 witness_hash_djb2(const uint8_t *key, uint32_t size)
2874 {
2875 	unsigned int hash = 5381;
2876 	int i;
2877 
2878 	/* hash = hash * 33 + key[i] */
2879 	if (size)
2880 		for (i = 0; i < size; i++)
2881 			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2882 	else
2883 		for (i = 0; key[i] != 0; i++)
2884 			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2885 
2886 	return (hash);
2887 }
2888 
2889 
2890 /*
2891  * Initializes the two witness hash tables. Called exactly once from
2892  * witness_initialize().
2893  */
2894 static void
2895 witness_init_hash_tables(void)
2896 {
2897 	int i;
2898 
2899 	MPASS(witness_cold);
2900 
2901 	/* Initialize the hash tables. */
2902 	for (i = 0; i < WITNESS_HASH_SIZE; i++)
2903 		w_hash.wh_array[i] = NULL;
2904 
2905 	w_hash.wh_size = WITNESS_HASH_SIZE;
2906 	w_hash.wh_count = 0;
2907 
2908 	/* Initialize the lock order data hash. */
2909 	w_lofree = NULL;
2910 	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
2911 		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
2912 		w_lodata[i].wlod_next = w_lofree;
2913 		w_lofree = &w_lodata[i];
2914 	}
2915 	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
2916 	w_lohash.wloh_count = 0;
2917 	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
2918 		w_lohash.wloh_array[i] = NULL;
2919 }
2920 
2921 static struct witness *
2922 witness_hash_get(const char *key)
2923 {
2924 	struct witness *w;
2925 	uint32_t hash;
2926 
2927 	MPASS(key != NULL);
2928 	if (witness_cold == 0)
2929 		mtx_assert(&w_mtx, MA_OWNED);
2930 	hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
2931 	w = w_hash.wh_array[hash];
2932 	while (w != NULL) {
2933 		if (strcmp(w->w_name, key) == 0)
2934 			goto out;
2935 		w = w->w_hash_next;
2936 	}
2937 
2938 out:
2939 	return (w);
2940 }
2941 
2942 static void
2943 witness_hash_put(struct witness *w)
2944 {
2945 	uint32_t hash;
2946 
2947 	MPASS(w != NULL);
2948 	MPASS(w->w_name != NULL);
2949 	if (witness_cold == 0)
2950 		mtx_assert(&w_mtx, MA_OWNED);
2951 	KASSERT(witness_hash_get(w->w_name) == NULL,
2952 	    ("%s: trying to add a hash entry that already exists!", __func__));
2953 	KASSERT(w->w_hash_next == NULL,
2954 	    ("%s: w->w_hash_next != NULL", __func__));
2955 
2956 	hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
2957 	w->w_hash_next = w_hash.wh_array[hash];
2958 	w_hash.wh_array[hash] = w;
2959 	w_hash.wh_count++;
2960 }
2961 
2962 
2963 static struct witness_lock_order_data *
2964 witness_lock_order_get(struct witness *parent, struct witness *child)
2965 {
2966 	struct witness_lock_order_data *data = NULL;
2967 	struct witness_lock_order_key key;
2968 	unsigned int hash;
2969 
2970 	MPASS(parent != NULL && child != NULL);
2971 	key.from = parent->w_index;
2972 	key.to = child->w_index;
2973 	WITNESS_INDEX_ASSERT(key.from);
2974 	WITNESS_INDEX_ASSERT(key.to);
2975 	if ((w_rmatrix[parent->w_index][child->w_index]
2976 	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
2977 		goto out;
2978 
2979 	hash = witness_hash_djb2((const char*)&key,
2980 	    sizeof(key)) % w_lohash.wloh_size;
2981 	data = w_lohash.wloh_array[hash];
2982 	while (data != NULL) {
2983 		if (witness_lock_order_key_equal(&data->wlod_key, &key))
2984 			break;
2985 		data = data->wlod_next;
2986 	}
2987 
2988 out:
2989 	return (data);
2990 }
2991 
2992 /*
2993  * Verify that parent and child have a known relationship, are not the same,
2994  * and child is actually a child of parent.  This is done without w_mtx
2995  * to avoid contention in the common case.
2996  */
2997 static int
2998 witness_lock_order_check(struct witness *parent, struct witness *child)
2999 {
3000 
3001 	if (parent != child &&
3002 	    w_rmatrix[parent->w_index][child->w_index]
3003 	    & WITNESS_LOCK_ORDER_KNOWN &&
3004 	    isitmychild(parent, child))
3005 		return (1);
3006 
3007 	return (0);
3008 }
3009 
3010 static int
3011 witness_lock_order_add(struct witness *parent, struct witness *child)
3012 {
3013 	struct witness_lock_order_data *data = NULL;
3014 	struct witness_lock_order_key key;
3015 	unsigned int hash;
3016 
3017 	MPASS(parent != NULL && child != NULL);
3018 	key.from = parent->w_index;
3019 	key.to = child->w_index;
3020 	WITNESS_INDEX_ASSERT(key.from);
3021 	WITNESS_INDEX_ASSERT(key.to);
3022 	if (w_rmatrix[parent->w_index][child->w_index]
3023 	    & WITNESS_LOCK_ORDER_KNOWN)
3024 		return (1);
3025 
3026 	hash = witness_hash_djb2((const char*)&key,
3027 	    sizeof(key)) % w_lohash.wloh_size;
3028 	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
3029 	data = w_lofree;
3030 	if (data == NULL)
3031 		return (0);
3032 	w_lofree = data->wlod_next;
3033 	data->wlod_next = w_lohash.wloh_array[hash];
3034 	data->wlod_key = key;
3035 	w_lohash.wloh_array[hash] = data;
3036 	w_lohash.wloh_count++;
3037 	stack_zero(&data->wlod_stack);
3038 	stack_save(&data->wlod_stack);
3039 	return (1);
3040 }
3041 
3042 /* Call this whenever the structure of the witness graph changes. */
3043 static void
3044 witness_increment_graph_generation(void)
3045 {
3046 
3047 	if (witness_cold == 0)
3048 		mtx_assert(&w_mtx, MA_OWNED);
3049 	w_generation++;
3050 }
3051 
3052 static int
3053 witness_output_drain(void *arg __unused, const char *data, int len)
3054 {
3055 
3056 	witness_output("%.*s", len, data);
3057 	return (len);
3058 }
3059 
3060 static void
3061 witness_debugger(int cond, const char *msg)
3062 {
3063 	char buf[32];
3064 	struct sbuf sb;
3065 	struct stack st;
3066 
3067 	if (!cond)
3068 		return;
3069 
3070 	if (witness_trace) {
3071 		sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
3072 		sbuf_set_drain(&sb, witness_output_drain, NULL);
3073 
3074 		stack_zero(&st);
3075 		stack_save(&st);
3076 		witness_output("stack backtrace:\n");
3077 		stack_sbuf_print_ddb(&sb, &st);
3078 
3079 		sbuf_finish(&sb);
3080 	}
3081 
3082 #ifdef KDB
3083 	if (witness_kdb)
3084 		kdb_enter(KDB_WHY_WITNESS, msg);
3085 #endif
3086 }
3087