xref: /freebsd/sys/kern/subr_witness.c (revision 47dd1d1b619cc035b82b49a91a25544309ff95ae)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2008 Isilon Systems, Inc.
5  * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
6  * Copyright (c) 1998 Berkeley Software Design, Inc.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. Berkeley Software Design Inc's name may not be used to endorse or
18  *    promote products derived from this software without specific prior
19  *    written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
34  *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
35  */
36 
37 /*
38  * Implementation of the `witness' lock verifier.  Originally implemented for
39  * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
40  * classes in FreeBSD.
41  */
42 
43 /*
44  *	Main Entry: witness
45  *	Pronunciation: 'wit-n&s
46  *	Function: noun
47  *	Etymology: Middle English witnesse, from Old English witnes knowledge,
48  *	    testimony, witness, from 2wit
49  *	Date: before 12th century
50  *	1 : attestation of a fact or event : TESTIMONY
51  *	2 : one that gives evidence; specifically : one who testifies in
52  *	    a cause or before a judicial tribunal
53  *	3 : one asked to be present at a transaction so as to be able to
54  *	    testify to its having taken place
55  *	4 : one who has personal knowledge of something
56  *	5 a : something serving as evidence or proof : SIGN
57  *	  b : public affirmation by word or example of usually
58  *	      religious faith or conviction <the heroic witness to divine
59  *	      life -- Pilot>
60  *	6 capitalized : a member of the Jehovah's Witnesses
61  */
62 
63 /*
64  * Special rules concerning Giant and lock orders:
65  *
66  * 1) Giant must be acquired before any other mutexes.  Stated another way,
67  *    no other mutex may be held when Giant is acquired.
68  *
69  * 2) Giant must be released when blocking on a sleepable lock.
70  *
71  * This rule is less obvious, but is a result of Giant providing the same
72  * semantics as spl().  Basically, when a thread sleeps, it must release
73  * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
74  * 2).
75  *
76  * 3) Giant may be acquired before or after sleepable locks.
77  *
78  * This rule is also not quite as obvious.  Giant may be acquired after
79  * a sleepable lock because it is a non-sleepable lock and non-sleepable
80  * locks may always be acquired while holding a sleepable lock.  The second
81  * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
82  * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
83  * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
84  * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
85  * execute.  Thus, acquiring Giant both before and after a sleepable lock
86  * will not result in a lock order reversal.
87  */
88 
89 #include <sys/cdefs.h>
90 __FBSDID("$FreeBSD$");
91 
92 #include "opt_ddb.h"
93 #include "opt_hwpmc_hooks.h"
94 #include "opt_stack.h"
95 #include "opt_witness.h"
96 
97 #include <sys/param.h>
98 #include <sys/bus.h>
99 #include <sys/kdb.h>
100 #include <sys/kernel.h>
101 #include <sys/ktr.h>
102 #include <sys/lock.h>
103 #include <sys/malloc.h>
104 #include <sys/mutex.h>
105 #include <sys/priv.h>
106 #include <sys/proc.h>
107 #include <sys/sbuf.h>
108 #include <sys/sched.h>
109 #include <sys/stack.h>
110 #include <sys/sysctl.h>
111 #include <sys/syslog.h>
112 #include <sys/systm.h>
113 
114 #ifdef DDB
115 #include <ddb/ddb.h>
116 #endif
117 
118 #include <machine/stdarg.h>
119 
120 #if !defined(DDB) && !defined(STACK)
121 #error "DDB or STACK options are required for WITNESS"
122 #endif
123 
124 /* Note that these traces do not work with KTR_ALQ. */
125 #if 0
126 #define	KTR_WITNESS	KTR_SUBSYS
127 #else
128 #define	KTR_WITNESS	0
129 #endif
130 
131 #define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
132 #define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
133 #define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */
134 
135 /* Define this to check for blessed mutexes */
136 #undef BLESSING
137 
138 #ifndef WITNESS_COUNT
139 #define	WITNESS_COUNT 		1536
140 #endif
141 #define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
142 #define	WITNESS_PENDLIST	(512 + (MAXCPU * 4))
143 
144 /* Allocate 256 KB of stack data space */
145 #define	WITNESS_LO_DATA_COUNT	2048
146 
147 /* Prime, gives load factor of ~2 at full load */
148 #define	WITNESS_LO_HASH_SIZE	1021
149 
150 /*
151  * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
152  * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
153  * probably be safe for the most part, but it's still a SWAG.
154  */
155 #define	LOCK_NCHILDREN	5
156 #define	LOCK_CHILDCOUNT	2048
157 
158 #define	MAX_W_NAME	64
159 
160 #define	FULLGRAPH_SBUF_SIZE	512
161 
162 /*
163  * These flags go in the witness relationship matrix and describe the
164  * relationship between any two struct witness objects.
165  */
166 #define	WITNESS_UNRELATED        0x00    /* No lock order relation. */
167 #define	WITNESS_PARENT           0x01    /* Parent, aka direct ancestor. */
168 #define	WITNESS_ANCESTOR         0x02    /* Direct or indirect ancestor. */
169 #define	WITNESS_CHILD            0x04    /* Child, aka direct descendant. */
170 #define	WITNESS_DESCENDANT       0x08    /* Direct or indirect descendant. */
171 #define	WITNESS_ANCESTOR_MASK    (WITNESS_PARENT | WITNESS_ANCESTOR)
172 #define	WITNESS_DESCENDANT_MASK  (WITNESS_CHILD | WITNESS_DESCENDANT)
173 #define	WITNESS_RELATED_MASK						\
174 	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
175 #define	WITNESS_REVERSAL         0x10    /* A lock order reversal has been
176 					  * observed. */
177 #define	WITNESS_RESERVED1        0x20    /* Unused flag, reserved. */
178 #define	WITNESS_RESERVED2        0x40    /* Unused flag, reserved. */
179 #define	WITNESS_LOCK_ORDER_KNOWN 0x80    /* This lock order is known. */
180 
181 /* Descendant to ancestor flags */
182 #define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)
183 
184 /* Ancestor to descendant flags */
185 #define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)
186 
187 #define	WITNESS_INDEX_ASSERT(i)						\
188 	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < witness_count)
189 
190 static MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");
191 
192 /*
193  * Lock instances.  A lock instance is the data associated with a lock while
194  * it is held by witness.  For example, a lock instance will hold the
195  * recursion count of a lock.  Lock instances are held in lists.  Spin locks
196  * are held in a per-cpu list while sleep locks are held in per-thread list.
197  */
198 struct lock_instance {
199 	struct lock_object	*li_lock;
200 	const char		*li_file;
201 	int			li_line;
202 	u_int			li_flags;
203 };
204 
205 /*
206  * A simple list type used to build the list of locks held by a thread
207  * or CPU.  We can't simply embed the list in struct lock_object since a
208  * lock may be held by more than one thread if it is a shared lock.  Locks
209  * are added to the head of the list, so we fill up each list entry from
210  * "the back" logically.  To ease some of the arithmetic, we actually fill
211  * in each list entry the normal way (children[0] then children[1], etc.) but
212  * when we traverse the list we read children[count-1] as the first entry
213  * down to children[0] as the final entry.
214  */
215 struct lock_list_entry {
216 	struct lock_list_entry	*ll_next;
217 	struct lock_instance	ll_children[LOCK_NCHILDREN];
218 	u_int			ll_count;
219 };
220 
221 /*
222  * The main witness structure. One of these per named lock type in the system
223  * (for example, "vnode interlock").
224  */
225 struct witness {
226 	char  			w_name[MAX_W_NAME];
227 	uint32_t 		w_index;  /* Index in the relationship matrix */
228 	struct lock_class	*w_class;
229 	STAILQ_ENTRY(witness) 	w_list;		/* List of all witnesses. */
230 	STAILQ_ENTRY(witness) 	w_typelist;	/* Witnesses of a type. */
231 	struct witness		*w_hash_next; /* Linked list in hash buckets. */
232 	const char		*w_file; /* File where last acquired */
233 	uint32_t 		w_line; /* Line where last acquired */
234 	uint32_t 		w_refcount;
235 	uint16_t 		w_num_ancestors; /* direct/indirect
236 						  * ancestor count */
237 	uint16_t 		w_num_descendants; /* direct/indirect
238 						    * descendant count */
239 	int16_t 		w_ddb_level;
240 	unsigned		w_displayed:1;
241 	unsigned		w_reversed:1;
242 };
243 
244 STAILQ_HEAD(witness_list, witness);
245 
246 /*
247  * The witness hash table. Keys are witness names (const char *), elements are
248  * witness objects (struct witness *).
249  */
250 struct witness_hash {
251 	struct witness	*wh_array[WITNESS_HASH_SIZE];
252 	uint32_t	wh_size;
253 	uint32_t	wh_count;
254 };
255 
256 /*
257  * Key type for the lock order data hash table.
258  */
259 struct witness_lock_order_key {
260 	uint16_t	from;
261 	uint16_t	to;
262 };
263 
264 struct witness_lock_order_data {
265 	struct stack			wlod_stack;
266 	struct witness_lock_order_key	wlod_key;
267 	struct witness_lock_order_data	*wlod_next;
268 };
269 
270 /*
271  * The witness lock order data hash table. Keys are witness index tuples
272  * (struct witness_lock_order_key), elements are lock order data objects
273  * (struct witness_lock_order_data).
274  */
275 struct witness_lock_order_hash {
276 	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
277 	u_int	wloh_size;
278 	u_int	wloh_count;
279 };
280 
281 #ifdef BLESSING
282 struct witness_blessed {
283 	const char	*b_lock1;
284 	const char	*b_lock2;
285 };
286 #endif
287 
288 struct witness_pendhelp {
289 	const char		*wh_type;
290 	struct lock_object	*wh_lock;
291 };
292 
293 struct witness_order_list_entry {
294 	const char		*w_name;
295 	struct lock_class	*w_class;
296 };
297 
298 /*
299  * Returns 0 if one of the locks is a spin lock and the other is not.
300  * Returns 1 otherwise.
301  */
302 static __inline int
303 witness_lock_type_equal(struct witness *w1, struct witness *w2)
304 {
305 
306 	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
307 		(w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
308 }
309 
310 static __inline int
311 witness_lock_order_key_equal(const struct witness_lock_order_key *a,
312     const struct witness_lock_order_key *b)
313 {
314 
315 	return (a->from == b->from && a->to == b->to);
316 }
317 
318 static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
319 		    const char *fname);
320 static void	adopt(struct witness *parent, struct witness *child);
321 #ifdef BLESSING
322 static int	blessed(struct witness *, struct witness *);
323 #endif
324 static void	depart(struct witness *w);
325 static struct witness	*enroll(const char *description,
326 			    struct lock_class *lock_class);
327 static struct lock_instance	*find_instance(struct lock_list_entry *list,
328 				    const struct lock_object *lock);
329 static int	isitmychild(struct witness *parent, struct witness *child);
330 static int	isitmydescendant(struct witness *parent, struct witness *child);
331 static void	itismychild(struct witness *parent, struct witness *child);
332 static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
333 static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
334 static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
335 static int	sysctl_debug_witness_channel(SYSCTL_HANDLER_ARGS);
336 static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
337 #ifdef DDB
338 static void	witness_ddb_compute_levels(void);
339 static void	witness_ddb_display(int(*)(const char *fmt, ...));
340 static void	witness_ddb_display_descendants(int(*)(const char *fmt, ...),
341 		    struct witness *, int indent);
342 static void	witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
343 		    struct witness_list *list);
344 static void	witness_ddb_level_descendants(struct witness *parent, int l);
345 static void	witness_ddb_list(struct thread *td);
346 #endif
347 static void	witness_debugger(int cond, const char *msg);
348 static void	witness_free(struct witness *m);
349 static struct witness	*witness_get(void);
350 static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
351 static struct witness	*witness_hash_get(const char *key);
352 static void	witness_hash_put(struct witness *w);
353 static void	witness_init_hash_tables(void);
354 static void	witness_increment_graph_generation(void);
355 static void	witness_lock_list_free(struct lock_list_entry *lle);
356 static struct lock_list_entry	*witness_lock_list_get(void);
357 static int	witness_lock_order_add(struct witness *parent,
358 		    struct witness *child);
359 static int	witness_lock_order_check(struct witness *parent,
360 		    struct witness *child);
361 static struct witness_lock_order_data	*witness_lock_order_get(
362 					    struct witness *parent,
363 					    struct witness *child);
364 static void	witness_list_lock(struct lock_instance *instance,
365 		    int (*prnt)(const char *fmt, ...));
366 static int	witness_output(const char *fmt, ...) __printflike(1, 2);
367 static int	witness_voutput(const char *fmt, va_list ap) __printflike(1, 0);
368 static void	witness_setflag(struct lock_object *lock, int flag, int set);
369 
370 static SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL,
371     "Witness Locking");
372 
373 /*
374  * If set to 0, lock order checking is disabled.  If set to -1,
375  * witness is completely disabled.  Otherwise witness performs full
376  * lock order checking for all locks.  At runtime, lock order checking
377  * may be toggled.  However, witness cannot be reenabled once it is
378  * completely disabled.
379  */
380 static int witness_watch = 1;
381 SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RWTUN | CTLTYPE_INT, NULL, 0,
382     sysctl_debug_witness_watch, "I", "witness is watching lock operations");
383 
384 #ifdef KDB
385 /*
386  * When KDB is enabled and witness_kdb is 1, it will cause the system
387  * to drop into kdebug() when:
388  *	- a lock hierarchy violation occurs
389  *	- locks are held when going to sleep.
390  */
391 #ifdef WITNESS_KDB
392 int	witness_kdb = 1;
393 #else
394 int	witness_kdb = 0;
395 #endif
396 SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RWTUN, &witness_kdb, 0, "");
397 #endif /* KDB */
398 
399 #if defined(DDB) || defined(KDB)
400 /*
401  * When DDB or KDB is enabled and witness_trace is 1, it will cause the system
402  * to print a stack trace:
403  *	- a lock hierarchy violation occurs
404  *	- locks are held when going to sleep.
405  */
406 int	witness_trace = 1;
407 SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RWTUN, &witness_trace, 0, "");
408 #endif /* DDB || KDB */
409 
410 #ifdef WITNESS_SKIPSPIN
411 int	witness_skipspin = 1;
412 #else
413 int	witness_skipspin = 0;
414 #endif
415 SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin, 0, "");
416 
417 int badstack_sbuf_size;
418 
419 int witness_count = WITNESS_COUNT;
420 SYSCTL_INT(_debug_witness, OID_AUTO, witness_count, CTLFLAG_RDTUN,
421     &witness_count, 0, "");
422 
423 /*
424  * Output channel for witness messages.  By default we print to the console.
425  */
426 enum witness_channel {
427 	WITNESS_CONSOLE,
428 	WITNESS_LOG,
429 	WITNESS_NONE,
430 };
431 
432 static enum witness_channel witness_channel = WITNESS_CONSOLE;
433 SYSCTL_PROC(_debug_witness, OID_AUTO, output_channel, CTLTYPE_STRING |
434     CTLFLAG_RWTUN, NULL, 0, sysctl_debug_witness_channel, "A",
435     "Output channel for warnings");
436 
437 /*
438  * Call this to print out the relations between locks.
439  */
440 SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
441     NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");
442 
443 /*
444  * Call this to print out the witness faulty stacks.
445  */
446 SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
447     NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");
448 
449 static struct mtx w_mtx;
450 
451 /* w_list */
452 static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
453 static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
454 
455 /* w_typelist */
456 static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
457 static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
458 
459 /* lock list */
460 static struct lock_list_entry *w_lock_list_free = NULL;
461 static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
462 static u_int pending_cnt;
463 
464 static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
465 SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
466 SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
467 SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
468     "");
469 
470 static struct witness *w_data;
471 static uint8_t **w_rmatrix;
472 static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
473 static struct witness_hash w_hash;	/* The witness hash table. */
474 
475 /* The lock order data hash */
476 static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
477 static struct witness_lock_order_data *w_lofree = NULL;
478 static struct witness_lock_order_hash w_lohash;
479 static int w_max_used_index = 0;
480 static unsigned int w_generation = 0;
481 static const char w_notrunning[] = "Witness not running\n";
482 static const char w_stillcold[] = "Witness is still cold\n";
483 #ifdef __i386__
484 static const char w_notallowed[] = "The sysctl is disabled on the arch\n";
485 #endif
486 
487 static struct witness_order_list_entry order_lists[] = {
488 	/*
489 	 * sx locks
490 	 */
491 	{ "proctree", &lock_class_sx },
492 	{ "allproc", &lock_class_sx },
493 	{ "allprison", &lock_class_sx },
494 	{ NULL, NULL },
495 	/*
496 	 * Various mutexes
497 	 */
498 	{ "Giant", &lock_class_mtx_sleep },
499 	{ "pipe mutex", &lock_class_mtx_sleep },
500 	{ "sigio lock", &lock_class_mtx_sleep },
501 	{ "process group", &lock_class_mtx_sleep },
502 	{ "process lock", &lock_class_mtx_sleep },
503 	{ "session", &lock_class_mtx_sleep },
504 	{ "uidinfo hash", &lock_class_rw },
505 #ifdef	HWPMC_HOOKS
506 	{ "pmc-sleep", &lock_class_mtx_sleep },
507 #endif
508 	{ "time lock", &lock_class_mtx_sleep },
509 	{ NULL, NULL },
510 	/*
511 	 * umtx
512 	 */
513 	{ "umtx lock", &lock_class_mtx_sleep },
514 	{ NULL, NULL },
515 	/*
516 	 * Sockets
517 	 */
518 	{ "accept", &lock_class_mtx_sleep },
519 	{ "so_snd", &lock_class_mtx_sleep },
520 	{ "so_rcv", &lock_class_mtx_sleep },
521 	{ "sellck", &lock_class_mtx_sleep },
522 	{ NULL, NULL },
523 	/*
524 	 * Routing
525 	 */
526 	{ "so_rcv", &lock_class_mtx_sleep },
527 	{ "radix node head", &lock_class_rw },
528 	{ "rtentry", &lock_class_mtx_sleep },
529 	{ "ifaddr", &lock_class_mtx_sleep },
530 	{ NULL, NULL },
531 	/*
532 	 * IPv4 multicast:
533 	 * protocol locks before interface locks, after UDP locks.
534 	 */
535 	{ "in_multi_sx", &lock_class_sx },
536 	{ "udpinp", &lock_class_rw },
537 	{ "in_multi_list_mtx", &lock_class_mtx_sleep },
538 	{ "igmp_mtx", &lock_class_mtx_sleep },
539 	{ "ifnet_rw", &lock_class_rw },
540 	{ "if_addr_lock", &lock_class_rw },
541 	{ NULL, NULL },
542 	/*
543 	 * IPv6 multicast:
544 	 * protocol locks before interface locks, after UDP locks.
545 	 */
546 	{ "in6_multi_sx", &lock_class_sx },
547 	{ "udpinp", &lock_class_rw },
548 	{ "in6_multi_list_mtx", &lock_class_mtx_sleep },
549 	{ "mld_mtx", &lock_class_mtx_sleep },
550 	{ "ifnet_rw", &lock_class_rw },
551 	{ "if_addr_lock", &lock_class_rw },
552 	{ NULL, NULL },
553 	/*
554 	 * UNIX Domain Sockets
555 	 */
556 	{ "unp_link_rwlock", &lock_class_rw },
557 	{ "unp_list_lock", &lock_class_mtx_sleep },
558 	{ "unp", &lock_class_mtx_sleep },
559 	{ "so_snd", &lock_class_mtx_sleep },
560 	{ NULL, NULL },
561 	/*
562 	 * UDP/IP
563 	 */
564 	{ "udp", &lock_class_rw },
565 	{ "udpinp", &lock_class_rw },
566 	{ "so_snd", &lock_class_mtx_sleep },
567 	{ NULL, NULL },
568 	/*
569 	 * TCP/IP
570 	 */
571 	{ "tcp", &lock_class_rw },
572 	{ "tcpinp", &lock_class_rw },
573 	{ "so_snd", &lock_class_mtx_sleep },
574 	{ NULL, NULL },
575 	/*
576 	 * BPF
577 	 */
578 	{ "bpf global lock", &lock_class_sx },
579 	{ "bpf interface lock", &lock_class_rw },
580 	{ "bpf cdev lock", &lock_class_mtx_sleep },
581 	{ NULL, NULL },
582 	/*
583 	 * NFS server
584 	 */
585 	{ "nfsd_mtx", &lock_class_mtx_sleep },
586 	{ "so_snd", &lock_class_mtx_sleep },
587 	{ NULL, NULL },
588 
589 	/*
590 	 * IEEE 802.11
591 	 */
592 	{ "802.11 com lock", &lock_class_mtx_sleep},
593 	{ NULL, NULL },
594 	/*
595 	 * Network drivers
596 	 */
597 	{ "network driver", &lock_class_mtx_sleep},
598 	{ NULL, NULL },
599 
600 	/*
601 	 * Netgraph
602 	 */
603 	{ "ng_node", &lock_class_mtx_sleep },
604 	{ "ng_worklist", &lock_class_mtx_sleep },
605 	{ NULL, NULL },
606 	/*
607 	 * CDEV
608 	 */
609 	{ "vm map (system)", &lock_class_mtx_sleep },
610 	{ "vnode interlock", &lock_class_mtx_sleep },
611 	{ "cdev", &lock_class_mtx_sleep },
612 	{ NULL, NULL },
613 	/*
614 	 * VM
615 	 */
616 	{ "vm map (user)", &lock_class_sx },
617 	{ "vm object", &lock_class_rw },
618 	{ "vm page", &lock_class_mtx_sleep },
619 	{ "pmap pv global", &lock_class_rw },
620 	{ "pmap", &lock_class_mtx_sleep },
621 	{ "pmap pv list", &lock_class_rw },
622 	{ "vm page free queue", &lock_class_mtx_sleep },
623 	{ "vm pagequeue", &lock_class_mtx_sleep },
624 	{ NULL, NULL },
625 	/*
626 	 * kqueue/VFS interaction
627 	 */
628 	{ "kqueue", &lock_class_mtx_sleep },
629 	{ "struct mount mtx", &lock_class_mtx_sleep },
630 	{ "vnode interlock", &lock_class_mtx_sleep },
631 	{ NULL, NULL },
632 	/*
633 	 * VFS namecache
634 	 */
635 	{ "ncvn", &lock_class_mtx_sleep },
636 	{ "ncbuc", &lock_class_rw },
637 	{ "vnode interlock", &lock_class_mtx_sleep },
638 	{ "ncneg", &lock_class_mtx_sleep },
639 	{ NULL, NULL },
640 	/*
641 	 * ZFS locking
642 	 */
643 	{ "dn->dn_mtx", &lock_class_sx },
644 	{ "dr->dt.di.dr_mtx", &lock_class_sx },
645 	{ "db->db_mtx", &lock_class_sx },
646 	{ NULL, NULL },
647 	/*
648 	 * TCP log locks
649 	 */
650 	{ "TCP ID tree", &lock_class_rw },
651 	{ "tcp log id bucket", &lock_class_mtx_sleep },
652 	{ "tcpinp", &lock_class_rw },
653 	{ "TCP log expireq", &lock_class_mtx_sleep },
654 	{ NULL, NULL },
655 	/*
656 	 * spin locks
657 	 */
658 #ifdef SMP
659 	{ "ap boot", &lock_class_mtx_spin },
660 #endif
661 	{ "rm.mutex_mtx", &lock_class_mtx_spin },
662 	{ "sio", &lock_class_mtx_spin },
663 #ifdef __i386__
664 	{ "cy", &lock_class_mtx_spin },
665 #endif
666 #ifdef __sparc64__
667 	{ "pcib_mtx", &lock_class_mtx_spin },
668 	{ "rtc_mtx", &lock_class_mtx_spin },
669 #endif
670 	{ "scc_hwmtx", &lock_class_mtx_spin },
671 	{ "uart_hwmtx", &lock_class_mtx_spin },
672 	{ "fast_taskqueue", &lock_class_mtx_spin },
673 	{ "intr table", &lock_class_mtx_spin },
674 #ifdef	HWPMC_HOOKS
675 	{ "pmc-per-proc", &lock_class_mtx_spin },
676 #endif
677 	{ "process slock", &lock_class_mtx_spin },
678 	{ "syscons video lock", &lock_class_mtx_spin },
679 	{ "sleepq chain", &lock_class_mtx_spin },
680 	{ "rm_spinlock", &lock_class_mtx_spin },
681 	{ "turnstile chain", &lock_class_mtx_spin },
682 	{ "turnstile lock", &lock_class_mtx_spin },
683 	{ "sched lock", &lock_class_mtx_spin },
684 	{ "td_contested", &lock_class_mtx_spin },
685 	{ "callout", &lock_class_mtx_spin },
686 	{ "entropy harvest mutex", &lock_class_mtx_spin },
687 #ifdef SMP
688 	{ "smp rendezvous", &lock_class_mtx_spin },
689 #endif
690 #ifdef __powerpc__
691 	{ "tlb0", &lock_class_mtx_spin },
692 #endif
693 	/*
694 	 * leaf locks
695 	 */
696 	{ "intrcnt", &lock_class_mtx_spin },
697 	{ "icu", &lock_class_mtx_spin },
698 #if defined(SMP) && defined(__sparc64__)
699 	{ "ipi", &lock_class_mtx_spin },
700 #endif
701 #ifdef __i386__
702 	{ "allpmaps", &lock_class_mtx_spin },
703 	{ "descriptor tables", &lock_class_mtx_spin },
704 #endif
705 	{ "clk", &lock_class_mtx_spin },
706 	{ "cpuset", &lock_class_mtx_spin },
707 	{ "mprof lock", &lock_class_mtx_spin },
708 	{ "zombie lock", &lock_class_mtx_spin },
709 	{ "ALD Queue", &lock_class_mtx_spin },
710 #if defined(__i386__) || defined(__amd64__)
711 	{ "pcicfg", &lock_class_mtx_spin },
712 	{ "NDIS thread lock", &lock_class_mtx_spin },
713 #endif
714 	{ "tw_osl_io_lock", &lock_class_mtx_spin },
715 	{ "tw_osl_q_lock", &lock_class_mtx_spin },
716 	{ "tw_cl_io_lock", &lock_class_mtx_spin },
717 	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
718 	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
719 #ifdef	HWPMC_HOOKS
720 	{ "pmc-leaf", &lock_class_mtx_spin },
721 #endif
722 	{ "blocked lock", &lock_class_mtx_spin },
723 	{ NULL, NULL },
724 	{ NULL, NULL }
725 };
726 
727 #ifdef BLESSING
728 /*
729  * Pairs of locks which have been blessed
730  * Don't complain about order problems with blessed locks
731  */
732 static struct witness_blessed blessed_list[] = {
733 };
734 #endif
735 
736 /*
737  * This global is set to 0 once it becomes safe to use the witness code.
738  */
739 static int witness_cold = 1;
740 
741 /*
742  * This global is set to 1 once the static lock orders have been enrolled
743  * so that a warning can be issued for any spin locks enrolled later.
744  */
745 static int witness_spin_warn = 0;
746 
747 /* Trim useless garbage from filenames. */
748 static const char *
749 fixup_filename(const char *file)
750 {
751 
752 	if (file == NULL)
753 		return (NULL);
754 	while (strncmp(file, "../", 3) == 0)
755 		file += 3;
756 	return (file);
757 }
758 
759 /*
760  * Calculate the size of early witness structures.
761  */
762 int
763 witness_startup_count(void)
764 {
765 	int sz;
766 
767 	sz = sizeof(struct witness) * witness_count;
768 	sz += sizeof(*w_rmatrix) * (witness_count + 1);
769 	sz += sizeof(*w_rmatrix[0]) * (witness_count + 1) *
770 	    (witness_count + 1);
771 
772 	return (sz);
773 }
774 
775 /*
776  * The WITNESS-enabled diagnostic code.  Note that the witness code does
777  * assume that the early boot is single-threaded at least until after this
778  * routine is completed.
779  */
780 void
781 witness_startup(void *mem)
782 {
783 	struct lock_object *lock;
784 	struct witness_order_list_entry *order;
785 	struct witness *w, *w1;
786 	uintptr_t p;
787 	int i;
788 
789 	p = (uintptr_t)mem;
790 	w_data = (void *)p;
791 	p += sizeof(struct witness) * witness_count;
792 
793 	w_rmatrix = (void *)p;
794 	p += sizeof(*w_rmatrix) * (witness_count + 1);
795 
796 	for (i = 0; i < witness_count + 1; i++) {
797 		w_rmatrix[i] = (void *)p;
798 		p += sizeof(*w_rmatrix[i]) * (witness_count + 1);
799 	}
800 	badstack_sbuf_size = witness_count * 256;
801 
802 	/*
803 	 * We have to release Giant before initializing its witness
804 	 * structure so that WITNESS doesn't get confused.
805 	 */
806 	mtx_unlock(&Giant);
807 	mtx_assert(&Giant, MA_NOTOWNED);
808 
809 	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
810 	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
811 	    MTX_NOWITNESS | MTX_NOPROFILE);
812 	for (i = witness_count - 1; i >= 0; i--) {
813 		w = &w_data[i];
814 		memset(w, 0, sizeof(*w));
815 		w_data[i].w_index = i;	/* Witness index never changes. */
816 		witness_free(w);
817 	}
818 	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
819 	    ("%s: Invalid list of free witness objects", __func__));
820 
821 	/* Witness with index 0 is not used to aid in debugging. */
822 	STAILQ_REMOVE_HEAD(&w_free, w_list);
823 	w_free_cnt--;
824 
825 	for (i = 0; i < witness_count; i++) {
826 		memset(w_rmatrix[i], 0, sizeof(*w_rmatrix[i]) *
827 		    (witness_count + 1));
828 	}
829 
830 	for (i = 0; i < LOCK_CHILDCOUNT; i++)
831 		witness_lock_list_free(&w_locklistdata[i]);
832 	witness_init_hash_tables();
833 
834 	/* First add in all the specified order lists. */
835 	for (order = order_lists; order->w_name != NULL; order++) {
836 		w = enroll(order->w_name, order->w_class);
837 		if (w == NULL)
838 			continue;
839 		w->w_file = "order list";
840 		for (order++; order->w_name != NULL; order++) {
841 			w1 = enroll(order->w_name, order->w_class);
842 			if (w1 == NULL)
843 				continue;
844 			w1->w_file = "order list";
845 			itismychild(w, w1);
846 			w = w1;
847 		}
848 	}
849 	witness_spin_warn = 1;
850 
851 	/* Iterate through all locks and add them to witness. */
852 	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
853 		lock = pending_locks[i].wh_lock;
854 		KASSERT(lock->lo_flags & LO_WITNESS,
855 		    ("%s: lock %s is on pending list but not LO_WITNESS",
856 		    __func__, lock->lo_name));
857 		lock->lo_witness = enroll(pending_locks[i].wh_type,
858 		    LOCK_CLASS(lock));
859 	}
860 
861 	/* Mark the witness code as being ready for use. */
862 	witness_cold = 0;
863 
864 	mtx_lock(&Giant);
865 }
866 
867 void
868 witness_init(struct lock_object *lock, const char *type)
869 {
870 	struct lock_class *class;
871 
872 	/* Various sanity checks. */
873 	class = LOCK_CLASS(lock);
874 	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
875 	    (class->lc_flags & LC_RECURSABLE) == 0)
876 		kassert_panic("%s: lock (%s) %s can not be recursable",
877 		    __func__, class->lc_name, lock->lo_name);
878 	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
879 	    (class->lc_flags & LC_SLEEPABLE) == 0)
880 		kassert_panic("%s: lock (%s) %s can not be sleepable",
881 		    __func__, class->lc_name, lock->lo_name);
882 	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
883 	    (class->lc_flags & LC_UPGRADABLE) == 0)
884 		kassert_panic("%s: lock (%s) %s can not be upgradable",
885 		    __func__, class->lc_name, lock->lo_name);
886 
887 	/*
888 	 * If we shouldn't watch this lock, then just clear lo_witness.
889 	 * Otherwise, if witness_cold is set, then it is too early to
890 	 * enroll this lock, so defer it to witness_initialize() by adding
891 	 * it to the pending_locks list.  If it is not too early, then enroll
892 	 * the lock now.
893 	 */
894 	if (witness_watch < 1 || panicstr != NULL ||
895 	    (lock->lo_flags & LO_WITNESS) == 0)
896 		lock->lo_witness = NULL;
897 	else if (witness_cold) {
898 		pending_locks[pending_cnt].wh_lock = lock;
899 		pending_locks[pending_cnt++].wh_type = type;
900 		if (pending_cnt > WITNESS_PENDLIST)
901 			panic("%s: pending locks list is too small, "
902 			    "increase WITNESS_PENDLIST\n",
903 			    __func__);
904 	} else
905 		lock->lo_witness = enroll(type, class);
906 }
907 
908 void
909 witness_destroy(struct lock_object *lock)
910 {
911 	struct lock_class *class;
912 	struct witness *w;
913 
914 	class = LOCK_CLASS(lock);
915 
916 	if (witness_cold)
917 		panic("lock (%s) %s destroyed while witness_cold",
918 		    class->lc_name, lock->lo_name);
919 
920 	/* XXX: need to verify that no one holds the lock */
921 	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
922 		return;
923 	w = lock->lo_witness;
924 
925 	mtx_lock_spin(&w_mtx);
926 	MPASS(w->w_refcount > 0);
927 	w->w_refcount--;
928 
929 	if (w->w_refcount == 0)
930 		depart(w);
931 	mtx_unlock_spin(&w_mtx);
932 }
933 
934 #ifdef DDB
935 static void
936 witness_ddb_compute_levels(void)
937 {
938 	struct witness *w;
939 
940 	/*
941 	 * First clear all levels.
942 	 */
943 	STAILQ_FOREACH(w, &w_all, w_list)
944 		w->w_ddb_level = -1;
945 
946 	/*
947 	 * Look for locks with no parents and level all their descendants.
948 	 */
949 	STAILQ_FOREACH(w, &w_all, w_list) {
950 
951 		/* If the witness has ancestors (is not a root), skip it. */
952 		if (w->w_num_ancestors > 0)
953 			continue;
954 		witness_ddb_level_descendants(w, 0);
955 	}
956 }
957 
958 static void
959 witness_ddb_level_descendants(struct witness *w, int l)
960 {
961 	int i;
962 
963 	if (w->w_ddb_level >= l)
964 		return;
965 
966 	w->w_ddb_level = l;
967 	l++;
968 
969 	for (i = 1; i <= w_max_used_index; i++) {
970 		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
971 			witness_ddb_level_descendants(&w_data[i], l);
972 	}
973 }
974 
975 static void
976 witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
977     struct witness *w, int indent)
978 {
979 	int i;
980 
981  	for (i = 0; i < indent; i++)
982  		prnt(" ");
983 	prnt("%s (type: %s, depth: %d, active refs: %d)",
984 	     w->w_name, w->w_class->lc_name,
985 	     w->w_ddb_level, w->w_refcount);
986  	if (w->w_displayed) {
987  		prnt(" -- (already displayed)\n");
988  		return;
989  	}
990  	w->w_displayed = 1;
991 	if (w->w_file != NULL && w->w_line != 0)
992 		prnt(" -- last acquired @ %s:%d\n", fixup_filename(w->w_file),
993 		    w->w_line);
994 	else
995 		prnt(" -- never acquired\n");
996 	indent++;
997 	WITNESS_INDEX_ASSERT(w->w_index);
998 	for (i = 1; i <= w_max_used_index; i++) {
999 		if (db_pager_quit)
1000 			return;
1001 		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
1002 			witness_ddb_display_descendants(prnt, &w_data[i],
1003 			    indent);
1004 	}
1005 }
1006 
1007 static void
1008 witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
1009     struct witness_list *list)
1010 {
1011 	struct witness *w;
1012 
1013 	STAILQ_FOREACH(w, list, w_typelist) {
1014 		if (w->w_file == NULL || w->w_ddb_level > 0)
1015 			continue;
1016 
1017 		/* This lock has no anscestors - display its descendants. */
1018 		witness_ddb_display_descendants(prnt, w, 0);
1019 		if (db_pager_quit)
1020 			return;
1021 	}
1022 }
1023 
1024 static void
1025 witness_ddb_display(int(*prnt)(const char *fmt, ...))
1026 {
1027 	struct witness *w;
1028 
1029 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1030 	witness_ddb_compute_levels();
1031 
1032 	/* Clear all the displayed flags. */
1033 	STAILQ_FOREACH(w, &w_all, w_list)
1034 		w->w_displayed = 0;
1035 
1036 	/*
1037 	 * First, handle sleep locks which have been acquired at least
1038 	 * once.
1039 	 */
1040 	prnt("Sleep locks:\n");
1041 	witness_ddb_display_list(prnt, &w_sleep);
1042 	if (db_pager_quit)
1043 		return;
1044 
1045 	/*
1046 	 * Now do spin locks which have been acquired at least once.
1047 	 */
1048 	prnt("\nSpin locks:\n");
1049 	witness_ddb_display_list(prnt, &w_spin);
1050 	if (db_pager_quit)
1051 		return;
1052 
1053 	/*
1054 	 * Finally, any locks which have not been acquired yet.
1055 	 */
1056 	prnt("\nLocks which were never acquired:\n");
1057 	STAILQ_FOREACH(w, &w_all, w_list) {
1058 		if (w->w_file != NULL || w->w_refcount == 0)
1059 			continue;
1060 		prnt("%s (type: %s, depth: %d)\n", w->w_name,
1061 		    w->w_class->lc_name, w->w_ddb_level);
1062 		if (db_pager_quit)
1063 			return;
1064 	}
1065 }
1066 #endif /* DDB */
1067 
1068 int
1069 witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
1070 {
1071 
1072 	if (witness_watch == -1 || panicstr != NULL)
1073 		return (0);
1074 
1075 	/* Require locks that witness knows about. */
1076 	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
1077 	    lock2->lo_witness == NULL)
1078 		return (EINVAL);
1079 
1080 	mtx_assert(&w_mtx, MA_NOTOWNED);
1081 	mtx_lock_spin(&w_mtx);
1082 
1083 	/*
1084 	 * If we already have either an explicit or implied lock order that
1085 	 * is the other way around, then return an error.
1086 	 */
1087 	if (witness_watch &&
1088 	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
1089 		mtx_unlock_spin(&w_mtx);
1090 		return (EDOOFUS);
1091 	}
1092 
1093 	/* Try to add the new order. */
1094 	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1095 	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
1096 	itismychild(lock1->lo_witness, lock2->lo_witness);
1097 	mtx_unlock_spin(&w_mtx);
1098 	return (0);
1099 }
1100 
1101 void
1102 witness_checkorder(struct lock_object *lock, int flags, const char *file,
1103     int line, struct lock_object *interlock)
1104 {
1105 	struct lock_list_entry *lock_list, *lle;
1106 	struct lock_instance *lock1, *lock2, *plock;
1107 	struct lock_class *class, *iclass;
1108 	struct witness *w, *w1;
1109 	struct thread *td;
1110 	int i, j;
1111 
1112 	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
1113 	    panicstr != NULL)
1114 		return;
1115 
1116 	w = lock->lo_witness;
1117 	class = LOCK_CLASS(lock);
1118 	td = curthread;
1119 
1120 	if (class->lc_flags & LC_SLEEPLOCK) {
1121 
1122 		/*
1123 		 * Since spin locks include a critical section, this check
1124 		 * implicitly enforces a lock order of all sleep locks before
1125 		 * all spin locks.
1126 		 */
1127 		if (td->td_critnest != 0 && !kdb_active)
1128 			kassert_panic("acquiring blockable sleep lock with "
1129 			    "spinlock or critical section held (%s) %s @ %s:%d",
1130 			    class->lc_name, lock->lo_name,
1131 			    fixup_filename(file), line);
1132 
1133 		/*
1134 		 * If this is the first lock acquired then just return as
1135 		 * no order checking is needed.
1136 		 */
1137 		lock_list = td->td_sleeplocks;
1138 		if (lock_list == NULL || lock_list->ll_count == 0)
1139 			return;
1140 	} else {
1141 
1142 		/*
1143 		 * If this is the first lock, just return as no order
1144 		 * checking is needed.  Avoid problems with thread
1145 		 * migration pinning the thread while checking if
1146 		 * spinlocks are held.  If at least one spinlock is held
1147 		 * the thread is in a safe path and it is allowed to
1148 		 * unpin it.
1149 		 */
1150 		sched_pin();
1151 		lock_list = PCPU_GET(spinlocks);
1152 		if (lock_list == NULL || lock_list->ll_count == 0) {
1153 			sched_unpin();
1154 			return;
1155 		}
1156 		sched_unpin();
1157 	}
1158 
1159 	/*
1160 	 * Check to see if we are recursing on a lock we already own.  If
1161 	 * so, make sure that we don't mismatch exclusive and shared lock
1162 	 * acquires.
1163 	 */
1164 	lock1 = find_instance(lock_list, lock);
1165 	if (lock1 != NULL) {
1166 		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
1167 		    (flags & LOP_EXCLUSIVE) == 0) {
1168 			witness_output("shared lock of (%s) %s @ %s:%d\n",
1169 			    class->lc_name, lock->lo_name,
1170 			    fixup_filename(file), line);
1171 			witness_output("while exclusively locked from %s:%d\n",
1172 			    fixup_filename(lock1->li_file), lock1->li_line);
1173 			kassert_panic("excl->share");
1174 		}
1175 		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
1176 		    (flags & LOP_EXCLUSIVE) != 0) {
1177 			witness_output("exclusive lock of (%s) %s @ %s:%d\n",
1178 			    class->lc_name, lock->lo_name,
1179 			    fixup_filename(file), line);
1180 			witness_output("while share locked from %s:%d\n",
1181 			    fixup_filename(lock1->li_file), lock1->li_line);
1182 			kassert_panic("share->excl");
1183 		}
1184 		return;
1185 	}
1186 
1187 	/* Warn if the interlock is not locked exactly once. */
1188 	if (interlock != NULL) {
1189 		iclass = LOCK_CLASS(interlock);
1190 		lock1 = find_instance(lock_list, interlock);
1191 		if (lock1 == NULL)
1192 			kassert_panic("interlock (%s) %s not locked @ %s:%d",
1193 			    iclass->lc_name, interlock->lo_name,
1194 			    fixup_filename(file), line);
1195 		else if ((lock1->li_flags & LI_RECURSEMASK) != 0)
1196 			kassert_panic("interlock (%s) %s recursed @ %s:%d",
1197 			    iclass->lc_name, interlock->lo_name,
1198 			    fixup_filename(file), line);
1199 	}
1200 
1201 	/*
1202 	 * Find the previously acquired lock, but ignore interlocks.
1203 	 */
1204 	plock = &lock_list->ll_children[lock_list->ll_count - 1];
1205 	if (interlock != NULL && plock->li_lock == interlock) {
1206 		if (lock_list->ll_count > 1)
1207 			plock =
1208 			    &lock_list->ll_children[lock_list->ll_count - 2];
1209 		else {
1210 			lle = lock_list->ll_next;
1211 
1212 			/*
1213 			 * The interlock is the only lock we hold, so
1214 			 * simply return.
1215 			 */
1216 			if (lle == NULL)
1217 				return;
1218 			plock = &lle->ll_children[lle->ll_count - 1];
1219 		}
1220 	}
1221 
1222 	/*
1223 	 * Try to perform most checks without a lock.  If this succeeds we
1224 	 * can skip acquiring the lock and return success.  Otherwise we redo
1225 	 * the check with the lock held to handle races with concurrent updates.
1226 	 */
1227 	w1 = plock->li_lock->lo_witness;
1228 	if (witness_lock_order_check(w1, w))
1229 		return;
1230 
1231 	mtx_lock_spin(&w_mtx);
1232 	if (witness_lock_order_check(w1, w)) {
1233 		mtx_unlock_spin(&w_mtx);
1234 		return;
1235 	}
1236 	witness_lock_order_add(w1, w);
1237 
1238 	/*
1239 	 * Check for duplicate locks of the same type.  Note that we only
1240 	 * have to check for this on the last lock we just acquired.  Any
1241 	 * other cases will be caught as lock order violations.
1242 	 */
1243 	if (w1 == w) {
1244 		i = w->w_index;
1245 		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
1246 		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
1247 		    w_rmatrix[i][i] |= WITNESS_REVERSAL;
1248 			w->w_reversed = 1;
1249 			mtx_unlock_spin(&w_mtx);
1250 			witness_output(
1251 			    "acquiring duplicate lock of same type: \"%s\"\n",
1252 			    w->w_name);
1253 			witness_output(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
1254 			    fixup_filename(plock->li_file), plock->li_line);
1255 			witness_output(" 2nd %s @ %s:%d\n", lock->lo_name,
1256 			    fixup_filename(file), line);
1257 			witness_debugger(1, __func__);
1258 		} else
1259 			mtx_unlock_spin(&w_mtx);
1260 		return;
1261 	}
1262 	mtx_assert(&w_mtx, MA_OWNED);
1263 
1264 	/*
1265 	 * If we know that the lock we are acquiring comes after
1266 	 * the lock we most recently acquired in the lock order tree,
1267 	 * then there is no need for any further checks.
1268 	 */
1269 	if (isitmychild(w1, w))
1270 		goto out;
1271 
1272 	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
1273 		for (i = lle->ll_count - 1; i >= 0; i--, j++) {
1274 
1275 			MPASS(j < LOCK_CHILDCOUNT * LOCK_NCHILDREN);
1276 			lock1 = &lle->ll_children[i];
1277 
1278 			/*
1279 			 * Ignore the interlock.
1280 			 */
1281 			if (interlock == lock1->li_lock)
1282 				continue;
1283 
1284 			/*
1285 			 * If this lock doesn't undergo witness checking,
1286 			 * then skip it.
1287 			 */
1288 			w1 = lock1->li_lock->lo_witness;
1289 			if (w1 == NULL) {
1290 				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
1291 				    ("lock missing witness structure"));
1292 				continue;
1293 			}
1294 
1295 			/*
1296 			 * If we are locking Giant and this is a sleepable
1297 			 * lock, then skip it.
1298 			 */
1299 			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
1300 			    lock == &Giant.lock_object)
1301 				continue;
1302 
1303 			/*
1304 			 * If we are locking a sleepable lock and this lock
1305 			 * is Giant, then skip it.
1306 			 */
1307 			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1308 			    lock1->li_lock == &Giant.lock_object)
1309 				continue;
1310 
1311 			/*
1312 			 * If we are locking a sleepable lock and this lock
1313 			 * isn't sleepable, we want to treat it as a lock
1314 			 * order violation to enfore a general lock order of
1315 			 * sleepable locks before non-sleepable locks.
1316 			 */
1317 			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1318 			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1319 				goto reversal;
1320 
1321 			/*
1322 			 * If we are locking Giant and this is a non-sleepable
1323 			 * lock, then treat it as a reversal.
1324 			 */
1325 			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
1326 			    lock == &Giant.lock_object)
1327 				goto reversal;
1328 
1329 			/*
1330 			 * Check the lock order hierarchy for a reveresal.
1331 			 */
1332 			if (!isitmydescendant(w, w1))
1333 				continue;
1334 		reversal:
1335 
1336 			/*
1337 			 * We have a lock order violation, check to see if it
1338 			 * is allowed or has already been yelled about.
1339 			 */
1340 #ifdef BLESSING
1341 
1342 			/*
1343 			 * If the lock order is blessed, just bail.  We don't
1344 			 * look for other lock order violations though, which
1345 			 * may be a bug.
1346 			 */
1347 			if (blessed(w, w1))
1348 				goto out;
1349 #endif
1350 
1351 			/* Bail if this violation is known */
1352 			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
1353 				goto out;
1354 
1355 			/* Record this as a violation */
1356 			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
1357 			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
1358 			w->w_reversed = w1->w_reversed = 1;
1359 			witness_increment_graph_generation();
1360 			mtx_unlock_spin(&w_mtx);
1361 
1362 #ifdef WITNESS_NO_VNODE
1363 			/*
1364 			 * There are known LORs between VNODE locks. They are
1365 			 * not an indication of a bug. VNODE locks are flagged
1366 			 * as such (LO_IS_VNODE) and we don't yell if the LOR
1367 			 * is between 2 VNODE locks.
1368 			 */
1369 			if ((lock->lo_flags & LO_IS_VNODE) != 0 &&
1370 			    (lock1->li_lock->lo_flags & LO_IS_VNODE) != 0)
1371 				return;
1372 #endif
1373 
1374 			/*
1375 			 * Ok, yell about it.
1376 			 */
1377 			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1378 			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1379 				witness_output(
1380 		"lock order reversal: (sleepable after non-sleepable)\n");
1381 			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
1382 			    && lock == &Giant.lock_object)
1383 				witness_output(
1384 		"lock order reversal: (Giant after non-sleepable)\n");
1385 			else
1386 				witness_output("lock order reversal:\n");
1387 
1388 			/*
1389 			 * Try to locate an earlier lock with
1390 			 * witness w in our list.
1391 			 */
1392 			do {
1393 				lock2 = &lle->ll_children[i];
1394 				MPASS(lock2->li_lock != NULL);
1395 				if (lock2->li_lock->lo_witness == w)
1396 					break;
1397 				if (i == 0 && lle->ll_next != NULL) {
1398 					lle = lle->ll_next;
1399 					i = lle->ll_count - 1;
1400 					MPASS(i >= 0 && i < LOCK_NCHILDREN);
1401 				} else
1402 					i--;
1403 			} while (i >= 0);
1404 			if (i < 0) {
1405 				witness_output(" 1st %p %s (%s) @ %s:%d\n",
1406 				    lock1->li_lock, lock1->li_lock->lo_name,
1407 				    w1->w_name, fixup_filename(lock1->li_file),
1408 				    lock1->li_line);
1409 				witness_output(" 2nd %p %s (%s) @ %s:%d\n", lock,
1410 				    lock->lo_name, w->w_name,
1411 				    fixup_filename(file), line);
1412 			} else {
1413 				witness_output(" 1st %p %s (%s) @ %s:%d\n",
1414 				    lock2->li_lock, lock2->li_lock->lo_name,
1415 				    lock2->li_lock->lo_witness->w_name,
1416 				    fixup_filename(lock2->li_file),
1417 				    lock2->li_line);
1418 				witness_output(" 2nd %p %s (%s) @ %s:%d\n",
1419 				    lock1->li_lock, lock1->li_lock->lo_name,
1420 				    w1->w_name, fixup_filename(lock1->li_file),
1421 				    lock1->li_line);
1422 				witness_output(" 3rd %p %s (%s) @ %s:%d\n", lock,
1423 				    lock->lo_name, w->w_name,
1424 				    fixup_filename(file), line);
1425 			}
1426 			witness_debugger(1, __func__);
1427 			return;
1428 		}
1429 	}
1430 
1431 	/*
1432 	 * If requested, build a new lock order.  However, don't build a new
1433 	 * relationship between a sleepable lock and Giant if it is in the
1434 	 * wrong direction.  The correct lock order is that sleepable locks
1435 	 * always come before Giant.
1436 	 */
1437 	if (flags & LOP_NEWORDER &&
1438 	    !(plock->li_lock == &Giant.lock_object &&
1439 	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
1440 		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1441 		    w->w_name, plock->li_lock->lo_witness->w_name);
1442 		itismychild(plock->li_lock->lo_witness, w);
1443 	}
1444 out:
1445 	mtx_unlock_spin(&w_mtx);
1446 }
1447 
1448 void
1449 witness_lock(struct lock_object *lock, int flags, const char *file, int line)
1450 {
1451 	struct lock_list_entry **lock_list, *lle;
1452 	struct lock_instance *instance;
1453 	struct witness *w;
1454 	struct thread *td;
1455 
1456 	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
1457 	    panicstr != NULL)
1458 		return;
1459 	w = lock->lo_witness;
1460 	td = curthread;
1461 
1462 	/* Determine lock list for this lock. */
1463 	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
1464 		lock_list = &td->td_sleeplocks;
1465 	else
1466 		lock_list = PCPU_PTR(spinlocks);
1467 
1468 	/* Check to see if we are recursing on a lock we already own. */
1469 	instance = find_instance(*lock_list, lock);
1470 	if (instance != NULL) {
1471 		instance->li_flags++;
1472 		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
1473 		    td->td_proc->p_pid, lock->lo_name,
1474 		    instance->li_flags & LI_RECURSEMASK);
1475 		instance->li_file = file;
1476 		instance->li_line = line;
1477 		return;
1478 	}
1479 
1480 	/* Update per-witness last file and line acquire. */
1481 	w->w_file = file;
1482 	w->w_line = line;
1483 
1484 	/* Find the next open lock instance in the list and fill it. */
1485 	lle = *lock_list;
1486 	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
1487 		lle = witness_lock_list_get();
1488 		if (lle == NULL)
1489 			return;
1490 		lle->ll_next = *lock_list;
1491 		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
1492 		    td->td_proc->p_pid, lle);
1493 		*lock_list = lle;
1494 	}
1495 	instance = &lle->ll_children[lle->ll_count++];
1496 	instance->li_lock = lock;
1497 	instance->li_line = line;
1498 	instance->li_file = file;
1499 	if ((flags & LOP_EXCLUSIVE) != 0)
1500 		instance->li_flags = LI_EXCLUSIVE;
1501 	else
1502 		instance->li_flags = 0;
1503 	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
1504 	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
1505 }
1506 
1507 void
1508 witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
1509 {
1510 	struct lock_instance *instance;
1511 	struct lock_class *class;
1512 
1513 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1514 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1515 		return;
1516 	class = LOCK_CLASS(lock);
1517 	if (witness_watch) {
1518 		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1519 			kassert_panic(
1520 			    "upgrade of non-upgradable lock (%s) %s @ %s:%d",
1521 			    class->lc_name, lock->lo_name,
1522 			    fixup_filename(file), line);
1523 		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1524 			kassert_panic(
1525 			    "upgrade of non-sleep lock (%s) %s @ %s:%d",
1526 			    class->lc_name, lock->lo_name,
1527 			    fixup_filename(file), line);
1528 	}
1529 	instance = find_instance(curthread->td_sleeplocks, lock);
1530 	if (instance == NULL) {
1531 		kassert_panic("upgrade of unlocked lock (%s) %s @ %s:%d",
1532 		    class->lc_name, lock->lo_name,
1533 		    fixup_filename(file), line);
1534 		return;
1535 	}
1536 	if (witness_watch) {
1537 		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
1538 			kassert_panic(
1539 			    "upgrade of exclusive lock (%s) %s @ %s:%d",
1540 			    class->lc_name, lock->lo_name,
1541 			    fixup_filename(file), line);
1542 		if ((instance->li_flags & LI_RECURSEMASK) != 0)
1543 			kassert_panic(
1544 			    "upgrade of recursed lock (%s) %s r=%d @ %s:%d",
1545 			    class->lc_name, lock->lo_name,
1546 			    instance->li_flags & LI_RECURSEMASK,
1547 			    fixup_filename(file), line);
1548 	}
1549 	instance->li_flags |= LI_EXCLUSIVE;
1550 }
1551 
1552 void
1553 witness_downgrade(struct lock_object *lock, int flags, const char *file,
1554     int line)
1555 {
1556 	struct lock_instance *instance;
1557 	struct lock_class *class;
1558 
1559 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1560 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1561 		return;
1562 	class = LOCK_CLASS(lock);
1563 	if (witness_watch) {
1564 		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1565 			kassert_panic(
1566 			    "downgrade of non-upgradable lock (%s) %s @ %s:%d",
1567 			    class->lc_name, lock->lo_name,
1568 			    fixup_filename(file), line);
1569 		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1570 			kassert_panic(
1571 			    "downgrade of non-sleep lock (%s) %s @ %s:%d",
1572 			    class->lc_name, lock->lo_name,
1573 			    fixup_filename(file), line);
1574 	}
1575 	instance = find_instance(curthread->td_sleeplocks, lock);
1576 	if (instance == NULL) {
1577 		kassert_panic("downgrade of unlocked lock (%s) %s @ %s:%d",
1578 		    class->lc_name, lock->lo_name,
1579 		    fixup_filename(file), line);
1580 		return;
1581 	}
1582 	if (witness_watch) {
1583 		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
1584 			kassert_panic(
1585 			    "downgrade of shared lock (%s) %s @ %s:%d",
1586 			    class->lc_name, lock->lo_name,
1587 			    fixup_filename(file), line);
1588 		if ((instance->li_flags & LI_RECURSEMASK) != 0)
1589 			kassert_panic(
1590 			    "downgrade of recursed lock (%s) %s r=%d @ %s:%d",
1591 			    class->lc_name, lock->lo_name,
1592 			    instance->li_flags & LI_RECURSEMASK,
1593 			    fixup_filename(file), line);
1594 	}
1595 	instance->li_flags &= ~LI_EXCLUSIVE;
1596 }
1597 
1598 void
1599 witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
1600 {
1601 	struct lock_list_entry **lock_list, *lle;
1602 	struct lock_instance *instance;
1603 	struct lock_class *class;
1604 	struct thread *td;
1605 	register_t s;
1606 	int i, j;
1607 
1608 	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
1609 		return;
1610 	td = curthread;
1611 	class = LOCK_CLASS(lock);
1612 
1613 	/* Find lock instance associated with this lock. */
1614 	if (class->lc_flags & LC_SLEEPLOCK)
1615 		lock_list = &td->td_sleeplocks;
1616 	else
1617 		lock_list = PCPU_PTR(spinlocks);
1618 	lle = *lock_list;
1619 	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
1620 		for (i = 0; i < (*lock_list)->ll_count; i++) {
1621 			instance = &(*lock_list)->ll_children[i];
1622 			if (instance->li_lock == lock)
1623 				goto found;
1624 		}
1625 
1626 	/*
1627 	 * When disabling WITNESS through witness_watch we could end up in
1628 	 * having registered locks in the td_sleeplocks queue.
1629 	 * We have to make sure we flush these queues, so just search for
1630 	 * eventual register locks and remove them.
1631 	 */
1632 	if (witness_watch > 0) {
1633 		kassert_panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
1634 		    lock->lo_name, fixup_filename(file), line);
1635 		return;
1636 	} else {
1637 		return;
1638 	}
1639 found:
1640 
1641 	/* First, check for shared/exclusive mismatches. */
1642 	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
1643 	    (flags & LOP_EXCLUSIVE) == 0) {
1644 		witness_output("shared unlock of (%s) %s @ %s:%d\n",
1645 		    class->lc_name, lock->lo_name, fixup_filename(file), line);
1646 		witness_output("while exclusively locked from %s:%d\n",
1647 		    fixup_filename(instance->li_file), instance->li_line);
1648 		kassert_panic("excl->ushare");
1649 	}
1650 	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
1651 	    (flags & LOP_EXCLUSIVE) != 0) {
1652 		witness_output("exclusive unlock of (%s) %s @ %s:%d\n",
1653 		    class->lc_name, lock->lo_name, fixup_filename(file), line);
1654 		witness_output("while share locked from %s:%d\n",
1655 		    fixup_filename(instance->li_file),
1656 		    instance->li_line);
1657 		kassert_panic("share->uexcl");
1658 	}
1659 	/* If we are recursed, unrecurse. */
1660 	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
1661 		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
1662 		    td->td_proc->p_pid, instance->li_lock->lo_name,
1663 		    instance->li_flags);
1664 		instance->li_flags--;
1665 		return;
1666 	}
1667 	/* The lock is now being dropped, check for NORELEASE flag */
1668 	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
1669 		witness_output("forbidden unlock of (%s) %s @ %s:%d\n",
1670 		    class->lc_name, lock->lo_name, fixup_filename(file), line);
1671 		kassert_panic("lock marked norelease");
1672 	}
1673 
1674 	/* Otherwise, remove this item from the list. */
1675 	s = intr_disable();
1676 	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
1677 	    td->td_proc->p_pid, instance->li_lock->lo_name,
1678 	    (*lock_list)->ll_count - 1);
1679 	for (j = i; j < (*lock_list)->ll_count - 1; j++)
1680 		(*lock_list)->ll_children[j] =
1681 		    (*lock_list)->ll_children[j + 1];
1682 	(*lock_list)->ll_count--;
1683 	intr_restore(s);
1684 
1685 	/*
1686 	 * In order to reduce contention on w_mtx, we want to keep always an
1687 	 * head object into lists so that frequent allocation from the
1688 	 * free witness pool (and subsequent locking) is avoided.
1689 	 * In order to maintain the current code simple, when the head
1690 	 * object is totally unloaded it means also that we do not have
1691 	 * further objects in the list, so the list ownership needs to be
1692 	 * hand over to another object if the current head needs to be freed.
1693 	 */
1694 	if ((*lock_list)->ll_count == 0) {
1695 		if (*lock_list == lle) {
1696 			if (lle->ll_next == NULL)
1697 				return;
1698 		} else
1699 			lle = *lock_list;
1700 		*lock_list = lle->ll_next;
1701 		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
1702 		    td->td_proc->p_pid, lle);
1703 		witness_lock_list_free(lle);
1704 	}
1705 }
1706 
1707 void
1708 witness_thread_exit(struct thread *td)
1709 {
1710 	struct lock_list_entry *lle;
1711 	int i, n;
1712 
1713 	lle = td->td_sleeplocks;
1714 	if (lle == NULL || panicstr != NULL)
1715 		return;
1716 	if (lle->ll_count != 0) {
1717 		for (n = 0; lle != NULL; lle = lle->ll_next)
1718 			for (i = lle->ll_count - 1; i >= 0; i--) {
1719 				if (n == 0)
1720 					witness_output(
1721 		    "Thread %p exiting with the following locks held:\n", td);
1722 				n++;
1723 				witness_list_lock(&lle->ll_children[i],
1724 				    witness_output);
1725 
1726 			}
1727 		kassert_panic(
1728 		    "Thread %p cannot exit while holding sleeplocks\n", td);
1729 	}
1730 	witness_lock_list_free(lle);
1731 }
1732 
1733 /*
1734  * Warn if any locks other than 'lock' are held.  Flags can be passed in to
1735  * exempt Giant and sleepable locks from the checks as well.  If any
1736  * non-exempt locks are held, then a supplied message is printed to the
1737  * output channel along with a list of the offending locks.  If indicated in the
1738  * flags then a failure results in a panic as well.
1739  */
1740 int
1741 witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1742 {
1743 	struct lock_list_entry *lock_list, *lle;
1744 	struct lock_instance *lock1;
1745 	struct thread *td;
1746 	va_list ap;
1747 	int i, n;
1748 
1749 	if (witness_cold || witness_watch < 1 || panicstr != NULL)
1750 		return (0);
1751 	n = 0;
1752 	td = curthread;
1753 	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1754 		for (i = lle->ll_count - 1; i >= 0; i--) {
1755 			lock1 = &lle->ll_children[i];
1756 			if (lock1->li_lock == lock)
1757 				continue;
1758 			if (flags & WARN_GIANTOK &&
1759 			    lock1->li_lock == &Giant.lock_object)
1760 				continue;
1761 			if (flags & WARN_SLEEPOK &&
1762 			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1763 				continue;
1764 			if (n == 0) {
1765 				va_start(ap, fmt);
1766 				vprintf(fmt, ap);
1767 				va_end(ap);
1768 				printf(" with the following %slocks held:\n",
1769 				    (flags & WARN_SLEEPOK) != 0 ?
1770 				    "non-sleepable " : "");
1771 			}
1772 			n++;
1773 			witness_list_lock(lock1, printf);
1774 		}
1775 
1776 	/*
1777 	 * Pin the thread in order to avoid problems with thread migration.
1778 	 * Once that all verifies are passed about spinlocks ownership,
1779 	 * the thread is in a safe path and it can be unpinned.
1780 	 */
1781 	sched_pin();
1782 	lock_list = PCPU_GET(spinlocks);
1783 	if (lock_list != NULL && lock_list->ll_count != 0) {
1784 		sched_unpin();
1785 
1786 		/*
1787 		 * We should only have one spinlock and as long as
1788 		 * the flags cannot match for this locks class,
1789 		 * check if the first spinlock is the one curthread
1790 		 * should hold.
1791 		 */
1792 		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
1793 		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
1794 		    lock1->li_lock == lock && n == 0)
1795 			return (0);
1796 
1797 		va_start(ap, fmt);
1798 		vprintf(fmt, ap);
1799 		va_end(ap);
1800 		printf(" with the following %slocks held:\n",
1801 		    (flags & WARN_SLEEPOK) != 0 ?  "non-sleepable " : "");
1802 		n += witness_list_locks(&lock_list, printf);
1803 	} else
1804 		sched_unpin();
1805 	if (flags & WARN_PANIC && n)
1806 		kassert_panic("%s", __func__);
1807 	else
1808 		witness_debugger(n, __func__);
1809 	return (n);
1810 }
1811 
1812 const char *
1813 witness_file(struct lock_object *lock)
1814 {
1815 	struct witness *w;
1816 
1817 	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1818 		return ("?");
1819 	w = lock->lo_witness;
1820 	return (w->w_file);
1821 }
1822 
1823 int
1824 witness_line(struct lock_object *lock)
1825 {
1826 	struct witness *w;
1827 
1828 	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1829 		return (0);
1830 	w = lock->lo_witness;
1831 	return (w->w_line);
1832 }
1833 
1834 static struct witness *
1835 enroll(const char *description, struct lock_class *lock_class)
1836 {
1837 	struct witness *w;
1838 
1839 	MPASS(description != NULL);
1840 
1841 	if (witness_watch == -1 || panicstr != NULL)
1842 		return (NULL);
1843 	if ((lock_class->lc_flags & LC_SPINLOCK)) {
1844 		if (witness_skipspin)
1845 			return (NULL);
1846 	} else if ((lock_class->lc_flags & LC_SLEEPLOCK) == 0) {
1847 		kassert_panic("lock class %s is not sleep or spin",
1848 		    lock_class->lc_name);
1849 		return (NULL);
1850 	}
1851 
1852 	mtx_lock_spin(&w_mtx);
1853 	w = witness_hash_get(description);
1854 	if (w)
1855 		goto found;
1856 	if ((w = witness_get()) == NULL)
1857 		return (NULL);
1858 	MPASS(strlen(description) < MAX_W_NAME);
1859 	strcpy(w->w_name, description);
1860 	w->w_class = lock_class;
1861 	w->w_refcount = 1;
1862 	STAILQ_INSERT_HEAD(&w_all, w, w_list);
1863 	if (lock_class->lc_flags & LC_SPINLOCK) {
1864 		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1865 		w_spin_cnt++;
1866 	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1867 		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1868 		w_sleep_cnt++;
1869 	}
1870 
1871 	/* Insert new witness into the hash */
1872 	witness_hash_put(w);
1873 	witness_increment_graph_generation();
1874 	mtx_unlock_spin(&w_mtx);
1875 	return (w);
1876 found:
1877 	w->w_refcount++;
1878 	if (w->w_refcount == 1)
1879 		w->w_class = lock_class;
1880 	mtx_unlock_spin(&w_mtx);
1881 	if (lock_class != w->w_class)
1882 		kassert_panic(
1883 		    "lock (%s) %s does not match earlier (%s) lock",
1884 		    description, lock_class->lc_name,
1885 		    w->w_class->lc_name);
1886 	return (w);
1887 }
1888 
1889 static void
1890 depart(struct witness *w)
1891 {
1892 
1893 	MPASS(w->w_refcount == 0);
1894 	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1895 		w_sleep_cnt--;
1896 	} else {
1897 		w_spin_cnt--;
1898 	}
1899 	/*
1900 	 * Set file to NULL as it may point into a loadable module.
1901 	 */
1902 	w->w_file = NULL;
1903 	w->w_line = 0;
1904 	witness_increment_graph_generation();
1905 }
1906 
1907 
1908 static void
1909 adopt(struct witness *parent, struct witness *child)
1910 {
1911 	int pi, ci, i, j;
1912 
1913 	if (witness_cold == 0)
1914 		mtx_assert(&w_mtx, MA_OWNED);
1915 
1916 	/* If the relationship is already known, there's no work to be done. */
1917 	if (isitmychild(parent, child))
1918 		return;
1919 
1920 	/* When the structure of the graph changes, bump up the generation. */
1921 	witness_increment_graph_generation();
1922 
1923 	/*
1924 	 * The hard part ... create the direct relationship, then propagate all
1925 	 * indirect relationships.
1926 	 */
1927 	pi = parent->w_index;
1928 	ci = child->w_index;
1929 	WITNESS_INDEX_ASSERT(pi);
1930 	WITNESS_INDEX_ASSERT(ci);
1931 	MPASS(pi != ci);
1932 	w_rmatrix[pi][ci] |= WITNESS_PARENT;
1933 	w_rmatrix[ci][pi] |= WITNESS_CHILD;
1934 
1935 	/*
1936 	 * If parent was not already an ancestor of child,
1937 	 * then we increment the descendant and ancestor counters.
1938 	 */
1939 	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
1940 		parent->w_num_descendants++;
1941 		child->w_num_ancestors++;
1942 	}
1943 
1944 	/*
1945 	 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
1946 	 * an ancestor of 'pi' during this loop.
1947 	 */
1948 	for (i = 1; i <= w_max_used_index; i++) {
1949 		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
1950 		    (i != pi))
1951 			continue;
1952 
1953 		/* Find each descendant of 'i' and mark it as a descendant. */
1954 		for (j = 1; j <= w_max_used_index; j++) {
1955 
1956 			/*
1957 			 * Skip children that are already marked as
1958 			 * descendants of 'i'.
1959 			 */
1960 			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
1961 				continue;
1962 
1963 			/*
1964 			 * We are only interested in descendants of 'ci'. Note
1965 			 * that 'ci' itself is counted as a descendant of 'ci'.
1966 			 */
1967 			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
1968 			    (j != ci))
1969 				continue;
1970 			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
1971 			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
1972 			w_data[i].w_num_descendants++;
1973 			w_data[j].w_num_ancestors++;
1974 
1975 			/*
1976 			 * Make sure we aren't marking a node as both an
1977 			 * ancestor and descendant. We should have caught
1978 			 * this as a lock order reversal earlier.
1979 			 */
1980 			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
1981 			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
1982 				printf("witness rmatrix paradox! [%d][%d]=%d "
1983 				    "both ancestor and descendant\n",
1984 				    i, j, w_rmatrix[i][j]);
1985 				kdb_backtrace();
1986 				printf("Witness disabled.\n");
1987 				witness_watch = -1;
1988 			}
1989 			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
1990 			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
1991 				printf("witness rmatrix paradox! [%d][%d]=%d "
1992 				    "both ancestor and descendant\n",
1993 				    j, i, w_rmatrix[j][i]);
1994 				kdb_backtrace();
1995 				printf("Witness disabled.\n");
1996 				witness_watch = -1;
1997 			}
1998 		}
1999 	}
2000 }
2001 
2002 static void
2003 itismychild(struct witness *parent, struct witness *child)
2004 {
2005 	int unlocked;
2006 
2007 	MPASS(child != NULL && parent != NULL);
2008 	if (witness_cold == 0)
2009 		mtx_assert(&w_mtx, MA_OWNED);
2010 
2011 	if (!witness_lock_type_equal(parent, child)) {
2012 		if (witness_cold == 0) {
2013 			unlocked = 1;
2014 			mtx_unlock_spin(&w_mtx);
2015 		} else {
2016 			unlocked = 0;
2017 		}
2018 		kassert_panic(
2019 		    "%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
2020 		    "the same lock type", __func__, parent->w_name,
2021 		    parent->w_class->lc_name, child->w_name,
2022 		    child->w_class->lc_name);
2023 		if (unlocked)
2024 			mtx_lock_spin(&w_mtx);
2025 	}
2026 	adopt(parent, child);
2027 }
2028 
2029 /*
2030  * Generic code for the isitmy*() functions. The rmask parameter is the
2031  * expected relationship of w1 to w2.
2032  */
2033 static int
2034 _isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
2035 {
2036 	unsigned char r1, r2;
2037 	int i1, i2;
2038 
2039 	i1 = w1->w_index;
2040 	i2 = w2->w_index;
2041 	WITNESS_INDEX_ASSERT(i1);
2042 	WITNESS_INDEX_ASSERT(i2);
2043 	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
2044 	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;
2045 
2046 	/* The flags on one better be the inverse of the flags on the other */
2047 	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
2048 	    (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
2049 		/* Don't squawk if we're potentially racing with an update. */
2050 		if (!mtx_owned(&w_mtx))
2051 			return (0);
2052 		printf("%s: rmatrix mismatch between %s (index %d) and %s "
2053 		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
2054 		    "w_rmatrix[%d][%d] == %hhx\n",
2055 		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
2056 		    i2, i1, r2);
2057 		kdb_backtrace();
2058 		printf("Witness disabled.\n");
2059 		witness_watch = -1;
2060 	}
2061 	return (r1 & rmask);
2062 }
2063 
2064 /*
2065  * Checks if @child is a direct child of @parent.
2066  */
2067 static int
2068 isitmychild(struct witness *parent, struct witness *child)
2069 {
2070 
2071 	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
2072 }
2073 
2074 /*
2075  * Checks if @descendant is a direct or inderect descendant of @ancestor.
2076  */
2077 static int
2078 isitmydescendant(struct witness *ancestor, struct witness *descendant)
2079 {
2080 
2081 	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
2082 	    __func__));
2083 }
2084 
2085 #ifdef BLESSING
2086 static int
2087 blessed(struct witness *w1, struct witness *w2)
2088 {
2089 	int i;
2090 	struct witness_blessed *b;
2091 
2092 	for (i = 0; i < nitems(blessed_list); i++) {
2093 		b = &blessed_list[i];
2094 		if (strcmp(w1->w_name, b->b_lock1) == 0) {
2095 			if (strcmp(w2->w_name, b->b_lock2) == 0)
2096 				return (1);
2097 			continue;
2098 		}
2099 		if (strcmp(w1->w_name, b->b_lock2) == 0)
2100 			if (strcmp(w2->w_name, b->b_lock1) == 0)
2101 				return (1);
2102 	}
2103 	return (0);
2104 }
2105 #endif
2106 
2107 static struct witness *
2108 witness_get(void)
2109 {
2110 	struct witness *w;
2111 	int index;
2112 
2113 	if (witness_cold == 0)
2114 		mtx_assert(&w_mtx, MA_OWNED);
2115 
2116 	if (witness_watch == -1) {
2117 		mtx_unlock_spin(&w_mtx);
2118 		return (NULL);
2119 	}
2120 	if (STAILQ_EMPTY(&w_free)) {
2121 		witness_watch = -1;
2122 		mtx_unlock_spin(&w_mtx);
2123 		printf("WITNESS: unable to allocate a new witness object\n");
2124 		return (NULL);
2125 	}
2126 	w = STAILQ_FIRST(&w_free);
2127 	STAILQ_REMOVE_HEAD(&w_free, w_list);
2128 	w_free_cnt--;
2129 	index = w->w_index;
2130 	MPASS(index > 0 && index == w_max_used_index+1 &&
2131 	    index < witness_count);
2132 	bzero(w, sizeof(*w));
2133 	w->w_index = index;
2134 	if (index > w_max_used_index)
2135 		w_max_used_index = index;
2136 	return (w);
2137 }
2138 
2139 static void
2140 witness_free(struct witness *w)
2141 {
2142 
2143 	STAILQ_INSERT_HEAD(&w_free, w, w_list);
2144 	w_free_cnt++;
2145 }
2146 
2147 static struct lock_list_entry *
2148 witness_lock_list_get(void)
2149 {
2150 	struct lock_list_entry *lle;
2151 
2152 	if (witness_watch == -1)
2153 		return (NULL);
2154 	mtx_lock_spin(&w_mtx);
2155 	lle = w_lock_list_free;
2156 	if (lle == NULL) {
2157 		witness_watch = -1;
2158 		mtx_unlock_spin(&w_mtx);
2159 		printf("%s: witness exhausted\n", __func__);
2160 		return (NULL);
2161 	}
2162 	w_lock_list_free = lle->ll_next;
2163 	mtx_unlock_spin(&w_mtx);
2164 	bzero(lle, sizeof(*lle));
2165 	return (lle);
2166 }
2167 
2168 static void
2169 witness_lock_list_free(struct lock_list_entry *lle)
2170 {
2171 
2172 	mtx_lock_spin(&w_mtx);
2173 	lle->ll_next = w_lock_list_free;
2174 	w_lock_list_free = lle;
2175 	mtx_unlock_spin(&w_mtx);
2176 }
2177 
2178 static struct lock_instance *
2179 find_instance(struct lock_list_entry *list, const struct lock_object *lock)
2180 {
2181 	struct lock_list_entry *lle;
2182 	struct lock_instance *instance;
2183 	int i;
2184 
2185 	for (lle = list; lle != NULL; lle = lle->ll_next)
2186 		for (i = lle->ll_count - 1; i >= 0; i--) {
2187 			instance = &lle->ll_children[i];
2188 			if (instance->li_lock == lock)
2189 				return (instance);
2190 		}
2191 	return (NULL);
2192 }
2193 
2194 static void
2195 witness_list_lock(struct lock_instance *instance,
2196     int (*prnt)(const char *fmt, ...))
2197 {
2198 	struct lock_object *lock;
2199 
2200 	lock = instance->li_lock;
2201 	prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2202 	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2203 	if (lock->lo_witness->w_name != lock->lo_name)
2204 		prnt(" (%s)", lock->lo_witness->w_name);
2205 	prnt(" r = %d (%p) locked @ %s:%d\n",
2206 	    instance->li_flags & LI_RECURSEMASK, lock,
2207 	    fixup_filename(instance->li_file), instance->li_line);
2208 }
2209 
2210 static int
2211 witness_output(const char *fmt, ...)
2212 {
2213 	va_list ap;
2214 	int ret;
2215 
2216 	va_start(ap, fmt);
2217 	ret = witness_voutput(fmt, ap);
2218 	va_end(ap);
2219 	return (ret);
2220 }
2221 
2222 static int
2223 witness_voutput(const char *fmt, va_list ap)
2224 {
2225 	int ret;
2226 
2227 	ret = 0;
2228 	switch (witness_channel) {
2229 	case WITNESS_CONSOLE:
2230 		ret = vprintf(fmt, ap);
2231 		break;
2232 	case WITNESS_LOG:
2233 		vlog(LOG_NOTICE, fmt, ap);
2234 		break;
2235 	case WITNESS_NONE:
2236 		break;
2237 	}
2238 	return (ret);
2239 }
2240 
2241 #ifdef DDB
2242 static int
2243 witness_thread_has_locks(struct thread *td)
2244 {
2245 
2246 	if (td->td_sleeplocks == NULL)
2247 		return (0);
2248 	return (td->td_sleeplocks->ll_count != 0);
2249 }
2250 
2251 static int
2252 witness_proc_has_locks(struct proc *p)
2253 {
2254 	struct thread *td;
2255 
2256 	FOREACH_THREAD_IN_PROC(p, td) {
2257 		if (witness_thread_has_locks(td))
2258 			return (1);
2259 	}
2260 	return (0);
2261 }
2262 #endif
2263 
2264 int
2265 witness_list_locks(struct lock_list_entry **lock_list,
2266     int (*prnt)(const char *fmt, ...))
2267 {
2268 	struct lock_list_entry *lle;
2269 	int i, nheld;
2270 
2271 	nheld = 0;
2272 	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2273 		for (i = lle->ll_count - 1; i >= 0; i--) {
2274 			witness_list_lock(&lle->ll_children[i], prnt);
2275 			nheld++;
2276 		}
2277 	return (nheld);
2278 }
2279 
2280 /*
2281  * This is a bit risky at best.  We call this function when we have timed
2282  * out acquiring a spin lock, and we assume that the other CPU is stuck
2283  * with this lock held.  So, we go groveling around in the other CPU's
2284  * per-cpu data to try to find the lock instance for this spin lock to
2285  * see when it was last acquired.
2286  */
2287 void
2288 witness_display_spinlock(struct lock_object *lock, struct thread *owner,
2289     int (*prnt)(const char *fmt, ...))
2290 {
2291 	struct lock_instance *instance;
2292 	struct pcpu *pc;
2293 
2294 	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2295 		return;
2296 	pc = pcpu_find(owner->td_oncpu);
2297 	instance = find_instance(pc->pc_spinlocks, lock);
2298 	if (instance != NULL)
2299 		witness_list_lock(instance, prnt);
2300 }
2301 
2302 void
2303 witness_save(struct lock_object *lock, const char **filep, int *linep)
2304 {
2305 	struct lock_list_entry *lock_list;
2306 	struct lock_instance *instance;
2307 	struct lock_class *class;
2308 
2309 	/*
2310 	 * This function is used independently in locking code to deal with
2311 	 * Giant, SCHEDULER_STOPPED() check can be removed here after Giant
2312 	 * is gone.
2313 	 */
2314 	if (SCHEDULER_STOPPED())
2315 		return;
2316 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2317 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2318 		return;
2319 	class = LOCK_CLASS(lock);
2320 	if (class->lc_flags & LC_SLEEPLOCK)
2321 		lock_list = curthread->td_sleeplocks;
2322 	else {
2323 		if (witness_skipspin)
2324 			return;
2325 		lock_list = PCPU_GET(spinlocks);
2326 	}
2327 	instance = find_instance(lock_list, lock);
2328 	if (instance == NULL) {
2329 		kassert_panic("%s: lock (%s) %s not locked", __func__,
2330 		    class->lc_name, lock->lo_name);
2331 		return;
2332 	}
2333 	*filep = instance->li_file;
2334 	*linep = instance->li_line;
2335 }
2336 
2337 void
2338 witness_restore(struct lock_object *lock, const char *file, int line)
2339 {
2340 	struct lock_list_entry *lock_list;
2341 	struct lock_instance *instance;
2342 	struct lock_class *class;
2343 
2344 	/*
2345 	 * This function is used independently in locking code to deal with
2346 	 * Giant, SCHEDULER_STOPPED() check can be removed here after Giant
2347 	 * is gone.
2348 	 */
2349 	if (SCHEDULER_STOPPED())
2350 		return;
2351 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2352 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2353 		return;
2354 	class = LOCK_CLASS(lock);
2355 	if (class->lc_flags & LC_SLEEPLOCK)
2356 		lock_list = curthread->td_sleeplocks;
2357 	else {
2358 		if (witness_skipspin)
2359 			return;
2360 		lock_list = PCPU_GET(spinlocks);
2361 	}
2362 	instance = find_instance(lock_list, lock);
2363 	if (instance == NULL)
2364 		kassert_panic("%s: lock (%s) %s not locked", __func__,
2365 		    class->lc_name, lock->lo_name);
2366 	lock->lo_witness->w_file = file;
2367 	lock->lo_witness->w_line = line;
2368 	if (instance == NULL)
2369 		return;
2370 	instance->li_file = file;
2371 	instance->li_line = line;
2372 }
2373 
2374 void
2375 witness_assert(const struct lock_object *lock, int flags, const char *file,
2376     int line)
2377 {
2378 #ifdef INVARIANT_SUPPORT
2379 	struct lock_instance *instance;
2380 	struct lock_class *class;
2381 
2382 	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
2383 		return;
2384 	class = LOCK_CLASS(lock);
2385 	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
2386 		instance = find_instance(curthread->td_sleeplocks, lock);
2387 	else if ((class->lc_flags & LC_SPINLOCK) != 0)
2388 		instance = find_instance(PCPU_GET(spinlocks), lock);
2389 	else {
2390 		kassert_panic("Lock (%s) %s is not sleep or spin!",
2391 		    class->lc_name, lock->lo_name);
2392 		return;
2393 	}
2394 	switch (flags) {
2395 	case LA_UNLOCKED:
2396 		if (instance != NULL)
2397 			kassert_panic("Lock (%s) %s locked @ %s:%d.",
2398 			    class->lc_name, lock->lo_name,
2399 			    fixup_filename(file), line);
2400 		break;
2401 	case LA_LOCKED:
2402 	case LA_LOCKED | LA_RECURSED:
2403 	case LA_LOCKED | LA_NOTRECURSED:
2404 	case LA_SLOCKED:
2405 	case LA_SLOCKED | LA_RECURSED:
2406 	case LA_SLOCKED | LA_NOTRECURSED:
2407 	case LA_XLOCKED:
2408 	case LA_XLOCKED | LA_RECURSED:
2409 	case LA_XLOCKED | LA_NOTRECURSED:
2410 		if (instance == NULL) {
2411 			kassert_panic("Lock (%s) %s not locked @ %s:%d.",
2412 			    class->lc_name, lock->lo_name,
2413 			    fixup_filename(file), line);
2414 			break;
2415 		}
2416 		if ((flags & LA_XLOCKED) != 0 &&
2417 		    (instance->li_flags & LI_EXCLUSIVE) == 0)
2418 			kassert_panic(
2419 			    "Lock (%s) %s not exclusively locked @ %s:%d.",
2420 			    class->lc_name, lock->lo_name,
2421 			    fixup_filename(file), line);
2422 		if ((flags & LA_SLOCKED) != 0 &&
2423 		    (instance->li_flags & LI_EXCLUSIVE) != 0)
2424 			kassert_panic(
2425 			    "Lock (%s) %s exclusively locked @ %s:%d.",
2426 			    class->lc_name, lock->lo_name,
2427 			    fixup_filename(file), line);
2428 		if ((flags & LA_RECURSED) != 0 &&
2429 		    (instance->li_flags & LI_RECURSEMASK) == 0)
2430 			kassert_panic("Lock (%s) %s not recursed @ %s:%d.",
2431 			    class->lc_name, lock->lo_name,
2432 			    fixup_filename(file), line);
2433 		if ((flags & LA_NOTRECURSED) != 0 &&
2434 		    (instance->li_flags & LI_RECURSEMASK) != 0)
2435 			kassert_panic("Lock (%s) %s recursed @ %s:%d.",
2436 			    class->lc_name, lock->lo_name,
2437 			    fixup_filename(file), line);
2438 		break;
2439 	default:
2440 		kassert_panic("Invalid lock assertion at %s:%d.",
2441 		    fixup_filename(file), line);
2442 
2443 	}
2444 #endif	/* INVARIANT_SUPPORT */
2445 }
2446 
2447 static void
2448 witness_setflag(struct lock_object *lock, int flag, int set)
2449 {
2450 	struct lock_list_entry *lock_list;
2451 	struct lock_instance *instance;
2452 	struct lock_class *class;
2453 
2454 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2455 		return;
2456 	class = LOCK_CLASS(lock);
2457 	if (class->lc_flags & LC_SLEEPLOCK)
2458 		lock_list = curthread->td_sleeplocks;
2459 	else {
2460 		if (witness_skipspin)
2461 			return;
2462 		lock_list = PCPU_GET(spinlocks);
2463 	}
2464 	instance = find_instance(lock_list, lock);
2465 	if (instance == NULL) {
2466 		kassert_panic("%s: lock (%s) %s not locked", __func__,
2467 		    class->lc_name, lock->lo_name);
2468 		return;
2469 	}
2470 
2471 	if (set)
2472 		instance->li_flags |= flag;
2473 	else
2474 		instance->li_flags &= ~flag;
2475 }
2476 
2477 void
2478 witness_norelease(struct lock_object *lock)
2479 {
2480 
2481 	witness_setflag(lock, LI_NORELEASE, 1);
2482 }
2483 
2484 void
2485 witness_releaseok(struct lock_object *lock)
2486 {
2487 
2488 	witness_setflag(lock, LI_NORELEASE, 0);
2489 }
2490 
2491 #ifdef DDB
2492 static void
2493 witness_ddb_list(struct thread *td)
2494 {
2495 
2496 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2497 	KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2498 
2499 	if (witness_watch < 1)
2500 		return;
2501 
2502 	witness_list_locks(&td->td_sleeplocks, db_printf);
2503 
2504 	/*
2505 	 * We only handle spinlocks if td == curthread.  This is somewhat broken
2506 	 * if td is currently executing on some other CPU and holds spin locks
2507 	 * as we won't display those locks.  If we had a MI way of getting
2508 	 * the per-cpu data for a given cpu then we could use
2509 	 * td->td_oncpu to get the list of spinlocks for this thread
2510 	 * and "fix" this.
2511 	 *
2512 	 * That still wouldn't really fix this unless we locked the scheduler
2513 	 * lock or stopped the other CPU to make sure it wasn't changing the
2514 	 * list out from under us.  It is probably best to just not try to
2515 	 * handle threads on other CPU's for now.
2516 	 */
2517 	if (td == curthread && PCPU_GET(spinlocks) != NULL)
2518 		witness_list_locks(PCPU_PTR(spinlocks), db_printf);
2519 }
2520 
2521 DB_SHOW_COMMAND(locks, db_witness_list)
2522 {
2523 	struct thread *td;
2524 
2525 	if (have_addr)
2526 		td = db_lookup_thread(addr, true);
2527 	else
2528 		td = kdb_thread;
2529 	witness_ddb_list(td);
2530 }
2531 
2532 DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
2533 {
2534 	struct thread *td;
2535 	struct proc *p;
2536 
2537 	/*
2538 	 * It would be nice to list only threads and processes that actually
2539 	 * held sleep locks, but that information is currently not exported
2540 	 * by WITNESS.
2541 	 */
2542 	FOREACH_PROC_IN_SYSTEM(p) {
2543 		if (!witness_proc_has_locks(p))
2544 			continue;
2545 		FOREACH_THREAD_IN_PROC(p, td) {
2546 			if (!witness_thread_has_locks(td))
2547 				continue;
2548 			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2549 			    p->p_comm, td, td->td_tid);
2550 			witness_ddb_list(td);
2551 			if (db_pager_quit)
2552 				return;
2553 		}
2554 	}
2555 }
2556 DB_SHOW_ALIAS(alllocks, db_witness_list_all)
2557 
2558 DB_SHOW_COMMAND(witness, db_witness_display)
2559 {
2560 
2561 	witness_ddb_display(db_printf);
2562 }
2563 #endif
2564 
2565 static void
2566 sbuf_print_witness_badstacks(struct sbuf *sb, size_t *oldidx)
2567 {
2568 	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
2569 	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
2570 	int generation, i, j;
2571 
2572 	tmp_data1 = NULL;
2573 	tmp_data2 = NULL;
2574 	tmp_w1 = NULL;
2575 	tmp_w2 = NULL;
2576 
2577 	/* Allocate and init temporary storage space. */
2578 	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2579 	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2580 	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2581 	    M_WAITOK | M_ZERO);
2582 	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2583 	    M_WAITOK | M_ZERO);
2584 	stack_zero(&tmp_data1->wlod_stack);
2585 	stack_zero(&tmp_data2->wlod_stack);
2586 
2587 restart:
2588 	mtx_lock_spin(&w_mtx);
2589 	generation = w_generation;
2590 	mtx_unlock_spin(&w_mtx);
2591 	sbuf_printf(sb, "Number of known direct relationships is %d\n",
2592 	    w_lohash.wloh_count);
2593 	for (i = 1; i < w_max_used_index; i++) {
2594 		mtx_lock_spin(&w_mtx);
2595 		if (generation != w_generation) {
2596 			mtx_unlock_spin(&w_mtx);
2597 
2598 			/* The graph has changed, try again. */
2599 			*oldidx = 0;
2600 			sbuf_clear(sb);
2601 			goto restart;
2602 		}
2603 
2604 		w1 = &w_data[i];
2605 		if (w1->w_reversed == 0) {
2606 			mtx_unlock_spin(&w_mtx);
2607 			continue;
2608 		}
2609 
2610 		/* Copy w1 locally so we can release the spin lock. */
2611 		*tmp_w1 = *w1;
2612 		mtx_unlock_spin(&w_mtx);
2613 
2614 		if (tmp_w1->w_reversed == 0)
2615 			continue;
2616 		for (j = 1; j < w_max_used_index; j++) {
2617 			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
2618 				continue;
2619 
2620 			mtx_lock_spin(&w_mtx);
2621 			if (generation != w_generation) {
2622 				mtx_unlock_spin(&w_mtx);
2623 
2624 				/* The graph has changed, try again. */
2625 				*oldidx = 0;
2626 				sbuf_clear(sb);
2627 				goto restart;
2628 			}
2629 
2630 			w2 = &w_data[j];
2631 			data1 = witness_lock_order_get(w1, w2);
2632 			data2 = witness_lock_order_get(w2, w1);
2633 
2634 			/*
2635 			 * Copy information locally so we can release the
2636 			 * spin lock.
2637 			 */
2638 			*tmp_w2 = *w2;
2639 
2640 			if (data1) {
2641 				stack_zero(&tmp_data1->wlod_stack);
2642 				stack_copy(&data1->wlod_stack,
2643 				    &tmp_data1->wlod_stack);
2644 			}
2645 			if (data2 && data2 != data1) {
2646 				stack_zero(&tmp_data2->wlod_stack);
2647 				stack_copy(&data2->wlod_stack,
2648 				    &tmp_data2->wlod_stack);
2649 			}
2650 			mtx_unlock_spin(&w_mtx);
2651 
2652 			sbuf_printf(sb,
2653 	    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
2654 			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2655 			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2656 			if (data1) {
2657 				sbuf_printf(sb,
2658 			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2659 				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2660 				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2661 				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
2662 				sbuf_printf(sb, "\n");
2663 			}
2664 			if (data2 && data2 != data1) {
2665 				sbuf_printf(sb,
2666 			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2667 				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
2668 				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
2669 				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
2670 				sbuf_printf(sb, "\n");
2671 			}
2672 		}
2673 	}
2674 	mtx_lock_spin(&w_mtx);
2675 	if (generation != w_generation) {
2676 		mtx_unlock_spin(&w_mtx);
2677 
2678 		/*
2679 		 * The graph changed while we were printing stack data,
2680 		 * try again.
2681 		 */
2682 		*oldidx = 0;
2683 		sbuf_clear(sb);
2684 		goto restart;
2685 	}
2686 	mtx_unlock_spin(&w_mtx);
2687 
2688 	/* Free temporary storage space. */
2689 	free(tmp_data1, M_TEMP);
2690 	free(tmp_data2, M_TEMP);
2691 	free(tmp_w1, M_TEMP);
2692 	free(tmp_w2, M_TEMP);
2693 }
2694 
2695 static int
2696 sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
2697 {
2698 	struct sbuf *sb;
2699 	int error;
2700 
2701 	if (witness_watch < 1) {
2702 		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2703 		return (error);
2704 	}
2705 	if (witness_cold) {
2706 		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2707 		return (error);
2708 	}
2709 	error = 0;
2710 	sb = sbuf_new(NULL, NULL, badstack_sbuf_size, SBUF_AUTOEXTEND);
2711 	if (sb == NULL)
2712 		return (ENOMEM);
2713 
2714 	sbuf_print_witness_badstacks(sb, &req->oldidx);
2715 
2716 	sbuf_finish(sb);
2717 	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2718 	sbuf_delete(sb);
2719 
2720 	return (error);
2721 }
2722 
2723 #ifdef DDB
2724 static int
2725 sbuf_db_printf_drain(void *arg __unused, const char *data, int len)
2726 {
2727 
2728 	return (db_printf("%.*s", len, data));
2729 }
2730 
2731 DB_SHOW_COMMAND(badstacks, db_witness_badstacks)
2732 {
2733 	struct sbuf sb;
2734 	char buffer[128];
2735 	size_t dummy;
2736 
2737 	sbuf_new(&sb, buffer, sizeof(buffer), SBUF_FIXEDLEN);
2738 	sbuf_set_drain(&sb, sbuf_db_printf_drain, NULL);
2739 	sbuf_print_witness_badstacks(&sb, &dummy);
2740 	sbuf_finish(&sb);
2741 }
2742 #endif
2743 
2744 static int
2745 sysctl_debug_witness_channel(SYSCTL_HANDLER_ARGS)
2746 {
2747 	static const struct {
2748 		enum witness_channel channel;
2749 		const char *name;
2750 	} channels[] = {
2751 		{ WITNESS_CONSOLE, "console" },
2752 		{ WITNESS_LOG, "log" },
2753 		{ WITNESS_NONE, "none" },
2754 	};
2755 	char buf[16];
2756 	u_int i;
2757 	int error;
2758 
2759 	buf[0] = '\0';
2760 	for (i = 0; i < nitems(channels); i++)
2761 		if (witness_channel == channels[i].channel) {
2762 			snprintf(buf, sizeof(buf), "%s", channels[i].name);
2763 			break;
2764 		}
2765 
2766 	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
2767 	if (error != 0 || req->newptr == NULL)
2768 		return (error);
2769 
2770 	error = EINVAL;
2771 	for (i = 0; i < nitems(channels); i++)
2772 		if (strcmp(channels[i].name, buf) == 0) {
2773 			witness_channel = channels[i].channel;
2774 			error = 0;
2775 			break;
2776 		}
2777 	return (error);
2778 }
2779 
2780 static int
2781 sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
2782 {
2783 	struct witness *w;
2784 	struct sbuf *sb;
2785 	int error;
2786 
2787 #ifdef __i386__
2788 	error = SYSCTL_OUT(req, w_notallowed, sizeof(w_notallowed));
2789 	return (error);
2790 #endif
2791 
2792 	if (witness_watch < 1) {
2793 		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2794 		return (error);
2795 	}
2796 	if (witness_cold) {
2797 		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2798 		return (error);
2799 	}
2800 	error = 0;
2801 
2802 	error = sysctl_wire_old_buffer(req, 0);
2803 	if (error != 0)
2804 		return (error);
2805 	sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req);
2806 	if (sb == NULL)
2807 		return (ENOMEM);
2808 	sbuf_printf(sb, "\n");
2809 
2810 	mtx_lock_spin(&w_mtx);
2811 	STAILQ_FOREACH(w, &w_all, w_list)
2812 		w->w_displayed = 0;
2813 	STAILQ_FOREACH(w, &w_all, w_list)
2814 		witness_add_fullgraph(sb, w);
2815 	mtx_unlock_spin(&w_mtx);
2816 
2817 	/*
2818 	 * Close the sbuf and return to userland.
2819 	 */
2820 	error = sbuf_finish(sb);
2821 	sbuf_delete(sb);
2822 
2823 	return (error);
2824 }
2825 
2826 static int
2827 sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
2828 {
2829 	int error, value;
2830 
2831 	value = witness_watch;
2832 	error = sysctl_handle_int(oidp, &value, 0, req);
2833 	if (error != 0 || req->newptr == NULL)
2834 		return (error);
2835 	if (value > 1 || value < -1 ||
2836 	    (witness_watch == -1 && value != witness_watch))
2837 		return (EINVAL);
2838 	witness_watch = value;
2839 	return (0);
2840 }
2841 
2842 static void
2843 witness_add_fullgraph(struct sbuf *sb, struct witness *w)
2844 {
2845 	int i;
2846 
2847 	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
2848 		return;
2849 	w->w_displayed = 1;
2850 
2851 	WITNESS_INDEX_ASSERT(w->w_index);
2852 	for (i = 1; i <= w_max_used_index; i++) {
2853 		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
2854 			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
2855 			    w_data[i].w_name);
2856 			witness_add_fullgraph(sb, &w_data[i]);
2857 		}
2858 	}
2859 }
2860 
2861 /*
2862  * A simple hash function. Takes a key pointer and a key size. If size == 0,
2863  * interprets the key as a string and reads until the null
2864  * terminator. Otherwise, reads the first size bytes. Returns an unsigned 32-bit
2865  * hash value computed from the key.
2866  */
2867 static uint32_t
2868 witness_hash_djb2(const uint8_t *key, uint32_t size)
2869 {
2870 	unsigned int hash = 5381;
2871 	int i;
2872 
2873 	/* hash = hash * 33 + key[i] */
2874 	if (size)
2875 		for (i = 0; i < size; i++)
2876 			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2877 	else
2878 		for (i = 0; key[i] != 0; i++)
2879 			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2880 
2881 	return (hash);
2882 }
2883 
2884 
2885 /*
2886  * Initializes the two witness hash tables. Called exactly once from
2887  * witness_initialize().
2888  */
2889 static void
2890 witness_init_hash_tables(void)
2891 {
2892 	int i;
2893 
2894 	MPASS(witness_cold);
2895 
2896 	/* Initialize the hash tables. */
2897 	for (i = 0; i < WITNESS_HASH_SIZE; i++)
2898 		w_hash.wh_array[i] = NULL;
2899 
2900 	w_hash.wh_size = WITNESS_HASH_SIZE;
2901 	w_hash.wh_count = 0;
2902 
2903 	/* Initialize the lock order data hash. */
2904 	w_lofree = NULL;
2905 	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
2906 		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
2907 		w_lodata[i].wlod_next = w_lofree;
2908 		w_lofree = &w_lodata[i];
2909 	}
2910 	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
2911 	w_lohash.wloh_count = 0;
2912 	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
2913 		w_lohash.wloh_array[i] = NULL;
2914 }
2915 
2916 static struct witness *
2917 witness_hash_get(const char *key)
2918 {
2919 	struct witness *w;
2920 	uint32_t hash;
2921 
2922 	MPASS(key != NULL);
2923 	if (witness_cold == 0)
2924 		mtx_assert(&w_mtx, MA_OWNED);
2925 	hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
2926 	w = w_hash.wh_array[hash];
2927 	while (w != NULL) {
2928 		if (strcmp(w->w_name, key) == 0)
2929 			goto out;
2930 		w = w->w_hash_next;
2931 	}
2932 
2933 out:
2934 	return (w);
2935 }
2936 
2937 static void
2938 witness_hash_put(struct witness *w)
2939 {
2940 	uint32_t hash;
2941 
2942 	MPASS(w != NULL);
2943 	MPASS(w->w_name != NULL);
2944 	if (witness_cold == 0)
2945 		mtx_assert(&w_mtx, MA_OWNED);
2946 	KASSERT(witness_hash_get(w->w_name) == NULL,
2947 	    ("%s: trying to add a hash entry that already exists!", __func__));
2948 	KASSERT(w->w_hash_next == NULL,
2949 	    ("%s: w->w_hash_next != NULL", __func__));
2950 
2951 	hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
2952 	w->w_hash_next = w_hash.wh_array[hash];
2953 	w_hash.wh_array[hash] = w;
2954 	w_hash.wh_count++;
2955 }
2956 
2957 
2958 static struct witness_lock_order_data *
2959 witness_lock_order_get(struct witness *parent, struct witness *child)
2960 {
2961 	struct witness_lock_order_data *data = NULL;
2962 	struct witness_lock_order_key key;
2963 	unsigned int hash;
2964 
2965 	MPASS(parent != NULL && child != NULL);
2966 	key.from = parent->w_index;
2967 	key.to = child->w_index;
2968 	WITNESS_INDEX_ASSERT(key.from);
2969 	WITNESS_INDEX_ASSERT(key.to);
2970 	if ((w_rmatrix[parent->w_index][child->w_index]
2971 	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
2972 		goto out;
2973 
2974 	hash = witness_hash_djb2((const char*)&key,
2975 	    sizeof(key)) % w_lohash.wloh_size;
2976 	data = w_lohash.wloh_array[hash];
2977 	while (data != NULL) {
2978 		if (witness_lock_order_key_equal(&data->wlod_key, &key))
2979 			break;
2980 		data = data->wlod_next;
2981 	}
2982 
2983 out:
2984 	return (data);
2985 }
2986 
2987 /*
2988  * Verify that parent and child have a known relationship, are not the same,
2989  * and child is actually a child of parent.  This is done without w_mtx
2990  * to avoid contention in the common case.
2991  */
2992 static int
2993 witness_lock_order_check(struct witness *parent, struct witness *child)
2994 {
2995 
2996 	if (parent != child &&
2997 	    w_rmatrix[parent->w_index][child->w_index]
2998 	    & WITNESS_LOCK_ORDER_KNOWN &&
2999 	    isitmychild(parent, child))
3000 		return (1);
3001 
3002 	return (0);
3003 }
3004 
3005 static int
3006 witness_lock_order_add(struct witness *parent, struct witness *child)
3007 {
3008 	struct witness_lock_order_data *data = NULL;
3009 	struct witness_lock_order_key key;
3010 	unsigned int hash;
3011 
3012 	MPASS(parent != NULL && child != NULL);
3013 	key.from = parent->w_index;
3014 	key.to = child->w_index;
3015 	WITNESS_INDEX_ASSERT(key.from);
3016 	WITNESS_INDEX_ASSERT(key.to);
3017 	if (w_rmatrix[parent->w_index][child->w_index]
3018 	    & WITNESS_LOCK_ORDER_KNOWN)
3019 		return (1);
3020 
3021 	hash = witness_hash_djb2((const char*)&key,
3022 	    sizeof(key)) % w_lohash.wloh_size;
3023 	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
3024 	data = w_lofree;
3025 	if (data == NULL)
3026 		return (0);
3027 	w_lofree = data->wlod_next;
3028 	data->wlod_next = w_lohash.wloh_array[hash];
3029 	data->wlod_key = key;
3030 	w_lohash.wloh_array[hash] = data;
3031 	w_lohash.wloh_count++;
3032 	stack_zero(&data->wlod_stack);
3033 	stack_save(&data->wlod_stack);
3034 	return (1);
3035 }
3036 
3037 /* Call this whenever the structure of the witness graph changes. */
3038 static void
3039 witness_increment_graph_generation(void)
3040 {
3041 
3042 	if (witness_cold == 0)
3043 		mtx_assert(&w_mtx, MA_OWNED);
3044 	w_generation++;
3045 }
3046 
3047 static int
3048 witness_output_drain(void *arg __unused, const char *data, int len)
3049 {
3050 
3051 	witness_output("%.*s", len, data);
3052 	return (len);
3053 }
3054 
3055 static void
3056 witness_debugger(int cond, const char *msg)
3057 {
3058 	char buf[32];
3059 	struct sbuf sb;
3060 	struct stack st;
3061 
3062 	if (!cond)
3063 		return;
3064 
3065 	if (witness_trace) {
3066 		sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
3067 		sbuf_set_drain(&sb, witness_output_drain, NULL);
3068 
3069 		stack_zero(&st);
3070 		stack_save(&st);
3071 		witness_output("stack backtrace:\n");
3072 		stack_sbuf_print_ddb(&sb, &st);
3073 
3074 		sbuf_finish(&sb);
3075 	}
3076 
3077 #ifdef KDB
3078 	if (witness_kdb)
3079 		kdb_enter(KDB_WHY_WITNESS, msg);
3080 #endif
3081 }
3082