/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 *	Main Entry: witness
 *	Pronunciation: 'wit-n&s
 *	Function: noun
 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
 *	    testimony, witness, from 2wit
 *	Date: before 12th century
 *	1 : attestation of a fact or event : TESTIMONY
 *	2 : one that gives evidence; specifically : one who testifies in
 *	    a cause or before a judicial tribunal
 *	3 : one asked to be present at a transaction so as to be able to
 *	    testify to its having taken place
 *	4 : one who has personal knowledge of something
 *	5 a : something serving as evidence or proof : SIGN
 *	  b : public affirmation by word or example of usually
 *	      religious faith or conviction <the heroic witness to divine
 *	      life -- Pilot>
 *	6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */
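
/*
 * A minimal sketch (illustrative only, not part of the original file) of
 * the orderings rules 1-3 permit.  The sx lock name is hypothetical; sx
 * locks are sleepable, so both orderings relative to Giant are legal.
 */
#if 0
static struct sx example_sx;

static void
giant_order_example(void)
{

	/* Rule 3: Giant may be acquired after a sleepable lock... */
	sx_xlock(&example_sx);
	mtx_lock(&Giant);
	mtx_unlock(&Giant);
	sx_xunlock(&example_sx);

	/*
	 * ...or before one.  If we block on the sx lock here, rule 2
	 * says Giant is released while we sleep, so another thread
	 * holding the sx lock can still acquire Giant and make
	 * progress, avoiding deadlock.
	 */
	mtx_lock(&Giant);
	sx_xlock(&example_sx);
	sx_xunlock(&example_sx);
	mtx_unlock(&Giant);
}
#endif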

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <ddb/ddb.h>

#include <machine/stdarg.h>

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif

/* Easier to stay with the old names. */
#define	lo_list		lo_witness_data.lod_list
#define	lo_witness	lo_witness_data.lod_witness

/* Define this to check for blessed mutexes. */
#undef BLESSING

#define WITNESS_COUNT 1024
#define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4)
/*
 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
 * will hold LOCK_NCHILDREN * 2 locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define LOCK_CHILDCOUNT ((MAXCPU + 1024) * 2)

#define	WITNESS_NCHILDREN 6

struct witness_child_list_entry;

struct witness {
	const	char *w_name;
	struct	lock_class *w_class;
	STAILQ_ENTRY(witness) w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness) w_typelist;	/* Witnesses of a type. */
	struct	witness_child_list_entry *w_children;	/* Great evilness... */
	const	char *w_file;
	int	w_line;
	u_int	w_level;
	u_int	w_refcount;
	u_char	w_Giant_squawked:1;
	u_char	w_other_squawked:1;
	u_char	w_same_squawked:1;
	u_char	w_displayed:1;
};

struct witness_child_list_entry {
	struct	witness_child_list_entry *wcl_next;
	struct	witness *wcl_children[WITNESS_NCHILDREN];
	u_int	wcl_count;
};

STAILQ_HEAD(witness_list, witness);

#ifdef BLESSING
struct witness_blessed {
	const	char *b_lock1;
	const	char *b_lock2;
};
#endif

struct witness_order_list_entry {
	const	char *w_name;
	struct	lock_class *w_class;
};

#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static int	depart(struct witness *w);
static struct	witness *enroll(const char *description,
				struct lock_class *lock_class);
static int	insertchild(struct witness *parent, struct witness *child);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static int	itismychild(struct witness *parent, struct witness *child);
static void	removechild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static const char *fixup_filename(const char *file);
static struct	witness *witness_get(void);
static void	witness_free(struct witness *m);
static struct	witness_child_list_entry *witness_child_get(void);
static void	witness_child_free(struct witness_child_list_entry *wcl);
static struct	lock_list_entry *witness_lock_list_get(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct	lock_instance *find_instance(struct lock_list_entry *lock_list,
					     struct lock_object *lock);
static void	witness_list_lock(struct lock_instance *instance);
#ifdef DDB
static void	witness_leveldescendents(struct witness *parent, int level);
static void	witness_levelall(void);
static void	witness_displaydescendants(void(*)(const char *fmt, ...),
					   struct witness *, int indent);
static void	witness_display_list(void(*prnt)(const char *fmt, ...),
				     struct witness_list *list);
static void	witness_display(void(*)(const char *fmt, ...));
static void	witness_list(struct thread *td);
#endif

SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");

/*
 * If set to 0, witness is disabled.  If set to a non-zero value, witness
 * performs full lock order checking for all locks.  At runtime, this
 * value may be set to 0 to turn off witness.  Witness is not allowed to
 * be turned on again once it has been turned off, however.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
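
/*
 * Usage note (illustrative only): witness can be turned off at runtime
 * with "sysctl debug.witness.watch=0" or kept off from boot via the
 * loader tunable "debug.witness.watch".  Attempts to set the sysctl
 * back to a non-zero value are rejected with EINVAL by
 * sysctl_debug_witness_watch() below.
 */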

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is set to 1, it will cause the system
 * to drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is set to 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN,
    &witness_skipspin, 0, "");

static struct mtx w_mtx;
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
static struct witness_child_list_entry *w_child_free = NULL;
static struct lock_list_entry *w_lock_list_free = NULL;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt, w_child_free_cnt, w_child_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");
SYSCTL_INT(_debug_witness, OID_AUTO, child_free_cnt, CTLFLAG_RD,
    &w_child_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, child_cnt, CTLFLAG_RD, &w_child_cnt, 0,
    "");

static struct witness w_data[WITNESS_COUNT];
static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];

static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "allprison", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_mtx_sleep },
	{ "uidinfo struct", &lock_class_mtx_sleep },
#ifdef	HWPMC_HOOKS
	{ "pmc-sleep", &lock_class_mtx_sleep },
#endif
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_mtx_sleep },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Multicast - protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_mtx_sleep },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_mtx_sleep },
	{ "udpinp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_mtx_sleep },
	{ "tcpinp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * SLIP
	 */
	{ "slip_mtx", &lock_class_mtx_sleep },
	{ "slip sc_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_mtx_sleep },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },

	/*
	 * IEEE 802.11
	 */
	{ "802.11 com lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Network drivers
	 */
	{ "network driver", &lock_class_mtx_sleep },
	{ NULL, NULL },

	/*
	 * Netgraph
	 */
	{ "ng_node", &lock_class_mtx_sleep },
	{ "ng_worklist", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "system map", &lock_class_mtx_sleep },
	{ "vm page queue mutex", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * kqueue/VFS interaction
	 */
	{ "kqueue", &lock_class_mtx_sleep },
	{ "struct mount mtx", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
	{ "scrlock", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "pcib_mtx", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
#endif
	{ "scc_hwmtx", &lock_class_mtx_spin },
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "fast_taskqueue", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-per-proc", &lock_class_mtx_spin },
#endif
	{ "process slock", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "umtx lock", &lock_class_mtx_spin },
	{ "rm_spinlock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "turnstile lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	{ "syscons video lock", &lock_class_mtx_spin },
	{ "time lock", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#endif
	/*
	 * leaf locks
	 */
	{ "icu", &lock_class_mtx_spin },
#if defined(SMP) && defined(__sparc64__)
	{ "ipi", &lock_class_mtx_spin },
#endif
#ifdef __i386__
	{ "allpmaps", &lock_class_mtx_spin },
	{ "descriptor tables", &lock_class_mtx_spin },
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "mprof lock", &lock_class_mtx_spin },
	{ "kse lock", &lock_class_mtx_spin },
	{ "zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-leaf", &lock_class_mtx_spin },
#endif
	{ "blocked lock", &lock_class_mtx_spin },
	{ NULL, NULL },
	{ NULL, NULL }
};
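
/*
 * Illustrative only: a new static ordering is added as one more
 * NULL-terminated run of entries ahead of the final terminator above,
 * listed from first-acquired to last-acquired.  The lock names below
 * are hypothetical.
 */
#if 0
	/*
	 * mydev driver
	 */
	{ "mydev softc", &lock_class_mtx_sleep },
	{ "mydev queue", &lock_class_mtx_sleep },
	{ NULL, NULL },
#endif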

#ifdef BLESSING
/*
 * Pairs of locks which have been blessed.  Witness does not complain about
 * order problems with blessed lock pairs.
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif
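
/*
 * Illustrative only: with BLESSING defined, a pair of locks that may
 * legitimately be taken in either order would be listed like this
 * (hypothetical lock names):
 */
#if 0
static struct witness_blessed blessed_list[] = {
	{ "mydev rxlock", "mydev txlock" },
};
#endif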

/*
 * List of locks initialized prior to witness being initialized whose
 * enrollment is currently deferred.
 */
STAILQ_HEAD(, lock_object) pending_locks =
    STAILQ_HEAD_INITIALIZER(pending_locks);

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code assumes
 * that early boot is single-threaded, at least until after this routine is
 * completed.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = 0; i < WITNESS_COUNT; i++)
		witness_free(&w_data[i]);
	for (i = 0; i < WITNESS_CHILDCOUNT; i++)
		witness_child_free(&w_childdata[i]);
	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			if (!itismychild(w, w1))
				panic("Not enough memory for static orders!");
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	while (!STAILQ_EMPTY(&pending_locks)) {
		lock = STAILQ_FIRST(&pending_locks);
		STAILQ_REMOVE_HEAD(&pending_locks, lo_list);
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(lock->lo_type, LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)

static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = witness_watch;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value == witness_watch)
		return (0);
	if (value != 0)
		return (EINVAL);
	witness_watch = 0;
	return (0);
}

void
witness_init(struct lock_object *lock)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_initialize() by adding
	 * it to the pending_locks list.  If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch == 0 || panicstr != NULL ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		STAILQ_INSERT_TAIL(&pending_locks, lock, lo_list);
		lock->lo_flags |= LO_ENROLLPEND;
	} else
		lock->lo_witness = enroll(lock->lo_type, class);
}
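
/*
 * Illustrative only: witness_init() is not normally called directly; it
 * is reached from the lock-class initialization path when a lock is
 * created.  A mutex initialized as below (hypothetical name) gets
 * LO_WITNESS by default and is therefore enrolled, or queued on
 * pending_locks if created before witness_initialize() runs; passing
 * MTX_NOWITNESS would exempt it from checking.
 */
#if 0
	mtx_init(&sc->sc_mtx, "mydev softc", NULL, MTX_DEF);
#endif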

void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);
	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & (LO_WITNESS | LO_ENROLLPEND)) == LO_WITNESS &&
	    lock->lo_witness != NULL) {
		w = lock->lo_witness;
		mtx_lock_spin(&w_mtx);
		MPASS(w->w_refcount > 0);
		w->w_refcount--;

		/*
		 * Lock is already released if we have an allocation failure
		 * and depart() fails.
		 */
		if (w->w_refcount != 0 || depart(w))
			mtx_unlock_spin(&w_mtx);
	}

	/*
	 * If this lock is destroyed before witness is up and running,
	 * remove it from the pending list.
	 */
	if (lock->lo_flags & LO_ENROLLPEND) {
		STAILQ_REMOVE(&pending_locks, lock, lock_object, lo_list);
		lock->lo_flags &= ~LO_ENROLLPEND;
	}
}

#ifdef DDB
static void
witness_levelall(void)
{
	struct witness_list *list;
	struct witness *w, *w1;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_level = 0;
	}

	/*
	 * Look for locks with no parent and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		/*
		 * This is just an optimization; technically we could get
		 * away with just walking the w_all list each time.
		 */
		if (w->w_class->lc_flags & LC_SLEEPLOCK)
			list = &w_sleep;
		else
			list = &w_spin;
		STAILQ_FOREACH(w1, list, w_typelist) {
			if (isitmychild(w1, w))
				goto skip;
		}
		witness_leveldescendents(w, 0);
	skip:
		;	/* silence GCC 3.x */
	}
}

static void
witness_leveldescendents(struct witness *parent, int level)
{
	struct witness_child_list_entry *wcl;
	int i;

	if (parent->w_level < level)
		parent->w_level = level;
	level++;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_leveldescendents(wcl->wcl_children[i], level);
}

static void
witness_displaydescendants(void(*prnt)(const char *fmt, ...),
			   struct witness *parent, int indent)
{
	struct witness_child_list_entry *wcl;
	int i, level;

	level = parent->w_level;
	prnt("%-2d", level);
	for (i = 0; i < indent; i++)
		prnt(" ");
	if (parent->w_refcount > 0)
		prnt("%s", parent->w_name);
	else
		prnt("(dead)");
	if (parent->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	parent->w_displayed = 1;
	if (parent->w_refcount > 0) {
		if (parent->w_file != NULL)
			prnt(" -- last acquired @ %s:%d", parent->w_file,
			    parent->w_line);
	}
	prnt("\n");
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_displaydescendants(prnt,
			    wcl->wcl_children[i], indent + 1);
}

static void
witness_display_list(void(*prnt)(const char *fmt, ...),
		     struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_level > 0)
			continue;
		/*
		 * This lock has no ancestors; display its descendants.
		 */
		witness_displaydescendants(prnt, w, 0);
	}
}

static void
witness_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	witness_levelall();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_displayed = 0;
	}

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s\n", w->w_name);
	}
}
#endif /* DDB */

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == 0 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_type, lock1->lo_type);
	if (!itismychild(lock1->lo_witness, lock2->lo_witness))
		/* w_mtx was already dropped on the allocation failure. */
		return (ENOMEM);
	mtx_unlock_spin(&w_mtx);
	return (0);
}
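
/*
 * Illustrative only: a subsystem that requires a fixed ordering between
 * two of its own locks (hypothetical mutexes "a" and "b") can declare it
 * up front; EDOOFUS comes back if witness already believes the opposite
 * order.
 */
#if 0
	error = witness_defineorder(&a.lock_object, &b.lock_object);
#endif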

void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1, *lock2;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.  This
	 * function shouldn't even be called for try locks, so panic if
	 * that happens.
	 */
	if (flags & LOP_TRYLOCK)
		panic("%s should not be called for try lock operations",
		    __func__);

	w = lock->lo_witness;
	class = LOCK_CLASS(lock);
	td = curthread;
	file = fixup_filename(file);

	if (class->lc_flags & LC_SLEEPLOCK) {
		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		if (td->td_sleeplocks == NULL)
			return;
		lock_list = &td->td_sleeplocks;
	} else {
		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  We check this in both if clauses
		 * here as unifying the check would require us to use a
		 * critical section to ensure we don't migrate while doing
		 * the check.  Note that if this is not the first lock, we
		 * are already in a critical section and are safe for the
		 * rest of the check.
		 */
		if (PCPU_GET(spinlocks) == NULL)
			return;
		lock_list = PCPU_PTR(spinlocks);
	}

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(*lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.
	 */
	if (flags & LOP_TRYLOCK)
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	w1 = lock1->li_lock->lo_witness;
	if (w1 == w) {
		if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK) ||
		    (flags & LOP_DUPOK))
			return;
		w->w_same_squawked = 1;
		printf("acquiring duplicate lock of same type: \"%s\"\n",
			lock->lo_type);
		printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
		    lock1->li_file, lock1->li_line);
		printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
#ifdef KDB
		goto debugger;
#else
		return;
#endif
	}
	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);
	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w)) {
		mtx_unlock_spin(&w_mtx);
		return;
	}
	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];
			w1 = lock1->li_lock->lo_witness;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}
			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.lock_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.lock_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;
			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.lock_object)
				goto reversal;
			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:
			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
			mtx_unlock_spin(&w_mtx);
#ifdef BLESSING
			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				return;
#endif
			if (lock1->li_lock == &Giant.lock_object) {
				if (w1->w_Giant_squawked)
					return;
				else
					w1->w_Giant_squawked = 1;
			} else {
				if (w1->w_other_squawked)
					return;
				else
					w1->w_other_squawked = 1;
			}
			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.lock_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");
			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_type, lock2->li_file,
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			}
#ifdef KDB
			goto debugger;
#else
			return;
#endif
		}
	}
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(lock1->li_lock == &Giant.lock_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    lock->lo_type, lock1->li_lock->lo_type);
		if (!itismychild(lock1->li_lock->lo_witness, w))
			/* Witness is dead. */
			return;
	}
	mtx_unlock_spin(&w_mtx);
	return;

#ifdef KDB
debugger:
	if (witness_trace)
		kdb_backtrace();
	if (witness_kdb)
		kdb_enter(KDB_WHY_WITNESS, __func__);
#endif
}

void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;
	file = fixup_filename(file);

	/* Determine lock list for this lock. */
	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((flags & LOP_TRYLOCK) == 0)
		panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
		    lock->lo_name, file, line);
	if ((class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) != 0)
		panic("upgrade of exclusive lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) == 0)
		panic("downgrade of shared lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags &= ~LI_EXCLUSIVE;
}

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	td = curthread;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}
	panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
	    file, line);
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while exclusively locked from %s:%d\n",
		    instance->li_file, instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while share locked from %s:%d\n", instance->li_file,
		    instance->li_line);
		panic("share->uexcl");
	}

	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/* If this lock list entry is now empty, free it. */
	if ((*lock_list)->ll_count == 0) {
		lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch == 0 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.lock_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1);
		}
	if (PCPU_GET(spinlocks) != NULL) {
		/*
		 * Since we already hold a spin lock, preemption is
		 * already blocked.
		 */
		if (n == 0) {
			va_start(ap, fmt);
			vprintf(fmt, ap);
			va_end(ap);
			printf(" with the following");
			if (flags & WARN_SLEEPOK)
				printf(" non-sleepable");
			printf(" locks held:\n");
		}
		n += witness_list_locks(PCPU_PTR(spinlocks));
	}
	if (flags & WARN_PANIC && n)
		panic("witness_warn");
#ifdef KDB
	else if (witness_kdb && n)
		kdb_enter(KDB_WHY_WITNESS, __func__);
	else if (witness_trace && n)
		kdb_backtrace();
#endif
	return (n);
}
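
/*
 * Illustrative only: a typical caller asserts that nothing unexpected is
 * held before starting an operation that may sleep, exempting Giant and
 * other sleepable locks:
 */
#if 0
	witness_warn(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "malloc(M_WAITOK)");
#endif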

const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;

	if (witness_watch == 0 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_name == description || (w->w_refcount > 0 &&
		    strcmp(description, w->w_name) == 0)) {
			w->w_refcount++;
			mtx_unlock_spin(&w_mtx);
			if (lock_class != w->w_class)
				panic(
				"lock (%s) %s does not match earlier (%s) lock",
				    description, lock_class->lc_name,
				    w->w_class->lc_name);
			return (w);
		}
	}
	if ((w = witness_get()) == NULL)
		goto out;
	w->w_name = description;
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK) {
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
		w_spin_cnt++;
	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
		w_sleep_cnt++;
	} else {
		mtx_unlock_spin(&w_mtx);
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);
	}
	mtx_unlock_spin(&w_mtx);
out:
	/*
	 * We issue a warning for any spin locks not defined in the static
	 * order list as a way to discourage their use (folks should really
	 * be using non-spin mutexes most of the time).  However, several
	 * 3rd party device drivers use spin locks because that is all they
	 * have available on Windows and Linux and they think that normal
	 * mutexes are insufficient.
	 */
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_spin_warn)
		printf("WITNESS: spin lock %s not in order list\n",
		    description);
	return (w);
}

/* Don't let the door bang you on the way out... */
static int
depart(struct witness *w)
{
	struct witness_child_list_entry *wcl, *nwcl;
	struct witness_list *list;
	struct witness *parent;

	MPASS(w->w_refcount == 0);
	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
		list = &w_sleep;
		w_sleep_cnt--;
	} else {
		list = &w_spin;
		w_spin_cnt--;
	}
	/*
	 * First, we run through the entire tree looking for any
	 * witnesses that the outgoing witness is a child of.  For
	 * each parent that we find, we remove the outgoing witness
	 * from that parent's child list.
	 */
	STAILQ_FOREACH(parent, list, w_typelist) {
		if (!isitmychild(parent, w))
			continue;
		removechild(parent, w);
	}

	/*
	 * Now we go through and free up the child list of the
	 * outgoing witness.
	 */
	for (wcl = w->w_children; wcl != NULL; wcl = nwcl) {
		nwcl = wcl->wcl_next;
		w_child_cnt--;
		witness_child_free(wcl);
	}

	/*
	 * Detach from various lists and free.
	 */
	STAILQ_REMOVE(list, w, witness, w_typelist);
	STAILQ_REMOVE(&w_all, w, witness, w_list);
	witness_free(w);

	return (1);
}

/*
 * Add "child" as a direct child of "parent".  Returns false if
 * we fail due to out of memory.
 */
static int
insertchild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry **wcl;

	MPASS(child != NULL && parent != NULL);

	/*
	 * Insert "child" after "parent"
	 */
	wcl = &parent->w_children;
	while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
		wcl = &(*wcl)->wcl_next;
	if (*wcl == NULL) {
		*wcl = witness_child_get();
		if (*wcl == NULL)
			return (0);
		w_child_cnt++;
	}
	(*wcl)->wcl_children[(*wcl)->wcl_count++] = child;

	return (1);
}

static int
itismychild(struct witness *parent, struct witness *child)
{
	struct witness_list *list;

	MPASS(child != NULL && parent != NULL);
	if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
	    (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
		panic(
		"%s: parent (%s) and child (%s) are not the same lock type",
		    __func__, parent->w_class->lc_name,
		    child->w_class->lc_name);

	if (!insertchild(parent, child))
		return (0);

	if (parent->w_class->lc_flags & LC_SLEEPLOCK)
		list = &w_sleep;
	else
		list = &w_spin;
	return (1);
}
1583 
1584 static void
1585 removechild(struct witness *parent, struct witness *child)
1586 {
1587 	struct witness_child_list_entry **wcl, *wcl1;
1588 	int i;
1589 
1590 	for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
1591 		for (i = 0; i < (*wcl)->wcl_count; i++)
1592 			if ((*wcl)->wcl_children[i] == child)
1593 				goto found;
1594 	return;
1595 found:
1596 	(*wcl)->wcl_count--;
1597 	if ((*wcl)->wcl_count > i)
1598 		(*wcl)->wcl_children[i] =
1599 		    (*wcl)->wcl_children[(*wcl)->wcl_count];
1600 	MPASS((*wcl)->wcl_children[i] != NULL);
1601 	if ((*wcl)->wcl_count != 0)
1602 		return;
1603 	wcl1 = *wcl;
1604 	*wcl = wcl1->wcl_next;
1605 	w_child_cnt--;
1606 	witness_child_free(wcl1);
1607 }
1608 
1609 static int
1610 isitmychild(struct witness *parent, struct witness *child)
1611 {
1612 	struct witness_child_list_entry *wcl;
1613 	int i;
1614 
1615 	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
1616 		for (i = 0; i < wcl->wcl_count; i++) {
1617 			if (wcl->wcl_children[i] == child)
1618 				return (1);
1619 		}
1620 	}
1621 	return (0);
1622 }
1623 
1624 static int
1625 isitmydescendant(struct witness *parent, struct witness *child)
1626 {
1627 	struct witness_child_list_entry *wcl;
1628 	int i, j;
1629 
1630 	if (isitmychild(parent, child))
1631 		return (1);
1632 	j = 0;
1633 	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
1634 		MPASS(j < 1000);
1635 		for (i = 0; i < wcl->wcl_count; i++) {
1636 			if (isitmydescendant(wcl->wcl_children[i], child))
1637 				return (1);
1638 		}
1639 		j++;
1640 	}
1641 	return (0);
1642 }
1643 
1644 #ifdef BLESSING
1645 static int
1646 blessed(struct witness *w1, struct witness *w2)
1647 {
1648 	int i;
1649 	struct witness_blessed *b;
1650 
1651 	for (i = 0; i < blessed_count; i++) {
1652 		b = &blessed_list[i];
1653 		if (strcmp(w1->w_name, b->b_lock1) == 0) {
1654 			if (strcmp(w2->w_name, b->b_lock2) == 0)
1655 				return (1);
1656 			continue;
1657 		}
1658 		if (strcmp(w1->w_name, b->b_lock2) == 0)
1659 			if (strcmp(w2->w_name, b->b_lock1) == 0)
1660 				return (1);
1661 	}
1662 	return (0);
1663 }
1664 #endif
1665 
1666 static struct witness *
1667 witness_get(void)
1668 {
1669 	struct witness *w;
1670 
1671 	if (witness_watch == 0) {
1672 		mtx_unlock_spin(&w_mtx);
1673 		return (NULL);
1674 	}
1675 	if (STAILQ_EMPTY(&w_free)) {
1676 		witness_watch = 0;
1677 		mtx_unlock_spin(&w_mtx);
1678 		printf("%s: witness exhausted\n", __func__);
1679 		return (NULL);
1680 	}
1681 	w = STAILQ_FIRST(&w_free);
1682 	STAILQ_REMOVE_HEAD(&w_free, w_list);
1683 	w_free_cnt--;
1684 	bzero(w, sizeof(*w));
1685 	return (w);
1686 }
1687 
1688 static void
1689 witness_free(struct witness *w)
1690 {
1691 
1692 	STAILQ_INSERT_HEAD(&w_free, w, w_list);
1693 	w_free_cnt++;
1694 }
1695 
1696 static struct witness_child_list_entry *
1697 witness_child_get(void)
1698 {
1699 	struct witness_child_list_entry *wcl;
1700 
1701 	if (witness_watch == 0) {
1702 		mtx_unlock_spin(&w_mtx);
1703 		return (NULL);
1704 	}
1705 	wcl = w_child_free;
1706 	if (wcl == NULL) {
1707 		witness_watch = 0;
1708 		mtx_unlock_spin(&w_mtx);
1709 		printf("%s: witness exhausted\n", __func__);
1710 		return (NULL);
1711 	}
1712 	w_child_free = wcl->wcl_next;
1713 	w_child_free_cnt--;
1714 	bzero(wcl, sizeof(*wcl));
1715 	return (wcl);
1716 }
1717 
1718 static void
1719 witness_child_free(struct witness_child_list_entry *wcl)
1720 {
1721 
1722 	wcl->wcl_next = w_child_free;
1723 	w_child_free = wcl;
1724 	w_child_free_cnt++;
1725 }
1726 
static struct lock_list_entry *
witness_lock_list_get(void)
{
	struct lock_list_entry *lle;

	if (witness_watch == 0)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	lle = w_lock_list_free;
	if (lle == NULL) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_lock_list_free = lle->ll_next;
	mtx_unlock_spin(&w_mtx);
	bzero(lle, sizeof(*lle));
	return (lle);
}

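/*
 * Return a lock list entry to the free list, taking w_mtx internally.
 */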
static void
witness_lock_list_free(struct lock_list_entry *lle)
{

	mtx_lock_spin(&w_mtx);
	lle->ll_next = w_lock_list_free;
	w_lock_list_free = lle;
	mtx_unlock_spin(&w_mtx);
}

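/*
 * Search a list of held locks for the instance of a given lock object,
 * scanning each entry from the most recently acquired lock down.  Returns
 * NULL if the lock is not held.
 */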
static struct lock_instance *
find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
{
	struct lock_list_entry *lle;
	struct lock_instance *instance;
	int i;

	for (lle = lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			instance = &lle->ll_children[i];
			if (instance->li_lock == lock)
				return (instance);
		}
	return (NULL);
}

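/*
 * Print a one-line description of a held lock: shared/exclusive state,
 * lock class and name, recursion count, and the file and line where it
 * was acquired.
 */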
static void
witness_list_lock(struct lock_instance *instance)
{
	struct lock_object *lock;

	lock = instance->li_lock;
	printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
	if (lock->lo_type != lock->lo_name)
		printf(" (%s)", lock->lo_type);
	printf(" r = %d (%p) locked @ %s:%d\n",
	    instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
	    instance->li_line);
}

#ifdef DDB
static int
witness_thread_has_locks(struct thread *td)
{

	return (td->td_sleeplocks != NULL);
}

static int
witness_proc_has_locks(struct proc *p)
{
	struct thread *td;

	FOREACH_THREAD_IN_PROC(p, td) {
		if (witness_thread_has_locks(td))
			return (1);
	}
	return (0);
}
#endif

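/*
 * List all of the locks on a given lock list and return the number of
 * locks that were listed.
 */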
int
witness_list_locks(struct lock_list_entry **lock_list)
{
	struct lock_list_entry *lle;
	int i, nheld;

	nheld = 0;
	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			witness_list_lock(&lle->ll_children[i]);
			nheld++;
		}
	return (nheld);
}

/*
 * This is a bit risky at best.  We call this function when we have timed
 * out acquiring a spin lock, and we assume that the other CPU is stuck
 * with this lock held.  So, we go groveling around in the other CPU's
 * per-cpu data to try to find the lock instance for this spin lock to
 * see when it was last acquired.
 */
void
witness_display_spinlock(struct lock_object *lock, struct thread *owner)
{
	struct lock_instance *instance;
	struct pcpu *pc;

	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
		return;
	pc = pcpu_find(owner->td_oncpu);
	instance = find_instance(pc->pc_spinlocks, lock);
	if (instance != NULL)
		witness_list_lock(instance);
}

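/*
 * Save the file and line where a held lock was acquired so that they can
 * be reinstated by witness_restore(), e.g. across a point where the lock
 * is temporarily dropped and reacquired.  Panics if the lock is not held.
 */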
void
witness_save(struct lock_object *lock, const char **filep, int *linep)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	*filep = instance->li_file;
	*linep = instance->li_line;
}

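/*
 * Restore previously saved file/line acquisition information for a held
 * lock, updating both the witness and the lock instance.  Panics if the
 * lock is not held.
 */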
void
witness_restore(struct lock_object *lock, const char *file, int line)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	lock->lo_witness->w_file = file;
	lock->lo_witness->w_line = line;
	instance->li_file = file;
	instance->li_line = line;
}

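/*
 * Assert that a lock is in the state described by 'flags' (for example
 * LA_LOCKED or LA_XLOCKED | LA_NOTRECURSED) and panic, identifying the
 * caller's file and line, if it is not.  This is a no-op unless
 * INVARIANT_SUPPORT is defined.
 */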
void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else {
		panic("Lock (%s) %s is not sleep or spin!",
		    class->lc_name, lock->lo_name);
	}
	file = fixup_filename(file);
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			panic("Lock (%s) %s locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			panic("Lock (%s) %s not locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
			break;
		}
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("Lock (%s) %s exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			panic("Lock (%s) %s not recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			panic("Lock (%s) %s recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	default:
		panic("Invalid lock assertion at %s:%d.", file, line);
	}
#endif	/* INVARIANT_SUPPORT */
}

#ifdef DDB
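/*
 * DDB helper to list the locks held by a thread.  Sleep locks are always
 * listed; see below for why spin locks are only handled for curthread.
 */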
static void
witness_list(struct thread *td)
{

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	KASSERT(kdb_active, ("%s: not in the debugger", __func__));

	if (witness_watch == 0)
		return;

	witness_list_locks(&td->td_sleeplocks);

	/*
	 * We only handle spinlocks if td == curthread.  This is somewhat
	 * broken if td is currently executing on some other CPU and holds
	 * spin locks, as we won't display those locks.  If we had an MI way
	 * of getting the per-CPU data for a given CPU, then we could use
	 * td->td_oncpu to get that CPU's list of spinlocks for this thread
	 * and "fix" this.
	 *
	 * That still wouldn't really fix this unless we locked the scheduler
	 * lock or stopped the other CPU to make sure it wasn't changing the
	 * list out from under us.  It is probably best just not to try to
	 * handle threads on other CPUs for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		witness_list_locks(PCPU_PTR(spinlocks));
}

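/*
 * Implements "show locks [addr]": list the locks held by the thread at
 * the given address, defaulting to the thread the debugger is stopped in.
 */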
DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;

	if (have_addr)
		td = db_lookup_thread(addr, TRUE);
	else
		td = kdb_thread;
	witness_list(td);
}

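/*
 * Implements "show alllocks": list the locks held by every thread in the
 * system that holds any.
 */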
DB_SHOW_COMMAND(alllocks, db_witness_list_all)
{
	struct thread *td;
	struct proc *p;

	/*
	 * Walk every process and thread in the system, listing only those
	 * that actually hold sleep locks so that the output stays readable.
	 */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!witness_proc_has_locks(p))
			continue;
		FOREACH_THREAD_IN_PROC(p, td) {
			if (!witness_thread_has_locks(td))
				continue;
			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
			    td->td_name, td, td->td_tid);
			witness_list(td);
		}
	}
}

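/*
 * Implements "show witness": dump the known lock order relationships.
 */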
DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_display(db_printf);
}
#endif