1 /*-
2  * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  * 3. Berkeley Software Design Inc's name may not be used to endorse or
13  *    promote products derived from this software without specific prior
14  *    written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29  *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30  */
31 
32 /*
33  * Implementation of the `witness' lock verifier.  Originally implemented for
34  * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
35  * classes in FreeBSD.
36  */
37 
38 /*
39  *	Main Entry: witness
40  *	Pronunciation: 'wit-n&s
41  *	Function: noun
42  *	Etymology: Middle English witnesse, from Old English witnes knowledge,
43  *	    testimony, witness, from 2wit
44  *	Date: before 12th century
45  *	1 : attestation of a fact or event : TESTIMONY
46  *	2 : one that gives evidence; specifically : one who testifies in
47  *	    a cause or before a judicial tribunal
48  *	3 : one asked to be present at a transaction so as to be able to
49  *	    testify to its having taken place
50  *	4 : one who has personal knowledge of something
51  *	5 a : something serving as evidence or proof : SIGN
52  *	  b : public affirmation by word or example of usually
53  *	      religious faith or conviction <the heroic witness to divine
54  *	      life -- Pilot>
55  *	6 capitalized : a member of the Jehovah's Witnesses
56  */
57 
58 /*
59  * Special rules concerning Giant and lock orders:
60  *
61  * 1) Giant must be acquired before any other mutexes.  Stated another way,
62  *    no other mutex may be held when Giant is acquired.
63  *
64  * 2) Giant must be released when blocking on a sleepable lock.
65  *
66  * This rule is less obvious, but is a result of Giant providing the same
67  * semantics as spl().  Basically, when a thread sleeps, it must release
68  * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
69  * 2).
70  *
71  * 3) Giant may be acquired before or after sleepable locks.
72  *
73  * This rule is also not quite as obvious.  Giant may be acquired after
74  * a sleepable lock because it is a non-sleepable lock and non-sleepable
75  * locks may always be acquired while holding a sleepable lock.  The second
76  * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
77  * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
78  * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
79  * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
80  * execute.  Thus, acquiring Giant both before and after a sleepable lock
81  * will not result in a lock order reversal.
82  */
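
/*
 * Illustrative sketch of rules 2) and 3), using a hypothetical sleepable
 * sx lock "example_sx".  Either acquisition order is safe: if the
 * sx_xlock() below blocks, Giant is released for the duration of the
 * sleep, as rule 2) requires.
 *
 *	mtx_lock(&Giant);
 *	sx_xlock(&example_sx);		(Giant dropped here if we sleep)
 *	...
 *	sx_xunlock(&example_sx);
 *	mtx_unlock(&Giant);
 */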
83 
84 #include <sys/cdefs.h>
85 __FBSDID("$FreeBSD$");
86 
87 #include "opt_ddb.h"
88 #include "opt_witness.h"
89 
90 #include <sys/param.h>
91 #include <sys/bus.h>
92 #include <sys/kernel.h>
93 #include <sys/ktr.h>
94 #include <sys/lock.h>
95 #include <sys/malloc.h>
96 #include <sys/mutex.h>
97 #include <sys/proc.h>
98 #include <sys/sysctl.h>
99 #include <sys/systm.h>
100 
101 #include <ddb/ddb.h>
102 
103 #include <machine/stdarg.h>
104 
105 /* Define this to check for blessed mutexes */
106 #undef BLESSING
107 
108 #define WITNESS_COUNT 200
109 #define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4)
110 /*
111  * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
112  * will hold LOCK_NCHILDREN * 2 locks.  We handle failure ok, and we should
113  * probably be safe for the most part, but it's still a SWAG.
114  */
115 #define LOCK_CHILDCOUNT ((MAXCPU + 1024) * 2)
116 
117 #define	WITNESS_NCHILDREN 6
118 
119 struct witness_child_list_entry;
120 
121 struct witness {
122 	const	char *w_name;
123 	struct	lock_class *w_class;
124 	STAILQ_ENTRY(witness) w_list;		/* List of all witnesses. */
125 	STAILQ_ENTRY(witness) w_typelist;	/* Witnesses of a type. */
126 	struct	witness_child_list_entry *w_children;	/* Great evilness... */
127 	const	char *w_file;
128 	int	w_line;
129 	u_int	w_level;
130 	u_int	w_refcount;
131 	u_char	w_Giant_squawked:1;
132 	u_char	w_other_squawked:1;
133 	u_char	w_same_squawked:1;
134 	u_char	w_displayed:1;
135 };
136 
137 struct witness_child_list_entry {
138 	struct	witness_child_list_entry *wcl_next;
139 	struct	witness *wcl_children[WITNESS_NCHILDREN];
140 	u_int	wcl_count;
141 };
142 
143 STAILQ_HEAD(witness_list, witness);
144 
145 #ifdef BLESSING
146 struct witness_blessed {
147 	const	char *b_lock1;
148 	const	char *b_lock2;
149 };
150 #endif
151 
152 struct witness_order_list_entry {
153 	const	char *w_name;
154 	struct	lock_class *w_class;
155 };
156 
157 #ifdef BLESSING
158 static int	blessed(struct witness *, struct witness *);
159 #endif
160 static int	depart(struct witness *w);
161 static struct	witness *enroll(const char *description,
162 				struct lock_class *lock_class);
163 static int	insertchild(struct witness *parent, struct witness *child);
164 static int	isitmychild(struct witness *parent, struct witness *child);
165 static int	isitmydescendant(struct witness *parent, struct witness *child);
166 static int	itismychild(struct witness *parent, struct witness *child);
167 static int	rebalancetree(struct witness_list *list);
168 static void	removechild(struct witness *parent, struct witness *child);
169 static int	reparentchildren(struct witness *newparent,
170 		    struct witness *oldparent);
171 static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
172 static void	witness_displaydescendants(void(*)(const char *fmt, ...),
173 					   struct witness *, int indent);
174 static const char *fixup_filename(const char *file);
175 static void	witness_leveldescendents(struct witness *parent, int level);
176 static void	witness_levelall(void);
177 static struct	witness *witness_get(void);
178 static void	witness_free(struct witness *m);
179 static struct	witness_child_list_entry *witness_child_get(void);
180 static void	witness_child_free(struct witness_child_list_entry *wcl);
181 static struct	lock_list_entry *witness_lock_list_get(void);
182 static void	witness_lock_list_free(struct lock_list_entry *lle);
183 static struct	lock_instance *find_instance(struct lock_list_entry *lock_list,
184 					     struct lock_object *lock);
185 static void	witness_list_lock(struct lock_instance *instance);
186 #ifdef DDB
187 static void	witness_list(struct thread *td);
188 static void	witness_display_list(void(*prnt)(const char *fmt, ...),
189 				     struct witness_list *list);
190 static void	witness_display(void(*)(const char *fmt, ...));
191 #endif
192 
193 MALLOC_DEFINE(M_WITNESS, "witness", "witness structure");
194 
195 /*
196  * If set to 0, witness is disabled.  If set to 1, witness performs full lock
197  * order checking for all locks.  If set to 2 or higher, then witness skips
198  * the full lock order check if the lock being acquired is at a higher level
199  * (i.e. farther down in the tree) than the current lock.  This last mode is
200  * somewhat experimental and not considered fully safe.  At runtime, this
201  * value may be set to 0 to turn off witness.  witness is not allowed be
202  * turned on once it is turned off, however.
203  */
204 static int witness_watch = 1;
205 TUNABLE_INT("debug.witness_watch", &witness_watch);
206 SYSCTL_PROC(_debug, OID_AUTO, witness_watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
207     sysctl_debug_witness_watch, "I", "witness is watching lock operations");
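
/*
 * Example usage (a sketch, assuming the standard loader(8) and sysctl(8)
 * tools): witness can be disabled at boot or at runtime, but per the rule
 * above it can never be re-enabled once turned off.
 *
 *	debug.witness_watch=0		(in /boot/loader.conf)
 *	sysctl debug.witness_watch=0	(at runtime, as root)
 */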
208 
209 #ifdef DDB
210 /*
211  * When DDB is enabled and witness_ddb is set to 1, the system will drop
212  * into the kernel debugger when:
213  *	- a lock hierarchy violation occurs
214  *	- locks are held when going to sleep.
215  */
216 #ifdef WITNESS_DDB
217 int	witness_ddb = 1;
218 #else
219 int	witness_ddb = 0;
220 #endif
221 TUNABLE_INT("debug.witness_ddb", &witness_ddb);
222 SYSCTL_INT(_debug, OID_AUTO, witness_ddb, CTLFLAG_RW, &witness_ddb, 0, "");
223 
224 /*
225  * When DDB is enabled and witness_trace is set to 1, the system will
226  * print a stack trace when:
227  *	- a lock hierarchy violation occurs
228  *	- locks are held when going to sleep.
229  */
230 int	witness_trace = 1;
231 TUNABLE_INT("debug.witness_trace", &witness_trace);
232 SYSCTL_INT(_debug, OID_AUTO, witness_trace, CTLFLAG_RW, &witness_trace, 0, "");
233 #endif /* DDB */
234 
235 #ifdef WITNESS_SKIPSPIN
236 int	witness_skipspin = 1;
237 #else
238 int	witness_skipspin = 0;
239 #endif
240 TUNABLE_INT("debug.witness_skipspin", &witness_skipspin);
241 SYSCTL_INT(_debug, OID_AUTO, witness_skipspin, CTLFLAG_RDTUN, &witness_skipspin, 0,
242     "");
243 
244 static struct mtx w_mtx;
245 static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
246 static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
247 static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
248 static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
249 static struct witness_child_list_entry *w_child_free = NULL;
250 static struct lock_list_entry *w_lock_list_free = NULL;
251 
252 static struct witness w_data[WITNESS_COUNT];
253 static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
254 static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
255 
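/*
 * The static lock order lists.  Within each NULL-terminated group below,
 * witness_initialize() enrolls consecutive entries as parent and child,
 * so each named lock is ordered before the locks that follow it in its
 * group.  A second NULL terminates the whole array.
 */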
256 static struct witness_order_list_entry order_lists[] = {
257 	{ "proctree", &lock_class_sx },
258 	{ "allproc", &lock_class_sx },
259 	{ "Giant", &lock_class_mtx_sleep },
260 	{ "filedesc structure", &lock_class_mtx_sleep },
261 	{ "pipe mutex", &lock_class_mtx_sleep },
262 	{ "sigio lock", &lock_class_mtx_sleep },
263 	{ "process group", &lock_class_mtx_sleep },
264 	{ "process lock", &lock_class_mtx_sleep },
265 	{ "session", &lock_class_mtx_sleep },
266 	{ "uidinfo hash", &lock_class_mtx_sleep },
267 	{ "uidinfo struct", &lock_class_mtx_sleep },
268 	{ "allprison", &lock_class_mtx_sleep },
269 	{ NULL, NULL },
270 	/*
271 	 * spin locks
272 	 */
273 #ifdef SMP
274 	{ "ap boot", &lock_class_mtx_spin },
275 #endif
276 	{ "sio", &lock_class_mtx_spin },
277 #ifdef __i386__
278 	{ "cy", &lock_class_mtx_spin },
279 #endif
280 	{ "sabtty", &lock_class_mtx_spin },
281 	{ "zstty", &lock_class_mtx_spin },
282 	{ "ng_node", &lock_class_mtx_spin },
283 	{ "ng_worklist", &lock_class_mtx_spin },
284 	{ "taskqueue_fast", &lock_class_mtx_spin },
285 	{ "intr table", &lock_class_mtx_spin },
286 	{ "ithread table lock", &lock_class_mtx_spin },
287 	{ "sched lock", &lock_class_mtx_spin },
288 	{ "turnstile chain", &lock_class_mtx_spin },
289 	{ "td_contested", &lock_class_mtx_spin },
290 	{ "callout", &lock_class_mtx_spin },
291 	{ "entropy harvest", &lock_class_mtx_spin },
292 	{ "entropy harvest buffers", &lock_class_mtx_spin },
293 	/*
294 	 * leaf locks
295 	 */
296 	{ "allpmaps", &lock_class_mtx_spin },
297 	{ "vm page queue free mutex", &lock_class_mtx_spin },
298 	{ "icu", &lock_class_mtx_spin },
299 #ifdef SMP
300 	{ "smp rendezvous", &lock_class_mtx_spin },
301 #if defined(__i386__) || defined(__amd64__)
302 	{ "tlb", &lock_class_mtx_spin },
303 	{ "lazypmap", &lock_class_mtx_spin },
304 #endif
305 #ifdef __sparc64__
306 	{ "ipi", &lock_class_mtx_spin },
307 #endif
308 #endif
309 	{ "clk", &lock_class_mtx_spin },
310 	{ "mutex profiling lock", &lock_class_mtx_spin },
311 	{ "kse zombie lock", &lock_class_mtx_spin },
312 	{ "ALD Queue", &lock_class_mtx_spin },
313 #ifdef __ia64__
314 	{ "MCA spin lock", &lock_class_mtx_spin },
315 #endif
316 #if defined(__i386__) || defined(__amd64__)
317 	{ "pcicfg", &lock_class_mtx_spin },
318 #endif
319 	{ NULL, NULL },
320 	{ NULL, NULL }
321 };
322 
323 #ifdef BLESSING
324 /*
325  * Pairs of locks which have been blessed
326  * Don't complain about order problems with blessed locks
327  */
328 static struct witness_blessed blessed_list[] = {
329 };
330 static int blessed_count =
331 	sizeof(blessed_list) / sizeof(struct witness_blessed);
332 #endif
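
/*
 * A hypothetical blessed entry would pair the two lock names whose
 * relative order should never be complained about, e.g.:
 *
 *	static struct witness_blessed blessed_list[] = {
 *		{ "firstlock", "secondlock" },
 *	};
 *
 * blessed() checks both directions, so one entry covers either order.
 */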
333 
334 /*
335  * List of all locks in the system.
336  */
337 TAILQ_HEAD(, lock_object) all_locks = TAILQ_HEAD_INITIALIZER(all_locks);
338 
339 static struct mtx all_mtx = {
340 	{ &lock_class_mtx_sleep,	/* mtx_object.lo_class */
341 	  "All locks list",		/* mtx_object.lo_name */
342 	  "All locks list",		/* mtx_object.lo_type */
343 	  LO_INITIALIZED,		/* mtx_object.lo_flags */
344 	  { NULL, NULL },		/* mtx_object.lo_list */
345 	  NULL },			/* mtx_object.lo_witness */
346 	MTX_UNOWNED, 0			/* mtx_lock, mtx_recurse */
347 };
348 
349 /*
350  * This global is set to 0 once it becomes safe to use the witness code.
351  */
352 static int witness_cold = 1;
353 
354 /*
355  * Global variables for bookkeeping.
356  */
357 static int lock_cur_cnt;
358 static int lock_max_cnt;
359 
360 /*
361  * The WITNESS-enabled diagnostic code.
362  */
363 static void
364 witness_initialize(void *dummy __unused)
365 {
366 	struct lock_object *lock;
367 	struct witness_order_list_entry *order;
368 	struct witness *w, *w1;
369 	int i;
370 
371 	/*
372 	 * We have to release Giant before initializing its witness
373 	 * structure so that WITNESS doesn't get confused.
374 	 */
375 	mtx_unlock(&Giant);
376 	mtx_assert(&Giant, MA_NOTOWNED);
377 
378 	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
379 	TAILQ_INSERT_HEAD(&all_locks, &all_mtx.mtx_object, lo_list);
380 	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
381 	    MTX_NOWITNESS);
382 	for (i = 0; i < WITNESS_COUNT; i++)
383 		witness_free(&w_data[i]);
384 	for (i = 0; i < WITNESS_CHILDCOUNT; i++)
385 		witness_child_free(&w_childdata[i]);
386 	for (i = 0; i < LOCK_CHILDCOUNT; i++)
387 		witness_lock_list_free(&w_locklistdata[i]);
388 
389 	/* First add in all the specified order lists. */
390 	for (order = order_lists; order->w_name != NULL; order++) {
391 		w = enroll(order->w_name, order->w_class);
392 		if (w == NULL)
393 			continue;
394 		w->w_file = "order list";
395 		for (order++; order->w_name != NULL; order++) {
396 			w1 = enroll(order->w_name, order->w_class);
397 			if (w1 == NULL)
398 				continue;
399 			w1->w_file = "order list";
400 			if (!itismychild(w, w1))
401 				panic("Not enough memory for static orders!");
402 			w = w1;
403 		}
404 	}
405 
406 	/* Iterate through all locks and add them to witness. */
407 	mtx_lock(&all_mtx);
408 	TAILQ_FOREACH(lock, &all_locks, lo_list) {
409 		if (lock->lo_flags & LO_WITNESS)
410 			lock->lo_witness = enroll(lock->lo_type,
411 			    lock->lo_class);
412 		else
413 			lock->lo_witness = NULL;
414 	}
415 	mtx_unlock(&all_mtx);
416 
417 	/* Mark the witness code as being ready for use. */
418 	atomic_store_rel_int(&witness_cold, 0);
419 
420 	mtx_lock(&Giant);
421 }
422 SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)
423 
424 static int
425 sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
426 {
427 	int error, value;
428 
429 	value = witness_watch;
430 	error = sysctl_handle_int(oidp, &value, 0, req);
431 	if (error != 0 || req->newptr == NULL)
432 		return (error);
433 	error = suser(req->td);
434 	if (error != 0)
435 		return (error);
436 	if (value == witness_watch)
437 		return (0);
438 	if (value != 0)
439 		return (EINVAL);
440 	witness_watch = 0;
441 	return (0);
442 }
443 
444 void
445 witness_init(struct lock_object *lock)
446 {
447 	struct lock_class *class;
448 
449 	class = lock->lo_class;
450 	if (lock->lo_flags & LO_INITIALIZED)
451 		panic("%s: lock (%s) %s is already initialized", __func__,
452 		    class->lc_name, lock->lo_name);
453 	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
454 	    (class->lc_flags & LC_RECURSABLE) == 0)
455 		panic("%s: lock (%s) %s can not be recursable", __func__,
456 		    class->lc_name, lock->lo_name);
457 	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
458 	    (class->lc_flags & LC_SLEEPABLE) == 0)
459 		panic("%s: lock (%s) %s can not be sleepable", __func__,
460 		    class->lc_name, lock->lo_name);
461 	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
462 	    (class->lc_flags & LC_UPGRADABLE) == 0)
463 		panic("%s: lock (%s) %s can not be upgradable", __func__,
464 		    class->lc_name, lock->lo_name);
465 
466 	mtx_lock(&all_mtx);
467 	TAILQ_INSERT_TAIL(&all_locks, lock, lo_list);
468 	lock->lo_flags |= LO_INITIALIZED;
469 	lock_cur_cnt++;
470 	if (lock_cur_cnt > lock_max_cnt)
471 		lock_max_cnt = lock_cur_cnt;
472 	mtx_unlock(&all_mtx);
473 	if (!witness_cold && witness_watch != 0 && panicstr == NULL &&
474 	    (lock->lo_flags & LO_WITNESS) != 0)
475 		lock->lo_witness = enroll(lock->lo_type, class);
476 	else
477 		lock->lo_witness = NULL;
478 }
479 
480 void
481 witness_destroy(struct lock_object *lock)
482 {
483 	struct witness *w;
484 
485 	if (witness_cold)
486 		panic("lock (%s) %s destroyed while witness_cold",
487 		    lock->lo_class->lc_name, lock->lo_name);
488 	if ((lock->lo_flags & LO_INITIALIZED) == 0)
489 		panic("%s: lock (%s) %s is not initialized", __func__,
490 		    lock->lo_class->lc_name, lock->lo_name);
491 
492 	/* XXX: need to verify that no one holds the lock */
493 	w = lock->lo_witness;
494 	if (w != NULL) {
495 		mtx_lock_spin(&w_mtx);
496 		MPASS(w->w_refcount > 0);
497 		w->w_refcount--;
498 
499 		/*
500 		 * Lock is already released if we have an allocation failure
501 		 * and depart() fails.
502 		 */
503 		if (w->w_refcount != 0 || depart(w))
504 			mtx_unlock_spin(&w_mtx);
505 	}
506 
507 	mtx_lock(&all_mtx);
508 	lock_cur_cnt--;
509 	TAILQ_REMOVE(&all_locks, lock, lo_list);
510 	lock->lo_flags &= ~LO_INITIALIZED;
511 	mtx_unlock(&all_mtx);
512 }
513 
514 #ifdef DDB
515 static void
516 witness_display_list(void(*prnt)(const char *fmt, ...),
517 		     struct witness_list *list)
518 {
519 	struct witness *w;
520 
521 	STAILQ_FOREACH(w, list, w_typelist) {
522 		if (w->w_file == NULL || w->w_level > 0)
523 			continue;
524 		/*
525 		 * This lock has no ancestors; display its descendants.
526 		 */
527 		witness_displaydescendants(prnt, w, 0);
528 	}
529 }
530 
531 static void
532 witness_display(void(*prnt)(const char *fmt, ...))
533 {
534 	struct witness *w;
535 
536 	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
537 	witness_levelall();
538 
539 	/* Clear all the displayed flags. */
540 	STAILQ_FOREACH(w, &w_all, w_list) {
541 		w->w_displayed = 0;
542 	}
543 
544 	/*
545 	 * First, handle sleep locks which have been acquired at least
546 	 * once.
547 	 */
548 	prnt("Sleep locks:\n");
549 	witness_display_list(prnt, &w_sleep);
550 
551 	/*
552 	 * Now do spin locks which have been acquired at least once.
553 	 */
554 	prnt("\nSpin locks:\n");
555 	witness_display_list(prnt, &w_spin);
556 
557 	/*
558 	 * Finally, any locks which have not been acquired yet.
559 	 */
560 	prnt("\nLocks which were never acquired:\n");
561 	STAILQ_FOREACH(w, &w_all, w_list) {
562 		if (w->w_file != NULL || w->w_refcount == 0)
563 			continue;
564 		prnt("%s\n", w->w_name);
565 	}
566 }
567 #endif /* DDB */
568 
569 /* Trim useless garbage from filenames. */
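/* E.g. "../../../kern/subr_witness.c" becomes "kern/subr_witness.c". */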
570 static const char *
571 fixup_filename(const char *file)
572 {
573 
574 	if (file == NULL)
575 		return (NULL);
576 	while (strncmp(file, "../", 3) == 0)
577 		file += 3;
578 	return (file);
579 }
580 
581 void
582 witness_lock(struct lock_object *lock, int flags, const char *file, int line)
583 {
584 	struct lock_list_entry **lock_list, *lle;
585 	struct lock_instance *lock1, *lock2;
586 	struct lock_class *class;
587 	struct witness *w, *w1;
588 	struct thread *td;
589 	int i, j;
590 #ifdef DDB
591 	int go_into_ddb = 0;
592 #endif
593 
594 	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
595 	    panicstr != NULL)
596 		return;
597 	w = lock->lo_witness;
598 	class = lock->lo_class;
599 	td = curthread;
600 	file = fixup_filename(file);
601 
602 	if (class->lc_flags & LC_SLEEPLOCK) {
603 		/*
604 		 * Since spin locks include a critical section, this check
605 		 * implicitly enforces a lock order of all sleep locks before
606 		 * all spin locks.
607 		 */
608 		if (td->td_critnest != 0 && (flags & LOP_TRYLOCK) == 0)
609 			panic("blockable sleep lock (%s) %s @ %s:%d",
610 			    class->lc_name, lock->lo_name, file, line);
611 		lock_list = &td->td_sleeplocks;
612 	} else
613 		lock_list = PCPU_PTR(spinlocks);
614 
615 	/*
616 	 * Is this the first lock acquired?  If so, then no order checking
617 	 * is needed.
618 	 */
619 	if (*lock_list == NULL)
620 		goto out;
621 
622 	/*
623 	 * Check to see if we are recursing on a lock we already own.
624 	 */
625 	lock1 = find_instance(*lock_list, lock);
626 	if (lock1 != NULL) {
627 		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
628 		    (flags & LOP_EXCLUSIVE) == 0) {
629 			printf("shared lock of (%s) %s @ %s:%d\n",
630 			    class->lc_name, lock->lo_name, file, line);
631 			printf("while exclusively locked from %s:%d\n",
632 			    lock1->li_file, lock1->li_line);
633 			panic("share->excl");
634 		}
635 		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
636 		    (flags & LOP_EXCLUSIVE) != 0) {
637 			printf("exclusive lock of (%s) %s @ %s:%d\n",
638 			    class->lc_name, lock->lo_name, file, line);
639 			printf("while share locked from %s:%d\n",
640 			    lock1->li_file, lock1->li_line);
641 			panic("excl->share");
642 		}
643 		lock1->li_flags++;
644 		if ((lock->lo_flags & LO_RECURSABLE) == 0) {
645 			printf(
646 			"recursed on non-recursive lock (%s) %s @ %s:%d\n",
647 			    class->lc_name, lock->lo_name, file, line);
648 			printf("first acquired @ %s:%d\n", lock1->li_file,
649 			    lock1->li_line);
650 			panic("recurse");
651 		}
652 		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
653 		    td->td_proc->p_pid, lock->lo_name,
654 		    lock1->li_flags & LI_RECURSEMASK);
655 		lock1->li_file = file;
656 		lock1->li_line = line;
657 		return;
658 	}
659 
660 	/*
661 	 * Try locks do not block if they fail to acquire the lock, thus
662 	 * there is no danger of deadlocks or of switching while holding a
663 	 * spin lock if we acquire a lock via a try operation.
664 	 */
665 	if (flags & LOP_TRYLOCK)
666 		goto out;
667 
668 	/*
669 	 * Check for duplicate locks of the same type.  Note that we only
670 	 * have to check for this on the last lock we just acquired.  Any
671 	 * other cases will be caught as lock order violations.
672 	 */
673 	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
674 	w1 = lock1->li_lock->lo_witness;
675 	if (w1 == w) {
676 		if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK))
677 			goto out;
678 		w->w_same_squawked = 1;
679 		printf("acquiring duplicate lock of same type: \"%s\"\n",
680 			lock->lo_type);
681 		printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
682 		    lock1->li_file, lock1->li_line);
683 		printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
684 #ifdef DDB
685 		go_into_ddb = 1;
686 #endif
687 		goto out;
688 	}
689 	MPASS(!mtx_owned(&w_mtx));
690 	mtx_lock_spin(&w_mtx);
691 	/*
692 	 * If the lock being acquired has a known higher level, just say ok.
693 	 */
694 	if (witness_watch > 1 && w->w_level > w1->w_level) {
695 		mtx_unlock_spin(&w_mtx);
696 		goto out;
697 	}
698 	/*
699 	 * If we know that the lock we are acquiring comes after
700 	 * the lock we most recently acquired in the lock order tree,
701 	 * then there is no need for any further checks.
702 	 */
703 	if (isitmydescendant(w1, w)) {
704 		mtx_unlock_spin(&w_mtx);
705 		goto out;
706 	}
707 	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
708 		for (i = lle->ll_count - 1; i >= 0; i--, j++) {
709 
710 			MPASS(j < WITNESS_COUNT);
711 			lock1 = &lle->ll_children[i];
712 			w1 = lock1->li_lock->lo_witness;
713 
714 			/*
715 			 * If this lock doesn't undergo witness checking,
716 			 * then skip it.
717 			 */
718 			if (w1 == NULL) {
719 				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
720 				    ("lock missing witness structure"));
721 				continue;
722 			}
723 			/*
724 			 * If we are locking Giant and this is a sleepable
725 			 * lock, then skip it.
726 			 */
727 			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
728 			    lock == &Giant.mtx_object)
729 				continue;
730 			/*
731 			 * If we are locking a sleepable lock and this lock
732 			 * is Giant, then skip it.
733 			 */
734 			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
735 			    lock1->li_lock == &Giant.mtx_object)
736 				continue;
737 			/*
738 			 * If we are locking a sleepable lock and this lock
739 			 * isn't sleepable, we want to treat it as a lock
740 			 * order violation to enforce a general lock order of
741 			 * sleepable locks before non-sleepable locks.
742 			 */
743 			if (!((lock->lo_flags & LO_SLEEPABLE) != 0 &&
744 			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
745 			    /*
746 			     * Check the lock order hierarchy for a reversal.
747 			     */
748 			    if (!isitmydescendant(w, w1))
749 				continue;
750 			/*
751 			 * We have a lock order violation, check to see if it
752 			 * is allowed or has already been yelled about.
753 			 */
754 			mtx_unlock_spin(&w_mtx);
755 #ifdef BLESSING
756 			if (blessed(w, w1))
757 				goto out;
758 #endif
759 			if (lock1->li_lock == &Giant.mtx_object) {
760 				if (w1->w_Giant_squawked)
761 					goto out;
762 				else
763 					w1->w_Giant_squawked = 1;
764 			} else {
765 				if (w1->w_other_squawked)
766 					goto out;
767 				else
768 					w1->w_other_squawked = 1;
769 			}
770 			/*
771 			 * Ok, yell about it.
772 			 */
773 			printf("lock order reversal\n");
774 			/*
775 			 * Try to locate an earlier lock with
776 			 * witness w in our list.
777 			 */
778 			do {
779 				lock2 = &lle->ll_children[i];
780 				MPASS(lock2->li_lock != NULL);
781 				if (lock2->li_lock->lo_witness == w)
782 					break;
783 				i--;
784 				if (i == 0 && lle->ll_next != NULL) {
785 					lle = lle->ll_next;
786 					i = lle->ll_count - 1;
787 					MPASS(i >= 0 && i < LOCK_NCHILDREN);
788 				}
789 			} while (i >= 0);
790 			if (i < 0) {
791 				printf(" 1st %p %s (%s) @ %s:%d\n",
792 				    lock1->li_lock, lock1->li_lock->lo_name,
793 				    lock1->li_lock->lo_type, lock1->li_file,
794 				    lock1->li_line);
795 				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
796 				    lock->lo_name, lock->lo_type, file, line);
797 			} else {
798 				printf(" 1st %p %s (%s) @ %s:%d\n",
799 				    lock2->li_lock, lock2->li_lock->lo_name,
800 				    lock2->li_lock->lo_type, lock2->li_file,
801 				    lock2->li_line);
802 				printf(" 2nd %p %s (%s) @ %s:%d\n",
803 				    lock1->li_lock, lock1->li_lock->lo_name,
804 				    lock1->li_lock->lo_type, lock1->li_file,
805 				    lock1->li_line);
806 				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
807 				    lock->lo_name, lock->lo_type, file, line);
808 			}
809 #ifdef DDB
810 			go_into_ddb = 1;
811 #endif
812 			goto out;
813 		}
814 	}
815 	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
816 	/*
817 	 * Don't build a new relationship between a sleepable lock and
818 	 * Giant if it is the wrong direction.  The real lock order is that
819 	 * sleepable locks come before Giant.
820 	 */
821 	if (!(lock1->li_lock == &Giant.mtx_object &&
822 	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
823 		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
824 		    lock->lo_type, lock1->li_lock->lo_type);
825 		if (!itismychild(lock1->li_lock->lo_witness, w))
826 			/* Witness is dead. */
827 			return;
828 	}
829 	mtx_unlock_spin(&w_mtx);
830 
831 out:
832 #ifdef DDB
833 	if (go_into_ddb) {
834 		if (witness_trace)
835 			backtrace();
836 		if (witness_ddb)
837 			Debugger(__func__);
838 	}
839 #endif
840 	w->w_file = file;
841 	w->w_line = line;
842 
843 	lle = *lock_list;
844 	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
845 		lle = witness_lock_list_get();
846 		if (lle == NULL)
847 			return;
848 		lle->ll_next = *lock_list;
849 		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
850 		    td->td_proc->p_pid, lle);
851 		*lock_list = lle;
852 	}
853 	lock1 = &lle->ll_children[lle->ll_count++];
854 	lock1->li_lock = lock;
855 	lock1->li_line = line;
856 	lock1->li_file = file;
857 	if ((flags & LOP_EXCLUSIVE) != 0)
858 		lock1->li_flags = LI_EXCLUSIVE;
859 	else
860 		lock1->li_flags = 0;
861 	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
862 	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
863 }
864 
865 void
866 witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
867 {
868 	struct lock_instance *instance;
869 	struct lock_class *class;
870 
871 	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
872 	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
873 		return;
874 	class = lock->lo_class;
875 	file = fixup_filename(file);
876 	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
877 		panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
878 		    class->lc_name, lock->lo_name, file, line);
879 	if ((flags & LOP_TRYLOCK) == 0)
880 		panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
881 		    lock->lo_name, file, line);
882 	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
883 		panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
884 		    class->lc_name, lock->lo_name, file, line);
885 	instance = find_instance(curthread->td_sleeplocks, lock);
886 	if (instance == NULL)
887 		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
888 		    class->lc_name, lock->lo_name, file, line);
889 	if ((instance->li_flags & LI_EXCLUSIVE) != 0)
890 		panic("upgrade of exclusive lock (%s) %s @ %s:%d",
891 		    class->lc_name, lock->lo_name, file, line);
892 	if ((instance->li_flags & LI_RECURSEMASK) != 0)
893 		panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
894 		    class->lc_name, lock->lo_name,
895 		    instance->li_flags & LI_RECURSEMASK, file, line);
896 	instance->li_flags |= LI_EXCLUSIVE;
897 }
898 
899 void
900 witness_downgrade(struct lock_object *lock, int flags, const char *file,
901     int line)
902 {
903 	struct lock_instance *instance;
904 	struct lock_class *class;
905 
906 	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
907 	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
908 		return;
909 	class = lock->lo_class;
910 	file = fixup_filename(file);
911 	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
912 		panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
913 		    class->lc_name, lock->lo_name, file, line);
914 	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
915 		panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
916 		    class->lc_name, lock->lo_name, file, line);
917 	instance = find_instance(curthread->td_sleeplocks, lock);
918 	if (instance == NULL)
919 		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
920 		    class->lc_name, lock->lo_name, file, line);
921 	if ((instance->li_flags & LI_EXCLUSIVE) == 0)
922 		panic("downgrade of shared lock (%s) %s @ %s:%d",
923 		    class->lc_name, lock->lo_name, file, line);
924 	if ((instance->li_flags & LI_RECURSEMASK) != 0)
925 		panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
926 		    class->lc_name, lock->lo_name,
927 		    instance->li_flags & LI_RECURSEMASK, file, line);
928 	instance->li_flags &= ~LI_EXCLUSIVE;
929 }
930 
931 void
932 witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
933 {
934 	struct lock_list_entry **lock_list, *lle;
935 	struct lock_instance *instance;
936 	struct lock_class *class;
937 	struct thread *td;
938 	register_t s;
939 	int i, j;
940 
941 	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
942 	    panicstr != NULL)
943 		return;
944 	td = curthread;
945 	class = lock->lo_class;
946 	file = fixup_filename(file);
947 	if (class->lc_flags & LC_SLEEPLOCK)
948 		lock_list = &td->td_sleeplocks;
949 	else
950 		lock_list = PCPU_PTR(spinlocks);
951 	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
952 		for (i = 0; i < (*lock_list)->ll_count; i++) {
953 			instance = &(*lock_list)->ll_children[i];
954 			if (instance->li_lock == lock) {
955 				if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
956 				    (flags & LOP_EXCLUSIVE) == 0) {
957 					printf(
958 					"shared unlock of (%s) %s @ %s:%d\n",
959 					    class->lc_name, lock->lo_name,
960 					    file, line);
961 					printf(
962 					"while exclusively locked from %s:%d\n",
963 					    instance->li_file,
964 					    instance->li_line);
965 					panic("excl->ushare");
966 				}
967 				if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
968 				    (flags & LOP_EXCLUSIVE) != 0) {
969 					printf(
970 					"exclusive unlock of (%s) %s @ %s:%d\n",
971 					    class->lc_name, lock->lo_name,
972 					    file, line);
973 					printf(
974 					"while share locked from %s:%d\n",
975 					    instance->li_file,
976 					    instance->li_line);
977 					panic("share->uexcl");
978 				}
979 				/* If we are recursed, unrecurse. */
980 				if ((instance->li_flags & LI_RECURSEMASK) > 0) {
981 					CTR4(KTR_WITNESS,
982 				    "%s: pid %d unrecursed on %s r=%d", __func__,
983 					    td->td_proc->p_pid,
984 					    instance->li_lock->lo_name,
985 					    instance->li_flags);
986 					instance->li_flags--;
987 					return;
988 				}
989 				s = intr_disable();
990 				CTR4(KTR_WITNESS,
991 				    "%s: pid %d removed %s from lle[%d]", __func__,
992 				    td->td_proc->p_pid,
993 				    instance->li_lock->lo_name,
994 				    (*lock_list)->ll_count - 1);
995 				for (j = i; j < (*lock_list)->ll_count - 1; j++)
996 					(*lock_list)->ll_children[j] =
997 					    (*lock_list)->ll_children[j + 1];
998 				(*lock_list)->ll_count--;
999 				intr_restore(s);
1000 				if ((*lock_list)->ll_count == 0) {
1001 					lle = *lock_list;
1002 					*lock_list = lle->ll_next;
1003 					CTR3(KTR_WITNESS,
1004 					    "%s: pid %d removed lle %p", __func__,
1005 					    td->td_proc->p_pid, lle);
1006 					witness_lock_list_free(lle);
1007 				}
1008 				return;
1009 			}
1010 		}
1011 	panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
1012 	    file, line);
1013 }
1014 
1015 /*
1016  * Warn if any locks other than 'lock' are held.  Flags can be passed in to
1017  * exempt Giant and sleepable locks from the checks as well.  If any
1018  * non-exempt locks are held, then a supplied message is printed to the
1019  * console along with a list of the offending locks.  If indicated in the
1020  * flags then a failure results in a panic as well.
1021  */
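/*
 * Sketch of a typical call, as a caller about to sleep might make it
 * (the "wmesg" argument here is hypothetical):
 *
 *	witness_warn(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
 *	    "sleeping on \"%s\"", wmesg);
 */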
1022 int
1023 witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1024 {
1025 	struct lock_list_entry *lle;
1026 	struct lock_instance *lock1;
1027 	struct thread *td;
1028 	va_list ap;
1029 	int i, n;
1030 
1031 	if (witness_cold || witness_watch == 0 || panicstr != NULL)
1032 		return (0);
1033 	n = 0;
1034 	td = curthread;
1035 	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1036 		for (i = lle->ll_count - 1; i >= 0; i--) {
1037 			lock1 = &lle->ll_children[i];
1038 			if (lock1->li_lock == lock)
1039 				continue;
1040 			if (flags & WARN_GIANTOK &&
1041 			    lock1->li_lock == &Giant.mtx_object)
1042 				continue;
1043 			if (flags & WARN_SLEEPOK &&
1044 			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1045 				continue;
1046 			if (n == 0) {
1047 				va_start(ap, fmt);
1048 				vprintf(fmt, ap);
1049 				va_end(ap);
1050 				printf(" with the following");
1051 				if (flags & WARN_SLEEPOK)
1052 					printf(" non-sleepable");
1053 				printf(" locks held:\n");
1054 			}
1055 			n++;
1056 			witness_list_lock(lock1);
1057 		}
1058 	if (PCPU_GET(spinlocks) != NULL) {
1059 		/*
1060 		 * Since we already hold a spin lock, preemption is
1061 		 * blocked.
1062 		 */
1063 		if (n == 0) {
1064 			va_start(ap, fmt);
1065 			vprintf(fmt, ap);
1066 			va_end(ap);
1067 			printf(" with the following");
1068 			if (flags & WARN_SLEEPOK)
1069 				printf(" non-sleepable");
1070 			printf(" locks held:\n");
1071 		}
1072 		n += witness_list_locks(PCPU_PTR(spinlocks));
1073 	}
1074 	if (flags & WARN_PANIC && n)
1075 		panic("witness_warn");
1076 #ifdef DDB
1077 	else if (witness_ddb && n)
1078 		Debugger(__func__);
1079 #endif
1080 	return (n);
1081 }
1082 
1083 const char *
1084 witness_file(struct lock_object *lock)
1085 {
1086 	struct witness *w;
1087 
1088 	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
1089 		return ("?");
1090 	w = lock->lo_witness;
1091 	return (w->w_file);
1092 }
1093 
1094 int
1095 witness_line(struct lock_object *lock)
1096 {
1097 	struct witness *w;
1098 
1099 	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
1100 		return (0);
1101 	w = lock->lo_witness;
1102 	return (w->w_line);
1103 }
1104 
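/*
 * Find the witness for the given lock name and class, bumping its
 * reference count, or enroll a new one.  Returns NULL if witness is
 * disabled, if this is a spin lock and spin lock checking is being
 * skipped, or if no free witness structures remain.
 */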
1105 static struct witness *
1106 enroll(const char *description, struct lock_class *lock_class)
1107 {
1108 	struct witness *w;
1109 
1110 	if (witness_watch == 0 || panicstr != NULL)
1111 		return (NULL);
1112 	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
1113 		return (NULL);
1114 	mtx_lock_spin(&w_mtx);
1115 	STAILQ_FOREACH(w, &w_all, w_list) {
1116 		if (w->w_name == description || (w->w_refcount > 0 &&
1117 		    strcmp(description, w->w_name) == 0)) {
1118 			w->w_refcount++;
1119 			mtx_unlock_spin(&w_mtx);
1120 			if (lock_class != w->w_class)
1121 				panic(
1122 				"lock (%s) %s does not match earlier (%s) lock",
1123 				    description, lock_class->lc_name,
1124 				    w->w_class->lc_name);
1125 			return (w);
1126 		}
1127 	}
1128 	/*
1129 	 * This isn't quite right, as witness_cold is still 1 while we
1130 	 * enroll all the locks initialized before witness_initialize().
1131 	 */
1132 	if ((lock_class->lc_flags & LC_SPINLOCK) && !witness_cold) {
1133 		mtx_unlock_spin(&w_mtx);
1134 		panic("spin lock %s not in order list", description);
1135 	}
1136 	if ((w = witness_get()) == NULL)
1137 		return (NULL);
1138 	w->w_name = description;
1139 	w->w_class = lock_class;
1140 	w->w_refcount = 1;
1141 	STAILQ_INSERT_HEAD(&w_all, w, w_list);
1142 	if (lock_class->lc_flags & LC_SPINLOCK)
1143 		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1144 	else if (lock_class->lc_flags & LC_SLEEPLOCK)
1145 		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1146 	else {
1147 		mtx_unlock_spin(&w_mtx);
1148 		panic("lock class %s is not sleep or spin",
1149 		    lock_class->lc_name);
1150 	}
1151 	mtx_unlock_spin(&w_mtx);
1152 	return (w);
1153 }
1154 
1155 /* Don't let the door bang you on the way out... */
1156 static int
1157 depart(struct witness *w)
1158 {
1159 	struct witness_child_list_entry *wcl, *nwcl;
1160 	struct witness_list *list;
1161 	struct witness *parent;
1162 
1163 	MPASS(w->w_refcount == 0);
1164 	if (w->w_class->lc_flags & LC_SLEEPLOCK)
1165 		list = &w_sleep;
1166 	else
1167 		list = &w_spin;
1168 	/*
1169 	 * First, we run through the entire tree looking for any
1170 	 * witnesses that the outgoing witness is a child of.  For
1171 	 * each parent that we find, we reparent all the direct
1172 	 * children of the outgoing witness to its parent.
1173 	 */
1174 	STAILQ_FOREACH(parent, list, w_typelist) {
1175 		if (!isitmychild(parent, w))
1176 			continue;
1177 		removechild(parent, w);
1178 		if (!reparentchildren(parent, w))
1179 			return (0);
1180 	}
1181 
1182 	/*
1183 	 * Now we go through and free up the child list of the
1184 	 * outgoing witness.
1185 	 */
1186 	for (wcl = w->w_children; wcl != NULL; wcl = nwcl) {
1187 		nwcl = wcl->wcl_next;
1188 		witness_child_free(wcl);
1189 	}
1190 
1191 	/*
1192 	 * Detach from various lists and free.
1193 	 */
1194 	STAILQ_REMOVE(list, w, witness, w_typelist);
1195 	STAILQ_REMOVE(&w_all, w, witness, w_list);
1196 	witness_free(w);
1197 
1198 	/* Finally, fixup the tree. */
1199 	return (rebalancetree(list));
1200 }
1201 
1202 /*
1203  * Prune an entire lock order tree.  We look for cases where a lock is
1204  * now both a descendant and a direct child of a given lock (e.g. both
1205  * A -> B and A -> C -> B); in that case, the direct child link is removed.
1206  *
1207  * Returns false if insertchild() fails.
1208  */
1209 static int
1210 rebalancetree(struct witness_list *list)
1211 {
1212 	struct witness *child, *parent;
1213 
1214 	STAILQ_FOREACH(child, list, w_typelist) {
1215 		STAILQ_FOREACH(parent, list, w_typelist) {
1216 			if (!isitmychild(parent, child))
1217 				continue;
1218 			removechild(parent, child);
1219 			if (isitmydescendant(parent, child))
1220 				continue;
1221 			if (!insertchild(parent, child))
1222 				return (0);
1223 		}
1224 	}
1225 	witness_levelall();
1226 	return (1);
1227 }
1228 
1229 /*
1230  * Add "child" as a direct child of "parent".  Returns false if
1231  * we fail due to out of memory.
1232  */
1233 static int
1234 insertchild(struct witness *parent, struct witness *child)
1235 {
1236 	struct witness_child_list_entry **wcl;
1237 
1238 	MPASS(child != NULL && parent != NULL);
1239 
1240 	/*
1241 	 * Insert "child" after "parent"
1242 	 */
1243 	wcl = &parent->w_children;
1244 	while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
1245 		wcl = &(*wcl)->wcl_next;
1246 	if (*wcl == NULL) {
1247 		*wcl = witness_child_get();
1248 		if (*wcl == NULL)
1249 			return (0);
1250 	}
1251 	(*wcl)->wcl_children[(*wcl)->wcl_count++] = child;
1252 
1253 	return (1);
1254 }
1255 
1256 /*
1257  * Make all the direct descendants of oldparent be direct descendants
1258  * of newparent.
1259  */
1260 static int
1261 reparentchildren(struct witness *newparent, struct witness *oldparent)
1262 {
1263 	struct witness_child_list_entry *wcl;
1264 	int i;
1265 
1266 	/* Avoid making a witness a child of itself. */
1267 	MPASS(!isitmychild(oldparent, newparent));
1268 
1269 	for (wcl = oldparent->w_children; wcl != NULL; wcl = wcl->wcl_next)
1270 		for (i = 0; i < wcl->wcl_count; i++)
1271 			if (!insertchild(newparent, wcl->wcl_children[i]))
1272 				return (0);
1273 	return (1);
1274 }
1275 
1276 static int
1277 itismychild(struct witness *parent, struct witness *child)
1278 {
1279 	struct witness_list *list;
1280 
1281 	MPASS(child != NULL && parent != NULL);
1282 	if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
1283 	    (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
1284 		panic(
1285 		"%s: parent (%s) and child (%s) are not the same lock type",
1286 		    __func__, parent->w_class->lc_name,
1287 		    child->w_class->lc_name);
1288 
1289 	if (!insertchild(parent, child))
1290 		return (0);
1291 
1292 	if (parent->w_class->lc_flags & LC_SLEEPLOCK)
1293 		list = &w_sleep;
1294 	else
1295 		list = &w_spin;
1296 	return (rebalancetree(list));
1297 }
1298 
1299 static void
1300 removechild(struct witness *parent, struct witness *child)
1301 {
1302 	struct witness_child_list_entry **wcl, *wcl1;
1303 	int i;
1304 
1305 	for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
1306 		for (i = 0; i < (*wcl)->wcl_count; i++)
1307 			if ((*wcl)->wcl_children[i] == child)
1308 				goto found;
1309 	return;
1310 found:
1311 	(*wcl)->wcl_count--;
1312 	if ((*wcl)->wcl_count > i)
1313 		(*wcl)->wcl_children[i] =
1314 		    (*wcl)->wcl_children[(*wcl)->wcl_count];
1315 	MPASS((*wcl)->wcl_children[i] != NULL);
1316 	if ((*wcl)->wcl_count != 0)
1317 		return;
1318 	wcl1 = *wcl;
1319 	*wcl = wcl1->wcl_next;
1320 	witness_child_free(wcl1);
1321 }
1322 
1323 static int
1324 isitmychild(struct witness *parent, struct witness *child)
1325 {
1326 	struct witness_child_list_entry *wcl;
1327 	int i;
1328 
1329 	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
1330 		for (i = 0; i < wcl->wcl_count; i++) {
1331 			if (wcl->wcl_children[i] == child)
1332 				return (1);
1333 		}
1334 	}
1335 	return (0);
1336 }
1337 
1338 static int
1339 isitmydescendant(struct witness *parent, struct witness *child)
1340 {
1341 	struct witness_child_list_entry *wcl;
1342 	int i, j;
1343 
1344 	if (isitmychild(parent, child))
1345 		return (1);
1346 	j = 0;
1347 	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
1348 		MPASS(j < 1000);
1349 		for (i = 0; i < wcl->wcl_count; i++) {
1350 			if (isitmydescendant(wcl->wcl_children[i], child))
1351 				return (1);
1352 		}
1353 		j++;
1354 	}
1355 	return (0);
1356 }
1357 
1358 static void
1359 witness_levelall(void)
1360 {
1361 	struct witness_list *list;
1362 	struct witness *w, *w1;
1363 
1364 	/*
1365 	 * First clear all levels.
1366 	 */
1367 	STAILQ_FOREACH(w, &w_all, w_list) {
1368 		w->w_level = 0;
1369 	}
1370 
1371 	/*
1372 	 * Look for locks with no parent and level all their descendants.
1373 	 */
1374 	STAILQ_FOREACH(w, &w_all, w_list) {
1375 		/*
1376 		 * This is just an optimization, technically we could get
1377 		 * away with just walking the all list each time.
1378 		 */
1379 		if (w->w_class->lc_flags & LC_SLEEPLOCK)
1380 			list = &w_sleep;
1381 		else
1382 			list = &w_spin;
1383 		STAILQ_FOREACH(w1, list, w_typelist) {
1384 			if (isitmychild(w1, w))
1385 				goto skip;
1386 		}
1387 		witness_leveldescendents(w, 0);
1388 	skip:
1389 		;	/* silence GCC 3.x */
1390 	}
1391 }
1392 
1393 static void
1394 witness_leveldescendents(struct witness *parent, int level)
1395 {
1396 	struct witness_child_list_entry *wcl;
1397 	int i;
1398 
1399 	if (parent->w_level < level)
1400 		parent->w_level = level;
1401 	level++;
1402 	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
1403 		for (i = 0; i < wcl->wcl_count; i++)
1404 			witness_leveldescendents(wcl->wcl_children[i], level);
1405 }
1406 
1407 static void
1408 witness_displaydescendants(void(*prnt)(const char *fmt, ...),
1409 			   struct witness *parent, int indent)
1410 {
1411 	struct witness_child_list_entry *wcl;
1412 	int i, level;
1413 
1414 	level = parent->w_level;
1415 	prnt("%-2d", level);
1416 	for (i = 0; i < indent; i++)
1417 		prnt(" ");
1418 	if (parent->w_refcount > 0)
1419 		prnt("%s", parent->w_name);
1420 	else
1421 		prnt("(dead)");
1422 	if (parent->w_displayed) {
1423 		prnt(" -- (already displayed)\n");
1424 		return;
1425 	}
1426 	parent->w_displayed = 1;
1427 	if (parent->w_refcount > 0) {
1428 		if (parent->w_file != NULL)
1429 			prnt(" -- last acquired @ %s:%d", parent->w_file,
1430 			    parent->w_line);
1431 	}
1432 	prnt("\n");
1433 	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
1434 		for (i = 0; i < wcl->wcl_count; i++)
1435 			    witness_displaydescendants(prnt,
1436 				wcl->wcl_children[i], indent + 1);
1437 }
1438 
1439 #ifdef BLESSING
1440 static int
1441 blessed(struct witness *w1, struct witness *w2)
1442 {
1443 	int i;
1444 	struct witness_blessed *b;
1445 
1446 	for (i = 0; i < blessed_count; i++) {
1447 		b = &blessed_list[i];
1448 		if (strcmp(w1->w_name, b->b_lock1) == 0) {
1449 			if (strcmp(w2->w_name, b->b_lock2) == 0)
1450 				return (1);
1451 			continue;
1452 		}
1453 		if (strcmp(w1->w_name, b->b_lock2) == 0)
1454 			if (strcmp(w2->w_name, b->b_lock1) == 0)
1455 				return (1);
1456 	}
1457 	return (0);
1458 }
1459 #endif
1460 
1461 static struct witness *
1462 witness_get(void)
1463 {
1464 	struct witness *w;
1465 
1466 	if (witness_watch == 0) {
1467 		mtx_unlock_spin(&w_mtx);
1468 		return (NULL);
1469 	}
1470 	if (STAILQ_EMPTY(&w_free)) {
1471 		witness_watch = 0;
1472 		mtx_unlock_spin(&w_mtx);
1473 		printf("%s: witness exhausted\n", __func__);
1474 		return (NULL);
1475 	}
1476 	w = STAILQ_FIRST(&w_free);
1477 	STAILQ_REMOVE_HEAD(&w_free, w_list);
1478 	bzero(w, sizeof(*w));
1479 	return (w);
1480 }
1481 
1482 static void
1483 witness_free(struct witness *w)
1484 {
1485 
1486 	STAILQ_INSERT_HEAD(&w_free, w, w_list);
1487 }
1488 
1489 static struct witness_child_list_entry *
1490 witness_child_get(void)
1491 {
1492 	struct witness_child_list_entry *wcl;
1493 
1494 	if (witness_watch == 0) {
1495 		mtx_unlock_spin(&w_mtx);
1496 		return (NULL);
1497 	}
1498 	wcl = w_child_free;
1499 	if (wcl == NULL) {
1500 		witness_watch = 0;
1501 		mtx_unlock_spin(&w_mtx);
1502 		printf("%s: witness exhausted\n", __func__);
1503 		return (NULL);
1504 	}
1505 	w_child_free = wcl->wcl_next;
1506 	bzero(wcl, sizeof(*wcl));
1507 	return (wcl);
1508 }
1509 
1510 static void
1511 witness_child_free(struct witness_child_list_entry *wcl)
1512 {
1513 
1514 	wcl->wcl_next = w_child_free;
1515 	w_child_free = wcl;
1516 }
1517 
1518 static struct lock_list_entry *
1519 witness_lock_list_get(void)
1520 {
1521 	struct lock_list_entry *lle;
1522 
1523 	if (witness_watch == 0)
1524 		return (NULL);
1525 	mtx_lock_spin(&w_mtx);
1526 	lle = w_lock_list_free;
1527 	if (lle == NULL) {
1528 		witness_watch = 0;
1529 		mtx_unlock_spin(&w_mtx);
1530 		printf("%s: witness exhausted\n", __func__);
1531 		return (NULL);
1532 	}
1533 	w_lock_list_free = lle->ll_next;
1534 	mtx_unlock_spin(&w_mtx);
1535 	bzero(lle, sizeof(*lle));
1536 	return (lle);
1537 }
1538 
1539 static void
1540 witness_lock_list_free(struct lock_list_entry *lle)
1541 {
1542 
1543 	mtx_lock_spin(&w_mtx);
1544 	lle->ll_next = w_lock_list_free;
1545 	w_lock_list_free = lle;
1546 	mtx_unlock_spin(&w_mtx);
1547 }
1548 
1549 static struct lock_instance *
1550 find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
1551 {
1552 	struct lock_list_entry *lle;
1553 	struct lock_instance *instance;
1554 	int i;
1555 
1556 	for (lle = lock_list; lle != NULL; lle = lle->ll_next)
1557 		for (i = lle->ll_count - 1; i >= 0; i--) {
1558 			instance = &lle->ll_children[i];
1559 			if (instance->li_lock == lock)
1560 				return (instance);
1561 		}
1562 	return (NULL);
1563 }
1564 
1565 static void
1566 witness_list_lock(struct lock_instance *instance)
1567 {
1568 	struct lock_object *lock;
1569 
1570 	lock = instance->li_lock;
1571 	printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
1572 	    "exclusive" : "shared", lock->lo_class->lc_name, lock->lo_name);
1573 	if (lock->lo_type != lock->lo_name)
1574 		printf(" (%s)", lock->lo_type);
1575 	printf(" r = %d (%p) locked @ %s:%d\n",
1576 	    instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
1577 	    instance->li_line);
1578 }
1579 
1580 int
1581 witness_list_locks(struct lock_list_entry **lock_list)
1582 {
1583 	struct lock_list_entry *lle;
1584 	int i, nheld;
1585 
1586 	nheld = 0;
1587 	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
1588 		for (i = lle->ll_count - 1; i >= 0; i--) {
1589 			witness_list_lock(&lle->ll_children[i]);
1590 			nheld++;
1591 		}
1592 	return (nheld);
1593 }
1594 
1595 /*
1596  * This is a bit risky at best.  We call this function when we have timed
1597  * out acquiring a spin lock, and we assume that the other CPU is stuck
1598  * with this lock held.  So, we go groveling around in the other CPU's
1599  * per-cpu data to try to find the lock instance for this spin lock to
1600  * see when it was last acquired.
1601  */
1602 void
1603 witness_display_spinlock(struct lock_object *lock, struct thread *owner)
1604 {
1605 	struct lock_instance *instance;
1606 	struct pcpu *pc;
1607 
1608 	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
1609 		return;
1610 	pc = pcpu_find(owner->td_oncpu);
1611 	instance = find_instance(pc->pc_spinlocks, lock);
1612 	if (instance != NULL)
1613 		witness_list_lock(instance);
1614 }
1615 
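/*
 * witness_save() and witness_restore() preserve a sleep lock's recorded
 * acquisition file and line across a drop-and-reacquire cycle.  A sketch
 * of the intended pattern (hypothetical caller, in the style of the
 * sleep code):
 *
 *	const char *file;
 *	int line;
 *
 *	witness_save(&m->mtx_object, &file, &line);
 *	mtx_unlock(m);
 *	(sleep)
 *	mtx_lock(m);
 *	witness_restore(&m->mtx_object, file, line);
 */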
1616 void
1617 witness_save(struct lock_object *lock, const char **filep, int *linep)
1618 {
1619 	struct lock_instance *instance;
1620 
1621 	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1622 	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1623 		return;
1624 	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
1625 		panic("%s: lock (%s) %s is not a sleep lock", __func__,
1626 		    lock->lo_class->lc_name, lock->lo_name);
1627 	instance = find_instance(curthread->td_sleeplocks, lock);
1628 	if (instance == NULL)
1629 		panic("%s: lock (%s) %s not locked", __func__,
1630 		    lock->lo_class->lc_name, lock->lo_name);
1631 	*filep = instance->li_file;
1632 	*linep = instance->li_line;
1633 }
1634 
1635 void
1636 witness_restore(struct lock_object *lock, const char *file, int line)
1637 {
1638 	struct lock_instance *instance;
1639 
1640 	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1641 	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1642 		return;
1643 	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
1644 		panic("%s: lock (%s) %s is not a sleep lock", __func__,
1645 		    lock->lo_class->lc_name, lock->lo_name);
1646 	instance = find_instance(curthread->td_sleeplocks, lock);
1647 	if (instance == NULL)
1648 		panic("%s: lock (%s) %s not locked", __func__,
1649 		    lock->lo_class->lc_name, lock->lo_name);
1650 	lock->lo_witness->w_file = file;
1651 	lock->lo_witness->w_line = line;
1652 	instance->li_file = file;
1653 	instance->li_line = line;
1654 }
1655 
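/*
 * Assert that 'lock' is held (or not held) as described by 'flags'.  A
 * sketch of a typical call, for a caller requiring an exclusive,
 * unrecursed hold:
 *
 *	witness_assert(&m->mtx_object, LA_XLOCKED | LA_NOTRECURSED,
 *	    file, line);
 */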
1656 void
1657 witness_assert(struct lock_object *lock, int flags, const char *file, int line)
1658 {
1659 #ifdef INVARIANT_SUPPORT
1660 	struct lock_instance *instance;
1661 
1662 	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1663 		return;
1664 	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) != 0)
1665 		instance = find_instance(curthread->td_sleeplocks, lock);
1666 	else if ((lock->lo_class->lc_flags & LC_SPINLOCK) != 0)
1667 		instance = find_instance(PCPU_GET(spinlocks), lock);
1668 	else {
1669 		panic("Lock (%s) %s is not sleep or spin!",
1670 		    lock->lo_class->lc_name, lock->lo_name);
1671 	}
1672 	file = fixup_filename(file);
1673 	switch (flags) {
1674 	case LA_UNLOCKED:
1675 		if (instance != NULL)
1676 			panic("Lock (%s) %s locked @ %s:%d.",
1677 			    lock->lo_class->lc_name, lock->lo_name, file, line);
1678 		break;
1679 	case LA_LOCKED:
1680 	case LA_LOCKED | LA_RECURSED:
1681 	case LA_LOCKED | LA_NOTRECURSED:
1682 	case LA_SLOCKED:
1683 	case LA_SLOCKED | LA_RECURSED:
1684 	case LA_SLOCKED | LA_NOTRECURSED:
1685 	case LA_XLOCKED:
1686 	case LA_XLOCKED | LA_RECURSED:
1687 	case LA_XLOCKED | LA_NOTRECURSED:
1688 		if (instance == NULL) {
1689 			panic("Lock (%s) %s not locked @ %s:%d.",
1690 			    lock->lo_class->lc_name, lock->lo_name, file, line);
1691 			break;
1692 		}
1693 		if ((flags & LA_XLOCKED) != 0 &&
1694 		    (instance->li_flags & LI_EXCLUSIVE) == 0)
1695 			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
1696 			    lock->lo_class->lc_name, lock->lo_name, file, line);
1697 		if ((flags & LA_SLOCKED) != 0 &&
1698 		    (instance->li_flags & LI_EXCLUSIVE) != 0)
1699 			panic("Lock (%s) %s exclusively locked @ %s:%d.",
1700 			    lock->lo_class->lc_name, lock->lo_name, file, line);
1701 		if ((flags & LA_RECURSED) != 0 &&
1702 		    (instance->li_flags & LI_RECURSEMASK) == 0)
1703 			panic("Lock (%s) %s not recursed @ %s:%d.",
1704 			    lock->lo_class->lc_name, lock->lo_name, file, line);
1705 		if ((flags & LA_NOTRECURSED) != 0 &&
1706 		    (instance->li_flags & LI_RECURSEMASK) != 0)
1707 			panic("Lock (%s) %s recursed @ %s:%d.",
1708 			    lock->lo_class->lc_name, lock->lo_name, file, line);
1709 		break;
1710 	default:
1711 		panic("Invalid lock assertion at %s:%d.", file, line);
1712 
1713 	}
1714 #endif	/* INVARIANT_SUPPORT */
1715 }
1716 
1717 #ifdef DDB
1718 static void
1719 witness_list(struct thread *td)
1720 {
1721 
1722 	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1723 	KASSERT(db_active, ("%s: not in the debugger", __func__));
1724 
1725 	if (witness_watch == 0)
1726 		return;
1727 
1728 	witness_list_locks(&td->td_sleeplocks);
1729 
1730 	/*
1731 	 * We only handle spin locks if td == curthread.  This is somewhat
1732 	 * broken: if td is currently executing on some other CPU and holds
1733 	 * spin locks, we won't display those locks.  If we had an MI way of
1734 	 * getting the per-cpu data for a given cpu then we could use
1735 	 * td->td_oncpu to get the list of spinlocks for this thread
1736 	 * and "fix" this.
1737 	 *
1738 	 * That still wouldn't really fix this unless we locked sched_lock
1739 	 * or stopped the other CPU to make sure it wasn't changing the list
1740 	 * out from under us.  It is probably best to just not try to handle
1741 	 * threads on other CPUs for now.
1742 	 */
1743 	if (td == curthread && PCPU_GET(spinlocks) != NULL)
1744 		witness_list_locks(PCPU_PTR(spinlocks));
1745 }
1746 
1747 DB_SHOW_COMMAND(locks, db_witness_list)
1748 {
1749 	struct thread *td;
1750 	pid_t pid;
1751 	struct proc *p;
1752 
1753 	if (have_addr) {
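		/*
		 * DDB parses the command argument in hex, so reinterpret
		 * its hex digits as the decimal pid the user typed.
		 */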
1754 		pid = (addr % 16) + ((addr >> 4) % 16) * 10 +
1755 		    ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 +
1756 		    ((addr >> 16) % 16) * 10000;
1757 		/* sx_slock(&allproc_lock); */
1758 		FOREACH_PROC_IN_SYSTEM(p) {
1759 			if (p->p_pid == pid)
1760 				break;
1761 		}
1762 		/* sx_sunlock(&allproc_lock); */
1763 		if (p == NULL) {
1764 			db_printf("pid %d not found\n", pid);
1765 			return;
1766 		}
1767 		FOREACH_THREAD_IN_PROC(p, td) {
1768 			witness_list(td);
1769 		}
1770 	} else {
1771 		td = curthread;
1772 		witness_list(td);
1773 	}
1774 }
1775 
1776 DB_SHOW_COMMAND(witness, db_witness_display)
1777 {
1778 
1779 	witness_display(db_printf);
1780 }
1781 #endif
1782