1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2008 Isilon Systems, Inc.
5 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
6 * Copyright (c) 1998 Berkeley Software Design, Inc.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Berkeley Software Design Inc's name may not be used to endorse or
18 * promote products derived from this software without specific prior
19 * written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
34 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
35 */
36
37 /*
38 * Implementation of the `witness' lock verifier. Originally implemented for
39 * mutexes in BSD/OS. Extended to handle generic lock objects and lock
40 * classes in FreeBSD.
41 */
42
43 /*
44 * Main Entry: witness
45 * Pronunciation: 'wit-n&s
46 * Function: noun
47 * Etymology: Middle English witnesse, from Old English witnes knowledge,
48 * testimony, witness, from 2wit
49 * Date: before 12th century
50 * 1 : attestation of a fact or event : TESTIMONY
51 * 2 : one that gives evidence; specifically : one who testifies in
52 * a cause or before a judicial tribunal
53 * 3 : one asked to be present at a transaction so as to be able to
54 * testify to its having taken place
55 * 4 : one who has personal knowledge of something
56 * 5 a : something serving as evidence or proof : SIGN
57 * b : public affirmation by word or example of usually
58 * religious faith or conviction <the heroic witness to divine
59 * life -- Pilot>
60 * 6 capitalized : a member of the Jehovah's Witnesses
61 */
62
63 /*
64 * Special rules concerning Giant and lock orders:
65 *
66 * 1) Giant must be acquired before any other mutexes. Stated another way,
67 * no other mutex may be held when Giant is acquired.
68 *
69 * 2) Giant must be released when blocking on a sleepable lock.
70 *
71 * This rule is less obvious, but is a result of Giant providing the same
72 * semantics as spl(). Basically, when a thread sleeps, it must release
73 * Giant. When a thread blocks on a sleepable lock, it sleeps. Hence rule
74 * 2).
75 *
76 * 3) Giant may be acquired before or after sleepable locks.
77 *
78 * This rule is also not quite as obvious. Giant may be acquired after
79 * a sleepable lock because it is a non-sleepable lock and non-sleepable
80 * locks may always be acquired while holding a sleepable lock. The second
81 * case, Giant before a sleepable lock, follows from rule 2) above. Suppose
82 * you have two threads T1 and T2 and a sleepable lock X. Suppose that T1
83 * acquires X and blocks on Giant. Then suppose that T2 acquires Giant and
84 * blocks on X. When T2 blocks on X, T2 will release Giant allowing T1 to
85 * execute. Thus, acquiring Giant both before and after a sleepable lock
86 * will not result in a lock order reversal.
87 */
88
89 #include <sys/cdefs.h>
90 #include "opt_ddb.h"
91 #include "opt_hwpmc_hooks.h"
92 #include "opt_stack.h"
93 #include "opt_witness.h"
94
95 #include <sys/param.h>
96 #include <sys/bus.h>
97 #include <sys/kdb.h>
98 #include <sys/kernel.h>
99 #include <sys/ktr.h>
100 #include <sys/lock.h>
101 #include <sys/malloc.h>
102 #include <sys/mutex.h>
103 #include <sys/priv.h>
104 #include <sys/proc.h>
105 #include <sys/sbuf.h>
106 #include <sys/sched.h>
107 #include <sys/stack.h>
108 #include <sys/sysctl.h>
109 #include <sys/syslog.h>
110 #include <sys/systm.h>
111
112 #ifdef DDB
113 #include <ddb/ddb.h>
114 #endif
115
116 #include <machine/stdarg.h>
117
118 #if !defined(DDB) && !defined(STACK)
119 #error "DDB or STACK options are required for WITNESS"
120 #endif
121
122 /* Note that these traces do not work with KTR_ALQ. */
123 #if 0
124 #define KTR_WITNESS KTR_SUBSYS
125 #else
126 #define KTR_WITNESS 0
127 #endif
128
129 #define LI_RECURSEMASK 0x0000ffff /* Recursion depth of lock instance. */
130 #define LI_EXCLUSIVE 0x00010000 /* Exclusive lock instance. */
131 #define LI_NORELEASE 0x00020000 /* Lock not allowed to be released. */
132 #define LI_SLEEPABLE 0x00040000 /* Lock may be held while sleeping. */
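/*
 * Editorial note (illustrative, not from the original source): the low 16
 * bits of li_flags hold the recursion count, so witness_lock() bumps it
 * with "instance->li_flags++" and witness_unlock() reads it back with
 * "instance->li_flags & LI_RECURSEMASK"; the LI_* flags above occupy the
 * bits beyond the mask.
 */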
133
134 #ifndef WITNESS_COUNT
135 #define WITNESS_COUNT 1536
136 #endif
137 #define WITNESS_HASH_SIZE 251 /* Prime, gives load factor < 2 */
138 #define WITNESS_PENDLIST (512 + (MAXCPU * 4))
139
140 /* Allocate 256 KB of stack data space */
141 #define WITNESS_LO_DATA_COUNT 2048
142
143 /* Prime, gives load factor of ~2 at full load */
144 #define WITNESS_LO_HASH_SIZE 1021
145
146 /*
147 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
148 * will hold LOCK_NCHILDREN locks. We handle failure ok, and we should
149 * probably be safe for the most part, but it's still a SWAG.
150 */
151 #define LOCK_NCHILDREN 5
152 #define LOCK_CHILDCOUNT 2048
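/*
 * Editorial sizing note, assuming the defaults above: LOCK_CHILDCOUNT
 * entries of LOCK_NCHILDREN instances each give 2048 * 5 = 10240
 * trackable lock instances shared by all threads and CPUs.  If
 * witness_lock_list_get() runs dry, witness_lock() simply skips
 * recording the new lock instead of panicking.
 */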
153
154 #define MAX_W_NAME 64
155
156 #define FULLGRAPH_SBUF_SIZE 512
157
158 /*
159 * These flags go in the witness relationship matrix and describe the
160 * relationship between any two struct witness objects.
161 */
162 #define WITNESS_UNRELATED 0x00 /* No lock order relation. */
163 #define WITNESS_PARENT 0x01 /* Parent, aka direct ancestor. */
164 #define WITNESS_ANCESTOR 0x02 /* Direct or indirect ancestor. */
165 #define WITNESS_CHILD 0x04 /* Child, aka direct descendant. */
166 #define WITNESS_DESCENDANT 0x08 /* Direct or indirect descendant. */
167 #define WITNESS_ANCESTOR_MASK (WITNESS_PARENT | WITNESS_ANCESTOR)
168 #define WITNESS_DESCENDANT_MASK (WITNESS_CHILD | WITNESS_DESCENDANT)
169 #define WITNESS_RELATED_MASK \
170 (WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
171 #define WITNESS_REVERSAL 0x10 /* A lock order reversal has been
172 * observed. */
173 #define WITNESS_RESERVED1 0x20 /* Unused flag, reserved. */
174 #define WITNESS_RESERVED2 0x40 /* Unused flag, reserved. */
175 #define WITNESS_LOCK_ORDER_KNOWN 0x80 /* This lock order is known. */
176
177 /* Descendant to ancestor flags */
178 #define WITNESS_DTOA(x) (((x) & WITNESS_RELATED_MASK) >> 2)
179
180 /* Ancestor to descendant flags */
181 #define WITNESS_ATOD(x) (((x) & WITNESS_RELATED_MASK) << 2)
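/*
 * Worked example (editorial): the parent/ancestor bits sit exactly two
 * positions below the child/descendant bits, so
 *
 *	WITNESS_ATOD(WITNESS_PARENT)   == WITNESS_CHILD
 *	WITNESS_ATOD(WITNESS_ANCESTOR) == WITNESS_DESCENDANT
 *	WITNESS_DTOA(WITNESS_CHILD)    == WITNESS_PARENT
 *
 * and the same relationship bits can be read from either end of an edge.
 */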
182
183 #define WITNESS_INDEX_ASSERT(i) \
184 MPASS((i) > 0 && (i) <= w_max_used_index && (i) < witness_count)
185
186 static MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");
187
188 /*
189 * Lock instances. A lock instance is the data associated with a lock while
190 * it is held by witness. For example, a lock instance will hold the
191 * recursion count of a lock. Lock instances are held in lists. Spin locks
192 * are held in a per-cpu list while sleep locks are held in a per-thread list.
193 */
194 struct lock_instance {
195 struct lock_object *li_lock;
196 const char *li_file;
197 int li_line;
198 u_int li_flags;
199 };
200
201 /*
202 * A simple list type used to build the list of locks held by a thread
203 * or CPU. We can't simply embed the list in struct lock_object since a
204 * lock may be held by more than one thread if it is a shared lock. Locks
205 * are added to the head of the list, so we fill up each list entry from
206 * "the back" logically. To ease some of the arithmetic, we actually fill
207 * in each list entry the normal way (children[0] then children[1], etc.) but
208 * when we traverse the list we read children[count-1] as the first entry
209 * down to children[0] as the final entry.
210 */
211 struct lock_list_entry {
212 struct lock_list_entry *ll_next;
213 struct lock_instance ll_children[LOCK_NCHILDREN];
214 u_int ll_count;
215 };
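/*
 * Traversal sketch (editorial, mirroring the pattern used by the checks
 * later in this file): the most recently acquired lock is
 * ll_children[ll_count - 1] of the head entry, so walkers follow ll_next
 * and scan children in reverse:
 *
 *	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
 *		for (i = lle->ll_count - 1; i >= 0; i--)
 *			... inspect &lle->ll_children[i] ...
 */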
216
217 /*
218 * The main witness structure. One of these per named lock type in the system
219 * (for example, "vnode interlock").
220 */
221 struct witness {
222 char w_name[MAX_W_NAME];
223 uint32_t w_index; /* Index in the relationship matrix */
224 struct lock_class *w_class;
225 STAILQ_ENTRY(witness) w_list; /* List of all witnesses. */
226 STAILQ_ENTRY(witness) w_typelist; /* Witnesses of a type. */
227 struct witness *w_hash_next; /* Linked list in hash buckets. */
228 const char *w_file; /* File where last acquired */
229 uint32_t w_line; /* Line where last acquired */
230 uint32_t w_refcount;
231 uint16_t w_num_ancestors; /* direct/indirect
232 * ancestor count */
233 uint16_t w_num_descendants; /* direct/indirect
234 * descendant count */
235 int16_t w_ddb_level;
236 unsigned w_displayed:1;
237 unsigned w_reversed:1;
238 };
239
240 STAILQ_HEAD(witness_list, witness);
241
242 /*
243 * The witness hash table. Keys are witness names (const char *), elements are
244 * witness objects (struct witness *).
245 */
246 struct witness_hash {
247 struct witness *wh_array[WITNESS_HASH_SIZE];
248 uint32_t wh_size;
249 uint32_t wh_count;
250 };
251
252 /*
253 * Key type for the lock order data hash table.
254 */
255 struct witness_lock_order_key {
256 uint16_t from;
257 uint16_t to;
258 };
259
260 struct witness_lock_order_data {
261 struct stack wlod_stack;
262 struct witness_lock_order_key wlod_key;
263 struct witness_lock_order_data *wlod_next;
264 };
265
266 /*
267 * The witness lock order data hash table. Keys are witness index tuples
268 * (struct witness_lock_order_key), elements are lock order data objects
269 * (struct witness_lock_order_data).
270 */
271 struct witness_lock_order_hash {
272 struct witness_lock_order_data *wloh_array[WITNESS_LO_HASH_SIZE];
273 u_int wloh_size;
274 u_int wloh_count;
275 };
276
277 struct witness_blessed {
278 const char *b_lock1;
279 const char *b_lock2;
280 };
281
282 struct witness_pendhelp {
283 const char *wh_type;
284 struct lock_object *wh_lock;
285 };
286
287 struct witness_order_list_entry {
288 const char *w_name;
289 struct lock_class *w_class;
290 };
291
292 /*
293 * Returns 0 if one of the locks is a spin lock and the other is not.
294 * Returns 1 otherwise.
295 */
296 static __inline int
297 witness_lock_type_equal(struct witness *w1, struct witness *w2)
298 {
299
300 return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
301 (w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
302 }
303
304 static __inline int
305 witness_lock_order_key_equal(const struct witness_lock_order_key *a,
306 const struct witness_lock_order_key *b)
307 {
308
309 return (a->from == b->from && a->to == b->to);
310 }
311
312 static int _isitmyx(struct witness *w1, struct witness *w2, int rmask,
313 const char *fname);
314 static void adopt(struct witness *parent, struct witness *child);
315 static int blessed(struct witness *, struct witness *);
316 static void depart(struct witness *w);
317 static struct witness *enroll(const char *description,
318 struct lock_class *lock_class);
319 static struct lock_instance *find_instance(struct lock_list_entry *list,
320 const struct lock_object *lock);
321 static int isitmychild(struct witness *parent, struct witness *child);
322 static int isitmydescendant(struct witness *parent, struct witness *child);
323 static void itismychild(struct witness *parent, struct witness *child);
324 static int sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
325 static int sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
326 static int sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
327 static int sysctl_debug_witness_channel(SYSCTL_HANDLER_ARGS);
328 static void witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
329 #ifdef DDB
330 static void witness_ddb_compute_levels(void);
331 static void witness_ddb_display(int(*)(const char *fmt, ...));
332 static void witness_ddb_display_descendants(int(*)(const char *fmt, ...),
333 struct witness *, int indent);
334 static void witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
335 struct witness_list *list);
336 static void witness_ddb_level_descendants(struct witness *parent, int l);
337 static void witness_ddb_list(struct thread *td);
338 #endif
339 static void witness_enter_debugger(const char *msg);
340 static void witness_debugger(int cond, const char *msg);
341 static void witness_free(struct witness *m);
342 static struct witness *witness_get(void);
343 static uint32_t witness_hash_djb2(const uint8_t *key, uint32_t size);
344 static struct witness *witness_hash_get(const char *key);
345 static void witness_hash_put(struct witness *w);
346 static void witness_init_hash_tables(void);
347 static void witness_increment_graph_generation(void);
348 static void witness_lock_list_free(struct lock_list_entry *lle);
349 static struct lock_list_entry *witness_lock_list_get(void);
350 static int witness_lock_order_add(struct witness *parent,
351 struct witness *child);
352 static int witness_lock_order_check(struct witness *parent,
353 struct witness *child);
354 static struct witness_lock_order_data *witness_lock_order_get(
355 struct witness *parent,
356 struct witness *child);
357 static void witness_list_lock(struct lock_instance *instance,
358 int (*prnt)(const char *fmt, ...));
359 static int witness_output(const char *fmt, ...) __printflike(1, 2);
360 static int witness_output_drain(void *arg __unused, const char *data,
361 int len);
362 static int witness_voutput(const char *fmt, va_list ap) __printflike(1, 0);
363 static void witness_setflag(struct lock_object *lock, int flag, int set);
364
365 FEATURE(witness, "kernel has witness(9) support");
366
367 static SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
368 "Witness Locking");
369
370 /*
371 * If set to 0, lock order checking is disabled. If set to -1,
372 * witness is completely disabled. Otherwise witness performs full
373 * lock order checking for all locks. At runtime, lock order checking
374 * may be toggled. However, witness cannot be reenabled once it is
375 * completely disabled.
376 */
377 static int witness_watch = 1;
378 SYSCTL_PROC(_debug_witness, OID_AUTO, watch,
379 CTLFLAG_RWTUN | CTLTYPE_INT | CTLFLAG_MPSAFE, NULL, 0,
380 sysctl_debug_witness_watch, "I",
381 "witness is watching lock operations");
382
383 #ifdef KDB
384 /*
385 * When KDB is enabled and witness_kdb is 1, it will cause the system
386 * to drop into kdebug() when:
387 * - a lock hierarchy violation occurs
388 * - locks are held when going to sleep.
389 */
390 #ifdef WITNESS_KDB
391 int witness_kdb = 1;
392 #else
393 int witness_kdb = 0;
394 #endif
395 SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RWTUN, &witness_kdb, 0, "");
396 #endif /* KDB */
397
398 #if defined(DDB) || defined(KDB)
399 /*
400 * When DDB or KDB is enabled and witness_trace is 1, it will cause the system
401 * to print a stack trace when:
402 * - a lock hierarchy violation occurs
403 * - locks are held when going to sleep.
404 */
405 int witness_trace = 1;
406 SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RWTUN, &witness_trace, 0, "");
407 #endif /* DDB || KDB */
408
409 #ifdef WITNESS_SKIPSPIN
410 int witness_skipspin = 1;
411 #else
412 int witness_skipspin = 0;
413 #endif
414 SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin, 0, "");
415
416 int badstack_sbuf_size;
417
418 int witness_count = WITNESS_COUNT;
419 SYSCTL_INT(_debug_witness, OID_AUTO, witness_count, CTLFLAG_RDTUN,
420 &witness_count, 0, "");
421
422 /*
423 * Output channel for witness messages. By default we print to the console.
424 */
425 enum witness_channel {
426 WITNESS_CONSOLE,
427 WITNESS_LOG,
428 WITNESS_NONE,
429 };
430
431 static enum witness_channel witness_channel = WITNESS_CONSOLE;
432 SYSCTL_PROC(_debug_witness, OID_AUTO, output_channel,
433 CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, 0,
434 sysctl_debug_witness_channel, "A",
435 "Output channel for warnings");
436
437 /*
438 * Call this to print out the relations between locks.
439 */
440 SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph,
441 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
442 sysctl_debug_witness_fullgraph, "A",
443 "Show locks relation graphs");
444
445 /*
446 * Call this to print out the witness faulty stacks.
447 */
448 SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks,
449 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
450 sysctl_debug_witness_badstacks, "A",
451 "Show bad witness stacks");
452
453 static struct mtx w_mtx;
454
455 /* w_list */
456 static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
457 static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
458
459 /* w_typelist */
460 static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
461 static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
462
463 /* lock list */
464 static struct lock_list_entry *w_lock_list_free = NULL;
465 static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
466 static u_int pending_cnt;
467
468 static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
469 SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
470 SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
471 SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
472 "");
473
474 static struct witness *w_data;
475 static uint8_t **w_rmatrix;
476 static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
477 static struct witness_hash w_hash; /* The witness hash table. */
478
479 /* The lock order data hash */
480 static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
481 static struct witness_lock_order_data *w_lofree = NULL;
482 static struct witness_lock_order_hash w_lohash;
483 static int w_max_used_index = 0;
484 static unsigned int w_generation = 0;
485 static const char w_notrunning[] = "Witness not running\n";
486 static const char w_stillcold[] = "Witness is still cold\n";
487 #ifdef __i386__
488 static const char w_notallowed[] = "The sysctl is disabled on the arch\n";
489 #endif
490
491 static struct witness_order_list_entry order_lists[] = {
492 /*
493 * sx locks
494 */
495 { "proctree", &lock_class_sx },
496 { "allproc", &lock_class_sx },
497 { "allprison", &lock_class_sx },
498 { NULL, NULL },
499 /*
500 * Various mutexes
501 */
502 { "Giant", &lock_class_mtx_sleep },
503 { "pipe mutex", &lock_class_mtx_sleep },
504 { "sigio lock", &lock_class_mtx_sleep },
505 { "process group", &lock_class_mtx_sleep },
506 #ifdef HWPMC_HOOKS
507 { "pmc-sleep", &lock_class_mtx_sleep },
508 #endif
509 { "process lock", &lock_class_mtx_sleep },
510 { "session", &lock_class_mtx_sleep },
511 { "uidinfo hash", &lock_class_rw },
512 { "time lock", &lock_class_mtx_sleep },
513 { NULL, NULL },
514 /*
515 * umtx
516 */
517 { "umtx lock", &lock_class_mtx_sleep },
518 { NULL, NULL },
519 /*
520 * Sockets
521 */
522 { "accept", &lock_class_mtx_sleep },
523 { "so_snd", &lock_class_mtx_sleep },
524 { "so_rcv", &lock_class_mtx_sleep },
525 { "sellck", &lock_class_mtx_sleep },
526 { NULL, NULL },
527 /*
528 * Routing
529 */
530 { "so_rcv", &lock_class_mtx_sleep },
531 { "radix node head", &lock_class_rm },
532 { "ifaddr", &lock_class_mtx_sleep },
533 { NULL, NULL },
534 /*
535 * IPv4 multicast:
536 * protocol locks before interface locks, after UDP locks.
537 */
538 { "in_multi_sx", &lock_class_sx },
539 { "udpinp", &lock_class_rw },
540 { "in_multi_list_mtx", &lock_class_mtx_sleep },
541 { "igmp_mtx", &lock_class_mtx_sleep },
542 { "if_addr_lock", &lock_class_mtx_sleep },
543 { NULL, NULL },
544 /*
545 * IPv6 multicast:
546 * protocol locks before interface locks, after UDP locks.
547 */
548 { "in6_multi_sx", &lock_class_sx },
549 { "udpinp", &lock_class_rw },
550 { "in6_multi_list_mtx", &lock_class_mtx_sleep },
551 { "mld_mtx", &lock_class_mtx_sleep },
552 { "if_addr_lock", &lock_class_mtx_sleep },
553 { NULL, NULL },
554 /*
555 * UNIX Domain Sockets
556 */
557 { "unp_link_rwlock", &lock_class_rw },
558 { "unp_list_lock", &lock_class_mtx_sleep },
559 { "unp", &lock_class_mtx_sleep },
560 { "so_snd", &lock_class_mtx_sleep },
561 { NULL, NULL },
562 /*
563 * UDP/IP
564 */
565 { "udpinp", &lock_class_rw },
566 { "udp", &lock_class_mtx_sleep },
567 { "so_snd", &lock_class_mtx_sleep },
568 { NULL, NULL },
569 /*
570 * TCP/IP
571 */
572 { "tcpinp", &lock_class_rw },
573 { "tcp", &lock_class_mtx_sleep },
574 { "so_snd", &lock_class_mtx_sleep },
575 { NULL, NULL },
576 /*
577 * BPF
578 */
579 { "bpf global lock", &lock_class_sx },
580 { "bpf cdev lock", &lock_class_mtx_sleep },
581 { NULL, NULL },
582 /*
583 * NFS server
584 */
585 { "nfsd_mtx", &lock_class_mtx_sleep },
586 { "so_snd", &lock_class_mtx_sleep },
587 { NULL, NULL },
588
589 /*
590 * IEEE 802.11
591 */
592 { "802.11 com lock", &lock_class_mtx_sleep},
593 { NULL, NULL },
594 /*
595 * Network drivers
596 */
597 { "network driver", &lock_class_mtx_sleep},
598 { NULL, NULL },
599
600 /*
601 * Netgraph
602 */
603 { "ng_node", &lock_class_mtx_sleep },
604 { "ng_worklist", &lock_class_mtx_sleep },
605 { NULL, NULL },
606 /*
607 * CDEV
608 */
609 { "vm map (system)", &lock_class_mtx_sleep },
610 { "vnode interlock", &lock_class_mtx_sleep },
611 { "cdev", &lock_class_mtx_sleep },
612 { "devthrd", &lock_class_mtx_sleep },
613 { NULL, NULL },
614 /*
615 * VM
616 */
617 { "vm map (user)", &lock_class_sx },
618 { "vm object", &lock_class_rw },
619 { "vm page", &lock_class_mtx_sleep },
620 { "pmap pv global", &lock_class_rw },
621 { "pmap", &lock_class_mtx_sleep },
622 { "pmap pv list", &lock_class_rw },
623 { "vm page free queue", &lock_class_mtx_sleep },
624 { "vm pagequeue", &lock_class_mtx_sleep },
625 { NULL, NULL },
626 /*
627 * kqueue/VFS interaction
628 */
629 { "kqueue", &lock_class_mtx_sleep },
630 { "struct mount mtx", &lock_class_mtx_sleep },
631 { "vnode interlock", &lock_class_mtx_sleep },
632 { NULL, NULL },
633 /*
634 * VFS namecache
635 */
636 { "ncvn", &lock_class_mtx_sleep },
637 { "ncbuc", &lock_class_mtx_sleep },
638 { "vnode interlock", &lock_class_mtx_sleep },
639 { "ncneg", &lock_class_mtx_sleep },
640 { NULL, NULL },
641 /*
642 * ZFS locking
643 */
644 { "dn->dn_mtx", &lock_class_sx },
645 { "dr->dt.di.dr_mtx", &lock_class_sx },
646 { "db->db_mtx", &lock_class_sx },
647 { NULL, NULL },
648 /*
649 * TCP log locks
650 */
651 { "TCP ID tree", &lock_class_rw },
652 { "tcp log id bucket", &lock_class_mtx_sleep },
653 { "tcpinp", &lock_class_rw },
654 { "TCP log expireq", &lock_class_mtx_sleep },
655 { NULL, NULL },
656 /*
657 * spin locks
658 */
659 #ifdef SMP
660 { "ap boot", &lock_class_mtx_spin },
661 #endif
662 { "rm.mutex_mtx", &lock_class_mtx_spin },
663 #ifdef __i386__
664 { "cy", &lock_class_mtx_spin },
665 #endif
666 { "scc_hwmtx", &lock_class_mtx_spin },
667 { "uart_hwmtx", &lock_class_mtx_spin },
668 { "fast_taskqueue", &lock_class_mtx_spin },
669 { "intr table", &lock_class_mtx_spin },
670 { "process slock", &lock_class_mtx_spin },
671 { "syscons video lock", &lock_class_mtx_spin },
672 { "sleepq chain", &lock_class_mtx_spin },
673 { "rm_spinlock", &lock_class_mtx_spin },
674 { "turnstile chain", &lock_class_mtx_spin },
675 { "turnstile lock", &lock_class_mtx_spin },
676 { "sched lock", &lock_class_mtx_spin },
677 { "td_contested", &lock_class_mtx_spin },
678 { "callout", &lock_class_mtx_spin },
679 { "entropy harvest mutex", &lock_class_mtx_spin },
680 #ifdef SMP
681 { "smp rendezvous", &lock_class_mtx_spin },
682 #endif
683 #ifdef __powerpc__
684 { "tlb0", &lock_class_mtx_spin },
685 #endif
686 { NULL, NULL },
687 { "sched lock", &lock_class_mtx_spin },
688 #ifdef HWPMC_HOOKS
689 { "pmc-per-proc", &lock_class_mtx_spin },
690 #endif
691 { NULL, NULL },
692 /*
693 * leaf locks
694 */
695 { "intrcnt", &lock_class_mtx_spin },
696 { "icu", &lock_class_mtx_spin },
697 #ifdef __i386__
698 { "allpmaps", &lock_class_mtx_spin },
699 { "descriptor tables", &lock_class_mtx_spin },
700 #endif
701 { "clk", &lock_class_mtx_spin },
702 { "cpuset", &lock_class_mtx_spin },
703 { "mprof lock", &lock_class_mtx_spin },
704 { "zombie lock", &lock_class_mtx_spin },
705 { "ALD Queue", &lock_class_mtx_spin },
706 #if defined(__i386__) || defined(__amd64__)
707 { "pcicfg", &lock_class_mtx_spin },
708 { "NDIS thread lock", &lock_class_mtx_spin },
709 #endif
710 { "tw_osl_io_lock", &lock_class_mtx_spin },
711 { "tw_osl_q_lock", &lock_class_mtx_spin },
712 { "tw_cl_io_lock", &lock_class_mtx_spin },
713 { "tw_cl_intr_lock", &lock_class_mtx_spin },
714 { "tw_cl_gen_lock", &lock_class_mtx_spin },
715 #ifdef HWPMC_HOOKS
716 { "pmc-leaf", &lock_class_mtx_spin },
717 #endif
718 { "blocked lock", &lock_class_mtx_spin },
719 { NULL, NULL },
720 { NULL, NULL }
721 };
722
723 /*
724 * Pairs of locks which have been blessed. Witness does not complain about
725 * order problems with blessed lock pairs. Please do not add an entry to the
726 * table without an explanatory comment.
727 */
728 static struct witness_blessed blessed_list[] = {
729 /*
730 * See the comment in ufs_dirhash.c. Basically, a vnode lock serializes
731 * both lock orders, so a deadlock cannot happen as a result of this
732 * LOR.
733 */
734 { "dirhash", "bufwait" },
735
736 /*
737 * A UFS vnode may be locked in vget() while a buffer belonging to the
738 * parent directory vnode is locked.
739 */
740 { "ufs", "bufwait" },
741
742 /*
743 * The tarfs decompression stream vnode may be locked while a
744 * buffer belonging to a tarfs data vnode is locked.
745 */
746 { "tarfs", "bufwait" },
747 };
748
749 /*
750 * This global is set to 0 once it becomes safe to use the witness code.
751 */
752 static int witness_cold = 1;
753
754 /*
755 * This global is set to 1 once the static lock orders have been enrolled
756 * so that a warning can be issued for any spin locks enrolled later.
757 */
758 static int witness_spin_warn = 0;
759
760 /* Trim useless garbage from filenames. */
761 static const char *
762 fixup_filename(const char *file)
763 {
764
765 if (file == NULL)
766 return (NULL);
767 while (strncmp(file, "../", 3) == 0)
768 file += 3;
769 return (file);
770 }
771
772 /*
773 * Calculate the size of early witness structures.
774 */
775 int
776 witness_startup_count(void)
777 {
778 int sz;
779
780 sz = sizeof(struct witness) * witness_count;
781 sz += sizeof(*w_rmatrix) * (witness_count + 1);
782 sz += sizeof(*w_rmatrix[0]) * (witness_count + 1) *
783 (witness_count + 1);
784
785 return (sz);
786 }
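/*
 * Rough sizing example (editorial, assuming the default witness_count of
 * 1536): the dominant term is the one-byte-per-cell relationship matrix,
 * (1536 + 1) * (1536 + 1) bytes, roughly 2.3 MB, on top of the witness
 * array and the row-pointer vector, whose sizes are architecture dependent.
 */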
787
788 /*
789 * The WITNESS-enabled diagnostic code. Note that the witness code does
790 * assume that the early boot is single-threaded at least until after this
791 * routine is completed.
792 */
793 void
794 witness_startup(void *mem)
795 {
796 struct lock_object *lock;
797 struct witness_order_list_entry *order;
798 struct witness *w, *w1;
799 uintptr_t p;
800 int i;
801
802 p = (uintptr_t)mem;
803 w_data = (void *)p;
804 p += sizeof(struct witness) * witness_count;
805
806 w_rmatrix = (void *)p;
807 p += sizeof(*w_rmatrix) * (witness_count + 1);
808
809 for (i = 0; i < witness_count + 1; i++) {
810 w_rmatrix[i] = (void *)p;
811 p += sizeof(*w_rmatrix[i]) * (witness_count + 1);
812 }
813 badstack_sbuf_size = witness_count * 256;
814
815 /*
816 * We have to release Giant before initializing its witness
817 * structure so that WITNESS doesn't get confused.
818 */
819 mtx_unlock(&Giant);
820 mtx_assert(&Giant, MA_NOTOWNED);
821
822 CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
823 mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
824 MTX_NOWITNESS | MTX_NOPROFILE);
825 for (i = witness_count - 1; i >= 0; i--) {
826 w = &w_data[i];
827 memset(w, 0, sizeof(*w));
828 w_data[i].w_index = i; /* Witness index never changes. */
829 witness_free(w);
830 }
831 KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
832 ("%s: Invalid list of free witness objects", __func__));
833
834 /* Witness with index 0 is left unused, as an aid to debugging. */
835 STAILQ_REMOVE_HEAD(&w_free, w_list);
836 w_free_cnt--;
837
838 for (i = 0; i < witness_count; i++) {
839 memset(w_rmatrix[i], 0, sizeof(*w_rmatrix[i]) *
840 (witness_count + 1));
841 }
842
843 for (i = 0; i < LOCK_CHILDCOUNT; i++)
844 witness_lock_list_free(&w_locklistdata[i]);
845 witness_init_hash_tables();
846
847 /* First add in all the specified order lists. */
848 for (order = order_lists; order->w_name != NULL; order++) {
849 w = enroll(order->w_name, order->w_class);
850 if (w == NULL)
851 continue;
852 w->w_file = "order list";
853 for (order++; order->w_name != NULL; order++) {
854 w1 = enroll(order->w_name, order->w_class);
855 if (w1 == NULL)
856 continue;
857 w1->w_file = "order list";
858 itismychild(w, w1);
859 w = w1;
860 }
861 }
862 witness_spin_warn = 1;
863
864 /* Iterate through all locks and add them to witness. */
865 for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
866 lock = pending_locks[i].wh_lock;
867 KASSERT(lock->lo_flags & LO_WITNESS,
868 ("%s: lock %s is on pending list but not LO_WITNESS",
869 __func__, lock->lo_name));
870 lock->lo_witness = enroll(pending_locks[i].wh_type,
871 LOCK_CLASS(lock));
872 }
873
874 /* Mark the witness code as being ready for use. */
875 witness_cold = 0;
876
877 mtx_lock(&Giant);
878 }
879
880 void
881 witness_init(struct lock_object *lock, const char *type)
882 {
883 struct lock_class *class;
884
885 /* Various sanity checks. */
886 class = LOCK_CLASS(lock);
887 if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
888 (class->lc_flags & LC_RECURSABLE) == 0)
889 kassert_panic("%s: lock (%s) %s can not be recursable",
890 __func__, class->lc_name, lock->lo_name);
891 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
892 (class->lc_flags & LC_SLEEPABLE) == 0)
893 kassert_panic("%s: lock (%s) %s can not be sleepable",
894 __func__, class->lc_name, lock->lo_name);
895 if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
896 (class->lc_flags & LC_UPGRADABLE) == 0)
897 kassert_panic("%s: lock (%s) %s can not be upgradable",
898 __func__, class->lc_name, lock->lo_name);
899
900 /*
901 * If we shouldn't watch this lock, then just clear lo_witness.
902 * Otherwise, if witness_cold is set, then it is too early to
903 * enroll this lock, so defer it to witness_initialize() by adding
904 * it to the pending_locks list. If it is not too early, then enroll
905 * the lock now.
906 */
907 if (witness_watch < 1 || KERNEL_PANICKED() ||
908 (lock->lo_flags & LO_WITNESS) == 0)
909 lock->lo_witness = NULL;
910 else if (witness_cold) {
911 pending_locks[pending_cnt].wh_lock = lock;
912 pending_locks[pending_cnt++].wh_type = type;
913 if (pending_cnt > WITNESS_PENDLIST)
914 panic("%s: pending locks list is too small, "
915 "increase WITNESS_PENDLIST\n",
916 __func__);
917 } else
918 lock->lo_witness = enroll(type, class);
919 }
920
921 void
922 witness_destroy(struct lock_object *lock)
923 {
924 struct lock_class *class;
925 struct witness *w;
926
927 class = LOCK_CLASS(lock);
928
929 if (witness_cold)
930 panic("lock (%s) %s destroyed while witness_cold",
931 class->lc_name, lock->lo_name);
932
933 /* XXX: need to verify that no one holds the lock */
934 if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
935 return;
936 w = lock->lo_witness;
937
938 mtx_lock_spin(&w_mtx);
939 MPASS(w->w_refcount > 0);
940 w->w_refcount--;
941
942 if (w->w_refcount == 0)
943 depart(w);
944 mtx_unlock_spin(&w_mtx);
945 }
946
947 #ifdef DDB
948 static void
949 witness_ddb_compute_levels(void)
950 {
951 struct witness *w;
952
953 /*
954 * First clear all levels.
955 */
956 STAILQ_FOREACH(w, &w_all, w_list)
957 w->w_ddb_level = -1;
958
959 /*
960 * Look for locks with no parents and level all their descendants.
961 */
962 STAILQ_FOREACH(w, &w_all, w_list) {
963 /* If the witness has ancestors (is not a root), skip it. */
964 if (w->w_num_ancestors > 0)
965 continue;
966 witness_ddb_level_descendants(w, 0);
967 }
968 }
969
970 static void
971 witness_ddb_level_descendants(struct witness *w, int l)
972 {
973 int i;
974
975 if (w->w_ddb_level >= l)
976 return;
977
978 w->w_ddb_level = l;
979 l++;
980
981 for (i = 1; i <= w_max_used_index; i++) {
982 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
983 witness_ddb_level_descendants(&w_data[i], l);
984 }
985 }
986
987 static void
988 witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
989 struct witness *w, int indent)
990 {
991 int i;
992
993 for (i = 0; i < indent; i++)
994 prnt(" ");
995 prnt("%s (type: %s, depth: %d, active refs: %d)",
996 w->w_name, w->w_class->lc_name,
997 w->w_ddb_level, w->w_refcount);
998 if (w->w_displayed) {
999 prnt(" -- (already displayed)\n");
1000 return;
1001 }
1002 w->w_displayed = 1;
1003 if (w->w_file != NULL && w->w_line != 0)
1004 prnt(" -- last acquired @ %s:%d\n", fixup_filename(w->w_file),
1005 w->w_line);
1006 else
1007 prnt(" -- never acquired\n");
1008 indent++;
1009 WITNESS_INDEX_ASSERT(w->w_index);
1010 for (i = 1; i <= w_max_used_index; i++) {
1011 if (db_pager_quit)
1012 return;
1013 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
1014 witness_ddb_display_descendants(prnt, &w_data[i],
1015 indent);
1016 }
1017 }
1018
1019 static void
1020 witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
1021 struct witness_list *list)
1022 {
1023 struct witness *w;
1024
1025 STAILQ_FOREACH(w, list, w_typelist) {
1026 if (w->w_file == NULL || w->w_ddb_level > 0)
1027 continue;
1028
1029 /* This lock has no ancestors - display its descendants. */
1030 witness_ddb_display_descendants(prnt, w, 0);
1031 if (db_pager_quit)
1032 return;
1033 }
1034 }
1035
1036 static void
1037 witness_ddb_display(int(*prnt)(const char *fmt, ...))
1038 {
1039 struct witness *w;
1040
1041 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1042 witness_ddb_compute_levels();
1043
1044 /* Clear all the displayed flags. */
1045 STAILQ_FOREACH(w, &w_all, w_list)
1046 w->w_displayed = 0;
1047
1048 /*
1049 * First, handle sleep locks which have been acquired at least
1050 * once.
1051 */
1052 prnt("Sleep locks:\n");
1053 witness_ddb_display_list(prnt, &w_sleep);
1054 if (db_pager_quit)
1055 return;
1056
1057 /*
1058 * Now do spin locks which have been acquired at least once.
1059 */
1060 prnt("\nSpin locks:\n");
1061 witness_ddb_display_list(prnt, &w_spin);
1062 if (db_pager_quit)
1063 return;
1064
1065 /*
1066 * Finally, any locks which have not been acquired yet.
1067 */
1068 prnt("\nLocks which were never acquired:\n");
1069 STAILQ_FOREACH(w, &w_all, w_list) {
1070 if (w->w_file != NULL || w->w_refcount == 0)
1071 continue;
1072 prnt("%s (type: %s, depth: %d)\n", w->w_name,
1073 w->w_class->lc_name, w->w_ddb_level);
1074 if (db_pager_quit)
1075 return;
1076 }
1077 }
1078 #endif /* DDB */
1079
1080 int
1081 witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
1082 {
1083
1084 if (witness_watch == -1 || KERNEL_PANICKED())
1085 return (0);
1086
1087 /* Require locks that witness knows about. */
1088 if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
1089 lock2->lo_witness == NULL)
1090 return (EINVAL);
1091
1092 mtx_assert(&w_mtx, MA_NOTOWNED);
1093 mtx_lock_spin(&w_mtx);
1094
1095 /*
1096 * If we already have either an explicit or implied lock order that
1097 * is the other way around, then return an error.
1098 */
1099 if (witness_watch &&
1100 isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
1101 mtx_unlock_spin(&w_mtx);
1102 return (EDOOFUS);
1103 }
1104
1105 /* Try to add the new order. */
1106 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1107 lock2->lo_witness->w_name, lock1->lo_witness->w_name);
1108 itismychild(lock1->lo_witness, lock2->lo_witness);
1109 mtx_unlock_spin(&w_mtx);
1110 return (0);
1111 }
1112
1113 void
1114 witness_checkorder(struct lock_object *lock, int flags, const char *file,
1115 int line, struct lock_object *interlock)
1116 {
1117 struct lock_list_entry *lock_list, *lle;
1118 struct lock_instance *lock1, *lock2, *plock;
1119 struct lock_class *class, *iclass;
1120 struct witness *w, *w1;
1121 struct thread *td;
1122 int i, j;
1123
1124 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
1125 KERNEL_PANICKED())
1126 return;
1127
1128 w = lock->lo_witness;
1129 class = LOCK_CLASS(lock);
1130 td = curthread;
1131
1132 if (class->lc_flags & LC_SLEEPLOCK) {
1133 /*
1134 * Since spin locks include a critical section, this check
1135 * implicitly enforces a lock order of all sleep locks before
1136 * all spin locks.
1137 */
1138 if (td->td_critnest != 0 && !kdb_active)
1139 kassert_panic("acquiring blockable sleep lock with "
1140 "spinlock or critical section held (%s) %s @ %s:%d",
1141 class->lc_name, lock->lo_name,
1142 fixup_filename(file), line);
1143
1144 /*
1145 * If this is the first lock acquired then just return as
1146 * no order checking is needed.
1147 */
1148 lock_list = td->td_sleeplocks;
1149 if (lock_list == NULL || lock_list->ll_count == 0)
1150 return;
1151 } else {
1152 /*
1153 * If this is the first lock, just return as no order
1154 * checking is needed. Avoid problems with thread
1155 * migration pinning the thread while checking if
1156 * spinlocks are held. If at least one spinlock is held
1157 * the thread is in a safe path and it is allowed to
1158 * unpin it.
1159 */
1160 sched_pin();
1161 lock_list = PCPU_GET(spinlocks);
1162 if (lock_list == NULL || lock_list->ll_count == 0) {
1163 sched_unpin();
1164 return;
1165 }
1166 sched_unpin();
1167 }
1168
1169 /*
1170 * Check to see if we are recursing on a lock we already own. If
1171 * so, make sure that we don't mismatch exclusive and shared lock
1172 * acquires.
1173 */
1174 lock1 = find_instance(lock_list, lock);
1175 if (lock1 != NULL) {
1176 if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
1177 (flags & LOP_EXCLUSIVE) == 0) {
1178 witness_output("shared lock of (%s) %s @ %s:%d\n",
1179 class->lc_name, lock->lo_name,
1180 fixup_filename(file), line);
1181 witness_output("while exclusively locked from %s:%d\n",
1182 fixup_filename(lock1->li_file), lock1->li_line);
1183 kassert_panic("excl->share");
1184 }
1185 if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
1186 (flags & LOP_EXCLUSIVE) != 0) {
1187 witness_output("exclusive lock of (%s) %s @ %s:%d\n",
1188 class->lc_name, lock->lo_name,
1189 fixup_filename(file), line);
1190 witness_output("while share locked from %s:%d\n",
1191 fixup_filename(lock1->li_file), lock1->li_line);
1192 kassert_panic("share->excl");
1193 }
1194 return;
1195 }
1196
1197 /* Warn if the interlock is not locked exactly once. */
1198 if (interlock != NULL) {
1199 iclass = LOCK_CLASS(interlock);
1200 lock1 = find_instance(lock_list, interlock);
1201 if (lock1 == NULL)
1202 kassert_panic("interlock (%s) %s not locked @ %s:%d",
1203 iclass->lc_name, interlock->lo_name,
1204 fixup_filename(file), line);
1205 else if ((lock1->li_flags & LI_RECURSEMASK) != 0)
1206 kassert_panic("interlock (%s) %s recursed @ %s:%d",
1207 iclass->lc_name, interlock->lo_name,
1208 fixup_filename(file), line);
1209 }
1210
1211 /*
1212 * Find the previously acquired lock, but ignore interlocks.
1213 */
1214 plock = &lock_list->ll_children[lock_list->ll_count - 1];
1215 if (interlock != NULL && plock->li_lock == interlock) {
1216 if (lock_list->ll_count > 1)
1217 plock =
1218 &lock_list->ll_children[lock_list->ll_count - 2];
1219 else {
1220 lle = lock_list->ll_next;
1221
1222 /*
1223 * The interlock is the only lock we hold, so
1224 * simply return.
1225 */
1226 if (lle == NULL)
1227 return;
1228 plock = &lle->ll_children[lle->ll_count - 1];
1229 }
1230 }
1231
1232 /*
1233 * Try to perform most checks without a lock. If this succeeds we
1234 * can skip acquiring the lock and return success. Otherwise we redo
1235 * the check with the lock held to handle races with concurrent updates.
1236 */
1237 w1 = plock->li_lock->lo_witness;
1238 if (witness_lock_order_check(w1, w))
1239 return;
1240
1241 mtx_lock_spin(&w_mtx);
1242 if (witness_lock_order_check(w1, w)) {
1243 mtx_unlock_spin(&w_mtx);
1244 return;
1245 }
1246 witness_lock_order_add(w1, w);
1247
1248 /*
1249 * Check for duplicate locks of the same type. Note that we only
1250 * have to check for this on the last lock we just acquired. Any
1251 * other cases will be caught as lock order violations.
1252 */
1253 if (w1 == w) {
1254 i = w->w_index;
1255 if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
1256 !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
1257 w_rmatrix[i][i] |= WITNESS_REVERSAL;
1258 w->w_reversed = 1;
1259 mtx_unlock_spin(&w_mtx);
1260 witness_output(
1261 "acquiring duplicate lock of same type: \"%s\"\n",
1262 w->w_name);
1263 witness_output(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
1264 fixup_filename(plock->li_file), plock->li_line);
1265 witness_output(" 2nd %s @ %s:%d\n", lock->lo_name,
1266 fixup_filename(file), line);
1267 witness_debugger(1, __func__);
1268 } else
1269 mtx_unlock_spin(&w_mtx);
1270 return;
1271 }
1272 mtx_assert(&w_mtx, MA_OWNED);
1273
1274 /*
1275 * If we know that the lock we are acquiring comes after
1276 * the lock we most recently acquired in the lock order tree,
1277 * then there is no need for any further checks.
1278 */
1279 if (isitmychild(w1, w))
1280 goto out;
1281
1282 for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
1283 for (i = lle->ll_count - 1; i >= 0; i--, j++) {
1284 struct stack pstack;
1285 bool pstackv, trace;
1286
1287 MPASS(j < LOCK_CHILDCOUNT * LOCK_NCHILDREN);
1288 lock1 = &lle->ll_children[i];
1289
1290 /*
1291 * Ignore the interlock.
1292 */
1293 if (interlock == lock1->li_lock)
1294 continue;
1295
1296 /*
1297 * If this lock doesn't undergo witness checking,
1298 * then skip it.
1299 */
1300 w1 = lock1->li_lock->lo_witness;
1301 if (w1 == NULL) {
1302 KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
1303 ("lock missing witness structure"));
1304 continue;
1305 }
1306
1307 /*
1308 * If we are locking Giant and this is a sleepable
1309 * lock, then skip it.
1310 */
1311 if ((lock1->li_flags & LI_SLEEPABLE) != 0 &&
1312 lock == &Giant.lock_object)
1313 continue;
1314
1315 /*
1316 * If we are locking a sleepable lock and this lock
1317 * is Giant, then skip it.
1318 */
1319 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1320 (flags & LOP_NOSLEEP) == 0 &&
1321 lock1->li_lock == &Giant.lock_object)
1322 continue;
1323
1324 /*
1325 * If we are locking a sleepable lock and this lock
1326 * isn't sleepable, we want to treat it as a lock
1327 * order violation to enforce a general lock order of
1328 * sleepable locks before non-sleepable locks.
1329 */
1330 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1331 (flags & LOP_NOSLEEP) == 0 &&
1332 (lock1->li_flags & LI_SLEEPABLE) == 0)
1333 goto reversal;
1334
1335 /*
1336 * If we are locking Giant and this is a non-sleepable
1337 * lock, then treat it as a reversal.
1338 */
1339 if ((lock1->li_flags & LI_SLEEPABLE) == 0 &&
1340 lock == &Giant.lock_object)
1341 goto reversal;
1342
1343 /*
1344 * Check the lock order hierarchy for a reversal.
1345 */
1346 if (!isitmydescendant(w, w1))
1347 continue;
1348 reversal:
1349
1350 /*
1351 * We have a lock order violation, check to see if it
1352 * is allowed or has already been yelled about.
1353 */
1354
1355 /* Bail if this violation is known */
1356 if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
1357 goto out;
1358
1359 /* Record this as a violation */
1360 w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
1361 w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
1362 w->w_reversed = w1->w_reversed = 1;
1363 witness_increment_graph_generation();
1364
1365 /*
1366 * If the lock order is blessed, bail before logging
1367 * anything. We don't look for other lock order
1368 * violations though, which may be a bug.
1369 */
1370 if (blessed(w, w1))
1371 goto out;
1372
1373 trace = atomic_load_int(&witness_trace);
1374 if (trace) {
1375 struct witness_lock_order_data *data;
1376
1377 pstackv = false;
1378 data = witness_lock_order_get(w, w1);
1379 if (data != NULL) {
1380 stack_copy(&data->wlod_stack,
1381 &pstack);
1382 pstackv = true;
1383 }
1384 }
1385 mtx_unlock_spin(&w_mtx);
1386
1387 #ifdef WITNESS_NO_VNODE
1388 /*
1389 * There are known LORs between VNODE locks. They are
1390 * not an indication of a bug. VNODE locks are flagged
1391 * as such (LO_IS_VNODE) and we don't yell if the LOR
1392 * is between 2 VNODE locks.
1393 */
1394 if ((lock->lo_flags & LO_IS_VNODE) != 0 &&
1395 (lock1->li_lock->lo_flags & LO_IS_VNODE) != 0)
1396 return;
1397 #endif
1398
1399 /*
1400 * Ok, yell about it.
1401 */
1402 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1403 (flags & LOP_NOSLEEP) == 0 &&
1404 (lock1->li_flags & LI_SLEEPABLE) == 0)
1405 witness_output(
1406 "lock order reversal: (sleepable after non-sleepable)\n");
1407 else if ((lock1->li_flags & LI_SLEEPABLE) == 0
1408 && lock == &Giant.lock_object)
1409 witness_output(
1410 "lock order reversal: (Giant after non-sleepable)\n");
1411 else
1412 witness_output("lock order reversal:\n");
1413
1414 /*
1415 * Try to locate an earlier lock with
1416 * witness w in our list.
1417 */
1418 do {
1419 lock2 = &lle->ll_children[i];
1420 MPASS(lock2->li_lock != NULL);
1421 if (lock2->li_lock->lo_witness == w)
1422 break;
1423 if (i == 0 && lle->ll_next != NULL) {
1424 lle = lle->ll_next;
1425 i = lle->ll_count - 1;
1426 MPASS(i >= 0 && i < LOCK_NCHILDREN);
1427 } else
1428 i--;
1429 } while (i >= 0);
1430 if (i < 0) {
1431 witness_output(" 1st %p %s (%s, %s) @ %s:%d\n",
1432 lock1->li_lock, lock1->li_lock->lo_name,
1433 w1->w_name, w1->w_class->lc_name,
1434 fixup_filename(lock1->li_file),
1435 lock1->li_line);
1436 witness_output(" 2nd %p %s (%s, %s) @ %s:%d\n",
1437 lock, lock->lo_name, w->w_name,
1438 w->w_class->lc_name, fixup_filename(file),
1439 line);
1440 } else {
1441 struct witness *w2 = lock2->li_lock->lo_witness;
1442
1443 witness_output(" 1st %p %s (%s, %s) @ %s:%d\n",
1444 lock2->li_lock, lock2->li_lock->lo_name,
1445 w2->w_name, w2->w_class->lc_name,
1446 fixup_filename(lock2->li_file),
1447 lock2->li_line);
1448 witness_output(" 2nd %p %s (%s, %s) @ %s:%d\n",
1449 lock1->li_lock, lock1->li_lock->lo_name,
1450 w1->w_name, w1->w_class->lc_name,
1451 fixup_filename(lock1->li_file),
1452 lock1->li_line);
1453 witness_output(" 3rd %p %s (%s, %s) @ %s:%d\n", lock,
1454 lock->lo_name, w->w_name,
1455 w->w_class->lc_name, fixup_filename(file),
1456 line);
1457 }
1458 if (trace) {
1459 char buf[64];
1460 struct sbuf sb;
1461
1462 sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
1463 sbuf_set_drain(&sb, witness_output_drain,
1464 NULL);
1465
1466 if (pstackv) {
1467 sbuf_printf(&sb,
1468 "lock order %s -> %s established at:\n",
1469 w->w_name, w1->w_name);
1470 stack_sbuf_print_flags(&sb, &pstack,
1471 M_NOWAIT, STACK_SBUF_FMT_LONG);
1472 }
1473
1474 sbuf_printf(&sb,
1475 "lock order %s -> %s attempted at:\n",
1476 w1->w_name, w->w_name);
1477 stack_save(&pstack);
1478 stack_sbuf_print_flags(&sb, &pstack, M_NOWAIT,
1479 STACK_SBUF_FMT_LONG);
1480
1481 sbuf_finish(&sb);
1482 sbuf_delete(&sb);
1483 }
1484 witness_enter_debugger(__func__);
1485 return;
1486 }
1487 }
1488
1489 /*
1490 * If requested, build a new lock order. However, don't build a new
1491 * relationship between a sleepable lock and Giant if it is in the
1492 * wrong direction. The correct lock order is that sleepable locks
1493 * always come before Giant.
1494 */
1495 if (flags & LOP_NEWORDER &&
1496 !(plock->li_lock == &Giant.lock_object &&
1497 (lock->lo_flags & LO_SLEEPABLE) != 0 &&
1498 (flags & LOP_NOSLEEP) == 0)) {
1499 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1500 w->w_name, plock->li_lock->lo_witness->w_name);
1501 itismychild(plock->li_lock->lo_witness, w);
1502 }
1503 out:
1504 mtx_unlock_spin(&w_mtx);
1505 }
1506
1507 void
1508 witness_lock(struct lock_object *lock, int flags, const char *file, int line)
1509 {
1510 struct lock_list_entry **lock_list, *lle;
1511 struct lock_instance *instance;
1512 struct witness *w;
1513 struct thread *td;
1514
1515 if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
1516 KERNEL_PANICKED())
1517 return;
1518 w = lock->lo_witness;
1519 td = curthread;
1520
1521 /* Determine lock list for this lock. */
1522 if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
1523 lock_list = &td->td_sleeplocks;
1524 else
1525 lock_list = PCPU_PTR(spinlocks);
1526
1527 /* Check to see if we are recursing on a lock we already own. */
1528 instance = find_instance(*lock_list, lock);
1529 if (instance != NULL) {
1530 instance->li_flags++;
1531 CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
1532 td->td_proc->p_pid, lock->lo_name,
1533 instance->li_flags & LI_RECURSEMASK);
1534 instance->li_file = file;
1535 instance->li_line = line;
1536 return;
1537 }
1538
1539 /* Update per-witness last file and line acquire. */
1540 w->w_file = file;
1541 w->w_line = line;
1542
1543 /* Find the next open lock instance in the list and fill it. */
1544 lle = *lock_list;
1545 if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
1546 lle = witness_lock_list_get();
1547 if (lle == NULL)
1548 return;
1549 lle->ll_next = *lock_list;
1550 CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
1551 td->td_proc->p_pid, lle);
1552 *lock_list = lle;
1553 }
1554 instance = &lle->ll_children[lle->ll_count++];
1555 instance->li_lock = lock;
1556 instance->li_line = line;
1557 instance->li_file = file;
1558 instance->li_flags = 0;
1559 if ((flags & LOP_EXCLUSIVE) != 0)
1560 instance->li_flags |= LI_EXCLUSIVE;
1561 if ((lock->lo_flags & LO_SLEEPABLE) != 0 && (flags & LOP_NOSLEEP) == 0)
1562 instance->li_flags |= LI_SLEEPABLE;
1563 CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
1564 td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
1565 }
1566
1567 void
1568 witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
1569 {
1570 struct lock_instance *instance;
1571 struct lock_class *class;
1572
1573 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1574 if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
1575 return;
1576 class = LOCK_CLASS(lock);
1577 if (witness_watch) {
1578 if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1579 kassert_panic(
1580 "upgrade of non-upgradable lock (%s) %s @ %s:%d",
1581 class->lc_name, lock->lo_name,
1582 fixup_filename(file), line);
1583 if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1584 kassert_panic(
1585 "upgrade of non-sleep lock (%s) %s @ %s:%d",
1586 class->lc_name, lock->lo_name,
1587 fixup_filename(file), line);
1588 }
1589 instance = find_instance(curthread->td_sleeplocks, lock);
1590 if (instance == NULL) {
1591 kassert_panic("upgrade of unlocked lock (%s) %s @ %s:%d",
1592 class->lc_name, lock->lo_name,
1593 fixup_filename(file), line);
1594 return;
1595 }
1596 if (witness_watch) {
1597 if ((instance->li_flags & LI_EXCLUSIVE) != 0)
1598 kassert_panic(
1599 "upgrade of exclusive lock (%s) %s @ %s:%d",
1600 class->lc_name, lock->lo_name,
1601 fixup_filename(file), line);
1602 if ((instance->li_flags & LI_RECURSEMASK) != 0)
1603 kassert_panic(
1604 "upgrade of recursed lock (%s) %s r=%d @ %s:%d",
1605 class->lc_name, lock->lo_name,
1606 instance->li_flags & LI_RECURSEMASK,
1607 fixup_filename(file), line);
1608 }
1609 instance->li_flags |= LI_EXCLUSIVE;
1610 }
1611
1612 void
1613 witness_downgrade(struct lock_object *lock, int flags, const char *file,
1614 int line)
1615 {
1616 struct lock_instance *instance;
1617 struct lock_class *class;
1618
1619 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1620 if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
1621 return;
1622 class = LOCK_CLASS(lock);
1623 if (witness_watch) {
1624 if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1625 kassert_panic(
1626 "downgrade of non-upgradable lock (%s) %s @ %s:%d",
1627 class->lc_name, lock->lo_name,
1628 fixup_filename(file), line);
1629 if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1630 kassert_panic(
1631 "downgrade of non-sleep lock (%s) %s @ %s:%d",
1632 class->lc_name, lock->lo_name,
1633 fixup_filename(file), line);
1634 }
1635 instance = find_instance(curthread->td_sleeplocks, lock);
1636 if (instance == NULL) {
1637 kassert_panic("downgrade of unlocked lock (%s) %s @ %s:%d",
1638 class->lc_name, lock->lo_name,
1639 fixup_filename(file), line);
1640 return;
1641 }
1642 if (witness_watch) {
1643 if ((instance->li_flags & LI_EXCLUSIVE) == 0)
1644 kassert_panic(
1645 "downgrade of shared lock (%s) %s @ %s:%d",
1646 class->lc_name, lock->lo_name,
1647 fixup_filename(file), line);
1648 if ((instance->li_flags & LI_RECURSEMASK) != 0)
1649 kassert_panic(
1650 "downgrade of recursed lock (%s) %s r=%d @ %s:%d",
1651 class->lc_name, lock->lo_name,
1652 instance->li_flags & LI_RECURSEMASK,
1653 fixup_filename(file), line);
1654 }
1655 instance->li_flags &= ~LI_EXCLUSIVE;
1656 }
1657
1658 void
1659 witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
1660 {
1661 struct lock_list_entry **lock_list, *lle;
1662 struct lock_instance *instance;
1663 struct lock_class *class;
1664 struct thread *td;
1665 register_t s;
1666 int i, j;
1667
1668 if (witness_cold || lock->lo_witness == NULL || KERNEL_PANICKED())
1669 return;
1670 td = curthread;
1671 class = LOCK_CLASS(lock);
1672
1673 /* Find lock instance associated with this lock. */
1674 if (class->lc_flags & LC_SLEEPLOCK)
1675 lock_list = &td->td_sleeplocks;
1676 else
1677 lock_list = PCPU_PTR(spinlocks);
1678 lle = *lock_list;
1679 for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
1680 for (i = 0; i < (*lock_list)->ll_count; i++) {
1681 instance = &(*lock_list)->ll_children[i];
1682 if (instance->li_lock == lock)
1683 goto found;
1684 }
1685
1686 /*
1687 * When WITNESS is disabled via witness_watch, locks may still be
1688 * registered in the td_sleeplocks queue.
1689 * We have to make sure we flush these queues, so just search for
1690 * any remaining registered locks and remove them.
1691 */
1692 if (witness_watch > 0) {
1693 kassert_panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
1694 lock->lo_name, fixup_filename(file), line);
1695 return;
1696 } else {
1697 return;
1698 }
1699 found:
1700
1701 /* First, check for shared/exclusive mismatches. */
1702 if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
1703 (flags & LOP_EXCLUSIVE) == 0) {
1704 witness_output("shared unlock of (%s) %s @ %s:%d\n",
1705 class->lc_name, lock->lo_name, fixup_filename(file), line);
1706 witness_output("while exclusively locked from %s:%d\n",
1707 fixup_filename(instance->li_file), instance->li_line);
1708 kassert_panic("excl->ushare");
1709 }
1710 if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
1711 (flags & LOP_EXCLUSIVE) != 0) {
1712 witness_output("exclusive unlock of (%s) %s @ %s:%d\n",
1713 class->lc_name, lock->lo_name, fixup_filename(file), line);
1714 witness_output("while share locked from %s:%d\n",
1715 fixup_filename(instance->li_file),
1716 instance->li_line);
1717 kassert_panic("share->uexcl");
1718 }
1719 /* If we are recursed, unrecurse. */
1720 if ((instance->li_flags & LI_RECURSEMASK) > 0) {
1721 CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
1722 td->td_proc->p_pid, instance->li_lock->lo_name,
1723 instance->li_flags & LI_RECURSEMASK);
1724 instance->li_flags--;
1725 return;
1726 }
1727 /* The lock is now being dropped, check for NORELEASE flag */
1728 if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
1729 witness_output("forbidden unlock of (%s) %s @ %s:%d\n",
1730 class->lc_name, lock->lo_name, fixup_filename(file), line);
1731 kassert_panic("lock marked norelease");
1732 }
1733
1734 /* Otherwise, remove this item from the list. */
1735 s = intr_disable();
1736 CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
1737 td->td_proc->p_pid, instance->li_lock->lo_name,
1738 (*lock_list)->ll_count - 1);
1739 for (j = i; j < (*lock_list)->ll_count - 1; j++)
1740 (*lock_list)->ll_children[j] =
1741 (*lock_list)->ll_children[j + 1];
1742 (*lock_list)->ll_count--;
1743 intr_restore(s);
1744
1745 /*
1746 * To reduce contention on w_mtx, we always keep a head object in each
1747 * list so that frequent allocation from the free witness pool (and the
1748 * locking that goes with it) is avoided.
1749 * To keep the code simple, a fully emptied head object also implies
1750 * that there are no further objects in the list, so when the current
1751 * head must be freed, ownership of the list is handed over to the
1752 * next entry.
1753 */
1754 if ((*lock_list)->ll_count == 0) {
1755 if (*lock_list == lle) {
1756 if (lle->ll_next == NULL)
1757 return;
1758 } else
1759 lle = *lock_list;
1760 *lock_list = lle->ll_next;
1761 CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
1762 td->td_proc->p_pid, lle);
1763 witness_lock_list_free(lle);
1764 }
1765 }
1766
1767 void
1768 witness_thread_exit(struct thread *td)
1769 {
1770 struct lock_list_entry *lle;
1771 int i, n;
1772
1773 lle = td->td_sleeplocks;
1774 if (lle == NULL || KERNEL_PANICKED())
1775 return;
1776 if (lle->ll_count != 0) {
1777 for (n = 0; lle != NULL; lle = lle->ll_next)
1778 for (i = lle->ll_count - 1; i >= 0; i--) {
1779 if (n == 0)
1780 witness_output(
1781 "Thread %p exiting with the following locks held:\n", td);
1782 n++;
1783 witness_list_lock(&lle->ll_children[i],
1784 witness_output);
1785
1786 }
1787 kassert_panic(
1788 "Thread %p cannot exit while holding sleeplocks\n", td);
1789 }
1790 witness_lock_list_free(lle);
1791 }
1792
1793 /*
1794 * Warn if any locks other than 'lock' are held. Flags can be passed in to
1795 * exempt Giant and sleepable locks from the checks as well. If any
1796 * non-exempt locks are held, then a supplied message is printed to the
1797 * output channel along with a list of the offending locks. If indicated in the
1798 * flags then a failure results in a panic as well.
1799 */
1800 int
1801 witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1802 {
1803 struct lock_list_entry *lock_list, *lle;
1804 struct lock_instance *lock1;
1805 struct thread *td;
1806 va_list ap;
1807 int i, n;
1808
1809 if (witness_cold || witness_watch < 1 || KERNEL_PANICKED())
1810 return (0);
1811 n = 0;
1812 td = curthread;
1813 for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1814 for (i = lle->ll_count - 1; i >= 0; i--) {
1815 lock1 = &lle->ll_children[i];
1816 if (lock1->li_lock == lock)
1817 continue;
1818 if (flags & WARN_GIANTOK &&
1819 lock1->li_lock == &Giant.lock_object)
1820 continue;
1821 if (flags & WARN_SLEEPOK &&
1822 (lock1->li_flags & LI_SLEEPABLE) != 0)
1823 continue;
1824 if (n == 0) {
1825 va_start(ap, fmt);
1826 vprintf(fmt, ap);
1827 va_end(ap);
1828 printf(" with the following %slocks held:\n",
1829 (flags & WARN_SLEEPOK) != 0 ?
1830 "non-sleepable " : "");
1831 }
1832 n++;
1833 witness_list_lock(lock1, printf);
1834 }
1835
1836 /*
1837 * Pin the thread to avoid problems with thread migration.
1838 * Once all of the checks on spinlock ownership have passed, the
1839 * thread is on a safe path and can be unpinned.
1840 */
1841 sched_pin();
1842 lock_list = PCPU_GET(spinlocks);
1843 if (lock_list != NULL && lock_list->ll_count != 0) {
1844 sched_unpin();
1845
1846 /*
1847 * At most one spinlock should be held here, and since the
1848 * exemption flags never apply to this lock class, simply
1849 * check whether that first spinlock is the one curthread
1850 * is expected to hold.
1851 */
1852 lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
1853 if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
1854 lock1->li_lock == lock && n == 0)
1855 return (0);
1856
1857 va_start(ap, fmt);
1858 vprintf(fmt, ap);
1859 va_end(ap);
1860 printf(" with the following %slocks held:\n",
1861 (flags & WARN_SLEEPOK) != 0 ? "non-sleepable " : "");
1862 n += witness_list_locks(&lock_list, printf);
1863 } else
1864 sched_unpin();
1865 if (flags & WARN_PANIC && n)
1866 kassert_panic("%s", __func__);
1867 else
1868 witness_debugger(n, __func__);
1869 return (n);
1870 }
1871
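/*
 * Illustrative usage (a sketch, not part of the original file): callers
 * normally reach witness_warn() through the WITNESS_WARN() wrapper
 * macro before an operation that may sleep, for example:
 *
 *	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 *	    "about to sleep");
 *
 * which reports any held lock other than Giant or a sleepable lock.
 */
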
1872 const char *
1873 witness_file(struct lock_object *lock)
1874 {
1875 struct witness *w;
1876
1877 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1878 return ("?");
1879 w = lock->lo_witness;
1880 return (w->w_file);
1881 }
1882
1883 int
1884 witness_line(struct lock_object *lock)
1885 {
1886 struct witness *w;
1887
1888 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1889 return (0);
1890 w = lock->lo_witness;
1891 return (w->w_line);
1892 }
1893
1894 static struct witness *
1895 enroll(const char *description, struct lock_class *lock_class)
1896 {
1897 struct witness *w;
1898
1899 MPASS(description != NULL);
1900
1901 if (witness_watch == -1 || KERNEL_PANICKED())
1902 return (NULL);
1903 if ((lock_class->lc_flags & LC_SPINLOCK)) {
1904 if (witness_skipspin)
1905 return (NULL);
1906 } else if ((lock_class->lc_flags & LC_SLEEPLOCK) == 0) {
1907 kassert_panic("lock class %s is not sleep or spin",
1908 lock_class->lc_name);
1909 return (NULL);
1910 }
1911
1912 mtx_lock_spin(&w_mtx);
1913 w = witness_hash_get(description);
1914 if (w)
1915 goto found;
1916 if ((w = witness_get()) == NULL)
1917 return (NULL);
1918 MPASS(strlen(description) < MAX_W_NAME);
1919 strcpy(w->w_name, description);
1920 w->w_class = lock_class;
1921 w->w_refcount = 1;
1922 STAILQ_INSERT_HEAD(&w_all, w, w_list);
1923 if (lock_class->lc_flags & LC_SPINLOCK) {
1924 STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1925 w_spin_cnt++;
1926 } else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1927 STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1928 w_sleep_cnt++;
1929 }
1930
1931 /* Insert new witness into the hash */
1932 witness_hash_put(w);
1933 witness_increment_graph_generation();
1934 mtx_unlock_spin(&w_mtx);
1935 return (w);
1936 found:
1937 w->w_refcount++;
1938 if (w->w_refcount == 1)
1939 w->w_class = lock_class;
1940 mtx_unlock_spin(&w_mtx);
1941 if (lock_class != w->w_class)
1942 kassert_panic(
1943 "lock (%s) %s does not match earlier (%s) lock",
1944 description, lock_class->lc_name,
1945 w->w_class->lc_name);
1946 return (w);
1947 }
1948
1949 static void
1950 depart(struct witness *w)
1951 {
1952
1953 MPASS(w->w_refcount == 0);
1954 if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1955 w_sleep_cnt--;
1956 } else {
1957 w_spin_cnt--;
1958 }
1959 /*
1960 * Set file to NULL as it may point into a loadable module.
1961 */
1962 w->w_file = NULL;
1963 w->w_line = 0;
1964 witness_increment_graph_generation();
1965 }
1966
1967 static void
1968 adopt(struct witness *parent, struct witness *child)
1969 {
1970 int pi, ci, i, j;
1971
1972 if (witness_cold == 0)
1973 mtx_assert(&w_mtx, MA_OWNED);
1974
1975 /* If the relationship is already known, there's no work to be done. */
1976 if (isitmychild(parent, child))
1977 return;
1978
1979 /* When the structure of the graph changes, bump up the generation. */
1980 witness_increment_graph_generation();
1981
1982 /*
1983 * The hard part ... create the direct relationship, then propagate all
1984 * indirect relationships.
1985 */
1986 pi = parent->w_index;
1987 ci = child->w_index;
1988 WITNESS_INDEX_ASSERT(pi);
1989 WITNESS_INDEX_ASSERT(ci);
1990 MPASS(pi != ci);
1991 w_rmatrix[pi][ci] |= WITNESS_PARENT;
1992 w_rmatrix[ci][pi] |= WITNESS_CHILD;
1993
1994 /*
1995 * If parent was not already an ancestor of child,
1996 * then we increment the descendant and ancestor counters.
1997 */
1998 if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
1999 parent->w_num_descendants++;
2000 child->w_num_ancestors++;
2001 }
2002
2003 /*
2004 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
2005 * an ancestor of 'pi' during this loop.
2006 */
2007 for (i = 1; i <= w_max_used_index; i++) {
2008 if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
2009 (i != pi))
2010 continue;
2011
2012 /* Find each descendant of 'i' and mark it as a descendant. */
2013 for (j = 1; j <= w_max_used_index; j++) {
2014 /*
2015 * Skip children that are already marked as
2016 * descendants of 'i'.
2017 */
2018 if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
2019 continue;
2020
2021 /*
2022 * We are only interested in descendants of 'ci'. Note
2023 * that 'ci' itself is counted as a descendant of 'ci'.
2024 */
2025 if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
2026 (j != ci))
2027 continue;
2028 w_rmatrix[i][j] |= WITNESS_ANCESTOR;
2029 w_rmatrix[j][i] |= WITNESS_DESCENDANT;
2030 w_data[i].w_num_descendants++;
2031 w_data[j].w_num_ancestors++;
2032
2033 /*
2034 * Make sure we aren't marking a node as both an
2035 * ancestor and descendant. We should have caught
2036 * this as a lock order reversal earlier.
2037 */
2038 if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
2039 (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
2040 printf("witness rmatrix paradox! [%d][%d]=%d "
2041 "both ancestor and descendant\n",
2042 i, j, w_rmatrix[i][j]);
2043 kdb_backtrace();
2044 printf("Witness disabled.\n");
2045 witness_watch = -1;
2046 }
2047 if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
2048 (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
2049 printf("witness rmatrix paradox! [%d][%d]=%d "
2050 "both ancestor and descendant\n",
2051 j, i, w_rmatrix[j][i]);
2052 kdb_backtrace();
2053 printf("Witness disabled.\n");
2054 witness_watch = -1;
2055 }
2056 }
2057 }
2058 }
2059
2060 static void
2061 itismychild(struct witness *parent, struct witness *child)
2062 {
2063 int unlocked;
2064
2065 MPASS(child != NULL && parent != NULL);
2066 if (witness_cold == 0)
2067 mtx_assert(&w_mtx, MA_OWNED);
2068
2069 if (!witness_lock_type_equal(parent, child)) {
2070 if (witness_cold == 0) {
2071 unlocked = 1;
2072 mtx_unlock_spin(&w_mtx);
2073 } else {
2074 unlocked = 0;
2075 }
2076 kassert_panic(
2077 "%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
2078 "the same lock type", __func__, parent->w_name,
2079 parent->w_class->lc_name, child->w_name,
2080 child->w_class->lc_name);
2081 if (unlocked)
2082 mtx_lock_spin(&w_mtx);
2083 }
2084 adopt(parent, child);
2085 }
2086
2087 /*
2088 * Generic code for the isitmy*() functions. The rmask parameter is the
2089 * expected relationship of w1 to w2.
2090 */
2091 static int
2092 _isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
2093 {
2094 unsigned char r1, r2;
2095 int i1, i2;
2096
2097 i1 = w1->w_index;
2098 i2 = w2->w_index;
2099 WITNESS_INDEX_ASSERT(i1);
2100 WITNESS_INDEX_ASSERT(i2);
2101 r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
2102 r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;
2103
2104 /* The flags on one better be the inverse of the flags on the other */
2105 if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
2106 (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
2107 /* Don't squawk if we're potentially racing with an update. */
2108 if (!mtx_owned(&w_mtx))
2109 return (0);
2110 printf("%s: rmatrix mismatch between %s (index %d) and %s "
2111 "(index %d): w_rmatrix[%d][%d] == %hhx but "
2112 "w_rmatrix[%d][%d] == %hhx\n",
2113 fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
2114 i2, i1, r2);
2115 kdb_backtrace();
2116 printf("Witness disabled.\n");
2117 witness_watch = -1;
2118 }
2119 return (r1 & rmask);
2120 }
2121
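/*
 * Worked example (illustrative): after adopt() links a parent pi to a
 * child ci, w_rmatrix[pi][ci] carries WITNESS_PARENT and
 * w_rmatrix[ci][pi] carries WITNESS_CHILD, so WITNESS_ATOD() and
 * WITNESS_DTOA() map each entry onto the other.  Any pair of entries
 * failing this inverse relationship is reported above as a matrix
 * mismatch and disables witness.
 */
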
2122 /*
2123 * Checks if @child is a direct child of @parent.
2124 */
2125 static int
2126 isitmychild(struct witness *parent, struct witness *child)
2127 {
2128
2129 return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
2130 }
2131
2132 /*
2133 * Checks if @descendant is a direct or indirect descendant of @ancestor.
2134 */
2135 static int
2136 isitmydescendant(struct witness *ancestor, struct witness *descendant)
2137 {
2138
2139 return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
2140 __func__));
2141 }
2142
2143 static int
2144 blessed(struct witness *w1, struct witness *w2)
2145 {
2146 int i;
2147 struct witness_blessed *b;
2148
2149 for (i = 0; i < nitems(blessed_list); i++) {
2150 b = &blessed_list[i];
2151 if (strcmp(w1->w_name, b->b_lock1) == 0) {
2152 if (strcmp(w2->w_name, b->b_lock2) == 0)
2153 return (1);
2154 continue;
2155 }
2156 if (strcmp(w1->w_name, b->b_lock2) == 0)
2157 if (strcmp(w2->w_name, b->b_lock1) == 0)
2158 return (1);
2159 }
2160 return (0);
2161 }
2162
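/*
 * Sketch of how a blessed pair is declared (the actual blessed_list
 * table lives earlier in this file; the pair below is only an
 * example):
 *
 *	static struct witness_blessed blessed_list[] = {
 *		{ "dirhash", "bufwait" },
 *	};
 *
 * Both orderings of a blessed pair are accepted, so a known benign
 * reversal between the two locks is never reported.
 */
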
2163 static struct witness *
2164 witness_get(void)
2165 {
2166 struct witness *w;
2167 int index;
2168
2169 if (witness_cold == 0)
2170 mtx_assert(&w_mtx, MA_OWNED);
2171
2172 if (witness_watch == -1) {
2173 mtx_unlock_spin(&w_mtx);
2174 return (NULL);
2175 }
2176 if (STAILQ_EMPTY(&w_free)) {
2177 witness_watch = -1;
2178 mtx_unlock_spin(&w_mtx);
2179 printf("WITNESS: unable to allocate a new witness object\n");
2180 return (NULL);
2181 }
2182 w = STAILQ_FIRST(&w_free);
2183 STAILQ_REMOVE_HEAD(&w_free, w_list);
2184 w_free_cnt--;
2185 index = w->w_index;
2186 MPASS(index > 0 && index == w_max_used_index+1 &&
2187 index < witness_count);
2188 bzero(w, sizeof(*w));
2189 w->w_index = index;
2190 if (index > w_max_used_index)
2191 w_max_used_index = index;
2192 return (w);
2193 }
2194
2195 static void
2196 witness_free(struct witness *w)
2197 {
2198
2199 STAILQ_INSERT_HEAD(&w_free, w, w_list);
2200 w_free_cnt++;
2201 }
2202
2203 static struct lock_list_entry *
2204 witness_lock_list_get(void)
2205 {
2206 struct lock_list_entry *lle;
2207
2208 if (witness_watch == -1)
2209 return (NULL);
2210 mtx_lock_spin(&w_mtx);
2211 lle = w_lock_list_free;
2212 if (lle == NULL) {
2213 witness_watch = -1;
2214 mtx_unlock_spin(&w_mtx);
2215 printf("%s: witness exhausted\n", __func__);
2216 return (NULL);
2217 }
2218 w_lock_list_free = lle->ll_next;
2219 mtx_unlock_spin(&w_mtx);
2220 bzero(lle, sizeof(*lle));
2221 return (lle);
2222 }
2223
2224 static void
2225 witness_lock_list_free(struct lock_list_entry *lle)
2226 {
2227
2228 mtx_lock_spin(&w_mtx);
2229 lle->ll_next = w_lock_list_free;
2230 w_lock_list_free = lle;
2231 mtx_unlock_spin(&w_mtx);
2232 }
2233
2234 static struct lock_instance *
2235 find_instance(struct lock_list_entry *list, const struct lock_object *lock)
2236 {
2237 struct lock_list_entry *lle;
2238 struct lock_instance *instance;
2239 int i;
2240
2241 for (lle = list; lle != NULL; lle = lle->ll_next)
2242 for (i = lle->ll_count - 1; i >= 0; i--) {
2243 instance = &lle->ll_children[i];
2244 if (instance->li_lock == lock)
2245 return (instance);
2246 }
2247 return (NULL);
2248 }
2249
2250 static void
2251 witness_list_lock(struct lock_instance *instance,
2252 int (*prnt)(const char *fmt, ...))
2253 {
2254 struct lock_object *lock;
2255
2256 lock = instance->li_lock;
2257 prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2258 "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2259 if (lock->lo_witness->w_name != lock->lo_name)
2260 prnt(" (%s)", lock->lo_witness->w_name);
2261 prnt(" r = %d (%p) locked @ %s:%d\n",
2262 instance->li_flags & LI_RECURSEMASK, lock,
2263 fixup_filename(instance->li_file), instance->li_line);
2264 }
2265
2266 static int
2267 witness_output(const char *fmt, ...)
2268 {
2269 va_list ap;
2270 int ret;
2271
2272 va_start(ap, fmt);
2273 ret = witness_voutput(fmt, ap);
2274 va_end(ap);
2275 return (ret);
2276 }
2277
2278 static int
2279 witness_voutput(const char *fmt, va_list ap)
2280 {
2281 int ret;
2282
2283 ret = 0;
2284 switch (witness_channel) {
2285 case WITNESS_CONSOLE:
2286 ret = vprintf(fmt, ap);
2287 break;
2288 case WITNESS_LOG:
2289 vlog(LOG_NOTICE, fmt, ap);
2290 break;
2291 case WITNESS_NONE:
2292 break;
2293 }
2294 return (ret);
2295 }
2296
2297 #ifdef DDB
2298 static int
2299 witness_thread_has_locks(struct thread *td)
2300 {
2301
2302 if (td->td_sleeplocks == NULL)
2303 return (0);
2304 return (td->td_sleeplocks->ll_count != 0);
2305 }
2306
2307 static int
2308 witness_proc_has_locks(struct proc *p)
2309 {
2310 struct thread *td;
2311
2312 FOREACH_THREAD_IN_PROC(p, td) {
2313 if (witness_thread_has_locks(td))
2314 return (1);
2315 }
2316 return (0);
2317 }
2318 #endif
2319
2320 int
2321 witness_list_locks(struct lock_list_entry **lock_list,
2322 int (*prnt)(const char *fmt, ...))
2323 {
2324 struct lock_list_entry *lle;
2325 int i, nheld;
2326
2327 nheld = 0;
2328 for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2329 for (i = lle->ll_count - 1; i >= 0; i--) {
2330 witness_list_lock(&lle->ll_children[i], prnt);
2331 nheld++;
2332 }
2333 return (nheld);
2334 }
2335
2336 /*
2337 * This is a bit risky at best. We call this function when we have timed
2338 * out acquiring a spin lock, and we assume that the other CPU is stuck
2339 * with this lock held. So, we go groveling around in the other CPU's
2340 * per-cpu data to try to find the lock instance for this spin lock to
2341 * see when it was last acquired.
2342 */
2343 void
2344 witness_display_spinlock(struct lock_object *lock, struct thread *owner,
2345 int (*prnt)(const char *fmt, ...))
2346 {
2347 struct lock_instance *instance;
2348 struct pcpu *pc;
2349
2350 if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2351 return;
2352 pc = pcpu_find(owner->td_oncpu);
2353 instance = find_instance(pc->pc_spinlocks, lock);
2354 if (instance != NULL)
2355 witness_list_lock(instance, prnt);
2356 }
2357
2358 void
2359 witness_save(struct lock_object *lock, const char **filep, int *linep)
2360 {
2361 struct lock_list_entry *lock_list;
2362 struct lock_instance *instance;
2363 struct lock_class *class;
2364
2365 /* Initialize for KMSAN's benefit. */
2366 *filep = NULL;
2367 *linep = 0;
2368
2369 /*
2370 * This function is used independently in locking code to deal with
2371 * Giant; the SCHEDULER_STOPPED() check can be removed here once Giant
2372 * is gone.
2373 */
2374 if (SCHEDULER_STOPPED())
2375 return;
2376 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2377 if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
2378 return;
2379 class = LOCK_CLASS(lock);
2380 if (class->lc_flags & LC_SLEEPLOCK)
2381 lock_list = curthread->td_sleeplocks;
2382 else {
2383 if (witness_skipspin)
2384 return;
2385 lock_list = PCPU_GET(spinlocks);
2386 }
2387 instance = find_instance(lock_list, lock);
2388 if (instance == NULL) {
2389 kassert_panic("%s: lock (%s) %s not locked", __func__,
2390 class->lc_name, lock->lo_name);
2391 return;
2392 }
2393 *filep = instance->li_file;
2394 *linep = instance->li_line;
2395 }
2396
2397 void
2398 witness_restore(struct lock_object *lock, const char *file, int line)
2399 {
2400 struct lock_list_entry *lock_list;
2401 struct lock_instance *instance;
2402 struct lock_class *class;
2403
2404 /*
2405 * This function is used independently in locking code to deal with
2406 * Giant; the SCHEDULER_STOPPED() check can be removed here once Giant
2407 * is gone.
2408 */
2409 if (SCHEDULER_STOPPED())
2410 return;
2411 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2412 if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
2413 return;
2414 class = LOCK_CLASS(lock);
2415 if (class->lc_flags & LC_SLEEPLOCK)
2416 lock_list = curthread->td_sleeplocks;
2417 else {
2418 if (witness_skipspin)
2419 return;
2420 lock_list = PCPU_GET(spinlocks);
2421 }
2422 instance = find_instance(lock_list, lock);
2423 if (instance == NULL)
2424 kassert_panic("%s: lock (%s) %s not locked", __func__,
2425 class->lc_name, lock->lo_name);
2426 lock->lo_witness->w_file = file;
2427 lock->lo_witness->w_line = line;
2428 if (instance == NULL)
2429 return;
2430 instance->li_file = file;
2431 instance->li_line = line;
2432 }
2433
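/*
 * Sketch of the intended pairing (hypothetical caller, using the
 * wrapper macros from sys/lock.h), so that the original acquisition
 * point survives a drop-and-reacquire cycle:
 *
 *	WITNESS_SAVE_DECL(Giant);
 *	WITNESS_SAVE(&Giant.lock_object, Giant);
 *	...
 *	WITNESS_RESTORE(&Giant.lock_object, Giant);
 */
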
2434 static bool
2435 witness_find_instance(const struct lock_object *lock,
2436 struct lock_instance **instance)
2437 {
2438 #ifdef INVARIANT_SUPPORT
2439 struct lock_class *class;
2440
2441 if (lock->lo_witness == NULL || witness_watch < 1 || KERNEL_PANICKED())
2442 return (false);
2443 class = LOCK_CLASS(lock);
2444 if ((class->lc_flags & LC_SLEEPLOCK) != 0) {
2445 *instance = find_instance(curthread->td_sleeplocks, lock);
2446 return (true);
2447 } else if ((class->lc_flags & LC_SPINLOCK) != 0) {
2448 *instance = find_instance(PCPU_GET(spinlocks), lock);
2449 return (true);
2450 } else {
2451 kassert_panic("Lock (%s) %s is not sleep or spin!",
2452 class->lc_name, lock->lo_name);
2453 return (false);
2454 }
2455 #else
2456 return (false);
2457 #endif
2458 }
2459
2460 void
2461 witness_assert(const struct lock_object *lock, int flags, const char *file,
2462 int line)
2463 {
2464 #ifdef INVARIANT_SUPPORT
2465 struct lock_instance *instance;
2466 struct lock_class *class;
2467
2468 if (!witness_find_instance(lock, &instance))
2469 return;
2470 class = LOCK_CLASS(lock);
2471 switch (flags) {
2472 case LA_UNLOCKED:
2473 if (instance != NULL)
2474 kassert_panic("Lock (%s) %s locked @ %s:%d.",
2475 class->lc_name, lock->lo_name,
2476 fixup_filename(file), line);
2477 break;
2478 case LA_LOCKED:
2479 case LA_LOCKED | LA_RECURSED:
2480 case LA_LOCKED | LA_NOTRECURSED:
2481 case LA_SLOCKED:
2482 case LA_SLOCKED | LA_RECURSED:
2483 case LA_SLOCKED | LA_NOTRECURSED:
2484 case LA_XLOCKED:
2485 case LA_XLOCKED | LA_RECURSED:
2486 case LA_XLOCKED | LA_NOTRECURSED:
2487 if (instance == NULL) {
2488 kassert_panic("Lock (%s) %s not locked @ %s:%d.",
2489 class->lc_name, lock->lo_name,
2490 fixup_filename(file), line);
2491 break;
2492 }
2493 if ((flags & LA_XLOCKED) != 0 &&
2494 (instance->li_flags & LI_EXCLUSIVE) == 0)
2495 kassert_panic(
2496 "Lock (%s) %s not exclusively locked @ %s:%d.",
2497 class->lc_name, lock->lo_name,
2498 fixup_filename(file), line);
2499 if ((flags & LA_SLOCKED) != 0 &&
2500 (instance->li_flags & LI_EXCLUSIVE) != 0)
2501 kassert_panic(
2502 "Lock (%s) %s exclusively locked @ %s:%d.",
2503 class->lc_name, lock->lo_name,
2504 fixup_filename(file), line);
2505 if ((flags & LA_RECURSED) != 0 &&
2506 (instance->li_flags & LI_RECURSEMASK) == 0)
2507 kassert_panic("Lock (%s) %s not recursed @ %s:%d.",
2508 class->lc_name, lock->lo_name,
2509 fixup_filename(file), line);
2510 if ((flags & LA_NOTRECURSED) != 0 &&
2511 (instance->li_flags & LI_RECURSEMASK) != 0)
2512 kassert_panic("Lock (%s) %s recursed @ %s:%d.",
2513 class->lc_name, lock->lo_name,
2514 fixup_filename(file), line);
2515 break;
2516 default:
2517 kassert_panic("Invalid lock assertion at %s:%d.",
2518 fixup_filename(file), line);
2519 }
2520 #endif /* INVARIANT_SUPPORT */
2521 }
2522
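/*
 * Illustrative only (hypothetical caller): with INVARIANT_SUPPORT, a
 * lock implementation can forward its assertion checks here, e.g.:
 *
 *	witness_assert(&sx->lock_object, LA_XLOCKED, file, line);
 *
 * which panics unless curthread holds the lock exclusively.
 */
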
2523 /*
2524 * Checks the ownership of the lock by curthread, consulting the witness list.
2525 * Returns:
2526 * 0 if witness is disabled or did not work
2527 * -1 if not owned
2528 * 1 if owned
2529 */
2530 int
2531 witness_is_owned(const struct lock_object *lock)
2532 {
2533 #ifdef INVARIANT_SUPPORT
2534 struct lock_instance *instance;
2535
2536 if (!witness_find_instance(lock, &instance))
2537 return (0);
2538 return (instance == NULL ? -1 : 1);
2539 #else
2540 return (0);
2541 #endif
2542 }
2543
2544 static void
2545 witness_setflag(struct lock_object *lock, int flag, int set)
2546 {
2547 struct lock_list_entry *lock_list;
2548 struct lock_instance *instance;
2549 struct lock_class *class;
2550
2551 if (lock->lo_witness == NULL || witness_watch == -1 || KERNEL_PANICKED())
2552 return;
2553 class = LOCK_CLASS(lock);
2554 if (class->lc_flags & LC_SLEEPLOCK)
2555 lock_list = curthread->td_sleeplocks;
2556 else {
2557 if (witness_skipspin)
2558 return;
2559 lock_list = PCPU_GET(spinlocks);
2560 }
2561 instance = find_instance(lock_list, lock);
2562 if (instance == NULL) {
2563 kassert_panic("%s: lock (%s) %s not locked", __func__,
2564 class->lc_name, lock->lo_name);
2565 return;
2566 }
2567
2568 if (set)
2569 instance->li_flags |= flag;
2570 else
2571 instance->li_flags &= ~flag;
2572 }
2573
2574 void
2575 witness_norelease(struct lock_object *lock)
2576 {
2577
2578 witness_setflag(lock, LI_NORELEASE, 1);
2579 }
2580
2581 void
2582 witness_releaseok(struct lock_object *lock)
2583 {
2584
2585 witness_setflag(lock, LI_NORELEASE, 0);
2586 }
2587
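/*
 * Sketch of the intended use (hypothetical caller): bracket a call-out
 * that must not drop the lock, so that an errant unlock panics:
 *
 *	witness_norelease(&m->lock_object);
 *	(*callback)(arg);
 *	witness_releaseok(&m->lock_object);
 */
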
2588 #ifdef DDB
2589 static void
2590 witness_ddb_list(struct thread *td)
2591 {
2592
2593 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2594 KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2595
2596 if (witness_watch < 1)
2597 return;
2598
2599 witness_list_locks(&td->td_sleeplocks, db_printf);
2600
2601 /*
2602 * We only handle spinlocks if td == curthread. This is somewhat broken
2603 * if td is currently executing on some other CPU and holds spin locks
2604 * as we won't display those locks. If we had a MI way of getting
2605 * the per-cpu data for a given cpu then we could use
2606 * td->td_oncpu to get the list of spinlocks for this thread
2607 * and "fix" this.
2608 *
2609 * That still wouldn't really fix this unless we locked the scheduler
2610 * lock or stopped the other CPU to make sure it wasn't changing the
2611 * list out from under us. It is probably best to just not try to
2612 * handle threads on other CPU's for now.
2613 */
2614 if (td == curthread && PCPU_GET(spinlocks) != NULL)
2615 witness_list_locks(PCPU_PTR(spinlocks), db_printf);
2616 }
2617
2618 DB_SHOW_COMMAND(locks, db_witness_list)
2619 {
2620 struct thread *td;
2621
2622 if (have_addr)
2623 td = db_lookup_thread(addr, true);
2624 else
2625 td = kdb_thread;
2626 witness_ddb_list(td);
2627 }
2628
2629 DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
2630 {
2631 struct thread *td;
2632 struct proc *p;
2633
2634 /*
2635 * It would be nice to list only threads and processes that actually
2636 * hold sleep locks, but that information is currently not exported
2637 * by WITNESS.
2638 */
2639 FOREACH_PROC_IN_SYSTEM(p) {
2640 if (!witness_proc_has_locks(p))
2641 continue;
2642 FOREACH_THREAD_IN_PROC(p, td) {
2643 if (!witness_thread_has_locks(td))
2644 continue;
2645 db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2646 p->p_comm, td, td->td_tid);
2647 witness_ddb_list(td);
2648 if (db_pager_quit)
2649 return;
2650 }
2651 }
2652 }
2653 DB_SHOW_ALIAS_FLAGS(alllocks, db_witness_list_all, DB_CMD_MEMSAFE);
2654
2655 DB_SHOW_COMMAND_FLAGS(witness, db_witness_display, DB_CMD_MEMSAFE)
2656 {
2657
2658 witness_ddb_display(db_printf);
2659 }
2660 #endif
2661
2662 static void
2663 sbuf_print_witness_badstacks(struct sbuf *sb, size_t *oldidx)
2664 {
2665 struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
2666 struct witness *tmp_w1, *tmp_w2, *w1, *w2;
2667 int generation, i, j;
2668
2669 tmp_data1 = NULL;
2670 tmp_data2 = NULL;
2671 tmp_w1 = NULL;
2672 tmp_w2 = NULL;
2673
2674 /* Allocate and init temporary storage space. */
2675 tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2676 tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2677 tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2678 M_WAITOK | M_ZERO);
2679 tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2680 M_WAITOK | M_ZERO);
2681 stack_zero(&tmp_data1->wlod_stack);
2682 stack_zero(&tmp_data2->wlod_stack);
2683
2684 restart:
2685 mtx_lock_spin(&w_mtx);
2686 generation = w_generation;
2687 mtx_unlock_spin(&w_mtx);
2688 sbuf_printf(sb, "Number of known direct relationships is %d\n",
2689 w_lohash.wloh_count);
2690 for (i = 1; i < w_max_used_index; i++) {
2691 mtx_lock_spin(&w_mtx);
2692 if (generation != w_generation) {
2693 mtx_unlock_spin(&w_mtx);
2694
2695 /* The graph has changed, try again. */
2696 *oldidx = 0;
2697 sbuf_clear(sb);
2698 goto restart;
2699 }
2700
2701 w1 = &w_data[i];
2702 if (w1->w_reversed == 0) {
2703 mtx_unlock_spin(&w_mtx);
2704 continue;
2705 }
2706
2707 /* Copy w1 locally so we can release the spin lock. */
2708 *tmp_w1 = *w1;
2709 mtx_unlock_spin(&w_mtx);
2710
2711 if (tmp_w1->w_reversed == 0)
2712 continue;
2713 for (j = 1; j < w_max_used_index; j++) {
2714 if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
2715 continue;
2716
2717 mtx_lock_spin(&w_mtx);
2718 if (generation != w_generation) {
2719 mtx_unlock_spin(&w_mtx);
2720
2721 /* The graph has changed, try again. */
2722 *oldidx = 0;
2723 sbuf_clear(sb);
2724 goto restart;
2725 }
2726
2727 w2 = &w_data[j];
2728 data1 = witness_lock_order_get(w1, w2);
2729 data2 = witness_lock_order_get(w2, w1);
2730
2731 /*
2732 * Copy information locally so we can release the
2733 * spin lock.
2734 */
2735 *tmp_w2 = *w2;
2736
2737 if (data1) {
2738 stack_zero(&tmp_data1->wlod_stack);
2739 stack_copy(&data1->wlod_stack,
2740 &tmp_data1->wlod_stack);
2741 }
2742 if (data2 && data2 != data1) {
2743 stack_zero(&tmp_data2->wlod_stack);
2744 stack_copy(&data2->wlod_stack,
2745 &tmp_data2->wlod_stack);
2746 }
2747 mtx_unlock_spin(&w_mtx);
2748
2749 if (blessed(tmp_w1, tmp_w2))
2750 continue;
2751
2752 sbuf_printf(sb,
2753 "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
2754 tmp_w1->w_name, tmp_w1->w_class->lc_name,
2755 tmp_w2->w_name, tmp_w2->w_class->lc_name);
2756 if (data1) {
2757 sbuf_printf(sb,
2758 "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2759 tmp_w1->w_name, tmp_w1->w_class->lc_name,
2760 tmp_w2->w_name, tmp_w2->w_class->lc_name);
2761 stack_sbuf_print(sb, &tmp_data1->wlod_stack);
2762 sbuf_putc(sb, '\n');
2763 }
2764 if (data2 && data2 != data1) {
2765 sbuf_printf(sb,
2766 "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2767 tmp_w2->w_name, tmp_w2->w_class->lc_name,
2768 tmp_w1->w_name, tmp_w1->w_class->lc_name);
2769 stack_sbuf_print(sb, &tmp_data2->wlod_stack);
2770 sbuf_putc(sb, '\n');
2771 }
2772 }
2773 }
2774 mtx_lock_spin(&w_mtx);
2775 if (generation != w_generation) {
2776 mtx_unlock_spin(&w_mtx);
2777
2778 /*
2779 * The graph changed while we were printing stack data,
2780 * try again.
2781 */
2782 *oldidx = 0;
2783 sbuf_clear(sb);
2784 goto restart;
2785 }
2786 mtx_unlock_spin(&w_mtx);
2787
2788 /* Free temporary storage space. */
2789 free(tmp_data1, M_TEMP);
2790 free(tmp_data2, M_TEMP);
2791 free(tmp_w1, M_TEMP);
2792 free(tmp_w2, M_TEMP);
2793 }
2794
2795 static int
2796 sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
2797 {
2798 struct sbuf *sb;
2799 int error;
2800
2801 if (witness_watch < 1) {
2802 error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2803 return (error);
2804 }
2805 if (witness_cold) {
2806 error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2807 return (error);
2808 }
2809 error = 0;
2810 sb = sbuf_new(NULL, NULL, badstack_sbuf_size, SBUF_AUTOEXTEND);
2811 if (sb == NULL)
2812 return (ENOMEM);
2813
2814 sbuf_print_witness_badstacks(sb, &req->oldidx);
2815
2816 sbuf_finish(sb);
2817 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2818 sbuf_delete(sb);
2819
2820 return (error);
2821 }
2822
2823 #ifdef DDB
2824 static int
2825 sbuf_db_printf_drain(void *arg __unused, const char *data, int len)
2826 {
2827
2828 return (db_printf("%.*s", len, data));
2829 }
2830
2831 DB_SHOW_COMMAND_FLAGS(badstacks, db_witness_badstacks, DB_CMD_MEMSAFE)
2832 {
2833 struct sbuf sb;
2834 char buffer[128];
2835 size_t dummy;
2836
2837 sbuf_new(&sb, buffer, sizeof(buffer), SBUF_FIXEDLEN);
2838 sbuf_set_drain(&sb, sbuf_db_printf_drain, NULL);
2839 sbuf_print_witness_badstacks(&sb, &dummy);
2840 sbuf_finish(&sb);
2841 }
2842 #endif
2843
2844 static int
2845 sysctl_debug_witness_channel(SYSCTL_HANDLER_ARGS)
2846 {
2847 static const struct {
2848 enum witness_channel channel;
2849 const char *name;
2850 } channels[] = {
2851 { WITNESS_CONSOLE, "console" },
2852 { WITNESS_LOG, "log" },
2853 { WITNESS_NONE, "none" },
2854 };
2855 char buf[16];
2856 u_int i;
2857 int error;
2858
2859 buf[0] = '\0';
2860 for (i = 0; i < nitems(channels); i++)
2861 if (witness_channel == channels[i].channel) {
2862 snprintf(buf, sizeof(buf), "%s", channels[i].name);
2863 break;
2864 }
2865
2866 error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
2867 if (error != 0 || req->newptr == NULL)
2868 return (error);
2869
2870 error = EINVAL;
2871 for (i = 0; i < nitems(channels); i++)
2872 if (strcmp(channels[i].name, buf) == 0) {
2873 witness_channel = channels[i].channel;
2874 error = 0;
2875 break;
2876 }
2877 return (error);
2878 }
2879
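/*
 * Example (assuming the conventional sysctl name for this handler):
 *
 *	sysctl debug.witness.output_channel=log
 *
 * switches witness output from the console to the kernel log; "none"
 * suppresses it entirely.
 */
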
2880 static int
2881 sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
2882 {
2883 struct witness *w;
2884 struct sbuf *sb;
2885 int error;
2886
2887 #ifdef __i386__
2888 error = SYSCTL_OUT(req, w_notallowed, sizeof(w_notallowed));
2889 return (error);
2890 #endif
2891
2892 if (witness_watch < 1) {
2893 error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2894 return (error);
2895 }
2896 if (witness_cold) {
2897 error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2898 return (error);
2899 }
2900 error = 0;
2901
2902 error = sysctl_wire_old_buffer(req, 0);
2903 if (error != 0)
2904 return (error);
2905 sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req);
2906 if (sb == NULL)
2907 return (ENOMEM);
2908 sbuf_putc(sb, '\n');
2909
2910 mtx_lock_spin(&w_mtx);
2911 STAILQ_FOREACH(w, &w_all, w_list)
2912 w->w_displayed = 0;
2913 STAILQ_FOREACH(w, &w_all, w_list)
2914 witness_add_fullgraph(sb, w);
2915 mtx_unlock_spin(&w_mtx);
2916
2917 /*
2918 * Close the sbuf and return to userland.
2919 */
2920 error = sbuf_finish(sb);
2921 sbuf_delete(sb);
2922
2923 return (error);
2924 }
2925
2926 static int
2927 sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
2928 {
2929 int error, value;
2930
2931 value = witness_watch;
2932 error = sysctl_handle_int(oidp, &value, 0, req);
2933 if (error != 0 || req->newptr == NULL)
2934 return (error);
2935 if (value > 1 || value < -1 ||
2936 (witness_watch == -1 && value != witness_watch))
2937 return (EINVAL);
2938 witness_watch = value;
2939 return (0);
2940 }
2941
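/*
 * Example (assuming the conventional sysctl name for this handler):
 *
 *	sysctl debug.witness.watch=0
 *
 * relaxes checking at runtime, while -1 disables witness entirely;
 * as enforced above, a value of -1 can never be raised again.
 */
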
2942 static void
2943 witness_add_fullgraph(struct sbuf *sb, struct witness *w)
2944 {
2945 int i;
2946
2947 if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
2948 return;
2949 w->w_displayed = 1;
2950
2951 WITNESS_INDEX_ASSERT(w->w_index);
2952 for (i = 1; i <= w_max_used_index; i++) {
2953 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
2954 sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
2955 w_data[i].w_name);
2956 witness_add_fullgraph(sb, &w_data[i]);
2957 }
2958 }
2959 }
2960
2961 /*
2962 * A simple hash function. Takes a key pointer and a key size. If size == 0,
2963 * interprets the key as a string and reads until the null
2964 * terminator. Otherwise, reads the first size bytes. Returns an unsigned 32-bit
2965 * hash value computed from the key.
2966 */
2967 static uint32_t
2968 witness_hash_djb2(const uint8_t *key, uint32_t size)
2969 {
2970 unsigned int hash = 5381;
2971 int i;
2972
2973 /* hash = hash * 33 + key[i] */
2974 if (size)
2975 for (i = 0; i < size; i++)
2976 hash = ((hash << 5) + hash) + (unsigned int)key[i];
2977 else
2978 for (i = 0; key[i] != 0; i++)
2979 hash = ((hash << 5) + hash) + (unsigned int)key[i];
2980
2981 return (hash);
2982 }
2983
2984 /*
2985 * Initializes the two witness hash tables. Called exactly once from
2986 * witness_initialize().
2987 */
2988 static void
2989 witness_init_hash_tables(void)
2990 {
2991 int i;
2992
2993 MPASS(witness_cold);
2994
2995 /* Initialize the hash tables. */
2996 for (i = 0; i < WITNESS_HASH_SIZE; i++)
2997 w_hash.wh_array[i] = NULL;
2998
2999 w_hash.wh_size = WITNESS_HASH_SIZE;
3000 w_hash.wh_count = 0;
3001
3002 /* Initialize the lock order data hash. */
3003 w_lofree = NULL;
3004 for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
3005 memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
3006 w_lodata[i].wlod_next = w_lofree;
3007 w_lofree = &w_lodata[i];
3008 }
3009 w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
3010 w_lohash.wloh_count = 0;
3011 for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
3012 w_lohash.wloh_array[i] = NULL;
3013 }
3014
3015 static struct witness *
3016 witness_hash_get(const char *key)
3017 {
3018 struct witness *w;
3019 uint32_t hash;
3020
3021 MPASS(key != NULL);
3022 if (witness_cold == 0)
3023 mtx_assert(&w_mtx, MA_OWNED);
3024 hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
3025 w = w_hash.wh_array[hash];
3026 while (w != NULL) {
3027 if (strcmp(w->w_name, key) == 0)
3028 goto out;
3029 w = w->w_hash_next;
3030 }
3031
3032 out:
3033 return (w);
3034 }
3035
3036 static void
3037 witness_hash_put(struct witness *w)
3038 {
3039 uint32_t hash;
3040
3041 MPASS(w != NULL);
3042 MPASS(w->w_name != NULL);
3043 if (witness_cold == 0)
3044 mtx_assert(&w_mtx, MA_OWNED);
3045 KASSERT(witness_hash_get(w->w_name) == NULL,
3046 ("%s: trying to add a hash entry that already exists!", __func__));
3047 KASSERT(w->w_hash_next == NULL,
3048 ("%s: w->w_hash_next != NULL", __func__));
3049
3050 hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
3051 w->w_hash_next = w_hash.wh_array[hash];
3052 w_hash.wh_array[hash] = w;
3053 w_hash.wh_count++;
3054 }
3055
3056 static struct witness_lock_order_data *
3057 witness_lock_order_get(struct witness *parent, struct witness *child)
3058 {
3059 struct witness_lock_order_data *data = NULL;
3060 struct witness_lock_order_key key;
3061 unsigned int hash;
3062
3063 MPASS(parent != NULL && child != NULL);
3064 key.from = parent->w_index;
3065 key.to = child->w_index;
3066 WITNESS_INDEX_ASSERT(key.from);
3067 WITNESS_INDEX_ASSERT(key.to);
3068 if ((w_rmatrix[parent->w_index][child->w_index]
3069 & WITNESS_LOCK_ORDER_KNOWN) == 0)
3070 goto out;
3071
3072 hash = witness_hash_djb2((const char*)&key,
3073 sizeof(key)) % w_lohash.wloh_size;
3074 data = w_lohash.wloh_array[hash];
3075 while (data != NULL) {
3076 if (witness_lock_order_key_equal(&data->wlod_key, &key))
3077 break;
3078 data = data->wlod_next;
3079 }
3080
3081 out:
3082 return (data);
3083 }
3084
3085 /*
3086 * Verify that parent and child have a known relationship, are not the same,
3087 * and child is actually a child of parent. This is done without w_mtx
3088 * to avoid contention in the common case.
3089 */
3090 static int
3091 witness_lock_order_check(struct witness *parent, struct witness *child)
3092 {
3093
3094 if (parent != child &&
3095 w_rmatrix[parent->w_index][child->w_index]
3096 & WITNESS_LOCK_ORDER_KNOWN &&
3097 isitmychild(parent, child))
3098 return (1);
3099
3100 return (0);
3101 }
3102
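/*
 * A sketch of the expected calling pattern (hypothetical caller): the
 * lockless check runs first, and w_mtx is only taken when a new order
 * has to be recorded:
 *
 *	if (!witness_lock_order_check(parent, child)) {
 *		mtx_lock_spin(&w_mtx);
 *		...
 *		witness_lock_order_add(parent, child);
 *		mtx_unlock_spin(&w_mtx);
 *	}
 */
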
3103 static int
3104 witness_lock_order_add(struct witness *parent, struct witness *child)
3105 {
3106 struct witness_lock_order_data *data = NULL;
3107 struct witness_lock_order_key key;
3108 unsigned int hash;
3109
3110 MPASS(parent != NULL && child != NULL);
3111 key.from = parent->w_index;
3112 key.to = child->w_index;
3113 WITNESS_INDEX_ASSERT(key.from);
3114 WITNESS_INDEX_ASSERT(key.to);
3115 if (w_rmatrix[parent->w_index][child->w_index]
3116 & WITNESS_LOCK_ORDER_KNOWN)
3117 return (1);
3118
3119 hash = witness_hash_djb2((const char*)&key,
3120 sizeof(key)) % w_lohash.wloh_size;
3121 w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
3122 data = w_lofree;
3123 if (data == NULL)
3124 return (0);
3125 w_lofree = data->wlod_next;
3126 data->wlod_next = w_lohash.wloh_array[hash];
3127 data->wlod_key = key;
3128 w_lohash.wloh_array[hash] = data;
3129 w_lohash.wloh_count++;
3130 stack_save(&data->wlod_stack);
3131 return (1);
3132 }
3133
3134 /* Call this whenever the structure of the witness graph changes. */
3135 static void
3136 witness_increment_graph_generation(void)
3137 {
3138
3139 if (witness_cold == 0)
3140 mtx_assert(&w_mtx, MA_OWNED);
3141 w_generation++;
3142 }
3143
3144 static int
3145 witness_output_drain(void *arg __unused, const char *data, int len)
3146 {
3147
3148 witness_output("%.*s", len, data);
3149 return (len);
3150 }
3151
3152 static void
3153 witness_debugger(int cond, const char *msg)
3154 {
3155 char buf[32];
3156 struct sbuf sb;
3157 struct stack st;
3158
3159 if (!cond)
3160 return;
3161
3162 if (witness_trace) {
3163 sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
3164 sbuf_set_drain(&sb, witness_output_drain, NULL);
3165
3166 stack_save(&st);
3167 witness_output("stack backtrace:\n");
3168 stack_sbuf_print_ddb(&sb, &st);
3169
3170 sbuf_finish(&sb);
3171 }
3172
3173 witness_enter_debugger(msg);
3174 }
3175
3176 static void
3177 witness_enter_debugger(const char *msg)
3178 {
3179 #ifdef KDB
3180 if (witness_kdb)
3181 kdb_enter(KDB_WHY_WITNESS, msg);
3182 #endif
3183 }
3184