1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2008 Isilon Systems, Inc. 5 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com> 6 * Copyright (c) 1998 Berkeley Software Design, Inc. 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. Berkeley Software Design Inc's name may not be used to endorse or 18 * promote products derived from this software without specific prior 19 * written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 * 33 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $ 34 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $ 35 */ 36 37 /* 38 * Implementation of the `witness' lock verifier. Originally implemented for 39 * mutexes in BSD/OS. Extended to handle generic lock objects and lock 40 * classes in FreeBSD. 41 */ 42 43 /* 44 * Main Entry: witness 45 * Pronunciation: 'wit-n&s 46 * Function: noun 47 * Etymology: Middle English witnesse, from Old English witnes knowledge, 48 * testimony, witness, from 2wit 49 * Date: before 12th century 50 * 1 : attestation of a fact or event : TESTIMONY 51 * 2 : one that gives evidence; specifically : one who testifies in 52 * a cause or before a judicial tribunal 53 * 3 : one asked to be present at a transaction so as to be able to 54 * testify to its having taken place 55 * 4 : one who has personal knowledge of something 56 * 5 a : something serving as evidence or proof : SIGN 57 * b : public affirmation by word or example of usually 58 * religious faith or conviction <the heroic witness to divine 59 * life -- Pilot> 60 * 6 capitalized : a member of the Jehovah's Witnesses 61 */ 62 63 /* 64 * Special rules concerning Giant and lock orders: 65 * 66 * 1) Giant must be acquired before any other mutexes. Stated another way, 67 * no other mutex may be held when Giant is acquired. 68 * 69 * 2) Giant must be released when blocking on a sleepable lock. 70 * 71 * This rule is less obvious, but is a result of Giant providing the same 72 * semantics as spl(). Basically, when a thread sleeps, it must release 73 * Giant. When a thread blocks on a sleepable lock, it sleeps. Hence rule 74 * 2). 75 * 76 * 3) Giant may be acquired before or after sleepable locks. 77 * 78 * This rule is also not quite as obvious. 
Giant may be acquired after 79 * a sleepable lock because it is a non-sleepable lock and non-sleepable 80 * locks may always be acquired while holding a sleepable lock. The second 81 * case, Giant before a sleepable lock, follows from rule 2) above. Suppose 82 * you have two threads T1 and T2 and a sleepable lock X. Suppose that T1 83 * acquires X and blocks on Giant. Then suppose that T2 acquires Giant and 84 * blocks on X. When T2 blocks on X, T2 will release Giant allowing T1 to 85 * execute. Thus, acquiring Giant both before and after a sleepable lock 86 * will not result in a lock order reversal. 87 */ 88 89 #include <sys/cdefs.h> 90 __FBSDID("$FreeBSD$"); 91 92 #include "opt_ddb.h" 93 #include "opt_hwpmc_hooks.h" 94 #include "opt_stack.h" 95 #include "opt_witness.h" 96 97 #include <sys/param.h> 98 #include <sys/bus.h> 99 #include <sys/kdb.h> 100 #include <sys/kernel.h> 101 #include <sys/ktr.h> 102 #include <sys/lock.h> 103 #include <sys/malloc.h> 104 #include <sys/mutex.h> 105 #include <sys/priv.h> 106 #include <sys/proc.h> 107 #include <sys/sbuf.h> 108 #include <sys/sched.h> 109 #include <sys/stack.h> 110 #include <sys/sysctl.h> 111 #include <sys/syslog.h> 112 #include <sys/systm.h> 113 114 #ifdef DDB 115 #include <ddb/ddb.h> 116 #endif 117 118 #include <machine/stdarg.h> 119 120 #if !defined(DDB) && !defined(STACK) 121 #error "DDB or STACK options are required for WITNESS" 122 #endif 123 124 /* Note that these traces do not work with KTR_ALQ. */ 125 #if 0 126 #define KTR_WITNESS KTR_SUBSYS 127 #else 128 #define KTR_WITNESS 0 129 #endif 130 131 #define LI_RECURSEMASK 0x0000ffff /* Recursion depth of lock instance. */ 132 #define LI_EXCLUSIVE 0x00010000 /* Exclusive lock instance. */ 133 #define LI_NORELEASE 0x00020000 /* Lock not allowed to be released. */ 134 135 /* Define this to check for blessed mutexes */ 136 #undef BLESSING 137 138 #ifndef WITNESS_COUNT 139 #define WITNESS_COUNT 1536 140 #endif 141 #define WITNESS_HASH_SIZE 251 /* Prime, gives load factor < 2 */ 142 #define WITNESS_PENDLIST (512 + (MAXCPU * 4)) 143 144 /* Allocate 256 KB of stack data space */ 145 #define WITNESS_LO_DATA_COUNT 2048 146 147 /* Prime, gives load factor of ~2 at full load */ 148 #define WITNESS_LO_HASH_SIZE 1021 149 150 /* 151 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads 152 * will hold LOCK_NCHILDREN locks. We handle failure ok, and we should 153 * probably be safe for the most part, but it's still a SWAG. 154 */ 155 #define LOCK_NCHILDREN 5 156 #define LOCK_CHILDCOUNT 2048 157 158 #define MAX_W_NAME 64 159 160 #define FULLGRAPH_SBUF_SIZE 512 161 162 /* 163 * These flags go in the witness relationship matrix and describe the 164 * relationship between any two struct witness objects. 165 */ 166 #define WITNESS_UNRELATED 0x00 /* No lock order relation. */ 167 #define WITNESS_PARENT 0x01 /* Parent, aka direct ancestor. */ 168 #define WITNESS_ANCESTOR 0x02 /* Direct or indirect ancestor. */ 169 #define WITNESS_CHILD 0x04 /* Child, aka direct descendant. */ 170 #define WITNESS_DESCENDANT 0x08 /* Direct or indirect descendant. */ 171 #define WITNESS_ANCESTOR_MASK (WITNESS_PARENT | WITNESS_ANCESTOR) 172 #define WITNESS_DESCENDANT_MASK (WITNESS_CHILD | WITNESS_DESCENDANT) 173 #define WITNESS_RELATED_MASK \ 174 (WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK) 175 #define WITNESS_REVERSAL 0x10 /* A lock order reversal has been 176 * observed. */ 177 #define WITNESS_RESERVED1 0x20 /* Unused flag, reserved. */ 178 #define WITNESS_RESERVED2 0x40 /* Unused flag, reserved. 
*/ 179 #define WITNESS_LOCK_ORDER_KNOWN 0x80 /* This lock order is known. */ 180 181 /* Descendant to ancestor flags */ 182 #define WITNESS_DTOA(x) (((x) & WITNESS_RELATED_MASK) >> 2) 183 184 /* Ancestor to descendant flags */ 185 #define WITNESS_ATOD(x) (((x) & WITNESS_RELATED_MASK) << 2) 186 187 #define WITNESS_INDEX_ASSERT(i) \ 188 MPASS((i) > 0 && (i) <= w_max_used_index && (i) < witness_count) 189 190 static MALLOC_DEFINE(M_WITNESS, "Witness", "Witness"); 191 192 /* 193 * Lock instances. A lock instance is the data associated with a lock while 194 * it is held by witness. For example, a lock instance will hold the 195 * recursion count of a lock. Lock instances are held in lists. Spin locks 196 * are held in a per-cpu list while sleep locks are held in per-thread list. 197 */ 198 struct lock_instance { 199 struct lock_object *li_lock; 200 const char *li_file; 201 int li_line; 202 u_int li_flags; 203 }; 204 205 /* 206 * A simple list type used to build the list of locks held by a thread 207 * or CPU. We can't simply embed the list in struct lock_object since a 208 * lock may be held by more than one thread if it is a shared lock. Locks 209 * are added to the head of the list, so we fill up each list entry from 210 * "the back" logically. To ease some of the arithmetic, we actually fill 211 * in each list entry the normal way (children[0] then children[1], etc.) but 212 * when we traverse the list we read children[count-1] as the first entry 213 * down to children[0] as the final entry. 214 */ 215 struct lock_list_entry { 216 struct lock_list_entry *ll_next; 217 struct lock_instance ll_children[LOCK_NCHILDREN]; 218 u_int ll_count; 219 }; 220 221 /* 222 * The main witness structure. One of these per named lock type in the system 223 * (for example, "vnode interlock"). 224 */ 225 struct witness { 226 char w_name[MAX_W_NAME]; 227 uint32_t w_index; /* Index in the relationship matrix */ 228 struct lock_class *w_class; 229 STAILQ_ENTRY(witness) w_list; /* List of all witnesses. */ 230 STAILQ_ENTRY(witness) w_typelist; /* Witnesses of a type. */ 231 struct witness *w_hash_next; /* Linked list in hash buckets. */ 232 const char *w_file; /* File where last acquired */ 233 uint32_t w_line; /* Line where last acquired */ 234 uint32_t w_refcount; 235 uint16_t w_num_ancestors; /* direct/indirect 236 * ancestor count */ 237 uint16_t w_num_descendants; /* direct/indirect 238 * descendant count */ 239 int16_t w_ddb_level; 240 unsigned w_displayed:1; 241 unsigned w_reversed:1; 242 }; 243 244 STAILQ_HEAD(witness_list, witness); 245 246 /* 247 * The witness hash table. Keys are witness names (const char *), elements are 248 * witness objects (struct witness *). 249 */ 250 struct witness_hash { 251 struct witness *wh_array[WITNESS_HASH_SIZE]; 252 uint32_t wh_size; 253 uint32_t wh_count; 254 }; 255 256 /* 257 * Key type for the lock order data hash table. 258 */ 259 struct witness_lock_order_key { 260 uint16_t from; 261 uint16_t to; 262 }; 263 264 struct witness_lock_order_data { 265 struct stack wlod_stack; 266 struct witness_lock_order_key wlod_key; 267 struct witness_lock_order_data *wlod_next; 268 }; 269 270 /* 271 * The witness lock order data hash table. Keys are witness index tuples 272 * (struct witness_lock_order_key), elements are lock order data objects 273 * (struct witness_lock_order_data). 
274 */ 275 struct witness_lock_order_hash { 276 struct witness_lock_order_data *wloh_array[WITNESS_LO_HASH_SIZE]; 277 u_int wloh_size; 278 u_int wloh_count; 279 }; 280 281 #ifdef BLESSING 282 struct witness_blessed { 283 const char *b_lock1; 284 const char *b_lock2; 285 }; 286 #endif 287 288 struct witness_pendhelp { 289 const char *wh_type; 290 struct lock_object *wh_lock; 291 }; 292 293 struct witness_order_list_entry { 294 const char *w_name; 295 struct lock_class *w_class; 296 }; 297 298 /* 299 * Returns 0 if one of the locks is a spin lock and the other is not. 300 * Returns 1 otherwise. 301 */ 302 static __inline int 303 witness_lock_type_equal(struct witness *w1, struct witness *w2) 304 { 305 306 return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) == 307 (w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK))); 308 } 309 310 static __inline int 311 witness_lock_order_key_equal(const struct witness_lock_order_key *a, 312 const struct witness_lock_order_key *b) 313 { 314 315 return (a->from == b->from && a->to == b->to); 316 } 317 318 static int _isitmyx(struct witness *w1, struct witness *w2, int rmask, 319 const char *fname); 320 static void adopt(struct witness *parent, struct witness *child); 321 #ifdef BLESSING 322 static int blessed(struct witness *, struct witness *); 323 #endif 324 static void depart(struct witness *w); 325 static struct witness *enroll(const char *description, 326 struct lock_class *lock_class); 327 static struct lock_instance *find_instance(struct lock_list_entry *list, 328 const struct lock_object *lock); 329 static int isitmychild(struct witness *parent, struct witness *child); 330 static int isitmydescendant(struct witness *parent, struct witness *child); 331 static void itismychild(struct witness *parent, struct witness *child); 332 static int sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS); 333 static int sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS); 334 static int sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS); 335 static int sysctl_debug_witness_channel(SYSCTL_HANDLER_ARGS); 336 static void witness_add_fullgraph(struct sbuf *sb, struct witness *parent); 337 #ifdef DDB 338 static void witness_ddb_compute_levels(void); 339 static void witness_ddb_display(int(*)(const char *fmt, ...)); 340 static void witness_ddb_display_descendants(int(*)(const char *fmt, ...), 341 struct witness *, int indent); 342 static void witness_ddb_display_list(int(*prnt)(const char *fmt, ...), 343 struct witness_list *list); 344 static void witness_ddb_level_descendants(struct witness *parent, int l); 345 static void witness_ddb_list(struct thread *td); 346 #endif 347 static void witness_debugger(int cond, const char *msg); 348 static void witness_free(struct witness *m); 349 static struct witness *witness_get(void); 350 static uint32_t witness_hash_djb2(const uint8_t *key, uint32_t size); 351 static struct witness *witness_hash_get(const char *key); 352 static void witness_hash_put(struct witness *w); 353 static void witness_init_hash_tables(void); 354 static void witness_increment_graph_generation(void); 355 static void witness_lock_list_free(struct lock_list_entry *lle); 356 static struct lock_list_entry *witness_lock_list_get(void); 357 static int witness_lock_order_add(struct witness *parent, 358 struct witness *child); 359 static int witness_lock_order_check(struct witness *parent, 360 struct witness *child); 361 static struct witness_lock_order_data *witness_lock_order_get( 362 struct witness *parent, 363 struct witness *child); 364 static void 
witness_list_lock(struct lock_instance *instance, 365 int (*prnt)(const char *fmt, ...)); 366 static int witness_output(const char *fmt, ...) __printflike(1, 2); 367 static int witness_voutput(const char *fmt, va_list ap) __printflike(1, 0); 368 static void witness_setflag(struct lock_object *lock, int flag, int set); 369 370 static SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL, 371 "Witness Locking"); 372 373 /* 374 * If set to 0, lock order checking is disabled. If set to -1, 375 * witness is completely disabled. Otherwise witness performs full 376 * lock order checking for all locks. At runtime, lock order checking 377 * may be toggled. However, witness cannot be reenabled once it is 378 * completely disabled. 379 */ 380 static int witness_watch = 1; 381 SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RWTUN | CTLTYPE_INT, NULL, 0, 382 sysctl_debug_witness_watch, "I", "witness is watching lock operations"); 383 384 #ifdef KDB 385 /* 386 * When KDB is enabled and witness_kdb is 1, it will cause the system 387 * to drop into kdebug() when: 388 * - a lock hierarchy violation occurs 389 * - locks are held when going to sleep. 390 */ 391 #ifdef WITNESS_KDB 392 int witness_kdb = 1; 393 #else 394 int witness_kdb = 0; 395 #endif 396 SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RWTUN, &witness_kdb, 0, ""); 397 #endif /* KDB */ 398 399 #if defined(DDB) || defined(KDB) 400 /* 401 * When DDB or KDB is enabled and witness_trace is 1, it will cause the system 402 * to print a stack trace: 403 * - a lock hierarchy violation occurs 404 * - locks are held when going to sleep. 405 */ 406 int witness_trace = 1; 407 SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RWTUN, &witness_trace, 0, ""); 408 #endif /* DDB || KDB */ 409 410 #ifdef WITNESS_SKIPSPIN 411 int witness_skipspin = 1; 412 #else 413 int witness_skipspin = 0; 414 #endif 415 SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin, 0, ""); 416 417 int badstack_sbuf_size; 418 419 int witness_count = WITNESS_COUNT; 420 SYSCTL_INT(_debug_witness, OID_AUTO, witness_count, CTLFLAG_RDTUN, 421 &witness_count, 0, ""); 422 423 /* 424 * Output channel for witness messages. By default we print to the console. 425 */ 426 enum witness_channel { 427 WITNESS_CONSOLE, 428 WITNESS_LOG, 429 WITNESS_NONE, 430 }; 431 432 static enum witness_channel witness_channel = WITNESS_CONSOLE; 433 SYSCTL_PROC(_debug_witness, OID_AUTO, output_channel, CTLTYPE_STRING | 434 CTLFLAG_RWTUN, NULL, 0, sysctl_debug_witness_channel, "A", 435 "Output channel for warnings"); 436 437 /* 438 * Call this to print out the relations between locks. 439 */ 440 SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD, 441 NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs"); 442 443 /* 444 * Call this to print out the witness faulty stacks. 
445 */ 446 SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD, 447 NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks"); 448 449 static struct mtx w_mtx; 450 451 /* w_list */ 452 static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free); 453 static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all); 454 455 /* w_typelist */ 456 static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin); 457 static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep); 458 459 /* lock list */ 460 static struct lock_list_entry *w_lock_list_free = NULL; 461 static struct witness_pendhelp pending_locks[WITNESS_PENDLIST]; 462 static u_int pending_cnt; 463 464 static int w_free_cnt, w_spin_cnt, w_sleep_cnt; 465 SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, ""); 466 SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, ""); 467 SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0, 468 ""); 469 470 static struct witness *w_data; 471 static uint8_t **w_rmatrix; 472 static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT]; 473 static struct witness_hash w_hash; /* The witness hash table. */ 474 475 /* The lock order data hash */ 476 static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT]; 477 static struct witness_lock_order_data *w_lofree = NULL; 478 static struct witness_lock_order_hash w_lohash; 479 static int w_max_used_index = 0; 480 static unsigned int w_generation = 0; 481 static const char w_notrunning[] = "Witness not running\n"; 482 static const char w_stillcold[] = "Witness is still cold\n"; 483 #ifdef __i386__ 484 static const char w_notallowed[] = "The sysctl is disabled on the arch\n"; 485 #endif 486 487 static struct witness_order_list_entry order_lists[] = { 488 /* 489 * sx locks 490 */ 491 { "proctree", &lock_class_sx }, 492 { "allproc", &lock_class_sx }, 493 { "allprison", &lock_class_sx }, 494 { NULL, NULL }, 495 /* 496 * Various mutexes 497 */ 498 { "Giant", &lock_class_mtx_sleep }, 499 { "pipe mutex", &lock_class_mtx_sleep }, 500 { "sigio lock", &lock_class_mtx_sleep }, 501 { "process group", &lock_class_mtx_sleep }, 502 { "process lock", &lock_class_mtx_sleep }, 503 { "session", &lock_class_mtx_sleep }, 504 { "uidinfo hash", &lock_class_rw }, 505 #ifdef HWPMC_HOOKS 506 { "pmc-sleep", &lock_class_mtx_sleep }, 507 #endif 508 { "time lock", &lock_class_mtx_sleep }, 509 { NULL, NULL }, 510 /* 511 * umtx 512 */ 513 { "umtx lock", &lock_class_mtx_sleep }, 514 { NULL, NULL }, 515 /* 516 * Sockets 517 */ 518 { "accept", &lock_class_mtx_sleep }, 519 { "so_snd", &lock_class_mtx_sleep }, 520 { "so_rcv", &lock_class_mtx_sleep }, 521 { "sellck", &lock_class_mtx_sleep }, 522 { NULL, NULL }, 523 /* 524 * Routing 525 */ 526 { "so_rcv", &lock_class_mtx_sleep }, 527 { "radix node head", &lock_class_rw }, 528 { "rtentry", &lock_class_mtx_sleep }, 529 { "ifaddr", &lock_class_mtx_sleep }, 530 { NULL, NULL }, 531 /* 532 * IPv4 multicast: 533 * protocol locks before interface locks, after UDP locks. 534 */ 535 { "udpinp", &lock_class_rw }, 536 { "in_multi_mtx", &lock_class_mtx_sleep }, 537 { "igmp_mtx", &lock_class_mtx_sleep }, 538 { "if_addr_lock", &lock_class_rw }, 539 { NULL, NULL }, 540 /* 541 * IPv6 multicast: 542 * protocol locks before interface locks, after UDP locks. 
543 */ 544 { "udpinp", &lock_class_rw }, 545 { "in6_multi_mtx", &lock_class_mtx_sleep }, 546 { "mld_mtx", &lock_class_mtx_sleep }, 547 { "if_addr_lock", &lock_class_rw }, 548 { NULL, NULL }, 549 /* 550 * UNIX Domain Sockets 551 */ 552 { "unp_link_rwlock", &lock_class_rw }, 553 { "unp_list_lock", &lock_class_mtx_sleep }, 554 { "unp", &lock_class_mtx_sleep }, 555 { "so_snd", &lock_class_mtx_sleep }, 556 { NULL, NULL }, 557 /* 558 * UDP/IP 559 */ 560 { "udp", &lock_class_rw }, 561 { "udpinp", &lock_class_rw }, 562 { "so_snd", &lock_class_mtx_sleep }, 563 { NULL, NULL }, 564 /* 565 * TCP/IP 566 */ 567 { "tcp", &lock_class_rw }, 568 { "tcpinp", &lock_class_rw }, 569 { "so_snd", &lock_class_mtx_sleep }, 570 { NULL, NULL }, 571 /* 572 * BPF 573 */ 574 { "bpf global lock", &lock_class_sx }, 575 { "bpf interface lock", &lock_class_rw }, 576 { "bpf cdev lock", &lock_class_mtx_sleep }, 577 { NULL, NULL }, 578 /* 579 * NFS server 580 */ 581 { "nfsd_mtx", &lock_class_mtx_sleep }, 582 { "so_snd", &lock_class_mtx_sleep }, 583 { NULL, NULL }, 584 585 /* 586 * IEEE 802.11 587 */ 588 { "802.11 com lock", &lock_class_mtx_sleep}, 589 { NULL, NULL }, 590 /* 591 * Network drivers 592 */ 593 { "network driver", &lock_class_mtx_sleep}, 594 { NULL, NULL }, 595 596 /* 597 * Netgraph 598 */ 599 { "ng_node", &lock_class_mtx_sleep }, 600 { "ng_worklist", &lock_class_mtx_sleep }, 601 { NULL, NULL }, 602 /* 603 * CDEV 604 */ 605 { "vm map (system)", &lock_class_mtx_sleep }, 606 { "vnode interlock", &lock_class_mtx_sleep }, 607 { "cdev", &lock_class_mtx_sleep }, 608 { NULL, NULL }, 609 /* 610 * VM 611 */ 612 { "vm map (user)", &lock_class_sx }, 613 { "vm object", &lock_class_rw }, 614 { "vm page", &lock_class_mtx_sleep }, 615 { "pmap pv global", &lock_class_rw }, 616 { "pmap", &lock_class_mtx_sleep }, 617 { "pmap pv list", &lock_class_rw }, 618 { "vm page free queue", &lock_class_mtx_sleep }, 619 { "vm pagequeue", &lock_class_mtx_sleep }, 620 { NULL, NULL }, 621 /* 622 * kqueue/VFS interaction 623 */ 624 { "kqueue", &lock_class_mtx_sleep }, 625 { "struct mount mtx", &lock_class_mtx_sleep }, 626 { "vnode interlock", &lock_class_mtx_sleep }, 627 { NULL, NULL }, 628 /* 629 * VFS namecache 630 */ 631 { "ncvn", &lock_class_mtx_sleep }, 632 { "ncbuc", &lock_class_rw }, 633 { "vnode interlock", &lock_class_mtx_sleep }, 634 { "ncneg", &lock_class_mtx_sleep }, 635 { NULL, NULL }, 636 /* 637 * ZFS locking 638 */ 639 { "dn->dn_mtx", &lock_class_sx }, 640 { "dr->dt.di.dr_mtx", &lock_class_sx }, 641 { "db->db_mtx", &lock_class_sx }, 642 { NULL, NULL }, 643 /* 644 * TCP log locks 645 */ 646 { "TCP ID tree", &lock_class_rw }, 647 { "tcp log id bucket", &lock_class_mtx_sleep }, 648 { "tcpinp", &lock_class_rw }, 649 { "TCP log expireq", &lock_class_mtx_sleep }, 650 { NULL, NULL }, 651 /* 652 * spin locks 653 */ 654 #ifdef SMP 655 { "ap boot", &lock_class_mtx_spin }, 656 #endif 657 { "rm.mutex_mtx", &lock_class_mtx_spin }, 658 { "sio", &lock_class_mtx_spin }, 659 #ifdef __i386__ 660 { "cy", &lock_class_mtx_spin }, 661 #endif 662 #ifdef __sparc64__ 663 { "pcib_mtx", &lock_class_mtx_spin }, 664 { "rtc_mtx", &lock_class_mtx_spin }, 665 #endif 666 { "scc_hwmtx", &lock_class_mtx_spin }, 667 { "uart_hwmtx", &lock_class_mtx_spin }, 668 { "fast_taskqueue", &lock_class_mtx_spin }, 669 { "intr table", &lock_class_mtx_spin }, 670 #ifdef HWPMC_HOOKS 671 { "pmc-per-proc", &lock_class_mtx_spin }, 672 #endif 673 { "process slock", &lock_class_mtx_spin }, 674 { "syscons video lock", &lock_class_mtx_spin }, 675 { "sleepq chain", &lock_class_mtx_spin }, 
676 { "rm_spinlock", &lock_class_mtx_spin }, 677 { "turnstile chain", &lock_class_mtx_spin }, 678 { "turnstile lock", &lock_class_mtx_spin }, 679 { "sched lock", &lock_class_mtx_spin }, 680 { "td_contested", &lock_class_mtx_spin }, 681 { "callout", &lock_class_mtx_spin }, 682 { "entropy harvest mutex", &lock_class_mtx_spin }, 683 #ifdef SMP 684 { "smp rendezvous", &lock_class_mtx_spin }, 685 #endif 686 #ifdef __powerpc__ 687 { "tlb0", &lock_class_mtx_spin }, 688 #endif 689 /* 690 * leaf locks 691 */ 692 { "intrcnt", &lock_class_mtx_spin }, 693 { "icu", &lock_class_mtx_spin }, 694 #if defined(SMP) && defined(__sparc64__) 695 { "ipi", &lock_class_mtx_spin }, 696 #endif 697 #ifdef __i386__ 698 { "allpmaps", &lock_class_mtx_spin }, 699 { "descriptor tables", &lock_class_mtx_spin }, 700 #endif 701 { "clk", &lock_class_mtx_spin }, 702 { "cpuset", &lock_class_mtx_spin }, 703 { "mprof lock", &lock_class_mtx_spin }, 704 { "zombie lock", &lock_class_mtx_spin }, 705 { "ALD Queue", &lock_class_mtx_spin }, 706 #if defined(__i386__) || defined(__amd64__) 707 { "pcicfg", &lock_class_mtx_spin }, 708 { "NDIS thread lock", &lock_class_mtx_spin }, 709 #endif 710 { "tw_osl_io_lock", &lock_class_mtx_spin }, 711 { "tw_osl_q_lock", &lock_class_mtx_spin }, 712 { "tw_cl_io_lock", &lock_class_mtx_spin }, 713 { "tw_cl_intr_lock", &lock_class_mtx_spin }, 714 { "tw_cl_gen_lock", &lock_class_mtx_spin }, 715 #ifdef HWPMC_HOOKS 716 { "pmc-leaf", &lock_class_mtx_spin }, 717 #endif 718 { "blocked lock", &lock_class_mtx_spin }, 719 { NULL, NULL }, 720 { NULL, NULL } 721 }; 722 723 #ifdef BLESSING 724 /* 725 * Pairs of locks which have been blessed 726 * Don't complain about order problems with blessed locks 727 */ 728 static struct witness_blessed blessed_list[] = { 729 }; 730 #endif 731 732 /* 733 * This global is set to 0 once it becomes safe to use the witness code. 734 */ 735 static int witness_cold = 1; 736 737 /* 738 * This global is set to 1 once the static lock orders have been enrolled 739 * so that a warning can be issued for any spin locks enrolled later. 740 */ 741 static int witness_spin_warn = 0; 742 743 /* Trim useless garbage from filenames. */ 744 static const char * 745 fixup_filename(const char *file) 746 { 747 748 if (file == NULL) 749 return (NULL); 750 while (strncmp(file, "../", 3) == 0) 751 file += 3; 752 return (file); 753 } 754 755 /* 756 * Calculate the size of early witness structures. 757 */ 758 int 759 witness_startup_count(void) 760 { 761 int sz; 762 763 sz = sizeof(struct witness) * witness_count; 764 sz += sizeof(*w_rmatrix) * (witness_count + 1); 765 sz += sizeof(*w_rmatrix[0]) * (witness_count + 1) * 766 (witness_count + 1); 767 768 return (sz); 769 } 770 771 /* 772 * The WITNESS-enabled diagnostic code. Note that the witness code does 773 * assume that the early boot is single-threaded at least until after this 774 * routine is completed. 
775 */ 776 void 777 witness_startup(void *mem) 778 { 779 struct lock_object *lock; 780 struct witness_order_list_entry *order; 781 struct witness *w, *w1; 782 uintptr_t p; 783 int i; 784 785 p = (uintptr_t)mem; 786 w_data = (void *)p; 787 p += sizeof(struct witness) * witness_count; 788 789 w_rmatrix = (void *)p; 790 p += sizeof(*w_rmatrix) * (witness_count + 1); 791 792 for (i = 0; i < witness_count + 1; i++) { 793 w_rmatrix[i] = (void *)p; 794 p += sizeof(*w_rmatrix[i]) * (witness_count + 1); 795 } 796 badstack_sbuf_size = witness_count * 256; 797 798 /* 799 * We have to release Giant before initializing its witness 800 * structure so that WITNESS doesn't get confused. 801 */ 802 mtx_unlock(&Giant); 803 mtx_assert(&Giant, MA_NOTOWNED); 804 805 CTR1(KTR_WITNESS, "%s: initializing witness", __func__); 806 mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET | 807 MTX_NOWITNESS | MTX_NOPROFILE); 808 for (i = witness_count - 1; i >= 0; i--) { 809 w = &w_data[i]; 810 memset(w, 0, sizeof(*w)); 811 w_data[i].w_index = i; /* Witness index never changes. */ 812 witness_free(w); 813 } 814 KASSERT(STAILQ_FIRST(&w_free)->w_index == 0, 815 ("%s: Invalid list of free witness objects", __func__)); 816 817 /* Witness with index 0 is not used to aid in debugging. */ 818 STAILQ_REMOVE_HEAD(&w_free, w_list); 819 w_free_cnt--; 820 821 for (i = 0; i < witness_count; i++) { 822 memset(w_rmatrix[i], 0, sizeof(*w_rmatrix[i]) * 823 (witness_count + 1)); 824 } 825 826 for (i = 0; i < LOCK_CHILDCOUNT; i++) 827 witness_lock_list_free(&w_locklistdata[i]); 828 witness_init_hash_tables(); 829 830 /* First add in all the specified order lists. */ 831 for (order = order_lists; order->w_name != NULL; order++) { 832 w = enroll(order->w_name, order->w_class); 833 if (w == NULL) 834 continue; 835 w->w_file = "order list"; 836 for (order++; order->w_name != NULL; order++) { 837 w1 = enroll(order->w_name, order->w_class); 838 if (w1 == NULL) 839 continue; 840 w1->w_file = "order list"; 841 itismychild(w, w1); 842 w = w1; 843 } 844 } 845 witness_spin_warn = 1; 846 847 /* Iterate through all locks and add them to witness. */ 848 for (i = 0; pending_locks[i].wh_lock != NULL; i++) { 849 lock = pending_locks[i].wh_lock; 850 KASSERT(lock->lo_flags & LO_WITNESS, 851 ("%s: lock %s is on pending list but not LO_WITNESS", 852 __func__, lock->lo_name)); 853 lock->lo_witness = enroll(pending_locks[i].wh_type, 854 LOCK_CLASS(lock)); 855 } 856 857 /* Mark the witness code as being ready for use. */ 858 witness_cold = 0; 859 860 mtx_lock(&Giant); 861 } 862 863 void 864 witness_init(struct lock_object *lock, const char *type) 865 { 866 struct lock_class *class; 867 868 /* Various sanity checks. */ 869 class = LOCK_CLASS(lock); 870 if ((lock->lo_flags & LO_RECURSABLE) != 0 && 871 (class->lc_flags & LC_RECURSABLE) == 0) 872 kassert_panic("%s: lock (%s) %s can not be recursable", 873 __func__, class->lc_name, lock->lo_name); 874 if ((lock->lo_flags & LO_SLEEPABLE) != 0 && 875 (class->lc_flags & LC_SLEEPABLE) == 0) 876 kassert_panic("%s: lock (%s) %s can not be sleepable", 877 __func__, class->lc_name, lock->lo_name); 878 if ((lock->lo_flags & LO_UPGRADABLE) != 0 && 879 (class->lc_flags & LC_UPGRADABLE) == 0) 880 kassert_panic("%s: lock (%s) %s can not be upgradable", 881 __func__, class->lc_name, lock->lo_name); 882 883 /* 884 * If we shouldn't watch this lock, then just clear lo_witness. 
885 * Otherwise, if witness_cold is set, then it is too early to 886 * enroll this lock, so defer it to witness_initialize() by adding 887 * it to the pending_locks list. If it is not too early, then enroll 888 * the lock now. 889 */ 890 if (witness_watch < 1 || panicstr != NULL || 891 (lock->lo_flags & LO_WITNESS) == 0) 892 lock->lo_witness = NULL; 893 else if (witness_cold) { 894 pending_locks[pending_cnt].wh_lock = lock; 895 pending_locks[pending_cnt++].wh_type = type; 896 if (pending_cnt > WITNESS_PENDLIST) 897 panic("%s: pending locks list is too small, " 898 "increase WITNESS_PENDLIST\n", 899 __func__); 900 } else 901 lock->lo_witness = enroll(type, class); 902 } 903 904 void 905 witness_destroy(struct lock_object *lock) 906 { 907 struct lock_class *class; 908 struct witness *w; 909 910 class = LOCK_CLASS(lock); 911 912 if (witness_cold) 913 panic("lock (%s) %s destroyed while witness_cold", 914 class->lc_name, lock->lo_name); 915 916 /* XXX: need to verify that no one holds the lock */ 917 if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL) 918 return; 919 w = lock->lo_witness; 920 921 mtx_lock_spin(&w_mtx); 922 MPASS(w->w_refcount > 0); 923 w->w_refcount--; 924 925 if (w->w_refcount == 0) 926 depart(w); 927 mtx_unlock_spin(&w_mtx); 928 } 929 930 #ifdef DDB 931 static void 932 witness_ddb_compute_levels(void) 933 { 934 struct witness *w; 935 936 /* 937 * First clear all levels. 938 */ 939 STAILQ_FOREACH(w, &w_all, w_list) 940 w->w_ddb_level = -1; 941 942 /* 943 * Look for locks with no parents and level all their descendants. 944 */ 945 STAILQ_FOREACH(w, &w_all, w_list) { 946 947 /* If the witness has ancestors (is not a root), skip it. */ 948 if (w->w_num_ancestors > 0) 949 continue; 950 witness_ddb_level_descendants(w, 0); 951 } 952 } 953 954 static void 955 witness_ddb_level_descendants(struct witness *w, int l) 956 { 957 int i; 958 959 if (w->w_ddb_level >= l) 960 return; 961 962 w->w_ddb_level = l; 963 l++; 964 965 for (i = 1; i <= w_max_used_index; i++) { 966 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) 967 witness_ddb_level_descendants(&w_data[i], l); 968 } 969 } 970 971 static void 972 witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...), 973 struct witness *w, int indent) 974 { 975 int i; 976 977 for (i = 0; i < indent; i++) 978 prnt(" "); 979 prnt("%s (type: %s, depth: %d, active refs: %d)", 980 w->w_name, w->w_class->lc_name, 981 w->w_ddb_level, w->w_refcount); 982 if (w->w_displayed) { 983 prnt(" -- (already displayed)\n"); 984 return; 985 } 986 w->w_displayed = 1; 987 if (w->w_file != NULL && w->w_line != 0) 988 prnt(" -- last acquired @ %s:%d\n", fixup_filename(w->w_file), 989 w->w_line); 990 else 991 prnt(" -- never acquired\n"); 992 indent++; 993 WITNESS_INDEX_ASSERT(w->w_index); 994 for (i = 1; i <= w_max_used_index; i++) { 995 if (db_pager_quit) 996 return; 997 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) 998 witness_ddb_display_descendants(prnt, &w_data[i], 999 indent); 1000 } 1001 } 1002 1003 static void 1004 witness_ddb_display_list(int(*prnt)(const char *fmt, ...), 1005 struct witness_list *list) 1006 { 1007 struct witness *w; 1008 1009 STAILQ_FOREACH(w, list, w_typelist) { 1010 if (w->w_file == NULL || w->w_ddb_level > 0) 1011 continue; 1012 1013 /* This lock has no anscestors - display its descendants. 
*/ 1014 witness_ddb_display_descendants(prnt, w, 0); 1015 if (db_pager_quit) 1016 return; 1017 } 1018 } 1019 1020 static void 1021 witness_ddb_display(int(*prnt)(const char *fmt, ...)) 1022 { 1023 struct witness *w; 1024 1025 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__)); 1026 witness_ddb_compute_levels(); 1027 1028 /* Clear all the displayed flags. */ 1029 STAILQ_FOREACH(w, &w_all, w_list) 1030 w->w_displayed = 0; 1031 1032 /* 1033 * First, handle sleep locks which have been acquired at least 1034 * once. 1035 */ 1036 prnt("Sleep locks:\n"); 1037 witness_ddb_display_list(prnt, &w_sleep); 1038 if (db_pager_quit) 1039 return; 1040 1041 /* 1042 * Now do spin locks which have been acquired at least once. 1043 */ 1044 prnt("\nSpin locks:\n"); 1045 witness_ddb_display_list(prnt, &w_spin); 1046 if (db_pager_quit) 1047 return; 1048 1049 /* 1050 * Finally, any locks which have not been acquired yet. 1051 */ 1052 prnt("\nLocks which were never acquired:\n"); 1053 STAILQ_FOREACH(w, &w_all, w_list) { 1054 if (w->w_file != NULL || w->w_refcount == 0) 1055 continue; 1056 prnt("%s (type: %s, depth: %d)\n", w->w_name, 1057 w->w_class->lc_name, w->w_ddb_level); 1058 if (db_pager_quit) 1059 return; 1060 } 1061 } 1062 #endif /* DDB */ 1063 1064 int 1065 witness_defineorder(struct lock_object *lock1, struct lock_object *lock2) 1066 { 1067 1068 if (witness_watch == -1 || panicstr != NULL) 1069 return (0); 1070 1071 /* Require locks that witness knows about. */ 1072 if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL || 1073 lock2->lo_witness == NULL) 1074 return (EINVAL); 1075 1076 mtx_assert(&w_mtx, MA_NOTOWNED); 1077 mtx_lock_spin(&w_mtx); 1078 1079 /* 1080 * If we already have either an explicit or implied lock order that 1081 * is the other way around, then return an error. 1082 */ 1083 if (witness_watch && 1084 isitmydescendant(lock2->lo_witness, lock1->lo_witness)) { 1085 mtx_unlock_spin(&w_mtx); 1086 return (EDOOFUS); 1087 } 1088 1089 /* Try to add the new order. */ 1090 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__, 1091 lock2->lo_witness->w_name, lock1->lo_witness->w_name); 1092 itismychild(lock1->lo_witness, lock2->lo_witness); 1093 mtx_unlock_spin(&w_mtx); 1094 return (0); 1095 } 1096 1097 void 1098 witness_checkorder(struct lock_object *lock, int flags, const char *file, 1099 int line, struct lock_object *interlock) 1100 { 1101 struct lock_list_entry *lock_list, *lle; 1102 struct lock_instance *lock1, *lock2, *plock; 1103 struct lock_class *class, *iclass; 1104 struct witness *w, *w1; 1105 struct thread *td; 1106 int i, j; 1107 1108 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL || 1109 panicstr != NULL) 1110 return; 1111 1112 w = lock->lo_witness; 1113 class = LOCK_CLASS(lock); 1114 td = curthread; 1115 1116 if (class->lc_flags & LC_SLEEPLOCK) { 1117 1118 /* 1119 * Since spin locks include a critical section, this check 1120 * implicitly enforces a lock order of all sleep locks before 1121 * all spin locks. 1122 */ 1123 if (td->td_critnest != 0 && !kdb_active) 1124 kassert_panic("acquiring blockable sleep lock with " 1125 "spinlock or critical section held (%s) %s @ %s:%d", 1126 class->lc_name, lock->lo_name, 1127 fixup_filename(file), line); 1128 1129 /* 1130 * If this is the first lock acquired then just return as 1131 * no order checking is needed. 
1132 */ 1133 lock_list = td->td_sleeplocks; 1134 if (lock_list == NULL || lock_list->ll_count == 0) 1135 return; 1136 } else { 1137 1138 /* 1139 * If this is the first lock, just return as no order 1140 * checking is needed. Avoid problems with thread 1141 * migration pinning the thread while checking if 1142 * spinlocks are held. If at least one spinlock is held 1143 * the thread is in a safe path and it is allowed to 1144 * unpin it. 1145 */ 1146 sched_pin(); 1147 lock_list = PCPU_GET(spinlocks); 1148 if (lock_list == NULL || lock_list->ll_count == 0) { 1149 sched_unpin(); 1150 return; 1151 } 1152 sched_unpin(); 1153 } 1154 1155 /* 1156 * Check to see if we are recursing on a lock we already own. If 1157 * so, make sure that we don't mismatch exclusive and shared lock 1158 * acquires. 1159 */ 1160 lock1 = find_instance(lock_list, lock); 1161 if (lock1 != NULL) { 1162 if ((lock1->li_flags & LI_EXCLUSIVE) != 0 && 1163 (flags & LOP_EXCLUSIVE) == 0) { 1164 witness_output("shared lock of (%s) %s @ %s:%d\n", 1165 class->lc_name, lock->lo_name, 1166 fixup_filename(file), line); 1167 witness_output("while exclusively locked from %s:%d\n", 1168 fixup_filename(lock1->li_file), lock1->li_line); 1169 kassert_panic("excl->share"); 1170 } 1171 if ((lock1->li_flags & LI_EXCLUSIVE) == 0 && 1172 (flags & LOP_EXCLUSIVE) != 0) { 1173 witness_output("exclusive lock of (%s) %s @ %s:%d\n", 1174 class->lc_name, lock->lo_name, 1175 fixup_filename(file), line); 1176 witness_output("while share locked from %s:%d\n", 1177 fixup_filename(lock1->li_file), lock1->li_line); 1178 kassert_panic("share->excl"); 1179 } 1180 return; 1181 } 1182 1183 /* Warn if the interlock is not locked exactly once. */ 1184 if (interlock != NULL) { 1185 iclass = LOCK_CLASS(interlock); 1186 lock1 = find_instance(lock_list, interlock); 1187 if (lock1 == NULL) 1188 kassert_panic("interlock (%s) %s not locked @ %s:%d", 1189 iclass->lc_name, interlock->lo_name, 1190 fixup_filename(file), line); 1191 else if ((lock1->li_flags & LI_RECURSEMASK) != 0) 1192 kassert_panic("interlock (%s) %s recursed @ %s:%d", 1193 iclass->lc_name, interlock->lo_name, 1194 fixup_filename(file), line); 1195 } 1196 1197 /* 1198 * Find the previously acquired lock, but ignore interlocks. 1199 */ 1200 plock = &lock_list->ll_children[lock_list->ll_count - 1]; 1201 if (interlock != NULL && plock->li_lock == interlock) { 1202 if (lock_list->ll_count > 1) 1203 plock = 1204 &lock_list->ll_children[lock_list->ll_count - 2]; 1205 else { 1206 lle = lock_list->ll_next; 1207 1208 /* 1209 * The interlock is the only lock we hold, so 1210 * simply return. 1211 */ 1212 if (lle == NULL) 1213 return; 1214 plock = &lle->ll_children[lle->ll_count - 1]; 1215 } 1216 } 1217 1218 /* 1219 * Try to perform most checks without a lock. If this succeeds we 1220 * can skip acquiring the lock and return success. Otherwise we redo 1221 * the check with the lock held to handle races with concurrent updates. 1222 */ 1223 w1 = plock->li_lock->lo_witness; 1224 if (witness_lock_order_check(w1, w)) 1225 return; 1226 1227 mtx_lock_spin(&w_mtx); 1228 if (witness_lock_order_check(w1, w)) { 1229 mtx_unlock_spin(&w_mtx); 1230 return; 1231 } 1232 witness_lock_order_add(w1, w); 1233 1234 /* 1235 * Check for duplicate locks of the same type. Note that we only 1236 * have to check for this on the last lock we just acquired. Any 1237 * other cases will be caught as lock order violations. 
1238 */ 1239 if (w1 == w) { 1240 i = w->w_index; 1241 if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) && 1242 !(w_rmatrix[i][i] & WITNESS_REVERSAL)) { 1243 w_rmatrix[i][i] |= WITNESS_REVERSAL; 1244 w->w_reversed = 1; 1245 mtx_unlock_spin(&w_mtx); 1246 witness_output( 1247 "acquiring duplicate lock of same type: \"%s\"\n", 1248 w->w_name); 1249 witness_output(" 1st %s @ %s:%d\n", plock->li_lock->lo_name, 1250 fixup_filename(plock->li_file), plock->li_line); 1251 witness_output(" 2nd %s @ %s:%d\n", lock->lo_name, 1252 fixup_filename(file), line); 1253 witness_debugger(1, __func__); 1254 } else 1255 mtx_unlock_spin(&w_mtx); 1256 return; 1257 } 1258 mtx_assert(&w_mtx, MA_OWNED); 1259 1260 /* 1261 * If we know that the lock we are acquiring comes after 1262 * the lock we most recently acquired in the lock order tree, 1263 * then there is no need for any further checks. 1264 */ 1265 if (isitmychild(w1, w)) 1266 goto out; 1267 1268 for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) { 1269 for (i = lle->ll_count - 1; i >= 0; i--, j++) { 1270 1271 MPASS(j < LOCK_CHILDCOUNT * LOCK_NCHILDREN); 1272 lock1 = &lle->ll_children[i]; 1273 1274 /* 1275 * Ignore the interlock. 1276 */ 1277 if (interlock == lock1->li_lock) 1278 continue; 1279 1280 /* 1281 * If this lock doesn't undergo witness checking, 1282 * then skip it. 1283 */ 1284 w1 = lock1->li_lock->lo_witness; 1285 if (w1 == NULL) { 1286 KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0, 1287 ("lock missing witness structure")); 1288 continue; 1289 } 1290 1291 /* 1292 * If we are locking Giant and this is a sleepable 1293 * lock, then skip it. 1294 */ 1295 if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 && 1296 lock == &Giant.lock_object) 1297 continue; 1298 1299 /* 1300 * If we are locking a sleepable lock and this lock 1301 * is Giant, then skip it. 1302 */ 1303 if ((lock->lo_flags & LO_SLEEPABLE) != 0 && 1304 lock1->li_lock == &Giant.lock_object) 1305 continue; 1306 1307 /* 1308 * If we are locking a sleepable lock and this lock 1309 * isn't sleepable, we want to treat it as a lock 1310 * order violation to enfore a general lock order of 1311 * sleepable locks before non-sleepable locks. 1312 */ 1313 if (((lock->lo_flags & LO_SLEEPABLE) != 0 && 1314 (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0)) 1315 goto reversal; 1316 1317 /* 1318 * If we are locking Giant and this is a non-sleepable 1319 * lock, then treat it as a reversal. 1320 */ 1321 if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 && 1322 lock == &Giant.lock_object) 1323 goto reversal; 1324 1325 /* 1326 * Check the lock order hierarchy for a reveresal. 1327 */ 1328 if (!isitmydescendant(w, w1)) 1329 continue; 1330 reversal: 1331 1332 /* 1333 * We have a lock order violation, check to see if it 1334 * is allowed or has already been yelled about. 1335 */ 1336 #ifdef BLESSING 1337 1338 /* 1339 * If the lock order is blessed, just bail. We don't 1340 * look for other lock order violations though, which 1341 * may be a bug. 
1342 */ 1343 if (blessed(w, w1)) 1344 goto out; 1345 #endif 1346 1347 /* Bail if this violation is known */ 1348 if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL) 1349 goto out; 1350 1351 /* Record this as a violation */ 1352 w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL; 1353 w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL; 1354 w->w_reversed = w1->w_reversed = 1; 1355 witness_increment_graph_generation(); 1356 mtx_unlock_spin(&w_mtx); 1357 1358 #ifdef WITNESS_NO_VNODE 1359 /* 1360 * There are known LORs between VNODE locks. They are 1361 * not an indication of a bug. VNODE locks are flagged 1362 * as such (LO_IS_VNODE) and we don't yell if the LOR 1363 * is between 2 VNODE locks. 1364 */ 1365 if ((lock->lo_flags & LO_IS_VNODE) != 0 && 1366 (lock1->li_lock->lo_flags & LO_IS_VNODE) != 0) 1367 return; 1368 #endif 1369 1370 /* 1371 * Ok, yell about it. 1372 */ 1373 if (((lock->lo_flags & LO_SLEEPABLE) != 0 && 1374 (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0)) 1375 witness_output( 1376 "lock order reversal: (sleepable after non-sleepable)\n"); 1377 else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 1378 && lock == &Giant.lock_object) 1379 witness_output( 1380 "lock order reversal: (Giant after non-sleepable)\n"); 1381 else 1382 witness_output("lock order reversal:\n"); 1383 1384 /* 1385 * Try to locate an earlier lock with 1386 * witness w in our list. 1387 */ 1388 do { 1389 lock2 = &lle->ll_children[i]; 1390 MPASS(lock2->li_lock != NULL); 1391 if (lock2->li_lock->lo_witness == w) 1392 break; 1393 if (i == 0 && lle->ll_next != NULL) { 1394 lle = lle->ll_next; 1395 i = lle->ll_count - 1; 1396 MPASS(i >= 0 && i < LOCK_NCHILDREN); 1397 } else 1398 i--; 1399 } while (i >= 0); 1400 if (i < 0) { 1401 witness_output(" 1st %p %s (%s) @ %s:%d\n", 1402 lock1->li_lock, lock1->li_lock->lo_name, 1403 w1->w_name, fixup_filename(lock1->li_file), 1404 lock1->li_line); 1405 witness_output(" 2nd %p %s (%s) @ %s:%d\n", lock, 1406 lock->lo_name, w->w_name, 1407 fixup_filename(file), line); 1408 } else { 1409 witness_output(" 1st %p %s (%s) @ %s:%d\n", 1410 lock2->li_lock, lock2->li_lock->lo_name, 1411 lock2->li_lock->lo_witness->w_name, 1412 fixup_filename(lock2->li_file), 1413 lock2->li_line); 1414 witness_output(" 2nd %p %s (%s) @ %s:%d\n", 1415 lock1->li_lock, lock1->li_lock->lo_name, 1416 w1->w_name, fixup_filename(lock1->li_file), 1417 lock1->li_line); 1418 witness_output(" 3rd %p %s (%s) @ %s:%d\n", lock, 1419 lock->lo_name, w->w_name, 1420 fixup_filename(file), line); 1421 } 1422 witness_debugger(1, __func__); 1423 return; 1424 } 1425 } 1426 1427 /* 1428 * If requested, build a new lock order. However, don't build a new 1429 * relationship between a sleepable lock and Giant if it is in the 1430 * wrong direction. The correct lock order is that sleepable locks 1431 * always come before Giant. 
1432 */ 1433 if (flags & LOP_NEWORDER && 1434 !(plock->li_lock == &Giant.lock_object && 1435 (lock->lo_flags & LO_SLEEPABLE) != 0)) { 1436 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__, 1437 w->w_name, plock->li_lock->lo_witness->w_name); 1438 itismychild(plock->li_lock->lo_witness, w); 1439 } 1440 out: 1441 mtx_unlock_spin(&w_mtx); 1442 } 1443 1444 void 1445 witness_lock(struct lock_object *lock, int flags, const char *file, int line) 1446 { 1447 struct lock_list_entry **lock_list, *lle; 1448 struct lock_instance *instance; 1449 struct witness *w; 1450 struct thread *td; 1451 1452 if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL || 1453 panicstr != NULL) 1454 return; 1455 w = lock->lo_witness; 1456 td = curthread; 1457 1458 /* Determine lock list for this lock. */ 1459 if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK) 1460 lock_list = &td->td_sleeplocks; 1461 else 1462 lock_list = PCPU_PTR(spinlocks); 1463 1464 /* Check to see if we are recursing on a lock we already own. */ 1465 instance = find_instance(*lock_list, lock); 1466 if (instance != NULL) { 1467 instance->li_flags++; 1468 CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__, 1469 td->td_proc->p_pid, lock->lo_name, 1470 instance->li_flags & LI_RECURSEMASK); 1471 instance->li_file = file; 1472 instance->li_line = line; 1473 return; 1474 } 1475 1476 /* Update per-witness last file and line acquire. */ 1477 w->w_file = file; 1478 w->w_line = line; 1479 1480 /* Find the next open lock instance in the list and fill it. */ 1481 lle = *lock_list; 1482 if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) { 1483 lle = witness_lock_list_get(); 1484 if (lle == NULL) 1485 return; 1486 lle->ll_next = *lock_list; 1487 CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__, 1488 td->td_proc->p_pid, lle); 1489 *lock_list = lle; 1490 } 1491 instance = &lle->ll_children[lle->ll_count++]; 1492 instance->li_lock = lock; 1493 instance->li_line = line; 1494 instance->li_file = file; 1495 if ((flags & LOP_EXCLUSIVE) != 0) 1496 instance->li_flags = LI_EXCLUSIVE; 1497 else 1498 instance->li_flags = 0; 1499 CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__, 1500 td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1); 1501 } 1502 1503 void 1504 witness_upgrade(struct lock_object *lock, int flags, const char *file, int line) 1505 { 1506 struct lock_instance *instance; 1507 struct lock_class *class; 1508 1509 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__)); 1510 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL) 1511 return; 1512 class = LOCK_CLASS(lock); 1513 if (witness_watch) { 1514 if ((lock->lo_flags & LO_UPGRADABLE) == 0) 1515 kassert_panic( 1516 "upgrade of non-upgradable lock (%s) %s @ %s:%d", 1517 class->lc_name, lock->lo_name, 1518 fixup_filename(file), line); 1519 if ((class->lc_flags & LC_SLEEPLOCK) == 0) 1520 kassert_panic( 1521 "upgrade of non-sleep lock (%s) %s @ %s:%d", 1522 class->lc_name, lock->lo_name, 1523 fixup_filename(file), line); 1524 } 1525 instance = find_instance(curthread->td_sleeplocks, lock); 1526 if (instance == NULL) { 1527 kassert_panic("upgrade of unlocked lock (%s) %s @ %s:%d", 1528 class->lc_name, lock->lo_name, 1529 fixup_filename(file), line); 1530 return; 1531 } 1532 if (witness_watch) { 1533 if ((instance->li_flags & LI_EXCLUSIVE) != 0) 1534 kassert_panic( 1535 "upgrade of exclusive lock (%s) %s @ %s:%d", 1536 class->lc_name, lock->lo_name, 1537 fixup_filename(file), line); 1538 if ((instance->li_flags & LI_RECURSEMASK) != 0) 1539 
kassert_panic( 1540 "upgrade of recursed lock (%s) %s r=%d @ %s:%d", 1541 class->lc_name, lock->lo_name, 1542 instance->li_flags & LI_RECURSEMASK, 1543 fixup_filename(file), line); 1544 } 1545 instance->li_flags |= LI_EXCLUSIVE; 1546 } 1547 1548 void 1549 witness_downgrade(struct lock_object *lock, int flags, const char *file, 1550 int line) 1551 { 1552 struct lock_instance *instance; 1553 struct lock_class *class; 1554 1555 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__)); 1556 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL) 1557 return; 1558 class = LOCK_CLASS(lock); 1559 if (witness_watch) { 1560 if ((lock->lo_flags & LO_UPGRADABLE) == 0) 1561 kassert_panic( 1562 "downgrade of non-upgradable lock (%s) %s @ %s:%d", 1563 class->lc_name, lock->lo_name, 1564 fixup_filename(file), line); 1565 if ((class->lc_flags & LC_SLEEPLOCK) == 0) 1566 kassert_panic( 1567 "downgrade of non-sleep lock (%s) %s @ %s:%d", 1568 class->lc_name, lock->lo_name, 1569 fixup_filename(file), line); 1570 } 1571 instance = find_instance(curthread->td_sleeplocks, lock); 1572 if (instance == NULL) { 1573 kassert_panic("downgrade of unlocked lock (%s) %s @ %s:%d", 1574 class->lc_name, lock->lo_name, 1575 fixup_filename(file), line); 1576 return; 1577 } 1578 if (witness_watch) { 1579 if ((instance->li_flags & LI_EXCLUSIVE) == 0) 1580 kassert_panic( 1581 "downgrade of shared lock (%s) %s @ %s:%d", 1582 class->lc_name, lock->lo_name, 1583 fixup_filename(file), line); 1584 if ((instance->li_flags & LI_RECURSEMASK) != 0) 1585 kassert_panic( 1586 "downgrade of recursed lock (%s) %s r=%d @ %s:%d", 1587 class->lc_name, lock->lo_name, 1588 instance->li_flags & LI_RECURSEMASK, 1589 fixup_filename(file), line); 1590 } 1591 instance->li_flags &= ~LI_EXCLUSIVE; 1592 } 1593 1594 void 1595 witness_unlock(struct lock_object *lock, int flags, const char *file, int line) 1596 { 1597 struct lock_list_entry **lock_list, *lle; 1598 struct lock_instance *instance; 1599 struct lock_class *class; 1600 struct thread *td; 1601 register_t s; 1602 int i, j; 1603 1604 if (witness_cold || lock->lo_witness == NULL || panicstr != NULL) 1605 return; 1606 td = curthread; 1607 class = LOCK_CLASS(lock); 1608 1609 /* Find lock instance associated with this lock. */ 1610 if (class->lc_flags & LC_SLEEPLOCK) 1611 lock_list = &td->td_sleeplocks; 1612 else 1613 lock_list = PCPU_PTR(spinlocks); 1614 lle = *lock_list; 1615 for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next) 1616 for (i = 0; i < (*lock_list)->ll_count; i++) { 1617 instance = &(*lock_list)->ll_children[i]; 1618 if (instance->li_lock == lock) 1619 goto found; 1620 } 1621 1622 /* 1623 * When disabling WITNESS through witness_watch we could end up in 1624 * having registered locks in the td_sleeplocks queue. 1625 * We have to make sure we flush these queues, so just search for 1626 * eventual register locks and remove them. 1627 */ 1628 if (witness_watch > 0) { 1629 kassert_panic("lock (%s) %s not locked @ %s:%d", class->lc_name, 1630 lock->lo_name, fixup_filename(file), line); 1631 return; 1632 } else { 1633 return; 1634 } 1635 found: 1636 1637 /* First, check for shared/exclusive mismatches. 
*/ 1638 if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 && 1639 (flags & LOP_EXCLUSIVE) == 0) { 1640 witness_output("shared unlock of (%s) %s @ %s:%d\n", 1641 class->lc_name, lock->lo_name, fixup_filename(file), line); 1642 witness_output("while exclusively locked from %s:%d\n", 1643 fixup_filename(instance->li_file), instance->li_line); 1644 kassert_panic("excl->ushare"); 1645 } 1646 if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 && 1647 (flags & LOP_EXCLUSIVE) != 0) { 1648 witness_output("exclusive unlock of (%s) %s @ %s:%d\n", 1649 class->lc_name, lock->lo_name, fixup_filename(file), line); 1650 witness_output("while share locked from %s:%d\n", 1651 fixup_filename(instance->li_file), 1652 instance->li_line); 1653 kassert_panic("share->uexcl"); 1654 } 1655 /* If we are recursed, unrecurse. */ 1656 if ((instance->li_flags & LI_RECURSEMASK) > 0) { 1657 CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__, 1658 td->td_proc->p_pid, instance->li_lock->lo_name, 1659 instance->li_flags); 1660 instance->li_flags--; 1661 return; 1662 } 1663 /* The lock is now being dropped, check for NORELEASE flag */ 1664 if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) { 1665 witness_output("forbidden unlock of (%s) %s @ %s:%d\n", 1666 class->lc_name, lock->lo_name, fixup_filename(file), line); 1667 kassert_panic("lock marked norelease"); 1668 } 1669 1670 /* Otherwise, remove this item from the list. */ 1671 s = intr_disable(); 1672 CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__, 1673 td->td_proc->p_pid, instance->li_lock->lo_name, 1674 (*lock_list)->ll_count - 1); 1675 for (j = i; j < (*lock_list)->ll_count - 1; j++) 1676 (*lock_list)->ll_children[j] = 1677 (*lock_list)->ll_children[j + 1]; 1678 (*lock_list)->ll_count--; 1679 intr_restore(s); 1680 1681 /* 1682 * In order to reduce contention on w_mtx, we want to keep always an 1683 * head object into lists so that frequent allocation from the 1684 * free witness pool (and subsequent locking) is avoided. 1685 * In order to maintain the current code simple, when the head 1686 * object is totally unloaded it means also that we do not have 1687 * further objects in the list, so the list ownership needs to be 1688 * hand over to another object if the current head needs to be freed. 1689 */ 1690 if ((*lock_list)->ll_count == 0) { 1691 if (*lock_list == lle) { 1692 if (lle->ll_next == NULL) 1693 return; 1694 } else 1695 lle = *lock_list; 1696 *lock_list = lle->ll_next; 1697 CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__, 1698 td->td_proc->p_pid, lle); 1699 witness_lock_list_free(lle); 1700 } 1701 } 1702 1703 void 1704 witness_thread_exit(struct thread *td) 1705 { 1706 struct lock_list_entry *lle; 1707 int i, n; 1708 1709 lle = td->td_sleeplocks; 1710 if (lle == NULL || panicstr != NULL) 1711 return; 1712 if (lle->ll_count != 0) { 1713 for (n = 0; lle != NULL; lle = lle->ll_next) 1714 for (i = lle->ll_count - 1; i >= 0; i--) { 1715 if (n == 0) 1716 witness_output( 1717 "Thread %p exiting with the following locks held:\n", td); 1718 n++; 1719 witness_list_lock(&lle->ll_children[i], 1720 witness_output); 1721 1722 } 1723 kassert_panic( 1724 "Thread %p cannot exit while holding sleeplocks\n", td); 1725 } 1726 witness_lock_list_free(lle); 1727 } 1728 1729 /* 1730 * Warn if any locks other than 'lock' are held. Flags can be passed in to 1731 * exempt Giant and sleepable locks from the checks as well. 
If any 1732 * non-exempt locks are held, then a supplied message is printed to the 1733 * output channel along with a list of the offending locks. If indicated in the 1734 * flags then a failure results in a panic as well. 1735 */ 1736 int 1737 witness_warn(int flags, struct lock_object *lock, const char *fmt, ...) 1738 { 1739 struct lock_list_entry *lock_list, *lle; 1740 struct lock_instance *lock1; 1741 struct thread *td; 1742 va_list ap; 1743 int i, n; 1744 1745 if (witness_cold || witness_watch < 1 || panicstr != NULL) 1746 return (0); 1747 n = 0; 1748 td = curthread; 1749 for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next) 1750 for (i = lle->ll_count - 1; i >= 0; i--) { 1751 lock1 = &lle->ll_children[i]; 1752 if (lock1->li_lock == lock) 1753 continue; 1754 if (flags & WARN_GIANTOK && 1755 lock1->li_lock == &Giant.lock_object) 1756 continue; 1757 if (flags & WARN_SLEEPOK && 1758 (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0) 1759 continue; 1760 if (n == 0) { 1761 va_start(ap, fmt); 1762 vprintf(fmt, ap); 1763 va_end(ap); 1764 printf(" with the following %slocks held:\n", 1765 (flags & WARN_SLEEPOK) != 0 ? 1766 "non-sleepable " : ""); 1767 } 1768 n++; 1769 witness_list_lock(lock1, printf); 1770 } 1771 1772 /* 1773 * Pin the thread in order to avoid problems with thread migration. 1774 * Once that all verifies are passed about spinlocks ownership, 1775 * the thread is in a safe path and it can be unpinned. 1776 */ 1777 sched_pin(); 1778 lock_list = PCPU_GET(spinlocks); 1779 if (lock_list != NULL && lock_list->ll_count != 0) { 1780 sched_unpin(); 1781 1782 /* 1783 * We should only have one spinlock and as long as 1784 * the flags cannot match for this locks class, 1785 * check if the first spinlock is the one curthread 1786 * should hold. 1787 */ 1788 lock1 = &lock_list->ll_children[lock_list->ll_count - 1]; 1789 if (lock_list->ll_count == 1 && lock_list->ll_next == NULL && 1790 lock1->li_lock == lock && n == 0) 1791 return (0); 1792 1793 va_start(ap, fmt); 1794 vprintf(fmt, ap); 1795 va_end(ap); 1796 printf(" with the following %slocks held:\n", 1797 (flags & WARN_SLEEPOK) != 0 ? 
"non-sleepable " : ""); 1798 n += witness_list_locks(&lock_list, printf); 1799 } else 1800 sched_unpin(); 1801 if (flags & WARN_PANIC && n) 1802 kassert_panic("%s", __func__); 1803 else 1804 witness_debugger(n, __func__); 1805 return (n); 1806 } 1807 1808 const char * 1809 witness_file(struct lock_object *lock) 1810 { 1811 struct witness *w; 1812 1813 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL) 1814 return ("?"); 1815 w = lock->lo_witness; 1816 return (w->w_file); 1817 } 1818 1819 int 1820 witness_line(struct lock_object *lock) 1821 { 1822 struct witness *w; 1823 1824 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL) 1825 return (0); 1826 w = lock->lo_witness; 1827 return (w->w_line); 1828 } 1829 1830 static struct witness * 1831 enroll(const char *description, struct lock_class *lock_class) 1832 { 1833 struct witness *w; 1834 1835 MPASS(description != NULL); 1836 1837 if (witness_watch == -1 || panicstr != NULL) 1838 return (NULL); 1839 if ((lock_class->lc_flags & LC_SPINLOCK)) { 1840 if (witness_skipspin) 1841 return (NULL); 1842 } else if ((lock_class->lc_flags & LC_SLEEPLOCK) == 0) { 1843 kassert_panic("lock class %s is not sleep or spin", 1844 lock_class->lc_name); 1845 return (NULL); 1846 } 1847 1848 mtx_lock_spin(&w_mtx); 1849 w = witness_hash_get(description); 1850 if (w) 1851 goto found; 1852 if ((w = witness_get()) == NULL) 1853 return (NULL); 1854 MPASS(strlen(description) < MAX_W_NAME); 1855 strcpy(w->w_name, description); 1856 w->w_class = lock_class; 1857 w->w_refcount = 1; 1858 STAILQ_INSERT_HEAD(&w_all, w, w_list); 1859 if (lock_class->lc_flags & LC_SPINLOCK) { 1860 STAILQ_INSERT_HEAD(&w_spin, w, w_typelist); 1861 w_spin_cnt++; 1862 } else if (lock_class->lc_flags & LC_SLEEPLOCK) { 1863 STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist); 1864 w_sleep_cnt++; 1865 } 1866 1867 /* Insert new witness into the hash */ 1868 witness_hash_put(w); 1869 witness_increment_graph_generation(); 1870 mtx_unlock_spin(&w_mtx); 1871 return (w); 1872 found: 1873 w->w_refcount++; 1874 if (w->w_refcount == 1) 1875 w->w_class = lock_class; 1876 mtx_unlock_spin(&w_mtx); 1877 if (lock_class != w->w_class) 1878 kassert_panic( 1879 "lock (%s) %s does not match earlier (%s) lock", 1880 description, lock_class->lc_name, 1881 w->w_class->lc_name); 1882 return (w); 1883 } 1884 1885 static void 1886 depart(struct witness *w) 1887 { 1888 1889 MPASS(w->w_refcount == 0); 1890 if (w->w_class->lc_flags & LC_SLEEPLOCK) { 1891 w_sleep_cnt--; 1892 } else { 1893 w_spin_cnt--; 1894 } 1895 /* 1896 * Set file to NULL as it may point into a loadable module. 1897 */ 1898 w->w_file = NULL; 1899 w->w_line = 0; 1900 witness_increment_graph_generation(); 1901 } 1902 1903 1904 static void 1905 adopt(struct witness *parent, struct witness *child) 1906 { 1907 int pi, ci, i, j; 1908 1909 if (witness_cold == 0) 1910 mtx_assert(&w_mtx, MA_OWNED); 1911 1912 /* If the relationship is already known, there's no work to be done. */ 1913 if (isitmychild(parent, child)) 1914 return; 1915 1916 /* When the structure of the graph changes, bump up the generation. */ 1917 witness_increment_graph_generation(); 1918 1919 /* 1920 * The hard part ... create the direct relationship, then propagate all 1921 * indirect relationships. 
1922 */ 1923 pi = parent->w_index; 1924 ci = child->w_index; 1925 WITNESS_INDEX_ASSERT(pi); 1926 WITNESS_INDEX_ASSERT(ci); 1927 MPASS(pi != ci); 1928 w_rmatrix[pi][ci] |= WITNESS_PARENT; 1929 w_rmatrix[ci][pi] |= WITNESS_CHILD; 1930 1931 /* 1932 * If parent was not already an ancestor of child, 1933 * then we increment the descendant and ancestor counters. 1934 */ 1935 if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) { 1936 parent->w_num_descendants++; 1937 child->w_num_ancestors++; 1938 } 1939 1940 /* 1941 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as 1942 * an ancestor of 'pi' during this loop. 1943 */ 1944 for (i = 1; i <= w_max_used_index; i++) { 1945 if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 && 1946 (i != pi)) 1947 continue; 1948 1949 /* Find each descendant of 'i' and mark it as a descendant. */ 1950 for (j = 1; j <= w_max_used_index; j++) { 1951 1952 /* 1953 * Skip children that are already marked as 1954 * descendants of 'i'. 1955 */ 1956 if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) 1957 continue; 1958 1959 /* 1960 * We are only interested in descendants of 'ci'. Note 1961 * that 'ci' itself is counted as a descendant of 'ci'. 1962 */ 1963 if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 && 1964 (j != ci)) 1965 continue; 1966 w_rmatrix[i][j] |= WITNESS_ANCESTOR; 1967 w_rmatrix[j][i] |= WITNESS_DESCENDANT; 1968 w_data[i].w_num_descendants++; 1969 w_data[j].w_num_ancestors++; 1970 1971 /* 1972 * Make sure we aren't marking a node as both an 1973 * ancestor and descendant. We should have caught 1974 * this as a lock order reversal earlier. 1975 */ 1976 if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) && 1977 (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) { 1978 printf("witness rmatrix paradox! [%d][%d]=%d " 1979 "both ancestor and descendant\n", 1980 i, j, w_rmatrix[i][j]); 1981 kdb_backtrace(); 1982 printf("Witness disabled.\n"); 1983 witness_watch = -1; 1984 } 1985 if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) && 1986 (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) { 1987 printf("witness rmatrix paradox! [%d][%d]=%d " 1988 "both ancestor and descendant\n", 1989 j, i, w_rmatrix[j][i]); 1990 kdb_backtrace(); 1991 printf("Witness disabled.\n"); 1992 witness_watch = -1; 1993 } 1994 } 1995 } 1996 } 1997 1998 static void 1999 itismychild(struct witness *parent, struct witness *child) 2000 { 2001 int unlocked; 2002 2003 MPASS(child != NULL && parent != NULL); 2004 if (witness_cold == 0) 2005 mtx_assert(&w_mtx, MA_OWNED); 2006 2007 if (!witness_lock_type_equal(parent, child)) { 2008 if (witness_cold == 0) { 2009 unlocked = 1; 2010 mtx_unlock_spin(&w_mtx); 2011 } else { 2012 unlocked = 0; 2013 } 2014 kassert_panic( 2015 "%s: parent \"%s\" (%s) and child \"%s\" (%s) are not " 2016 "the same lock type", __func__, parent->w_name, 2017 parent->w_class->lc_name, child->w_name, 2018 child->w_class->lc_name); 2019 if (unlocked) 2020 mtx_lock_spin(&w_mtx); 2021 } 2022 adopt(parent, child); 2023 } 2024 2025 /* 2026 * Generic code for the isitmy*() functions. The rmask parameter is the 2027 * expected relationship of w1 to w2. 
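 *
 * For example, with two hypothetical witnesses A and B for which the
 * matrix records
 *
 *	w_rmatrix[A][B] == WITNESS_PARENT
 *	w_rmatrix[B][A] == WITNESS_CHILD
 *
 * isitmychild(A, B) returns non-zero (rmask is WITNESS_PARENT), while
 * isitmydescendant(B, A) returns zero (rmask is WITNESS_ANCESTOR_MASK and
 * B is not an ancestor of A).  The two matrix entries must always be each
 * other's ancestor/descendant mirror image, which the consistency check
 * at the top of the function body verifies.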
2028 */ 2029 static int 2030 _isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname) 2031 { 2032 unsigned char r1, r2; 2033 int i1, i2; 2034 2035 i1 = w1->w_index; 2036 i2 = w2->w_index; 2037 WITNESS_INDEX_ASSERT(i1); 2038 WITNESS_INDEX_ASSERT(i2); 2039 r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK; 2040 r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK; 2041 2042 /* The flags on one better be the inverse of the flags on the other */ 2043 if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) || 2044 (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) { 2045 /* Don't squawk if we're potentially racing with an update. */ 2046 if (!mtx_owned(&w_mtx)) 2047 return (0); 2048 printf("%s: rmatrix mismatch between %s (index %d) and %s " 2049 "(index %d): w_rmatrix[%d][%d] == %hhx but " 2050 "w_rmatrix[%d][%d] == %hhx\n", 2051 fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1, 2052 i2, i1, r2); 2053 kdb_backtrace(); 2054 printf("Witness disabled.\n"); 2055 witness_watch = -1; 2056 } 2057 return (r1 & rmask); 2058 } 2059 2060 /* 2061 * Checks if @child is a direct child of @parent. 2062 */ 2063 static int 2064 isitmychild(struct witness *parent, struct witness *child) 2065 { 2066 2067 return (_isitmyx(parent, child, WITNESS_PARENT, __func__)); 2068 } 2069 2070 /* 2071 * Checks if @descendant is a direct or indirect descendant of @ancestor. 2072 */ 2073 static int 2074 isitmydescendant(struct witness *ancestor, struct witness *descendant) 2075 { 2076 2077 return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK, 2078 __func__)); 2079 } 2080 2081 #ifdef BLESSING 2082 static int 2083 blessed(struct witness *w1, struct witness *w2) 2084 { 2085 int i; 2086 struct witness_blessed *b; 2087 2088 for (i = 0; i < nitems(blessed_list); i++) { 2089 b = &blessed_list[i]; 2090 if (strcmp(w1->w_name, b->b_lock1) == 0) { 2091 if (strcmp(w2->w_name, b->b_lock2) == 0) 2092 return (1); 2093 continue; 2094 } 2095 if (strcmp(w1->w_name, b->b_lock2) == 0) 2096 if (strcmp(w2->w_name, b->b_lock1) == 0) 2097 return (1); 2098 } 2099 return (0); 2100 } 2101 #endif 2102 2103 static struct witness * 2104 witness_get(void) 2105 { 2106 struct witness *w; 2107 int index; 2108 2109 if (witness_cold == 0) 2110 mtx_assert(&w_mtx, MA_OWNED); 2111 2112 if (witness_watch == -1) { 2113 mtx_unlock_spin(&w_mtx); 2114 return (NULL); 2115 } 2116 if (STAILQ_EMPTY(&w_free)) { 2117 witness_watch = -1; 2118 mtx_unlock_spin(&w_mtx); 2119 printf("WITNESS: unable to allocate a new witness object\n"); 2120 return (NULL); 2121 } 2122 w = STAILQ_FIRST(&w_free); 2123 STAILQ_REMOVE_HEAD(&w_free, w_list); 2124 w_free_cnt--; 2125 index = w->w_index; 2126 MPASS(index > 0 && index == w_max_used_index+1 && 2127 index < witness_count); 2128 bzero(w, sizeof(*w)); 2129 w->w_index = index; 2130 if (index > w_max_used_index) 2131 w_max_used_index = index; 2132 return (w); 2133 } 2134 2135 static void 2136 witness_free(struct witness *w) 2137 { 2138 2139 STAILQ_INSERT_HEAD(&w_free, w, w_list); 2140 w_free_cnt++; 2141 } 2142 2143 static struct lock_list_entry * 2144 witness_lock_list_get(void) 2145 { 2146 struct lock_list_entry *lle; 2147 2148 if (witness_watch == -1) 2149 return (NULL); 2150 mtx_lock_spin(&w_mtx); 2151 lle = w_lock_list_free; 2152 if (lle == NULL) { 2153 witness_watch = -1; 2154 mtx_unlock_spin(&w_mtx); 2155 printf("%s: witness exhausted\n", __func__); 2156 return (NULL); 2157 } 2158 w_lock_list_free = lle->ll_next; 2159 mtx_unlock_spin(&w_mtx); 2160 bzero(lle, sizeof(*lle)); 2161 return (lle); 2162 } 2163 2164 static
void 2165 witness_lock_list_free(struct lock_list_entry *lle) 2166 { 2167 2168 mtx_lock_spin(&w_mtx); 2169 lle->ll_next = w_lock_list_free; 2170 w_lock_list_free = lle; 2171 mtx_unlock_spin(&w_mtx); 2172 } 2173 2174 static struct lock_instance * 2175 find_instance(struct lock_list_entry *list, const struct lock_object *lock) 2176 { 2177 struct lock_list_entry *lle; 2178 struct lock_instance *instance; 2179 int i; 2180 2181 for (lle = list; lle != NULL; lle = lle->ll_next) 2182 for (i = lle->ll_count - 1; i >= 0; i--) { 2183 instance = &lle->ll_children[i]; 2184 if (instance->li_lock == lock) 2185 return (instance); 2186 } 2187 return (NULL); 2188 } 2189 2190 static void 2191 witness_list_lock(struct lock_instance *instance, 2192 int (*prnt)(const char *fmt, ...)) 2193 { 2194 struct lock_object *lock; 2195 2196 lock = instance->li_lock; 2197 prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ? 2198 "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name); 2199 if (lock->lo_witness->w_name != lock->lo_name) 2200 prnt(" (%s)", lock->lo_witness->w_name); 2201 prnt(" r = %d (%p) locked @ %s:%d\n", 2202 instance->li_flags & LI_RECURSEMASK, lock, 2203 fixup_filename(instance->li_file), instance->li_line); 2204 } 2205 2206 static int 2207 witness_output(const char *fmt, ...) 2208 { 2209 va_list ap; 2210 int ret; 2211 2212 va_start(ap, fmt); 2213 ret = witness_voutput(fmt, ap); 2214 va_end(ap); 2215 return (ret); 2216 } 2217 2218 static int 2219 witness_voutput(const char *fmt, va_list ap) 2220 { 2221 int ret; 2222 2223 ret = 0; 2224 switch (witness_channel) { 2225 case WITNESS_CONSOLE: 2226 ret = vprintf(fmt, ap); 2227 break; 2228 case WITNESS_LOG: 2229 vlog(LOG_NOTICE, fmt, ap); 2230 break; 2231 case WITNESS_NONE: 2232 break; 2233 } 2234 return (ret); 2235 } 2236 2237 #ifdef DDB 2238 static int 2239 witness_thread_has_locks(struct thread *td) 2240 { 2241 2242 if (td->td_sleeplocks == NULL) 2243 return (0); 2244 return (td->td_sleeplocks->ll_count != 0); 2245 } 2246 2247 static int 2248 witness_proc_has_locks(struct proc *p) 2249 { 2250 struct thread *td; 2251 2252 FOREACH_THREAD_IN_PROC(p, td) { 2253 if (witness_thread_has_locks(td)) 2254 return (1); 2255 } 2256 return (0); 2257 } 2258 #endif 2259 2260 int 2261 witness_list_locks(struct lock_list_entry **lock_list, 2262 int (*prnt)(const char *fmt, ...)) 2263 { 2264 struct lock_list_entry *lle; 2265 int i, nheld; 2266 2267 nheld = 0; 2268 for (lle = *lock_list; lle != NULL; lle = lle->ll_next) 2269 for (i = lle->ll_count - 1; i >= 0; i--) { 2270 witness_list_lock(&lle->ll_children[i], prnt); 2271 nheld++; 2272 } 2273 return (nheld); 2274 } 2275 2276 /* 2277 * This is a bit risky at best. We call this function when we have timed 2278 * out acquiring a spin lock, and we assume that the other CPU is stuck 2279 * with this lock held. So, we go groveling around in the other CPU's 2280 * per-cpu data to try to find the lock instance for this spin lock to 2281 * see when it was last acquired. 
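 *
 * When an instance is found it is printed in witness_list_lock() format,
 * e.g. (all values here are made up):
 *
 *	exclusive spin mutex example lock (example) r = 0 (0xdeadc0de)
 *	    locked @ somefile.c:123
 *
 * which usually identifies the code path holding the lock on the other
 * CPU.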
2282 */ 2283 void 2284 witness_display_spinlock(struct lock_object *lock, struct thread *owner, 2285 int (*prnt)(const char *fmt, ...)) 2286 { 2287 struct lock_instance *instance; 2288 struct pcpu *pc; 2289 2290 if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU) 2291 return; 2292 pc = pcpu_find(owner->td_oncpu); 2293 instance = find_instance(pc->pc_spinlocks, lock); 2294 if (instance != NULL) 2295 witness_list_lock(instance, prnt); 2296 } 2297 2298 void 2299 witness_save(struct lock_object *lock, const char **filep, int *linep) 2300 { 2301 struct lock_list_entry *lock_list; 2302 struct lock_instance *instance; 2303 struct lock_class *class; 2304 2305 /* 2306 * This function is used independently in locking code to deal with 2307 * Giant, SCHEDULER_STOPPED() check can be removed here after Giant 2308 * is gone. 2309 */ 2310 if (SCHEDULER_STOPPED()) 2311 return; 2312 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__)); 2313 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL) 2314 return; 2315 class = LOCK_CLASS(lock); 2316 if (class->lc_flags & LC_SLEEPLOCK) 2317 lock_list = curthread->td_sleeplocks; 2318 else { 2319 if (witness_skipspin) 2320 return; 2321 lock_list = PCPU_GET(spinlocks); 2322 } 2323 instance = find_instance(lock_list, lock); 2324 if (instance == NULL) { 2325 kassert_panic("%s: lock (%s) %s not locked", __func__, 2326 class->lc_name, lock->lo_name); 2327 return; 2328 } 2329 *filep = instance->li_file; 2330 *linep = instance->li_line; 2331 } 2332 2333 void 2334 witness_restore(struct lock_object *lock, const char *file, int line) 2335 { 2336 struct lock_list_entry *lock_list; 2337 struct lock_instance *instance; 2338 struct lock_class *class; 2339 2340 /* 2341 * This function is used independently in locking code to deal with 2342 * Giant, SCHEDULER_STOPPED() check can be removed here after Giant 2343 * is gone. 
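 *
 * witness_save() and witness_restore() are normally paired around a
 * region where a held lock is dropped and re-acquired, so that the
 * originally recorded acquisition point is not overwritten.  A sketch of
 * a hypothetical caller, for a mutex 'm' that is held on entry:
 *
 *	const char *file;
 *	int line;
 *
 *	witness_save(&m->lock_object, &file, &line);
 *	mtx_unlock(m);
 *	(run without the lock)
 *	mtx_lock(m);
 *	witness_restore(&m->lock_object, file, line);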
2344 */ 2345 if (SCHEDULER_STOPPED()) 2346 return; 2347 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__)); 2348 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL) 2349 return; 2350 class = LOCK_CLASS(lock); 2351 if (class->lc_flags & LC_SLEEPLOCK) 2352 lock_list = curthread->td_sleeplocks; 2353 else { 2354 if (witness_skipspin) 2355 return; 2356 lock_list = PCPU_GET(spinlocks); 2357 } 2358 instance = find_instance(lock_list, lock); 2359 if (instance == NULL) 2360 kassert_panic("%s: lock (%s) %s not locked", __func__, 2361 class->lc_name, lock->lo_name); 2362 lock->lo_witness->w_file = file; 2363 lock->lo_witness->w_line = line; 2364 if (instance == NULL) 2365 return; 2366 instance->li_file = file; 2367 instance->li_line = line; 2368 } 2369 2370 void 2371 witness_assert(const struct lock_object *lock, int flags, const char *file, 2372 int line) 2373 { 2374 #ifdef INVARIANT_SUPPORT 2375 struct lock_instance *instance; 2376 struct lock_class *class; 2377 2378 if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL) 2379 return; 2380 class = LOCK_CLASS(lock); 2381 if ((class->lc_flags & LC_SLEEPLOCK) != 0) 2382 instance = find_instance(curthread->td_sleeplocks, lock); 2383 else if ((class->lc_flags & LC_SPINLOCK) != 0) 2384 instance = find_instance(PCPU_GET(spinlocks), lock); 2385 else { 2386 kassert_panic("Lock (%s) %s is not sleep or spin!", 2387 class->lc_name, lock->lo_name); 2388 return; 2389 } 2390 switch (flags) { 2391 case LA_UNLOCKED: 2392 if (instance != NULL) 2393 kassert_panic("Lock (%s) %s locked @ %s:%d.", 2394 class->lc_name, lock->lo_name, 2395 fixup_filename(file), line); 2396 break; 2397 case LA_LOCKED: 2398 case LA_LOCKED | LA_RECURSED: 2399 case LA_LOCKED | LA_NOTRECURSED: 2400 case LA_SLOCKED: 2401 case LA_SLOCKED | LA_RECURSED: 2402 case LA_SLOCKED | LA_NOTRECURSED: 2403 case LA_XLOCKED: 2404 case LA_XLOCKED | LA_RECURSED: 2405 case LA_XLOCKED | LA_NOTRECURSED: 2406 if (instance == NULL) { 2407 kassert_panic("Lock (%s) %s not locked @ %s:%d.", 2408 class->lc_name, lock->lo_name, 2409 fixup_filename(file), line); 2410 break; 2411 } 2412 if ((flags & LA_XLOCKED) != 0 && 2413 (instance->li_flags & LI_EXCLUSIVE) == 0) 2414 kassert_panic( 2415 "Lock (%s) %s not exclusively locked @ %s:%d.", 2416 class->lc_name, lock->lo_name, 2417 fixup_filename(file), line); 2418 if ((flags & LA_SLOCKED) != 0 && 2419 (instance->li_flags & LI_EXCLUSIVE) != 0) 2420 kassert_panic( 2421 "Lock (%s) %s exclusively locked @ %s:%d.", 2422 class->lc_name, lock->lo_name, 2423 fixup_filename(file), line); 2424 if ((flags & LA_RECURSED) != 0 && 2425 (instance->li_flags & LI_RECURSEMASK) == 0) 2426 kassert_panic("Lock (%s) %s not recursed @ %s:%d.", 2427 class->lc_name, lock->lo_name, 2428 fixup_filename(file), line); 2429 if ((flags & LA_NOTRECURSED) != 0 && 2430 (instance->li_flags & LI_RECURSEMASK) != 0) 2431 kassert_panic("Lock (%s) %s recursed @ %s:%d.", 2432 class->lc_name, lock->lo_name, 2433 fixup_filename(file), line); 2434 break; 2435 default: 2436 kassert_panic("Invalid lock assertion at %s:%d.", 2437 fixup_filename(file), line); 2438 2439 } 2440 #endif /* INVARIANT_SUPPORT */ 2441 } 2442 2443 static void 2444 witness_setflag(struct lock_object *lock, int flag, int set) 2445 { 2446 struct lock_list_entry *lock_list; 2447 struct lock_instance *instance; 2448 struct lock_class *class; 2449 2450 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL) 2451 return; 2452 class = LOCK_CLASS(lock); 2453 if (class->lc_flags & LC_SLEEPLOCK) 
2454 lock_list = curthread->td_sleeplocks; 2455 else { 2456 if (witness_skipspin) 2457 return; 2458 lock_list = PCPU_GET(spinlocks); 2459 } 2460 instance = find_instance(lock_list, lock); 2461 if (instance == NULL) { 2462 kassert_panic("%s: lock (%s) %s not locked", __func__, 2463 class->lc_name, lock->lo_name); 2464 return; 2465 } 2466 2467 if (set) 2468 instance->li_flags |= flag; 2469 else 2470 instance->li_flags &= ~flag; 2471 } 2472 2473 void 2474 witness_norelease(struct lock_object *lock) 2475 { 2476 2477 witness_setflag(lock, LI_NORELEASE, 1); 2478 } 2479 2480 void 2481 witness_releaseok(struct lock_object *lock) 2482 { 2483 2484 witness_setflag(lock, LI_NORELEASE, 0); 2485 } 2486 2487 #ifdef DDB 2488 static void 2489 witness_ddb_list(struct thread *td) 2490 { 2491 2492 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__)); 2493 KASSERT(kdb_active, ("%s: not in the debugger", __func__)); 2494 2495 if (witness_watch < 1) 2496 return; 2497 2498 witness_list_locks(&td->td_sleeplocks, db_printf); 2499 2500 /* 2501 * We only handle spinlocks if td == curthread. This is somewhat broken 2502 * if td is currently executing on some other CPU and holds spin locks 2503 * as we won't display those locks. If we had a MI way of getting 2504 * the per-cpu data for a given cpu then we could use 2505 * td->td_oncpu to get the list of spinlocks for this thread 2506 * and "fix" this. 2507 * 2508 * That still wouldn't really fix this unless we locked the scheduler 2509 * lock or stopped the other CPU to make sure it wasn't changing the 2510 * list out from under us. It is probably best to just not try to 2511 * handle threads on other CPU's for now. 2512 */ 2513 if (td == curthread && PCPU_GET(spinlocks) != NULL) 2514 witness_list_locks(PCPU_PTR(spinlocks), db_printf); 2515 } 2516 2517 DB_SHOW_COMMAND(locks, db_witness_list) 2518 { 2519 struct thread *td; 2520 2521 if (have_addr) 2522 td = db_lookup_thread(addr, true); 2523 else 2524 td = kdb_thread; 2525 witness_ddb_list(td); 2526 } 2527 2528 DB_SHOW_ALL_COMMAND(locks, db_witness_list_all) 2529 { 2530 struct thread *td; 2531 struct proc *p; 2532 2533 /* 2534 * It would be nice to list only threads and processes that actually 2535 * held sleep locks, but that information is currently not exported 2536 * by WITNESS. 2537 */ 2538 FOREACH_PROC_IN_SYSTEM(p) { 2539 if (!witness_proc_has_locks(p)) 2540 continue; 2541 FOREACH_THREAD_IN_PROC(p, td) { 2542 if (!witness_thread_has_locks(td)) 2543 continue; 2544 db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid, 2545 p->p_comm, td, td->td_tid); 2546 witness_ddb_list(td); 2547 if (db_pager_quit) 2548 return; 2549 } 2550 } 2551 } 2552 DB_SHOW_ALIAS(alllocks, db_witness_list_all) 2553 2554 DB_SHOW_COMMAND(witness, db_witness_display) 2555 { 2556 2557 witness_ddb_display(db_printf); 2558 } 2559 #endif 2560 2561 static void 2562 sbuf_print_witness_badstacks(struct sbuf *sb, size_t *oldidx) 2563 { 2564 struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2; 2565 struct witness *tmp_w1, *tmp_w2, *w1, *w2; 2566 int generation, i, j; 2567 2568 tmp_data1 = NULL; 2569 tmp_data2 = NULL; 2570 tmp_w1 = NULL; 2571 tmp_w2 = NULL; 2572 2573 /* Allocate and init temporary storage space. 
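 * The witnesses and lock order data of interest are copied into these
 * buffers so that w_mtx can be dropped before the relatively slow sbuf
 * and stack-trace formatting is done.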
*/ 2574 tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO); 2575 tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO); 2576 tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP, 2577 M_WAITOK | M_ZERO); 2578 tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP, 2579 M_WAITOK | M_ZERO); 2580 stack_zero(&tmp_data1->wlod_stack); 2581 stack_zero(&tmp_data2->wlod_stack); 2582 2583 restart: 2584 mtx_lock_spin(&w_mtx); 2585 generation = w_generation; 2586 mtx_unlock_spin(&w_mtx); 2587 sbuf_printf(sb, "Number of known direct relationships is %d\n", 2588 w_lohash.wloh_count); 2589 for (i = 1; i < w_max_used_index; i++) { 2590 mtx_lock_spin(&w_mtx); 2591 if (generation != w_generation) { 2592 mtx_unlock_spin(&w_mtx); 2593 2594 /* The graph has changed, try again. */ 2595 *oldidx = 0; 2596 sbuf_clear(sb); 2597 goto restart; 2598 } 2599 2600 w1 = &w_data[i]; 2601 if (w1->w_reversed == 0) { 2602 mtx_unlock_spin(&w_mtx); 2603 continue; 2604 } 2605 2606 /* Copy w1 locally so we can release the spin lock. */ 2607 *tmp_w1 = *w1; 2608 mtx_unlock_spin(&w_mtx); 2609 2610 if (tmp_w1->w_reversed == 0) 2611 continue; 2612 for (j = 1; j < w_max_used_index; j++) { 2613 if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j) 2614 continue; 2615 2616 mtx_lock_spin(&w_mtx); 2617 if (generation != w_generation) { 2618 mtx_unlock_spin(&w_mtx); 2619 2620 /* The graph has changed, try again. */ 2621 *oldidx = 0; 2622 sbuf_clear(sb); 2623 goto restart; 2624 } 2625 2626 w2 = &w_data[j]; 2627 data1 = witness_lock_order_get(w1, w2); 2628 data2 = witness_lock_order_get(w2, w1); 2629 2630 /* 2631 * Copy information locally so we can release the 2632 * spin lock. 2633 */ 2634 *tmp_w2 = *w2; 2635 2636 if (data1) { 2637 stack_zero(&tmp_data1->wlod_stack); 2638 stack_copy(&data1->wlod_stack, 2639 &tmp_data1->wlod_stack); 2640 } 2641 if (data2 && data2 != data1) { 2642 stack_zero(&tmp_data2->wlod_stack); 2643 stack_copy(&data2->wlod_stack, 2644 &tmp_data2->wlod_stack); 2645 } 2646 mtx_unlock_spin(&w_mtx); 2647 2648 sbuf_printf(sb, 2649 "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n", 2650 tmp_w1->w_name, tmp_w1->w_class->lc_name, 2651 tmp_w2->w_name, tmp_w2->w_class->lc_name); 2652 if (data1) { 2653 sbuf_printf(sb, 2654 "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n", 2655 tmp_w1->w_name, tmp_w1->w_class->lc_name, 2656 tmp_w2->w_name, tmp_w2->w_class->lc_name); 2657 stack_sbuf_print(sb, &tmp_data1->wlod_stack); 2658 sbuf_printf(sb, "\n"); 2659 } 2660 if (data2 && data2 != data1) { 2661 sbuf_printf(sb, 2662 "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n", 2663 tmp_w2->w_name, tmp_w2->w_class->lc_name, 2664 tmp_w1->w_name, tmp_w1->w_class->lc_name); 2665 stack_sbuf_print(sb, &tmp_data2->wlod_stack); 2666 sbuf_printf(sb, "\n"); 2667 } 2668 } 2669 } 2670 mtx_lock_spin(&w_mtx); 2671 if (generation != w_generation) { 2672 mtx_unlock_spin(&w_mtx); 2673 2674 /* 2675 * The graph changed while we were printing stack data, 2676 * try again. 2677 */ 2678 *oldidx = 0; 2679 sbuf_clear(sb); 2680 goto restart; 2681 } 2682 mtx_unlock_spin(&w_mtx); 2683 2684 /* Free temporary storage space. 
*/ 2685 free(tmp_data1, M_TEMP); 2686 free(tmp_data2, M_TEMP); 2687 free(tmp_w1, M_TEMP); 2688 free(tmp_w2, M_TEMP); 2689 } 2690 2691 static int 2692 sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS) 2693 { 2694 struct sbuf *sb; 2695 int error; 2696 2697 if (witness_watch < 1) { 2698 error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning)); 2699 return (error); 2700 } 2701 if (witness_cold) { 2702 error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold)); 2703 return (error); 2704 } 2705 error = 0; 2706 sb = sbuf_new(NULL, NULL, badstack_sbuf_size, SBUF_AUTOEXTEND); 2707 if (sb == NULL) 2708 return (ENOMEM); 2709 2710 sbuf_print_witness_badstacks(sb, &req->oldidx); 2711 2712 sbuf_finish(sb); 2713 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1); 2714 sbuf_delete(sb); 2715 2716 return (error); 2717 } 2718 2719 #ifdef DDB 2720 static int 2721 sbuf_db_printf_drain(void *arg __unused, const char *data, int len) 2722 { 2723 2724 return (db_printf("%.*s", len, data)); 2725 } 2726 2727 DB_SHOW_COMMAND(badstacks, db_witness_badstacks) 2728 { 2729 struct sbuf sb; 2730 char buffer[128]; 2731 size_t dummy; 2732 2733 sbuf_new(&sb, buffer, sizeof(buffer), SBUF_FIXEDLEN); 2734 sbuf_set_drain(&sb, sbuf_db_printf_drain, NULL); 2735 sbuf_print_witness_badstacks(&sb, &dummy); 2736 sbuf_finish(&sb); 2737 } 2738 #endif 2739 2740 static int 2741 sysctl_debug_witness_channel(SYSCTL_HANDLER_ARGS) 2742 { 2743 static const struct { 2744 enum witness_channel channel; 2745 const char *name; 2746 } channels[] = { 2747 { WITNESS_CONSOLE, "console" }, 2748 { WITNESS_LOG, "log" }, 2749 { WITNESS_NONE, "none" }, 2750 }; 2751 char buf[16]; 2752 u_int i; 2753 int error; 2754 2755 buf[0] = '\0'; 2756 for (i = 0; i < nitems(channels); i++) 2757 if (witness_channel == channels[i].channel) { 2758 snprintf(buf, sizeof(buf), "%s", channels[i].name); 2759 break; 2760 } 2761 2762 error = sysctl_handle_string(oidp, buf, sizeof(buf), req); 2763 if (error != 0 || req->newptr == NULL) 2764 return (error); 2765 2766 error = EINVAL; 2767 for (i = 0; i < nitems(channels); i++) 2768 if (strcmp(channels[i].name, buf) == 0) { 2769 witness_channel = channels[i].channel; 2770 error = 0; 2771 break; 2772 } 2773 return (error); 2774 } 2775 2776 static int 2777 sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS) 2778 { 2779 struct witness *w; 2780 struct sbuf *sb; 2781 int error; 2782 2783 #ifdef __i386__ 2784 error = SYSCTL_OUT(req, w_notallowed, sizeof(w_notallowed)); 2785 return (error); 2786 #endif 2787 2788 if (witness_watch < 1) { 2789 error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning)); 2790 return (error); 2791 } 2792 if (witness_cold) { 2793 error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold)); 2794 return (error); 2795 } 2796 error = 0; 2797 2798 error = sysctl_wire_old_buffer(req, 0); 2799 if (error != 0) 2800 return (error); 2801 sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req); 2802 if (sb == NULL) 2803 return (ENOMEM); 2804 sbuf_printf(sb, "\n"); 2805 2806 mtx_lock_spin(&w_mtx); 2807 STAILQ_FOREACH(w, &w_all, w_list) 2808 w->w_displayed = 0; 2809 STAILQ_FOREACH(w, &w_all, w_list) 2810 witness_add_fullgraph(sb, w); 2811 mtx_unlock_spin(&w_mtx); 2812 2813 /* 2814 * Close the sbuf and return to userland. 
2815 */ 2816 error = sbuf_finish(sb); 2817 sbuf_delete(sb); 2818 2819 return (error); 2820 } 2821 2822 static int 2823 sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS) 2824 { 2825 int error, value; 2826 2827 value = witness_watch; 2828 error = sysctl_handle_int(oidp, &value, 0, req); 2829 if (error != 0 || req->newptr == NULL) 2830 return (error); 2831 if (value > 1 || value < -1 || 2832 (witness_watch == -1 && value != witness_watch)) 2833 return (EINVAL); 2834 witness_watch = value; 2835 return (0); 2836 } 2837 2838 static void 2839 witness_add_fullgraph(struct sbuf *sb, struct witness *w) 2840 { 2841 int i; 2842 2843 if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0)) 2844 return; 2845 w->w_displayed = 1; 2846 2847 WITNESS_INDEX_ASSERT(w->w_index); 2848 for (i = 1; i <= w_max_used_index; i++) { 2849 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) { 2850 sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name, 2851 w_data[i].w_name); 2852 witness_add_fullgraph(sb, &w_data[i]); 2853 } 2854 } 2855 } 2856 2857 /* 2858 * A simple hash function. Takes a key pointer and a key size. If size == 0, 2859 * interprets the key as a string and reads until the null 2860 * terminator. Otherwise, reads the first size bytes. Returns an unsigned 32-bit 2861 * hash value computed from the key. 2862 */ 2863 static uint32_t 2864 witness_hash_djb2(const uint8_t *key, uint32_t size) 2865 { 2866 unsigned int hash = 5381; 2867 int i; 2868 2869 /* hash = hash * 33 + key[i] */ 2870 if (size) 2871 for (i = 0; i < size; i++) 2872 hash = ((hash << 5) + hash) + (unsigned int)key[i]; 2873 else 2874 for (i = 0; key[i] != 0; i++) 2875 hash = ((hash << 5) + hash) + (unsigned int)key[i]; 2876 2877 return (hash); 2878 } 2879 2880 2881 /* 2882 * Initializes the two witness hash tables. Called exactly once from 2883 * witness_initialize(). 2884 */ 2885 static void 2886 witness_init_hash_tables(void) 2887 { 2888 int i; 2889 2890 MPASS(witness_cold); 2891 2892 /* Initialize the hash tables. */ 2893 for (i = 0; i < WITNESS_HASH_SIZE; i++) 2894 w_hash.wh_array[i] = NULL; 2895 2896 w_hash.wh_size = WITNESS_HASH_SIZE; 2897 w_hash.wh_count = 0; 2898 2899 /* Initialize the lock order data hash. 
*/ 2900 w_lofree = NULL; 2901 for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) { 2902 memset(&w_lodata[i], 0, sizeof(w_lodata[i])); 2903 w_lodata[i].wlod_next = w_lofree; 2904 w_lofree = &w_lodata[i]; 2905 } 2906 w_lohash.wloh_size = WITNESS_LO_HASH_SIZE; 2907 w_lohash.wloh_count = 0; 2908 for (i = 0; i < WITNESS_LO_HASH_SIZE; i++) 2909 w_lohash.wloh_array[i] = NULL; 2910 } 2911 2912 static struct witness * 2913 witness_hash_get(const char *key) 2914 { 2915 struct witness *w; 2916 uint32_t hash; 2917 2918 MPASS(key != NULL); 2919 if (witness_cold == 0) 2920 mtx_assert(&w_mtx, MA_OWNED); 2921 hash = witness_hash_djb2(key, 0) % w_hash.wh_size; 2922 w = w_hash.wh_array[hash]; 2923 while (w != NULL) { 2924 if (strcmp(w->w_name, key) == 0) 2925 goto out; 2926 w = w->w_hash_next; 2927 } 2928 2929 out: 2930 return (w); 2931 } 2932 2933 static void 2934 witness_hash_put(struct witness *w) 2935 { 2936 uint32_t hash; 2937 2938 MPASS(w != NULL); 2939 MPASS(w->w_name != NULL); 2940 if (witness_cold == 0) 2941 mtx_assert(&w_mtx, MA_OWNED); 2942 KASSERT(witness_hash_get(w->w_name) == NULL, 2943 ("%s: trying to add a hash entry that already exists!", __func__)); 2944 KASSERT(w->w_hash_next == NULL, 2945 ("%s: w->w_hash_next != NULL", __func__)); 2946 2947 hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size; 2948 w->w_hash_next = w_hash.wh_array[hash]; 2949 w_hash.wh_array[hash] = w; 2950 w_hash.wh_count++; 2951 } 2952 2953 2954 static struct witness_lock_order_data * 2955 witness_lock_order_get(struct witness *parent, struct witness *child) 2956 { 2957 struct witness_lock_order_data *data = NULL; 2958 struct witness_lock_order_key key; 2959 unsigned int hash; 2960 2961 MPASS(parent != NULL && child != NULL); 2962 key.from = parent->w_index; 2963 key.to = child->w_index; 2964 WITNESS_INDEX_ASSERT(key.from); 2965 WITNESS_INDEX_ASSERT(key.to); 2966 if ((w_rmatrix[parent->w_index][child->w_index] 2967 & WITNESS_LOCK_ORDER_KNOWN) == 0) 2968 goto out; 2969 2970 hash = witness_hash_djb2((const char*)&key, 2971 sizeof(key)) % w_lohash.wloh_size; 2972 data = w_lohash.wloh_array[hash]; 2973 while (data != NULL) { 2974 if (witness_lock_order_key_equal(&data->wlod_key, &key)) 2975 break; 2976 data = data->wlod_next; 2977 } 2978 2979 out: 2980 return (data); 2981 } 2982 2983 /* 2984 * Verify that parent and child have a known relationship, are not the same, 2985 * and child is actually a child of parent. This is done without w_mtx 2986 * to avoid contention in the common case. 
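 * The relationship bits read here are only ever set, never cleared, while
 * witness is active, so an unlocked read can at worst miss a recently
 * recorded order; a caller that then falls back to the locked path still
 * gets a correct answer.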
2987 */ 2988 static int 2989 witness_lock_order_check(struct witness *parent, struct witness *child) 2990 { 2991 2992 if (parent != child && 2993 w_rmatrix[parent->w_index][child->w_index] 2994 & WITNESS_LOCK_ORDER_KNOWN && 2995 isitmychild(parent, child)) 2996 return (1); 2997 2998 return (0); 2999 } 3000 3001 static int 3002 witness_lock_order_add(struct witness *parent, struct witness *child) 3003 { 3004 struct witness_lock_order_data *data = NULL; 3005 struct witness_lock_order_key key; 3006 unsigned int hash; 3007 3008 MPASS(parent != NULL && child != NULL); 3009 key.from = parent->w_index; 3010 key.to = child->w_index; 3011 WITNESS_INDEX_ASSERT(key.from); 3012 WITNESS_INDEX_ASSERT(key.to); 3013 if (w_rmatrix[parent->w_index][child->w_index] 3014 & WITNESS_LOCK_ORDER_KNOWN) 3015 return (1); 3016 3017 hash = witness_hash_djb2((const char*)&key, 3018 sizeof(key)) % w_lohash.wloh_size; 3019 w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN; 3020 data = w_lofree; 3021 if (data == NULL) 3022 return (0); 3023 w_lofree = data->wlod_next; 3024 data->wlod_next = w_lohash.wloh_array[hash]; 3025 data->wlod_key = key; 3026 w_lohash.wloh_array[hash] = data; 3027 w_lohash.wloh_count++; 3028 stack_zero(&data->wlod_stack); 3029 stack_save(&data->wlod_stack); 3030 return (1); 3031 } 3032 3033 /* Call this whenever the structure of the witness graph changes. */ 3034 static void 3035 witness_increment_graph_generation(void) 3036 { 3037 3038 if (witness_cold == 0) 3039 mtx_assert(&w_mtx, MA_OWNED); 3040 w_generation++; 3041 } 3042 3043 static int 3044 witness_output_drain(void *arg __unused, const char *data, int len) 3045 { 3046 3047 witness_output("%.*s", len, data); 3048 return (len); 3049 } 3050 3051 static void 3052 witness_debugger(int cond, const char *msg) 3053 { 3054 char buf[32]; 3055 struct sbuf sb; 3056 struct stack st; 3057 3058 if (!cond) 3059 return; 3060 3061 if (witness_trace) { 3062 sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); 3063 sbuf_set_drain(&sb, witness_output_drain, NULL); 3064 3065 stack_zero(&st); 3066 stack_save(&st); 3067 witness_output("stack backtrace:\n"); 3068 stack_sbuf_print_ddb(&sb, &st); 3069 3070 sbuf_finish(&sb); 3071 } 3072 3073 #ifdef KDB 3074 if (witness_kdb) 3075 kdb_enter(KDB_WHY_WITNESS, msg); 3076 #endif 3077 } 3078