1 /*- 2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 3. Berkeley Software Design Inc's name may not be used to endorse or 13 * promote products derived from this software without specific prior 14 * written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $ 29 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $ 30 */ 31 32 /* 33 * Implementation of the `witness' lock verifier. Originally implemented for 34 * mutexes in BSD/OS. Extended to handle generic lock objects and lock 35 * classes in FreeBSD. 36 */ 37 38 /* 39 * Main Entry: witness 40 * Pronunciation: 'wit-n&s 41 * Function: noun 42 * Etymology: Middle English witnesse, from Old English witnes knowledge, 43 * testimony, witness, from 2wit 44 * Date: before 12th century 45 * 1 : attestation of a fact or event : TESTIMONY 46 * 2 : one that gives evidence; specifically : one who testifies in 47 * a cause or before a judicial tribunal 48 * 3 : one asked to be present at a transaction so as to be able to 49 * testify to its having taken place 50 * 4 : one who has personal knowledge of something 51 * 5 a : something serving as evidence or proof : SIGN 52 * b : public affirmation by word or example of usually 53 * religious faith or conviction <the heroic witness to divine 54 * life -- Pilot> 55 * 6 capitalized : a member of the Jehovah's Witnesses 56 */ 57 58 /* 59 * Special rules concerning Giant and lock orders: 60 * 61 * 1) Giant must be acquired before any other mutexes. Stated another way, 62 * no other mutex may be held when Giant is acquired. 63 * 64 * 2) Giant must be released when blocking on a sleepable lock. 65 * 66 * This rule is less obvious, but is a result of Giant providing the same 67 * semantics as spl(). Basically, when a thread sleeps, it must release 68 * Giant. When a thread blocks on a sleepable lock, it sleeps. Hence rule 69 * 2). 70 * 71 * 3) Giant may be acquired before or after sleepable locks. 72 * 73 * This rule is also not quite as obvious. Giant may be acquired after 74 * a sleepable lock because it is a non-sleepable lock and non-sleepable 75 * locks may always be acquired while holding a sleepable lock. 
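 *
 *    As an illustrative sketch of that first case (the sx lock name here is
 *    hypothetical, not from this file), a thread may safely do:
 *
 *		sx_xlock(&some_sx);	(sleepable lock acquired first)
 *		mtx_lock(&Giant);	(non-sleepable Giant acquired afterwards)
 *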
The second 76 * case, Giant before a sleepable lock, follows from rule 2) above. Suppose 77 * you have two threads T1 and T2 and a sleepable lock X. Suppose that T1 78 * acquires X and blocks on Giant. Then suppose that T2 acquires Giant and 79 * blocks on X. When T2 blocks on X, T2 will release Giant allowing T1 to 80 * execute. Thus, acquiring Giant both before and after a sleepable lock 81 * will not result in a lock order reversal. 82 */ 83 84 #include <sys/cdefs.h> 85 __FBSDID("$FreeBSD$"); 86 87 #include "opt_ddb.h" 88 #include "opt_witness.h" 89 90 #include <sys/param.h> 91 #include <sys/bus.h> 92 #include <sys/kdb.h> 93 #include <sys/kernel.h> 94 #include <sys/ktr.h> 95 #include <sys/lock.h> 96 #include <sys/malloc.h> 97 #include <sys/mutex.h> 98 #include <sys/proc.h> 99 #include <sys/sysctl.h> 100 #include <sys/systm.h> 101 102 #include <ddb/ddb.h> 103 104 #include <machine/stdarg.h> 105 106 /* Define this to check for blessed mutexes */ 107 #undef BLESSING 108 109 #define WITNESS_COUNT 200 110 #define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4) 111 /* 112 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads 113 * will hold LOCK_NCHILDREN * 2 locks. We handle failure ok, and we should 114 * probably be safe for the most part, but it's still a SWAG. 115 */ 116 #define LOCK_CHILDCOUNT (MAXCPU + 1024) * 2 117 118 #define WITNESS_NCHILDREN 6 119 120 struct witness_child_list_entry; 121 122 struct witness { 123 const char *w_name; 124 struct lock_class *w_class; 125 STAILQ_ENTRY(witness) w_list; /* List of all witnesses. */ 126 STAILQ_ENTRY(witness) w_typelist; /* Witnesses of a type. */ 127 struct witness_child_list_entry *w_children; /* Great evilness... */ 128 const char *w_file; 129 int w_line; 130 u_int w_level; 131 u_int w_refcount; 132 u_char w_Giant_squawked:1; 133 u_char w_other_squawked:1; 134 u_char w_same_squawked:1; 135 u_char w_displayed:1; 136 }; 137 138 struct witness_child_list_entry { 139 struct witness_child_list_entry *wcl_next; 140 struct witness *wcl_children[WITNESS_NCHILDREN]; 141 u_int wcl_count; 142 }; 143 144 STAILQ_HEAD(witness_list, witness); 145 146 #ifdef BLESSING 147 struct witness_blessed { 148 const char *b_lock1; 149 const char *b_lock2; 150 }; 151 #endif 152 153 struct witness_order_list_entry { 154 const char *w_name; 155 struct lock_class *w_class; 156 }; 157 158 #ifdef BLESSING 159 static int blessed(struct witness *, struct witness *); 160 #endif 161 static int depart(struct witness *w); 162 static struct witness *enroll(const char *description, 163 struct lock_class *lock_class); 164 static int insertchild(struct witness *parent, struct witness *child); 165 static int isitmychild(struct witness *parent, struct witness *child); 166 static int isitmydescendant(struct witness *parent, struct witness *child); 167 static int itismychild(struct witness *parent, struct witness *child); 168 static int rebalancetree(struct witness_list *list); 169 static void removechild(struct witness *parent, struct witness *child); 170 static int reparentchildren(struct witness *newparent, 171 struct witness *oldparent); 172 static int sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS); 173 static void witness_displaydescendants(void(*)(const char *fmt, ...), 174 struct witness *, int indent); 175 static const char *fixup_filename(const char *file); 176 static void witness_leveldescendents(struct witness *parent, int level); 177 static void witness_levelall(void); 178 static struct witness *witness_get(void); 179 static void witness_free(struct witness 
    *m);
static struct witness_child_list_entry *witness_child_get(void);
static void witness_child_free(struct witness_child_list_entry *wcl);
static struct lock_list_entry *witness_lock_list_get(void);
static void witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_instance *find_instance(struct lock_list_entry *lock_list,
    struct lock_object *lock);
static void witness_list_lock(struct lock_instance *instance);
#ifdef DDB
static void witness_list(struct thread *td);
static void witness_display_list(void(*prnt)(const char *fmt, ...),
    struct witness_list *list);
static void witness_display(void(*)(const char *fmt, ...));
#endif

MALLOC_DEFINE(M_WITNESS, "witness", "witness structure");

/*
 * If set to 0, witness is disabled.  If set to 1, witness performs full lock
 * order checking for all locks.  If set to 2 or higher, then witness skips
 * the full lock order check if the lock being acquired is at a higher level
 * (i.e. farther down in the tree) than the current lock.  This last mode is
 * somewhat experimental and not considered fully safe.  At runtime, this
 * value may be set to 0 to turn off witness.  witness is not allowed to be
 * turned on once it is turned off, however.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness_watch", &witness_watch);
SYSCTL_PROC(_debug, OID_AUTO, witness_watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is set to 1, it will cause the system
 * to drop into the kernel debugger when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness_kdb", &witness_kdb);
SYSCTL_INT(_debug, OID_AUTO, witness_kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is set to 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
230 */ 231 int witness_trace = 1; 232 TUNABLE_INT("debug.witness_trace", &witness_trace); 233 SYSCTL_INT(_debug, OID_AUTO, witness_trace, CTLFLAG_RW, &witness_trace, 0, ""); 234 #endif /* KDB */ 235 236 #ifdef WITNESS_SKIPSPIN 237 int witness_skipspin = 1; 238 #else 239 int witness_skipspin = 0; 240 #endif 241 TUNABLE_INT("debug.witness_skipspin", &witness_skipspin); 242 SYSCTL_INT(_debug, OID_AUTO, witness_skipspin, CTLFLAG_RDTUN, &witness_skipspin, 0, 243 ""); 244 245 static struct mtx w_mtx; 246 static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free); 247 static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all); 248 static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin); 249 static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep); 250 static struct witness_child_list_entry *w_child_free = NULL; 251 static struct lock_list_entry *w_lock_list_free = NULL; 252 253 static struct witness w_data[WITNESS_COUNT]; 254 static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT]; 255 static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT]; 256 257 static struct witness_order_list_entry order_lists[] = { 258 { "proctree", &lock_class_sx }, 259 { "allproc", &lock_class_sx }, 260 { "Giant", &lock_class_mtx_sleep }, 261 { "filedesc structure", &lock_class_mtx_sleep }, 262 { "pipe mutex", &lock_class_mtx_sleep }, 263 { "sigio lock", &lock_class_mtx_sleep }, 264 { "process group", &lock_class_mtx_sleep }, 265 { "process lock", &lock_class_mtx_sleep }, 266 { "session", &lock_class_mtx_sleep }, 267 { "uidinfo hash", &lock_class_mtx_sleep }, 268 { "uidinfo struct", &lock_class_mtx_sleep }, 269 { "allprison", &lock_class_mtx_sleep }, 270 { NULL, NULL }, 271 /* 272 * Sockets 273 */ 274 { "filedesc structure", &lock_class_mtx_sleep }, 275 { "accept", &lock_class_mtx_sleep }, 276 { "so_snd", &lock_class_mtx_sleep }, 277 { "so_rcv", &lock_class_mtx_sleep }, 278 { "sellck", &lock_class_mtx_sleep }, 279 { NULL, NULL }, 280 /* 281 * Routing 282 */ 283 { "so_rcv", &lock_class_mtx_sleep }, 284 { "radix node head", &lock_class_mtx_sleep }, 285 { "rtentry", &lock_class_mtx_sleep }, 286 { "ifaddr", &lock_class_mtx_sleep }, 287 { NULL, NULL }, 288 /* 289 * UNIX Domain Sockets 290 */ 291 { "unp", &lock_class_mtx_sleep }, 292 { "so_snd", &lock_class_mtx_sleep }, 293 { NULL, NULL }, 294 /* 295 * UDP/IP 296 */ 297 { "udp", &lock_class_mtx_sleep }, 298 { "udpinp", &lock_class_mtx_sleep }, 299 { "so_snd", &lock_class_mtx_sleep }, 300 { NULL, NULL }, 301 /* 302 * TCP/IP 303 */ 304 { "tcp", &lock_class_mtx_sleep }, 305 { "tcpinp", &lock_class_mtx_sleep }, 306 { "so_snd", &lock_class_mtx_sleep }, 307 { NULL, NULL }, 308 /* 309 * SLIP 310 */ 311 { "slip_mtx", &lock_class_mtx_sleep }, 312 { "slip sc_mtx", &lock_class_mtx_sleep }, 313 { NULL, NULL }, 314 /* 315 * netatalk 316 */ 317 { "ddp_list_mtx", &lock_class_mtx_sleep }, 318 { "ddp_mtx", &lock_class_mtx_sleep }, 319 { NULL, NULL }, 320 /* 321 * spin locks 322 */ 323 #ifdef SMP 324 { "ap boot", &lock_class_mtx_spin }, 325 #endif 326 { "sio", &lock_class_mtx_spin }, 327 #ifdef __i386__ 328 { "cy", &lock_class_mtx_spin }, 329 #endif 330 { "uart_hwmtx", &lock_class_mtx_spin }, 331 { "sabtty", &lock_class_mtx_spin }, 332 { "zstty", &lock_class_mtx_spin }, 333 { "ng_node", &lock_class_mtx_spin }, 334 { "ng_worklist", &lock_class_mtx_spin }, 335 { "taskqueue_fast", &lock_class_mtx_spin }, 336 { "intr table", &lock_class_mtx_spin }, 337 { "ithread table lock", &lock_class_mtx_spin }, 338 { "sleepq chain", 
&lock_class_mtx_spin }, 339 { "sched lock", &lock_class_mtx_spin }, 340 { "turnstile chain", &lock_class_mtx_spin }, 341 { "td_contested", &lock_class_mtx_spin }, 342 { "callout", &lock_class_mtx_spin }, 343 { "entropy harvest", &lock_class_mtx_spin }, 344 { "entropy harvest buffers", &lock_class_mtx_spin }, 345 /* 346 * leaf locks 347 */ 348 { "allpmaps", &lock_class_mtx_spin }, 349 { "vm page queue free mutex", &lock_class_mtx_spin }, 350 { "icu", &lock_class_mtx_spin }, 351 #ifdef SMP 352 { "smp rendezvous", &lock_class_mtx_spin }, 353 #if defined(__i386__) || defined(__amd64__) 354 { "tlb", &lock_class_mtx_spin }, 355 { "lazypmap", &lock_class_mtx_spin }, 356 #endif 357 #ifdef __sparc64__ 358 { "ipi", &lock_class_mtx_spin }, 359 #endif 360 #endif 361 { "clk", &lock_class_mtx_spin }, 362 { "mutex profiling lock", &lock_class_mtx_spin }, 363 { "kse zombie lock", &lock_class_mtx_spin }, 364 { "ALD Queue", &lock_class_mtx_spin }, 365 #ifdef __ia64__ 366 { "MCA spin lock", &lock_class_mtx_spin }, 367 #endif 368 #if defined(__i386__) || defined(__amd64__) 369 { "pcicfg", &lock_class_mtx_spin }, 370 #endif 371 { NULL, NULL }, 372 { NULL, NULL } 373 }; 374 375 #ifdef BLESSING 376 /* 377 * Pairs of locks which have been blessed 378 * Don't complain about order problems with blessed locks 379 */ 380 static struct witness_blessed blessed_list[] = { 381 }; 382 static int blessed_count = 383 sizeof(blessed_list) / sizeof(struct witness_blessed); 384 #endif 385 386 /* 387 * List of all locks in the system. 388 */ 389 TAILQ_HEAD(, lock_object) all_locks = TAILQ_HEAD_INITIALIZER(all_locks); 390 391 static struct mtx all_mtx = { 392 { &lock_class_mtx_sleep, /* mtx_object.lo_class */ 393 "All locks list", /* mtx_object.lo_name */ 394 "All locks list", /* mtx_object.lo_type */ 395 LO_INITIALIZED, /* mtx_object.lo_flags */ 396 { NULL, NULL }, /* mtx_object.lo_list */ 397 NULL }, /* mtx_object.lo_witness */ 398 MTX_UNOWNED, 0 /* mtx_lock, mtx_recurse */ 399 }; 400 401 /* 402 * This global is set to 0 once it becomes safe to use the witness code. 403 */ 404 static int witness_cold = 1; 405 406 /* 407 * Global variables for book keeping. 408 */ 409 static int lock_cur_cnt; 410 static int lock_max_cnt; 411 412 /* 413 * The WITNESS-enabled diagnostic code. 414 */ 415 static void 416 witness_initialize(void *dummy __unused) 417 { 418 struct lock_object *lock; 419 struct witness_order_list_entry *order; 420 struct witness *w, *w1; 421 int i; 422 423 /* 424 * We have to release Giant before initializing its witness 425 * structure so that WITNESS doesn't get confused. 426 */ 427 mtx_unlock(&Giant); 428 mtx_assert(&Giant, MA_NOTOWNED); 429 430 CTR1(KTR_WITNESS, "%s: initializing witness", __func__); 431 TAILQ_INSERT_HEAD(&all_locks, &all_mtx.mtx_object, lo_list); 432 mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET | 433 MTX_NOWITNESS); 434 for (i = 0; i < WITNESS_COUNT; i++) 435 witness_free(&w_data[i]); 436 for (i = 0; i < WITNESS_CHILDCOUNT; i++) 437 witness_child_free(&w_childdata[i]); 438 for (i = 0; i < LOCK_CHILDCOUNT; i++) 439 witness_lock_list_free(&w_locklistdata[i]); 440 441 /* First add in all the specified order lists. 
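	 * Each NULL-terminated group of entries in order_lists[] above
	 * becomes a parent -> child chain in the witness tree, e.g. the
	 * first group records "proctree" before "allproc" before "Giant".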
*/ 442 for (order = order_lists; order->w_name != NULL; order++) { 443 w = enroll(order->w_name, order->w_class); 444 if (w == NULL) 445 continue; 446 w->w_file = "order list"; 447 for (order++; order->w_name != NULL; order++) { 448 w1 = enroll(order->w_name, order->w_class); 449 if (w1 == NULL) 450 continue; 451 w1->w_file = "order list"; 452 if (!itismychild(w, w1)) 453 panic("Not enough memory for static orders!"); 454 w = w1; 455 } 456 } 457 458 /* Iterate through all locks and add them to witness. */ 459 mtx_lock(&all_mtx); 460 TAILQ_FOREACH(lock, &all_locks, lo_list) { 461 if (lock->lo_flags & LO_WITNESS) 462 lock->lo_witness = enroll(lock->lo_type, 463 lock->lo_class); 464 else 465 lock->lo_witness = NULL; 466 } 467 mtx_unlock(&all_mtx); 468 469 /* Mark the witness code as being ready for use. */ 470 atomic_store_rel_int(&witness_cold, 0); 471 472 mtx_lock(&Giant); 473 } 474 SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL) 475 476 static int 477 sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS) 478 { 479 int error, value; 480 481 value = witness_watch; 482 error = sysctl_handle_int(oidp, &value, 0, req); 483 if (error != 0 || req->newptr == NULL) 484 return (error); 485 error = suser(req->td); 486 if (error != 0) 487 return (error); 488 if (value == witness_watch) 489 return (0); 490 if (value != 0) 491 return (EINVAL); 492 witness_watch = 0; 493 return (0); 494 } 495 496 void 497 witness_init(struct lock_object *lock) 498 { 499 struct lock_class *class; 500 501 class = lock->lo_class; 502 if (lock->lo_flags & LO_INITIALIZED) 503 panic("%s: lock (%s) %s is already initialized", __func__, 504 class->lc_name, lock->lo_name); 505 if ((lock->lo_flags & LO_RECURSABLE) != 0 && 506 (class->lc_flags & LC_RECURSABLE) == 0) 507 panic("%s: lock (%s) %s can not be recursable", __func__, 508 class->lc_name, lock->lo_name); 509 if ((lock->lo_flags & LO_SLEEPABLE) != 0 && 510 (class->lc_flags & LC_SLEEPABLE) == 0) 511 panic("%s: lock (%s) %s can not be sleepable", __func__, 512 class->lc_name, lock->lo_name); 513 if ((lock->lo_flags & LO_UPGRADABLE) != 0 && 514 (class->lc_flags & LC_UPGRADABLE) == 0) 515 panic("%s: lock (%s) %s can not be upgradable", __func__, 516 class->lc_name, lock->lo_name); 517 518 mtx_lock(&all_mtx); 519 TAILQ_INSERT_TAIL(&all_locks, lock, lo_list); 520 lock->lo_flags |= LO_INITIALIZED; 521 lock_cur_cnt++; 522 if (lock_cur_cnt > lock_max_cnt) 523 lock_max_cnt = lock_cur_cnt; 524 mtx_unlock(&all_mtx); 525 if (!witness_cold && witness_watch != 0 && panicstr == NULL && 526 (lock->lo_flags & LO_WITNESS) != 0) 527 lock->lo_witness = enroll(lock->lo_type, class); 528 else 529 lock->lo_witness = NULL; 530 } 531 532 void 533 witness_destroy(struct lock_object *lock) 534 { 535 struct witness *w; 536 537 if (witness_cold) 538 panic("lock (%s) %s destroyed while witness_cold", 539 lock->lo_class->lc_name, lock->lo_name); 540 if ((lock->lo_flags & LO_INITIALIZED) == 0) 541 panic("%s: lock (%s) %s is not initialized", __func__, 542 lock->lo_class->lc_name, lock->lo_name); 543 544 /* XXX: need to verify that no one holds the lock */ 545 w = lock->lo_witness; 546 if (w != NULL) { 547 mtx_lock_spin(&w_mtx); 548 MPASS(w->w_refcount > 0); 549 w->w_refcount--; 550 551 /* 552 * Lock is already released if we have an allocation failure 553 * and depart() fails. 
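		 * (On that failure path the allocator routines drop w_mtx
		 * themselves and disable witness, so depart() returns 0
		 * with the mutex already released.)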
		 */
		if (w->w_refcount != 0 || depart(w))
			mtx_unlock_spin(&w_mtx);
	}

	mtx_lock(&all_mtx);
	lock_cur_cnt--;
	TAILQ_REMOVE(&all_locks, lock, lo_list);
	lock->lo_flags &= ~LO_INITIALIZED;
	mtx_unlock(&all_mtx);
}

#ifdef DDB
static void
witness_display_list(void(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_level > 0)
			continue;
		/*
		 * This lock has no ancestors, display its descendants.
		 */
		witness_displaydescendants(prnt, w, 0);
	}
}

static void
witness_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	witness_levelall();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_displayed = 0;
	}

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s\n", w->w_name);
	}
}
#endif /* DDB */

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == 0 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_type, lock1->lo_type);
	if (!itismychild(lock1->lo_witness, lock2->lo_witness))
		return (ENOMEM);
	mtx_unlock_spin(&w_mtx);
	return (0);
}

void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1, *lock2;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.  This
	 * function shouldn't even be called for try locks, so panic if
	 * that happens.
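	 * (A try acquire that succeeds is still added to the thread's lock
	 * list by witness_lock(); only the order check here is skipped.)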
	 */
	if (flags & LOP_TRYLOCK)
		panic("%s should not be called for try lock operations",
		    __func__);

	w = lock->lo_witness;
	class = lock->lo_class;
	td = curthread;
	file = fixup_filename(file);

	if (class->lc_flags & LC_SLEEPLOCK) {
		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		if (td->td_sleeplocks == NULL)
			return;
		lock_list = &td->td_sleeplocks;
	} else {
		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  We check this in both if clauses
		 * here as unifying the check would require us to use a
		 * critical section to ensure we don't migrate while doing
		 * the check.  Note that if this is not the first lock, we
		 * are already in a critical section and are safe for the
		 * rest of the check.
		 */
		if (PCPU_GET(spinlocks) == NULL)
			return;
		lock_list = PCPU_PTR(spinlocks);
	}

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(*lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.
	 */
	if (flags & LOP_TRYLOCK)
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	w1 = lock1->li_lock->lo_witness;
	if (w1 == w) {
		if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK))
			return;
		w->w_same_squawked = 1;
		printf("acquiring duplicate lock of same type: \"%s\"\n",
		    lock->lo_type);
		printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
		    lock1->li_file, lock1->li_line);
		printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
#ifdef KDB
		goto debugger;
#else
		return;
#endif
	}
	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);
	/*
	 * If the lock we are acquiring has a known higher level number,
	 * just say ok.
	 */
	if (witness_watch > 1 && w->w_level > w1->w_level) {
		mtx_unlock_spin(&w_mtx);
		return;
	}
	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
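	 * For example, acquiring "so_rcv" while "so_snd" was the most
	 * recently acquired lock matches the static socket ordering above,
	 * so the full scan of the held-lock list below can be skipped.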
	 */
	if (isitmydescendant(w1, w)) {
		mtx_unlock_spin(&w_mtx);
		return;
	}
	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];
			w1 = lock1->li_lock->lo_witness;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}
			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (!((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				/*
				 * Check the lock order hierarchy for a
				 * reversal.
				 */
				if (!isitmydescendant(w, w1))
					continue;
			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
			mtx_unlock_spin(&w_mtx);
#ifdef BLESSING
			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				return;
#endif
			if (lock1->li_lock == &Giant.mtx_object) {
				if (w1->w_Giant_squawked)
					return;
				else
					w1->w_Giant_squawked = 1;
			} else {
				if (w1->w_other_squawked)
					return;
				else
					w1->w_other_squawked = 1;
			}
			/*
			 * Ok, yell about it.
			 */
			printf("lock order reversal\n");
			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_type, lock2->li_file,
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			}
#ifdef KDB
			goto debugger;
#else
			return;
#endif
		}
	}
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
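	 * For instance, an sx lock such as "proctree" may be recorded as
	 * coming before Giant, but Giant is never recorded before a
	 * sleepable lock.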
923 */ 924 if (flags & LOP_NEWORDER && 925 !(lock1->li_lock == &Giant.mtx_object && 926 (lock->lo_flags & LO_SLEEPABLE) != 0)) { 927 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__, 928 lock->lo_type, lock1->li_lock->lo_type); 929 if (!itismychild(lock1->li_lock->lo_witness, w)) 930 /* Witness is dead. */ 931 return; 932 } 933 mtx_unlock_spin(&w_mtx); 934 return; 935 936 #ifdef KDB 937 debugger: 938 if (witness_trace) 939 kdb_backtrace(); 940 if (witness_kdb) 941 kdb_enter(__func__); 942 #endif 943 } 944 945 void 946 witness_lock(struct lock_object *lock, int flags, const char *file, int line) 947 { 948 struct lock_list_entry **lock_list, *lle; 949 struct lock_instance *instance; 950 struct witness *w; 951 struct thread *td; 952 953 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL || 954 panicstr != NULL) 955 return; 956 w = lock->lo_witness; 957 td = curthread; 958 file = fixup_filename(file); 959 960 /* Determine lock list for this lock. */ 961 if (lock->lo_class->lc_flags & LC_SLEEPLOCK) 962 lock_list = &td->td_sleeplocks; 963 else 964 lock_list = PCPU_PTR(spinlocks); 965 966 /* Check to see if we are recursing on a lock we already own. */ 967 instance = find_instance(*lock_list, lock); 968 if (instance != NULL) { 969 instance->li_flags++; 970 CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__, 971 td->td_proc->p_pid, lock->lo_name, 972 instance->li_flags & LI_RECURSEMASK); 973 instance->li_file = file; 974 instance->li_line = line; 975 return; 976 } 977 978 /* Update per-witness last file and line acquire. */ 979 w->w_file = file; 980 w->w_line = line; 981 982 /* Find the next open lock instance in the list and fill it. */ 983 lle = *lock_list; 984 if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) { 985 lle = witness_lock_list_get(); 986 if (lle == NULL) 987 return; 988 lle->ll_next = *lock_list; 989 CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__, 990 td->td_proc->p_pid, lle); 991 *lock_list = lle; 992 } 993 instance = &lle->ll_children[lle->ll_count++]; 994 instance->li_lock = lock; 995 instance->li_line = line; 996 instance->li_file = file; 997 if ((flags & LOP_EXCLUSIVE) != 0) 998 instance->li_flags = LI_EXCLUSIVE; 999 else 1000 instance->li_flags = 0; 1001 CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__, 1002 td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1); 1003 } 1004 1005 void 1006 witness_upgrade(struct lock_object *lock, int flags, const char *file, int line) 1007 { 1008 struct lock_instance *instance; 1009 struct lock_class *class; 1010 1011 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1012 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1013 return; 1014 class = lock->lo_class; 1015 file = fixup_filename(file); 1016 if ((lock->lo_flags & LO_UPGRADABLE) == 0) 1017 panic("upgrade of non-upgradable lock (%s) %s @ %s:%d", 1018 class->lc_name, lock->lo_name, file, line); 1019 if ((flags & LOP_TRYLOCK) == 0) 1020 panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name, 1021 lock->lo_name, file, line); 1022 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0) 1023 panic("upgrade of non-sleep lock (%s) %s @ %s:%d", 1024 class->lc_name, lock->lo_name, file, line); 1025 instance = find_instance(curthread->td_sleeplocks, lock); 1026 if (instance == NULL) 1027 panic("upgrade of unlocked lock (%s) %s @ %s:%d", 1028 class->lc_name, lock->lo_name, file, line); 1029 if ((instance->li_flags & LI_EXCLUSIVE) != 0) 1030 panic("upgrade of exclusive lock (%s) %s @ %s:%d", 1031 
class->lc_name, lock->lo_name, file, line); 1032 if ((instance->li_flags & LI_RECURSEMASK) != 0) 1033 panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d", 1034 class->lc_name, lock->lo_name, 1035 instance->li_flags & LI_RECURSEMASK, file, line); 1036 instance->li_flags |= LI_EXCLUSIVE; 1037 } 1038 1039 void 1040 witness_downgrade(struct lock_object *lock, int flags, const char *file, 1041 int line) 1042 { 1043 struct lock_instance *instance; 1044 struct lock_class *class; 1045 1046 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1047 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1048 return; 1049 class = lock->lo_class; 1050 file = fixup_filename(file); 1051 if ((lock->lo_flags & LO_UPGRADABLE) == 0) 1052 panic("downgrade of non-upgradable lock (%s) %s @ %s:%d", 1053 class->lc_name, lock->lo_name, file, line); 1054 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0) 1055 panic("downgrade of non-sleep lock (%s) %s @ %s:%d", 1056 class->lc_name, lock->lo_name, file, line); 1057 instance = find_instance(curthread->td_sleeplocks, lock); 1058 if (instance == NULL) 1059 panic("downgrade of unlocked lock (%s) %s @ %s:%d", 1060 class->lc_name, lock->lo_name, file, line); 1061 if ((instance->li_flags & LI_EXCLUSIVE) == 0) 1062 panic("downgrade of shared lock (%s) %s @ %s:%d", 1063 class->lc_name, lock->lo_name, file, line); 1064 if ((instance->li_flags & LI_RECURSEMASK) != 0) 1065 panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d", 1066 class->lc_name, lock->lo_name, 1067 instance->li_flags & LI_RECURSEMASK, file, line); 1068 instance->li_flags &= ~LI_EXCLUSIVE; 1069 } 1070 1071 void 1072 witness_unlock(struct lock_object *lock, int flags, const char *file, int line) 1073 { 1074 struct lock_list_entry **lock_list, *lle; 1075 struct lock_instance *instance; 1076 struct lock_class *class; 1077 struct thread *td; 1078 register_t s; 1079 int i, j; 1080 1081 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL || 1082 panicstr != NULL) 1083 return; 1084 td = curthread; 1085 class = lock->lo_class; 1086 file = fixup_filename(file); 1087 1088 /* Find lock instance associated with this lock. */ 1089 if (class->lc_flags & LC_SLEEPLOCK) 1090 lock_list = &td->td_sleeplocks; 1091 else 1092 lock_list = PCPU_PTR(spinlocks); 1093 for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next) 1094 for (i = 0; i < (*lock_list)->ll_count; i++) { 1095 instance = &(*lock_list)->ll_children[i]; 1096 if (instance->li_lock == lock) 1097 goto found; 1098 } 1099 panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name, 1100 file, line); 1101 found: 1102 1103 /* First, check for shared/exclusive mismatches. */ 1104 if ((instance->li_flags & LI_EXCLUSIVE) != 0 && 1105 (flags & LOP_EXCLUSIVE) == 0) { 1106 printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name, 1107 lock->lo_name, file, line); 1108 printf("while exclusively locked from %s:%d\n", 1109 instance->li_file, instance->li_line); 1110 panic("excl->ushare"); 1111 } 1112 if ((instance->li_flags & LI_EXCLUSIVE) == 0 && 1113 (flags & LOP_EXCLUSIVE) != 0) { 1114 printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name, 1115 lock->lo_name, file, line); 1116 printf("while share locked from %s:%d\n", instance->li_file, 1117 instance->li_line); 1118 panic("share->uexcl"); 1119 } 1120 1121 /* If we are recursed, unrecurse. 
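	 * The recursion count lives in the low bits of li_flags
	 * (LI_RECURSEMASK), so the decrement below drops it by one.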
*/ 1122 if ((instance->li_flags & LI_RECURSEMASK) > 0) { 1123 CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__, 1124 td->td_proc->p_pid, instance->li_lock->lo_name, 1125 instance->li_flags); 1126 instance->li_flags--; 1127 return; 1128 } 1129 1130 /* Otherwise, remove this item from the list. */ 1131 s = intr_disable(); 1132 CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__, 1133 td->td_proc->p_pid, instance->li_lock->lo_name, 1134 (*lock_list)->ll_count - 1); 1135 for (j = i; j < (*lock_list)->ll_count - 1; j++) 1136 (*lock_list)->ll_children[j] = 1137 (*lock_list)->ll_children[j + 1]; 1138 (*lock_list)->ll_count--; 1139 intr_restore(s); 1140 1141 /* If this lock list entry is now empty, free it. */ 1142 if ((*lock_list)->ll_count == 0) { 1143 lle = *lock_list; 1144 *lock_list = lle->ll_next; 1145 CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__, 1146 td->td_proc->p_pid, lle); 1147 witness_lock_list_free(lle); 1148 } 1149 } 1150 1151 /* 1152 * Warn if any locks other than 'lock' are held. Flags can be passed in to 1153 * exempt Giant and sleepable locks from the checks as well. If any 1154 * non-exempt locks are held, then a supplied message is printed to the 1155 * console along with a list of the offending locks. If indicated in the 1156 * flags then a failure results in a panic as well. 1157 */ 1158 int 1159 witness_warn(int flags, struct lock_object *lock, const char *fmt, ...) 1160 { 1161 struct lock_list_entry *lle; 1162 struct lock_instance *lock1; 1163 struct thread *td; 1164 va_list ap; 1165 int i, n; 1166 1167 if (witness_cold || witness_watch == 0 || panicstr != NULL) 1168 return (0); 1169 n = 0; 1170 td = curthread; 1171 for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next) 1172 for (i = lle->ll_count - 1; i >= 0; i--) { 1173 lock1 = &lle->ll_children[i]; 1174 if (lock1->li_lock == lock) 1175 continue; 1176 if (flags & WARN_GIANTOK && 1177 lock1->li_lock == &Giant.mtx_object) 1178 continue; 1179 if (flags & WARN_SLEEPOK && 1180 (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0) 1181 continue; 1182 if (n == 0) { 1183 va_start(ap, fmt); 1184 vprintf(fmt, ap); 1185 va_end(ap); 1186 printf(" with the following"); 1187 if (flags & WARN_SLEEPOK) 1188 printf(" non-sleepable"); 1189 printf(" locks held:\n"); 1190 } 1191 n++; 1192 witness_list_lock(lock1); 1193 } 1194 if (PCPU_GET(spinlocks) != NULL) { 1195 /* 1196 * Since we already hold a spinlock preemption is 1197 * already blocked. 
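		 * That also means it is safe to walk the per-CPU spin lock
		 * list here without any further precautions.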
1198 */ 1199 if (n == 0) { 1200 va_start(ap, fmt); 1201 vprintf(fmt, ap); 1202 va_end(ap); 1203 printf(" with the following"); 1204 if (flags & WARN_SLEEPOK) 1205 printf(" non-sleepable"); 1206 printf(" locks held:\n"); 1207 } 1208 n += witness_list_locks(PCPU_PTR(spinlocks)); 1209 } 1210 if (flags & WARN_PANIC && n) 1211 panic("witness_warn"); 1212 #ifdef KDB 1213 else if (witness_kdb && n) 1214 kdb_enter(__func__); 1215 else if (witness_trace && n) 1216 kdb_backtrace(); 1217 #endif 1218 return (n); 1219 } 1220 1221 const char * 1222 witness_file(struct lock_object *lock) 1223 { 1224 struct witness *w; 1225 1226 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL) 1227 return ("?"); 1228 w = lock->lo_witness; 1229 return (w->w_file); 1230 } 1231 1232 int 1233 witness_line(struct lock_object *lock) 1234 { 1235 struct witness *w; 1236 1237 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL) 1238 return (0); 1239 w = lock->lo_witness; 1240 return (w->w_line); 1241 } 1242 1243 static struct witness * 1244 enroll(const char *description, struct lock_class *lock_class) 1245 { 1246 struct witness *w; 1247 1248 if (witness_watch == 0 || panicstr != NULL) 1249 return (NULL); 1250 if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin) 1251 return (NULL); 1252 mtx_lock_spin(&w_mtx); 1253 STAILQ_FOREACH(w, &w_all, w_list) { 1254 if (w->w_name == description || (w->w_refcount > 0 && 1255 strcmp(description, w->w_name) == 0)) { 1256 w->w_refcount++; 1257 mtx_unlock_spin(&w_mtx); 1258 if (lock_class != w->w_class) 1259 panic( 1260 "lock (%s) %s does not match earlier (%s) lock", 1261 description, lock_class->lc_name, 1262 w->w_class->lc_name); 1263 return (w); 1264 } 1265 } 1266 /* 1267 * This isn't quite right, as witness_cold is still 0 while we 1268 * enroll all the locks initialized before witness_initialize(). 1269 */ 1270 if ((lock_class->lc_flags & LC_SPINLOCK) && !witness_cold) { 1271 mtx_unlock_spin(&w_mtx); 1272 panic("spin lock %s not in order list", description); 1273 } 1274 if ((w = witness_get()) == NULL) 1275 return (NULL); 1276 w->w_name = description; 1277 w->w_class = lock_class; 1278 w->w_refcount = 1; 1279 STAILQ_INSERT_HEAD(&w_all, w, w_list); 1280 if (lock_class->lc_flags & LC_SPINLOCK) 1281 STAILQ_INSERT_HEAD(&w_spin, w, w_typelist); 1282 else if (lock_class->lc_flags & LC_SLEEPLOCK) 1283 STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist); 1284 else { 1285 mtx_unlock_spin(&w_mtx); 1286 panic("lock class %s is not sleep or spin", 1287 lock_class->lc_name); 1288 } 1289 mtx_unlock_spin(&w_mtx); 1290 return (w); 1291 } 1292 1293 /* Don't let the door bang you on the way out... */ 1294 static int 1295 depart(struct witness *w) 1296 { 1297 struct witness_child_list_entry *wcl, *nwcl; 1298 struct witness_list *list; 1299 struct witness *parent; 1300 1301 MPASS(w->w_refcount == 0); 1302 if (w->w_class->lc_flags & LC_SLEEPLOCK) 1303 list = &w_sleep; 1304 else 1305 list = &w_spin; 1306 /* 1307 * First, we run through the entire tree looking for any 1308 * witnesses that the outgoing witness is a child of. For 1309 * each parent that we find, we reparent all the direct 1310 * children of the outgoing witness to its parent. 1311 */ 1312 STAILQ_FOREACH(parent, list, w_typelist) { 1313 if (!isitmychild(parent, w)) 1314 continue; 1315 removechild(parent, w); 1316 if (!reparentchildren(parent, w)) 1317 return (0); 1318 } 1319 1320 /* 1321 * Now we go through and free up the child list of the 1322 * outgoing witness. 
1323 */ 1324 for (wcl = w->w_children; wcl != NULL; wcl = nwcl) { 1325 nwcl = wcl->wcl_next; 1326 witness_child_free(wcl); 1327 } 1328 1329 /* 1330 * Detach from various lists and free. 1331 */ 1332 STAILQ_REMOVE(list, w, witness, w_typelist); 1333 STAILQ_REMOVE(&w_all, w, witness, w_list); 1334 witness_free(w); 1335 1336 /* Finally, fixup the tree. */ 1337 return (rebalancetree(list)); 1338 } 1339 1340 /* 1341 * Prune an entire lock order tree. We look for cases where a lock 1342 * is now both a descendant and a direct child of a given lock. In 1343 * that case, we want to remove the direct child link from the tree. 1344 * 1345 * Returns false if insertchild() fails. 1346 */ 1347 static int 1348 rebalancetree(struct witness_list *list) 1349 { 1350 struct witness *child, *parent; 1351 1352 STAILQ_FOREACH(child, list, w_typelist) { 1353 STAILQ_FOREACH(parent, list, w_typelist) { 1354 if (!isitmychild(parent, child)) 1355 continue; 1356 removechild(parent, child); 1357 if (isitmydescendant(parent, child)) 1358 continue; 1359 if (!insertchild(parent, child)) 1360 return (0); 1361 } 1362 } 1363 witness_levelall(); 1364 return (1); 1365 } 1366 1367 /* 1368 * Add "child" as a direct child of "parent". Returns false if 1369 * we fail due to out of memory. 1370 */ 1371 static int 1372 insertchild(struct witness *parent, struct witness *child) 1373 { 1374 struct witness_child_list_entry **wcl; 1375 1376 MPASS(child != NULL && parent != NULL); 1377 1378 /* 1379 * Insert "child" after "parent" 1380 */ 1381 wcl = &parent->w_children; 1382 while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN) 1383 wcl = &(*wcl)->wcl_next; 1384 if (*wcl == NULL) { 1385 *wcl = witness_child_get(); 1386 if (*wcl == NULL) 1387 return (0); 1388 } 1389 (*wcl)->wcl_children[(*wcl)->wcl_count++] = child; 1390 1391 return (1); 1392 } 1393 1394 /* 1395 * Make all the direct descendants of oldparent be direct descendants 1396 * of newparent. 1397 */ 1398 static int 1399 reparentchildren(struct witness *newparent, struct witness *oldparent) 1400 { 1401 struct witness_child_list_entry *wcl; 1402 int i; 1403 1404 /* Avoid making a witness a child of itself. 
*/ 1405 MPASS(!isitmychild(oldparent, newparent)); 1406 1407 for (wcl = oldparent->w_children; wcl != NULL; wcl = wcl->wcl_next) 1408 for (i = 0; i < wcl->wcl_count; i++) 1409 if (!insertchild(newparent, wcl->wcl_children[i])) 1410 return (0); 1411 return (1); 1412 } 1413 1414 static int 1415 itismychild(struct witness *parent, struct witness *child) 1416 { 1417 struct witness_list *list; 1418 1419 MPASS(child != NULL && parent != NULL); 1420 if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) != 1421 (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK))) 1422 panic( 1423 "%s: parent (%s) and child (%s) are not the same lock type", 1424 __func__, parent->w_class->lc_name, 1425 child->w_class->lc_name); 1426 1427 if (!insertchild(parent, child)) 1428 return (0); 1429 1430 if (parent->w_class->lc_flags & LC_SLEEPLOCK) 1431 list = &w_sleep; 1432 else 1433 list = &w_spin; 1434 return (rebalancetree(list)); 1435 } 1436 1437 static void 1438 removechild(struct witness *parent, struct witness *child) 1439 { 1440 struct witness_child_list_entry **wcl, *wcl1; 1441 int i; 1442 1443 for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next) 1444 for (i = 0; i < (*wcl)->wcl_count; i++) 1445 if ((*wcl)->wcl_children[i] == child) 1446 goto found; 1447 return; 1448 found: 1449 (*wcl)->wcl_count--; 1450 if ((*wcl)->wcl_count > i) 1451 (*wcl)->wcl_children[i] = 1452 (*wcl)->wcl_children[(*wcl)->wcl_count]; 1453 MPASS((*wcl)->wcl_children[i] != NULL); 1454 if ((*wcl)->wcl_count != 0) 1455 return; 1456 wcl1 = *wcl; 1457 *wcl = wcl1->wcl_next; 1458 witness_child_free(wcl1); 1459 } 1460 1461 static int 1462 isitmychild(struct witness *parent, struct witness *child) 1463 { 1464 struct witness_child_list_entry *wcl; 1465 int i; 1466 1467 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) { 1468 for (i = 0; i < wcl->wcl_count; i++) { 1469 if (wcl->wcl_children[i] == child) 1470 return (1); 1471 } 1472 } 1473 return (0); 1474 } 1475 1476 static int 1477 isitmydescendant(struct witness *parent, struct witness *child) 1478 { 1479 struct witness_child_list_entry *wcl; 1480 int i, j; 1481 1482 if (isitmychild(parent, child)) 1483 return (1); 1484 j = 0; 1485 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) { 1486 MPASS(j < 1000); 1487 for (i = 0; i < wcl->wcl_count; i++) { 1488 if (isitmydescendant(wcl->wcl_children[i], child)) 1489 return (1); 1490 } 1491 j++; 1492 } 1493 return (0); 1494 } 1495 1496 static void 1497 witness_levelall (void) 1498 { 1499 struct witness_list *list; 1500 struct witness *w, *w1; 1501 1502 /* 1503 * First clear all levels. 1504 */ 1505 STAILQ_FOREACH(w, &w_all, w_list) { 1506 w->w_level = 0; 1507 } 1508 1509 /* 1510 * Look for locks with no parent and level all their descendants. 1511 */ 1512 STAILQ_FOREACH(w, &w_all, w_list) { 1513 /* 1514 * This is just an optimization, technically we could get 1515 * away just walking the all list each time. 
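		 * Parents and children always share a lock type, since
		 * itismychild() panics otherwise, so searching only the
		 * matching type list cannot miss a parent.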
1516 */ 1517 if (w->w_class->lc_flags & LC_SLEEPLOCK) 1518 list = &w_sleep; 1519 else 1520 list = &w_spin; 1521 STAILQ_FOREACH(w1, list, w_typelist) { 1522 if (isitmychild(w1, w)) 1523 goto skip; 1524 } 1525 witness_leveldescendents(w, 0); 1526 skip: 1527 ; /* silence GCC 3.x */ 1528 } 1529 } 1530 1531 static void 1532 witness_leveldescendents(struct witness *parent, int level) 1533 { 1534 struct witness_child_list_entry *wcl; 1535 int i; 1536 1537 if (parent->w_level < level) 1538 parent->w_level = level; 1539 level++; 1540 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) 1541 for (i = 0; i < wcl->wcl_count; i++) 1542 witness_leveldescendents(wcl->wcl_children[i], level); 1543 } 1544 1545 static void 1546 witness_displaydescendants(void(*prnt)(const char *fmt, ...), 1547 struct witness *parent, int indent) 1548 { 1549 struct witness_child_list_entry *wcl; 1550 int i, level; 1551 1552 level = parent->w_level; 1553 prnt("%-2d", level); 1554 for (i = 0; i < indent; i++) 1555 prnt(" "); 1556 if (parent->w_refcount > 0) 1557 prnt("%s", parent->w_name); 1558 else 1559 prnt("(dead)"); 1560 if (parent->w_displayed) { 1561 prnt(" -- (already displayed)\n"); 1562 return; 1563 } 1564 parent->w_displayed = 1; 1565 if (parent->w_refcount > 0) { 1566 if (parent->w_file != NULL) 1567 prnt(" -- last acquired @ %s:%d", parent->w_file, 1568 parent->w_line); 1569 } 1570 prnt("\n"); 1571 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) 1572 for (i = 0; i < wcl->wcl_count; i++) 1573 witness_displaydescendants(prnt, 1574 wcl->wcl_children[i], indent + 1); 1575 } 1576 1577 #ifdef BLESSING 1578 static int 1579 blessed(struct witness *w1, struct witness *w2) 1580 { 1581 int i; 1582 struct witness_blessed *b; 1583 1584 for (i = 0; i < blessed_count; i++) { 1585 b = &blessed_list[i]; 1586 if (strcmp(w1->w_name, b->b_lock1) == 0) { 1587 if (strcmp(w2->w_name, b->b_lock2) == 0) 1588 return (1); 1589 continue; 1590 } 1591 if (strcmp(w1->w_name, b->b_lock2) == 0) 1592 if (strcmp(w2->w_name, b->b_lock1) == 0) 1593 return (1); 1594 } 1595 return (0); 1596 } 1597 #endif 1598 1599 static struct witness * 1600 witness_get(void) 1601 { 1602 struct witness *w; 1603 1604 if (witness_watch == 0) { 1605 mtx_unlock_spin(&w_mtx); 1606 return (NULL); 1607 } 1608 if (STAILQ_EMPTY(&w_free)) { 1609 witness_watch = 0; 1610 mtx_unlock_spin(&w_mtx); 1611 printf("%s: witness exhausted\n", __func__); 1612 return (NULL); 1613 } 1614 w = STAILQ_FIRST(&w_free); 1615 STAILQ_REMOVE_HEAD(&w_free, w_list); 1616 bzero(w, sizeof(*w)); 1617 return (w); 1618 } 1619 1620 static void 1621 witness_free(struct witness *w) 1622 { 1623 1624 STAILQ_INSERT_HEAD(&w_free, w, w_list); 1625 } 1626 1627 static struct witness_child_list_entry * 1628 witness_child_get(void) 1629 { 1630 struct witness_child_list_entry *wcl; 1631 1632 if (witness_watch == 0) { 1633 mtx_unlock_spin(&w_mtx); 1634 return (NULL); 1635 } 1636 wcl = w_child_free; 1637 if (wcl == NULL) { 1638 witness_watch = 0; 1639 mtx_unlock_spin(&w_mtx); 1640 printf("%s: witness exhausted\n", __func__); 1641 return (NULL); 1642 } 1643 w_child_free = wcl->wcl_next; 1644 bzero(wcl, sizeof(*wcl)); 1645 return (wcl); 1646 } 1647 1648 static void 1649 witness_child_free(struct witness_child_list_entry *wcl) 1650 { 1651 1652 wcl->wcl_next = w_child_free; 1653 w_child_free = wcl; 1654 } 1655 1656 static struct lock_list_entry * 1657 witness_lock_list_get(void) 1658 { 1659 struct lock_list_entry *lle; 1660 1661 if (witness_watch == 0) 1662 return (NULL); 1663 
mtx_lock_spin(&w_mtx); 1664 lle = w_lock_list_free; 1665 if (lle == NULL) { 1666 witness_watch = 0; 1667 mtx_unlock_spin(&w_mtx); 1668 printf("%s: witness exhausted\n", __func__); 1669 return (NULL); 1670 } 1671 w_lock_list_free = lle->ll_next; 1672 mtx_unlock_spin(&w_mtx); 1673 bzero(lle, sizeof(*lle)); 1674 return (lle); 1675 } 1676 1677 static void 1678 witness_lock_list_free(struct lock_list_entry *lle) 1679 { 1680 1681 mtx_lock_spin(&w_mtx); 1682 lle->ll_next = w_lock_list_free; 1683 w_lock_list_free = lle; 1684 mtx_unlock_spin(&w_mtx); 1685 } 1686 1687 static struct lock_instance * 1688 find_instance(struct lock_list_entry *lock_list, struct lock_object *lock) 1689 { 1690 struct lock_list_entry *lle; 1691 struct lock_instance *instance; 1692 int i; 1693 1694 for (lle = lock_list; lle != NULL; lle = lle->ll_next) 1695 for (i = lle->ll_count - 1; i >= 0; i--) { 1696 instance = &lle->ll_children[i]; 1697 if (instance->li_lock == lock) 1698 return (instance); 1699 } 1700 return (NULL); 1701 } 1702 1703 static void 1704 witness_list_lock(struct lock_instance *instance) 1705 { 1706 struct lock_object *lock; 1707 1708 lock = instance->li_lock; 1709 printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ? 1710 "exclusive" : "shared", lock->lo_class->lc_name, lock->lo_name); 1711 if (lock->lo_type != lock->lo_name) 1712 printf(" (%s)", lock->lo_type); 1713 printf(" r = %d (%p) locked @ %s:%d\n", 1714 instance->li_flags & LI_RECURSEMASK, lock, instance->li_file, 1715 instance->li_line); 1716 } 1717 1718 int 1719 witness_list_locks(struct lock_list_entry **lock_list) 1720 { 1721 struct lock_list_entry *lle; 1722 int i, nheld; 1723 1724 nheld = 0; 1725 for (lle = *lock_list; lle != NULL; lle = lle->ll_next) 1726 for (i = lle->ll_count - 1; i >= 0; i--) { 1727 witness_list_lock(&lle->ll_children[i]); 1728 nheld++; 1729 } 1730 return (nheld); 1731 } 1732 1733 /* 1734 * This is a bit risky at best. We call this function when we have timed 1735 * out acquiring a spin lock, and we assume that the other CPU is stuck 1736 * with this lock held. So, we go groveling around in the other CPU's 1737 * per-cpu data to try to find the lock instance for this spin lock to 1738 * see when it was last acquired. 
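 * If the owner is not currently running on a CPU, or the lock cannot be
 * found in that CPU's spin lock list, nothing is printed.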
1739 */ 1740 void 1741 witness_display_spinlock(struct lock_object *lock, struct thread *owner) 1742 { 1743 struct lock_instance *instance; 1744 struct pcpu *pc; 1745 1746 if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU) 1747 return; 1748 pc = pcpu_find(owner->td_oncpu); 1749 instance = find_instance(pc->pc_spinlocks, lock); 1750 if (instance != NULL) 1751 witness_list_lock(instance); 1752 } 1753 1754 void 1755 witness_save(struct lock_object *lock, const char **filep, int *linep) 1756 { 1757 struct lock_instance *instance; 1758 1759 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1760 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1761 return; 1762 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0) 1763 panic("%s: lock (%s) %s is not a sleep lock", __func__, 1764 lock->lo_class->lc_name, lock->lo_name); 1765 instance = find_instance(curthread->td_sleeplocks, lock); 1766 if (instance == NULL) 1767 panic("%s: lock (%s) %s not locked", __func__, 1768 lock->lo_class->lc_name, lock->lo_name); 1769 *filep = instance->li_file; 1770 *linep = instance->li_line; 1771 } 1772 1773 void 1774 witness_restore(struct lock_object *lock, const char *file, int line) 1775 { 1776 struct lock_instance *instance; 1777 1778 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1779 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1780 return; 1781 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0) 1782 panic("%s: lock (%s) %s is not a sleep lock", __func__, 1783 lock->lo_class->lc_name, lock->lo_name); 1784 instance = find_instance(curthread->td_sleeplocks, lock); 1785 if (instance == NULL) 1786 panic("%s: lock (%s) %s not locked", __func__, 1787 lock->lo_class->lc_name, lock->lo_name); 1788 lock->lo_witness->w_file = file; 1789 lock->lo_witness->w_line = line; 1790 instance->li_file = file; 1791 instance->li_line = line; 1792 } 1793 1794 void 1795 witness_assert(struct lock_object *lock, int flags, const char *file, int line) 1796 { 1797 #ifdef INVARIANT_SUPPORT 1798 struct lock_instance *instance; 1799 1800 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1801 return; 1802 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) != 0) 1803 instance = find_instance(curthread->td_sleeplocks, lock); 1804 else if ((lock->lo_class->lc_flags & LC_SPINLOCK) != 0) 1805 instance = find_instance(PCPU_GET(spinlocks), lock); 1806 else { 1807 panic("Lock (%s) %s is not sleep or spin!", 1808 lock->lo_class->lc_name, lock->lo_name); 1809 } 1810 file = fixup_filename(file); 1811 switch (flags) { 1812 case LA_UNLOCKED: 1813 if (instance != NULL) 1814 panic("Lock (%s) %s locked @ %s:%d.", 1815 lock->lo_class->lc_name, lock->lo_name, file, line); 1816 break; 1817 case LA_LOCKED: 1818 case LA_LOCKED | LA_RECURSED: 1819 case LA_LOCKED | LA_NOTRECURSED: 1820 case LA_SLOCKED: 1821 case LA_SLOCKED | LA_RECURSED: 1822 case LA_SLOCKED | LA_NOTRECURSED: 1823 case LA_XLOCKED: 1824 case LA_XLOCKED | LA_RECURSED: 1825 case LA_XLOCKED | LA_NOTRECURSED: 1826 if (instance == NULL) { 1827 panic("Lock (%s) %s not locked @ %s:%d.", 1828 lock->lo_class->lc_name, lock->lo_name, file, line); 1829 break; 1830 } 1831 if ((flags & LA_XLOCKED) != 0 && 1832 (instance->li_flags & LI_EXCLUSIVE) == 0) 1833 panic("Lock (%s) %s not exclusively locked @ %s:%d.", 1834 lock->lo_class->lc_name, lock->lo_name, file, line); 1835 if ((flags & LA_SLOCKED) != 0 && 1836 (instance->li_flags & LI_EXCLUSIVE) != 0) 1837 panic("Lock (%s) %s exclusively locked @ %s:%d.", 1838 
lock->lo_class->lc_name, lock->lo_name, file, line); 1839 if ((flags & LA_RECURSED) != 0 && 1840 (instance->li_flags & LI_RECURSEMASK) == 0) 1841 panic("Lock (%s) %s not recursed @ %s:%d.", 1842 lock->lo_class->lc_name, lock->lo_name, file, line); 1843 if ((flags & LA_NOTRECURSED) != 0 && 1844 (instance->li_flags & LI_RECURSEMASK) != 0) 1845 panic("Lock (%s) %s recursed @ %s:%d.", 1846 lock->lo_class->lc_name, lock->lo_name, file, line); 1847 break; 1848 default: 1849 panic("Invalid lock assertion at %s:%d.", file, line); 1850 1851 } 1852 #endif /* INVARIANT_SUPPORT */ 1853 } 1854 1855 #ifdef DDB 1856 static void 1857 witness_list(struct thread *td) 1858 { 1859 1860 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1861 KASSERT(kdb_active, ("%s: not in the debugger", __func__)); 1862 1863 if (witness_watch == 0) 1864 return; 1865 1866 witness_list_locks(&td->td_sleeplocks); 1867 1868 /* 1869 * We only handle spinlocks if td == curthread. This is somewhat broken 1870 * if td is currently executing on some other CPU and holds spin locks 1871 * as we won't display those locks. If we had a MI way of getting 1872 * the per-cpu data for a given cpu then we could use 1873 * td->td_oncpu to get the list of spinlocks for this thread 1874 * and "fix" this. 1875 * 1876 * That still wouldn't really fix this unless we locked sched_lock 1877 * or stopped the other CPU to make sure it wasn't changing the list 1878 * out from under us. It is probably best to just not try to handle 1879 * threads on other CPU's for now. 1880 */ 1881 if (td == curthread && PCPU_GET(spinlocks) != NULL) 1882 witness_list_locks(PCPU_PTR(spinlocks)); 1883 } 1884 1885 DB_SHOW_COMMAND(locks, db_witness_list) 1886 { 1887 struct thread *td; 1888 pid_t pid; 1889 struct proc *p; 1890 1891 if (have_addr) { 1892 pid = (addr % 16) + ((addr >> 4) % 16) * 10 + 1893 ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 + 1894 ((addr >> 16) % 16) * 10000; 1895 /* sx_slock(&allproc_lock); */ 1896 FOREACH_PROC_IN_SYSTEM(p) { 1897 if (p->p_pid == pid) 1898 break; 1899 } 1900 /* sx_sunlock(&allproc_lock); */ 1901 if (p == NULL) { 1902 db_printf("pid %d not found\n", pid); 1903 return; 1904 } 1905 FOREACH_THREAD_IN_PROC(p, td) { 1906 witness_list(td); 1907 } 1908 } else { 1909 td = curthread; 1910 witness_list(td); 1911 } 1912 } 1913 1914 DB_SHOW_COMMAND(witness, db_witness_display) 1915 { 1916 1917 witness_display(db_printf); 1918 } 1919 #endif 1920