/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD$
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 *	Main Entry: witness
 *	Pronunciation: 'wit-n&s
 *	Function: noun
 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
 *	    testimony, witness, from 2wit
 *	Date: before 12th century
 *	1 : attestation of a fact or event : TESTIMONY
 *	2 : one that gives evidence; specifically : one who testifies in
 *	    a cause or before a judicial tribunal
 *	3 : one asked to be present at a transaction so as to be able to
 *	    testify to its having taken place
 *	4 : one who has personal knowledge of something
 *	5 a : something serving as evidence or proof : SIGN
 *	  b : public affirmation by word or example of usually
 *	      religious faith or conviction <the heroic witness to divine
 *	      life -- Pilot>
 *	6 capitalized : a member of the Jehovah's Witnesses
 */

#include "opt_ddb.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <ddb/ddb.h>

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT		200
#define	WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
/*
 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
 * will hold LOCK_NCHILDREN * 2 locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_CHILDCOUNT		((MAXCPU + 1024) * 2)

#define	WITNESS_NCHILDREN	6

struct witness_child_list_entry;

struct witness {
	const char	*w_name;
	struct lock_class *w_class;
	STAILQ_ENTRY(witness) w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness) w_typelist;	/* Witnesses of a type. */
	struct witness_child_list_entry *w_children;	/* Great evilness... */
	const char	*w_file;
	int		w_line;
	u_int		w_level;
	u_int		w_refcount;
	u_char		w_Giant_squawked:1;
	u_char		w_other_squawked:1;
	u_char		w_same_squawked:1;
};

struct witness_child_list_entry {
	struct witness_child_list_entry *wcl_next;
	struct witness *wcl_children[WITNESS_NCHILDREN];
	u_int		wcl_count;
};

STAILQ_HEAD(witness_list, witness);

#ifdef BLESSING
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

struct witness_order_list_entry {
	const char	*w_name;
	struct lock_class *w_class;
};

static struct witness *enroll(const char *description,
    struct lock_class *lock_class);
static int itismychild(struct witness *parent, struct witness *child);
static void removechild(struct witness *parent, struct witness *child);
static int isitmychild(struct witness *parent, struct witness *child);
static int isitmydescendant(struct witness *parent, struct witness *child);
#ifdef BLESSING
static int blessed(struct witness *, struct witness *);
#endif
static void witness_displaydescendants(void(*)(const char *fmt, ...),
    struct witness *);
static void witness_leveldescendents(struct witness *parent, int level);
static void witness_levelall(void);
static struct witness *witness_get(void);
static void witness_free(struct witness *m);
static struct witness_child_list_entry *witness_child_get(void);
static void witness_child_free(struct witness_child_list_entry *wcl);
static struct lock_list_entry *witness_lock_list_get(void);
static void witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_instance *find_instance(struct lock_list_entry *lock_list,
    struct lock_object *lock);
#if defined(DDB)
static void witness_display_list(void(*prnt)(const char *fmt, ...),
    struct witness_list *list);
static void witness_display(void(*)(const char *fmt, ...));
#endif

MALLOC_DEFINE(M_WITNESS, "witness", "witness structure");

static int witness_watch = 1;
TUNABLE_INT("debug.witness_watch", &witness_watch);
SYSCTL_INT(_debug, OID_AUTO, witness_watch, CTLFLAG_RD, &witness_watch, 0, "");

#ifdef DDB
/*
 * When DDB is enabled and witness_ddb is set to 1, it will cause the system to
 * drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_DDB
int	witness_ddb = 1;
#else
int	witness_ddb = 0;
#endif
TUNABLE_INT("debug.witness_ddb", &witness_ddb);
SYSCTL_INT(_debug, OID_AUTO, witness_ddb, CTLFLAG_RW, &witness_ddb, 0, "");
#endif /* DDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness_skipspin", &witness_skipspin);
SYSCTL_INT(_debug, OID_AUTO, witness_skipspin, CTLFLAG_RD, &witness_skipspin, 0,
    "");

static struct mtx w_mtx;
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
static struct witness_child_list_entry *w_child_free = NULL;
static struct lock_list_entry *w_lock_list_free = NULL;
static int witness_dead;	/* fatal error, probably no memory */

static struct witness w_data[WITNESS_COUNT];
static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];

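/*
 * Statically known lock orderings, enrolled at initialization time.  Each
 * run of consecutive entries names locks in their required acquisition
 * order; a NULL entry terminates a group.  Sleep locks are listed first,
 * followed by spin locks.
 */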
static struct witness_order_list_entry order_lists[] = {
	{ "Giant", &lock_class_mtx_sleep },
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "filedesc structure", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_mtx_sleep },
	{ "uidinfo struct", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#ifdef __i386__
	{ "com", &lock_class_mtx_spin },
#endif
#endif
	{ "sio", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
	{ "sabtty", &lock_class_mtx_spin },
	{ "zstty", &lock_class_mtx_spin },
	{ "ng_node", &lock_class_mtx_spin },
	{ "ng_worklist", &lock_class_mtx_spin },
	{ "ithread table lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	/*
	 * leaf locks
	 */
	{ "allpmaps", &lock_class_mtx_spin },
	{ "vm page queue free mutex", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#if defined(__i386__) && defined(APIC_IO)
	{ "tlb", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "ipi", &lock_class_mtx_spin },
#endif
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "mutex profiling lock", &lock_class_mtx_spin },
	{ "kse zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
	{ NULL, NULL },
	{ NULL, NULL }
};

#ifdef BLESSING
/*
 * Pairs of locks which have been blessed.
 * Don't complain about order problems with blessed locks.
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif

/*
 * List of all locks in the system.
 */
TAILQ_HEAD(, lock_object) all_locks = TAILQ_HEAD_INITIALIZER(all_locks);

static struct mtx all_mtx = {
	{ &lock_class_mtx_sleep,	/* mtx_object.lo_class */
	  "All locks list",		/* mtx_object.lo_name */
	  "All locks list",		/* mtx_object.lo_type */
	  LO_INITIALIZED,		/* mtx_object.lo_flags */
	  { NULL, NULL },		/* mtx_object.lo_list */
	  NULL },			/* mtx_object.lo_witness */
	MTX_UNOWNED, 0,			/* mtx_lock, mtx_recurse */
	TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
	{ NULL, NULL }			/* mtx_contested */
};

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * Global variables for bookkeeping.
 */
static int lock_cur_cnt;
static int lock_max_cnt;

/*
 * The WITNESS-enabled diagnostic code.
 */
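/*
 * Set up the witness state: seed the free pools from the static arrays,
 * enroll the statically specified lock orderings from order_lists, and
 * then enroll every lock that was initialized before this point.
 */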
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	TAILQ_INSERT_HEAD(&all_locks, &all_mtx.mtx_object, lo_list);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS);
	for (i = 0; i < WITNESS_COUNT; i++)
		witness_free(&w_data[i]);
	for (i = 0; i < WITNESS_CHILDCOUNT; i++)
		witness_child_free(&w_childdata[i]);
	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			itismychild(w, w1);
			w = w1;
		}
	}

	/* Iterate through all locks and add them to witness. */
	mtx_lock(&all_mtx);
	TAILQ_FOREACH(lock, &all_locks, lo_list) {
		if (lock->lo_flags & LO_WITNESS)
			lock->lo_witness = enroll(lock->lo_type,
			    lock->lo_class);
		else
			lock->lo_witness = NULL;
	}
	mtx_unlock(&all_mtx);

	/* Mark the witness code as being ready for use. */
	atomic_store_rel_int(&witness_cold, 0);

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)

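/*
 * Called whenever a lock object is initialized.  Sanity check the lock's
 * flags against its class, add it to the list of all locks, and enroll it
 * with witness if it is tracked.
 */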
void
witness_init(struct lock_object *lock)
{
	struct lock_class *class;

	class = lock->lo_class;
	if (lock->lo_flags & LO_INITIALIZED)
		panic("%s: lock (%s) %s is already initialized", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	mtx_lock(&all_mtx);
	TAILQ_INSERT_TAIL(&all_locks, lock, lo_list);
	lock->lo_flags |= LO_INITIALIZED;
	lock_cur_cnt++;
	if (lock_cur_cnt > lock_max_cnt)
		lock_max_cnt = lock_cur_cnt;
	mtx_unlock(&all_mtx);
	if (!witness_cold && !witness_dead && panicstr == NULL &&
	    (lock->lo_flags & LO_WITNESS) != 0)
		lock->lo_witness = enroll(lock->lo_type, class);
	else
		lock->lo_witness = NULL;
}

void
witness_destroy(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    lock->lo_class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_INITIALIZED) == 0)
		panic("%s: lock (%s) %s is not initialized", __func__,
		    lock->lo_class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	w = lock->lo_witness;
	if (w != NULL) {
		mtx_lock_spin(&w_mtx);
		MPASS(w->w_refcount > 0);
		w->w_refcount--;
		mtx_unlock_spin(&w_mtx);
	}

	mtx_lock(&all_mtx);
	lock_cur_cnt--;
	TAILQ_REMOVE(&all_locks, lock, lo_list);
	lock->lo_flags &= ~LO_INITIALIZED;
	mtx_unlock(&all_mtx);
}

#if defined(DDB)
static void
witness_display_list(void(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w, *w1;
	int found;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL)
			continue;
		found = 0;
		STAILQ_FOREACH(w1, list, w_typelist) {
			if (isitmychild(w1, w)) {
				found++;
				break;
			}
		}
		if (found)
			continue;
		/*
		 * This lock has no ancestors, display its descendants.
		 */
		witness_displaydescendants(prnt, w);
	}
}

static void
witness_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	witness_levelall();

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s\n", w->w_name);
	}
}
#endif

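/*
 * Called on every lock acquisition.  Checks for recursion, for sleep locks
 * acquired inside a critical section, for duplicate acquisitions of the same
 * lock type, and for lock order reversals against the order graph built so
 * far, then records the new acquisition in the per-thread (or per-CPU) lock
 * list.
 */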
void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1, *lock2;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;
#ifdef DDB
	int go_into_ddb = 0;
#endif /* DDB */

	if (witness_cold || witness_dead || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	class = lock->lo_class;
	td = curthread;

	if (class->lc_flags & LC_SLEEPLOCK) {
		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && (flags & LOP_TRYLOCK) == 0)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		lock_list = &td->td_sleeplocks;
	} else
		lock_list = PCPU_PTR(spinlocks);

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.
	 */
	if (flags & LOP_TRYLOCK)
		goto out;

	/*
	 * Is this the first lock acquired?  If so, then no order checking
	 * is needed.
	 */
	if (*lock_list == NULL)
		goto out;

	/*
	 * Check to see if we are recursing on a lock we already own.
	 */
	lock1 = find_instance(*lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		lock1->li_flags++;
		if ((lock->lo_flags & LO_RECURSABLE) == 0) {
			printf(
			"recursed on non-recursive lock (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("first acquired @ %s:%d\n", lock1->li_file,
			    lock1->li_line);
			panic("recurse");
		}
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    lock1->li_flags & LI_RECURSEMASK);
		lock1->li_file = file;
		lock1->li_line = line;
		return;
	}

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	w1 = lock1->li_lock->lo_witness;
	if (w1 == w) {
		if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK))
			goto out;
		w->w_same_squawked = 1;
		printf("acquiring duplicate lock of same type: \"%s\"\n",
		    lock->lo_type);
		printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
		    lock1->li_file, lock1->li_line);
		printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
#ifdef DDB
		go_into_ddb = 1;
#endif /* DDB */
		goto out;
	}
	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);
	/*
	 * If we have a known higher number just say ok
	 */
	if (witness_watch > 1 && w->w_level > w1->w_level) {
		mtx_unlock_spin(&w_mtx);
		goto out;
	}
	if (isitmydescendant(w1, w)) {
		mtx_unlock_spin(&w_mtx);
		goto out;
	}
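	/*
	 * Walk every lock currently held, from most to least recently
	 * acquired, looking for a held lock that, according to the
	 * established order, should only be acquired after the lock we are
	 * taking now (or a non-sleepable lock held while taking a sleepable
	 * one); either case is a lock order reversal.
	 */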
	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];
			w1 = lock1->li_lock->lo_witness;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}
			/*
			 * If we are locking Giant and we slept with this
			 * lock, then skip it.
			 */
			if ((lock1->li_flags & LI_SLEPT) != 0 &&
			    lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable and isn't Giant, we want to treat
			 * it as a lock order violation to enforce a general
			 * lock order of sleepable locks before non-sleepable
			 * locks.  Thus, we only bother checking the lock
			 * order hierarchy if we pass the initial test.
			 */
			if (!((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock1->li_lock != &Giant.mtx_object)) &&
			    !isitmydescendant(w, w1))
				continue;
			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
			mtx_unlock_spin(&w_mtx);
#ifdef BLESSING
			if (blessed(w, w1))
				goto out;
#endif
			if (lock1->li_lock == &Giant.mtx_object) {
				if (w1->w_Giant_squawked)
					goto out;
				else
					w1->w_Giant_squawked = 1;
			} else {
				if (w1->w_other_squawked)
					goto out;
				else
					w1->w_other_squawked = 1;
			}
			/*
			 * Ok, yell about it.
			 */
			printf("lock order reversal\n");
			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				i--;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				}
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_type, lock2->li_file,
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			}
#ifdef DDB
			go_into_ddb = 1;
#endif /* DDB */
			goto out;
		}
	}
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	/*
	 * Don't build a new relationship if we are locking Giant just
	 * after waking up and the previous lock in the list was acquired
	 * prior to blocking.
	 */
	if (lock == &Giant.mtx_object && (lock1->li_flags & LI_SLEPT) != 0)
		mtx_unlock_spin(&w_mtx);
	else {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    lock->lo_type, lock1->li_lock->lo_type);
		if (!itismychild(lock1->li_lock->lo_witness, w))
			mtx_unlock_spin(&w_mtx);
	}

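	/*
	 * Common exit path: optionally drop into the debugger, note where
	 * this witness was most recently acquired, and record the new lock
	 * instance in the thread's (or CPU's) lock list, allocating a new
	 * list entry if the current one is full.
	 */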
out:
#ifdef DDB
	if (witness_ddb && go_into_ddb)
		Debugger(__func__);
#endif /* DDB */
	w->w_file = file;
	w->w_line = line;

	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	lock1 = &lle->ll_children[lle->ll_count++];
	lock1->li_lock = lock;
	lock1->li_line = line;
	lock1->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		lock1->li_flags = LI_EXCLUSIVE;
	else
		lock1->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_dead || panicstr != NULL)
		return;
	class = lock->lo_class;
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((flags & LOP_TRYLOCK) == 0)
		panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
		    lock->lo_name, file, line);
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) != 0)
		panic("upgrade of exclusive lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags |= LI_EXCLUSIVE;
}

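/*
 * Record a downgrade of an upgradable sleep lock from exclusive to shared,
 * after sanity-checking that the lock is actually held exclusively by the
 * current thread and is not recursed.
 */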
void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_dead || panicstr != NULL)
		return;
	class = lock->lo_class;
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) == 0)
		panic("downgrade of shared lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags &= ~LI_EXCLUSIVE;
}

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || witness_dead || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	td = curthread;
	class = lock->lo_class;
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock) {
				if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
				    (flags & LOP_EXCLUSIVE) == 0) {
					printf(
					"shared unlock of (%s) %s @ %s:%d\n",
					    class->lc_name, lock->lo_name,
					    file, line);
					printf(
					"while exclusively locked from %s:%d\n",
					    instance->li_file,
					    instance->li_line);
					panic("excl->ushare");
				}
				if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
				    (flags & LOP_EXCLUSIVE) != 0) {
					printf(
					"exclusive unlock of (%s) %s @ %s:%d\n",
					    class->lc_name, lock->lo_name,
					    file, line);
					printf(
					"while share locked from %s:%d\n",
					    instance->li_file,
					    instance->li_line);
					panic("share->uexcl");
				}
				/* If we are recursed, unrecurse. */
				if ((instance->li_flags & LI_RECURSEMASK) > 0) {
					CTR4(KTR_WITNESS,
					"%s: pid %d unrecursed on %s r=%d",
					    __func__, td->td_proc->p_pid,
					    instance->li_lock->lo_name,
					    instance->li_flags);
					instance->li_flags--;
					return;
				}
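				/*
				 * Otherwise drop the instance from the list,
				 * sliding later entries down, with interrupts
				 * disabled so the list is never seen in an
				 * inconsistent state.
				 */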
				s = intr_disable();
				CTR4(KTR_WITNESS,
				"%s: pid %d removed %s from lle[%d]",
				    __func__, td->td_proc->p_pid,
				    instance->li_lock->lo_name,
				    (*lock_list)->ll_count - 1);
				for (j = i; j < (*lock_list)->ll_count - 1; j++)
					(*lock_list)->ll_children[j] =
					    (*lock_list)->ll_children[j + 1];
				(*lock_list)->ll_count--;
				intr_restore(s);
				if ((*lock_list)->ll_count == 0) {
					lle = *lock_list;
					*lock_list = lle->ll_next;
					CTR3(KTR_WITNESS,
					    "%s: pid %d removed lle %p",
					    __func__, td->td_proc->p_pid, lle);
					witness_lock_list_free(lle);
				}
				return;
			}
		}
	panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
	    file, line);
}

/*
 * Warn if any held locks are not sleepable.  Note that Giant and the lock
 * passed in are both special cases since they are both released during the
 * sleep process and aren't actually held while the thread is asleep.
 */
int
witness_sleep(int check_only, struct lock_object *lock, const char *file,
    int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1;
	struct thread *td;
	int i, n;

	if (witness_cold || witness_dead || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	lock_list = &td->td_sleeplocks;
again:
	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock ||
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0) {
				if (check_only == 0) {
					CTR3(KTR_WITNESS,
				"pid %d: sleeping with lock (%s) %s held",
					    td->td_proc->p_pid,
					    lock1->li_lock->lo_class->lc_name,
					    lock1->li_lock->lo_name);
					lock1->li_flags |= LI_SLEPT;
				}
				continue;
			}
			n++;
			printf("%s:%d: %s with \"%s\" locked from %s:%d\n",
			    file, line, check_only ? "could sleep" : "sleeping",
			    lock1->li_lock->lo_name, lock1->li_file,
			    lock1->li_line);
		}
	if (lock_list == &td->td_sleeplocks && PCPU_GET(spinlocks) != NULL) {
		/*
		 * Since we already hold a spin lock, preemption is
		 * already blocked.
		 */
		lock_list = PCPU_PTR(spinlocks);
		goto again;
	}
#ifdef DDB
	if (witness_ddb && n)
		Debugger(__func__);
#endif /* DDB */
	return (n);
}

const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_dead || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_dead || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

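/*
 * Look up the witness for a lock with the given type name, bumping its
 * reference count if it already exists; otherwise allocate a new witness
 * from the free pool and put it on the appropriate sleep or spin list.
 */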
static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;

	if (!witness_watch || witness_dead || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_name == description || (w->w_refcount > 0 &&
		    strcmp(description, w->w_name) == 0)) {
			w->w_refcount++;
			mtx_unlock_spin(&w_mtx);
			if (lock_class != w->w_class)
				panic(
				"lock (%s) %s does not match earlier (%s) lock",
				    description, lock_class->lc_name,
				    w->w_class->lc_name);
			return (w);
		}
	}
	/*
	 * This isn't quite right, as witness_cold is still set while we
	 * enroll all the locks initialized before witness_initialize().
	 */
	if ((lock_class->lc_flags & LC_SPINLOCK) && !witness_cold) {
		mtx_unlock_spin(&w_mtx);
		panic("spin lock %s not in order list", description);
	}
	if ((w = witness_get()) == NULL)
		return (NULL);
	w->w_name = description;
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK)
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
	else if (lock_class->lc_flags & LC_SLEEPLOCK)
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
	else {
		mtx_unlock_spin(&w_mtx);
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);
	}
	mtx_unlock_spin(&w_mtx);
	return (w);
}

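/*
 * Record that "child" has been acquired while holding "parent", adding the
 * child to the parent's child list (growing the list if needed).  Afterwards
 * the whole tree is pruned so that a witness which is already a descendant
 * of a given witness is not also kept as a direct child of it.
 */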
static int
itismychild(struct witness *parent, struct witness *child)
{
	static int recursed;
	struct witness_child_list_entry **wcl;
	struct witness_list *list;

	MPASS(child != NULL && parent != NULL);
	if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
	    (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
		panic(
		"%s: parent (%s) and child (%s) are not the same lock type",
		    __func__, parent->w_class->lc_name,
		    child->w_class->lc_name);

	/*
	 * Insert "child" after "parent"
	 */
	wcl = &parent->w_children;
	while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
		wcl = &(*wcl)->wcl_next;
	if (*wcl == NULL) {
		*wcl = witness_child_get();
		if (*wcl == NULL)
			return (1);
	}
	(*wcl)->wcl_children[(*wcl)->wcl_count++] = child;

	/*
	 * Now prune whole tree.  We look for cases where a lock is now
	 * both a descendant and a direct child of a given lock.  In that
	 * case, we want to remove the direct child link from the tree.
	 */
	if (recursed)
		return (0);
	recursed = 1;
	if (parent->w_class->lc_flags & LC_SLEEPLOCK)
		list = &w_sleep;
	else
		list = &w_spin;
	STAILQ_FOREACH(child, list, w_typelist) {
		STAILQ_FOREACH(parent, list, w_typelist) {
			if (!isitmychild(parent, child))
				continue;
			removechild(parent, child);
			if (isitmydescendant(parent, child))
				continue;
			itismychild(parent, child);
		}
	}
	recursed = 0;
	witness_levelall();
	return (0);
}

static void
removechild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry **wcl, *wcl1;
	int i;

	for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
		for (i = 0; i < (*wcl)->wcl_count; i++)
			if ((*wcl)->wcl_children[i] == child)
				goto found;
	return;
found:
	(*wcl)->wcl_count--;
	if ((*wcl)->wcl_count > i)
		(*wcl)->wcl_children[i] =
		    (*wcl)->wcl_children[(*wcl)->wcl_count];
	MPASS((*wcl)->wcl_children[i] != NULL);
	if ((*wcl)->wcl_count != 0)
		return;
	wcl1 = *wcl;
	*wcl = wcl1->wcl_next;
	witness_child_free(wcl1);
}

static int
isitmychild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry *wcl;
	int i;

	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
		for (i = 0; i < wcl->wcl_count; i++) {
			if (wcl->wcl_children[i] == child)
				return (1);
		}
	}
	return (0);
}

static int
isitmydescendant(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry *wcl;
	int i, j;

	if (isitmychild(parent, child))
		return (1);
	j = 0;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
		MPASS(j < 1000);
		for (i = 0; i < wcl->wcl_count; i++) {
			if (isitmydescendant(wcl->wcl_children[i], child))
				return (1);
		}
		j++;
	}
	return (0);
}

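/*
 * Recompute each witness's level (its depth in the order graph) by walking
 * down from witnesses that have no parents.
 */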
static void
witness_levelall(void)
{
	struct witness_list *list;
	struct witness *w, *w1;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_level = 0;
	}

	/*
	 * Look for locks with no parent and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		/*
		 * This is just an optimization, technically we could get
		 * away with just walking the all list each time.
		 */
		if (w->w_class->lc_flags & LC_SLEEPLOCK)
			list = &w_sleep;
		else
			list = &w_spin;
		STAILQ_FOREACH(w1, list, w_typelist) {
			if (isitmychild(w1, w))
				goto skip;
		}
		witness_leveldescendents(w, 0);
	skip:
		;	/* silence GCC 3.x */
	}
}

static void
witness_leveldescendents(struct witness *parent, int level)
{
	struct witness_child_list_entry *wcl;
	int i;

	if (parent->w_level < level)
		parent->w_level = level;
	level++;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_leveldescendents(wcl->wcl_children[i], level);
}

static void
witness_displaydescendants(void(*prnt)(const char *fmt, ...),
    struct witness *parent)
{
	struct witness_child_list_entry *wcl;
	int i, level;

	level = parent->w_level;
	prnt("%-2d", level);
	for (i = 0; i < level; i++)
		prnt(" ");
	if (parent->w_refcount > 0) {
		prnt("%s", parent->w_name);
		if (parent->w_file != NULL)
			prnt(" -- last acquired @ %s:%d\n", parent->w_file,
			    parent->w_line);
	} else
		prnt("(dead)\n");
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_displaydescendants(prnt,
			    wcl->wcl_children[i]);
}

#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < blessed_count; i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_name, b->b_lock1) == 0) {
			if (strcmp(w2->w_name, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_name, b->b_lock2) == 0)
			if (strcmp(w2->w_name, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}
#endif

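/*
 * The allocators below hand out structures from the static w_data,
 * w_childdata, and w_locklistdata arrays.  Once a pool is exhausted,
 * witness_dead is set and witness checking is disabled from that point on.
 */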
static struct witness *
witness_get(void)
{
	struct witness *w;

	if (witness_dead) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	if (STAILQ_EMPTY(&w_free)) {
		witness_dead = 1;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w = STAILQ_FIRST(&w_free);
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	bzero(w, sizeof(*w));
	return (w);
}

static void
witness_free(struct witness *w)
{

	STAILQ_INSERT_HEAD(&w_free, w, w_list);
}

static struct witness_child_list_entry *
witness_child_get(void)
{
	struct witness_child_list_entry *wcl;

	if (witness_dead) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	wcl = w_child_free;
	if (wcl == NULL) {
		witness_dead = 1;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_child_free = wcl->wcl_next;
	bzero(wcl, sizeof(*wcl));
	return (wcl);
}

static void
witness_child_free(struct witness_child_list_entry *wcl)
{

	wcl->wcl_next = w_child_free;
	w_child_free = wcl;
}

static struct lock_list_entry *
witness_lock_list_get(void)
{
	struct lock_list_entry *lle;

	if (witness_dead)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	lle = w_lock_list_free;
	if (lle == NULL) {
		witness_dead = 1;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_lock_list_free = lle->ll_next;
	mtx_unlock_spin(&w_mtx);
	bzero(lle, sizeof(*lle));
	return (lle);
}

static void
witness_lock_list_free(struct lock_list_entry *lle)
{

	mtx_lock_spin(&w_mtx);
	lle->ll_next = w_lock_list_free;
	w_lock_list_free = lle;
	mtx_unlock_spin(&w_mtx);
}

static struct lock_instance *
find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
{
	struct lock_list_entry *lle;
	struct lock_instance *instance;
	int i;

	for (lle = lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			instance = &lle->ll_children[i];
			if (instance->li_lock == lock)
				return (instance);
		}
	return (NULL);
}

int
witness_list_locks(struct lock_list_entry **lock_list)
{
	struct lock_list_entry *lle;
	struct lock_instance *instance;
	struct lock_object *lock;
	int i, nheld;

	nheld = 0;
	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			instance = &lle->ll_children[i];
			lock = instance->li_lock;
			printf("%s %s %s",
			    (instance->li_flags & LI_EXCLUSIVE) != 0 ?
			    "exclusive" : "shared",
			    lock->lo_class->lc_name, lock->lo_name);
			if (lock->lo_type != lock->lo_name)
				printf(" (%s)", lock->lo_type);
			printf(" r = %d (%p) locked @ %s:%d\n",
			    instance->li_flags & LI_RECURSEMASK, lock,
			    instance->li_file, instance->li_line);
			nheld++;
		}
	return (nheld);
}

/*
 * Calling this on td != curthread is bad unless we are in ddb.
 */
int
witness_list(struct thread *td)
{
	int nheld;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
#ifdef DDB
	KASSERT(td == curthread || db_active,
	    ("%s: td != curthread and we aren't in the debugger", __func__));
	if (!db_active && witness_dead)
		return (0);
#else
	KASSERT(td == curthread, ("%s: td != curthread", __func__));
	if (witness_dead)
		return (0);
#endif
	nheld = witness_list_locks(&td->td_sleeplocks);

	/*
	 * We only handle spinlocks if td == curthread.  This is somewhat broken
	 * if td is currently executing on some other CPU and holds spin locks
	 * as we won't display those locks.  If we had a MI way of getting
	 * the per-cpu data for a given cpu then we could use
	 * td->td_kse->ke_oncpu to get the list of spinlocks for this thread
	 * and "fix" this.
	 *
	 * That still wouldn't really fix this unless we locked sched_lock
	 * or stopped the other CPU to make sure it wasn't changing the list
	 * out from under us.  It is probably best to just not try to handle
	 * threads on other CPU's for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		nheld += witness_list_locks(PCPU_PTR(spinlocks));

	return (nheld);
}

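/*
 * witness_save() and witness_restore() stash and later restore the file and
 * line of a sleep lock's acquisition, presumably so that callers which
 * transiently drop and re-acquire a lock can preserve the original
 * acquisition point.
 */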
void
witness_save(struct lock_object *lock, const char **filep, int *linep)
{
	struct lock_instance *instance;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_dead || panicstr != NULL)
		return;
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("%s: lock (%s) %s is not a sleep lock", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	*filep = instance->li_file;
	*linep = instance->li_line;
}

void
witness_restore(struct lock_object *lock, const char *file, int line)
{
	struct lock_instance *instance;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_dead || panicstr != NULL)
		return;
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("%s: lock (%s) %s is not a sleep lock", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	lock->lo_witness->w_file = file;
	lock->lo_witness->w_line = line;
	instance->li_file = file;
	instance->li_line = line;
}

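/*
 * Back end for lock assertions (INVARIANT_SUPPORT only): verify that the
 * named lock is unlocked, locked, share locked, or exclusively locked by
 * the current thread, and optionally that it is or is not recursed.
 */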
void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;

	if (lock->lo_witness == NULL || witness_dead || panicstr != NULL)
		return;
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((lock->lo_class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else {
		panic("Lock (%s) %s is not sleep or spin!",
		    lock->lo_class->lc_name, lock->lo_name);
		return;
	}
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			panic("Lock (%s) %s locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			panic("Lock (%s) %s not locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
			break;
		}
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("Lock (%s) %s exclusively locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			panic("Lock (%s) %s not recursed @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			panic("Lock (%s) %s recursed @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		break;
	default:
		panic("Invalid lock assertion at %s:%d.", file, line);

	}
#endif	/* INVARIANT_SUPPORT */
}

#ifdef DDB

DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;
	pid_t pid;
	struct proc *p;

	if (have_addr) {
		pid = (addr % 16) + ((addr >> 4) % 16) * 10 +
		    ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 +
		    ((addr >> 16) % 16) * 10000;
		/* sx_slock(&allproc_lock); */
		FOREACH_PROC_IN_SYSTEM(p) {
			if (p->p_pid == pid)
				break;
		}
		/* sx_sunlock(&allproc_lock); */
		if (p == NULL) {
			db_printf("pid %d not found\n", pid);
			return;
		}
		FOREACH_THREAD_IN_PROC(p, td) {
			witness_list(td);
		}
	} else {
		td = curthread;
		witness_list(td);
	}
}

DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_display(db_printf);
}
#endif