/*-
 * Copyright (c) 2008 Isilon Systems, Inc.
 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
 * Copyright (c) 1998 Berkeley Software Design, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 * Main Entry: witness
 * Pronunciation: 'wit-n&s
 * Function: noun
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 *    testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 *    a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 *    testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 *   b : public affirmation by word or example of usually
 *    religious faith or conviction <the heroic witness to divine
 *    life -- Pilot>
 * 6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */

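/*
 * Illustrative sketch (not part of witness itself): under rule 1) above,
 * a code path such as the following, with a hypothetical mutex "foo_mtx"
 * initialized elsewhere, would be flagged:
 *
 *	mtx_lock(&foo_mtx);
 *	mtx_lock(&Giant);	<- violates rule 1): Giant after a mutex
 *
 * whereas acquiring Giant first and foo_mtx second is always permitted.
 */
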
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_stack.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/stdarg.h>

#if !defined(DDB) && !defined(STACK)
#error "DDB or STACK options are required for WITNESS"
#endif

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif

#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
#define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT		1024
#define	WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
#define	WITNESS_PENDLIST	768

/* Allocate 256 KB of stack data space */
#define	WITNESS_LO_DATA_COUNT	2048

/* Prime, gives load factor of ~2 at full load */
#define	WITNESS_LO_HASH_SIZE	1021

/*
 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
 * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_NCHILDREN	5
#define	LOCK_CHILDCOUNT	2048

#define	MAX_W_NAME	64

#define	BADSTACK_SBUF_SIZE	(256 * WITNESS_COUNT)
#define	FULLGRAPH_SBUF_SIZE	512

/*
 * These flags go in the witness relationship matrix and describe the
 * relationship between any two struct witness objects.
 */
#define	WITNESS_UNRELATED	0x00	/* No lock order relation. */
#define	WITNESS_PARENT		0x01	/* Parent, aka direct ancestor. */
#define	WITNESS_ANCESTOR	0x02	/* Direct or indirect ancestor. */
#define	WITNESS_CHILD		0x04	/* Child, aka direct descendant. */
#define	WITNESS_DESCENDANT	0x08	/* Direct or indirect descendant. */
#define	WITNESS_ANCESTOR_MASK	(WITNESS_PARENT | WITNESS_ANCESTOR)
#define	WITNESS_DESCENDANT_MASK	(WITNESS_CHILD | WITNESS_DESCENDANT)
#define	WITNESS_RELATED_MASK						\
	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
#define	WITNESS_REVERSAL	0x10	/* A lock order reversal has been
					 * observed. */
#define	WITNESS_RESERVED1	0x20	/* Unused flag, reserved. */
#define	WITNESS_RESERVED2	0x40	/* Unused flag, reserved. */
#define	WITNESS_LOCK_ORDER_KNOWN 0x80	/* This lock order is known. */

/* Descendant to ancestor flags */
#define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)

/* Ancestor to descendant flags */
#define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)

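/*
 * A small worked example of the conversions above: if w1 is a direct parent
 * of w2, w_rmatrix[w1][w2] contains WITNESS_PARENT (0x01) and
 * w_rmatrix[w2][w1] contains WITNESS_CHILD (0x04).  The two views stay
 * consistent because WITNESS_ATOD(WITNESS_PARENT) == WITNESS_CHILD
 * (0x01 << 2 == 0x04) and WITNESS_DTOA(WITNESS_CHILD) == WITNESS_PARENT
 * (0x04 >> 2 == 0x01); _isitmyx() below checks exactly this inverse
 * relationship.
 */
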
#define	WITNESS_INDEX_ASSERT(i)						\
	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT)

MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");

/*
 * Lock instances.  A lock instance is the data associated with a lock while
 * it is held by witness.  For example, a lock instance will hold the
 * recursion count of a lock.  Lock instances are held in lists.  Spin locks
 * are held in a per-cpu list while sleep locks are held in per-thread list.
 */
struct lock_instance {
	struct lock_object	*li_lock;
	const char		*li_file;
	int			li_line;
	u_int			li_flags;
};

/*
 * A simple list type used to build the list of locks held by a thread
 * or CPU.  We can't simply embed the list in struct lock_object since a
 * lock may be held by more than one thread if it is a shared lock.  Locks
 * are added to the head of the list, so we fill up each list entry from
 * "the back" logically.  To ease some of the arithmetic, we actually fill
 * in each list entry the normal way (children[0] then children[1], etc.) but
 * when we traverse the list we read children[count-1] as the first entry
 * down to children[0] as the final entry.
 */
struct lock_list_entry {
	struct lock_list_entry	*ll_next;
	struct lock_instance	ll_children[LOCK_NCHILDREN];
	u_int			ll_count;
};

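/*
 * A minimal traversal sketch for the list described above ("visit" and
 * "head" are hypothetical, not witness functions): the most recently
 * acquired lock is ll_children[ll_count - 1] of the head entry, so a scan
 * from newest to oldest looks like:
 *
 *	struct lock_list_entry *lle;
 *	int i;
 *
 *	for (lle = head; lle != NULL; lle = lle->ll_next)
 *		for (i = lle->ll_count - 1; i >= 0; i--)
 *			visit(&lle->ll_children[i]);
 *
 * This is the same order used by witness_unlock() and witness_warn() below
 * when scanning held locks.
 */
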
/*
 * The main witness structure. One of these per named lock type in the system
 * (for example, "vnode interlock").
 */
struct witness {
	char			w_name[MAX_W_NAME];
	uint32_t		w_index;  /* Index in the relationship matrix */
	struct lock_class	*w_class;
	STAILQ_ENTRY(witness)	w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness)	w_typelist;	/* Witnesses of a type. */
	struct witness		*w_hash_next;	/* Linked list in hash buckets. */
	const char		*w_file;	/* File where last acquired */
	uint32_t		w_line;		/* Line where last acquired */
	uint32_t		w_refcount;
	uint16_t		w_num_ancestors; /* direct/indirect
						  * ancestor count */
	uint16_t		w_num_descendants; /* direct/indirect
						    * descendant count */
	int16_t			w_ddb_level;
	unsigned		w_displayed:1;
	unsigned		w_reversed:1;
};

STAILQ_HEAD(witness_list, witness);

/*
 * The witness hash table. Keys are witness names (const char *), elements are
 * witness objects (struct witness *).
 */
struct witness_hash {
	struct witness	*wh_array[WITNESS_HASH_SIZE];
	uint32_t	wh_size;
	uint32_t	wh_count;
};

/*
 * Key type for the lock order data hash table.
 */
struct witness_lock_order_key {
	uint16_t	from;
	uint16_t	to;
};

struct witness_lock_order_data {
	struct stack			wlod_stack;
	struct witness_lock_order_key	wlod_key;
	struct witness_lock_order_data	*wlod_next;
};

/*
 * The witness lock order data hash table. Keys are witness index tuples
 * (struct witness_lock_order_key), elements are lock order data objects
 * (struct witness_lock_order_data).
 */
struct witness_lock_order_hash {
	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
	u_int	wloh_size;
	u_int	wloh_count;
};

#ifdef BLESSING
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

struct witness_pendhelp {
	const char		*wh_type;
	struct lock_object	*wh_lock;
};

struct witness_order_list_entry {
	const char		*w_name;
	struct lock_class	*w_class;
};

/*
 * Returns 0 if one of the locks is a spin lock and the other is not.
 * Returns 1 otherwise.
 */
static __inline int
witness_lock_type_equal(struct witness *w1, struct witness *w2)
{

	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
	    (w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
}

static __inline int
witness_lock_order_key_empty(const struct witness_lock_order_key *key)
{

	return (key->from == 0 && key->to == 0);
}

static __inline int
witness_lock_order_key_equal(const struct witness_lock_order_key *a,
    const struct witness_lock_order_key *b)
{

	return (a->from == b->from && a->to == b->to);
}

static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
		    const char *fname);
#ifdef KDB
static void	_witness_debugger(int cond, const char *msg);
#endif
static void	adopt(struct witness *parent, struct witness *child);
#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static void	depart(struct witness *w);
static struct witness	*enroll(const char *description,
			    struct lock_class *lock_class);
static struct lock_instance	*find_instance(struct lock_list_entry *list,
				    struct lock_object *lock);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static void	itismychild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
#ifdef DDB
static void	witness_ddb_compute_levels(void);
static void	witness_ddb_display(int(*)(const char *fmt, ...));
static void	witness_ddb_display_descendants(int(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_ddb_level_descendants(struct witness *parent, int l);
static void	witness_ddb_list(struct thread *td);
#endif
static void	witness_free(struct witness *m);
static struct witness	*witness_get(void);
static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
static struct witness	*witness_hash_get(const char *key);
static void	witness_hash_put(struct witness *w);
static void	witness_init_hash_tables(void);
static void	witness_increment_graph_generation(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_list_entry	*witness_lock_list_get(void);
static int	witness_lock_order_add(struct witness *parent,
		    struct witness *child);
static int	witness_lock_order_check(struct witness *parent,
		    struct witness *child);
static struct witness_lock_order_data	*witness_lock_order_get(
					    struct witness *parent,
					    struct witness *child);
static void	witness_list_lock(struct lock_instance *instance,
		    int (*prnt)(const char *fmt, ...));
static void	witness_setflag(struct lock_object *lock, int flag, int set);

#ifdef KDB
#define	witness_debugger(c)	_witness_debugger(c, __func__)
#else
#define	witness_debugger(c)
#endif

SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL, "Witness Locking");

/*
 * If set to 0, lock order checking is disabled.  If set to -1,
 * witness is completely disabled.  Otherwise witness performs full
 * lock order checking for all locks.  At runtime, lock order checking
 * may be toggled.  However, witness cannot be reenabled once it is
 * completely disabled.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is 1, it will cause the system
 * to drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
    0, "");

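/*
 * Example (illustrative): the knobs above can be set as loader tunables or,
 * where the flags allow, at runtime.  In loader.conf(5):
 *
 *	debug.witness.watch="0"		# disable lock order checking
 *	debug.witness.skipspin="1"	# do not enroll spin locks
 *
 * or via sysctl(8): "sysctl debug.witness.watch=-1" shuts witness off
 * completely; per the comment above, it cannot be re-enabled afterwards.
 */
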
/*
 * Call this to print out the relations between locks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");

/*
 * Call this to print out the stacks recorded for bad lock orders.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");

static struct mtx w_mtx;

/* w_list */
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);

/* w_typelist */
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);

/* lock list */
static struct lock_list_entry *w_lock_list_free = NULL;
static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
static u_int pending_cnt;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");

static struct witness *w_data;
static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
static struct witness_hash w_hash;	/* The witness hash table. */

/* The lock order data hash */
static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
static struct witness_lock_order_data *w_lofree = NULL;
static struct witness_lock_order_hash w_lohash;
static int w_max_used_index = 0;
static unsigned int w_generation = 0;
static const char w_notrunning[] = "Witness not running\n";
static const char w_stillcold[] = "Witness is still cold\n";


static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "allprison", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_rw },
#ifdef	HWPMC_HOOKS
	{ "pmc-sleep", &lock_class_mtx_sleep },
#endif
	{ "time lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_rw },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * IPv4 multicast:
	 * protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * IPv6 multicast:
	 * protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in6_multi_mtx", &lock_class_mtx_sleep },
	{ "mld_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp_global_rwlock", &lock_class_rw },
	{ "unp_list_lock", &lock_class_mtx_sleep },
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_rw },
	{ "udpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_rw },
	{ "tcpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_mtx_sleep },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },

	/*
	 * IEEE 802.11
	 */
	{ "802.11 com lock", &lock_class_mtx_sleep},
	{ NULL, NULL },
	/*
	 * Network drivers
	 */
	{ "network driver", &lock_class_mtx_sleep},
	{ NULL, NULL },

	/*
	 * Netgraph
	 */
	{ "ng_node", &lock_class_mtx_sleep },
	{ "ng_worklist", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "system map", &lock_class_mtx_sleep },
	{ "vm page queue mutex", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * VM
	 *
	 */
	{ "vm object", &lock_class_mtx_sleep },
	{ "page lock", &lock_class_mtx_sleep },
	{ "vm page queue mutex", &lock_class_mtx_sleep },
	{ "pmap", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * kqueue/VFS interaction
	 */
	{ "kqueue", &lock_class_mtx_sleep },
	{ "struct mount mtx", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * ZFS locking
	 */
	{ "dn->dn_mtx", &lock_class_sx },
	{ "dr->dt.di.dr_mtx", &lock_class_sx },
	{ "db->db_mtx", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
	{ "scrlock", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "pcib_mtx", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
#endif
	{ "scc_hwmtx", &lock_class_mtx_spin },
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "fast_taskqueue", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-per-proc", &lock_class_mtx_spin },
#endif
	{ "process slock", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "umtx lock", &lock_class_mtx_spin },
	{ "rm_spinlock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "turnstile lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	{ "syscons video lock", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#endif
#ifdef __powerpc__
	{ "tlb0", &lock_class_mtx_spin },
#endif
	/*
	 * leaf locks
	 */
	{ "intrcnt", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#if defined(SMP) && defined(__sparc64__)
	{ "ipi", &lock_class_mtx_spin },
#endif
#ifdef __i386__
	{ "allpmaps", &lock_class_mtx_spin },
	{ "descriptor tables", &lock_class_mtx_spin },
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "cpuset", &lock_class_mtx_spin },
	{ "mprof lock", &lock_class_mtx_spin },
	{ "zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-leaf", &lock_class_mtx_spin },
#endif
	{ "blocked lock", &lock_class_mtx_spin },
	{ NULL, NULL },
	{ NULL, NULL }
};

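/*
 * Example (hypothetical locks): to teach witness a fixed order between two
 * new mutexes "foo" and "bar" (foo always first), a run like the following
 * would be appended before the final terminator above:
 *
 *	{ "foo", &lock_class_mtx_sleep },
 *	{ "bar", &lock_class_mtx_sleep },
 *	{ NULL, NULL },
 *
 * Each NULL-terminated run is enrolled as a parent/child chain by
 * witness_initialize() below.
 */
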
#ifdef BLESSING
/*
 * Pairs of locks which have been blessed
 * Don't complain about order problems with blessed locks
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code does
 * assume that the early boot is single-threaded at least until after this
 * routine is completed.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	w_data = malloc(sizeof (struct witness) * WITNESS_COUNT, M_WITNESS,
	    M_NOWAIT | M_ZERO);

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = WITNESS_COUNT - 1; i >= 0; i--) {
		w = &w_data[i];
		memset(w, 0, sizeof(*w));
		w_data[i].w_index = i;	/* Witness index never changes. */
		witness_free(w);
	}
	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
	    ("%s: Invalid list of free witness objects", __func__));

	/* The witness with index 0 is left unused, to aid in debugging. */
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;

	memset(w_rmatrix, 0,
	    (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));

	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);
	witness_init_hash_tables();

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			itismychild(w, w1);
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
		lock = pending_locks[i].wh_lock;
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(pending_locks[i].wh_type,
		    LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
    NULL);

void
witness_init(struct lock_object *lock, const char *type)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_initialize() by adding
	 * it to the pending_locks list.  If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch < 1 || panicstr != NULL ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		pending_locks[pending_cnt].wh_lock = lock;
		pending_locks[pending_cnt++].wh_type = type;
		if (pending_cnt > WITNESS_PENDLIST)
			panic("%s: pending locks list is too small, bump it\n",
			    __func__);
	} else
		lock->lo_witness = enroll(type, class);
}

void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
		return;
	w = lock->lo_witness;

	mtx_lock_spin(&w_mtx);
	MPASS(w->w_refcount > 0);
	w->w_refcount--;

	if (w->w_refcount == 0)
		depart(w);
	mtx_unlock_spin(&w_mtx);
}

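/*
 * Illustrative note: witness_init() is not normally called directly; it
 * runs as part of lock initialization.  For example, with a hypothetical
 * mutex:
 *
 *	struct mtx foo_mtx;
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *
 * a "foo" witness is enrolled (LO_WITNESS is set by default), while passing
 * MTX_NOWITNESS instead would leave lo_witness NULL and exempt the lock
 * from checking.
 */
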
#ifdef DDB
static void
witness_ddb_compute_levels(void)
{
	struct witness *w;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_ddb_level = -1;

	/*
	 * Look for locks with no parents and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {

		/* If the witness has ancestors (is not a root), skip it. */
		if (w->w_num_ancestors > 0)
			continue;
		witness_ddb_level_descendants(w, 0);
	}
}

static void
witness_ddb_level_descendants(struct witness *w, int l)
{
	int i;

	if (w->w_ddb_level >= l)
		return;

	w->w_ddb_level = l;
	l++;

	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_level_descendants(&w_data[i], l);
	}
}

static void
witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
    struct witness *w, int indent)
{
	int i;

	for (i = 0; i < indent; i++)
		prnt(" ");
	prnt("%s (type: %s, depth: %d, active refs: %d)",
	    w->w_name, w->w_class->lc_name,
	    w->w_ddb_level, w->w_refcount);
	if (w->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	w->w_displayed = 1;
	if (w->w_file != NULL && w->w_line != 0)
		prnt(" -- last acquired @ %s:%d\n", w->w_file,
		    w->w_line);
	else
		prnt(" -- never acquired\n");
	indent++;
	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_display_descendants(prnt, &w_data[i],
			    indent);
	}
}

static void
witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_ddb_level > 0)
			continue;

		/* This lock has no ancestors - display its descendants. */
		witness_ddb_display_descendants(prnt, w, 0);
	}
}

static void
witness_ddb_display(int(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	witness_ddb_compute_levels();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_ddb_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_ddb_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s (type: %s, depth: %d)\n", w->w_name,
		    w->w_class->lc_name, w->w_ddb_level);
	}
}
#endif /* DDB */

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == -1 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	mtx_assert(&w_mtx, MA_NOTOWNED);
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (witness_watch &&
	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
	itismychild(lock1->lo_witness, lock2->lo_witness);
	mtx_unlock_spin(&w_mtx);
	return (0);
}

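/*
 * Usage sketch (hypothetical locks "a" and "b"): a subsystem that wants to
 * declare "a before b" without actually acquiring them nested could do:
 *
 *	error = witness_defineorder(&a.lock_object, &b.lock_object);
 *
 * and must be prepared for EDOOFUS when witness already believes the
 * opposite order, and EINVAL when either lock is not checked by witness.
 */
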
1135 */ 1136 if (lle == NULL) 1137 return; 1138 plock = &lle->ll_children[lle->ll_count - 1]; 1139 } 1140 } 1141 1142 /* 1143 * Try to perform most checks without a lock. If this succeeds we 1144 * can skip acquiring the lock and return success. 1145 */ 1146 w1 = plock->li_lock->lo_witness; 1147 if (witness_lock_order_check(w1, w)) 1148 return; 1149 1150 /* 1151 * Check for duplicate locks of the same type. Note that we only 1152 * have to check for this on the last lock we just acquired. Any 1153 * other cases will be caught as lock order violations. 1154 */ 1155 mtx_lock_spin(&w_mtx); 1156 witness_lock_order_add(w1, w); 1157 if (w1 == w) { 1158 i = w->w_index; 1159 if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) && 1160 !(w_rmatrix[i][i] & WITNESS_REVERSAL)) { 1161 w_rmatrix[i][i] |= WITNESS_REVERSAL; 1162 w->w_reversed = 1; 1163 mtx_unlock_spin(&w_mtx); 1164 printf( 1165 "acquiring duplicate lock of same type: \"%s\"\n", 1166 w->w_name); 1167 printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name, 1168 plock->li_file, plock->li_line); 1169 printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line); 1170 witness_debugger(1); 1171 } else 1172 mtx_unlock_spin(&w_mtx); 1173 return; 1174 } 1175 mtx_assert(&w_mtx, MA_OWNED); 1176 1177 /* 1178 * If we know that the lock we are acquiring comes after 1179 * the lock we most recently acquired in the lock order tree, 1180 * then there is no need for any further checks. 1181 */ 1182 if (isitmychild(w1, w)) 1183 goto out; 1184 1185 for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) { 1186 for (i = lle->ll_count - 1; i >= 0; i--, j++) { 1187 1188 MPASS(j < WITNESS_COUNT); 1189 lock1 = &lle->ll_children[i]; 1190 1191 /* 1192 * Ignore the interlock the first time we see it. 1193 */ 1194 if (interlock != NULL && interlock == lock1->li_lock) { 1195 interlock = NULL; 1196 continue; 1197 } 1198 1199 /* 1200 * If this lock doesn't undergo witness checking, 1201 * then skip it. 1202 */ 1203 w1 = lock1->li_lock->lo_witness; 1204 if (w1 == NULL) { 1205 KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0, 1206 ("lock missing witness structure")); 1207 continue; 1208 } 1209 1210 /* 1211 * If we are locking Giant and this is a sleepable 1212 * lock, then skip it. 1213 */ 1214 if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 && 1215 lock == &Giant.lock_object) 1216 continue; 1217 1218 /* 1219 * If we are locking a sleepable lock and this lock 1220 * is Giant, then skip it. 1221 */ 1222 if ((lock->lo_flags & LO_SLEEPABLE) != 0 && 1223 lock1->li_lock == &Giant.lock_object) 1224 continue; 1225 1226 /* 1227 * If we are locking a sleepable lock and this lock 1228 * isn't sleepable, we want to treat it as a lock 1229 * order violation to enfore a general lock order of 1230 * sleepable locks before non-sleepable locks. 1231 */ 1232 if (((lock->lo_flags & LO_SLEEPABLE) != 0 && 1233 (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0)) 1234 goto reversal; 1235 1236 /* 1237 * If we are locking Giant and this is a non-sleepable 1238 * lock, then treat it as a reversal. 1239 */ 1240 if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 && 1241 lock == &Giant.lock_object) 1242 goto reversal; 1243 1244 /* 1245 * Check the lock order hierarchy for a reveresal. 1246 */ 1247 if (!isitmydescendant(w, w1)) 1248 continue; 1249 reversal: 1250 1251 /* 1252 * We have a lock order violation, check to see if it 1253 * is allowed or has already been yelled about. 1254 */ 1255 #ifdef BLESSING 1256 1257 /* 1258 * If the lock order is blessed, just bail. 
			/* Bail if this violation is known */
			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
				goto out;

			/* Record this as a violation */
			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
			w->w_reversed = w1->w_reversed = 1;
			witness_increment_graph_generation();
			mtx_unlock_spin(&w_mtx);

			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.lock_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");

			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, lock1->li_file, lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name, file, line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_witness->w_name,
				    lock2->li_file, lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, lock1->li_file, lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name, file, line);
			}
			witness_debugger(1);
			return;
		}
	}

	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(plock->li_lock == &Giant.lock_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    w->w_name, plock->li_lock->lo_witness->w_name);
		itismychild(plock->li_lock->lo_witness, w);
	}
out:
	mtx_unlock_spin(&w_mtx);
}

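/*
 * Illustrative sketch of what witness_checkorder() catches (hypothetical
 * locks "one" and "two"): if thread A does
 *
 *	mtx_lock(&one); mtx_lock(&two);
 *
 * and thread B later does
 *
 *	mtx_lock(&two); mtx_lock(&one);
 *
 * the first pair records "one" as an ancestor of "two", so B's second
 * acquire is reported as a lock order reversal, with the recorded stacks
 * available via the debug.witness.badstacks sysctl above.
 */
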
void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;
	file = fixup_filename(file);

	/* Determine lock list for this lock. */
	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("upgrade of exclusive lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK, file, line);
	}
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("downgrade of shared lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK, file, line);
	}
	instance->li_flags &= ~LI_EXCLUSIVE;
}

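/*
 * Usage note (illustrative, not a complete list of callers):
 * witness_upgrade() and witness_downgrade() are driven by the lock
 * implementations rather than called directly; for example, a successful
 * sx_try_upgrade(9) reports the shared-to-exclusive transition here so the
 * instance's LI_EXCLUSIVE flag is set, and sx_downgrade(9) reports the
 * reverse transition so the flag is cleared.
 */
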
void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
		return;
	td = curthread;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	lle = *lock_list;
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}

	/*
	 * When WITNESS is disabled via witness_watch, locks may still be
	 * registered in the td_sleeplocks queue.  Make sure those queues
	 * get flushed by searching for any such registered locks here and
	 * removing them.
	 */
	if (witness_watch > 0)
		panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
		    lock->lo_name, file, line);
	else
		return;
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while exclusively locked from %s:%d\n",
		    instance->li_file, instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while share locked from %s:%d\n", instance->li_file,
		    instance->li_line);
		panic("share->uexcl");
	}
	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}
	/* The lock is now being dropped, check for NORELEASE flag */
	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
		printf("forbidden unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		panic("lock marked norelease");
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/*
	 * In order to reduce contention on w_mtx, we want to always keep a
	 * head object in the lists so that frequent allocation from the
	 * free witness pool (and the subsequent locking) is avoided.
	 * To keep the code simple, a totally unloaded head object also
	 * means that there are no further objects in the list, so list
	 * ownership needs to be handed over to another object if the
	 * current head needs to be freed.
	 */
	if ((*lock_list)->ll_count == 0) {
		if (*lock_list == lle) {
			if (lle->ll_next == NULL)
				return;
		} else
			lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

void
witness_thread_exit(struct thread *td)
{
	struct lock_list_entry *lle;
	int i, n;

	lle = td->td_sleeplocks;
	if (lle == NULL || panicstr != NULL)
		return;
	if (lle->ll_count != 0) {
		for (n = 0; lle != NULL; lle = lle->ll_next)
			for (i = lle->ll_count - 1; i >= 0; i--) {
				if (n == 0)
		printf("Thread %p exiting with the following locks held:\n",
					    td);
				n++;
				witness_list_lock(&lle->ll_children[i], printf);

			}
		panic("Thread %p cannot exit while holding sleeplocks\n", td);
	}
	witness_lock_list_free(lle);
}

/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch < 1 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.lock_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1, printf);
		}

	/*
	 * Pin the thread in order to avoid problems with thread migration.
	 * Once all the verifications of spinlock ownership have passed,
	 * the thread is on a safe path and can be unpinned.
	 */
	sched_pin();
	lock_list = PCPU_GET(spinlocks);
	if (lock_list != NULL && lock_list->ll_count != 0) {
		sched_unpin();

		/*
		 * We should only have one spinlock and as long as
		 * the flags cannot match for this lock's class,
		 * check if the first spinlock is the one curthread
		 * should hold.
		 */
		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
		    lock1->li_lock == lock && n == 0)
			return (0);

		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
		printf(" with the following");
		if (flags & WARN_SLEEPOK)
			printf(" non-sleepable");
		printf(" locks held:\n");
		n += witness_list_locks(&lock_list, printf);
	} else
		sched_unpin();
	if (flags & WARN_PANIC && n)
		panic("%s", __func__);
	else
		witness_debugger(n);
	return (n);
}

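/*
 * Usage sketch: the usual caller is the WITNESS_WARN() macro, e.g.
 *
 *	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 *	    "malloc(M_WAITOK)");
 *
 * which prints the supplied message plus the offending locks whenever the
 * thread holds anything that could lead to a sleeping-with-locks bug.
 */
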
const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;
	struct witness_list *typelist;

	MPASS(description != NULL);

	if (witness_watch == -1 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK)) {
		if (witness_skipspin)
			return (NULL);
		else
			typelist = &w_spin;
	} else if ((lock_class->lc_flags & LC_SLEEPLOCK))
		typelist = &w_sleep;
	else
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);

	mtx_lock_spin(&w_mtx);
	w = witness_hash_get(description);
	if (w)
		goto found;
	if ((w = witness_get()) == NULL)
		return (NULL);
	MPASS(strlen(description) < MAX_W_NAME);
	strcpy(w->w_name, description);
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK) {
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
		w_spin_cnt++;
	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
		w_sleep_cnt++;
	}

	/* Insert new witness into the hash */
	witness_hash_put(w);
	witness_increment_graph_generation();
	mtx_unlock_spin(&w_mtx);
	return (w);
found:
	w->w_refcount++;
	mtx_unlock_spin(&w_mtx);
	if (lock_class != w->w_class)
		panic(
		    "lock (%s) %s does not match earlier (%s) lock",
		    description, lock_class->lc_name,
		    w->w_class->lc_name);
	return (w);
}

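/*
 * Illustrative consequence of the name-based lookup in enroll()
 * (hypothetical locks "a" and "b"): two locks initialized with the same
 * name and class, e.g.
 *
 *	mtx_init(&a, "foo", NULL, MTX_DEF);
 *	mtx_init(&b, "foo", NULL, MTX_DEF);
 *
 * share one witness (its w_refcount becomes 2), so ordering rules learned
 * through one lock apply to the other; mixing lock classes under a single
 * name panics, as seen above.
 */
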
1783 */ 1784 w->w_file = NULL; 1785 w->w_line = 0; 1786 witness_increment_graph_generation(); 1787 } 1788 1789 1790 static void 1791 adopt(struct witness *parent, struct witness *child) 1792 { 1793 int pi, ci, i, j; 1794 1795 if (witness_cold == 0) 1796 mtx_assert(&w_mtx, MA_OWNED); 1797 1798 /* If the relationship is already known, there's no work to be done. */ 1799 if (isitmychild(parent, child)) 1800 return; 1801 1802 /* When the structure of the graph changes, bump up the generation. */ 1803 witness_increment_graph_generation(); 1804 1805 /* 1806 * The hard part ... create the direct relationship, then propagate all 1807 * indirect relationships. 1808 */ 1809 pi = parent->w_index; 1810 ci = child->w_index; 1811 WITNESS_INDEX_ASSERT(pi); 1812 WITNESS_INDEX_ASSERT(ci); 1813 MPASS(pi != ci); 1814 w_rmatrix[pi][ci] |= WITNESS_PARENT; 1815 w_rmatrix[ci][pi] |= WITNESS_CHILD; 1816 1817 /* 1818 * If parent was not already an ancestor of child, 1819 * then we increment the descendant and ancestor counters. 1820 */ 1821 if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) { 1822 parent->w_num_descendants++; 1823 child->w_num_ancestors++; 1824 } 1825 1826 /* 1827 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as 1828 * an ancestor of 'pi' during this loop. 1829 */ 1830 for (i = 1; i <= w_max_used_index; i++) { 1831 if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 && 1832 (i != pi)) 1833 continue; 1834 1835 /* Find each descendant of 'i' and mark it as a descendant. */ 1836 for (j = 1; j <= w_max_used_index; j++) { 1837 1838 /* 1839 * Skip children that are already marked as 1840 * descendants of 'i'. 1841 */ 1842 if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) 1843 continue; 1844 1845 /* 1846 * We are only interested in descendants of 'ci'. Note 1847 * that 'ci' itself is counted as a descendant of 'ci'. 1848 */ 1849 if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 && 1850 (j != ci)) 1851 continue; 1852 w_rmatrix[i][j] |= WITNESS_ANCESTOR; 1853 w_rmatrix[j][i] |= WITNESS_DESCENDANT; 1854 w_data[i].w_num_descendants++; 1855 w_data[j].w_num_ancestors++; 1856 1857 /* 1858 * Make sure we aren't marking a node as both an 1859 * ancestor and descendant. We should have caught 1860 * this as a lock order reversal earlier. 1861 */ 1862 if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) && 1863 (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) { 1864 printf("witness rmatrix paradox! [%d][%d]=%d " 1865 "both ancestor and descendant\n", 1866 i, j, w_rmatrix[i][j]); 1867 kdb_backtrace(); 1868 printf("Witness disabled.\n"); 1869 witness_watch = -1; 1870 } 1871 if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) && 1872 (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) { 1873 printf("witness rmatrix paradox! [%d][%d]=%d " 1874 "both ancestor and descendant\n", 1875 j, i, w_rmatrix[j][i]); 1876 kdb_backtrace(); 1877 printf("Witness disabled.\n"); 1878 witness_watch = -1; 1879 } 1880 } 1881 } 1882 } 1883 1884 static void 1885 itismychild(struct witness *parent, struct witness *child) 1886 { 1887 1888 MPASS(child != NULL && parent != NULL); 1889 if (witness_cold == 0) 1890 mtx_assert(&w_mtx, MA_OWNED); 1891 1892 if (!witness_lock_type_equal(parent, child)) { 1893 if (witness_cold == 0) 1894 mtx_unlock_spin(&w_mtx); 1895 panic("%s: parent \"%s\" (%s) and child \"%s\" (%s) are not " 1896 "the same lock type", __func__, parent->w_name, 1897 parent->w_class->lc_name, child->w_name, 1898 child->w_class->lc_name); 1899 } 1900 adopt(parent, child); 1901 } 1902 1903 /* 1904 * Generic code for the isitmy*() functions. 
static void
itismychild(struct witness *parent, struct witness *child)
{

	MPASS(child != NULL && parent != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	if (!witness_lock_type_equal(parent, child)) {
		if (witness_cold == 0)
			mtx_unlock_spin(&w_mtx);
		panic("%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
		    "the same lock type", __func__, parent->w_name,
		    parent->w_class->lc_name, child->w_name,
		    child->w_class->lc_name);
	}
	adopt(parent, child);
}

/*
 * Generic code for the isitmy*() functions.  The rmask parameter is the
 * expected relationship of w1 to w2.
 */
static int
_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
{
	unsigned char r1, r2;
	int i1, i2;

	i1 = w1->w_index;
	i2 = w2->w_index;
	WITNESS_INDEX_ASSERT(i1);
	WITNESS_INDEX_ASSERT(i2);
	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;

	/*
	 * The flags on one had better be the inverse of the flags on
	 * the other.
	 */
	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
	    (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
		printf("%s: rmatrix mismatch between %s (index %d) and %s "
		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
		    "w_rmatrix[%d][%d] == %hhx\n",
		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
		    i2, i1, r2);
		kdb_backtrace();
		printf("Witness disabled.\n");
		witness_watch = -1;
	}
	return (r1 & rmask);
}

/*
 * Checks if @child is a direct child of @parent.
 */
static int
isitmychild(struct witness *parent, struct witness *child)
{

	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
}

/*
 * Checks if @descendant is a direct or indirect descendant of @ancestor.
 */
static int
isitmydescendant(struct witness *ancestor, struct witness *descendant)
{

	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
	    __func__));
}

#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < blessed_count; i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_name, b->b_lock1) == 0) {
			if (strcmp(w2->w_name, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_name, b->b_lock2) == 0)
			if (strcmp(w2->w_name, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}
#endif
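
/*
 * Illustrative sketch (not compiled in): with BLESSING defined, a known,
 * deliberate order violation can be suppressed by listing the pair of
 * witness names.  blessed() accepts the pair in either order.  The entry
 * below is a hypothetical example, not a shipped policy:
 */
#if 0
static struct witness_blessed blessed_list[] = {
	{ "vnode interlock", "dirhash" },
};
static int blessed_count = sizeof(blessed_list) / sizeof(blessed_list[0]);
#endif
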
static struct witness *
witness_get(void)
{
	struct witness *w;
	int index;

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	if (witness_watch == -1) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	if (STAILQ_EMPTY(&w_free)) {
		witness_watch = -1;
		mtx_unlock_spin(&w_mtx);
		printf("WITNESS: unable to allocate a new witness object\n");
		return (NULL);
	}
	w = STAILQ_FIRST(&w_free);
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;
	index = w->w_index;
	MPASS(index > 0 && index == w_max_used_index + 1 &&
	    index < WITNESS_COUNT);
	bzero(w, sizeof(*w));
	w->w_index = index;
	if (index > w_max_used_index)
		w_max_used_index = index;
	return (w);
}

static void
witness_free(struct witness *w)
{

	STAILQ_INSERT_HEAD(&w_free, w, w_list);
	w_free_cnt++;
}

static struct lock_list_entry *
witness_lock_list_get(void)
{
	struct lock_list_entry *lle;

	if (witness_watch == -1)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	lle = w_lock_list_free;
	if (lle == NULL) {
		witness_watch = -1;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_lock_list_free = lle->ll_next;
	mtx_unlock_spin(&w_mtx);
	bzero(lle, sizeof(*lle));
	return (lle);
}

static void
witness_lock_list_free(struct lock_list_entry *lle)
{

	mtx_lock_spin(&w_mtx);
	lle->ll_next = w_lock_list_free;
	w_lock_list_free = lle;
	mtx_unlock_spin(&w_mtx);
}

static struct lock_instance *
find_instance(struct lock_list_entry *list, struct lock_object *lock)
{
	struct lock_list_entry *lle;
	struct lock_instance *instance;
	int i;

	for (lle = list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			instance = &lle->ll_children[i];
			if (instance->li_lock == lock)
				return (instance);
		}
	return (NULL);
}

static void
witness_list_lock(struct lock_instance *instance,
    int (*prnt)(const char *fmt, ...))
{
	struct lock_object *lock;

	lock = instance->li_lock;
	prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
	if (lock->lo_witness->w_name != lock->lo_name)
		prnt(" (%s)", lock->lo_witness->w_name);
	prnt(" r = %d (%p) locked @ %s:%d\n",
	    instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
	    instance->li_line);
}

#ifdef DDB
static int
witness_thread_has_locks(struct thread *td)
{

	if (td->td_sleeplocks == NULL)
		return (0);
	return (td->td_sleeplocks->ll_count != 0);
}

static int
witness_proc_has_locks(struct proc *p)
{
	struct thread *td;

	FOREACH_THREAD_IN_PROC(p, td) {
		if (witness_thread_has_locks(td))
			return (1);
	}
	return (0);
}
#endif

int
witness_list_locks(struct lock_list_entry **lock_list,
    int (*prnt)(const char *fmt, ...))
{
	struct lock_list_entry *lle;
	int i, nheld;

	nheld = 0;
	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			witness_list_lock(&lle->ll_children[i], prnt);
			nheld++;
		}
	return (nheld);
}
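
/*
 * Sample witness_list_lock() output (illustrative values):
 *
 *	exclusive sleep mutex so_snd (so_snd) r = 0 (0xc40a1234) locked @ kern/uipc_sockbuf.c:123
 *
 * "r" is the recursion depth taken from LI_RECURSEMASK; the name in
 * parentheses is the witness name, printed whenever its pointer differs
 * from the lock's own lo_name.
 */
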
/*
 * This is a bit risky at best.  We call this function when we have timed
 * out acquiring a spin lock, and we assume that the other CPU is stuck
 * with this lock held.  So, we go groveling around in the other CPU's
 * per-cpu data to try to find the lock instance for this spin lock to
 * see when it was last acquired.
 */
void
witness_display_spinlock(struct lock_object *lock, struct thread *owner,
    int (*prnt)(const char *fmt, ...))
{
	struct lock_instance *instance;
	struct pcpu *pc;

	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
		return;
	pc = pcpu_find(owner->td_oncpu);
	instance = find_instance(pc->pc_spinlocks, lock);
	if (instance != NULL)
		witness_list_lock(instance, prnt);
}

void
witness_save(struct lock_object *lock, const char **filep, int *linep)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	*filep = instance->li_file;
	*linep = instance->li_line;
}

void
witness_restore(struct lock_object *lock, const char *file, int line)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	lock->lo_witness->w_file = file;
	lock->lo_witness->w_line = line;
	instance->li_file = file;
	instance->li_line = line;
}
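
/*
 * Illustrative sketch (assumed usage): the save/restore pair preserves
 * the file and line a lock was first acquired at across a temporary
 * drop, so later diagnostics keep blaming the original acquisition site:
 *
 *	const char *file;
 *	int line;
 *
 *	witness_save(&m->lock_object, &file, &line);
 *	mtx_unlock(m);
 *	...
 *	mtx_lock(m);
 *	witness_restore(&m->lock_object, file, line);
 *
 * The WITNESS_SAVE()/WITNESS_RESTORE() macros in <sys/lock.h> are the
 * usual way to spell this.
 */
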
void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else {
		panic("Lock (%s) %s is not sleep or spin!",
		    class->lc_name, lock->lo_name);
	}
	file = fixup_filename(file);
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			panic("Lock (%s) %s locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			panic("Lock (%s) %s not locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
			break;
		}
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("Lock (%s) %s exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			panic("Lock (%s) %s not recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			panic("Lock (%s) %s recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	default:
		panic("Invalid lock assertion at %s:%d.", file, line);
	}
#endif	/* INVARIANT_SUPPORT */
}

static void
witness_setflag(struct lock_object *lock, int flag, int set)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);

	if (set)
		instance->li_flags |= flag;
	else
		instance->li_flags &= ~flag;
}

void
witness_norelease(struct lock_object *lock)
{

	witness_setflag(lock, LI_NORELEASE, 1);
}

void
witness_releaseok(struct lock_object *lock)
{

	witness_setflag(lock, LI_NORELEASE, 0);
}
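
/*
 * Illustrative sketch (assumed usage): code that lends a held lock to a
 * callee which must return with the lock still held can bracket the call:
 *
 *	witness_norelease(&m->lock_object);
 *	callee(arg);
 *	witness_releaseok(&m->lock_object);
 *
 * While LI_NORELEASE is set, the unlock path treats a release of the
 * lock as an error.
 */
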
#ifdef DDB
static void
witness_ddb_list(struct thread *td)
{

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	KASSERT(kdb_active, ("%s: not in the debugger", __func__));

	if (witness_watch < 1)
		return;

	witness_list_locks(&td->td_sleeplocks, db_printf);

	/*
	 * We only handle spinlocks if td == curthread.  This is somewhat broken
	 * if td is currently executing on some other CPU and holds spin locks
	 * as we won't display those locks.  If we had a MI way of getting
	 * the per-cpu data for a given cpu then we could use
	 * td->td_oncpu to get the list of spinlocks for this thread
	 * and "fix" this.
	 *
	 * That still wouldn't really fix this unless we locked the scheduler
	 * lock or stopped the other CPU to make sure it wasn't changing the
	 * list out from under us.  It is probably best to just not try to
	 * handle threads on other CPUs for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		witness_list_locks(PCPU_PTR(spinlocks), db_printf);
}

DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;

	if (have_addr)
		td = db_lookup_thread(addr, TRUE);
	else
		td = kdb_thread;
	witness_ddb_list(td);
}

DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
{
	struct thread *td;
	struct proc *p;

	/*
	 * It would be nice to list only threads and processes that actually
	 * hold sleep locks, but that information is currently not exported
	 * by WITNESS.
	 */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!witness_proc_has_locks(p))
			continue;
		FOREACH_THREAD_IN_PROC(p, td) {
			if (!witness_thread_has_locks(td))
				continue;
			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
			    p->p_comm, td, td->td_tid);
			witness_ddb_list(td);
		}
	}
}
DB_SHOW_ALIAS(alllocks, db_witness_list_all)

DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_ddb_display(db_printf);
}
#endif
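
/*
 * Illustrative DDB session (command names taken from the definitions
 * above, output elided):
 *
 *	db> show locks		locks held by the current/selected thread
 *	db> show alllocks	every thread currently holding a sleep lock
 *	db> show witness	dump of the lock-order graph
 */
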
2459 */ 2460 *tmp_w2 = *w2; 2461 w_rmatrix1 = (unsigned int)w_rmatrix[i][j]; 2462 w_rmatrix2 = (unsigned int)w_rmatrix[j][i]; 2463 2464 if (data1) { 2465 stack_zero(&tmp_data1->wlod_stack); 2466 stack_copy(&data1->wlod_stack, 2467 &tmp_data1->wlod_stack); 2468 } 2469 if (data2 && data2 != data1) { 2470 stack_zero(&tmp_data2->wlod_stack); 2471 stack_copy(&data2->wlod_stack, 2472 &tmp_data2->wlod_stack); 2473 } 2474 mtx_unlock_spin(&w_mtx); 2475 2476 sbuf_printf(sb, 2477 "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n", 2478 tmp_w1->w_name, tmp_w1->w_class->lc_name, 2479 tmp_w2->w_name, tmp_w2->w_class->lc_name); 2480 #if 0 2481 sbuf_printf(sb, 2482 "w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n", 2483 tmp_w1->name, tmp_w2->w_name, w_rmatrix1, 2484 tmp_w2->name, tmp_w1->w_name, w_rmatrix2); 2485 #endif 2486 if (data1) { 2487 sbuf_printf(sb, 2488 "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n", 2489 tmp_w1->w_name, tmp_w1->w_class->lc_name, 2490 tmp_w2->w_name, tmp_w2->w_class->lc_name); 2491 stack_sbuf_print(sb, &tmp_data1->wlod_stack); 2492 sbuf_printf(sb, "\n"); 2493 } 2494 if (data2 && data2 != data1) { 2495 sbuf_printf(sb, 2496 "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n", 2497 tmp_w2->w_name, tmp_w2->w_class->lc_name, 2498 tmp_w1->w_name, tmp_w1->w_class->lc_name); 2499 stack_sbuf_print(sb, &tmp_data2->wlod_stack); 2500 sbuf_printf(sb, "\n"); 2501 } 2502 } 2503 } 2504 mtx_lock_spin(&w_mtx); 2505 if (generation != w_generation) { 2506 mtx_unlock_spin(&w_mtx); 2507 2508 /* 2509 * The graph changed while we were printing stack data, 2510 * try again. 2511 */ 2512 req->oldidx = 0; 2513 sbuf_clear(sb); 2514 goto restart; 2515 } 2516 mtx_unlock_spin(&w_mtx); 2517 2518 /* Free temporary storage space. */ 2519 free(tmp_data1, M_TEMP); 2520 free(tmp_data2, M_TEMP); 2521 free(tmp_w1, M_TEMP); 2522 free(tmp_w2, M_TEMP); 2523 2524 sbuf_finish(sb); 2525 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1); 2526 sbuf_delete(sb); 2527 2528 return (error); 2529 } 2530 2531 static int 2532 sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS) 2533 { 2534 struct witness *w; 2535 struct sbuf *sb; 2536 int error; 2537 2538 if (witness_watch < 1) { 2539 error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning)); 2540 return (error); 2541 } 2542 if (witness_cold) { 2543 error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold)); 2544 return (error); 2545 } 2546 error = 0; 2547 2548 error = sysctl_wire_old_buffer(req, 0); 2549 if (error != 0) 2550 return (error); 2551 sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req); 2552 if (sb == NULL) 2553 return (ENOMEM); 2554 sbuf_printf(sb, "\n"); 2555 2556 mtx_lock_spin(&w_mtx); 2557 STAILQ_FOREACH(w, &w_all, w_list) 2558 w->w_displayed = 0; 2559 STAILQ_FOREACH(w, &w_all, w_list) 2560 witness_add_fullgraph(sb, w); 2561 mtx_unlock_spin(&w_mtx); 2562 2563 /* 2564 * Close the sbuf and return to userland. 
2565 */ 2566 error = sbuf_finish(sb); 2567 sbuf_delete(sb); 2568 2569 return (error); 2570 } 2571 2572 static int 2573 sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS) 2574 { 2575 int error, value; 2576 2577 value = witness_watch; 2578 error = sysctl_handle_int(oidp, &value, 0, req); 2579 if (error != 0 || req->newptr == NULL) 2580 return (error); 2581 if (value > 1 || value < -1 || 2582 (witness_watch == -1 && value != witness_watch)) 2583 return (EINVAL); 2584 witness_watch = value; 2585 return (0); 2586 } 2587 2588 static void 2589 witness_add_fullgraph(struct sbuf *sb, struct witness *w) 2590 { 2591 int i; 2592 2593 if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0)) 2594 return; 2595 w->w_displayed = 1; 2596 2597 WITNESS_INDEX_ASSERT(w->w_index); 2598 for (i = 1; i <= w_max_used_index; i++) { 2599 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) { 2600 sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name, 2601 w_data[i].w_name); 2602 witness_add_fullgraph(sb, &w_data[i]); 2603 } 2604 } 2605 } 2606 2607 /* 2608 * A simple hash function. Takes a key pointer and a key size. If size == 0, 2609 * interprets the key as a string and reads until the null 2610 * terminator. Otherwise, reads the first size bytes. Returns an unsigned 32-bit 2611 * hash value computed from the key. 2612 */ 2613 static uint32_t 2614 witness_hash_djb2(const uint8_t *key, uint32_t size) 2615 { 2616 unsigned int hash = 5381; 2617 int i; 2618 2619 /* hash = hash * 33 + key[i] */ 2620 if (size) 2621 for (i = 0; i < size; i++) 2622 hash = ((hash << 5) + hash) + (unsigned int)key[i]; 2623 else 2624 for (i = 0; key[i] != 0; i++) 2625 hash = ((hash << 5) + hash) + (unsigned int)key[i]; 2626 2627 return (hash); 2628 } 2629 2630 2631 /* 2632 * Initializes the two witness hash tables. Called exactly once from 2633 * witness_initialize(). 2634 */ 2635 static void 2636 witness_init_hash_tables(void) 2637 { 2638 int i; 2639 2640 MPASS(witness_cold); 2641 2642 /* Initialize the hash tables. */ 2643 for (i = 0; i < WITNESS_HASH_SIZE; i++) 2644 w_hash.wh_array[i] = NULL; 2645 2646 w_hash.wh_size = WITNESS_HASH_SIZE; 2647 w_hash.wh_count = 0; 2648 2649 /* Initialize the lock order data hash. 
/*
 * Initializes the two witness hash tables.  Called exactly once from
 * witness_initialize().
 */
static void
witness_init_hash_tables(void)
{
	int i;

	MPASS(witness_cold);

	/* Initialize the hash tables. */
	for (i = 0; i < WITNESS_HASH_SIZE; i++)
		w_hash.wh_array[i] = NULL;

	w_hash.wh_size = WITNESS_HASH_SIZE;
	w_hash.wh_count = 0;

	/* Initialize the lock order data hash. */
	w_lofree = NULL;
	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
		w_lodata[i].wlod_next = w_lofree;
		w_lofree = &w_lodata[i];
	}
	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
	w_lohash.wloh_count = 0;
	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
		w_lohash.wloh_array[i] = NULL;
}

static struct witness *
witness_hash_get(const char *key)
{
	struct witness *w;
	uint32_t hash;

	MPASS(key != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
	w = w_hash.wh_array[hash];
	while (w != NULL) {
		if (strcmp(w->w_name, key) == 0)
			goto out;
		w = w->w_hash_next;
	}

out:
	return (w);
}

static void
witness_hash_put(struct witness *w)
{
	uint32_t hash;

	MPASS(w != NULL);
	MPASS(w->w_name != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	KASSERT(witness_hash_get(w->w_name) == NULL,
	    ("%s: trying to add a hash entry that already exists!", __func__));
	KASSERT(w->w_hash_next == NULL,
	    ("%s: w->w_hash_next != NULL", __func__));

	hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
	w->w_hash_next = w_hash.wh_array[hash];
	w_hash.wh_array[hash] = w;
	w_hash.wh_count++;
}

static struct witness_lock_order_data *
witness_lock_order_get(struct witness *parent, struct witness *child)
{
	struct witness_lock_order_data *data = NULL;
	struct witness_lock_order_key key;
	unsigned int hash;

	MPASS(parent != NULL && child != NULL);
	key.from = parent->w_index;
	key.to = child->w_index;
	WITNESS_INDEX_ASSERT(key.from);
	WITNESS_INDEX_ASSERT(key.to);
	if ((w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
		goto out;

	hash = witness_hash_djb2((const uint8_t *)&key,
	    sizeof(key)) % w_lohash.wloh_size;
	data = w_lohash.wloh_array[hash];
	while (data != NULL) {
		if (witness_lock_order_key_equal(&data->wlod_key, &key))
			break;
		data = data->wlod_next;
	}

out:
	return (data);
}
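
/*
 * Illustrative note: the lock order table is keyed by the ordered pair
 * of witness indices, so "A before B" and "B before A" are distinct
 * entries:
 *
 *	key.from = A->w_index;  key.to = B->w_index;
 *
 * Each entry records the stack captured the first time that particular
 * order was seen; sysctl_debug_witness_badstacks() prints these stacks
 * when reporting a reversal.
 */
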
/*
 * Verify that parent and child have a known relationship, are not the same,
 * and child is actually a child of parent.  This is done without w_mtx
 * to avoid contention in the common case.
 */
static int
witness_lock_order_check(struct witness *parent, struct witness *child)
{

	if (parent != child &&
	    w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN &&
	    isitmychild(parent, child))
		return (1);

	return (0);
}

static int
witness_lock_order_add(struct witness *parent, struct witness *child)
{
	struct witness_lock_order_data *data = NULL;
	struct witness_lock_order_key key;
	unsigned int hash;

	MPASS(parent != NULL && child != NULL);
	key.from = parent->w_index;
	key.to = child->w_index;
	WITNESS_INDEX_ASSERT(key.from);
	WITNESS_INDEX_ASSERT(key.to);
	if (w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN)
		return (1);

	hash = witness_hash_djb2((const uint8_t *)&key,
	    sizeof(key)) % w_lohash.wloh_size;
	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
	data = w_lofree;
	if (data == NULL)
		return (0);
	w_lofree = data->wlod_next;
	data->wlod_next = w_lohash.wloh_array[hash];
	data->wlod_key = key;
	w_lohash.wloh_array[hash] = data;
	w_lohash.wloh_count++;
	stack_zero(&data->wlod_stack);
	stack_save(&data->wlod_stack);
	return (1);
}

/* Call this whenever the structure of the witness graph changes. */
static void
witness_increment_graph_generation(void)
{

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	w_generation++;
}

#ifdef KDB
static void
_witness_debugger(int cond, const char *msg)
{

	if (witness_trace && cond)
		kdb_backtrace();
	if (witness_kdb && cond)
		kdb_enter(KDB_WHY_WITNESS, msg);
}
#endif
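
/*
 * Illustrative usage (tunable names assumed to correspond to the
 * witness_trace and witness_kdb variables above): enabling both makes a
 * reported violation print a stack trace and drop into the debugger:
 *
 *	sysctl debug.witness.trace=1
 *	sysctl debug.witness.kdb=1
 */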