/*-
 * Copyright (c) 2008 Isilon Systems, Inc.
 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
 * Copyright (c) 1998 Berkeley Software Design, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 * Main Entry: witness
 * Pronunciation: 'wit-n&s
 * Function: noun
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 *    testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 *    a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 *    testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 *   b : public affirmation by word or example of usually
 *    religious faith or conviction <the heroic witness to divine
 *    life -- Pilot>
 * 6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */
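/*
 * A minimal sketch of rule 3), not compiled into the kernel: "foo_lock"
 * is a hypothetical, already-initialized sx lock (would need <sys/sx.h>).
 * Taking Giant after the sleepable lock is always safe because Giant is
 * non-sleepable; the reverse order is safe because Giant is dropped while
 * blocking on the sleepable lock, as rule 2) requires.
 */
#if 0
static void
giant_order_example(struct sx *foo_lock)
{

	sx_xlock(foo_lock);	/* Sleepable lock first. */
	mtx_lock(&Giant);	/* Giant after a sleepable lock: allowed. */
	mtx_unlock(&Giant);
	sx_xunlock(foo_lock);
}
#endif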
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_stack.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/stdarg.h>

#if !defined(DDB) && !defined(STACK)
#error "DDB or STACK options are required for WITNESS"
#endif

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif

#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
#define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT		1024
#define	WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
#define	WITNESS_PENDLIST	512

/* Allocate 256 KB of stack data space */
#define	WITNESS_LO_DATA_COUNT	2048

/* Prime, gives load factor of ~2 at full load */
#define	WITNESS_LO_HASH_SIZE	1021

/*
 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
 * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_NCHILDREN	5
#define	LOCK_CHILDCOUNT	2048

#define	MAX_W_NAME	64

#define	BADSTACK_SBUF_SIZE	(256 * WITNESS_COUNT)
#define	CYCLEGRAPH_SBUF_SIZE	8192
#define	FULLGRAPH_SBUF_SIZE	32768

/*
 * These flags go in the witness relationship matrix and describe the
 * relationship between any two struct witness objects.
 */
#define	WITNESS_UNRELATED	0x00	/* No lock order relation. */
#define	WITNESS_PARENT		0x01	/* Parent, aka direct ancestor. */
#define	WITNESS_ANCESTOR	0x02	/* Direct or indirect ancestor. */
#define	WITNESS_CHILD		0x04	/* Child, aka direct descendant. */
#define	WITNESS_DESCENDANT	0x08	/* Direct or indirect descendant. */
#define	WITNESS_ANCESTOR_MASK	(WITNESS_PARENT | WITNESS_ANCESTOR)
#define	WITNESS_DESCENDANT_MASK	(WITNESS_CHILD | WITNESS_DESCENDANT)
#define	WITNESS_RELATED_MASK						\
	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
#define	WITNESS_REVERSAL	0x10	/* A lock order reversal has been
					 * observed. */
#define	WITNESS_RESERVED1	0x20	/* Unused flag, reserved. */
#define	WITNESS_RESERVED2	0x40	/* Unused flag, reserved. */
#define	WITNESS_LOCK_ORDER_KNOWN 0x80	/* This lock order is known. */

/* Descendant to ancestor flags */
#define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)

/* Ancestor to descendant flags */
#define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)
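/*
 * Illustrative compile-time checks (an addition for clarity, assuming the
 * CTASSERT macro from <sys/systm.h>): the two macros above simply mirror
 * the relationship flags between the two directions of the matrix, so an
 * ancestor-side flag maps to the matching descendant-side flag and back.
 */
CTASSERT(WITNESS_ATOD(WITNESS_PARENT) == WITNESS_CHILD);
CTASSERT(WITNESS_DTOA(WITNESS_CHILD) == WITNESS_PARENT);
CTASSERT(WITNESS_ATOD(WITNESS_ANCESTOR) == WITNESS_DESCENDANT);
CTASSERT(WITNESS_DTOA(WITNESS_DESCENDANT) == WITNESS_ANCESTOR);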
#define	WITNESS_INDEX_ASSERT(i)						\
	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT)

MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");

/*
 * Lock instances.  A lock instance is the data associated with a lock while
 * it is held by witness.  For example, a lock instance will hold the
 * recursion count of a lock.  Lock instances are held in lists.  Spin locks
 * are held in a per-cpu list while sleep locks are held in a per-thread
 * list.
 */
struct lock_instance {
	struct lock_object	*li_lock;
	const char		*li_file;
	int			li_line;
	u_int			li_flags;
};

/*
 * A simple list type used to build the list of locks held by a thread
 * or CPU.  We can't simply embed the list in struct lock_object since a
 * lock may be held by more than one thread if it is a shared lock.  Locks
 * are added to the head of the list, so we fill up each list entry from
 * "the back" logically.  To ease some of the arithmetic, we actually fill
 * in each list entry the normal way (children[0] then children[1], etc.) but
 * when we traverse the list we read children[count-1] as the first entry
 * down to children[0] as the final entry.
 */
struct lock_list_entry {
	struct lock_list_entry	*ll_next;
	struct lock_instance	ll_children[LOCK_NCHILDREN];
	u_int			ll_count;
};
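/*
 * A minimal traversal sketch (not compiled): walking the locks on such a
 * list from most to least recently acquired, per the ordering described
 * above.  This mirrors the loops used by witness_warn() and
 * witness_thread_exit() below.
 */
#if 0
static void
lock_list_walk_example(struct lock_list_entry *lle)
{
	int i;

	for (; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--)
			printf("held: %s\n",
			    lle->ll_children[i].li_lock->lo_name);
}
#endif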
/*
 * The main witness structure. One of these per named lock type in the system
 * (for example, "vnode interlock").
 */
struct witness {
	char			w_name[MAX_W_NAME];
	uint32_t		w_index;  /* Index in the relationship matrix */
	struct lock_class	*w_class;
	STAILQ_ENTRY(witness)	w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness)	w_typelist;	/* Witnesses of a type. */
	struct witness		*w_hash_next;	/* Linked list in hash buckets. */
	const char		*w_file;	/* File where last acquired */
	uint32_t		w_line;		/* Line where last acquired */
	uint32_t		w_refcount;
	uint16_t		w_num_ancestors; /* direct/indirect
						  * ancestor count */
	uint16_t		w_num_descendants; /* direct/indirect
						    * descendant count */
	int16_t			w_ddb_level;
	unsigned		w_displayed:1;
	unsigned		w_reversed:1;
};

STAILQ_HEAD(witness_list, witness);

/*
 * The witness hash table. Keys are witness names (const char *), elements are
 * witness objects (struct witness *).
 */
struct witness_hash {
	struct witness	*wh_array[WITNESS_HASH_SIZE];
	uint32_t	wh_size;
	uint32_t	wh_count;
};

/*
 * Key type for the lock order data hash table.
 */
struct witness_lock_order_key {
	uint16_t	from;
	uint16_t	to;
};

struct witness_lock_order_data {
	struct stack			wlod_stack;
	struct witness_lock_order_key	wlod_key;
	struct witness_lock_order_data	*wlod_next;
};

/*
 * The witness lock order data hash table. Keys are witness index tuples
 * (struct witness_lock_order_key), elements are lock order data objects
 * (struct witness_lock_order_data).
 */
struct witness_lock_order_hash {
	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
	u_int	wloh_size;
	u_int	wloh_count;
};

#ifdef BLESSING
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

struct witness_pendhelp {
	const char		*wh_type;
	struct lock_object	*wh_lock;
};

struct witness_order_list_entry {
	const char		*w_name;
	struct lock_class	*w_class;
};

/*
 * Returns 0 if one of the locks is a spin lock and the other is not.
 * Returns 1 otherwise.
 */
static __inline int
witness_lock_type_equal(struct witness *w1, struct witness *w2)
{

	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
	    (w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
}

static __inline int
witness_lock_order_key_empty(const struct witness_lock_order_key *key)
{

	return (key->from == 0 && key->to == 0);
}

static __inline int
witness_lock_order_key_equal(const struct witness_lock_order_key *a,
    const struct witness_lock_order_key *b)
{

	return (a->from == b->from && a->to == b->to);
}
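/*
 * A plausible construction of such a key (illustrative sketch, not
 * compiled): a key pairs the matrix indices of two witnesses, and a
 * (0, 0) key denotes an empty slot, which witness_lock_order_key_empty()
 * detects.  The helper name is hypothetical.
 */
#if 0
static void
key_example(struct witness *parent, struct witness *child)
{
	struct witness_lock_order_key key;

	key.from = parent->w_index;
	key.to = child->w_index;
	MPASS(!witness_lock_order_key_empty(&key));
}
#endif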
static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
		    const char *fname);
#ifdef KDB
static void	_witness_debugger(int cond, const char *msg);
#endif
static void	adopt(struct witness *parent, struct witness *child);
#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static void	depart(struct witness *w);
static struct witness	*enroll(const char *description,
			    struct lock_class *lock_class);
static struct lock_instance	*find_instance(struct lock_list_entry *list,
				    struct lock_object *lock);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static void	itismychild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
#ifdef DDB
static void	witness_ddb_compute_levels(void);
static void	witness_ddb_display(void(*)(const char *fmt, ...));
static void	witness_ddb_display_descendants(void(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_ddb_display_list(void(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_ddb_level_descendants(struct witness *parent, int l);
static void	witness_ddb_list(struct thread *td);
#endif
static void	witness_free(struct witness *m);
static struct witness	*witness_get(void);
static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
static struct witness	*witness_hash_get(const char *key);
static void	witness_hash_put(struct witness *w);
static void	witness_init_hash_tables(void);
static void	witness_increment_graph_generation(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_list_entry	*witness_lock_list_get(void);
static int	witness_lock_order_add(struct witness *parent,
		    struct witness *child);
static int	witness_lock_order_check(struct witness *parent,
		    struct witness *child);
static struct witness_lock_order_data	*witness_lock_order_get(
					    struct witness *parent,
					    struct witness *child);
static void	witness_list_lock(struct lock_instance *instance);
static void	witness_setflag(struct lock_object *lock, int flag, int set);

#ifdef KDB
#define	witness_debugger(c)	_witness_debugger(c, __func__)
#else
#define	witness_debugger(c)
#endif

SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL, "Witness Locking");

/*
 * If set to 0, lock order checking is disabled.  If set to -1,
 * witness is completely disabled.  Otherwise witness performs full
 * lock order checking for all locks.  At runtime, lock order checking
 * may be toggled.  However, witness cannot be reenabled once it is
 * completely disabled.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is 1, it will cause the system
 * to drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
    0, "");

/*
 * Call this to print out the relations between locks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");

/*
 * Call this to print out the faulty witness stacks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");
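/*
 * Example usage from userland (illustrative; standard sysctl(8)
 * invocations for the knobs defined above):
 *
 *	sysctl debug.witness.watch=0	# disable lock order checking
 *	sysctl debug.witness.watch=-1	# turn witness off entirely (one-way)
 *	sysctl debug.witness.fullgraph	# dump the known lock relations
 *	sysctl debug.witness.badstacks	# dump the recorded bad stacks
 */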
static struct mtx w_mtx;

/* w_list */
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);

/* w_typelist */
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);

/* lock list */
static struct lock_list_entry *w_lock_list_free = NULL;
static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
static u_int pending_cnt;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");

static struct witness *w_data;
static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
static struct witness_hash w_hash;	/* The witness hash table. */

/* The lock order data hash */
static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
static struct witness_lock_order_data *w_lofree = NULL;
static struct witness_lock_order_hash w_lohash;
static int w_max_used_index = 0;
static unsigned int w_generation = 0;
static const char w_notrunning[] = "Witness not running\n";
static const char w_stillcold[] = "Witness is still cold\n";


static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "allprison", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_rw },
#ifdef	HWPMC_HOOKS
	{ "pmc-sleep", &lock_class_mtx_sleep },
#endif
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_rw },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * IPv4 multicast:
	 * protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * IPv6 multicast:
	 * protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in6_multi_mtx", &lock_class_mtx_sleep },
	{ "mld_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp_global_rwlock", &lock_class_rw },
	{ "unp_list_lock", &lock_class_mtx_sleep },
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_rw },
	{ "udpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_rw },
	{ "tcpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_mtx_sleep },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },

	/*
	 * IEEE 802.11
	 */
	{ "802.11 com lock", &lock_class_mtx_sleep},
	{ NULL, NULL },
	/*
	 * Network drivers
	 */
	{ "network driver", &lock_class_mtx_sleep},
	{ NULL, NULL },

	/*
	 * Netgraph
	 */
	{ "ng_node", &lock_class_mtx_sleep },
	{ "ng_worklist", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "system map", &lock_class_mtx_sleep },
	{ "vm page queue mutex", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * kqueue/VFS interaction
	 */
	{ "kqueue", &lock_class_mtx_sleep },
	{ "struct mount mtx", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * ZFS locking
	 */
	{ "dn->dn_mtx", &lock_class_sx },
	{ "dr->dt.di.dr_mtx", &lock_class_sx },
	{ "db->db_mtx", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
	{ "scrlock", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "pcib_mtx", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
#endif
	{ "scc_hwmtx", &lock_class_mtx_spin },
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "fast_taskqueue", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-per-proc", &lock_class_mtx_spin },
#endif
	{ "process slock", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "umtx lock", &lock_class_mtx_spin },
	{ "rm_spinlock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "turnstile lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	{ "syscons video lock", &lock_class_mtx_spin },
	{ "time lock", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#endif
#ifdef __powerpc__
	{ "tlb0", &lock_class_mtx_spin },
#endif
	/*
	 * leaf locks
	 */
	{ "intrcnt", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#if defined(SMP) && defined(__sparc64__)
	{ "ipi", &lock_class_mtx_spin },
#endif
#ifdef __i386__
	{ "allpmaps", &lock_class_mtx_spin },
	{ "descriptor tables", &lock_class_mtx_spin },
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "cpuset", &lock_class_mtx_spin },
	{ "mprof lock", &lock_class_mtx_spin },
	{ "zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-leaf", &lock_class_mtx_spin },
#endif
	{ "blocked lock", &lock_class_mtx_spin },
	{ NULL, NULL },
	{ NULL, NULL }
};
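/*
 * Illustrative note: within each NULL-terminated group above, the
 * enrollment loop in witness_initialize() below chains consecutive
 * entries, so earlier entries must be acquired before later ones.  A new
 * static ordering would be another group appended before the final
 * terminator, e.g. (hypothetical lock names):
 *
 *	{ "foo list lock", &lock_class_mtx_sleep },
 *	{ "foo entry lock", &lock_class_mtx_sleep },
 *	{ NULL, NULL },
 */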
#ifdef BLESSING
/*
 * Pairs of locks which have been blessed
 * Don't complain about order problems with blessed locks
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code assumes
 * that early boot is single-threaded, at least until after this routine
 * has completed.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	w_data = malloc(sizeof (struct witness) * WITNESS_COUNT, M_WITNESS,
	    M_NOWAIT | M_ZERO);

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = WITNESS_COUNT - 1; i >= 0; i--) {
		w = &w_data[i];
		memset(w, 0, sizeof(*w));
		w_data[i].w_index = i;	/* Witness index never changes. */
		witness_free(w);
	}
	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
	    ("%s: Invalid list of free witness objects", __func__));

	/* The witness at index 0 is left unused, to aid in debugging. */
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;

	memset(w_rmatrix, 0,
	    (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));

	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);
	witness_init_hash_tables();

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			itismychild(w, w1);
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
		lock = pending_locks[i].wh_lock;
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(pending_locks[i].wh_type,
		    LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
    NULL);

void
witness_init(struct lock_object *lock, const char *type)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_initialize() by adding
	 * it to the pending_locks list.  If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch < 1 || panicstr != NULL ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		pending_locks[pending_cnt].wh_lock = lock;
		pending_locks[pending_cnt++].wh_type = type;
		if (pending_cnt > WITNESS_PENDLIST)
			panic("%s: pending locks list is too small, bump it\n",
			    __func__);
	} else
		lock->lo_witness = enroll(type, class);
}

void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
		return;
	w = lock->lo_witness;

	mtx_lock_spin(&w_mtx);
	MPASS(w->w_refcount > 0);
	w->w_refcount--;

	if (w->w_refcount == 0)
		depart(w);
	mtx_unlock_spin(&w_mtx);
}

#ifdef DDB
static void
witness_ddb_compute_levels(void)
{
	struct witness *w;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_ddb_level = -1;

	/*
	 * Look for locks with no parents and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {

		/* If the witness has ancestors (is not a root), skip it. */
		if (w->w_num_ancestors > 0)
			continue;
		witness_ddb_level_descendants(w, 0);
	}
}

static void
witness_ddb_level_descendants(struct witness *w, int l)
{
	int i;

	if (w->w_ddb_level >= l)
		return;

	w->w_ddb_level = l;
	l++;

	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_level_descendants(&w_data[i], l);
	}
}

static void
witness_ddb_display_descendants(void(*prnt)(const char *fmt, ...),
    struct witness *w, int indent)
{
	int i;

	for (i = 0; i < indent; i++)
		prnt(" ");
	prnt("%s (type: %s, depth: %d, active refs: %d)",
	    w->w_name, w->w_class->lc_name,
	    w->w_ddb_level, w->w_refcount);
	if (w->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	w->w_displayed = 1;
	if (w->w_file != NULL && w->w_line != 0)
		prnt(" -- last acquired @ %s:%d\n", w->w_file,
		    w->w_line);
	else
		prnt(" -- never acquired\n");
	indent++;
	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_display_descendants(prnt, &w_data[i],
			    indent);
	}
}

static void
witness_ddb_display_list(void(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_ddb_level > 0)
			continue;

		/* This lock has no ancestors - display its descendants. */
		witness_ddb_display_descendants(prnt, w, 0);
	}
}

static void
witness_ddb_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	witness_ddb_compute_levels();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_ddb_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_ddb_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s (type: %s, depth: %d)\n", w->w_name,
		    w->w_class->lc_name, w->w_ddb_level);
	}
}
#endif /* DDB */

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == -1 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	mtx_assert(&w_mtx, MA_NOTOWNED);
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (witness_watch &&
	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
	itismychild(lock1->lo_witness, lock2->lo_witness);
	mtx_unlock_spin(&w_mtx);
	return (0);
}
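/*
 * A minimal usage sketch (not compiled): a subsystem with two witnessed
 * locks can declare an explicit order between them up front instead of
 * relying on the order being learned from the first acquisitions.  The
 * function and lock names are hypothetical.
 */
#if 0
static void
foo_declare_order(struct mtx *foo_a, struct mtx *foo_b)
{

	/* Returns EDOOFUS if the reverse order is already known. */
	if (witness_defineorder(&foo_a->lock_object, &foo_b->lock_object) != 0)
		printf("foo: a-before-b conflicts with an existing order\n");
}
#endif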
void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line, struct lock_object *interlock)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1, *lock2, *plock;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	w = lock->lo_witness;
	class = LOCK_CLASS(lock);
	td = curthread;
	file = fixup_filename(file);

	if (class->lc_flags & LC_SLEEPLOCK) {

		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		lock_list = td->td_sleeplocks;
		if (lock_list == NULL || lock_list->ll_count == 0)
			return;
	} else {

		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  Avoid problems with thread
		 * migration pinning the thread while checking if
		 * spinlocks are held.  If at least one spinlock is held
		 * the thread is in a safe path and it is allowed to
		 * unpin it.
		 */
		sched_pin();
		lock_list = PCPU_GET(spinlocks);
		if (lock_list == NULL || lock_list->ll_count == 0) {
			sched_unpin();
			return;
		}
		sched_unpin();
	}

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Find the previously acquired lock, but ignore interlocks.
	 */
	plock = &lock_list->ll_children[lock_list->ll_count - 1];
	if (interlock != NULL && plock->li_lock == interlock) {
		if (lock_list->ll_count > 1)
			plock =
			    &lock_list->ll_children[lock_list->ll_count - 2];
		else {
			lle = lock_list->ll_next;

			/*
			 * The interlock is the only lock we hold, so
			 * simply return.
			 */
			if (lle == NULL)
				return;
			plock = &lle->ll_children[lle->ll_count - 1];
		}
	}

	/*
	 * Try to perform most checks without a lock.  If this succeeds we
	 * can skip acquiring the lock and return success.
	 */
	w1 = plock->li_lock->lo_witness;
	if (witness_lock_order_check(w1, w))
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	mtx_lock_spin(&w_mtx);
	witness_lock_order_add(w1, w);
	if (w1 == w) {
		i = w->w_index;
		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
			w_rmatrix[i][i] |= WITNESS_REVERSAL;
			w->w_reversed = 1;
			mtx_unlock_spin(&w_mtx);
			printf(
			    "acquiring duplicate lock of same type: \"%s\"\n",
			    w->w_name);
			printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
			    plock->li_file, plock->li_line);
			printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
			witness_debugger(1);
		} else
			mtx_unlock_spin(&w_mtx);
		return;
	}
	mtx_assert(&w_mtx, MA_OWNED);

	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w))
		goto out;

	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];

			/*
			 * Ignore the interlock the first time we see it.
			 */
			if (interlock != NULL && interlock == lock1->li_lock) {
				interlock = NULL;
				continue;
			}

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			w1 = lock1->li_lock->lo_witness;
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}

			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;

			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.lock_object)
				goto reversal;

			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:

			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
#ifdef BLESSING

			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				goto out;
#endif
			/* Bail if this violation is known */
			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
				goto out;

			/* Record this as a violation */
			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
			w->w_reversed = w1->w_reversed = 1;
			witness_increment_graph_generation();
			mtx_unlock_spin(&w_mtx);

			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.lock_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");

			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, lock1->li_file, lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name, file, line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_witness->w_name,
				    lock2->li_file, lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, lock1->li_file, lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name, file, line);
			}
			witness_debugger(1);
			return;
		}
	}

	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(plock->li_lock == &Giant.lock_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    w->w_name, plock->li_lock->lo_witness->w_name);
		itismychild(plock->li_lock->lo_witness, w);
	}
out:
	mtx_unlock_spin(&w_mtx);
}
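/*
 * A minimal sketch (not compiled) of the kind of bug the checks above
 * catch: two code paths acquiring the same pair of mutexes in opposite
 * orders.  Whichever path runs second triggers the "lock order reversal"
 * report.  The locks and functions are hypothetical.
 */
#if 0
static struct mtx foo_mtx, bar_mtx;

static void
path_one(void)
{

	mtx_lock(&foo_mtx);
	mtx_lock(&bar_mtx);	/* Establishes foo before bar. */
	mtx_unlock(&bar_mtx);
	mtx_unlock(&foo_mtx);
}

static void
path_two(void)
{

	mtx_lock(&bar_mtx);
	mtx_lock(&foo_mtx);	/* Reversal: bar before foo. */
	mtx_unlock(&foo_mtx);
	mtx_unlock(&bar_mtx);
}
#endif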
void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;
	file = fixup_filename(file);

	/* Determine lock list for this lock. */
	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("upgrade of exclusive lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK, file, line);
	}
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("downgrade of shared lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK, file, line);
	}
	instance->li_flags &= ~LI_EXCLUSIVE;
}

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
		return;
	td = curthread;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	lle = *lock_list;
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}

	/*
	 * When disabling WITNESS through witness_watch we could end up
	 * having registered locks in the td_sleeplocks queue.
	 * We have to make sure we flush these queues, so just search for
	 * any remaining registered locks and remove them.
	 */
	if (witness_watch > 0)
		panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
		    lock->lo_name, file, line);
	else
		return;
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while exclusively locked from %s:%d\n",
		    instance->li_file, instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while share locked from %s:%d\n", instance->li_file,
		    instance->li_line);
		panic("share->uexcl");
	}
	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}
	/* The lock is now being dropped, check for NORELEASE flag */
	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
		printf("forbidden unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		panic("lock marked norelease");
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/*
	 * To reduce contention on w_mtx, we always want to keep a head
	 * object in the list so that frequent allocation from the free
	 * witness pool (and the subsequent locking) is avoided.
	 * To keep the code simple, a completely emptied head object also
	 * means that there are no further objects in the list, so list
	 * ownership needs to be handed over to another object if the
	 * current head is to be freed.
	 */
	if ((*lock_list)->ll_count == 0) {
		if (*lock_list == lle) {
			if (lle->ll_next == NULL)
				return;
		} else
			lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

void
witness_thread_exit(struct thread *td)
{
	struct lock_list_entry *lle;
	int i, n;

	lle = td->td_sleeplocks;
	if (lle == NULL || panicstr != NULL)
		return;
	if (lle->ll_count != 0) {
		for (n = 0; lle != NULL; lle = lle->ll_next)
			for (i = lle->ll_count - 1; i >= 0; i--) {
				if (n == 0)
					printf("Thread %p exiting with the following locks held:\n",
					    td);
				n++;
				witness_list_lock(&lle->ll_children[i]);

			}
		panic("Thread %p cannot exit while holding sleeplocks\n", td);
	}
	witness_lock_list_free(lle);
}

/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch < 1 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.lock_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1);
		}

	/*
	 * Pin the thread in order to avoid problems with thread migration.
	 * Once all checks of spinlock ownership have passed, the thread is
	 * on a safe path and can be unpinned.
	 */
	sched_pin();
	lock_list = PCPU_GET(spinlocks);
	if (lock_list != NULL && lock_list->ll_count != 0) {
		sched_unpin();

		/*
		 * We should only have one spinlock and, as long as the
		 * flags cannot match for this lock's class, check whether
		 * the first spinlock is the one curthread should hold.
		 */
		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
		    lock1->li_lock == lock && n == 0)
			return (0);

		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
		printf(" with the following");
		if (flags & WARN_SLEEPOK)
			printf(" non-sleepable");
		printf(" locks held:\n");
		n += witness_list_locks(&lock_list);
	} else
		sched_unpin();
	if (flags & WARN_PANIC && n)
		panic("%s", __func__);
	else
		witness_debugger(n);
	return (n);
}
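/*
 * A minimal usage sketch (not compiled): a caller about to sleep can
 * verify that no non-exempt locks are held, exempting Giant and sleepable
 * locks and panicking on failure.  "foo_wait" is a hypothetical name.
 */
#if 0
static void
foo_wait(void)
{

	witness_warn(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
	    "sleeping in %s", __func__);
}
#endif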
const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;
	struct witness_list *typelist;

	MPASS(description != NULL);

	if (witness_watch == -1 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK)) {
		if (witness_skipspin)
			return (NULL);
		else
			typelist = &w_spin;
	} else if ((lock_class->lc_flags & LC_SLEEPLOCK))
		typelist = &w_sleep;
	else
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);

	mtx_lock_spin(&w_mtx);
	w = witness_hash_get(description);
	if (w)
		goto found;
	if ((w = witness_get()) == NULL)
		return (NULL);
	MPASS(strlen(description) < MAX_W_NAME);
	strcpy(w->w_name, description);
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK) {
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
		w_spin_cnt++;
	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
		w_sleep_cnt++;
	}

	/* Insert new witness into the hash */
	witness_hash_put(w);
	witness_increment_graph_generation();
	mtx_unlock_spin(&w_mtx);
	return (w);
found:
	w->w_refcount++;
	mtx_unlock_spin(&w_mtx);
	if (lock_class != w->w_class)
		panic(
		    "lock (%s) %s does not match earlier (%s) lock",
		    description, lock_class->lc_name,
		    w->w_class->lc_name);
	return (w);
}

static void
depart(struct witness *w)
{
	struct witness_list *list;

	MPASS(w->w_refcount == 0);
	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
		list = &w_sleep;
		w_sleep_cnt--;
	} else {
		list = &w_spin;
		w_spin_cnt--;
	}
	/*
	 * Set file to NULL as it may point into a loadable module.
	 */
1774 */ 1775 w->w_file = NULL; 1776 w->w_line = 0; 1777 witness_increment_graph_generation(); 1778 } 1779 1780 1781 static void 1782 adopt(struct witness *parent, struct witness *child) 1783 { 1784 int pi, ci, i, j; 1785 1786 if (witness_cold == 0) 1787 mtx_assert(&w_mtx, MA_OWNED); 1788 1789 /* If the relationship is already known, there's no work to be done. */ 1790 if (isitmychild(parent, child)) 1791 return; 1792 1793 /* When the structure of the graph changes, bump up the generation. */ 1794 witness_increment_graph_generation(); 1795 1796 /* 1797 * The hard part ... create the direct relationship, then propagate all 1798 * indirect relationships. 1799 */ 1800 pi = parent->w_index; 1801 ci = child->w_index; 1802 WITNESS_INDEX_ASSERT(pi); 1803 WITNESS_INDEX_ASSERT(ci); 1804 MPASS(pi != ci); 1805 w_rmatrix[pi][ci] |= WITNESS_PARENT; 1806 w_rmatrix[ci][pi] |= WITNESS_CHILD; 1807 1808 /* 1809 * If parent was not already an ancestor of child, 1810 * then we increment the descendant and ancestor counters. 1811 */ 1812 if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) { 1813 parent->w_num_descendants++; 1814 child->w_num_ancestors++; 1815 } 1816 1817 /* 1818 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as 1819 * an ancestor of 'pi' during this loop. 1820 */ 1821 for (i = 1; i <= w_max_used_index; i++) { 1822 if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 && 1823 (i != pi)) 1824 continue; 1825 1826 /* Find each descendant of 'i' and mark it as a descendant. */ 1827 for (j = 1; j <= w_max_used_index; j++) { 1828 1829 /* 1830 * Skip children that are already marked as 1831 * descendants of 'i'. 1832 */ 1833 if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) 1834 continue; 1835 1836 /* 1837 * We are only interested in descendants of 'ci'. Note 1838 * that 'ci' itself is counted as a descendant of 'ci'. 1839 */ 1840 if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 && 1841 (j != ci)) 1842 continue; 1843 w_rmatrix[i][j] |= WITNESS_ANCESTOR; 1844 w_rmatrix[j][i] |= WITNESS_DESCENDANT; 1845 w_data[i].w_num_descendants++; 1846 w_data[j].w_num_ancestors++; 1847 1848 /* 1849 * Make sure we aren't marking a node as both an 1850 * ancestor and descendant. We should have caught 1851 * this as a lock order reversal earlier. 1852 */ 1853 if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) && 1854 (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) { 1855 printf("witness rmatrix paradox! [%d][%d]=%d " 1856 "both ancestor and descendant\n", 1857 i, j, w_rmatrix[i][j]); 1858 kdb_backtrace(); 1859 printf("Witness disabled.\n"); 1860 witness_watch = -1; 1861 } 1862 if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) && 1863 (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) { 1864 printf("witness rmatrix paradox! [%d][%d]=%d " 1865 "both ancestor and descendant\n", 1866 j, i, w_rmatrix[j][i]); 1867 kdb_backtrace(); 1868 printf("Witness disabled.\n"); 1869 witness_watch = -1; 1870 } 1871 } 1872 } 1873 } 1874 1875 static void 1876 itismychild(struct witness *parent, struct witness *child) 1877 { 1878 1879 MPASS(child != NULL && parent != NULL); 1880 if (witness_cold == 0) 1881 mtx_assert(&w_mtx, MA_OWNED); 1882 1883 if (!witness_lock_type_equal(parent, child)) { 1884 if (witness_cold == 0) 1885 mtx_unlock_spin(&w_mtx); 1886 panic("%s: parent \"%s\" (%s) and child \"%s\" (%s) are not " 1887 "the same lock type", __func__, parent->w_name, 1888 parent->w_class->lc_name, child->w_name, 1889 child->w_class->lc_name); 1890 } 1891 adopt(parent, child); 1892 } 1893 1894 /* 1895 * Generic code for the isitmy*() functions. 

/*
 * Generic code for the isitmy*() functions.  The rmask parameter is the
 * expected relationship of w1 to w2.
 */
static int
_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
{
	unsigned char r1, r2;
	int i1, i2;

	i1 = w1->w_index;
	i2 = w2->w_index;
	WITNESS_INDEX_ASSERT(i1);
	WITNESS_INDEX_ASSERT(i2);
	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;

	/* The flags on one better be the inverse of the flags on the other. */
	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
	    (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
		printf("%s: rmatrix mismatch between %s (index %d) and %s "
		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
		    "w_rmatrix[%d][%d] == %hhx\n",
		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
		    i2, i1, r2);
		kdb_backtrace();
		printf("Witness disabled.\n");
		witness_watch = -1;
	}
	return (r1 & rmask);
}

/*
 * Checks if @child is a direct child of @parent.
 */
static int
isitmychild(struct witness *parent, struct witness *child)
{

	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
}

/*
 * Checks if @descendant is a direct or indirect descendant of @ancestor.
 */
static int
isitmydescendant(struct witness *ancestor, struct witness *descendant)
{

	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
	    __func__));
}

#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < blessed_count; i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_name, b->b_lock1) == 0) {
			if (strcmp(w2->w_name, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_name, b->b_lock2) == 0)
			if (strcmp(w2->w_name, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}
#endif

static struct witness *
witness_get(void)
{
	struct witness *w;
	int index;

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	if (witness_watch == -1) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	if (STAILQ_EMPTY(&w_free)) {
		witness_watch = -1;
		mtx_unlock_spin(&w_mtx);
		printf("WITNESS: unable to allocate a new witness object\n");
		return (NULL);
	}
	w = STAILQ_FIRST(&w_free);
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;
	index = w->w_index;
	MPASS(index > 0 && index == w_max_used_index + 1 &&
	    index < WITNESS_COUNT);
	bzero(w, sizeof(*w));
	w->w_index = index;
	if (index > w_max_used_index)
		w_max_used_index = index;
	return (w);
}

static void
witness_free(struct witness *w)
{

	STAILQ_INSERT_HEAD(&w_free, w, w_list);
	w_free_cnt++;
}

static struct lock_list_entry *
witness_lock_list_get(void)
{
	struct lock_list_entry *lle;

	if (witness_watch == -1)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	lle = w_lock_list_free;
	if (lle == NULL) {
		witness_watch = -1;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_lock_list_free = lle->ll_next;
	mtx_unlock_spin(&w_mtx);
	bzero(lle, sizeof(*lle));
	return (lle);
}

static void
witness_lock_list_free(struct lock_list_entry *lle)
{

	mtx_lock_spin(&w_mtx);
	lle->ll_next = w_lock_list_free;
	w_lock_list_free = lle;
	mtx_unlock_spin(&w_mtx);
}

static struct lock_instance *
find_instance(struct lock_list_entry *list, struct lock_object *lock)
{
	struct lock_list_entry *lle;
	struct lock_instance *instance;
	int i;

	for (lle = list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			instance = &lle->ll_children[i];
			if (instance->li_lock == lock)
				return (instance);
		}
	return (NULL);
}

static void
witness_list_lock(struct lock_instance *instance)
{
	struct lock_object *lock;

	lock = instance->li_lock;
	printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
	if (lock->lo_witness->w_name != lock->lo_name)
		printf(" (%s)", lock->lo_witness->w_name);
	printf(" r = %d (%p) locked @ %s:%d\n",
	    instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
	    instance->li_line);
}

#ifdef DDB
static int
witness_thread_has_locks(struct thread *td)
{

	if (td->td_sleeplocks == NULL)
		return (0);
	return (td->td_sleeplocks->ll_count != 0);
}

static int
witness_proc_has_locks(struct proc *p)
{
	struct thread *td;

	FOREACH_THREAD_IN_PROC(p, td) {
		if (witness_thread_has_locks(td))
			return (1);
	}
	return (0);
}
#endif

int
witness_list_locks(struct lock_list_entry **lock_list)
{
	struct lock_list_entry *lle;
	int i, nheld;

	nheld = 0;
	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			witness_list_lock(&lle->ll_children[i]);
			nheld++;
		}
	return (nheld);
}
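
/*
 * For illustration, a line produced by witness_list_lock() above looks
 * roughly like this (all values hypothetical):
 *
 *	exclusive sleep mutex foo (foo mutex) r = 0 (0xc4a2b000)
 *	    locked @ dev/foo/foo.c:42
 *
 * The parenthesized witness name is only printed when it differs from
 * the lock's own name, and "r" is the recursion count taken from the
 * LI_RECURSEMASK bits of the instance flags.
 */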

/*
 * This is a bit risky at best.  We call this function when we have timed
 * out acquiring a spin lock, and we assume that the other CPU is stuck
 * with this lock held.  So, we go groveling around in the other CPU's
 * per-cpu data to try to find the lock instance for this spin lock to
 * see when it was last acquired.
 */
void
witness_display_spinlock(struct lock_object *lock, struct thread *owner)
{
	struct lock_instance *instance;
	struct pcpu *pc;

	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
		return;
	pc = pcpu_find(owner->td_oncpu);
	instance = find_instance(pc->pc_spinlocks, lock);
	if (instance != NULL)
		witness_list_lock(instance);
}

void
witness_save(struct lock_object *lock, const char **filep, int *linep)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	*filep = instance->li_file;
	*linep = instance->li_line;
}

void
witness_restore(struct lock_object *lock, const char *file, int line)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	lock->lo_witness->w_file = file;
	lock->lo_witness->w_line = line;
	instance->li_file = file;
	instance->li_line = line;
}

void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else {
		panic("Lock (%s) %s is not sleep or spin!",
		    class->lc_name, lock->lo_name);
	}
	file = fixup_filename(file);
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			panic("Lock (%s) %s locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			panic("Lock (%s) %s not locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
			break;
		}
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("Lock (%s) %s exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			panic("Lock (%s) %s not recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			panic("Lock (%s) %s recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	default:
		panic("Invalid lock assertion at %s:%d.", file, line);
	}
#endif	/* INVARIANT_SUPPORT */
}

static void
witness_setflag(struct lock_object *lock, int flag, int set)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);

	if (set)
		instance->li_flags |= flag;
	else
		instance->li_flags &= ~flag;
}

void
witness_norelease(struct lock_object *lock)
{

	witness_setflag(lock, LI_NORELEASE, 1);
}

void
witness_releaseok(struct lock_object *lock)
{

	witness_setflag(lock, LI_NORELEASE, 0);
}
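
/*
 * A hypothetical consumer of witness_norelease(), witness_releaseok() and
 * witness_assert() above (illustrative sketch only; 'sc', its mutex and
 * foo_helper() are invented for the example).  A driver that must not
 * drop its lock across a helper call can bracket the region:
 */
#if 0
	mtx_lock(&sc->sc_mtx);
	witness_norelease(&sc->sc_mtx.lock_object);
	foo_helper(sc);		/* releasing sc_mtx here would be flagged */
	witness_releaseok(&sc->sc_mtx.lock_object);
	witness_assert(&sc->sc_mtx.lock_object, LA_XLOCKED, __FILE__,
	    __LINE__);
	mtx_unlock(&sc->sc_mtx);
#endif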

#ifdef DDB
static void
witness_ddb_list(struct thread *td)
{

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	KASSERT(kdb_active, ("%s: not in the debugger", __func__));

	if (witness_watch < 1)
		return;

	witness_list_locks(&td->td_sleeplocks);

	/*
	 * We only handle spinlocks if td == curthread.  This is somewhat broken
	 * if td is currently executing on some other CPU and holds spin locks
	 * as we won't display those locks.  If we had a MI way of getting
	 * the per-cpu data for a given cpu then we could use
	 * td->td_oncpu to get the list of spinlocks for this thread
	 * and "fix" this.
	 *
	 * That still wouldn't really fix this unless we locked the scheduler
	 * lock or stopped the other CPU to make sure it wasn't changing the
	 * list out from under us.  It is probably best to just not try to
	 * handle threads on other CPUs for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		witness_list_locks(PCPU_PTR(spinlocks));
}

DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;

	if (have_addr)
		td = db_lookup_thread(addr, TRUE);
	else
		td = kdb_thread;
	witness_ddb_list(td);
}

DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
{
	struct thread *td;
	struct proc *p;

	/*
	 * It would be nice to list only threads and processes that actually
	 * hold sleep locks, but that information is currently not exported
	 * by WITNESS.
	 */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!witness_proc_has_locks(p))
			continue;
		FOREACH_THREAD_IN_PROC(p, td) {
			if (!witness_thread_has_locks(td))
				continue;
			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
			    p->p_comm, td, td->td_tid);
			witness_ddb_list(td);
		}
	}
}
DB_SHOW_ALIAS(alllocks, db_witness_list_all)

DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_ddb_display(db_printf);
}
#endif
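
/*
 * Note (added for clarity): the sysctl handler below cannot hold w_mtx,
 * a spin mutex, across sbuf operations, malloc() or SYSCTL_OUT().  It
 * therefore snapshots the data it needs under the lock into preallocated
 * buffers and revalidates w_generation afterwards, restarting from
 * scratch whenever the graph changed in the meantime.
 */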

static int
sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
{
	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
	struct sbuf *sb;
	u_int w_rmatrix1, w_rmatrix2;
	int error, generation, i, j;

	tmp_data1 = NULL;
	tmp_data2 = NULL;
	tmp_w1 = NULL;
	tmp_w2 = NULL;
	if (witness_watch < 1) {
		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
		return (error);
	}
	if (witness_cold) {
		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
		return (error);
	}
	error = 0;
	sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND);
	if (sb == NULL)
		return (ENOMEM);

	/* Allocate and init temporary storage space. */
	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
	    M_WAITOK | M_ZERO);
	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
	    M_WAITOK | M_ZERO);
	stack_zero(&tmp_data1->wlod_stack);
	stack_zero(&tmp_data2->wlod_stack);

restart:
	mtx_lock_spin(&w_mtx);
	generation = w_generation;
	mtx_unlock_spin(&w_mtx);
	sbuf_printf(sb, "Number of known direct relationships is %d\n",
	    w_lohash.wloh_count);
	for (i = 1; i < w_max_used_index; i++) {
		mtx_lock_spin(&w_mtx);
		if (generation != w_generation) {
			mtx_unlock_spin(&w_mtx);

			/* The graph has changed, try again. */
			req->oldidx = 0;
			sbuf_clear(sb);
			goto restart;
		}

		w1 = &w_data[i];
		if (w1->w_reversed == 0) {
			mtx_unlock_spin(&w_mtx);
			continue;
		}

		/* Copy w1 locally so we can release the spin lock. */
		*tmp_w1 = *w1;
		mtx_unlock_spin(&w_mtx);

		if (tmp_w1->w_reversed == 0)
			continue;
		for (j = 1; j < w_max_used_index; j++) {
			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
				continue;

			mtx_lock_spin(&w_mtx);
			if (generation != w_generation) {
				mtx_unlock_spin(&w_mtx);

				/* The graph has changed, try again. */
				req->oldidx = 0;
				sbuf_clear(sb);
				goto restart;
			}

			w2 = &w_data[j];
			data1 = witness_lock_order_get(w1, w2);
			data2 = witness_lock_order_get(w2, w1);

			/*
			 * Copy information locally so we can release the
			 * spin lock.
			 */
			*tmp_w2 = *w2;
			w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
			w_rmatrix2 = (unsigned int)w_rmatrix[j][i];

			if (data1) {
				stack_zero(&tmp_data1->wlod_stack);
				stack_copy(&data1->wlod_stack,
				    &tmp_data1->wlod_stack);
			}
			if (data2 && data2 != data1) {
				stack_zero(&tmp_data2->wlod_stack);
				stack_copy(&data2->wlod_stack,
				    &tmp_data2->wlod_stack);
			}
			mtx_unlock_spin(&w_mtx);

			sbuf_printf(sb,
	    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
#if 0
			sbuf_printf(sb,
			    "w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
			    tmp_w1->w_name, tmp_w2->w_name, w_rmatrix1,
			    tmp_w2->w_name, tmp_w1->w_name, w_rmatrix2);
#endif
			if (data1) {
				sbuf_printf(sb,
			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
				sbuf_printf(sb, "\n");
			}
			if (data2 && data2 != data1) {
				sbuf_printf(sb,
			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
				sbuf_printf(sb, "\n");
			}
		}
	}
	mtx_lock_spin(&w_mtx);
	if (generation != w_generation) {
		mtx_unlock_spin(&w_mtx);

		/*
		 * The graph changed while we were printing stack data,
		 * try again.
		 */
		req->oldidx = 0;
		sbuf_clear(sb);
		goto restart;
	}
	mtx_unlock_spin(&w_mtx);

	/* Free temporary storage space. */
	free(tmp_data1, M_TEMP);
	free(tmp_data2, M_TEMP);
	free(tmp_w1, M_TEMP);
	free(tmp_w2, M_TEMP);

	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);

	return (error);
}
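
/*
 * Note (added for clarity): the handler above uses an auto-extending
 * sbuf sized by BADSTACK_SBUF_SIZE, while the full-graph handler below
 * uses a fixed-length sbuf and panics on overflow instead.  In both
 * cases sbuf_len() + 1 is handed to SYSCTL_OUT() so that the
 * terminating NUL is copied out along with the text.
 */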

static int
sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
{
	struct witness *w;
	struct sbuf *sb;
	int error;

	if (witness_watch < 1) {
		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
		return (error);
	}
	if (witness_cold) {
		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
		return (error);
	}
	error = 0;
	sb = sbuf_new(NULL, NULL, FULLGRAPH_SBUF_SIZE, SBUF_FIXEDLEN);
	if (sb == NULL)
		return (ENOMEM);
	sbuf_printf(sb, "\n");

	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;
	STAILQ_FOREACH(w, &w_all, w_list)
		witness_add_fullgraph(sb, w);
	mtx_unlock_spin(&w_mtx);

	/*
	 * While using SBUF_FIXEDLEN, check if the sbuf overflowed.
	 */
	if (sbuf_overflowed(sb)) {
		sbuf_delete(sb);
		panic("%s: sbuf overflowed, bump FULLGRAPH_SBUF_SIZE value\n",
		    __func__);
	}

	/*
	 * Close the sbuf and return to userland.
	 */
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);

	return (error);
}

static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = witness_watch;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value > 1 || value < -1 ||
	    (witness_watch == -1 && value != witness_watch))
		return (EINVAL);
	witness_watch = value;
	return (0);
}

static void
witness_add_fullgraph(struct sbuf *sb, struct witness *w)
{
	int i;

	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
		return;
	w->w_displayed = 1;

	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
			    w_data[i].w_name);
			witness_add_fullgraph(sb, &w_data[i]);
		}
	}
}

/*
 * A simple hash function.  Takes a key pointer and a key size.  If size == 0,
 * interprets the key as a string and reads until the null terminator.
 * Otherwise, reads the first size bytes.  Returns an unsigned 32-bit hash
 * value computed from the key.
 */
static uint32_t
witness_hash_djb2(const uint8_t *key, uint32_t size)
{
	unsigned int hash = 5381;
	int i;

	/* hash = hash * 33 + key[i] */
	if (size)
		for (i = 0; i < size; i++)
			hash = ((hash << 5) + hash) + (unsigned int)key[i];
	else
		for (i = 0; key[i] != 0; i++)
			hash = ((hash << 5) + hash) + (unsigned int)key[i];

	return (hash);
}
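
/*
 * A standalone userland sketch (illustrative only, not compiled into the
 * kernel) showing the two modes of witness_hash_djb2() above: sized keys,
 * as used for struct witness_lock_order_key, and NUL-terminated strings,
 * as used for witness names.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t
djb2(const uint8_t *key, uint32_t size)
{
	uint32_t hash = 5381;
	uint32_t i;

	if (size)
		for (i = 0; i < size; i++)
			hash = ((hash << 5) + hash) + key[i];
	else
		for (i = 0; key[i] != 0; i++)
			hash = ((hash << 5) + hash) + key[i];
	return (hash);
}

int
main(void)
{
	struct { int from, to; } key = { 3, 7 };

	printf("string mode: %u\n", djb2((const uint8_t *)"Giant", 0));
	printf("sized mode:  %u\n", djb2((const uint8_t *)&key, sizeof(key)));
	return (0);
}
#endif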

/*
 * Initializes the two witness hash tables.  Called exactly once from
 * witness_initialize().
 */
static void
witness_init_hash_tables(void)
{
	int i;

	MPASS(witness_cold);

	/* Initialize the hash tables. */
	for (i = 0; i < WITNESS_HASH_SIZE; i++)
		w_hash.wh_array[i] = NULL;

	w_hash.wh_size = WITNESS_HASH_SIZE;
	w_hash.wh_count = 0;

	/* Initialize the lock order data hash. */
	w_lofree = NULL;
	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
		w_lodata[i].wlod_next = w_lofree;
		w_lofree = &w_lodata[i];
	}
	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
	w_lohash.wloh_count = 0;
	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
		w_lohash.wloh_array[i] = NULL;
}

static struct witness *
witness_hash_get(const char *key)
{
	struct witness *w;
	uint32_t hash;

	MPASS(key != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
	w = w_hash.wh_array[hash];
	while (w != NULL) {
		if (strcmp(w->w_name, key) == 0)
			goto out;
		w = w->w_hash_next;
	}

out:
	return (w);
}

static void
witness_hash_put(struct witness *w)
{
	uint32_t hash;

	MPASS(w != NULL);
	MPASS(w->w_name != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	KASSERT(witness_hash_get(w->w_name) == NULL,
	    ("%s: trying to add a hash entry that already exists!", __func__));
	KASSERT(w->w_hash_next == NULL,
	    ("%s: w->w_hash_next != NULL", __func__));

	hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
	w->w_hash_next = w_hash.wh_array[hash];
	w_hash.wh_array[hash] = w;
	w_hash.wh_count++;
}

static struct witness_lock_order_data *
witness_lock_order_get(struct witness *parent, struct witness *child)
{
	struct witness_lock_order_data *data = NULL;
	struct witness_lock_order_key key;
	unsigned int hash;

	MPASS(parent != NULL && child != NULL);
	key.from = parent->w_index;
	key.to = child->w_index;
	WITNESS_INDEX_ASSERT(key.from);
	WITNESS_INDEX_ASSERT(key.to);
	if ((w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
		goto out;

	hash = witness_hash_djb2((const char *)&key,
	    sizeof(key)) % w_lohash.wloh_size;
	data = w_lohash.wloh_array[hash];
	while (data != NULL) {
		if (witness_lock_order_key_equal(&data->wlod_key, &key))
			break;
		data = data->wlod_next;
	}

out:
	return (data);
}
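
/*
 * Note (added for clarity): the WITNESS_LOCK_ORDER_KNOWN bit in w_rmatrix
 * acts as a cheap existence filter for the lookups above and below; when
 * it is clear, the (from, to) pair cannot be in the table and the hash
 * chain walk is skipped entirely.
 */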

/*
 * Verify that parent and child have a known relationship, are not the same,
 * and child is actually a child of parent.  This is done without w_mtx
 * to avoid contention in the common case.
 */
static int
witness_lock_order_check(struct witness *parent, struct witness *child)
{

	if (parent != child &&
	    w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN &&
	    isitmychild(parent, child))
		return (1);

	return (0);
}

static int
witness_lock_order_add(struct witness *parent, struct witness *child)
{
	struct witness_lock_order_data *data = NULL;
	struct witness_lock_order_key key;
	unsigned int hash;

	MPASS(parent != NULL && child != NULL);
	key.from = parent->w_index;
	key.to = child->w_index;
	WITNESS_INDEX_ASSERT(key.from);
	WITNESS_INDEX_ASSERT(key.to);
	if (w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN)
		return (1);

	hash = witness_hash_djb2((const char *)&key,
	    sizeof(key)) % w_lohash.wloh_size;
	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
	data = w_lofree;
	if (data == NULL)
		return (0);
	w_lofree = data->wlod_next;
	data->wlod_next = w_lohash.wloh_array[hash];
	data->wlod_key = key;
	w_lohash.wloh_array[hash] = data;
	w_lohash.wloh_count++;
	stack_zero(&data->wlod_stack);
	stack_save(&data->wlod_stack);
	return (1);
}

/* Call this whenever the structure of the witness graph changes. */
static void
witness_increment_graph_generation(void)
{

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	w_generation++;
}

#ifdef KDB
static void
_witness_debugger(int cond, const char *msg)
{

	if (witness_trace && cond)
		kdb_backtrace();
	if (witness_kdb && cond)
		kdb_enter(KDB_WHY_WITNESS, msg);
}
#endif
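
/*
 * A standalone userland sketch (illustrative only, not compiled into the
 * kernel) of the unordered pair matching performed by blessed() above:
 * a blessed entry suppresses reversal reports for a pair of lock names
 * regardless of acquisition order.  The names here are invented for the
 * example.
 */
#if 0
#include <stdio.h>
#include <string.h>

struct blessed_pair {
	const char *b_lock1;
	const char *b_lock2;
};

static const struct blessed_pair blessed_list[] = {
	{ "foo lock", "bar lock" },
};

static int
blessed_demo(const char *n1, const char *n2)
{
	const struct blessed_pair *b;
	size_t i;

	for (i = 0; i < sizeof(blessed_list) / sizeof(blessed_list[0]); i++) {
		b = &blessed_list[i];
		/* Match the pair in either order. */
		if ((strcmp(n1, b->b_lock1) == 0 &&
		    strcmp(n2, b->b_lock2) == 0) ||
		    (strcmp(n1, b->b_lock2) == 0 &&
		    strcmp(n2, b->b_lock1) == 0))
			return (1);
	}
	return (0);
}

int
main(void)
{

	printf("%d %d\n", blessed_demo("foo lock", "bar lock"),
	    blessed_demo("bar lock", "foo lock"));	/* prints "1 1" */
	return (0);
}
#endif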