/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of turnstiles used to hold the queue of threads blocked on
 * non-sleepable locks.  Sleepable locks use condition variables to
 * implement their queues.  Turnstiles differ from a sleep queue in that
 * turnstile queues are assigned to a lock held by an owning thread.  Thus,
 * when one thread is enqueued onto a turnstile, it can lend its priority
 * to the owning thread.
 *
 * We wish to avoid bloating locks with an embedded turnstile and we do not
 * want to use back-pointers in the locks for the same reason.  Thus, we
 * use a similar approach to that of Solaris 7 as described in Solaris
 * Internals by Jim Mauro and Richard McDougall.  Turnstiles are looked up
 * in a hash table based on the address of the lock.  Each entry in the
 * hash table is a linked list of turnstiles and is called a turnstile
 * chain.  Each chain contains a spin mutex that protects all of the
 * turnstiles in the chain.
 *
 * Each time a thread is created, a turnstile is allocated from a UMA zone
 * and attached to that thread.  When a thread blocks on a lock, if it is the
 * first thread to block, it lends its turnstile to the lock.  If the lock
 * already has a turnstile, then it donates its turnstile to the free list
 * of the lock's turnstile.  When a thread is woken up, it takes a turnstile
 * from the free list if there are any other waiters.  If it is the only
 * thread blocked on the lock, then it reclaims the turnstile associated with
 * the lock and removes it from the hash table.
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_turnstile_profiling.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#include <sys/lockmgr.h>
#include <sys/sx.h>
#endif

/*
 * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
 * number chosen because the sleep queues use the same value for the
 * shift.  Basically, we ignore the lower 8 bits of the address.
 * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
 */
#define	TC_TABLESIZE	128			/* Must be power of 2. */
#define	TC_MASK		(TC_TABLESIZE - 1)
#define	TC_SHIFT	8
#define	TC_HASH(lock)	(((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
#define	TC_LOOKUP(lock)	&turnstile_chains[TC_HASH(lock)]

/*
 * There are three different lists of turnstiles as follows.  The list
 * connected by ts_link entries is a per-thread list of all the turnstiles
 * attached to locks that we own.  This is used to fix up our priority when
 * a lock is released.  The other two lists use the ts_hash entries.  The
 * first of these two is the turnstile chain list that a turnstile is on
 * when it is attached to a lock.  The second list to use ts_hash is the
 * free list hung off of a turnstile that is attached to a lock.
 *
 * Each turnstile contains three lists of threads.  The two ts_blocked lists
 * are linked lists of threads blocked on the turnstile's lock.  One list is
 * for exclusive waiters, and the other is for shared waiters.  The
 * ts_pending list is a linked list of threads previously awakened by
 * turnstile_signal() or turnstile_wait() that are waiting to be put on
 * the run queue.
 *
 * Locking key:
 *  c - turnstile chain lock
 *  q - td_contested lock
 */
struct turnstile {
	struct mtx ts_lock;			/* Spin lock for self. */
	struct threadqueue ts_blocked[2];	/* (c + q) Blocked threads. */
	struct threadqueue ts_pending;		/* (c) Pending threads. */
	LIST_ENTRY(turnstile) ts_hash;		/* (c) Chain and free list. */
	LIST_ENTRY(turnstile) ts_link;		/* (q) Contested locks. */
	LIST_HEAD(, turnstile) ts_free;		/* (c) Free turnstiles. */
	struct lock_object *ts_lockobj;		/* (c) Lock we reference. */
	struct thread *ts_owner;		/* (c + q) Who owns the lock. */
};

struct turnstile_chain {
	LIST_HEAD(, turnstile) tc_turnstiles;	/* List of turnstiles. */
	struct mtx tc_lock;			/* Spin lock for this chain. */
#ifdef TURNSTILE_PROFILING
	u_int	tc_depth;			/* Length of tc_queues. */
	u_int	tc_max_depth;			/* Max length of tc_queues. */
#endif
};

#ifdef TURNSTILE_PROFILING
u_int turnstile_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, turnstile, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "turnstile profiling");
static SYSCTL_NODE(_debug_turnstile, OID_AUTO, chains,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "turnstile chain stats");
SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD,
    &turnstile_max_depth, 0, "maximum depth achieved of a single chain");
#endif
static struct mtx td_contested_lock;
static struct turnstile_chain turnstile_chains[TC_TABLESIZE];
static uma_zone_t turnstile_zone;

/*
 * Prototypes for non-exported routines.
 */
static void	init_turnstile0(void *dummy);
#ifdef TURNSTILE_PROFILING
static void	init_turnstile_profiling(void *arg);
#endif
static void	propagate_priority(struct thread *td);
static int	turnstile_adjust_thread(struct turnstile *ts,
		    struct thread *td);
static struct thread *turnstile_first_waiter(struct turnstile *ts);
static void	turnstile_setowner(struct turnstile *ts, struct thread *owner);
#ifdef INVARIANTS
static void	turnstile_dtor(void *mem, int size, void *arg);
#endif
static int	turnstile_init(void *mem, int size, int flags);
static void	turnstile_fini(void *mem, int size);

SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE(sched, , , sleep);
SDT_PROBE_DEFINE2(sched, , , wakeup, "struct thread *",
    "struct proc *");

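/*
 * Helpers for propagate_priority(): drop a turnstile lock or a thread lock
 * picked up while walking the chain, but never the top turnstile's lock,
 * which is held across the entire walk.
 */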
static inline void
propagate_unlock_ts(struct turnstile *top, struct turnstile *ts)
{

	if (ts != top)
		mtx_unlock_spin(&ts->ts_lock);
}

static inline void
propagate_unlock_td(struct turnstile *top, struct thread *td)
{

	if (td->td_lock != &top->ts_lock)
		thread_unlock(td);
}

/*
 * Walks the chain of turnstiles and their owners to propagate the priority
 * of the thread being blocked to all the threads holding locks that have to
 * release their locks before this thread can run again.
 */
static void
propagate_priority(struct thread *td)
{
	struct turnstile *ts, *top;
	int pri;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	pri = td->td_priority;
	top = ts = td->td_blocked;
	THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);

	/*
	 * The original turnstile lock is held across the entire
	 * operation.  We only ever lock down the chain so the lock
	 * order is constant.
	 */
	for (;;) {
		td = ts->ts_owner;

		if (td == NULL) {
			/*
			 * This might be a read lock with no owner.  There's
			 * not much we can do, so just bail.
			 */
			propagate_unlock_ts(top, ts);
			return;
		}

		/*
		 * Wait for the thread lock to be stable and then only
		 * acquire if it is not the turnstile lock.
		 */
		thread_lock_block_wait(td);
		if (td->td_lock != &ts->ts_lock) {
			thread_lock_flags(td, MTX_DUPOK);
			propagate_unlock_ts(top, ts);
		}
		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		/*
		 * If the thread is asleep, then we are probably about
		 * to deadlock.  To make debugging this easier, show a
		 * backtrace of the misbehaving thread and panic so as
		 * not to leave the kernel deadlocked.
		 */
		if (TD_IS_SLEEPING(td)) {
			printf(
		"Sleeping thread (tid %d, pid %d) owns a non-sleepable lock\n",
			    td->td_tid, td->td_proc->p_pid);
			kdb_backtrace_thread(td);
			panic("sleeping thread holds %s",
			    ts->ts_lockobj->lo_name);
		}

		/*
		 * If this thread already has higher priority than the
		 * thread that is being blocked, we are finished.
		 */
		if (td->td_priority <= pri) {
			propagate_unlock_td(top, td);
			return;
		}

		/*
		 * Bump this thread's priority.
		 */
		sched_lend_prio(td, pri);

		/*
		 * If lock holder is actually running or on the run queue
		 * then we are done.
		 */
		if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			propagate_unlock_td(top, td);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this should
		 * never happen, as it would mean we are deadlocked).
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If we aren't blocked on a lock, we should be.
		 */
		KASSERT(TD_ON_LOCK(td), (
		    "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
		    td->td_tid, td->td_name, TD_GET_STATE(td),
		    ts->ts_lockobj->lo_name));

		/*
		 * Pick up the lock that td is blocked on.
		 */
		ts = td->td_blocked;
		MPASS(ts != NULL);
		THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
		/* Resort td on the list if needed. */
		if (!turnstile_adjust_thread(ts, td)) {
			propagate_unlock_ts(top, ts);
			return;
		}
		/* The thread lock is released as ts lock above. */
	}
}

/*
 * Adjust the thread's position on a turnstile after its priority has been
 * changed.
 */
static int
turnstile_adjust_thread(struct turnstile *ts, struct thread *td)
{
	struct thread *td1, *td2;
	int queue;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_LOCK(td));

	/*
	 * This thread may not be blocked on this turnstile anymore
	 * but instead might already be woken up on another CPU
	 * that is waiting on the thread lock in turnstile_unpend() to
	 * finish waking this thread up.  We can detect this case
	 * by checking to see if this thread has been given a
	 * turnstile by either turnstile_signal() or
	 * turnstile_broadcast().  In this case, treat the thread as
	 * if it was already running.
	 */
	if (td->td_turnstile != NULL)
		return (0);

	/*
	 * Check if the thread needs to be moved on the blocked chain.
	 * It needs to be moved if either its priority is lower than
	 * the previous thread or higher than the next thread.
	 */
	THREAD_LOCKPTR_BLOCKED_ASSERT(td, &ts->ts_lock);
	td1 = TAILQ_PREV(td, threadqueue, td_lockq);
	td2 = TAILQ_NEXT(td, td_lockq);
	if ((td1 != NULL && td->td_priority < td1->td_priority) ||
	    (td2 != NULL && td->td_priority > td2->td_priority)) {
		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved to.
		 */
		queue = td->td_tsqueue;
		MPASS(queue == TS_EXCLUSIVE_QUEUE || queue == TS_SHARED_QUEUE);
		mtx_lock_spin(&td_contested_lock);
		TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > td->td_priority)
				break;
		}

		if (td1 == NULL)
			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		else
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		if (td1 == NULL)
			CTR3(KTR_LOCK,
		    "turnstile_adjust_thread: td %d put at tail on [%p] %s",
			    td->td_tid, ts->ts_lockobj, ts->ts_lockobj->lo_name);
		else
			CTR4(KTR_LOCK,
		    "turnstile_adjust_thread: td %d moved before %d on [%p] %s",
			    td->td_tid, td1->td_tid, ts->ts_lockobj,
			    ts->ts_lockobj->lo_name);
	}
	return (1);
}

/*
 * Early initialization of turnstiles.  This is not done via a SYSINIT()
 * since this needs to be initialized very early when mutexes are first
 * initialized.
 */
void
init_turnstiles(void)
{
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		LIST_INIT(&turnstile_chains[i].tc_turnstiles);
		mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
		    NULL, MTX_SPIN);
	}
	mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
	LIST_INIT(&thread0.td_contested);
	thread0.td_turnstile = NULL;
}

#ifdef TURNSTILE_PROFILING
static void
init_turnstile_profiling(void *arg)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_turnstile_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "turnstile chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &turnstile_chains[i].tc_depth, 0,
		    NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &turnstile_chains[i].tc_max_depth,
		    0, NULL);
	}
}
SYSINIT(turnstile_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_turnstile_profiling, NULL);
#endif

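/*
 * Create the UMA zone that turnstiles are allocated from and give
 * thread0 its turnstile.
 */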
static void
init_turnstile0(void *dummy)
{

	turnstile_zone = uma_zcreate("TURNSTILE", sizeof(struct turnstile),
	    NULL,
#ifdef INVARIANTS
	    turnstile_dtor,
#else
	    NULL,
#endif
	    turnstile_init, turnstile_fini, UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
	thread0.td_turnstile = turnstile_alloc();
}
SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);

/*
 * Update a thread on the turnstile list after its priority has been changed.
 * The old priority is passed in as an argument.
 */
void
turnstile_adjust(struct thread *td, u_char oldpri)
{
	struct turnstile *ts;

	MPASS(TD_ON_LOCK(td));

	/*
	 * Pick up the lock that td is blocked on.
	 */
	ts = td->td_blocked;
	MPASS(ts != NULL);
	THREAD_LOCKPTR_BLOCKED_ASSERT(td, &ts->ts_lock);
	mtx_assert(&ts->ts_lock, MA_OWNED);

	/* Resort the turnstile on the list. */
	if (!turnstile_adjust_thread(ts, td))
		return;
	/*
	 * If our priority was lowered and we are at the head of the
	 * turnstile, then propagate our new priority up the chain.
	 * Note that we currently don't try to revoke lent priorities
	 * when our priority goes up.
	 */
	MPASS(td->td_tsqueue == TS_EXCLUSIVE_QUEUE ||
	    td->td_tsqueue == TS_SHARED_QUEUE);
	if (td == TAILQ_FIRST(&ts->ts_blocked[td->td_tsqueue]) &&
	    td->td_priority < oldpri) {
		propagate_priority(td);
	}
}

/*
 * Set the owner of the lock this turnstile is attached to.
 */
static void
turnstile_setowner(struct turnstile *ts, struct thread *owner)
{

	mtx_assert(&td_contested_lock, MA_OWNED);
	MPASS(ts->ts_owner == NULL);

	/* A shared lock might not have an owner. */
	if (owner == NULL)
		return;

	MPASS(owner->td_proc->p_magic == P_MAGIC);
	ts->ts_owner = owner;
	LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
turnstile_dtor(void *mem, int size, void *arg)
{
	struct turnstile *ts;

	ts = mem;
	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]));
	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
	MPASS(TAILQ_EMPTY(&ts->ts_pending));
}
#endif

/*
 * UMA zone item initializer.
 */
static int
turnstile_init(void *mem, int size, int flags)
{
	struct turnstile *ts;

	bzero(mem, size);
	ts = mem;
	TAILQ_INIT(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
	TAILQ_INIT(&ts->ts_blocked[TS_SHARED_QUEUE]);
	TAILQ_INIT(&ts->ts_pending);
	LIST_INIT(&ts->ts_free);
	mtx_init(&ts->ts_lock, "turnstile lock", NULL, MTX_SPIN);
	return (0);
}

static void
turnstile_fini(void *mem, int size)
{
	struct turnstile *ts;

	ts = mem;
	mtx_destroy(&ts->ts_lock);
}

/*
 * Get a turnstile for a new thread.
 */
struct turnstile *
turnstile_alloc(void)
{

	return (uma_zalloc(turnstile_zone, M_WAITOK));
}

/*
 * Free a turnstile when a thread is destroyed.
 */
void
turnstile_free(struct turnstile *ts)
{

	uma_zfree(turnstile_zone, ts);
}

/*
 * Lock the turnstile chain associated with the specified lock.
 */
void
turnstile_chain_lock(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_lock_spin(&tc->tc_lock);
}

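/*
 * Look up the turnstile for the specified lock, locking both the turnstile
 * chain and the turnstile itself.  If the lock has no turnstile yet, prepare
 * this thread's spare turnstile for it instead.  Either way, a locked
 * turnstile is returned for a later turnstile_wait() or turnstile_cancel().
 */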
struct turnstile *
turnstile_trywait(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	tc = TC_LOOKUP(lock);
	mtx_lock_spin(&tc->tc_lock);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock) {
			mtx_lock_spin(&ts->ts_lock);
			return (ts);
		}

	ts = curthread->td_turnstile;
	MPASS(ts != NULL);
	mtx_lock_spin(&ts->ts_lock);
	KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
	ts->ts_lockobj = lock;

	return (ts);
}

bool
turnstile_lock(struct turnstile *ts, struct lock_object **lockp,
    struct thread **tdp)
{
	struct turnstile_chain *tc;
	struct lock_object *lock;

	if ((lock = ts->ts_lockobj) == NULL)
		return (false);
	tc = TC_LOOKUP(lock);
	mtx_lock_spin(&tc->tc_lock);
	mtx_lock_spin(&ts->ts_lock);
	if (__predict_false(lock != ts->ts_lockobj)) {
		mtx_unlock_spin(&tc->tc_lock);
		mtx_unlock_spin(&ts->ts_lock);
		return (false);
	}
	*lockp = lock;
	*tdp = ts->ts_owner;
	return (true);
}

void
turnstile_unlock(struct turnstile *ts, struct lock_object *lock)
{
	struct turnstile_chain *tc;

	mtx_assert(&ts->ts_lock, MA_OWNED);
	mtx_unlock_spin(&ts->ts_lock);
	if (ts == curthread->td_turnstile)
		ts->ts_lockobj = NULL;
	tc = TC_LOOKUP(lock);
	mtx_unlock_spin(&tc->tc_lock);
}

void
turnstile_assert(struct turnstile *ts)
{
	MPASS(ts->ts_lockobj == NULL);
}

void
turnstile_cancel(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct lock_object *lock;

	mtx_assert(&ts->ts_lock, MA_OWNED);

	mtx_unlock_spin(&ts->ts_lock);
	lock = ts->ts_lockobj;
	if (ts == curthread->td_turnstile)
		ts->ts_lockobj = NULL;
	tc = TC_LOOKUP(lock);
	mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Look up the turnstile for a lock in the hash table.  The associated
 * turnstile chain must already be locked.  If no turnstile is found in
 * the hash table, NULL is returned.
 */
struct turnstile *
turnstile_lookup(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock) {
			mtx_lock_spin(&ts->ts_lock);
			return (ts);
		}
	return (NULL);
}

/*
 * Unlock the turnstile chain associated with a given lock.
 */
void
turnstile_chain_unlock(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Return a pointer to the thread waiting on this turnstile with the
 * most important priority or NULL if the turnstile has no waiters.
 */
static struct thread *
turnstile_first_waiter(struct turnstile *ts)
{
	struct thread *std, *xtd;

	std = TAILQ_FIRST(&ts->ts_blocked[TS_SHARED_QUEUE]);
	xtd = TAILQ_FIRST(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
	if (xtd == NULL || (std != NULL && std->td_priority < xtd->td_priority))
		return (std);
	return (xtd);
}

/*
 * Take ownership of a turnstile and adjust the priority of the new
 * owner appropriately.
 */
void
turnstile_claim(struct turnstile *ts)
{
	struct thread *td, *owner;
	struct turnstile_chain *tc;

	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(ts != curthread->td_turnstile);

	owner = curthread;
	mtx_lock_spin(&td_contested_lock);
	turnstile_setowner(ts, owner);
	mtx_unlock_spin(&td_contested_lock);

	td = turnstile_first_waiter(ts);
	MPASS(td != NULL);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	THREAD_LOCKPTR_BLOCKED_ASSERT(td, &ts->ts_lock);

	/*
	 * Update the priority of the new owner if needed.
	 */
	thread_lock(owner);
	if (td->td_priority < owner->td_priority)
		sched_lend_prio(owner, td->td_priority);
	thread_unlock(owner);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_unlock_spin(&ts->ts_lock);
	mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Block the current thread on the turnstile associated with 'lock'.  This
 * function will context switch and not return until this thread has been
 * woken back up.  This function must be called with the appropriate
 * turnstile chain locked and will return with it unlocked.
 */
void
turnstile_wait(struct turnstile *ts, struct thread *owner, int queue)
{
	struct turnstile_chain *tc;
	struct thread *td, *td1;
	struct lock_object *lock;

	td = curthread;
	mtx_assert(&ts->ts_lock, MA_OWNED);
	if (owner)
		MPASS(owner->td_proc->p_magic == P_MAGIC);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/*
	 * If the lock does not already have a turnstile, use this thread's
	 * turnstile.  Otherwise insert the current thread into the
	 * turnstile already in use by this lock.
	 */
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	if (ts == td->td_turnstile) {
#ifdef TURNSTILE_PROFILING
		tc->tc_depth++;
		if (tc->tc_depth > tc->tc_max_depth) {
			tc->tc_max_depth = tc->tc_depth;
			if (tc->tc_max_depth > turnstile_max_depth)
				turnstile_max_depth = tc->tc_max_depth;
		}
#endif
		LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
		KASSERT(TAILQ_EMPTY(&ts->ts_pending),
		    ("thread's turnstile has pending threads"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]),
		    ("thread's turnstile has exclusive waiters"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]),
		    ("thread's turnstile has shared waiters"));
		KASSERT(LIST_EMPTY(&ts->ts_free),
		    ("thread's turnstile has a non-empty free list"));
		MPASS(ts->ts_lockobj != NULL);
		mtx_lock_spin(&td_contested_lock);
		TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		turnstile_setowner(ts, owner);
		mtx_unlock_spin(&td_contested_lock);
	} else {
		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq)
			if (td1->td_priority > td->td_priority)
				break;
		mtx_lock_spin(&td_contested_lock);
		if (td1 != NULL)
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		else
			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		MPASS(owner == ts->ts_owner);
		mtx_unlock_spin(&td_contested_lock);
		MPASS(td->td_turnstile != NULL);
		LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
	}
	thread_lock(td);
	thread_lock_set(td, &ts->ts_lock);
	td->td_turnstile = NULL;

	/* Save who we are blocked on and switch. */
	lock = ts->ts_lockobj;
	td->td_tsqueue = queue;
	td->td_blocked = ts;
	td->td_lockname = lock->lo_name;
	td->td_blktick = ticks;
	TD_SET_LOCK(td);
	mtx_unlock_spin(&tc->tc_lock);
	propagate_priority(td);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d blocked on [%p] %s", __func__,
		    td->td_tid, lock, lock->lo_name);

	SDT_PROBE0(sched, , , sleep);

	THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
	mi_switch(SW_VOL | SWT_TURNSTILE);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d free from blocked on [%p] %s",
		    __func__, td->td_tid, lock, lock->lo_name);
}

/*
 * Pick the highest priority thread on this turnstile and put it on the
 * pending list.  This must be called with the turnstile chain locked.
 */
int
turnstile_signal(struct turnstile *ts, int queue)
{
	struct turnstile_chain *tc __unused;
	struct thread *td;
	int empty;

	MPASS(ts != NULL);
	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/*
	 * Pick the highest priority thread blocked on this lock and
	 * move it to the pending list.
	 */
	td = TAILQ_FIRST(&ts->ts_blocked[queue]);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_lock_spin(&td_contested_lock);
	TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
	mtx_unlock_spin(&td_contested_lock);
	TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);

	/*
	 * If the turnstile is now empty, remove it from its chain and
	 * give it to the about-to-be-woken thread.  Otherwise take a
	 * turnstile from the free list and give it to the thread.
	 */
	empty = TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]);
	if (empty) {
		tc = TC_LOOKUP(ts->ts_lockobj);
		mtx_assert(&tc->tc_lock, MA_OWNED);
		MPASS(LIST_EMPTY(&ts->ts_free));
#ifdef TURNSTILE_PROFILING
		tc->tc_depth--;
#endif
	} else
		ts = LIST_FIRST(&ts->ts_free);
	MPASS(ts != NULL);
	LIST_REMOVE(ts, ts_hash);
	td->td_turnstile = ts;

	return (empty);
}

/*
 * Put all blocked threads on the pending list.  This must be called with
 * the turnstile chain locked.
 */
void
turnstile_broadcast(struct turnstile *ts, int queue)
{
	struct turnstile_chain *tc __unused;
	struct turnstile *ts1;
	struct thread *td;

	MPASS(ts != NULL);
	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
	/*
	 * We must have the chain locked so that we can remove the empty
	 * turnstile from the hash queue.
	 */
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/*
	 * Transfer the blocked list to the pending list.
	 */
	mtx_lock_spin(&td_contested_lock);
	TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked[queue], td_lockq);
	mtx_unlock_spin(&td_contested_lock);

	/*
	 * Give a turnstile to each thread.  The last thread gets
	 * this turnstile if the turnstile is empty.
	 */
	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
		if (LIST_EMPTY(&ts->ts_free)) {
			MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
			ts1 = ts;
#ifdef TURNSTILE_PROFILING
			tc->tc_depth--;
#endif
		} else
			ts1 = LIST_FIRST(&ts->ts_free);
		MPASS(ts1 != NULL);
		LIST_REMOVE(ts1, ts_hash);
		td->td_turnstile = ts1;
	}
}

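/*
 * Compute the priority to which a thread's lent priority should fall back:
 * the highest priority among the first waiters of all turnstiles the thread
 * still owns, bounded by PRI_MAX.  Both the thread lock and
 * td_contested_lock must be held.
 */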
static u_char
turnstile_calc_unlend_prio_locked(struct thread *td)
{
	struct turnstile *nts;
	u_char cp, pri;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	mtx_assert(&td_contested_lock, MA_OWNED);

	pri = PRI_MAX;
	LIST_FOREACH(nts, &td->td_contested, ts_link) {
		cp = turnstile_first_waiter(nts)->td_priority;
		if (cp < pri)
			pri = cp;
	}
	return (pri);
}

/*
 * Wake up all threads on the pending list and adjust the priority of the
 * current thread appropriately.  This must be called with the turnstile
 * chain locked.
 */
void
turnstile_unpend(struct turnstile *ts)
{
	TAILQ_HEAD( ,thread) pending_threads;
	struct thread *td;
	u_char pri;

	MPASS(ts != NULL);
	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
	MPASS(!TAILQ_EMPTY(&ts->ts_pending));

	/*
	 * Move the list of pending threads out of the turnstile and
	 * into a local variable.
	 */
	TAILQ_INIT(&pending_threads);
	TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]))
		ts->ts_lockobj = NULL;
#endif
	/*
	 * Adjust the priority of curthread based on other contested
	 * locks it owns.  Don't lower the priority below the base
	 * priority however.
	 */
	td = curthread;
	thread_lock(td);
	mtx_lock_spin(&td_contested_lock);
	/*
	 * Remove the turnstile from this thread's list of contested locks
	 * since this thread doesn't own it anymore.  New threads will
	 * not be blocking on the turnstile until it is claimed by a new
	 * owner.  There might not be a current owner if this is a shared
	 * lock.
	 */
	if (ts->ts_owner != NULL) {
		ts->ts_owner = NULL;
		LIST_REMOVE(ts, ts_link);
	}
	pri = turnstile_calc_unlend_prio_locked(td);
	mtx_unlock_spin(&td_contested_lock);
	sched_unlend_prio(td, pri);
	thread_unlock(td);
	/*
	 * Wake up all the pending threads.  If a thread is not blocked
	 * on a lock, then it is currently executing on another CPU in
	 * turnstile_wait() or sitting on a run queue waiting to resume
	 * in turnstile_wait().  Set a flag to force it to try to acquire
	 * the lock again instead of blocking.
	 */
	while (!TAILQ_EMPTY(&pending_threads)) {
		td = TAILQ_FIRST(&pending_threads);
		TAILQ_REMOVE(&pending_threads, td, td_lockq);
		SDT_PROBE2(sched, , , wakeup, td, td->td_proc);
		thread_lock_block_wait(td);
		THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		MPASS(TD_ON_LOCK(td));
		TD_CLR_LOCK(td);
		MPASS(TD_CAN_RUN(td));
		td->td_blocked = NULL;
		td->td_lockname = NULL;
		td->td_blktick = 0;
#ifdef INVARIANTS
		td->td_tsqueue = 0xff;
#endif
		sched_add(td, SRQ_HOLD | SRQ_BORING);
	}
	mtx_unlock_spin(&ts->ts_lock);
}

/*
 * Give up ownership of a turnstile.  This must be called with the
 * turnstile chain locked.
 */
void
turnstile_disown(struct turnstile *ts)
{
	struct thread *td;
	u_char pri;

	MPASS(ts != NULL);
	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(ts->ts_owner == curthread);
	MPASS(TAILQ_EMPTY(&ts->ts_pending));
	MPASS(!TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) ||
	    !TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));

	/*
	 * Remove the turnstile from this thread's list of contested locks
	 * since this thread doesn't own it anymore.  New threads will
	 * not be blocking on the turnstile until it is claimed by a new
	 * owner.
	 */
	mtx_lock_spin(&td_contested_lock);
	ts->ts_owner = NULL;
	LIST_REMOVE(ts, ts_link);
	mtx_unlock_spin(&td_contested_lock);

	/*
	 * Adjust the priority of curthread based on other contested
	 * locks it owns.  Don't lower the priority below the base
	 * priority however.
	 */
	td = curthread;
	thread_lock(td);
	mtx_unlock_spin(&ts->ts_lock);
	mtx_lock_spin(&td_contested_lock);
	pri = turnstile_calc_unlend_prio_locked(td);
	mtx_unlock_spin(&td_contested_lock);
	sched_unlend_prio(td, pri);
	thread_unlock(td);
}

/*
 * Return the first thread in a turnstile.
 */
struct thread *
turnstile_head(struct turnstile *ts, int queue)
{
#ifdef INVARIANTS

	MPASS(ts != NULL);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
	mtx_assert(&ts->ts_lock, MA_OWNED);
#endif
	return (TAILQ_FIRST(&ts->ts_blocked[queue]));
}

/*
 * Returns true if a sub-queue of a turnstile is empty.
 */
int
turnstile_empty(struct turnstile *ts, int queue)
{
#ifdef INVARIANTS

	MPASS(ts != NULL);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
	mtx_assert(&ts->ts_lock, MA_OWNED);
#endif
	return (TAILQ_EMPTY(&ts->ts_blocked[queue]));
}

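/*
 * DDB support: commands and helpers for inspecting turnstiles, lock
 * chains, and waiter trees from the kernel debugger.
 */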
#ifdef DDB
static void
print_thread(struct thread *td, const char *prefix)
{

	db_printf("%s%p (tid %d, pid %d, \"%s\")\n", prefix, td, td->td_tid,
	    td->td_proc->p_pid, td->td_name);
}

static void
print_queue(struct threadqueue *queue, const char *header, const char *prefix)
{
	struct thread *td;

	db_printf("%s:\n", header);
	if (TAILQ_EMPTY(queue)) {
		db_printf("%sempty\n", prefix);
		return;
	}
	TAILQ_FOREACH(td, queue, td_lockq) {
		print_thread(td, prefix);
	}
}

DB_SHOW_COMMAND(turnstile, db_show_turnstile)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct lock_object *lock;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active turnstile for the lock indicated
	 * by the address.
	 */
	lock = (struct lock_object *)addr;
	tc = TC_LOOKUP(lock);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			goto found;

	/*
	 * Second, see if there is an active turnstile at the address
	 * indicated.
	 */
	for (i = 0; i < TC_TABLESIZE; i++)
		LIST_FOREACH(ts, &turnstile_chains[i].tc_turnstiles, ts_hash) {
			if (ts == (struct turnstile *)addr)
				goto found;
		}

	db_printf("Unable to locate a turnstile via %p\n", (void *)addr);
	return;
found:
	lock = ts->ts_lockobj;
	db_printf("Lock: %p - (%s) %s\n", lock, LOCK_CLASS(lock)->lc_name,
	    lock->lo_name);
	if (ts->ts_owner)
		print_thread(ts->ts_owner, "Lock Owner: ");
	else
		db_printf("Lock Owner: none\n");
	print_queue(&ts->ts_blocked[TS_SHARED_QUEUE], "Shared Waiters", "\t");
	print_queue(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE], "Exclusive Waiters",
	    "\t");
	print_queue(&ts->ts_pending, "Pending Threads", "\t");

}

/*
 * Show all the threads a particular thread is waiting on based on
 * non-spin locks.
 */
static void
print_lockchain(struct thread *td, const char *prefix)
{
	struct lock_object *lock;
	struct lock_class *class;
	struct turnstile *ts;
	struct thread *owner;

	/*
	 * Follow the chain.  We keep walking as long as the thread is
	 * blocked on a lock that has an owner.
	 */
	while (!db_pager_quit) {
		if (td == (void *)LK_KERNPROC) {
			db_printf("%sdisowned (LK_KERNPROC)\n", prefix);
			return;
		}
		db_printf("%sthread %d (pid %d, %s) is ", prefix, td->td_tid,
		    td->td_proc->p_pid, td->td_name);
		switch (TD_GET_STATE(td)) {
		case TDS_INACTIVE:
			db_printf("inactive\n");
			return;
		case TDS_CAN_RUN:
			db_printf("runnable\n");
			return;
		case TDS_RUNQ:
			db_printf("on a run queue\n");
			return;
		case TDS_RUNNING:
			db_printf("running on CPU %d\n", td->td_oncpu);
			return;
		case TDS_INHIBITED:
			if (TD_ON_LOCK(td)) {
				ts = td->td_blocked;
				lock = ts->ts_lockobj;
				class = LOCK_CLASS(lock);
				db_printf("blocked on lock %p (%s) \"%s\"\n",
				    lock, class->lc_name, lock->lo_name);
				if (ts->ts_owner == NULL)
					return;
				td = ts->ts_owner;
				break;
			} else if (TD_ON_SLEEPQ(td)) {
				if (!lockmgr_chain(td, &owner) &&
				    !sx_chain(td, &owner)) {
					db_printf("sleeping on %p \"%s\"\n",
					    td->td_wchan, td->td_wmesg);
					return;
				}
				if (owner == NULL)
					return;
				td = owner;
				break;
			}
			db_printf("inhibited: %s\n", KTDSTATE(td));
			return;
		default:
			db_printf("??? (%#x)\n", TD_GET_STATE(td));
			return;
		}
	}
}

DB_SHOW_COMMAND(lockchain, db_show_lockchain)
{
	struct thread *td;

	/* Figure out which thread to start with. */
	if (have_addr)
		td = db_lookup_thread(addr, true);
	else
		td = kdb_thread;

	print_lockchain(td, "");
}
DB_SHOW_ALIAS(sleepchain, db_show_lockchain);

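/*
 * Show the lock chain for every thread in the system that is currently
 * blocked on a lock or sleeping on a sleep queue.
 */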
DB_SHOW_ALL_COMMAND(chains, db_show_allchains)
{
	struct thread *td;
	struct proc *p;
	int i;

	i = 1;
	FOREACH_PROC_IN_SYSTEM(p) {
		FOREACH_THREAD_IN_PROC(p, td) {
			if ((TD_ON_LOCK(td) && LIST_EMPTY(&td->td_contested))
			    || (TD_IS_INHIBITED(td) && TD_ON_SLEEPQ(td))) {
				db_printf("chain %d:\n", i++);
				print_lockchain(td, " ");
			}
			if (db_pager_quit)
				return;
		}
	}
}
DB_SHOW_ALIAS_FLAGS(allchains, db_show_allchains, DB_CMD_MEMSAFE);

static void	print_waiters(struct turnstile *ts, int indent);

static void
print_waiter(struct thread *td, int indent)
{
	struct turnstile *ts;
	int i;

	if (db_pager_quit)
		return;
	for (i = 0; i < indent; i++)
		db_printf(" ");
	print_thread(td, "thread ");
	LIST_FOREACH(ts, &td->td_contested, ts_link)
		print_waiters(ts, indent + 1);
}

static void
print_waiters(struct turnstile *ts, int indent)
{
	struct lock_object *lock;
	struct lock_class *class;
	struct thread *td;
	int i;

	if (db_pager_quit)
		return;
	lock = ts->ts_lockobj;
	class = LOCK_CLASS(lock);
	for (i = 0; i < indent; i++)
		db_printf(" ");
	db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name, lock->lo_name);
	TAILQ_FOREACH(td, &ts->ts_blocked[TS_EXCLUSIVE_QUEUE], td_lockq)
		print_waiter(td, indent + 1);
	TAILQ_FOREACH(td, &ts->ts_blocked[TS_SHARED_QUEUE], td_lockq)
		print_waiter(td, indent + 1);
	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq)
		print_waiter(td, indent + 1);
}

DB_SHOW_COMMAND(locktree, db_show_locktree)
{
	struct lock_object *lock;
	struct lock_class *class;
	struct turnstile_chain *tc;
	struct turnstile *ts;

	if (!have_addr)
		return;
	lock = (struct lock_object *)addr;
	tc = TC_LOOKUP(lock);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			break;
	if (ts == NULL) {
		class = LOCK_CLASS(lock);
		db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name,
		    lock->lo_name);
	} else
		print_waiters(ts, 0);
}
#endif