Lines Matching +full:lock +full:- +full:- +full:- +full:-
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
36 * non-sleepable locks. Sleepable locks use condition variables to
38 * turnstile queues are assigned to a lock held by an owning thread. Thus,
43 * want to use back-pointers in the locks for the same reason. Thus, we
46 * in a hash table based on the address of the lock. Each entry in the
47 * hash table is a linked-lists of turnstiles and is called a turnstile
52 * and attached to that thread. When a thread blocks on a lock, if it is the
53 * first thread to block, it lends its turnstile to the lock. If the lock
54 * already has a turnstile, then it gives its turnstile to the lock's
57 * blocked on the lock, then it reclaims the turnstile associated with the lock
71 #include <sys/lock.h>
95 #define TC_MASK (TC_TABLESIZE - 1)
97 #define TC_HASH(lock) (((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK) argument
98 #define TC_LOOKUP(lock) &turnstile_chains[TC_HASH(lock)] argument
102 * connected by ts_link entries is a per-thread list of all the turnstiles
104 * a lock is released. The other two lists use the ts_hash entries. The
106 * when it is attached to a lock. The second list to use ts_hash is the
107 * free list hung off of a turnstile that is attached to a lock.
110 * are linked list of threads blocked on the turnstile's lock. One list is
117 * c - turnstile chain lock
118 * q - td_contested lock
121 struct mtx ts_lock; /* Spin lock for self. */
127 struct lock_object *ts_lockobj; /* (c) Lock we reference. */
128 struct thread *ts_owner; /* (c + q) Who owns the lock. */
133 struct mtx tc_lock; /* Spin lock for this chain. */
155 * Prototypes for non-exported routines.
182 mtx_unlock_spin(&ts->ts_lock); in propagate_unlock_ts()
189 if (td->td_lock != &top->ts_lock) in propagate_unlock_td()
205 pri = td->td_priority; in propagate_priority()
206 top = ts = td->td_blocked; in propagate_priority()
207 THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock); in propagate_priority()
210 * The original turnstile lock is held across the entire in propagate_priority()
211 * operation. We only ever lock down the chain so the lock in propagate_priority()
215 td = ts->ts_owner; in propagate_priority()
219 * This might be a read lock with no owner. There's in propagate_priority()
227 * Wait for the thread lock to be stable and then only in propagate_priority()
228 * acquire if it is not the turnstile lock. in propagate_priority()
231 if (td->td_lock != &ts->ts_lock) { in propagate_priority()
235 MPASS(td->td_proc != NULL); in propagate_priority()
236 MPASS(td->td_proc->p_magic == P_MAGIC); in propagate_priority()
246 "Sleeping thread (tid %d, pid %d) owns a non-sleepable lock\n", in propagate_priority()
247 td->td_tid, td->td_proc->p_pid); in propagate_priority()
250 ts->ts_lockobj->lo_name); in propagate_priority()
257 if (td->td_priority <= pri) { in propagate_priority()
268 * If lock holder is actually running or on the run queue in propagate_priority()
272 MPASS(td->td_blocked == NULL); in propagate_priority()
286 * If we aren't blocked on a lock, we should be. in propagate_priority()
289 "thread %d(%s):%d holds %s but isn't blocked on a lock\n", in propagate_priority()
290 td->td_tid, td->td_name, TD_GET_STATE(td), in propagate_priority()
291 ts->ts_lockobj->lo_name)); in propagate_priority()
294 * Pick up the lock that td is blocked on. in propagate_priority()
296 ts = td->td_blocked; in propagate_priority()
298 THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock); in propagate_priority()
304 /* The thread lock is released as ts lock above. */ in propagate_priority()
324 * that is waiting on the thread lock in turnstile_unpend() to in turnstile_adjust_thread()
331 if (td->td_turnstile != NULL) in turnstile_adjust_thread()
339 THREAD_LOCKPTR_BLOCKED_ASSERT(td, &ts->ts_lock); in turnstile_adjust_thread()
342 if ((td1 != NULL && td->td_priority < td1->td_priority) || in turnstile_adjust_thread()
343 (td2 != NULL && td->td_priority > td2->td_priority)) { in turnstile_adjust_thread()
348 queue = td->td_tsqueue; in turnstile_adjust_thread()
351 TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq); in turnstile_adjust_thread()
352 TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq) { in turnstile_adjust_thread()
353 MPASS(td1->td_proc->p_magic == P_MAGIC); in turnstile_adjust_thread()
354 if (td1->td_priority > td->td_priority) in turnstile_adjust_thread()
359 TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq); in turnstile_adjust_thread()
366 td->td_tid, ts->ts_lockobj, ts->ts_lockobj->lo_name); in turnstile_adjust_thread()
370 td->td_tid, td1->td_tid, ts->ts_lockobj, in turnstile_adjust_thread()
371 ts->ts_lockobj->lo_name); in turnstile_adjust_thread()
450 * Pick up the lock that td is blocked on. in turnstile_adjust()
452 ts = td->td_blocked; in turnstile_adjust()
454 THREAD_LOCKPTR_BLOCKED_ASSERT(td, &ts->ts_lock); in turnstile_adjust()
455 mtx_assert(&ts->ts_lock, MA_OWNED); in turnstile_adjust()
466 MPASS(td->td_tsqueue == TS_EXCLUSIVE_QUEUE || in turnstile_adjust()
467 td->td_tsqueue == TS_SHARED_QUEUE); in turnstile_adjust()
468 if (td == TAILQ_FIRST(&ts->ts_blocked[td->td_tsqueue]) && in turnstile_adjust()
469 td->td_priority < oldpri) { in turnstile_adjust()
475 * Set the owner of the lock this turnstile is attached to.
482 MPASS(ts->ts_owner == NULL); in turnstile_setowner()
484 /* A shared lock might not have an owner. */ in turnstile_setowner()
488 MPASS(owner->td_proc->p_magic == P_MAGIC); in turnstile_setowner()
489 ts->ts_owner = owner; in turnstile_setowner()
490 LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link); in turnstile_setowner()
503 MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE])); in turnstile_dtor()
504 MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE])); in turnstile_dtor()
505 MPASS(TAILQ_EMPTY(&ts->ts_pending)); in turnstile_dtor()
519 TAILQ_INIT(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]); in turnstile_init()
520 TAILQ_INIT(&ts->ts_blocked[TS_SHARED_QUEUE]); in turnstile_init()
521 TAILQ_INIT(&ts->ts_pending); in turnstile_init()
522 LIST_INIT(&ts->ts_free); in turnstile_init()
523 mtx_init(&ts->ts_lock, "turnstile lock", NULL, MTX_SPIN); in turnstile_init()
533 mtx_destroy(&ts->ts_lock); in turnstile_fini()
557 * Lock the turnstile chain associated with the specified lock.
560 turnstile_chain_lock(struct lock_object *lock) in turnstile_chain_lock() argument
564 tc = TC_LOOKUP(lock); in turnstile_chain_lock()
565 mtx_lock_spin(&tc->tc_lock); in turnstile_chain_lock()
569 turnstile_trywait(struct lock_object *lock) in turnstile_trywait() argument
574 tc = TC_LOOKUP(lock); in turnstile_trywait()
575 mtx_lock_spin(&tc->tc_lock); in turnstile_trywait()
576 LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash) in turnstile_trywait()
577 if (ts->ts_lockobj == lock) { in turnstile_trywait()
578 mtx_lock_spin(&ts->ts_lock); in turnstile_trywait()
582 ts = curthread->td_turnstile; in turnstile_trywait()
584 mtx_lock_spin(&ts->ts_lock); in turnstile_trywait()
585 KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer")); in turnstile_trywait()
586 ts->ts_lockobj = lock; in turnstile_trywait()
596 struct lock_object *lock; in turnstile_lock() local
598 if ((lock = ts->ts_lockobj) == NULL) in turnstile_lock()
600 tc = TC_LOOKUP(lock); in turnstile_lock()
601 mtx_lock_spin(&tc->tc_lock); in turnstile_lock()
602 mtx_lock_spin(&ts->ts_lock); in turnstile_lock()
603 if (__predict_false(lock != ts->ts_lockobj)) { in turnstile_lock()
604 mtx_unlock_spin(&tc->tc_lock); in turnstile_lock()
605 mtx_unlock_spin(&ts->ts_lock); in turnstile_lock()
608 *lockp = lock; in turnstile_lock()
609 *tdp = ts->ts_owner; in turnstile_lock()
614 turnstile_unlock(struct turnstile *ts, struct lock_object *lock) in turnstile_unlock() argument
618 mtx_assert(&ts->ts_lock, MA_OWNED); in turnstile_unlock()
619 mtx_unlock_spin(&ts->ts_lock); in turnstile_unlock()
620 if (ts == curthread->td_turnstile) in turnstile_unlock()
621 ts->ts_lockobj = NULL; in turnstile_unlock()
622 tc = TC_LOOKUP(lock); in turnstile_unlock()
623 mtx_unlock_spin(&tc->tc_lock); in turnstile_unlock()
629 MPASS(ts->ts_lockobj == NULL); in turnstile_assert()
636 struct lock_object *lock; in turnstile_cancel() local
638 mtx_assert(&ts->ts_lock, MA_OWNED); in turnstile_cancel()
640 mtx_unlock_spin(&ts->ts_lock); in turnstile_cancel()
641 lock = ts->ts_lockobj; in turnstile_cancel()
642 if (ts == curthread->td_turnstile) in turnstile_cancel()
643 ts->ts_lockobj = NULL; in turnstile_cancel()
644 tc = TC_LOOKUP(lock); in turnstile_cancel()
645 mtx_unlock_spin(&tc->tc_lock); in turnstile_cancel()
649 * Look up the turnstile for a lock in the hash table locking the associated
654 turnstile_lookup(struct lock_object *lock) in turnstile_lookup() argument
659 tc = TC_LOOKUP(lock); in turnstile_lookup()
660 mtx_assert(&tc->tc_lock, MA_OWNED); in turnstile_lookup()
661 LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash) in turnstile_lookup()
662 if (ts->ts_lockobj == lock) { in turnstile_lookup()
663 mtx_lock_spin(&ts->ts_lock); in turnstile_lookup()
670 * Unlock the turnstile chain associated with a given lock.
673 turnstile_chain_unlock(struct lock_object *lock) in turnstile_chain_unlock() argument
677 tc = TC_LOOKUP(lock); in turnstile_chain_unlock()
678 mtx_unlock_spin(&tc->tc_lock); in turnstile_chain_unlock()
690 std = TAILQ_FIRST(&ts->ts_blocked[TS_SHARED_QUEUE]); in turnstile_first_waiter()
691 xtd = TAILQ_FIRST(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]); in turnstile_first_waiter()
692 if (xtd == NULL || (std != NULL && std->td_priority < xtd->td_priority)) in turnstile_first_waiter()
707 mtx_assert(&ts->ts_lock, MA_OWNED); in turnstile_claim()
708 MPASS(ts != curthread->td_turnstile); in turnstile_claim()
717 MPASS(td->td_proc->p_magic == P_MAGIC); in turnstile_claim()
718 THREAD_LOCKPTR_BLOCKED_ASSERT(td, &ts->ts_lock); in turnstile_claim()
724 if (td->td_priority < owner->td_priority) in turnstile_claim()
725 sched_lend_prio(owner, td->td_priority); in turnstile_claim()
727 tc = TC_LOOKUP(ts->ts_lockobj); in turnstile_claim()
728 mtx_unlock_spin(&ts->ts_lock); in turnstile_claim()
729 mtx_unlock_spin(&tc->tc_lock); in turnstile_claim()
733 * Block the current thread on the turnstile associated with 'lock'. This in turnstile_wait()
743 struct lock_object *lock; in turnstile_wait() local
746 mtx_assert(&ts->ts_lock, MA_OWNED); in turnstile_wait()
748 MPASS(owner->td_proc->p_magic == P_MAGIC); in turnstile_wait()
752 * If the lock does not already have a turnstile, use this thread's in turnstile_wait()
754 * turnstile already in use by this lock. in turnstile_wait()
756 tc = TC_LOOKUP(ts->ts_lockobj); in turnstile_wait()
757 mtx_assert(&tc->tc_lock, MA_OWNED); in turnstile_wait()
758 if (ts == td->td_turnstile) { in turnstile_wait()
760 tc->tc_depth++; in turnstile_wait()
761 if (tc->tc_depth > tc->tc_max_depth) { in turnstile_wait()
762 tc->tc_max_depth = tc->tc_depth; in turnstile_wait()
763 if (tc->tc_max_depth > turnstile_max_depth) in turnstile_wait()
764 turnstile_max_depth = tc->tc_max_depth; in turnstile_wait()
767 LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash); in turnstile_wait()
768 KASSERT(TAILQ_EMPTY(&ts->ts_pending), in turnstile_wait()
770 KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]), in turnstile_wait()
772 KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]), in turnstile_wait()
774 KASSERT(LIST_EMPTY(&ts->ts_free), in turnstile_wait()
775 ("thread's turnstile has a non-empty free list")); in turnstile_wait()
776 MPASS(ts->ts_lockobj != NULL); in turnstile_wait()
778 TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq); in turnstile_wait()
782 TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq) in turnstile_wait()
783 if (td1->td_priority > td->td_priority) in turnstile_wait()
789 TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq); in turnstile_wait()
790 MPASS(owner == ts->ts_owner); in turnstile_wait()
792 MPASS(td->td_turnstile != NULL); in turnstile_wait()
793 LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash); in turnstile_wait()
796 thread_lock_set(td, &ts->ts_lock); in turnstile_wait()
797 td->td_turnstile = NULL; in turnstile_wait()
800 lock = ts->ts_lockobj; in turnstile_wait()
801 td->td_tsqueue = queue; in turnstile_wait()
802 td->td_blocked = ts; in turnstile_wait()
803 td->td_lockname = lock->lo_name; in turnstile_wait()
804 td->td_blktick = ticks; in turnstile_wait()
806 mtx_unlock_spin(&tc->tc_lock); in turnstile_wait()
809 if (LOCK_LOG_TEST(lock, 0)) in turnstile_wait()
811 td->td_tid, lock, lock->lo_name); in turnstile_wait()
815 THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock); in turnstile_wait()
818 if (LOCK_LOG_TEST(lock, 0)) in turnstile_wait()
820 __func__, td->td_tid, lock, lock->lo_name); in turnstile_wait()
835 mtx_assert(&ts->ts_lock, MA_OWNED); in turnstile_signal()
836 MPASS(curthread->td_proc->p_magic == P_MAGIC); in turnstile_signal()
837 MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL); in turnstile_signal()
841 * Pick the highest priority thread blocked on this lock and in turnstile_signal()
844 td = TAILQ_FIRST(&ts->ts_blocked[queue]); in turnstile_signal()
845 MPASS(td->td_proc->p_magic == P_MAGIC); in turnstile_signal()
847 TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq); in turnstile_signal()
849 TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq); in turnstile_signal()
853 * give it to the about-to-be-woken thread. Otherwise take a in turnstile_signal()
856 empty = TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) && in turnstile_signal()
857 TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]); in turnstile_signal()
859 tc = TC_LOOKUP(ts->ts_lockobj); in turnstile_signal()
860 mtx_assert(&tc->tc_lock, MA_OWNED); in turnstile_signal()
861 MPASS(LIST_EMPTY(&ts->ts_free)); in turnstile_signal()
863 tc->tc_depth--; in turnstile_signal()
866 ts = LIST_FIRST(&ts->ts_free); in turnstile_signal()
869 td->td_turnstile = ts; in turnstile_signal()
886 mtx_assert(&ts->ts_lock, MA_OWNED); in turnstile_broadcast()
887 MPASS(curthread->td_proc->p_magic == P_MAGIC); in turnstile_broadcast()
888 MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL); in turnstile_broadcast()
893 tc = TC_LOOKUP(ts->ts_lockobj); in turnstile_broadcast()
894 mtx_assert(&tc->tc_lock, MA_OWNED); in turnstile_broadcast()
901 TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked[queue], td_lockq); in turnstile_broadcast()
908 TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) { in turnstile_broadcast()
909 if (LIST_EMPTY(&ts->ts_free)) { in turnstile_broadcast()
913 tc->tc_depth--; in turnstile_broadcast()
916 ts1 = LIST_FIRST(&ts->ts_free); in turnstile_broadcast()
919 td->td_turnstile = ts1; in turnstile_broadcast()
933 LIST_FOREACH(nts, &td->td_contested, ts_link) { in turnstile_calc_unlend_prio_locked()
934 cp = turnstile_first_waiter(nts)->td_priority; in turnstile_calc_unlend_prio_locked()
954 mtx_assert(&ts->ts_lock, MA_OWNED); in turnstile_unpend()
955 MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL); in turnstile_unpend()
956 MPASS(!TAILQ_EMPTY(&ts->ts_pending)); in turnstile_unpend()
963 TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq); in turnstile_unpend()
965 if (TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) && in turnstile_unpend()
966 TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE])) in turnstile_unpend()
967 ts->ts_lockobj = NULL; in turnstile_unpend()
982 * lock. in turnstile_unpend()
984 if (ts->ts_owner != NULL) { in turnstile_unpend()
985 ts->ts_owner = NULL; in turnstile_unpend()
994 * on a lock, then it is currently executing on another CPU in in turnstile_unpend()
997 * the lock again instead of blocking. in turnstile_unpend()
1002 SDT_PROBE2(sched, , , wakeup, td, td->td_proc); in turnstile_unpend()
1004 THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock); in turnstile_unpend()
1005 MPASS(td->td_proc->p_magic == P_MAGIC); in turnstile_unpend()
1009 td->td_blocked = NULL; in turnstile_unpend()
1010 td->td_lockname = NULL; in turnstile_unpend()
1011 td->td_blktick = 0; in turnstile_unpend()
1013 td->td_tsqueue = 0xff; in turnstile_unpend()
1017 mtx_unlock_spin(&ts->ts_lock); in turnstile_unpend()
1031 mtx_assert(&ts->ts_lock, MA_OWNED); in turnstile_disown()
1032 MPASS(ts->ts_owner == curthread); in turnstile_disown()
1033 MPASS(TAILQ_EMPTY(&ts->ts_pending)); in turnstile_disown()
1034 MPASS(!TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) || in turnstile_disown()
1035 !TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE])); in turnstile_disown()
1044 ts->ts_owner = NULL; in turnstile_disown()
1055 mtx_unlock_spin(&ts->ts_lock); in turnstile_disown()
1073 mtx_assert(&ts->ts_lock, MA_OWNED); in turnstile_head()
1075 return (TAILQ_FIRST(&ts->ts_blocked[queue])); in turnstile_head()
1079 * Returns true if a sub-queue of a turnstile is empty.
1088 mtx_assert(&ts->ts_lock, MA_OWNED); in turnstile_empty()
1090 return (TAILQ_EMPTY(&ts->ts_blocked[queue])); in turnstile_empty()
1098 db_printf("%s%p (tid %d, pid %d, \"%s\")\n", prefix, td, td->td_tid, in print_thread()
1099 td->td_proc->p_pid, td->td_name); in print_thread()
1121 struct lock_object *lock; in DB_SHOW_COMMAND() local
1128 * First, see if there is an active turnstile for the lock indicated in DB_SHOW_COMMAND()
1131 lock = (struct lock_object *)addr; in DB_SHOW_COMMAND()
1132 tc = TC_LOOKUP(lock); in DB_SHOW_COMMAND()
1133 LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash) in DB_SHOW_COMMAND()
1134 if (ts->ts_lockobj == lock) in DB_SHOW_COMMAND()
1150 lock = ts->ts_lockobj; in DB_SHOW_COMMAND()
1151 db_printf("Lock: %p - (%s) %s\n", lock, LOCK_CLASS(lock)->lc_name, in DB_SHOW_COMMAND()
1152 lock->lo_name); in DB_SHOW_COMMAND()
1153 if (ts->ts_owner) in DB_SHOW_COMMAND()
1154 print_thread(ts->ts_owner, "Lock Owner: "); in DB_SHOW_COMMAND()
1156 db_printf("Lock Owner: none\n"); in DB_SHOW_COMMAND()
1157 print_queue(&ts->ts_blocked[TS_SHARED_QUEUE], "Shared Waiters", "\t"); in DB_SHOW_COMMAND()
1158 print_queue(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE], "Exclusive Waiters", in DB_SHOW_COMMAND()
1160 print_queue(&ts->ts_pending, "Pending Threads", "\t"); in DB_SHOW_COMMAND()
1166 * non-spin locks.
1171 struct lock_object *lock; in print_lockchain() local
1178 * blocked on a lock that has an owner. in print_lockchain()
1185 db_printf("%sthread %d (pid %d, %s) is ", prefix, td->td_tid, in print_lockchain()
1186 td->td_proc->p_pid, td->td_name); in print_lockchain()
1198 db_printf("running on CPU %d\n", td->td_oncpu); in print_lockchain()
1202 ts = td->td_blocked; in print_lockchain()
1203 lock = ts->ts_lockobj; in print_lockchain()
1204 class = LOCK_CLASS(lock); in print_lockchain()
1205 db_printf("blocked on lock %p (%s) \"%s\"\n", in print_lockchain()
1206 lock, class->lc_name, lock->lo_name); in print_lockchain()
1207 if (ts->ts_owner == NULL) in print_lockchain()
1209 td = ts->ts_owner; in print_lockchain()
1215 td->td_wchan, td->td_wmesg); in print_lockchain()
1255 if ((TD_ON_LOCK(td) && LIST_EMPTY(&td->td_contested)) in DB_SHOW_ALL_COMMAND()
1280 LIST_FOREACH(ts, &td->td_contested, ts_link) in print_waiter()
1287 struct lock_object *lock; in print_waiters() local
1294 lock = ts->ts_lockobj; in print_waiters()
1295 class = LOCK_CLASS(lock); in print_waiters()
1298 db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name, lock->lo_name); in print_waiters()
1299 TAILQ_FOREACH(td, &ts->ts_blocked[TS_EXCLUSIVE_QUEUE], td_lockq) in print_waiters()
1301 TAILQ_FOREACH(td, &ts->ts_blocked[TS_SHARED_QUEUE], td_lockq) in print_waiters()
1303 TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) in print_waiters()
1309 struct lock_object *lock; in DB_SHOW_COMMAND() local
1316 lock = (struct lock_object *)addr; in DB_SHOW_COMMAND()
1317 tc = TC_LOOKUP(lock); in DB_SHOW_COMMAND()
1318 LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash) in DB_SHOW_COMMAND()
1319 if (ts->ts_lockobj == lock) in DB_SHOW_COMMAND()
1322 class = LOCK_CLASS(lock); in DB_SHOW_COMMAND()
1323 db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name, in DB_SHOW_COMMAND()
1324 lock->lo_name); in DB_SHOW_COMMAND()