xref: /freebsd/sys/kern/subr_turnstile.c (revision 94942af266ac119ede0ca836f9aa5a5ac0582938)
1 /*-
2  * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  * 3. Berkeley Software Design Inc's name may not be used to endorse or
13  *    promote products derived from this software without specific prior
14  *    written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29  *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30  */
31 
32 /*
33  * Implementation of turnstiles used to hold the queue of threads blocked on
34  * non-sleepable locks.  Sleepable locks use condition variables to
35  * implement their queues.  Turnstiles differ from sleep queues in that
36  * turnstile queues are assigned to a lock held by an owning thread.  Thus,
37  * when one thread is enqueued onto a turnstile, it can lend its priority
38  * to the owning thread.
39  *
40  * We wish to avoid bloating locks with an embedded turnstile and we do not
41  * want to use back-pointers in the locks for the same reason.  Thus, we
42  * use a similar approach to that of Solaris 7 as described in Solaris
43  * Internals by Jim Mauro and Richard McDougall.  Turnstiles are looked up
44  * in a hash table based on the address of the lock.  Each entry in the
45  * hash table is a linked list of turnstiles and is called a turnstile
46  * chain.  Each chain contains a spin mutex that protects all of the
47  * turnstiles in the chain.
48  *
49  * Each time a thread is created, a turnstile is allocated from a UMA zone
50  * and attached to that thread.  When a thread blocks on a lock, if it is the
51  * first thread to block, it lends its turnstile to the lock.  If the lock
52  * already has a turnstile, then it adds its own turnstile to that
53  * turnstile's free list.  When a thread is woken up, it takes a turnstile from
54  * the free list if there are any other waiters.  If it is the only thread
55  * blocked on the lock, then it reclaims the turnstile associated with the lock
56  * and removes it from the hash table.
57  */
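/*
 * A rough way to summarize the exchange above (an informal sketch, not a
 * statement lifted from elsewhere in the tree):
 *
 *	td->td_turnstile != NULL   thread is not blocked (or is being woken up)
 *	td->td_turnstile == NULL   thread donated its turnstile and is blocked
 *
 * so a contested lock always has exactly one turnstile per blocked thread:
 * one attached to the lock and the rest parked on that turnstile's free list.
 */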
58 
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
61 
62 #include "opt_ddb.h"
63 #include "opt_turnstile_profiling.h"
64 
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/kernel.h>
68 #include <sys/ktr.h>
69 #include <sys/lock.h>
70 #include <sys/mutex.h>
71 #include <sys/proc.h>
72 #include <sys/queue.h>
73 #include <sys/sched.h>
74 #include <sys/sysctl.h>
75 #include <sys/turnstile.h>
76 
77 #include <vm/uma.h>
78 
79 #ifdef DDB
80 #include <sys/kdb.h>
81 #include <ddb/ddb.h>
82 #include <sys/lockmgr.h>
83 #include <sys/sx.h>
84 #endif
85 
86 /*
87  * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
88  * number chosen because the sleep queues use the same value for the
89  * shift.  Basically, we ignore the lower 8 bits of the address.
90  * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
91  */
92 #define	TC_TABLESIZE	128			/* Must be power of 2. */
93 #define	TC_MASK		(TC_TABLESIZE - 1)
94 #define	TC_SHIFT	8
95 #define	TC_HASH(lock)	(((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
96 #define	TC_LOOKUP(lock)	&turnstile_chains[TC_HASH(lock)]
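/*
 * A worked example of the hash (illustrative only; the address is made up):
 * for a lock_object at address 0xdeadbe00, TC_HASH() discards the low
 * TC_SHIFT (8) bits and masks the rest into the table:
 *
 *	(0xdeadbe00 >> 8) & (128 - 1) = 0xdeadbe & 0x7f = 0x3e = 62
 *
 * so that lock's turnstiles live on turnstile_chains[62].
 */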
97 
98 /*
99  * There are three different lists of turnstiles as follows.  The list
100  * connected by ts_link entries is a per-thread list of all the turnstiles
101  * attached to locks that we own.  This is used to fix up our priority when
102  * a lock is released.  The other two lists use the ts_hash entries.  The
103  * first of these two is the turnstile chain list that a turnstile is on
104  * when it is attached to a lock.  The second list to use ts_hash is the
105  * free list hung off of a turnstile that is attached to a lock.
106  *
107  * Each turnstile contains three lists of threads.  The two ts_blocked lists
108  * are linked lists of threads blocked on the turnstile's lock.  One list is
109  * for exclusive waiters, and the other is for shared waiters.  The
110  * ts_pending list is a linked list of threads previously awakened by
111  * turnstile_signal() or turnstile_wait() that are waiting to be put on
112  * the run queue.
113  *
114  * Locking key:
115  *  c - turnstile chain lock
116  *  q - td_contested lock
117  */
118 struct turnstile {
119 	struct threadqueue ts_blocked[2];	/* (c + q) Blocked threads. */
120 	struct threadqueue ts_pending;		/* (c) Pending threads. */
121 	LIST_ENTRY(turnstile) ts_hash;		/* (c) Chain and free list. */
122 	LIST_ENTRY(turnstile) ts_link;		/* (q) Contested locks. */
123 	LIST_HEAD(, turnstile) ts_free;		/* (c) Free turnstiles. */
124 	struct lock_object *ts_lockobj;		/* (c) Lock we reference. */
125 	struct thread *ts_owner;		/* (c + q) Who owns the lock. */
126 };
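/*
 * A rough picture of how the lists above hang together for one contested
 * lock (an illustrative sketch only):
 *
 *	turnstile_chains[TC_HASH(lock)]
 *	    `-- ts_hash --> active turnstile for 'lock'
 *	                      ts_free:    turnstiles donated by later waiters
 *	                      ts_blocked: exclusive and shared waiter queues
 *	                      ts_pending: threads already chosen to be woken up
 *	                      ts_link:    entry on ts_owner's td_contested list
 */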
127 
128 struct turnstile_chain {
129 	LIST_HEAD(, turnstile) tc_turnstiles;	/* List of turnstiles. */
130 	struct mtx tc_lock;			/* Spin lock for this chain. */
131 #ifdef TURNSTILE_PROFILING
132 	u_int	tc_depth;			/* Length of tc_turnstiles. */
133 	u_int	tc_max_depth;			/* Max length of tc_turnstiles. */
134 #endif
135 };
136 
137 #ifdef TURNSTILE_PROFILING
138 u_int turnstile_max_depth;
139 SYSCTL_NODE(_debug, OID_AUTO, turnstile, CTLFLAG_RD, 0, "turnstile profiling");
140 SYSCTL_NODE(_debug_turnstile, OID_AUTO, chains, CTLFLAG_RD, 0,
141     "turnstile chain stats");
142 SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD,
143     &turnstile_max_depth, 0, "maximum depth achieved by a single chain");
144 #endif
145 static struct mtx td_contested_lock;
146 static struct turnstile_chain turnstile_chains[TC_TABLESIZE];
147 static uma_zone_t turnstile_zone;
148 
149 /*
150  * Prototypes for non-exported routines.
151  */
152 static void	init_turnstile0(void *dummy);
153 #ifdef TURNSTILE_PROFILING
154 static void	init_turnstile_profiling(void *arg);
155 #endif
156 static void	propagate_priority(struct thread *td);
157 static int	turnstile_adjust_thread(struct turnstile *ts,
158 		    struct thread *td);
159 static struct thread *turnstile_first_waiter(struct turnstile *ts);
160 static void	turnstile_setowner(struct turnstile *ts, struct thread *owner);
161 #ifdef INVARIANTS
162 static void	turnstile_dtor(void *mem, int size, void *arg);
163 #endif
164 static int	turnstile_init(void *mem, int size, int flags);
165 
166 /*
167  * Walks the chain of turnstiles and their owners to propagate the priority
168  * of the thread being blocked to all the threads holding locks that have to
169  * release their locks before this thread can run again.
170  */
171 static void
172 propagate_priority(struct thread *td)
173 {
174 	struct turnstile_chain *tc;
175 	struct turnstile *ts;
176 	int pri;
177 
178 	mtx_assert(&sched_lock, MA_OWNED);
179 	pri = td->td_priority;
180 	ts = td->td_blocked;
181 	for (;;) {
182 		td = ts->ts_owner;
183 
184 		if (td == NULL) {
185 			/*
186 			 * This might be a read lock with no owner.  There's
187 			 * not much we can do, so just bail.
188 			 */
189 			return;
190 		}
191 
192 		MPASS(td->td_proc != NULL);
193 		MPASS(td->td_proc->p_magic == P_MAGIC);
194 
195 		/*
196 		 * If the thread is asleep, then we are probably about
197 		 * to deadlock.  To make debugging this easier, just
198 		 * panic and tell the user which thread misbehaved so
199 		 * they can hopefully get a stack trace from the truly
200 		 * misbehaving thread.
201 		 */
202 		if (TD_IS_SLEEPING(td)) {
203 			printf(
204 		"Sleeping thread (tid %d, pid %d) owns a non-sleepable lock\n",
205 			    td->td_tid, td->td_proc->p_pid);
206 #ifdef DDB
207 			db_trace_thread(td, -1);
208 #endif
209 			panic("sleeping thread");
210 		}
211 
212 		/*
213 		 * If this thread already has higher priority than the
214 		 * thread that is being blocked, we are finished.
215 		 */
216 		if (td->td_priority <= pri)
217 			return;
218 
219 		/*
220 		 * Bump this thread's priority.
221 		 */
222 		sched_lend_prio(td, pri);
223 
224 		/*
225 		 * If the lock holder is actually running or on the run queue
226 		 * then we are done.
227 		 */
228 		if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
229 			MPASS(td->td_blocked == NULL);
230 			return;
231 		}
232 
233 #ifndef SMP
234 		/*
235 		 * For UP, we check to see if td is curthread (this should never
236 		 * happen, however, as it would mean we are deadlocked).
237 		 */
238 		KASSERT(td != curthread, ("Deadlock detected"));
239 #endif
240 
241 		/*
242 		 * If we aren't blocked on a lock, we should be.
243 		 */
244 		KASSERT(TD_ON_LOCK(td), (
245 		    "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
246 		    td->td_tid, td->td_proc->p_comm, td->td_state,
247 		    ts->ts_lockobj->lo_name));
248 
249 		/*
250 		 * Pick up the lock that td is blocked on.
251 		 */
252 		ts = td->td_blocked;
253 		MPASS(ts != NULL);
254 		tc = TC_LOOKUP(ts->ts_lockobj);
255 		mtx_lock_spin(&tc->tc_lock);
256 
257 		/* Resort td on the list if needed. */
258 		if (!turnstile_adjust_thread(ts, td)) {
259 			mtx_unlock_spin(&tc->tc_lock);
260 			return;
261 		}
262 		mtx_unlock_spin(&tc->tc_lock);
263 	}
264 }
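/*
 * A concrete, made-up example of the loop above: a thread at priority 80
 * blocks on a lock whose owner runs at priority 120.  Since lower numeric
 * values mean higher priority, the owner is lent priority 80 via
 * sched_lend_prio().  If that owner is itself blocked on another lock, the
 * loop repeats with the same priority up the chain of owners, and it stops
 * as soon as an owner is already at priority 80 or better or is running or
 * sitting on a run queue.
 */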
265 
266 /*
267  * Adjust the thread's position on a turnstile after its priority has been
268  * changed.
269  */
270 static int
271 turnstile_adjust_thread(struct turnstile *ts, struct thread *td)
272 {
273 	struct turnstile_chain *tc;
274 	struct thread *td1, *td2;
275 	int queue;
276 
277 	mtx_assert(&sched_lock, MA_OWNED);
278 	MPASS(TD_ON_LOCK(td));
279 
280 	/*
281 	 * This thread may not be blocked on this turnstile anymore
282 	 * but instead might already be woken up on another CPU
283 	 * that is waiting on sched_lock in turnstile_unpend() to
284 	 * finish waking this thread up.  We can detect this case
285 	 * by checking to see if this thread has been given a
286 	 * turnstile by either turnstile_signal() or
287 	 * turnstile_broadcast().  In this case, treat the thread as
288 	 * if it was already running.
289 	 */
290 	if (td->td_turnstile != NULL)
291 		return (0);
292 
293 	/*
294 	 * Check if the thread needs to be moved on the blocked chain.
295 	 * It needs to be moved if either its priority is lower than
296 	 * the previous thread or higher than the next thread.
297 	 */
298 	tc = TC_LOOKUP(ts->ts_lockobj);
299 	mtx_assert(&tc->tc_lock, MA_OWNED);
300 	td1 = TAILQ_PREV(td, threadqueue, td_lockq);
301 	td2 = TAILQ_NEXT(td, td_lockq);
302 	if ((td1 != NULL && td->td_priority < td1->td_priority) ||
303 	    (td2 != NULL && td->td_priority > td2->td_priority)) {
304 
305 		/*
306 		 * Remove thread from blocked chain and determine where
307 		 * it should be moved to.
308 		 */
309 		queue = td->td_tsqueue;
310 		MPASS(queue == TS_EXCLUSIVE_QUEUE || queue == TS_SHARED_QUEUE);
311 		mtx_lock_spin(&td_contested_lock);
312 		TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
313 		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq) {
314 			MPASS(td1->td_proc->p_magic == P_MAGIC);
315 			if (td1->td_priority > td->td_priority)
316 				break;
317 		}
318 
319 		if (td1 == NULL)
320 			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
321 		else
322 			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
323 		mtx_unlock_spin(&td_contested_lock);
324 		if (td1 == NULL)
325 			CTR3(KTR_LOCK,
326 		    "turnstile_adjust_thread: td %d put at tail on [%p] %s",
327 			    td->td_tid, ts->ts_lockobj, ts->ts_lockobj->lo_name);
328 		else
329 			CTR4(KTR_LOCK,
330 		    "turnstile_adjust_thread: td %d moved before %d on [%p] %s",
331 			    td->td_tid, td1->td_tid, ts->ts_lockobj,
332 			    ts->ts_lockobj->lo_name);
333 	}
334 	return (1);
335 }
336 
337 /*
338  * Early initialization of turnstiles.  This is not done via a SYSINIT()
339  * since this needs to be initialized very early when mutexes are first
340  * initialized.
341  */
342 void
343 init_turnstiles(void)
344 {
345 	int i;
346 
347 	for (i = 0; i < TC_TABLESIZE; i++) {
348 		LIST_INIT(&turnstile_chains[i].tc_turnstiles);
349 		mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
350 		    NULL, MTX_SPIN);
351 	}
352 	mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
353 	LIST_INIT(&thread0.td_contested);
354 	thread0.td_turnstile = NULL;
355 }
356 
357 #ifdef TURNSTILE_PROFILING
358 static void
359 init_turnstile_profiling(void *arg)
360 {
361 	struct sysctl_oid *chain_oid;
362 	char chain_name[10];
363 	int i;
364 
365 	for (i = 0; i < TC_TABLESIZE; i++) {
366 		snprintf(chain_name, sizeof(chain_name), "%d", i);
367 		chain_oid = SYSCTL_ADD_NODE(NULL,
368 		    SYSCTL_STATIC_CHILDREN(_debug_turnstile_chains), OID_AUTO,
369 		    chain_name, CTLFLAG_RD, NULL, "turnstile chain stats");
370 		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
371 		    "depth", CTLFLAG_RD, &turnstile_chains[i].tc_depth, 0,
372 		    NULL);
373 		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
374 		    "max_depth", CTLFLAG_RD, &turnstile_chains[i].tc_max_depth,
375 		    0, NULL);
376 	}
377 }
378 SYSINIT(turnstile_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
379     init_turnstile_profiling, NULL);
380 #endif
381 
382 static void
383 init_turnstile0(void *dummy)
384 {
385 
386 	turnstile_zone = uma_zcreate("TURNSTILE", sizeof(struct turnstile),
387 #ifdef INVARIANTS
388 	    NULL, turnstile_dtor, turnstile_init, NULL, UMA_ALIGN_CACHE, 0);
389 #else
390 	    NULL, NULL, turnstile_init, NULL, UMA_ALIGN_CACHE, 0);
391 #endif
392 	thread0.td_turnstile = turnstile_alloc();
393 }
394 SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);
395 
396 /*
397  * Update a thread on the turnstile list after its priority has been changed.
398  * The old priority is passed in as an argument.
399  */
400 void
401 turnstile_adjust(struct thread *td, u_char oldpri)
402 {
403 	struct turnstile_chain *tc;
404 	struct turnstile *ts;
405 
406 	mtx_assert(&sched_lock, MA_OWNED);
407 	MPASS(TD_ON_LOCK(td));
408 
409 	/*
410 	 * Pick up the lock that td is blocked on.
411 	 */
412 	ts = td->td_blocked;
413 	MPASS(ts != NULL);
414 	tc = TC_LOOKUP(ts->ts_lockobj);
415 	mtx_lock_spin(&tc->tc_lock);
416 
417 	/* Resort the thread on the turnstile's blocked list if needed. */
418 	if (!turnstile_adjust_thread(ts, td)) {
419 		mtx_unlock_spin(&tc->tc_lock);
420 		return;
421 	}
422 
423 	/*
424 	 * If our priority was raised (numerically lowered) and we are at the
425 	 * head of the turnstile, then propagate our new priority up the chain.
426 	 * Note that we currently don't try to revoke lent priorities
427 	 * when our priority goes up.
428 	 */
429 	MPASS(td->td_tsqueue == TS_EXCLUSIVE_QUEUE ||
430 	    td->td_tsqueue == TS_SHARED_QUEUE);
431 	if (td == TAILQ_FIRST(&ts->ts_blocked[td->td_tsqueue]) &&
432 	    td->td_priority < oldpri) {
433 		mtx_unlock_spin(&tc->tc_lock);
434 		critical_enter();
435 		propagate_priority(td);
436 		critical_exit();
437 	} else
438 		mtx_unlock_spin(&tc->tc_lock);
439 }
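/*
 * An illustrative, hypothetical caller: a scheduler's priority-change path
 * would typically invoke this along the lines of
 *
 *	oldpri = td->td_priority;
 *	...update td->td_priority to prio...
 *	if (TD_ON_LOCK(td) && prio != oldpri)
 *		turnstile_adjust(td, oldpri);
 *
 * so that the thread is re-sorted on its turnstile and, if it was boosted,
 * the new priority is propagated to the lock owner.  The exact call site is
 * scheduler-specific and is not part of this file.
 */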
440 
441 /*
442  * Set the owner of the lock this turnstile is attached to.
443  */
444 static void
445 turnstile_setowner(struct turnstile *ts, struct thread *owner)
446 {
447 
448 	mtx_assert(&td_contested_lock, MA_OWNED);
449 	MPASS(ts->ts_owner == NULL);
450 
451 	/* A shared lock might not have an owner. */
452 	if (owner == NULL)
453 		return;
454 
455 	MPASS(owner->td_proc->p_magic == P_MAGIC);
456 	ts->ts_owner = owner;
457 	LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
458 }
459 
460 #ifdef INVARIANTS
461 /*
462  * UMA zone item deallocator.
463  */
464 static void
465 turnstile_dtor(void *mem, int size, void *arg)
466 {
467 	struct turnstile *ts;
468 
469 	ts = mem;
470 	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]));
471 	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
472 	MPASS(TAILQ_EMPTY(&ts->ts_pending));
473 }
474 #endif
475 
476 /*
477  * UMA zone item initializer.
478  */
479 static int
480 turnstile_init(void *mem, int size, int flags)
481 {
482 	struct turnstile *ts;
483 
484 	bzero(mem, size);
485 	ts = mem;
486 	TAILQ_INIT(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
487 	TAILQ_INIT(&ts->ts_blocked[TS_SHARED_QUEUE]);
488 	TAILQ_INIT(&ts->ts_pending);
489 	LIST_INIT(&ts->ts_free);
490 	return (0);
491 }
492 
493 /*
494  * Get a turnstile for a new thread.
495  */
496 struct turnstile *
497 turnstile_alloc(void)
498 {
499 
500 	return (uma_zalloc(turnstile_zone, M_WAITOK));
501 }
502 
503 /*
504  * Free a turnstile when a thread is destroyed.
505  */
506 void
507 turnstile_free(struct turnstile *ts)
508 {
509 
510 	uma_zfree(turnstile_zone, ts);
511 }
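/*
 * Illustrative pairing (an assumption about the callers, which are not in
 * this file): turnstile_alloc() and turnstile_free() are meant to bracket a
 * thread's lifetime, roughly
 *
 *	td->td_turnstile = turnstile_alloc();	(at thread creation)
 *	...
 *	turnstile_free(td->td_turnstile);	(at thread destruction)
 *
 * which preserves the one-turnstile-per-thread property described at the
 * top of the file.
 */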
512 
513 /*
514  * Lock the turnstile chain associated with the specified lock.
515  */
516 void
517 turnstile_lock(struct lock_object *lock)
518 {
519 	struct turnstile_chain *tc;
520 
521 	tc = TC_LOOKUP(lock);
522 	mtx_lock_spin(&tc->tc_lock);
523 }
524 
525 /*
526  * Look up the turnstile for a lock in the hash table.  The associated
527  * turnstile chain must already be locked.  If no turnstile is found in
528  * the hash table, NULL is returned.
529  */
530 struct turnstile *
531 turnstile_lookup(struct lock_object *lock)
532 {
533 	struct turnstile_chain *tc;
534 	struct turnstile *ts;
535 
536 	tc = TC_LOOKUP(lock);
537 	mtx_assert(&tc->tc_lock, MA_OWNED);
538 	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
539 		if (ts->ts_lockobj == lock)
540 			return (ts);
541 	return (NULL);
542 }
543 
544 /*
545  * Unlock the turnstile chain associated with a given lock.
546  */
547 void
548 turnstile_release(struct lock_object *lock)
549 {
550 	struct turnstile_chain *tc;
551 
552 	tc = TC_LOOKUP(lock);
553 	mtx_unlock_spin(&tc->tc_lock);
554 }
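/*
 * turnstile_lock(), turnstile_lookup() and turnstile_release() bracket an
 * inspection of a lock's turnstile.  A minimal "are there waiters?" sketch
 * (illustrative only, not taken from a real caller):
 *
 *	turnstile_lock(lock);
 *	ts = turnstile_lookup(lock);
 *	if (ts == NULL) {
 *		...no thread is currently blocked on 'lock'...
 *		turnstile_release(lock);
 *	} else {
 *		...'ts' is the turnstile the waiters are queued on...
 *	}
 */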
555 
556 /*
557  * Return a pointer to the highest priority thread waiting on this
558  * turnstile, or NULL if the turnstile has no waiters.
559  */
560 static struct thread *
561 turnstile_first_waiter(struct turnstile *ts)
562 {
563 	struct thread *std, *xtd;
564 
565 	std = TAILQ_FIRST(&ts->ts_blocked[TS_SHARED_QUEUE]);
566 	xtd = TAILQ_FIRST(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
567 	if (xtd == NULL || (std != NULL && std->td_priority < xtd->td_priority))
568 		return (std);
569 	return (xtd);
570 }
571 
572 /*
573  * Take ownership of a turnstile and adjust the priority of the new
574  * owner appropriately.
575  */
576 void
577 turnstile_claim(struct lock_object *lock)
578 {
579 	struct turnstile_chain *tc;
580 	struct turnstile *ts;
581 	struct thread *td, *owner;
582 
583 	tc = TC_LOOKUP(lock);
584 	mtx_assert(&tc->tc_lock, MA_OWNED);
585 	ts = turnstile_lookup(lock);
586 	MPASS(ts != NULL);
587 
588 	owner = curthread;
589 	mtx_lock_spin(&td_contested_lock);
590 	turnstile_setowner(ts, owner);
591 	mtx_unlock_spin(&td_contested_lock);
592 
593 	td = turnstile_first_waiter(ts);
594 	MPASS(td != NULL);
595 	MPASS(td->td_proc->p_magic == P_MAGIC);
596 	mtx_unlock_spin(&tc->tc_lock);
597 
598 	/*
599 	 * Update the priority of the new owner if needed.
600 	 */
601 	mtx_lock_spin(&sched_lock);
602 	if (td->td_priority < owner->td_priority)
603 		sched_lend_prio(owner, td->td_priority);
604 	mtx_unlock_spin(&sched_lock);
605 }
606 
607 /*
608  * Block the current thread on the turnstile associated with 'lock'.  This
609  * function will context switch and not return until this thread has been
610  * woken back up.  This function must be called with the appropriate
611  * turnstile chain locked and will return with it unlocked.
612  */
613 void
614 turnstile_wait(struct lock_object *lock, struct thread *owner, int queue)
615 {
616 	struct turnstile_chain *tc;
617 	struct turnstile *ts;
618 	struct thread *td, *td1;
619 
620 	td = curthread;
621 	tc = TC_LOOKUP(lock);
622 	mtx_assert(&tc->tc_lock, MA_OWNED);
623 	MPASS(td->td_turnstile != NULL);
624 	if (queue == TS_SHARED_QUEUE)
625 		MPASS(owner != NULL);
626 	if (owner)
627 		MPASS(owner->td_proc->p_magic == P_MAGIC);
628 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
629 
630 	/* Look up the turnstile associated with the lock 'lock'. */
631 	ts = turnstile_lookup(lock);
632 
633 	/*
634 	 * If the lock does not already have a turnstile, use this thread's
635 	 * turnstile.  Otherwise insert the current thread into the
636 	 * turnstile already in use by this lock.
637 	 */
638 	if (ts == NULL) {
639 #ifdef TURNSTILE_PROFILING
640 		tc->tc_depth++;
641 		if (tc->tc_depth > tc->tc_max_depth) {
642 			tc->tc_max_depth = tc->tc_depth;
643 			if (tc->tc_max_depth > turnstile_max_depth)
644 				turnstile_max_depth = tc->tc_max_depth;
645 		}
646 #endif
647 		ts = td->td_turnstile;
648 		LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
649 		KASSERT(TAILQ_EMPTY(&ts->ts_pending),
650 		    ("thread's turnstile has pending threads"));
651 		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]),
652 		    ("thread's turnstile has exclusive waiters"));
653 		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]),
654 		    ("thread's turnstile has shared waiters"));
655 		KASSERT(LIST_EMPTY(&ts->ts_free),
656 		    ("thread's turnstile has a non-empty free list"));
657 		KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
658 		ts->ts_lockobj = lock;
659 		mtx_lock_spin(&td_contested_lock);
660 		TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
661 		turnstile_setowner(ts, owner);
662 		mtx_unlock_spin(&td_contested_lock);
663 	} else {
664 		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq)
665 			if (td1->td_priority > td->td_priority)
666 				break;
667 		mtx_lock_spin(&td_contested_lock);
668 		if (td1 != NULL)
669 			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
670 		else
671 			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
672 		MPASS(owner == ts->ts_owner);
673 		mtx_unlock_spin(&td_contested_lock);
674 		MPASS(td->td_turnstile != NULL);
675 		LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
676 	}
677 	td->td_turnstile = NULL;
678 	mtx_unlock_spin(&tc->tc_lock);
679 
680 	mtx_lock_spin(&sched_lock);
681 	/*
682 	 * Handle the race where a thread on another CPU that owns lock
683 	 * 'lock' could have woken us up between our dropping the turnstile
684 	 * chain lock and acquiring sched_lock.
685 	 */
686 	if (td->td_flags & TDF_TSNOBLOCK) {
687 		td->td_flags &= ~TDF_TSNOBLOCK;
688 		mtx_unlock_spin(&sched_lock);
689 		return;
690 	}
691 
692 #ifdef notyet
693 	/*
694 	 * If we're borrowing an interrupted thread's VM context, we
695 	 * must clean up before going to sleep.
696 	 */
697 	if (td->td_ithd != NULL) {
698 		struct ithd *it = td->td_ithd;
699 
700 		if (it->it_interrupted) {
701 			if (LOCK_LOG_TEST(lock, 0))
702 				CTR3(KTR_LOCK, "%s: %p interrupted %p",
703 				    __func__, it, it->it_interrupted);
704 			intr_thd_fixup(it);
705 		}
706 	}
707 #endif
708 
709 	/* Save who we are blocked on and switch. */
710 	td->td_tsqueue = queue;
711 	td->td_blocked = ts;
712 	td->td_lockname = lock->lo_name;
713 	TD_SET_LOCK(td);
714 	critical_enter();
715 	propagate_priority(td);
716 	critical_exit();
717 
718 	if (LOCK_LOG_TEST(lock, 0))
719 		CTR4(KTR_LOCK, "%s: td %d blocked on [%p] %s", __func__,
720 		    td->td_tid, lock, lock->lo_name);
721 
722 	mi_switch(SW_VOL, NULL);
723 
724 	if (LOCK_LOG_TEST(lock, 0))
725 		CTR4(KTR_LOCK, "%s: td %d free from blocked on [%p] %s",
726 		    __func__, td->td_tid, lock, lock->lo_name);
727 
728 	mtx_unlock_spin(&sched_lock);
729 }
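/*
 * A minimal sketch of the blocking path a lock implementation might follow
 * (the lock-state re-check and the computation of 'owner' are assumptions,
 * not taken from this file):
 *
 *	turnstile_lock(lock);
 *	if (...the lock was released while we took the chain lock...) {
 *		turnstile_release(lock);
 *		...retry the fast path...
 *	}
 *	turnstile_wait(lock, owner, TS_EXCLUSIVE_QUEUE);
 *	...execution resumes here after wakeup; turnstile_wait() has already
 *	   dropped the chain lock...
 */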
730 
731 /*
732  * Pick the highest priority thread on this turnstile and put it on the
733  * pending list.  This must be called with the turnstile chain locked.
734  */
735 int
736 turnstile_signal(struct turnstile *ts, int queue)
737 {
738 	struct turnstile_chain *tc;
739 	struct thread *td;
740 	int empty;
741 
742 	MPASS(ts != NULL);
743 	MPASS(curthread->td_proc->p_magic == P_MAGIC);
744 	MPASS(ts->ts_owner == curthread ||
745 	    (queue == TS_EXCLUSIVE_QUEUE && ts->ts_owner == NULL));
746 	tc = TC_LOOKUP(ts->ts_lockobj);
747 	mtx_assert(&tc->tc_lock, MA_OWNED);
748 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
749 
750 	/*
751 	 * Pick the highest priority thread blocked on this lock and
752 	 * move it to the pending list.
753 	 */
754 	td = TAILQ_FIRST(&ts->ts_blocked[queue]);
755 	MPASS(td->td_proc->p_magic == P_MAGIC);
756 	mtx_lock_spin(&td_contested_lock);
757 	TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
758 	mtx_unlock_spin(&td_contested_lock);
759 	TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);
760 
761 	/*
762 	 * If the turnstile is now empty, remove it from its chain and
763 	 * give it to the about-to-be-woken thread.  Otherwise take a
764 	 * turnstile from the free list and give it to the thread.
765 	 */
766 	empty = TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
767 	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]);
768 	if (empty) {
769 		MPASS(LIST_EMPTY(&ts->ts_free));
770 #ifdef TURNSTILE_PROFILING
771 		tc->tc_depth--;
772 #endif
773 	} else
774 		ts = LIST_FIRST(&ts->ts_free);
775 	MPASS(ts != NULL);
776 	LIST_REMOVE(ts, ts_hash);
777 	td->td_turnstile = ts;
778 
779 	return (empty);
780 }
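/*
 * The return value tells the caller whether the thread just moved to the
 * pending list was the last waiter.  An illustrative, hypothetical use by a
 * lock implementation:
 *
 *	if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE))
 *		...clear the lock's contested/waiters flag...
 */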
781 
782 /*
783  * Put all blocked threads on the pending list.  This must be called with
784  * the turnstile chain locked.
785  */
786 void
787 turnstile_broadcast(struct turnstile *ts, int queue)
788 {
789 	struct turnstile_chain *tc;
790 	struct turnstile *ts1;
791 	struct thread *td;
792 
793 	MPASS(ts != NULL);
794 	MPASS(curthread->td_proc->p_magic == P_MAGIC);
795 	MPASS(ts->ts_owner == curthread ||
796 	    (queue == TS_EXCLUSIVE_QUEUE && ts->ts_owner == NULL));
797 	tc = TC_LOOKUP(ts->ts_lockobj);
798 	mtx_assert(&tc->tc_lock, MA_OWNED);
799 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
800 
801 	/*
802 	 * Transfer the blocked list to the pending list.
803 	 */
804 	mtx_lock_spin(&td_contested_lock);
805 	TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked[queue], td_lockq);
806 	mtx_unlock_spin(&td_contested_lock);
807 
808 	/*
809 	 * Give a turnstile to each thread.  The last thread gets
810 	 * this turnstile if the free list is empty.
811 	 */
812 	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
813 		if (LIST_EMPTY(&ts->ts_free)) {
814 			MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
815 			ts1 = ts;
816 #ifdef TURNSTILE_PROFILING
817 			tc->tc_depth--;
818 #endif
819 		} else
820 			ts1 = LIST_FIRST(&ts->ts_free);
821 		MPASS(ts1 != NULL);
822 		LIST_REMOVE(ts1, ts_hash);
823 		td->td_turnstile = ts1;
824 	}
825 }
826 
827 /*
828  * Wakeup all threads on the pending list and adjust the priority of the
829  * current thread appropriately.  This must be called with the turnstile
830  * chain locked.
831  */
832 void
833 turnstile_unpend(struct turnstile *ts, int owner_type)
834 {
835 	TAILQ_HEAD(, thread) pending_threads;
836 	struct turnstile_chain *tc;
837 	struct thread *td;
838 	u_char cp, pri;
839 
840 	MPASS(ts != NULL);
841 	MPASS(ts->ts_owner == curthread ||
842 	    (owner_type == TS_SHARED_LOCK && ts->ts_owner == NULL));
843 	tc = TC_LOOKUP(ts->ts_lockobj);
844 	mtx_assert(&tc->tc_lock, MA_OWNED);
845 	MPASS(!TAILQ_EMPTY(&ts->ts_pending));
846 
847 	/*
848 	 * Move the list of pending threads out of the turnstile and
849 	 * into a local variable.
850 	 */
851 	TAILQ_INIT(&pending_threads);
852 	TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
853 #ifdef INVARIANTS
854 	if (TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
855 	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]))
856 		ts->ts_lockobj = NULL;
857 #endif
858 
859 	/*
860 	 * Remove the turnstile from this thread's list of contested locks
861 	 * since this thread doesn't own it anymore.  New threads will
862 	 * not be blocking on the turnstile until it is claimed by a new
863 	 * owner.  There might not be a current owner if this is a shared
864 	 * lock.
865 	 */
866 	if (ts->ts_owner != NULL) {
867 		mtx_lock_spin(&td_contested_lock);
868 		ts->ts_owner = NULL;
869 		LIST_REMOVE(ts, ts_link);
870 		mtx_unlock_spin(&td_contested_lock);
871 	}
872 	critical_enter();
873 	mtx_unlock_spin(&tc->tc_lock);
874 
875 	/*
876 	 * Adjust the priority of curthread based on other contested
877 	 * locks it owns.  Don't lower the priority below the base
878 	 * priority however.
879 	 */
880 	td = curthread;
881 	pri = PRI_MAX;
882 	mtx_lock_spin(&sched_lock);
883 	mtx_lock_spin(&td_contested_lock);
884 	LIST_FOREACH(ts, &td->td_contested, ts_link) {
885 		cp = turnstile_first_waiter(ts)->td_priority;
886 		if (cp < pri)
887 			pri = cp;
888 	}
889 	mtx_unlock_spin(&td_contested_lock);
890 	sched_unlend_prio(td, pri);
891 
892 	/*
893 	 * Wake up all the pending threads.  If a thread is not blocked
894 	 * on a lock, then it is currently executing on another CPU in
895 	 * turnstile_wait() or sitting on a run queue waiting to resume
896 	 * in turnstile_wait().  Set a flag to force it to try to acquire
897 	 * the lock again instead of blocking.
898 	 */
899 	while (!TAILQ_EMPTY(&pending_threads)) {
900 		td = TAILQ_FIRST(&pending_threads);
901 		TAILQ_REMOVE(&pending_threads, td, td_lockq);
902 		MPASS(td->td_proc->p_magic == P_MAGIC);
903 		if (TD_ON_LOCK(td)) {
904 			td->td_blocked = NULL;
905 			td->td_lockname = NULL;
906 #ifdef INVARIANTS
907 			td->td_tsqueue = 0xff;
908 #endif
909 			TD_CLR_LOCK(td);
910 			MPASS(TD_CAN_RUN(td));
911 			sched_add(td, SRQ_BORING);
912 		} else {
913 			td->td_flags |= TDF_TSNOBLOCK;
914 			MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));
915 		}
916 	}
917 	critical_exit();
918 	mtx_unlock_spin(&sched_lock);
919 }
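/*
 * A minimal sketch of the full wakeup sequence (the surrounding lock-word
 * manipulation is assumed and not shown in this file; TS_EXCLUSIVE_LOCK is
 * assumed to be the exclusive counterpart of the TS_SHARED_LOCK owner type
 * checked above):
 *
 *	turnstile_lock(lock);
 *	ts = turnstile_lookup(lock);
 *	if (ts == NULL)
 *		turnstile_release(lock);
 *	else {
 *		turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
 *		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
 *		...turnstile_unpend() drops the chain lock itself...
 *	}
 */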
920 
921 /*
922  * Give up ownership of a turnstile.  This must be called with the
923  * turnstile chain locked.
924  */
925 void
926 turnstile_disown(struct turnstile *ts)
927 {
928 	struct turnstile_chain *tc;
929 	struct thread *td;
930 	u_char cp, pri;
931 
932 	MPASS(ts != NULL);
933 	MPASS(ts->ts_owner == curthread);
934 	tc = TC_LOOKUP(ts->ts_lockobj);
935 	mtx_assert(&tc->tc_lock, MA_OWNED);
936 	MPASS(TAILQ_EMPTY(&ts->ts_pending));
937 	MPASS(!TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) ||
938 	    !TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
939 
940 	/*
941 	 * Remove the turnstile from this thread's list of contested locks
942 	 * since this thread doesn't own it anymore.  New threads will
943 	 * not be blocking on the turnstile until it is claimed by a new
944 	 * owner.
945 	 */
946 	mtx_lock_spin(&td_contested_lock);
947 	ts->ts_owner = NULL;
948 	LIST_REMOVE(ts, ts_link);
949 	mtx_unlock_spin(&td_contested_lock);
950 	mtx_unlock_spin(&tc->tc_lock);
951 
952 	/*
953 	 * Adjust the priority of curthread based on other contested
954 	 * locks it owns.  Don't lower the priority below the base
955 	 * priority however.
956 	 */
957 	td = curthread;
958 	pri = PRI_MAX;
959 	mtx_lock_spin(&sched_lock);
960 	mtx_lock_spin(&td_contested_lock);
961 	LIST_FOREACH(ts, &td->td_contested, ts_link) {
962 		cp = turnstile_first_waiter(ts)->td_priority;
963 		if (cp < pri)
964 			pri = cp;
965 	}
966 	mtx_unlock_spin(&td_contested_lock);
967 	sched_unlend_prio(td, pri);
968 	mtx_unlock_spin(&sched_lock);
969 }
970 
971 /*
972  * Return the first thread on the specified queue of a turnstile.
973  */
974 struct thread *
975 turnstile_head(struct turnstile *ts, int queue)
976 {
977 #ifdef INVARIANTS
978 	struct turnstile_chain *tc;
979 
980 	MPASS(ts != NULL);
981 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
982 	tc = TC_LOOKUP(ts->ts_lockobj);
983 	mtx_assert(&tc->tc_lock, MA_OWNED);
984 #endif
985 	return (TAILQ_FIRST(&ts->ts_blocked[queue]));
986 }
987 
988 /*
989  * Returns true if a sub-queue of a turnstile is empty.
990  */
991 int
992 turnstile_empty(struct turnstile *ts, int queue)
993 {
994 #ifdef INVARIANTS
995 	struct turnstile_chain *tc;
996 
997 	MPASS(ts != NULL);
998 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
999 	tc = TC_LOOKUP(ts->ts_lockobj);
1000 	mtx_assert(&tc->tc_lock, MA_OWNED);
1001 #endif
1002 	return (TAILQ_EMPTY(&ts->ts_blocked[queue]));
1003 }
1004 
1005 #ifdef DDB
1006 static void
1007 print_thread(struct thread *td, const char *prefix)
1008 {
1009 
1010 	db_printf("%s%p (tid %d, pid %d, \"%s\")\n", prefix, td, td->td_tid,
1011 	    td->td_proc->p_pid, td->td_name[0] != '\0' ? td->td_name :
1012 	    td->td_proc->p_comm);
1013 }
1014 
1015 static void
1016 print_queue(struct threadqueue *queue, const char *header, const char *prefix)
1017 {
1018 	struct thread *td;
1019 
1020 	db_printf("%s:\n", header);
1021 	if (TAILQ_EMPTY(queue)) {
1022 		db_printf("%sempty\n", prefix);
1023 		return;
1024 	}
1025 	TAILQ_FOREACH(td, queue, td_lockq) {
1026 		print_thread(td, prefix);
1027 	}
1028 }
1029 
1030 DB_SHOW_COMMAND(turnstile, db_show_turnstile)
1031 {
1032 	struct turnstile_chain *tc;
1033 	struct turnstile *ts;
1034 	struct lock_object *lock;
1035 	int i;
1036 
1037 	if (!have_addr)
1038 		return;
1039 
1040 	/*
1041 	 * First, see if there is an active turnstile for the lock indicated
1042 	 * by the address.
1043 	 */
1044 	lock = (struct lock_object *)addr;
1045 	tc = TC_LOOKUP(lock);
1046 	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
1047 		if (ts->ts_lockobj == lock)
1048 			goto found;
1049 
1050 	/*
1051 	 * Second, see if there is an active turnstile at the address
1052 	 * indicated.
1053 	 */
1054 	for (i = 0; i < TC_TABLESIZE; i++)
1055 		LIST_FOREACH(ts, &turnstile_chains[i].tc_turnstiles, ts_hash) {
1056 			if (ts == (struct turnstile *)addr)
1057 				goto found;
1058 		}
1059 
1060 	db_printf("Unable to locate a turnstile via %p\n", (void *)addr);
1061 	return;
1062 found:
1063 	lock = ts->ts_lockobj;
1064 	db_printf("Lock: %p - (%s) %s\n", lock, LOCK_CLASS(lock)->lc_name,
1065 	    lock->lo_name);
1066 	if (ts->ts_owner)
1067 		print_thread(ts->ts_owner, "Lock Owner: ");
1068 	else
1069 		db_printf("Lock Owner: none\n");
1070 	print_queue(&ts->ts_blocked[TS_SHARED_QUEUE], "Shared Waiters", "\t");
1071 	print_queue(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE], "Exclusive Waiters",
1072 	    "\t");
1073 	print_queue(&ts->ts_pending, "Pending Threads", "\t");
1074 
1075 }
1076 
1077 /*
1078  * Show all the threads a particular thread is waiting on based on
1079  * non-sleepable and non-spin locks.
1080  */
1081 static void
1082 print_lockchain(struct thread *td, const char *prefix)
1083 {
1084 	struct lock_object *lock;
1085 	struct lock_class *class;
1086 	struct turnstile *ts;
1087 
1088 	/*
1089 	 * Follow the chain.  We keep walking as long as the thread is
1090 	 * blocked on a turnstile that has an owner.
1091 	 */
1092 	while (!db_pager_quit) {
1093 		db_printf("%sthread %d (pid %d, %s) ", prefix, td->td_tid,
1094 		    td->td_proc->p_pid, td->td_name[0] != '\0' ? td->td_name :
1095 		    td->td_proc->p_comm);
1096 		switch (td->td_state) {
1097 		case TDS_INACTIVE:
1098 			db_printf("is inactive\n");
1099 			return;
1100 		case TDS_CAN_RUN:
1101 			db_printf("can run\n");
1102 			return;
1103 		case TDS_RUNQ:
1104 			db_printf("is on a run queue\n");
1105 			return;
1106 		case TDS_RUNNING:
1107 			db_printf("running on CPU %d\n", td->td_oncpu);
1108 			return;
1109 		case TDS_INHIBITED:
1110 			if (TD_ON_LOCK(td)) {
1111 				ts = td->td_blocked;
1112 				lock = ts->ts_lockobj;
1113 				class = LOCK_CLASS(lock);
1114 				db_printf("blocked on lock %p (%s) \"%s\"\n",
1115 				    lock, class->lc_name, lock->lo_name);
1116 				if (ts->ts_owner == NULL)
1117 					return;
1118 				td = ts->ts_owner;
1119 				break;
1120 			}
1121 			db_printf("inhibited\n");
1122 			return;
1123 		default:
1124 			db_printf("??? (%#x)\n", td->td_state);
1125 			return;
1126 		}
1127 	}
1128 }
1129 
1130 DB_SHOW_COMMAND(lockchain, db_show_lockchain)
1131 {
1132 	struct thread *td;
1133 
1134 	/* Figure out which thread to start with. */
1135 	if (have_addr)
1136 		td = db_lookup_thread(addr, TRUE);
1137 	else
1138 		td = kdb_thread;
1139 
1140 	print_lockchain(td, "");
1141 }
1142 
1143 DB_SHOW_COMMAND(allchains, db_show_allchains)
1144 {
1145 	struct thread *td;
1146 	struct proc *p;
1147 	int i;
1148 
1149 	i = 1;
1150 	FOREACH_PROC_IN_SYSTEM(p) {
1151 		FOREACH_THREAD_IN_PROC(p, td) {
1152 			if (TD_ON_LOCK(td) && LIST_EMPTY(&td->td_contested)) {
1153 				db_printf("chain %d:\n", i++);
1154 				print_lockchain(td, " ");
1155 			}
1156 			if (db_pager_quit)
1157 				return;
1158 		}
1159 	}
1160 }
1161 
1162 /*
1163  * Show all the threads a particular thread is waiting on based on
1164  * sleepable locks.
1165  */
1166 static void
1167 print_sleepchain(struct thread *td, const char *prefix)
1168 {
1169 	struct thread *owner;
1170 
1171 	/*
1172 	 * Follow the chain.  We keep walking as long as the thread is
1173 	 * blocked on a sleep lock that has an owner.
1174 	 */
1175 	while (!db_pager_quit) {
1176 		db_printf("%sthread %d (pid %d, %s) ", prefix, td->td_tid,
1177 		    td->td_proc->p_pid, td->td_name[0] != '\0' ? td->td_name :
1178 		    td->td_proc->p_comm);
1179 		switch (td->td_state) {
1180 		case TDS_INACTIVE:
1181 			db_printf("is inactive\n");
1182 			return;
1183 		case TDS_CAN_RUN:
1184 			db_printf("can run\n");
1185 			return;
1186 		case TDS_RUNQ:
1187 			db_printf("is on a run queue\n");
1188 			return;
1189 		case TDS_RUNNING:
1190 			db_printf("running on CPU %d\n", td->td_oncpu);
1191 			return;
1192 		case TDS_INHIBITED:
1193 			if (TD_ON_SLEEPQ(td)) {
1194 				if (lockmgr_chain(td, &owner) ||
1195 				    sx_chain(td, &owner)) {
1196 					if (owner == NULL)
1197 						return;
1198 					td = owner;
1199 					break;
1200 				}
1201 				db_printf("sleeping on %p \"%s\"\n",
1202 				    td->td_wchan, td->td_wmesg);
1203 				return;
1204 			}
1205 			db_printf("inhibited\n");
1206 			return;
1207 		default:
1208 			db_printf("??? (%#x)\n", td->td_state);
1209 			return;
1210 		}
1211 	}
1212 }
1213 
1214 DB_SHOW_COMMAND(sleepchain, db_show_sleepchain)
1215 {
1216 	struct thread *td;
1217 
1218 	/* Figure out which thread to start with. */
1219 	if (have_addr)
1220 		td = db_lookup_thread(addr, TRUE);
1221 	else
1222 		td = kdb_thread;
1223 
1224 	print_sleepchain(td, "");
1225 }
1226 
1227 static void	print_waiters(struct turnstile *ts, int indent);
1228 
1229 static void
1230 print_waiter(struct thread *td, int indent)
1231 {
1232 	struct turnstile *ts;
1233 	int i;
1234 
1235 	if (db_pager_quit)
1236 		return;
1237 	for (i = 0; i < indent; i++)
1238 		db_printf(" ");
1239 	print_thread(td, "thread ");
1240 	LIST_FOREACH(ts, &td->td_contested, ts_link)
1241 		print_waiters(ts, indent + 1);
1242 }
1243 
1244 static void
1245 print_waiters(struct turnstile *ts, int indent)
1246 {
1247 	struct lock_object *lock;
1248 	struct lock_class *class;
1249 	struct thread *td;
1250 	int i;
1251 
1252 	if (db_pager_quit)
1253 		return;
1254 	lock = ts->ts_lockobj;
1255 	class = LOCK_CLASS(lock);
1256 	for (i = 0; i < indent; i++)
1257 		db_printf(" ");
1258 	db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name, lock->lo_name);
1259 	TAILQ_FOREACH(td, &ts->ts_blocked[TS_EXCLUSIVE_QUEUE], td_lockq)
1260 		print_waiter(td, indent + 1);
1261 	TAILQ_FOREACH(td, &ts->ts_blocked[TS_SHARED_QUEUE], td_lockq)
1262 		print_waiter(td, indent + 1);
1263 	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq)
1264 		print_waiter(td, indent + 1);
1265 }
1266 
1267 DB_SHOW_COMMAND(locktree, db_show_locktree)
1268 {
1269 	struct lock_object *lock;
1270 	struct lock_class *class;
1271 	struct turnstile_chain *tc;
1272 	struct turnstile *ts;
1273 
1274 	if (!have_addr)
1275 		return;
1276 	lock = (struct lock_object *)addr;
1277 	tc = TC_LOOKUP(lock);
1278 	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
1279 		if (ts->ts_lockobj == lock)
1280 			break;
1281 	if (ts == NULL) {
1282 		class = LOCK_CLASS(lock);
1283 		db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name,
1284 		    lock->lo_name);
1285 	} else
1286 		print_waiters(ts, 0);
1287 }
1288 #endif
1289