xref: /freebsd/sys/kern/subr_turnstile.c (revision 0c927cdd8e6e05387fc5a9ffcb5dbe128d4ad749)
1 /*-
2  * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  * 3. Berkeley Software Design Inc's name may not be used to endorse or
13  *    promote products derived from this software without specific prior
14  *    written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29  *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30  */
31 
32 /*
33  * Implementation of turnstiles used to hold a queue of threads blocked on
34  * non-sleepable locks.  Sleepable locks use condition variables to
35  * implement their queues.  Turnstiles differ from a sleep queue in that
36  * turnstile queues are assigned to a lock held by an owning thread.  Thus,
37  * when one thread is enqueued onto a turnstile, it can lend its priority
38  * to the owning thread.
39  *
40  * We wish to avoid bloating locks with an embedded turnstile and we do not
41  * want to use back-pointers in the locks for the same reason.  Thus, we
42  * use a similar approach to that of Solaris 7 as described in Solaris
43  * Internals by Jim Mauro and Richard McDougall.  Turnstiles are looked up
44  * in a hash table based on the address of the lock.  Each entry in the
45  * hash table is a linked list of turnstiles and is called a turnstile
46  * chain.  Each chain contains a spin mutex that protects all of the
47  * turnstiles in the chain.
48  *
49  * Each time a thread is created, a turnstile is allocated from a UMA zone
50  * and attached to that thread.  When a thread blocks on a lock, if it is the
51  * first thread to block, it lends its turnstile to the lock.  If the lock
52  * already has a turnstile, then it adds its turnstile to the free list
53  * hung off of the lock's turnstile.  When a thread is woken up, it takes
54  * a turnstile from the free list if there are any other waiters.  If it
55  * is the only thread blocked on the lock, then it reclaims the turnstile
56  * associated with the lock and removes it from the hash table.
57  */
58 
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
61 
62 #include "opt_ddb.h"
63 #include "opt_turnstile_profiling.h"
64 
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/kernel.h>
68 #include <sys/ktr.h>
69 #include <sys/lock.h>
70 #include <sys/mutex.h>
71 #include <sys/proc.h>
72 #include <sys/queue.h>
73 #include <sys/sched.h>
74 #include <sys/sysctl.h>
75 #include <sys/turnstile.h>
76 
77 #include <vm/uma.h>
78 
79 #ifdef DDB
80 #include <sys/kdb.h>
81 #include <ddb/ddb.h>
82 #include <sys/lockmgr.h>
83 #include <sys/sx.h>
84 #endif
85 
86 /*
87  * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
88  * number chosen because the sleep queues use the same value for the
89  * shift.  Basically, we ignore the lower 8 bits of the address.
90  * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
91  */
92 #define	TC_TABLESIZE	128			/* Must be power of 2. */
93 #define	TC_MASK		(TC_TABLESIZE - 1)
94 #define	TC_SHIFT	8
95 #define	TC_HASH(lock)	(((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
96 #define	TC_LOOKUP(lock)	&turnstile_chains[TC_HASH(lock)]
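
/*
 * For illustration (the address is hypothetical): a lock at address
 * 0x80c4f730 hashes to chain ((0x80c4f730 >> 8) & 127) == 119, so all
 * locks whose addresses share bits 8..14 land on the same chain and are
 * serialized by that chain's spin mutex.  This is exactly what
 * turnstile_chain_lock() below does:
 *
 *	struct turnstile_chain *tc;
 *
 *	tc = TC_LOOKUP(lock);
 *	mtx_lock_spin(&tc->tc_lock);
 */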
97 
98 /*
99  * There are three different lists of turnstiles as follows.  The list
100  * connected by ts_link entries is a per-thread list of all the turnstiles
101  * attached to locks that we own.  This is used to fix up our priority when
102  * a lock is released.  The other two lists use the ts_hash entries.  The
103  * first of these two is the turnstile chain list that a turnstile is on
104  * when it is attached to a lock.  The second list to use ts_hash is the
105  * free list hung off of a turnstile that is attached to a lock.
106  *
107  * Each turnstile contains three lists of threads.  The two ts_blocked lists
108  * are linked lists of threads blocked on the turnstile's lock.  One list is
109  * for exclusive waiters, and the other is for shared waiters.  The
110  * ts_pending list is a linked list of threads previously awakened by
111  * turnstile_signal() or turnstile_wait() that are waiting to be put on
112  * the run queue.
113  *
114  * Locking key:
115  *  c - turnstile chain lock
116  *  q - td_contested lock
117  */
118 struct turnstile {
119 	struct mtx ts_lock;			/* Spin lock for self. */
120 	struct threadqueue ts_blocked[2];	/* (c + q) Blocked threads. */
121 	struct threadqueue ts_pending;		/* (c) Pending threads. */
122 	LIST_ENTRY(turnstile) ts_hash;		/* (c) Chain and free list. */
123 	LIST_ENTRY(turnstile) ts_link;		/* (q) Contested locks. */
124 	LIST_HEAD(, turnstile) ts_free;		/* (c) Free turnstiles. */
125 	struct lock_object *ts_lockobj;		/* (c) Lock we reference. */
126 	struct thread *ts_owner;		/* (c + q) Who owns the lock. */
127 };
128 
129 struct turnstile_chain {
130 	LIST_HEAD(, turnstile) tc_turnstiles;	/* List of turnstiles. */
131 	struct mtx tc_lock;			/* Spin lock for this chain. */
132 #ifdef TURNSTILE_PROFILING
133 	u_int	tc_depth;			/* Length of tc_turnstiles. */
134 	u_int	tc_max_depth;			/* Max length of tc_turnstiles. */
135 #endif
136 };
137 
138 #ifdef TURNSTILE_PROFILING
139 u_int turnstile_max_depth;
140 SYSCTL_NODE(_debug, OID_AUTO, turnstile, CTLFLAG_RD, 0, "turnstile profiling");
141 SYSCTL_NODE(_debug_turnstile, OID_AUTO, chains, CTLFLAG_RD, 0,
142     "turnstile chain stats");
143 SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD,
144     &turnstile_max_depth, 0, "maximum depth achieved by a single chain");
145 #endif
146 static struct mtx td_contested_lock;
147 static struct turnstile_chain turnstile_chains[TC_TABLESIZE];
148 static uma_zone_t turnstile_zone;
149 
150 /*
151  * Prototypes for non-exported routines.
152  */
153 static void	init_turnstile0(void *dummy);
154 #ifdef TURNSTILE_PROFILING
155 static void	init_turnstile_profiling(void *arg);
156 #endif
157 static void	propagate_priority(struct thread *td);
158 static int	turnstile_adjust_thread(struct turnstile *ts,
159 		    struct thread *td);
160 static struct thread *turnstile_first_waiter(struct turnstile *ts);
161 static void	turnstile_setowner(struct turnstile *ts, struct thread *owner);
162 #ifdef INVARIANTS
163 static void	turnstile_dtor(void *mem, int size, void *arg);
164 #endif
165 static int	turnstile_init(void *mem, int size, int flags);
166 static void	turnstile_fini(void *mem, int size);
167 
168 /*
169  * Walks the chain of turnstiles and their owners to propagate the priority
170  * of the thread being blocked to all the threads holding locks that have to
171  * release their locks before this thread can run again.
172  */
173 static void
174 propagate_priority(struct thread *td)
175 {
176 	struct turnstile *ts;
177 	int pri;
178 
179 	THREAD_LOCK_ASSERT(td, MA_OWNED);
180 	pri = td->td_priority;
181 	ts = td->td_blocked;
182 	MPASS(td->td_lock == &ts->ts_lock);
183 	/*
184 	 * Grab a recursive lock on this turnstile's lock so it stays locked
185 	 * for the whole operation.  The caller expects us to return with
186 	 * the original lock held.  We only ever lock down the chain so
187 	 * the lock order is constant.
188 	 */
189 	mtx_lock_spin(&ts->ts_lock);
190 	for (;;) {
191 		td = ts->ts_owner;
192 
193 		if (td == NULL) {
194 			/*
195 			 * This might be a read lock with no owner.  There's
196 			 * not much we can do, so just bail.
197 			 */
198 			mtx_unlock_spin(&ts->ts_lock);
199 			return;
200 		}
201 
202 		thread_lock_flags(td, MTX_DUPOK);
203 		mtx_unlock_spin(&ts->ts_lock);
204 		MPASS(td->td_proc != NULL);
205 		MPASS(td->td_proc->p_magic == P_MAGIC);
206 
207 		/*
208 		 * If the thread is asleep, then we are probably about
209 		 * to deadlock.  To make debugging this easier, just
210 		 * panic and tell the user which thread misbehaved so
211 		 * they can hopefully get a stack trace from the truly
212 		 * misbehaving thread.
213 		 */
214 		if (TD_IS_SLEEPING(td)) {
215 			printf(
216 		"Sleeping thread (tid %d, pid %d) owns a non-sleepable lock\n",
217 			    td->td_tid, td->td_proc->p_pid);
218 #ifdef DDB
219 			db_trace_thread(td, -1);
220 #endif
221 			panic("sleeping thread");
222 		}
223 
224 		/*
225 		 * If this thread already has higher priority than the
226 		 * thread that is being blocked, we are finished.
227 		 */
228 		if (td->td_priority <= pri) {
229 			thread_unlock(td);
230 			return;
231 		}
232 
233 		/*
234 		 * Bump this thread's priority.
235 		 */
236 		sched_lend_prio(td, pri);
237 
238 		/*
239 		 * If lock holder is actually running or on the run queue
240 		 * then we are done.
241 		 */
242 		if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
243 			MPASS(td->td_blocked == NULL);
244 			thread_unlock(td);
245 			return;
246 		}
247 
248 #ifndef SMP
249 		/*
250 		 * For UP, we check to see if td is curthread (this should
251 		 * never happen, however, as it would mean we are deadlocked).
252 		 */
253 		KASSERT(td != curthread, ("Deadlock detected"));
254 #endif
255 
256 		/*
257 		 * If we aren't blocked on a lock, we should be.
258 		 */
259 		KASSERT(TD_ON_LOCK(td), (
260 		    "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
261 		    td->td_tid, td->td_proc->p_comm, td->td_state,
262 		    ts->ts_lockobj->lo_name));
263 
264 		/*
265 		 * Pick up the lock that td is blocked on.
266 		 */
267 		ts = td->td_blocked;
268 		MPASS(ts != NULL);
269 		MPASS(td->td_lock == &ts->ts_lock);
270 		/* Resort td on the list if needed. */
271 		if (!turnstile_adjust_thread(ts, td)) {
272 			mtx_unlock_spin(&ts->ts_lock);
273 			return;
274 		}
275 		/* The thread lock is released as the ts lock at the top of the loop. */
276 	}
277 }
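
/*
 * A worked example of the walk above (numbers are illustrative; in
 * FreeBSD a lower numeric priority is more important): thread A at
 * priority 80 blocks on a lock owned by thread B at priority 120, and
 * B is itself blocked on a lock owned by thread C at priority 140.
 * propagate_priority(A) lends priority 80 to B via sched_lend_prio(),
 * sees that B is still blocked on a turnstile, follows B->td_blocked
 * to C's turnstile, and lends 80 to C as well.  The walk stops once it
 * reaches a thread that is running, sitting on a run queue, or already
 * at a priority of 80 or better.
 */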
278 
279 /*
280  * Adjust the thread's position on a turnstile after its priority has been
281  * changed.
282  */
283 static int
284 turnstile_adjust_thread(struct turnstile *ts, struct thread *td)
285 {
286 	struct thread *td1, *td2;
287 	int queue;
288 
289 	THREAD_LOCK_ASSERT(td, MA_OWNED);
290 	MPASS(TD_ON_LOCK(td));
291 
292 	/*
293 	 * This thread may not be blocked on this turnstile anymore
294 	 * but instead might already be woken up on another CPU
295 	 * that is waiting on the thread lock in turnstile_unpend() to
296 	 * finish waking this thread up.  We can detect this case
297 	 * by checking to see if this thread has been given a
298 	 * turnstile by either turnstile_signal() or
299 	 * turnstile_broadcast().  In this case, treat the thread as
300 	 * if it was already running.
301 	 */
302 	if (td->td_turnstile != NULL)
303 		return (0);
304 
305 	/*
306 	 * Check if the thread needs to be moved on the blocked chain.
307 	 * It needs to be moved if either its priority is lower than
308 	 * the previous thread or higher than the next thread.
309 	 */
310 	MPASS(td->td_lock == &ts->ts_lock);
311 	td1 = TAILQ_PREV(td, threadqueue, td_lockq);
312 	td2 = TAILQ_NEXT(td, td_lockq);
313 	if ((td1 != NULL && td->td_priority < td1->td_priority) ||
314 	    (td2 != NULL && td->td_priority > td2->td_priority)) {
315 
316 		/*
317 		 * Remove thread from blocked chain and determine where
318 		 * it should be moved to.
319 		 */
320 		queue = td->td_tsqueue;
321 		MPASS(queue == TS_EXCLUSIVE_QUEUE || queue == TS_SHARED_QUEUE);
322 		mtx_lock_spin(&td_contested_lock);
323 		TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
324 		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq) {
325 			MPASS(td1->td_proc->p_magic == P_MAGIC);
326 			if (td1->td_priority > td->td_priority)
327 				break;
328 		}
329 
330 		if (td1 == NULL)
331 			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
332 		else
333 			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
334 		mtx_unlock_spin(&td_contested_lock);
335 		if (td1 == NULL)
336 			CTR3(KTR_LOCK,
337 		    "turnstile_adjust_thread: td %d put at tail on [%p] %s",
338 			    td->td_tid, ts->ts_lockobj, ts->ts_lockobj->lo_name);
339 		else
340 			CTR4(KTR_LOCK,
341 		    "turnstile_adjust_thread: td %d moved before %d on [%p] %s",
342 			    td->td_tid, td1->td_tid, ts->ts_lockobj,
343 			    ts->ts_lockobj->lo_name);
344 	}
345 	return (1);
346 }
347 
348 /*
349  * Early initialization of turnstiles.  This is not done via a SYSINIT()
350  * since this needs to be initialized very early when mutexes are first
351  * initialized.
352  */
353 void
354 init_turnstiles(void)
355 {
356 	int i;
357 
358 	for (i = 0; i < TC_TABLESIZE; i++) {
359 		LIST_INIT(&turnstile_chains[i].tc_turnstiles);
360 		mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
361 		    NULL, MTX_SPIN);
362 	}
363 	mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
364 	LIST_INIT(&thread0.td_contested);
365 	thread0.td_turnstile = NULL;
366 }
367 
368 #ifdef TURNSTILE_PROFILING
369 static void
370 init_turnstile_profiling(void *arg)
371 {
372 	struct sysctl_oid *chain_oid;
373 	char chain_name[10];
374 	int i;
375 
376 	for (i = 0; i < TC_TABLESIZE; i++) {
377 		snprintf(chain_name, sizeof(chain_name), "%d", i);
378 		chain_oid = SYSCTL_ADD_NODE(NULL,
379 		    SYSCTL_STATIC_CHILDREN(_debug_turnstile_chains), OID_AUTO,
380 		    chain_name, CTLFLAG_RD, NULL, "turnstile chain stats");
381 		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
382 		    "depth", CTLFLAG_RD, &turnstile_chains[i].tc_depth, 0,
383 		    NULL);
384 		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
385 		    "max_depth", CTLFLAG_RD, &turnstile_chains[i].tc_max_depth,
386 		    0, NULL);
387 	}
388 }
389 SYSINIT(turnstile_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
390     init_turnstile_profiling, NULL);
391 #endif
392 
393 static void
394 init_turnstile0(void *dummy)
395 {
396 
397 	turnstile_zone = uma_zcreate("TURNSTILE", sizeof(struct turnstile),
398 #ifdef INVARIANTS
399 	    NULL, turnstile_dtor, turnstile_init, turnstile_fini,
400 	    UMA_ALIGN_CACHE, 0);
401 #else
402 	    NULL, NULL, turnstile_init, turnstile_fini, UMA_ALIGN_CACHE, 0);
403 #endif
404 	thread0.td_turnstile = turnstile_alloc();
405 }
406 SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);
407 
408 /*
409  * Update a thread on the turnstile list after its priority has been changed.
410  * The old priority is passed in as an argument.
411  */
412 void
413 turnstile_adjust(struct thread *td, u_char oldpri)
414 {
415 	struct turnstile *ts;
416 
417 	MPASS(TD_ON_LOCK(td));
418 
419 	/*
420 	 * Pick up the lock that td is blocked on.
421 	 */
422 	ts = td->td_blocked;
423 	MPASS(ts != NULL);
424 	MPASS(td->td_lock == &ts->ts_lock);
425 	mtx_assert(&ts->ts_lock, MA_OWNED);
426 
427 	/* Resort td on the blocked list if needed. */
428 	if (!turnstile_adjust_thread(ts, td))
429 		return;
430 	/*
431 	 * If our priority was lowered and we are at the head of the
432 	 * turnstile, then propagate our new priority up the chain.
433 	 * Note that we currently don't try to revoke lent priorities
434 	 * when our priority goes up.
435 	 */
436 	MPASS(td->td_tsqueue == TS_EXCLUSIVE_QUEUE ||
437 	    td->td_tsqueue == TS_SHARED_QUEUE);
438 	if (td == TAILQ_FIRST(&ts->ts_blocked[td->td_tsqueue]) &&
439 	    td->td_priority < oldpri) {
440 		propagate_priority(td);
441 	}
442 }
443 
444 /*
445  * Set the owner of the lock this turnstile is attached to.
446  */
447 static void
448 turnstile_setowner(struct turnstile *ts, struct thread *owner)
449 {
450 
451 	mtx_assert(&td_contested_lock, MA_OWNED);
452 	MPASS(ts->ts_owner == NULL);
453 
454 	/* A shared lock might not have an owner. */
455 	if (owner == NULL)
456 		return;
457 
458 	MPASS(owner->td_proc->p_magic == P_MAGIC);
459 	ts->ts_owner = owner;
460 	LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
461 }
462 
463 #ifdef INVARIANTS
464 /*
465  * UMA zone item deallocator.
466  */
467 static void
468 turnstile_dtor(void *mem, int size, void *arg)
469 {
470 	struct turnstile *ts;
471 
472 	ts = mem;
473 	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]));
474 	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
475 	MPASS(TAILQ_EMPTY(&ts->ts_pending));
476 }
477 #endif
478 
479 /*
480  * UMA zone item initializer.
481  */
482 static int
483 turnstile_init(void *mem, int size, int flags)
484 {
485 	struct turnstile *ts;
486 
487 	bzero(mem, size);
488 	ts = mem;
489 	TAILQ_INIT(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
490 	TAILQ_INIT(&ts->ts_blocked[TS_SHARED_QUEUE]);
491 	TAILQ_INIT(&ts->ts_pending);
492 	LIST_INIT(&ts->ts_free);
493 	mtx_init(&ts->ts_lock, "turnstile lock", NULL, MTX_SPIN | MTX_RECURSE);
494 	return (0);
495 }
496 
497 static void
498 turnstile_fini(void *mem, int size)
499 {
500 	struct turnstile *ts;
501 
502 	ts = mem;
503 	mtx_destroy(&ts->ts_lock);
504 }
505 
506 /*
507  * Get a turnstile for a new thread.
508  */
509 struct turnstile *
510 turnstile_alloc(void)
511 {
512 
513 	return (uma_zalloc(turnstile_zone, M_WAITOK));
514 }
515 
516 /*
517  * Free a turnstile when a thread is destroyed.
518  */
519 void
520 turnstile_free(struct turnstile *ts)
521 {
522 
523 	uma_zfree(turnstile_zone, ts);
524 }
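
/*
 * A minimal sketch of the per-thread lifecycle described at the top of
 * this file.  The real calls live in the thread allocation and teardown
 * paths (e.g. thread_init()/thread_fini() in kern_thread.c), so this is
 * illustrative only: a turnstile is attached when a thread is created
 * and released when the thread is destroyed:
 *
 *	td->td_turnstile = turnstile_alloc();
 *	...
 *	turnstile_free(td->td_turnstile);
 */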
525 
526 /*
527  * Lock the turnstile chain associated with the specified lock.
528  */
529 void
530 turnstile_chain_lock(struct lock_object *lock)
531 {
532 	struct turnstile_chain *tc;
533 
534 	tc = TC_LOOKUP(lock);
535 	mtx_lock_spin(&tc->tc_lock);
536 }
537 
538 struct turnstile *
539 turnstile_trywait(struct lock_object *lock)
540 {
541 	struct turnstile_chain *tc;
542 	struct turnstile *ts;
543 
544 	tc = TC_LOOKUP(lock);
545 	mtx_lock_spin(&tc->tc_lock);
546 	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
547 		if (ts->ts_lockobj == lock) {
548 			mtx_lock_spin(&ts->ts_lock);
549 			return (ts);
550 		}
551 
552 	ts = curthread->td_turnstile;
553 	MPASS(ts != NULL);
554 	mtx_lock_spin(&ts->ts_lock);
555 	KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
556 	ts->ts_lockobj = lock;
557 
558 	return (ts);
559 }
560 
561 void
562 turnstile_cancel(struct turnstile *ts)
563 {
564 	struct turnstile_chain *tc;
565 	struct lock_object *lock;
566 
567 	mtx_assert(&ts->ts_lock, MA_OWNED);
568 
569 	mtx_unlock_spin(&ts->ts_lock);
570 	lock = ts->ts_lockobj;
571 	if (ts == curthread->td_turnstile)
572 		ts->ts_lockobj = NULL;
573 	tc = TC_LOOKUP(lock);
574 	mtx_unlock_spin(&tc->tc_lock);
575 }
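
/*
 * A sketch of how a contested acquire path is expected to use
 * turnstile_trywait(), turnstile_cancel() and turnstile_wait(),
 * loosely following kern_mutex.c (simplified; adaptive spinning, flag
 * bits, and the retry loop are elided).  turnstile_trywait() returns
 * with the chain and turnstile locks held, so the lock state cannot
 * change before the caller commits to blocking.  If the lock was
 * dropped in the meantime, the caller cancels and retries; otherwise
 * it blocks on the owner:
 *
 *	ts = turnstile_trywait(&m->lock_object);
 *	v = m->mtx_lock;
 *	if (v == MTX_UNOWNED)
 *		turnstile_cancel(ts);
 *	else
 *		turnstile_wait(ts, (struct thread *)(v & ~MTX_FLAGMASK),
 *		    TS_EXCLUSIVE_QUEUE);
 */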
576 
577 /*
578  * Look up the turnstile for a lock in the hash table locking the associated
579  * turnstile chain along the way.  If no turnstile is found in the hash
580  * table, NULL is returned.
581  */
582 struct turnstile *
583 turnstile_lookup(struct lock_object *lock)
584 {
585 	struct turnstile_chain *tc;
586 	struct turnstile *ts;
587 
588 	tc = TC_LOOKUP(lock);
589 	mtx_assert(&tc->tc_lock, MA_OWNED);
590 	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
591 		if (ts->ts_lockobj == lock) {
592 			mtx_lock_spin(&ts->ts_lock);
593 			return (ts);
594 		}
595 	return (NULL);
596 }
597 
598 /*
599  * Unlock the turnstile chain associated with a given lock.
600  */
601 void
602 turnstile_chain_unlock(struct lock_object *lock)
603 {
604 	struct turnstile_chain *tc;
605 
606 	tc = TC_LOOKUP(lock);
607 	mtx_unlock_spin(&tc->tc_lock);
608 }
609 
610 /*
611  * Return a pointer to the thread waiting on this turnstile with the
612  * most important priority, or NULL if the turnstile has no waiters.
613  */
614 static struct thread *
615 turnstile_first_waiter(struct turnstile *ts)
616 {
617 	struct thread *std, *xtd;
618 
619 	std = TAILQ_FIRST(&ts->ts_blocked[TS_SHARED_QUEUE]);
620 	xtd = TAILQ_FIRST(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
621 	if (xtd == NULL || (std != NULL && std->td_priority < xtd->td_priority))
622 		return (std);
623 	return (xtd);
624 }
625 
626 /*
627  * Take ownership of a turnstile and adjust the priority of the new
628  * owner appropriately.
629  */
630 void
631 turnstile_claim(struct turnstile *ts)
632 {
633 	struct thread *td, *owner;
634 	struct turnstile_chain *tc;
635 
636 	mtx_assert(&ts->ts_lock, MA_OWNED);
637 	MPASS(ts != curthread->td_turnstile);
638 
639 	owner = curthread;
640 	mtx_lock_spin(&td_contested_lock);
641 	turnstile_setowner(ts, owner);
642 	mtx_unlock_spin(&td_contested_lock);
643 
644 	td = turnstile_first_waiter(ts);
645 	MPASS(td != NULL);
646 	MPASS(td->td_proc->p_magic == P_MAGIC);
647 	MPASS(td->td_lock == &ts->ts_lock);
648 
649 	/*
650 	 * Update the priority of the new owner if needed.
651 	 */
652 	thread_lock(owner);
653 	if (td->td_priority < owner->td_priority)
654 		sched_lend_prio(owner, td->td_priority);
655 	thread_unlock(owner);
656 	tc = TC_LOOKUP(ts->ts_lockobj);
657 	mtx_unlock_spin(&ts->ts_lock);
658 	mtx_unlock_spin(&tc->tc_lock);
659 }
660 
661 /*
662  * Block the current thread on the turnstile associated with 'lock'.  This
663  * function will context switch and not return until this thread has been
664  * woken back up.  This function must be called with the appropriate
665  * turnstile chain locked and will return with it unlocked.
666  */
667 void
668 turnstile_wait(struct turnstile *ts, struct thread *owner, int queue)
669 {
670 	struct turnstile_chain *tc;
671 	struct thread *td, *td1;
672 	struct lock_object *lock;
673 
674 	td = curthread;
675 	mtx_assert(&ts->ts_lock, MA_OWNED);
676 	if (queue == TS_SHARED_QUEUE)
677 		MPASS(owner != NULL);
678 	if (owner)
679 		MPASS(owner->td_proc->p_magic == P_MAGIC);
680 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
681 
682 	/*
683 	 * If the lock does not already have a turnstile, use this thread's
684 	 * turnstile.  Otherwise insert the current thread into the
685 	 * turnstile already in use by this lock.
686 	 */
687 	tc = TC_LOOKUP(ts->ts_lockobj);
688 	if (ts == td->td_turnstile) {
689 		mtx_assert(&tc->tc_lock, MA_OWNED);
690 #ifdef TURNSTILE_PROFILING
691 		tc->tc_depth++;
692 		if (tc->tc_depth > tc->tc_max_depth) {
693 			tc->tc_max_depth = tc->tc_depth;
694 			if (tc->tc_max_depth > turnstile_max_depth)
695 				turnstile_max_depth = tc->tc_max_depth;
696 		}
697 #endif
699 		LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
700 		KASSERT(TAILQ_EMPTY(&ts->ts_pending),
701 		    ("thread's turnstile has pending threads"));
702 		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]),
703 		    ("thread's turnstile has exclusive waiters"));
704 		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]),
705 		    ("thread's turnstile has shared waiters"));
706 		KASSERT(LIST_EMPTY(&ts->ts_free),
707 		    ("thread's turnstile has a non-empty free list"));
708 		MPASS(ts->ts_lockobj != NULL);
709 		mtx_lock_spin(&td_contested_lock);
710 		TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
711 		turnstile_setowner(ts, owner);
712 		mtx_unlock_spin(&td_contested_lock);
713 	} else {
714 		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq)
715 			if (td1->td_priority > td->td_priority)
716 				break;
717 		mtx_lock_spin(&td_contested_lock);
718 		if (td1 != NULL)
719 			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
720 		else
721 			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
722 		MPASS(owner == ts->ts_owner);
723 		mtx_unlock_spin(&td_contested_lock);
724 		MPASS(td->td_turnstile != NULL);
725 		LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
726 	}
727 	thread_lock(td);
728 	thread_lock_set(td, &ts->ts_lock);
729 	td->td_turnstile = NULL;
730 
731 	/* Save who we are blocked on and switch. */
732 	lock = ts->ts_lockobj;
733 	td->td_tsqueue = queue;
734 	td->td_blocked = ts;
735 	td->td_lockname = lock->lo_name;
736 	TD_SET_LOCK(td);
737 	mtx_unlock_spin(&tc->tc_lock);
738 	propagate_priority(td);
739 
740 	if (LOCK_LOG_TEST(lock, 0))
741 		CTR4(KTR_LOCK, "%s: td %d blocked on [%p] %s", __func__,
742 		    td->td_tid, lock, lock->lo_name);
743 
744 	MPASS(td->td_lock == &ts->ts_lock);
745 	SCHED_STAT_INC(switch_turnstile);
746 	mi_switch(SW_VOL, NULL);
747 
748 	if (LOCK_LOG_TEST(lock, 0))
749 		CTR4(KTR_LOCK, "%s: td %d free from blocked on [%p] %s",
750 		    __func__, td->td_tid, lock, lock->lo_name);
751 	thread_unlock(td);
752 }
753 
754 /*
755  * Pick the highest priority thread on this turnstile and put it on the
756  * pending list.  This must be called with the turnstile chain locked.
757  */
758 int
759 turnstile_signal(struct turnstile *ts, int queue)
760 {
761 	struct turnstile_chain *tc;
762 	struct thread *td;
763 	int empty;
764 
765 	MPASS(ts != NULL);
766 	mtx_assert(&ts->ts_lock, MA_OWNED);
767 	MPASS(curthread->td_proc->p_magic == P_MAGIC);
768 	MPASS(ts->ts_owner == curthread ||
769 	    (queue == TS_EXCLUSIVE_QUEUE && ts->ts_owner == NULL));
770 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
771 
772 	/*
773 	 * Pick the highest priority thread blocked on this lock and
774 	 * move it to the pending list.
775 	 */
776 	td = TAILQ_FIRST(&ts->ts_blocked[queue]);
777 	MPASS(td->td_proc->p_magic == P_MAGIC);
778 	mtx_lock_spin(&td_contested_lock);
779 	TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
780 	mtx_unlock_spin(&td_contested_lock);
781 	TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);
782 
783 	/*
784 	 * If the turnstile is now empty, remove it from its chain and
785 	 * give it to the about-to-be-woken thread.  Otherwise take a
786 	 * turnstile from the free list and give it to the thread.
787 	 */
788 	empty = TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
789 	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]);
790 	if (empty) {
791 		tc = TC_LOOKUP(ts->ts_lockobj);
792 		mtx_assert(&tc->tc_lock, MA_OWNED);
793 		MPASS(LIST_EMPTY(&ts->ts_free));
794 #ifdef TURNSTILE_PROFILING
795 		tc->tc_depth--;
796 #endif
797 	} else
798 		ts = LIST_FIRST(&ts->ts_free);
799 	MPASS(ts != NULL);
800 	LIST_REMOVE(ts, ts_hash);
801 	td->td_turnstile = ts;
802 
803 	return (empty);
804 }
805 
806 /*
807  * Put all blocked threads on the pending list.  This must be called with
808  * the turnstile chain locked.
809  */
810 void
811 turnstile_broadcast(struct turnstile *ts, int queue)
812 {
813 	struct turnstile_chain *tc;
814 	struct turnstile *ts1;
815 	struct thread *td;
816 
817 	MPASS(ts != NULL);
818 	mtx_assert(&ts->ts_lock, MA_OWNED);
819 	MPASS(curthread->td_proc->p_magic == P_MAGIC);
820 	MPASS(ts->ts_owner == curthread ||
821 	    (queue == TS_EXCLUSIVE_QUEUE && ts->ts_owner == NULL));
822 	/*
823 	 * We must have the chain locked so that we can remove the empty
824 	 * turnstile from the hash queue.
825 	 */
826 	tc = TC_LOOKUP(ts->ts_lockobj);
827 	mtx_assert(&tc->tc_lock, MA_OWNED);
828 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
829 
830 	/*
831 	 * Transfer the blocked list to the pending list.
832 	 */
833 	mtx_lock_spin(&td_contested_lock);
834 	TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked[queue], td_lockq);
835 	mtx_unlock_spin(&td_contested_lock);
836 
837 	/*
838 	 * Give a turnstile to each thread.  The last thread gets
839 	 * this turnstile if the turnstile is empty.
840 	 */
841 	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
842 		if (LIST_EMPTY(&ts->ts_free)) {
843 			MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
844 			ts1 = ts;
845 #ifdef TURNSTILE_PROFILING
846 			tc->tc_depth--;
847 #endif
848 		} else
849 			ts1 = LIST_FIRST(&ts->ts_free);
850 		MPASS(ts1 != NULL);
851 		LIST_REMOVE(ts1, ts_hash);
852 		td->td_turnstile = ts1;
853 	}
854 }
855 
856 /*
857  * Wakeup all threads on the pending list and adjust the priority of the
858  * current thread appropriately.  This must be called with the turnstile
859  * chain locked.
860  */
861 void
862 turnstile_unpend(struct turnstile *ts, int owner_type)
863 {
864 	TAILQ_HEAD( , thread) pending_threads;
865 	struct turnstile *nts;
866 	struct thread *td;
867 	u_char cp, pri;
868 
869 	MPASS(ts != NULL);
870 	mtx_assert(&ts->ts_lock, MA_OWNED);
871 	MPASS(ts->ts_owner == curthread ||
872 	    (owner_type == TS_SHARED_LOCK && ts->ts_owner == NULL));
873 	MPASS(!TAILQ_EMPTY(&ts->ts_pending));
874 
875 	/*
876 	 * Move the list of pending threads out of the turnstile and
877 	 * into a local variable.
878 	 */
879 	TAILQ_INIT(&pending_threads);
880 	TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
881 #ifdef INVARIANTS
882 	if (TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
883 	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]))
884 		ts->ts_lockobj = NULL;
885 #endif
886 	/*
887 	 * Adjust the priority of curthread based on other contested
888 	 * locks it owns.  Don't lower the priority below the base
889 	 * priority however.
890 	 */
891 	td = curthread;
892 	pri = PRI_MAX;
893 	thread_lock(td);
894 	mtx_lock_spin(&td_contested_lock);
895 	/*
896 	 * Remove the turnstile from this thread's list of contested locks
897 	 * since this thread doesn't own it anymore.  New threads will
898 	 * not be blocking on the turnstile until it is claimed by a new
899 	 * owner.  There might not be a current owner if this is a shared
900 	 * lock.
901 	 */
902 	if (ts->ts_owner != NULL) {
903 		ts->ts_owner = NULL;
904 		LIST_REMOVE(ts, ts_link);
905 	}
906 	LIST_FOREACH(nts, &td->td_contested, ts_link) {
907 		cp = turnstile_first_waiter(nts)->td_priority;
908 		if (cp < pri)
909 			pri = cp;
910 	}
911 	mtx_unlock_spin(&td_contested_lock);
912 	sched_unlend_prio(td, pri);
913 	thread_unlock(td);
914 	/*
915 	 * Wake up all the pending threads.  If a thread is not blocked
916 	 * on a lock, then it is currently executing on another CPU in
917 	 * turnstile_wait() or sitting on a run queue waiting to resume
918 	 * in turnstile_wait().  Set a flag to force it to try to acquire
919 	 * the lock again instead of blocking.
920 	 */
921 	while (!TAILQ_EMPTY(&pending_threads)) {
922 		td = TAILQ_FIRST(&pending_threads);
923 		TAILQ_REMOVE(&pending_threads, td, td_lockq);
924 		thread_lock(td);
925 		MPASS(td->td_lock == &ts->ts_lock);
926 		MPASS(td->td_proc->p_magic == P_MAGIC);
927 		MPASS(TD_ON_LOCK(td));
928 		TD_CLR_LOCK(td);
929 		MPASS(TD_CAN_RUN(td));
930 		td->td_blocked = NULL;
931 		td->td_lockname = NULL;
932 #ifdef INVARIANTS
933 		td->td_tsqueue = 0xff;
934 #endif
935 		sched_add(td, SRQ_BORING);
936 		thread_unlock(td);
937 	}
938 	mtx_unlock_spin(&ts->ts_lock);
939 }
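
/*
 * The release side pairs with the acquire-side sketch earlier in this
 * file.  A contested unlock, again loosely following kern_mutex.c
 * (simplified), wakes all exclusive waiters, hands each of them a
 * turnstile, and only then makes the woken threads runnable:
 *
 *	turnstile_chain_lock(&m->lock_object);
 *	ts = turnstile_lookup(&m->lock_object);
 *	MPASS(ts != NULL);
 *	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
 *	atomic_store_rel_ptr(&m->mtx_lock, (uintptr_t)MTX_UNOWNED);
 *	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
 *	turnstile_chain_unlock(&m->lock_object);
 */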
940 
941 /*
942  * Give up ownership of a turnstile.  This must be called with the
943  * turnstile chain locked.
944  */
945 void
946 turnstile_disown(struct turnstile *ts)
947 {
948 	struct thread *td;
949 	u_char cp, pri;
950 
951 	MPASS(ts != NULL);
952 	mtx_assert(&ts->ts_lock, MA_OWNED);
953 	MPASS(ts->ts_owner == curthread);
954 	MPASS(TAILQ_EMPTY(&ts->ts_pending));
955 	MPASS(!TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) ||
956 	    !TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
957 
958 	/*
959 	 * Remove the turnstile from this thread's list of contested locks
960 	 * since this thread doesn't own it anymore.  New threads will
961 	 * not be blocking on the turnstile until it is claimed by a new
962 	 * owner.
963 	 */
964 	mtx_lock_spin(&td_contested_lock);
965 	ts->ts_owner = NULL;
966 	LIST_REMOVE(ts, ts_link);
967 	mtx_unlock_spin(&td_contested_lock);
968 
969 	/*
970 	 * Adjust the priority of curthread based on other contested
971 	 * locks it owns.  Don't lower the priority below the base
972 	 * priority however.
973 	 */
974 	td = curthread;
975 	pri = PRI_MAX;
976 	thread_lock(td);
977 	mtx_unlock_spin(&ts->ts_lock);
978 	mtx_lock_spin(&td_contested_lock);
979 	LIST_FOREACH(ts, &td->td_contested, ts_link) {
980 		cp = turnstile_first_waiter(ts)->td_priority;
981 		if (cp < pri)
982 			pri = cp;
983 	}
984 	mtx_unlock_spin(&td_contested_lock);
985 	sched_unlend_prio(td, pri);
986 	thread_unlock(td);
987 }
988 
989 /*
990  * Return the first thread in a turnstile.
991  */
992 struct thread *
993 turnstile_head(struct turnstile *ts, int queue)
994 {
995 #ifdef INVARIANTS
996 
997 	MPASS(ts != NULL);
998 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
999 	mtx_assert(&ts->ts_lock, MA_OWNED);
1000 #endif
1001 	return (TAILQ_FIRST(&ts->ts_blocked[queue]));
1002 }
1003 
1004 /*
1005  * Returns true if a sub-queue of a turnstile is empty.
1006  */
1007 int
1008 turnstile_empty(struct turnstile *ts, int queue)
1009 {
1010 #ifdef INVARIANTS
1011 
1012 	MPASS(ts != NULL);
1013 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
1014 	mtx_assert(&ts->ts_lock, MA_OWNED);
1015 #endif
1016 	return (TAILQ_EMPTY(&ts->ts_blocked[queue]));
1017 }
1018 
1019 #ifdef DDB
1020 static void
1021 print_thread(struct thread *td, const char *prefix)
1022 {
1023 
1024 	db_printf("%s%p (tid %d, pid %d, \"%s\")\n", prefix, td, td->td_tid,
1025 	    td->td_proc->p_pid, td->td_name[0] != '\0' ? td->td_name :
1026 	    td->td_proc->p_comm);
1027 }
1028 
1029 static void
1030 print_queue(struct threadqueue *queue, const char *header, const char *prefix)
1031 {
1032 	struct thread *td;
1033 
1034 	db_printf("%s:\n", header);
1035 	if (TAILQ_EMPTY(queue)) {
1036 		db_printf("%sempty\n", prefix);
1037 		return;
1038 	}
1039 	TAILQ_FOREACH(td, queue, td_lockq) {
1040 		print_thread(td, prefix);
1041 	}
1042 }
1043 
1044 DB_SHOW_COMMAND(turnstile, db_show_turnstile)
1045 {
1046 	struct turnstile_chain *tc;
1047 	struct turnstile *ts;
1048 	struct lock_object *lock;
1049 	int i;
1050 
1051 	if (!have_addr)
1052 		return;
1053 
1054 	/*
1055 	 * First, see if there is an active turnstile for the lock indicated
1056 	 * by the address.
1057 	 */
1058 	lock = (struct lock_object *)addr;
1059 	tc = TC_LOOKUP(lock);
1060 	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
1061 		if (ts->ts_lockobj == lock)
1062 			goto found;
1063 
1064 	/*
1065 	 * Second, see if there is an active turnstile at the address
1066 	 * indicated.
1067 	 */
1068 	for (i = 0; i < TC_TABLESIZE; i++)
1069 		LIST_FOREACH(ts, &turnstile_chains[i].tc_turnstiles, ts_hash) {
1070 			if (ts == (struct turnstile *)addr)
1071 				goto found;
1072 		}
1073 
1074 	db_printf("Unable to locate a turnstile via %p\n", (void *)addr);
1075 	return;
1076 found:
1077 	lock = ts->ts_lockobj;
1078 	db_printf("Lock: %p - (%s) %s\n", lock, LOCK_CLASS(lock)->lc_name,
1079 	    lock->lo_name);
1080 	if (ts->ts_owner)
1081 		print_thread(ts->ts_owner, "Lock Owner: ");
1082 	else
1083 		db_printf("Lock Owner: none\n");
1084 	print_queue(&ts->ts_blocked[TS_SHARED_QUEUE], "Shared Waiters", "\t");
1085 	print_queue(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE], "Exclusive Waiters",
1086 	    "\t");
1087 	print_queue(&ts->ts_pending, "Pending Threads", "\t");
1089 }
1090 
1091 /*
1092  * Show all the threads a particular thread is waiting on based on
1093  * non-sleepable and non-spin locks.
1094  */
1095 static void
1096 print_lockchain(struct thread *td, const char *prefix)
1097 {
1098 	struct lock_object *lock;
1099 	struct lock_class *class;
1100 	struct turnstile *ts;
1101 
1102 	/*
1103 	 * Follow the chain.  We keep walking as long as the thread is
1104 	 * blocked on a turnstile that has an owner.
1105 	 */
1106 	while (!db_pager_quit) {
1107 		db_printf("%sthread %d (pid %d, %s) ", prefix, td->td_tid,
1108 		    td->td_proc->p_pid, td->td_name[0] != '\0' ? td->td_name :
1109 		    td->td_proc->p_comm);
1110 		switch (td->td_state) {
1111 		case TDS_INACTIVE:
1112 			db_printf("is inactive\n");
1113 			return;
1114 		case TDS_CAN_RUN:
1115 			db_printf("can run\n");
1116 			return;
1117 		case TDS_RUNQ:
1118 			db_printf("is on a run queue\n");
1119 			return;
1120 		case TDS_RUNNING:
1121 			db_printf("running on CPU %d\n", td->td_oncpu);
1122 			return;
1123 		case TDS_INHIBITED:
1124 			if (TD_ON_LOCK(td)) {
1125 				ts = td->td_blocked;
1126 				lock = ts->ts_lockobj;
1127 				class = LOCK_CLASS(lock);
1128 				db_printf("blocked on lock %p (%s) \"%s\"\n",
1129 				    lock, class->lc_name, lock->lo_name);
1130 				if (ts->ts_owner == NULL)
1131 					return;
1132 				td = ts->ts_owner;
1133 				break;
1134 			}
1135 			db_printf("inhibited\n");
1136 			return;
1137 		default:
1138 			db_printf("??? (%#x)\n", td->td_state);
1139 			return;
1140 		}
1141 	}
1142 }
1143 
1144 DB_SHOW_COMMAND(lockchain, db_show_lockchain)
1145 {
1146 	struct thread *td;
1147 
1148 	/* Figure out which thread to start with. */
1149 	if (have_addr)
1150 		td = db_lookup_thread(addr, TRUE);
1151 	else
1152 		td = kdb_thread;
1153 
1154 	print_lockchain(td, "");
1155 }
1156 
1157 DB_SHOW_COMMAND(allchains, db_show_allchains)
1158 {
1159 	struct thread *td;
1160 	struct proc *p;
1161 	int i;
1162 
1163 	i = 1;
1164 	FOREACH_PROC_IN_SYSTEM(p) {
1165 		FOREACH_THREAD_IN_PROC(p, td) {
1166 			if (TD_ON_LOCK(td) && LIST_EMPTY(&td->td_contested)) {
1167 				db_printf("chain %d:\n", i++);
1168 				print_lockchain(td, " ");
1169 			}
1170 			if (db_pager_quit)
1171 				return;
1172 		}
1173 	}
1174 }
1175 
1176 /*
1177  * Show all the threads a particular thread is waiting on based on
1178  * sleepable locks.
1179  */
1180 static void
1181 print_sleepchain(struct thread *td, const char *prefix)
1182 {
1183 	struct thread *owner;
1184 
1185 	/*
1186 	 * Follow the chain.  We keep walking as long as the thread is
1187 	 * blocked on a sleep lock that has an owner.
1188 	 */
1189 	while (!db_pager_quit) {
1190 		db_printf("%sthread %d (pid %d, %s) ", prefix, td->td_tid,
1191 		    td->td_proc->p_pid, td->td_name[0] != '\0' ? td->td_name :
1192 		    td->td_proc->p_comm);
1193 		switch (td->td_state) {
1194 		case TDS_INACTIVE:
1195 			db_printf("is inactive\n");
1196 			return;
1197 		case TDS_CAN_RUN:
1198 			db_printf("can run\n");
1199 			return;
1200 		case TDS_RUNQ:
1201 			db_printf("is on a run queue\n");
1202 			return;
1203 		case TDS_RUNNING:
1204 			db_printf("running on CPU %d\n", td->td_oncpu);
1205 			return;
1206 		case TDS_INHIBITED:
1207 			if (TD_ON_SLEEPQ(td)) {
1208 				if (lockmgr_chain(td, &owner) ||
1209 				    sx_chain(td, &owner)) {
1210 					if (owner == NULL)
1211 						return;
1212 					td = owner;
1213 					break;
1214 				}
1215 				db_printf("sleeping on %p \"%s\"\n",
1216 				    td->td_wchan, td->td_wmesg);
1217 				return;
1218 			}
1219 			db_printf("inhibited\n");
1220 			return;
1221 		default:
1222 			db_printf("??? (%#x)\n", td->td_state);
1223 			return;
1224 		}
1225 	}
1226 }
1227 
1228 DB_SHOW_COMMAND(sleepchain, db_show_sleepchain)
1229 {
1230 	struct thread *td;
1231 
1232 	/* Figure out which thread to start with. */
1233 	if (have_addr)
1234 		td = db_lookup_thread(addr, TRUE);
1235 	else
1236 		td = kdb_thread;
1237 
1238 	print_sleepchain(td, "");
1239 }
1240 
1241 static void	print_waiters(struct turnstile *ts, int indent);
1242 
1243 static void
1244 print_waiter(struct thread *td, int indent)
1245 {
1246 	struct turnstile *ts;
1247 	int i;
1248 
1249 	if (db_pager_quit)
1250 		return;
1251 	for (i = 0; i < indent; i++)
1252 		db_printf(" ");
1253 	print_thread(td, "thread ");
1254 	LIST_FOREACH(ts, &td->td_contested, ts_link)
1255 		print_waiters(ts, indent + 1);
1256 }
1257 
1258 static void
1259 print_waiters(struct turnstile *ts, int indent)
1260 {
1261 	struct lock_object *lock;
1262 	struct lock_class *class;
1263 	struct thread *td;
1264 	int i;
1265 
1266 	if (db_pager_quit)
1267 		return;
1268 	lock = ts->ts_lockobj;
1269 	class = LOCK_CLASS(lock);
1270 	for (i = 0; i < indent; i++)
1271 		db_printf(" ");
1272 	db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name, lock->lo_name);
1273 	TAILQ_FOREACH(td, &ts->ts_blocked[TS_EXCLUSIVE_QUEUE], td_lockq)
1274 		print_waiter(td, indent + 1);
1275 	TAILQ_FOREACH(td, &ts->ts_blocked[TS_SHARED_QUEUE], td_lockq)
1276 		print_waiter(td, indent + 1);
1277 	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq)
1278 		print_waiter(td, indent + 1);
1279 }
1280 
1281 DB_SHOW_COMMAND(locktree, db_show_locktree)
1282 {
1283 	struct lock_object *lock;
1284 	struct lock_class *class;
1285 	struct turnstile_chain *tc;
1286 	struct turnstile *ts;
1287 
1288 	if (!have_addr)
1289 		return;
1290 	lock = (struct lock_object *)addr;
1291 	tc = TC_LOOKUP(lock);
1292 	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
1293 		if (ts->ts_lockobj == lock)
1294 			break;
1295 	if (ts == NULL) {
1296 		class = LOCK_CLASS(lock);
1297 		db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name,
1298 		    lock->lo_name);
1299 	} else
1300 		print_waiters(ts, 0);
1301 }
1302 #endif
1303