1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 /*
29  * Implementation of sleep queues used to hold the queue of threads blocked on
30  * a wait channel.  Sleep queues are different from turnstiles in that wait
31  * channels are not owned by anyone, so there is no priority propagation.
32  * Sleep queues can also provide a timeout and can be interrupted by
33  * signals.  That said, there are several similarities between the turnstile
34  * and sleep queue implementations.  (Note: turnstiles were implemented
35  * first.)  For example, both use a hash table of the same size where each
36  * bucket is referred to as a "chain" that contains both a spin lock and
37  * a linked list of queues.  An individual queue is located by using a hash
38  * to pick a chain, locking the chain, and then walking the chain searching
39  * for the queue.  This means that a wait channel object does not need to
40  * embed its queue head just as locks do not embed their turnstile queue
41  * head.  Threads also carry around a sleep queue that they lend to the
42  * wait channel when blocking.  Just as in turnstiles, the queue includes
43  * a free list of the sleep queues of other threads blocked on the same
44  * wait channel in the case of multiple waiters.
45  *
46  * Some additional functionality provided by sleep queues includes the
47  * ability to set a timeout.  The timeout is managed using a per-thread
48  * callout that resumes a thread if it is asleep.  A thread may also
49  * catch signals while it is asleep (aka an interruptible sleep).  The
50  * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
51  * sleep queues also provide some extra assertions.  One is not allowed to
52  * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
53  * must consistently use the same lock to synchronize with a wait channel,
54  * though this check is currently only a warning for sleep/wakeup due to
55  * pre-existing abuse of that API.  The same lock must also be held when
56  * awakening threads, though that is currently only enforced for condition
57  * variables.
58  */
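
/*
 * As a point of reference, a consumer typically drives this API in the
 * following order (a condensed sketch in the style of sleepqueue(9);
 * "example_wchan" names a hypothetical wait channel).  The sleeper:
 *
 *	sleepq_lock(&example_wchan);
 *	sleepq_add(&example_wchan, NULL, "example", SLEEPQ_SLEEP, 0);
 *	sleepq_wait(&example_wchan, 0);
 *
 * and the corresponding wakeup from another thread:
 *
 *	sleepq_lock(&example_wchan);
 *	sleepq_signal(&example_wchan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(&example_wchan);
 *
 * sleepq_wait() consumes the chain lock taken by sleepq_lock(), so the
 * sleeper does not call sleepq_release() itself.
 */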
59 
60 #include <sys/cdefs.h>
61 #include "opt_sleepqueue_profiling.h"
62 #include "opt_ddb.h"
63 #include "opt_sched.h"
64 #include "opt_stack.h"
65 
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/lock.h>
69 #include <sys/kernel.h>
70 #include <sys/ktr.h>
71 #include <sys/mutex.h>
72 #include <sys/proc.h>
73 #include <sys/sbuf.h>
74 #include <sys/sched.h>
75 #include <sys/sdt.h>
76 #include <sys/signalvar.h>
77 #include <sys/sleepqueue.h>
78 #include <sys/stack.h>
79 #include <sys/sysctl.h>
80 #include <sys/time.h>
81 #ifdef EPOCH_TRACE
82 #include <sys/epoch.h>
83 #endif
84 
85 #include <machine/atomic.h>
86 
87 #include <vm/uma.h>
88 
89 #ifdef DDB
90 #include <ddb/ddb.h>
91 #endif
92 
93 /*
94  * Constants for the hash table of sleep queue chains.
95  * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
96  */
97 #ifndef SC_TABLESIZE
98 #define	SC_TABLESIZE	256
99 #endif
100 CTASSERT(powerof2(SC_TABLESIZE));
101 #define	SC_MASK		(SC_TABLESIZE - 1)
102 #define	SC_SHIFT	8
103 #define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
104 			    SC_MASK)
105 #define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
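
/*
 * As a worked example, with the default SC_TABLESIZE of 256 (SC_MASK 0xff),
 * a hypothetical wait channel address of 0x12345678 hashes to
 * ((0x12345678 >> 8) ^ 0x12345678) & 0xff = 0x1226622e & 0xff = 0x2e,
 * i.e. chain 46.  Folding the shifted upper bits in with XOR spreads wait
 * channels that differ only above the low byte across different chains.
 */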
106 #define NR_SLEEPQS      2
107 /*
108  * There are two different lists of sleep queues.  Both lists are connected
109  * via the sq_hash entries.  The first list is the sleep queue chain list
110  * that a sleep queue is on when it is attached to a wait channel.  The
111  * second list is the free list hung off of a sleep queue that is attached
112  * to a wait channel.
113  *
114  * Each sleep queue also contains the wait channel it is attached to, the
115  * list of threads blocked on that wait channel, flags specific to the
116  * wait channel, and the lock used to synchronize with a wait channel.
117  * The flags are used to catch mismatches between the various consumers
118  * of the sleep queue API (e.g. sleep/wakeup and condition variables).
119  * The lock pointer is only used when invariants are enabled for various
120  * debugging checks.
121  *
122  * Locking key:
123  *  c - sleep queue chain lock
124  */
125 struct sleepqueue {
126 	struct threadqueue sq_blocked[NR_SLEEPQS]; /* (c) Blocked threads. */
127 	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
128 	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
129 	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
130 	const void	*sq_wchan;		/* (c) Wait channel. */
131 	int	sq_type;			/* (c) Queue type. */
132 #ifdef INVARIANTS
133 	struct lock_object *sq_lock;		/* (c) Associated lock. */
134 #endif
135 };
136 
137 struct sleepqueue_chain {
138 	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
139 	struct mtx sc_lock;			/* Spin lock for this chain. */
140 #ifdef SLEEPQUEUE_PROFILING
141 	u_int	sc_depth;			/* Length of sc_queues. */
142 	u_int	sc_max_depth;			/* Max length of sc_queues. */
143 #endif
144 } __aligned(CACHE_LINE_SIZE);
145 
146 #ifdef SLEEPQUEUE_PROFILING
147 static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
148     "sleepq profiling");
149 static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains,
150     CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
151     "sleepq chain stats");
152 static u_int sleepq_max_depth;
153 SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
154     0, "maximum depth achieved by a single chain");
155 
156 static void	sleepq_profile(const char *wmesg);
157 static int	prof_enabled;
158 #endif
159 static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
160 static uma_zone_t sleepq_zone;
161 
162 /*
163  * Prototypes for non-exported routines.
164  */
165 static int	sleepq_catch_signals(const void *wchan, int pri);
166 static inline int sleepq_check_signals(void);
167 static inline int sleepq_check_timeout(void);
168 #ifdef INVARIANTS
169 static void	sleepq_dtor(void *mem, int size, void *arg);
170 #endif
171 static int	sleepq_init(void *mem, int size, int flags);
172 static void	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
173 		    int pri, int srqflags);
174 static void	sleepq_remove_thread(struct sleepqueue *sq, struct thread *td);
175 static void	sleepq_switch(const void *wchan, int pri);
176 static void	sleepq_timeout(void *arg);
177 
178 SDT_PROBE_DECLARE(sched, , , sleep);
179 SDT_PROBE_DECLARE(sched, , , wakeup);
180 
181 /*
182  * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes.
183  * Note that this must happen after sleepinit() has been fully executed, and
184  * hence after the SI_SUB_KMEM SYSINIT() subsystem setup.
185  */
186 #ifdef SLEEPQUEUE_PROFILING
187 static void
188 init_sleepqueue_profiling(void)
189 {
190 	char chain_name[10];
191 	struct sysctl_oid *chain_oid;
192 	u_int i;
193 
194 	for (i = 0; i < SC_TABLESIZE; i++) {
195 		snprintf(chain_name, sizeof(chain_name), "%u", i);
196 		chain_oid = SYSCTL_ADD_NODE(NULL,
197 		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
198 		    chain_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
199 		    "sleepq chain stats");
200 		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
201 		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
202 		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
203 		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
204 		    NULL);
205 	}
206 }
207 
208 SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
209     init_sleepqueue_profiling, NULL);
210 #endif
211 
212 /*
213  * Early initialization of sleep queues that is called from the sleepinit()
214  * SYSINIT.
215  */
216 void
217 init_sleepqueues(void)
218 {
219 	int i;
220 
221 	for (i = 0; i < SC_TABLESIZE; i++) {
222 		LIST_INIT(&sleepq_chains[i].sc_queues);
223 		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
224 		    MTX_SPIN);
225 	}
226 	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
227 #ifdef INVARIANTS
228 	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
229 #else
230 	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
231 #endif
232 
233 	thread0.td_sleepqueue = sleepq_alloc();
234 }
235 
236 /*
237  * Get a sleep queue for a new thread.
238  */
239 struct sleepqueue *
240 sleepq_alloc(void)
241 {
242 
243 	return (uma_zalloc(sleepq_zone, M_WAITOK));
244 }
245 
246 /*
247  * Free a sleep queue when a thread is destroyed.
248  */
249 void
250 sleepq_free(struct sleepqueue *sq)
251 {
252 
253 	uma_zfree(sleepq_zone, sq);
254 }
255 
256 /*
257  * Lock the sleep queue chain associated with the specified wait channel.
258  */
259 void
260 sleepq_lock(const void *wchan)
261 {
262 	struct sleepqueue_chain *sc;
263 
264 	sc = SC_LOOKUP(wchan);
265 	mtx_lock_spin(&sc->sc_lock);
266 }
267 
268 /*
269  * Look up the sleep queue associated with a given wait channel in the hash
270  * table.  The associated sleep queue chain must be locked by the caller.
271  * If no queue is found in the table, NULL is returned.
272  */
273 struct sleepqueue *
274 sleepq_lookup(const void *wchan)
275 {
276 	struct sleepqueue_chain *sc;
277 	struct sleepqueue *sq;
278 
279 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
280 	sc = SC_LOOKUP(wchan);
281 	mtx_assert(&sc->sc_lock, MA_OWNED);
282 	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
283 		if (sq->sq_wchan == wchan)
284 			return (sq);
285 	return (NULL);
286 }
287 
288 /*
289  * Unlock the sleep queue chain associated with a given wait channel.
290  */
291 void
292 sleepq_release(const void *wchan)
293 {
294 	struct sleepqueue_chain *sc;
295 
296 	sc = SC_LOOKUP(wchan);
297 	mtx_unlock_spin(&sc->sc_lock);
298 }
299 
300 /*
301  * Places the current thread on the sleep queue for the specified wait
302  * channel.  If INVARIANTS is enabled, then it associates the passed-in
303  * lock with the sleepq to make sure it is held when that sleep queue is
304  * woken up.
305  */
306 void
307 sleepq_add(const void *wchan, struct lock_object *lock, const char *wmesg,
308     int flags, int queue)
309 {
310 	struct sleepqueue_chain *sc;
311 	struct sleepqueue *sq;
312 	struct thread *td;
313 
314 	td = curthread;
315 	sc = SC_LOOKUP(wchan);
316 	mtx_assert(&sc->sc_lock, MA_OWNED);
317 	MPASS(td->td_sleepqueue != NULL);
318 	MPASS(wchan != NULL);
319 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
320 
321 	/* If this thread is not allowed to sleep, die a horrible death. */
322 	if (__predict_false(!THREAD_CAN_SLEEP())) {
323 #ifdef EPOCH_TRACE
324 		epoch_trace_list(curthread);
325 #endif
326 		KASSERT(0,
327 		    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
328 		    __func__, td, wchan));
329 	}
330 
331 	/* Look up the sleep queue associated with the wait channel 'wchan'. */
332 	sq = sleepq_lookup(wchan);
333 
334 	/*
335 	 * If the wait channel does not already have a sleep queue, use
336 	 * this thread's sleep queue.  Otherwise, insert the current thread
337 	 * into the sleep queue already in use by this wait channel.
338 	 */
339 	if (sq == NULL) {
340 #ifdef INVARIANTS
341 		int i;
342 
343 		sq = td->td_sleepqueue;
344 		for (i = 0; i < NR_SLEEPQS; i++) {
345 			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
346 			    ("thread's sleep queue %d is not empty", i));
347 			KASSERT(sq->sq_blockedcnt[i] == 0,
348 			    ("thread's sleep queue %d count mismatches", i));
349 		}
350 		KASSERT(LIST_EMPTY(&sq->sq_free),
351 		    ("thread's sleep queue has a non-empty free list"));
352 		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
353 		sq->sq_lock = lock;
354 #endif
355 #ifdef SLEEPQUEUE_PROFILING
356 		sc->sc_depth++;
357 		if (sc->sc_depth > sc->sc_max_depth) {
358 			sc->sc_max_depth = sc->sc_depth;
359 			if (sc->sc_max_depth > sleepq_max_depth)
360 				sleepq_max_depth = sc->sc_max_depth;
361 		}
362 #endif
363 		sq = td->td_sleepqueue;
364 		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
365 		sq->sq_wchan = wchan;
366 		sq->sq_type = flags & SLEEPQ_TYPE;
367 	} else {
368 		MPASS(wchan == sq->sq_wchan);
369 		MPASS(lock == sq->sq_lock);
370 		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
371 		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
372 	}
373 	thread_lock(td);
374 	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
375 	sq->sq_blockedcnt[queue]++;
376 	td->td_sleepqueue = NULL;
377 	td->td_sqqueue = queue;
378 	td->td_wchan = wchan;
379 	td->td_wmesg = wmesg;
380 	if (flags & SLEEPQ_INTERRUPTIBLE) {
381 		td->td_intrval = 0;
382 		td->td_flags |= TDF_SINTR;
383 	}
384 	td->td_flags &= ~TDF_TIMEOUT;
385 	thread_unlock(td);
386 }
387 
388 /*
389  * Sets a timeout that will remove the current thread from the
390  * specified sleep queue at the specified time if the thread has not
391  * already been awakened.  Flags are from the C_* (callout) namespace.
392  */
393 void
394 sleepq_set_timeout_sbt(const void *wchan, sbintime_t sbt, sbintime_t pr,
395     int flags)
396 {
397 	struct sleepqueue_chain *sc __unused;
398 	struct thread *td;
399 	sbintime_t pr1;
400 
401 	td = curthread;
402 	sc = SC_LOOKUP(wchan);
403 	mtx_assert(&sc->sc_lock, MA_OWNED);
404 	MPASS(TD_ON_SLEEPQ(td));
405 	MPASS(td->td_sleepqueue == NULL);
406 	MPASS(wchan != NULL);
407 	if (cold && td == &thread0)
408 		panic("timed sleep before timers are working");
409 	KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx",
410 	    td->td_tid, td, (uintmax_t)td->td_sleeptimo));
411 	thread_lock(td);
412 	callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
413 	thread_unlock(td);
414 	callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
415 	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC |
416 	    C_DIRECT_EXEC);
417 }
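
/*
 * A timed sleep composes with the functions above roughly as follows (a
 * sketch with a hypothetical wait channel; EWOULDBLOCK indicates that the
 * timeout fired):
 *
 *	sleepq_lock(&example_wchan);
 *	sleepq_add(&example_wchan, NULL, "example", SLEEPQ_SLEEP, 0);
 *	sleepq_set_timeout_sbt(&example_wchan, SBT_1S, 0, C_HARDCLOCK);
 *	error = sleepq_timedwait(&example_wchan, 0);
 *	if (error == EWOULDBLOCK)
 *		...		(the one-second timeout expired)
 */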
418 
419 /*
420  * Return the number of actual sleepers for the specified queue.
421  */
422 u_int
423 sleepq_sleepcnt(const void *wchan, int queue)
424 {
425 	struct sleepqueue *sq;
426 
427 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
428 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
429 	sq = sleepq_lookup(wchan);
430 	if (sq == NULL)
431 		return (0);
432 	return (sq->sq_blockedcnt[queue]);
433 }
434 
435 static int
436 sleepq_check_ast_sc_locked(struct thread *td, struct sleepqueue_chain *sc)
437 {
438 	struct proc *p;
439 	int ret;
440 
441 	mtx_assert(&sc->sc_lock, MA_OWNED);
442 
443 	if ((td->td_pflags & TDP_WAKEUP) != 0) {
444 		td->td_pflags &= ~TDP_WAKEUP;
445 		thread_lock(td);
446 		return (EINTR);
447 	}
448 
449 	/*
450 	 * See if there are any pending signals or suspension requests for this
451 	 * thread.  If not, we can switch immediately.
452 	 */
453 	thread_lock(td);
454 	if (!td_ast_pending(td, TDA_SIG) && !td_ast_pending(td, TDA_SUSPEND))
455 		return (0);
456 
457 	thread_unlock(td);
458 	mtx_unlock_spin(&sc->sc_lock);
459 
460 	p = td->td_proc;
461 	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
462 	    (void *)td, (long)p->p_pid, td->td_name);
463 	PROC_LOCK(p);
464 
465 	/*
466 	 * Check for suspension first. Checking for signals and then
467 	 * suspending could result in a missed signal, since a signal
468 	 * can be delivered while this thread is suspended.
469 	 */
470 	ret = sig_ast_checksusp(td);
471 	if (ret != 0) {
472 		PROC_UNLOCK(p);
473 		mtx_lock_spin(&sc->sc_lock);
474 		thread_lock(td);
475 		return (ret);
476 	}
477 
478 	ret = sig_ast_needsigchk(td);
479 
480 	/*
481 	 * Lock the per-process spinlock prior to dropping the
482 	 * PROC_LOCK to avoid a signal delivery race.
483 	 * PROC_LOCK, PROC_SLOCK, and thread_lock() are
484 	 * currently held in tdsendsignal() and thread_single().
485 	 */
486 	PROC_SLOCK(p);
487 	mtx_lock_spin(&sc->sc_lock);
488 	PROC_UNLOCK(p);
489 	thread_lock(td);
490 	PROC_SUNLOCK(p);
491 
492 	return (ret);
493 }
494 
495 /*
496  * Marks the pending sleep of the current thread as interruptible and
497  * makes an initial check for pending signals before putting a thread
498  * to sleep.  Called with the sleep queue chain lock held; returns with
499  * both that lock and the thread lock released.
500  */
501 static int
502 sleepq_catch_signals(const void *wchan, int pri)
503 {
504 	struct thread *td;
505 	struct sleepqueue_chain *sc;
506 	struct sleepqueue *sq;
507 	int ret;
508 
509 	sc = SC_LOOKUP(wchan);
510 	mtx_assert(&sc->sc_lock, MA_OWNED);
511 	MPASS(wchan != NULL);
512 	td = curthread;
513 
514 	ret = sleepq_check_ast_sc_locked(td, sc);
515 	THREAD_LOCK_ASSERT(td, MA_OWNED);
516 	mtx_assert(&sc->sc_lock, MA_OWNED);
517 
518 	if (ret == 0) {
519 		/*
520 		 * No pending signals and no suspension requests found.
521 		 * Switch the thread off the cpu.
522 		 */
523 		sleepq_switch(wchan, pri);
524 	} else {
525 		/*
526 		 * There were pending signals and this thread is still
527 		 * on the sleep queue, so remove it from the sleep queue.
528 		 */
529 		if (TD_ON_SLEEPQ(td)) {
530 			sq = sleepq_lookup(wchan);
531 			sleepq_remove_thread(sq, td);
532 		}
533 		MPASS(td->td_lock != &sc->sc_lock);
534 		mtx_unlock_spin(&sc->sc_lock);
535 		thread_unlock(td);
536 	}
537 	return (ret);
538 }
539 
540 /*
541  * Switches to another thread if we are still asleep on a sleep queue.
542  * Entered with the thread and chain locks held; returns with both released.
543  */
544 static void
545 sleepq_switch(const void *wchan, int pri)
546 {
547 	struct sleepqueue_chain *sc;
548 	struct sleepqueue *sq;
549 	struct thread *td;
550 	bool rtc_changed;
551 
552 	td = curthread;
553 	sc = SC_LOOKUP(wchan);
554 	mtx_assert(&sc->sc_lock, MA_OWNED);
555 	THREAD_LOCK_ASSERT(td, MA_OWNED);
556 
557 	/*
558 	 * If we have a sleep queue, then we've already been woken up, so
559 	 * just return.
560 	 */
561 	if (td->td_sleepqueue != NULL) {
562 		mtx_unlock_spin(&sc->sc_lock);
563 		thread_unlock(td);
564 		return;
565 	}
566 
567 	/*
568 	 * If TDF_TIMEOUT is set, then our sleep has been timed out
569 	 * already but we are still on the sleep queue, so dequeue the
570 	 * thread and return.
571 	 *
572 	 * Do the same if the real-time clock has been adjusted since this
573 	 * thread calculated its timeout based on that clock.  This handles
574 	 * the following race:
575 	 * - The Ts thread needs to sleep until an absolute real-clock time.
576 	 *   It copies the global rtc_generation into curthread->td_rtcgen,
577 	 *   reads the RTC, and calculates a sleep duration based on that time.
578 	 *   See umtxq_sleep() for an example.
579 	 * - The Tc thread adjusts the RTC, bumps rtc_generation, and wakes
580 	 *   threads that are sleeping until an absolute real-clock time.
581 	 *   See tc_setclock() and the POSIX specification of clock_settime().
582 	 * - Ts reaches the code below.  It holds the sleepqueue chain lock,
583 	 *   so Tc has finished waking, so this thread must test td_rtcgen.
584 	 * (The declaration of td_rtcgen refers to this comment.)
585 	 */
586 	rtc_changed = td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation;
587 	if ((td->td_flags & TDF_TIMEOUT) || rtc_changed) {
588 		if (rtc_changed) {
589 			td->td_rtcgen = 0;
590 		}
591 		MPASS(TD_ON_SLEEPQ(td));
592 		sq = sleepq_lookup(wchan);
593 		sleepq_remove_thread(sq, td);
594 		mtx_unlock_spin(&sc->sc_lock);
595 		thread_unlock(td);
596 		return;
597 	}
598 #ifdef SLEEPQUEUE_PROFILING
599 	if (prof_enabled)
600 		sleepq_profile(td->td_wmesg);
601 #endif
602 	MPASS(td->td_sleepqueue == NULL);
603 	sched_sleep(td, pri);
604 	thread_lock_set(td, &sc->sc_lock);
605 	SDT_PROBE0(sched, , , sleep);
606 	TD_SET_SLEEPING(td);
607 	mi_switch(SW_VOL | SWT_SLEEPQ);
608 	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
609 	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
610 	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
611 }
612 
613 /*
614  * Check to see if we timed out.
615  */
616 static inline int
617 sleepq_check_timeout(void)
618 {
619 	struct thread *td;
620 	int res;
621 
622 	res = 0;
623 	td = curthread;
624 	if (td->td_sleeptimo != 0) {
625 		if (td->td_sleeptimo <= sbinuptime())
626 			res = EWOULDBLOCK;
627 		td->td_sleeptimo = 0;
628 	}
629 	return (res);
630 }
631 
632 /*
633  * Check to see if we were awoken by a signal.
634  */
635 static inline int
636 sleepq_check_signals(void)
637 {
638 	struct thread *td;
639 
640 	td = curthread;
641 	KASSERT((td->td_flags & TDF_SINTR) == 0,
642 	    ("thread %p still in interruptible sleep?", td));
643 
644 	return (td->td_intrval);
645 }
646 
647 /*
648  * Block the current thread until it is awakened from its sleep queue.
649  */
650 void
651 sleepq_wait(const void *wchan, int pri)
652 {
653 	struct thread *td;
654 
655 	td = curthread;
656 	MPASS(!(td->td_flags & TDF_SINTR));
657 	thread_lock(td);
658 	sleepq_switch(wchan, pri);
659 }
660 
661 /*
662  * Block the current thread until it is awakened from its sleep queue
663  * or it is interrupted by a signal.
664  */
665 int
666 sleepq_wait_sig(const void *wchan, int pri)
667 {
668 	int rcatch;
669 
670 	rcatch = sleepq_catch_signals(wchan, pri);
671 	if (rcatch)
672 		return (rcatch);
673 	return (sleepq_check_signals());
674 }
675 
676 /*
677  * Block the current thread until it is awakened from its sleep queue
678  * or it times out while waiting.
679  */
680 int
681 sleepq_timedwait(const void *wchan, int pri)
682 {
683 	struct thread *td;
684 
685 	td = curthread;
686 	MPASS(!(td->td_flags & TDF_SINTR));
687 
688 	thread_lock(td);
689 	sleepq_switch(wchan, pri);
690 
691 	return (sleepq_check_timeout());
692 }
693 
694 /*
695  * Block the current thread until it is awakened from its sleep queue,
696  * it is interrupted by a signal, or it times out waiting to be awakened.
697  */
698 int
699 sleepq_timedwait_sig(const void *wchan, int pri)
700 {
701 	int rcatch, rvalt, rvals;
702 
703 	rcatch = sleepq_catch_signals(wchan, pri);
704 	/* We must always call check_timeout() to clear sleeptimo. */
705 	rvalt = sleepq_check_timeout();
706 	rvals = sleepq_check_signals();
707 	if (rcatch)
708 		return (rcatch);
709 	if (rvals)
710 		return (rvals);
711 	return (rvalt);
712 }
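
/*
 * Following the usual sleepq_lock()/sleepq_add()/sleepq_set_timeout_sbt()
 * sequence, a caller distinguishes the three outcomes roughly as follows
 * (sketch only; the restart policy is the hypothetical consumer's choice):
 *
 *	error = sleepq_timedwait_sig(&example_wchan, 0);
 *	if (error == 0)
 *		;		(awakened normally)
 *	else if (error == EWOULDBLOCK)
 *		;		(the timeout expired)
 *	else if (error == EINTR || error == ERESTART)
 *		;		(interrupted by a signal)
 *
 * Per the ordering above, a caught signal takes precedence over an
 * expired timeout.
 */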
713 
714 /*
715  * Returns the type of the sleep queue given a wait channel.
716  */
717 int
718 sleepq_type(const void *wchan)
719 {
720 	struct sleepqueue *sq;
721 	int type;
722 
723 	MPASS(wchan != NULL);
724 
725 	sq = sleepq_lookup(wchan);
726 	if (sq == NULL)
727 		return (-1);
728 	type = sq->sq_type;
729 
730 	return (type);
731 }
732 
733 /*
734  * Removes a thread from a sleep queue and makes it runnable.
735  *
736  * Requires the sc chain locked on entry.  If SRQ_HOLD is specified it will
737  * be locked on return.  Returns without the thread lock held.
738  */
739 static void
740 sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri,
741     int srqflags)
742 {
743 	struct sleepqueue_chain *sc;
744 	bool drop;
745 
746 	MPASS(td != NULL);
747 	MPASS(sq->sq_wchan != NULL);
748 	MPASS(td->td_wchan == sq->sq_wchan);
749 
750 	sc = SC_LOOKUP(sq->sq_wchan);
751 	mtx_assert(&sc->sc_lock, MA_OWNED);
752 
753 	/*
754 	 * Avoid recursing on the chain lock.  If the locks don't match we
755 	 * need to acquire the thread lock which setrunnable will drop for
756 	 * us.  In this case we need to drop the chain lock afterwards.
757 	 *
758 	 * There is no race that will make td_lock equal to sc_lock because
759 	 * we hold sc_lock.
760 	 */
761 	drop = false;
762 	if (!TD_IS_SLEEPING(td)) {
763 		thread_lock(td);
764 		drop = true;
765 	} else
766 		thread_lock_block_wait(td);
767 
768 	/* Remove thread from the sleepq. */
769 	sleepq_remove_thread(sq, td);
770 
771 	/* If we're done with the sleepqueue release it. */
772 	if ((srqflags & SRQ_HOLD) == 0 && drop)
773 		mtx_unlock_spin(&sc->sc_lock);
774 
775 	/* Adjust priority if requested. */
776 	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
777 	if (pri != 0 && td->td_priority > pri &&
778 	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
779 		sched_prio(td, pri);
780 
781 	/*
782 	 * Note that thread td might not be sleeping if it is running
783 	 * sleepq_catch_signals() on another CPU or is blocked on its
784 	 * proc lock to check signals.  There's no need to mark the
785 	 * thread runnable in that case.
786 	 */
787 	if (TD_IS_SLEEPING(td)) {
788 		MPASS(!drop);
789 		TD_CLR_SLEEPING(td);
790 		setrunnable(td, srqflags);
791 	} else {
792 		MPASS(drop);
793 		thread_unlock(td);
794 	}
795 }
796 
797 static void
798 sleepq_remove_thread(struct sleepqueue *sq, struct thread *td)
799 {
800 	struct sleepqueue_chain *sc __unused;
801 
802 	MPASS(td != NULL);
803 	MPASS(sq->sq_wchan != NULL);
804 	MPASS(td->td_wchan == sq->sq_wchan);
805 	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
806 	THREAD_LOCK_ASSERT(td, MA_OWNED);
807 	sc = SC_LOOKUP(sq->sq_wchan);
808 	mtx_assert(&sc->sc_lock, MA_OWNED);
809 
810 	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);
811 
812 	/* Remove the thread from the queue. */
813 	sq->sq_blockedcnt[td->td_sqqueue]--;
814 	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);
815 
816 	/*
817 	 * Get a sleep queue for this thread.  If this is the last waiter,
818 	 * use the queue itself and take it out of the chain, otherwise,
819 	 * remove a queue from the free list.
820 	 */
821 	if (LIST_EMPTY(&sq->sq_free)) {
822 		td->td_sleepqueue = sq;
823 #ifdef INVARIANTS
824 		sq->sq_wchan = NULL;
825 #endif
826 #ifdef SLEEPQUEUE_PROFILING
827 		sc->sc_depth--;
828 #endif
829 	} else
830 		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
831 	LIST_REMOVE(td->td_sleepqueue, sq_hash);
832 
833 	if ((td->td_flags & TDF_TIMEOUT) == 0 && td->td_sleeptimo != 0 &&
834 	    td->td_lock == &sc->sc_lock) {
835 		/*
836 		 * We ignore the situation where timeout subsystem was
837 		 * unable to stop our callout.  The struct thread is
838 		 * type-stable, the callout will use the correct
839 		 * memory when running.  The checks of the
840 		 * td_sleeptimo value in this function and in
841 		 * sleepq_timeout() ensure that the thread does not
842 		 * get spurious wakeups, even if the callout was reset
843 		 * or thread reused.
844 		 *
845 		 * We also cannot safely stop the callout if a scheduler
846 		 * lock is held since softclock_thread() forces a lock
847 		 * order of callout lock -> scheduler lock.  The thread
848 		 * lock will be a scheduler lock only if the thread is
849 		 * preparing to go to sleep, so this is hopefully a rare
850 		 * scenario.
851 		 */
852 		callout_stop(&td->td_slpcallout);
853 	}
854 
855 	td->td_wmesg = NULL;
856 	td->td_wchan = NULL;
857 	td->td_flags &= ~(TDF_SINTR | TDF_TIMEOUT);
858 
859 	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
860 	    (void *)td, (long)td->td_proc->p_pid, td->td_name);
861 }
862 
863 void
864 sleepq_remove_nested(struct thread *td)
865 {
866 	struct sleepqueue_chain *sc;
867 	struct sleepqueue *sq;
868 	const void *wchan;
869 
870 	MPASS(TD_ON_SLEEPQ(td));
871 
872 	wchan = td->td_wchan;
873 	sc = SC_LOOKUP(wchan);
874 	mtx_lock_spin(&sc->sc_lock);
875 	sq = sleepq_lookup(wchan);
876 	MPASS(sq != NULL);
877 	thread_lock(td);
878 	sleepq_remove_thread(sq, td);
879 	mtx_unlock_spin(&sc->sc_lock);
880 	/* Returns with the thread lock owned. */
881 }
882 
883 #ifdef INVARIANTS
884 /*
885  * UMA zone item deallocator.
886  */
887 static void
888 sleepq_dtor(void *mem, int size, void *arg)
889 {
890 	struct sleepqueue *sq;
891 	int i;
892 
893 	sq = mem;
894 	for (i = 0; i < NR_SLEEPQS; i++) {
895 		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
896 		MPASS(sq->sq_blockedcnt[i] == 0);
897 	}
898 }
899 #endif
900 
901 /*
902  * UMA zone item initializer.
903  */
904 static int
905 sleepq_init(void *mem, int size, int flags)
906 {
907 	struct sleepqueue *sq;
908 	int i;
909 
910 	bzero(mem, size);
911 	sq = mem;
912 	for (i = 0; i < NR_SLEEPQS; i++) {
913 		TAILQ_INIT(&sq->sq_blocked[i]);
914 		sq->sq_blockedcnt[i] = 0;
915 	}
916 	LIST_INIT(&sq->sq_free);
917 	return (0);
918 }
919 
920 /*
921  * Find thread sleeping on a wait channel and resume it.
922  */
923 void
924 sleepq_signal(const void *wchan, int flags, int pri, int queue)
925 {
926 	struct sleepqueue_chain *sc;
927 	struct sleepqueue *sq;
928 	struct threadqueue *head;
929 	struct thread *td, *besttd;
930 
931 	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
932 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
933 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
934 	sq = sleepq_lookup(wchan);
935 	if (sq == NULL) {
936 		if (flags & SLEEPQ_DROP)
937 			sleepq_release(wchan);
938 		return;
939 	}
940 	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
941 	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
942 
943 	head = &sq->sq_blocked[queue];
944 	if (flags & SLEEPQ_UNFAIR) {
945 		/*
946 		 * Find the most recently sleeping thread, but try to
947 		 * skip threads still in the process of context switching to
948 		 * avoid spinning on the thread lock.
949 		 */
950 		sc = SC_LOOKUP(wchan);
951 		besttd = TAILQ_LAST_FAST(head, thread, td_slpq);
952 		while (besttd->td_lock != &sc->sc_lock) {
953 			td = TAILQ_PREV_FAST(besttd, head, thread, td_slpq);
954 			if (td == NULL)
955 				break;
956 			besttd = td;
957 		}
958 	} else {
959 		/*
960 		 * Find the highest priority thread on the queue.  If there
961 		 * is a tie, use the thread that first appears in the queue
962 		 * as it has been sleeping the longest since threads are
963 		 * always added to the tail of sleep queues.
964 		 */
965 		besttd = td = TAILQ_FIRST(head);
966 		while ((td = TAILQ_NEXT(td, td_slpq)) != NULL) {
967 			if (td->td_priority < besttd->td_priority)
968 				besttd = td;
969 		}
970 	}
971 	MPASS(besttd != NULL);
972 	sleepq_resume_thread(sq, besttd, pri,
973 	    (flags & SLEEPQ_DROP) ? 0 : SRQ_HOLD);
974 }
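
/*
 * When the caller is done with the wait channel as soon as the wakeup is
 * posted, SLEEPQ_DROP lets sleepq_signal() release the chain lock itself,
 * e.g. (sketch, hypothetical wait channel):
 *
 *	sleepq_lock(&example_wchan);
 *	sleepq_signal(&example_wchan, SLEEPQ_SLEEP | SLEEPQ_DROP, 0, 0);
 *
 * Without SLEEPQ_DROP the chain stays locked (SRQ_HOLD above) and the
 * caller must follow up with sleepq_release().
 */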
975 
976 static bool
977 match_any(struct thread *td __unused)
978 {
979 
980 	return (true);
981 }
982 
983 /*
984  * Resume all threads sleeping on a specified wait channel.
985  */
986 void
987 sleepq_broadcast(const void *wchan, int flags, int pri, int queue)
988 {
989 	struct sleepqueue *sq;
990 
991 	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
992 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
993 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
994 	sq = sleepq_lookup(wchan);
995 	if (sq != NULL) {
996 		KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
997 		    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
998 
999 		sleepq_remove_matching(sq, queue, match_any, pri);
1000 	}
1001 }
1002 
1003 /*
1004  * Resume threads on the sleep queue that match the given predicate.
1005  */
1006 void
1007 sleepq_remove_matching(struct sleepqueue *sq, int queue,
1008     bool (*matches)(struct thread *), int pri)
1009 {
1010 	struct thread *td, *tdn;
1011 
1012 	/*
1013 	 * The last thread will be given ownership of sq and may
1014 	 * re-enqueue itself before sleepq_resume_thread() returns,
1015 	 * so we must cache the "next" queue item at the beginning
1016 	 * of the final iteration.
1017 	 */
1018 	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
1019 		if (matches(td))
1020 			sleepq_resume_thread(sq, td, pri, SRQ_HOLD);
1021 	}
1022 }
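
/*
 * A predicate-based wakeup might look like the following hypothetical
 * sketch, which resumes only the threads belonging to "example_proc":
 *
 *	static bool
 *	match_example_proc(struct thread *td)
 *	{
 *		return (td->td_proc == example_proc);
 *	}
 *
 *	sleepq_lock(&example_wchan);
 *	sq = sleepq_lookup(&example_wchan);
 *	if (sq != NULL)
 *		sleepq_remove_matching(sq, 0, match_example_proc, 0);
 *	sleepq_release(&example_wchan);
 *
 * Since the predicate takes only the thread pointer, any extra state
 * (such as example_proc) has to be passed out of band.
 */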
1023 
1024 /*
1025  * Time sleeping threads out.  When the timeout expires, the thread is
1026  * removed from the sleep queue and made runnable if it is still asleep.
1027  */
1028 static void
1029 sleepq_timeout(void *arg)
1030 {
1031 	struct sleepqueue_chain *sc __unused;
1032 	struct sleepqueue *sq;
1033 	struct thread *td;
1034 	const void *wchan;
1035 
1036 	td = arg;
1037 	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
1038 	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
1039 
1040 	thread_lock(td);
1041 	if (td->td_sleeptimo == 0 ||
1042 	    td->td_sleeptimo > td->td_slpcallout.c_time) {
1043 		/*
1044 		 * The thread does not want a timeout (yet).
1045 		 */
1046 	} else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
1047 		/*
1048 		 * See if the thread is asleep and get the wait
1049 		 * channel if it is.
1050 		 */
1051 		wchan = td->td_wchan;
1052 		sc = SC_LOOKUP(wchan);
1053 		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
1054 		sq = sleepq_lookup(wchan);
1055 		MPASS(sq != NULL);
1056 		td->td_flags |= TDF_TIMEOUT;
1057 		sleepq_resume_thread(sq, td, 0, 0);
1058 		return;
1059 	} else if (TD_ON_SLEEPQ(td)) {
1060 		/*
1061 		 * If the thread is on the SLEEPQ but isn't sleeping
1062 		 * yet, it can either be on another CPU in between
1063 		 * sleepq_add() and one of the sleepq_*wait*()
1064 		 * routines or it can be in sleepq_catch_signals().
1065 		 */
1066 		td->td_flags |= TDF_TIMEOUT;
1067 	}
1068 	thread_unlock(td);
1069 }
1070 
1071 /*
1072  * Resumes a specific thread from the sleep queue associated with a specific
1073  * wait channel if it is on that queue.
1074  */
1075 void
1076 sleepq_remove(struct thread *td, const void *wchan)
1077 {
1078 	struct sleepqueue_chain *sc;
1079 	struct sleepqueue *sq;
1080 
1081 	/*
1082 	 * Look up the sleep queue for this wait channel, then re-check
1083 	 * that the thread is asleep on that channel; if it is not, then
1084 	 * bail.
1085 	 */
1086 	MPASS(wchan != NULL);
1087 	sc = SC_LOOKUP(wchan);
1088 	mtx_lock_spin(&sc->sc_lock);
1089 	/*
1090 	 * We cannot lock the thread here as it may be sleeping on a
1091 	 * different sleepq.  However, holding the sleepq lock for this
1092 	 * wchan can guarantee that we do not miss a wakeup for this
1093 	 * channel.  The asserts below will catch any false positives.
1094 	 */
1095 	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
1096 		mtx_unlock_spin(&sc->sc_lock);
1097 		return;
1098 	}
1099 
1100 	/* Thread is asleep on sleep queue sq, so wake it up. */
1101 	sq = sleepq_lookup(wchan);
1102 	MPASS(sq != NULL);
1103 	MPASS(td->td_wchan == wchan);
1104 	sleepq_resume_thread(sq, td, 0, 0);
1105 }
1106 
1107 /*
1108  * Abort a thread as if an interrupt had occurred.  Only abort
1109  * interruptible waits (unfortunately it isn't safe to abort others).
1110  *
1111  * Requires thread lock on entry, releases on return.
1112  */
1113 void
1114 sleepq_abort(struct thread *td, int intrval)
1115 {
1116 	struct sleepqueue *sq;
1117 	const void *wchan;
1118 
1119 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1120 	MPASS(TD_ON_SLEEPQ(td));
1121 	MPASS(td->td_flags & TDF_SINTR);
1122 	MPASS((intrval == 0 && (td->td_flags & TDF_SIGWAIT) != 0) ||
1123 	    intrval == EINTR || intrval == ERESTART);
1124 
1125 	/*
1126 	 * If the TDF_TIMEOUT flag is set, just leave. A
1127 	 * timeout is scheduled anyhow.
1128 	 */
1129 	if (td->td_flags & TDF_TIMEOUT) {
1130 		thread_unlock(td);
1131 		return;
1132 	}
1133 
1134 	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
1135 	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
1136 	td->td_intrval = intrval;
1137 
1138 	/*
1139 	 * If the thread has not slept yet, it will find the signal in
1140 	 * sleepq_catch_signals() and call sleepq_resume_thread.  Otherwise
1141 	 * we have to do it here.
1142 	 */
1143 	if (!TD_IS_SLEEPING(td)) {
1144 		thread_unlock(td);
1145 		return;
1146 	}
1147 	wchan = td->td_wchan;
1148 	MPASS(wchan != NULL);
1149 	sq = sleepq_lookup(wchan);
1150 	MPASS(sq != NULL);
1151 
1152 	/* Thread is asleep on sleep queue sq, so wake it up. */
1153 	sleepq_resume_thread(sq, td, 0, 0);
1154 }
1155 
1156 void
1157 sleepq_chains_remove_matching(bool (*matches)(struct thread *))
1158 {
1159 	struct sleepqueue_chain *sc;
1160 	struct sleepqueue *sq, *sq1;
1161 	int i;
1162 
1163 	for (sc = &sleepq_chains[0]; sc < sleepq_chains + SC_TABLESIZE; ++sc) {
1164 		if (LIST_EMPTY(&sc->sc_queues)) {
1165 			continue;
1166 		}
1167 		mtx_lock_spin(&sc->sc_lock);
1168 		LIST_FOREACH_SAFE(sq, &sc->sc_queues, sq_hash, sq1) {
1169 			for (i = 0; i < NR_SLEEPQS; ++i)
1170 				sleepq_remove_matching(sq, i, matches, 0);
1171 		}
1172 		mtx_unlock_spin(&sc->sc_lock);
1173 	}
1174 }
1175 
1176 /*
1177  * Prints the stacks of all threads presently sleeping on wchan/queue to
1178  * the sbuf sb.  Sets count_stacks_printed to the number of stacks actually
1179  * printed.  Typically, this will equal the number of threads sleeping on the
1180  * queue, but may be less if sb overflowed before all stacks were printed.
1181  */
1182 #ifdef STACK
1183 int
1184 sleepq_sbuf_print_stacks(struct sbuf *sb, const void *wchan, int queue,
1185     int *count_stacks_printed)
1186 {
1187 	struct thread *td, *td_next;
1188 	struct sleepqueue *sq;
1189 	struct stack **st;
1190 	struct sbuf **td_infos;
1191 	int i, stack_idx, error, stacks_to_allocate;
1192 	bool finished;
1193 
1194 	error = 0;
1195 	finished = false;
1196 
1197 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
1198 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
1199 
1200 	stacks_to_allocate = 10;
1201 	for (i = 0; i < 3 && !finished; i++) {
1202 		/* We cannot malloc while holding the queue's spinlock, so
1203 		 * we do our mallocs now, and hope it is enough.  If it
1204 		 * isn't, we will free these, drop the lock, malloc more,
1205 		 * and try again, up to a point.  After that point we will
1206 		 * give up and report ENOMEM. We also cannot write to sb
1207 		 * during this time since the client may have set the
1208 		 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a
1209 		 * malloc as we print to it.  So we defer actually printing
1210 		 * to sb until after we drop the spinlock.
1211 		 */
1212 
1213 		/* Where we will store the stacks. */
1214 		st = malloc(sizeof(struct stack *) * stacks_to_allocate,
1215 		    M_TEMP, M_WAITOK);
1216 		for (stack_idx = 0; stack_idx < stacks_to_allocate;
1217 		    stack_idx++)
1218 			st[stack_idx] = stack_create(M_WAITOK);
1219 
1220 		/* Where we will store the td name, tid, etc. */
1221 		td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate,
1222 		    M_TEMP, M_WAITOK);
1223 		for (stack_idx = 0; stack_idx < stacks_to_allocate;
1224 		    stack_idx++)
1225 			td_infos[stack_idx] = sbuf_new(NULL, NULL,
1226 			    MAXCOMLEN + sizeof(struct thread *) * 2 + 40,
1227 			    SBUF_FIXEDLEN);
1228 
1229 		sleepq_lock(wchan);
1230 		sq = sleepq_lookup(wchan);
1231 		if (sq == NULL) {
1232 			/* This sleepq does not exist; exit and return ENOENT. */
1233 			error = ENOENT;
1234 			finished = true;
1235 			sleepq_release(wchan);
1236 			goto loop_end;
1237 		}
1238 
1239 		stack_idx = 0;
1240 		/* Save thread info */
1241 		TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq,
1242 		    td_next) {
1243 			if (stack_idx >= stacks_to_allocate)
1244 				goto loop_end;
1245 
1246 			/* Note the td_lock is equal to the sleepq_lock here. */
1247 			(void)stack_save_td(st[stack_idx], td);
1248 
1249 			sbuf_printf(td_infos[stack_idx], "%d: %s %p",
1250 			    td->td_tid, td->td_name, td);
1251 
1252 			++stack_idx;
1253 		}
1254 
1255 		finished = true;
1256 		sleepq_release(wchan);
1257 
1258 		/* Print the stacks */
1259 		for (i = 0; i < stack_idx; i++) {
1260 			sbuf_finish(td_infos[i]);
1261 			sbuf_printf(sb, "--- thread %s: ---\n", sbuf_data(td_infos[i]));
1262 			stack_sbuf_print(sb, st[i]);
1263 			sbuf_putc(sb, '\n');
1264 
1265 			error = sbuf_error(sb);
1266 			if (error == 0)
1267 				*count_stacks_printed = stack_idx;
1268 		}
1269 
1270 loop_end:
1271 		if (!finished)
1272 			sleepq_release(wchan);
1273 		for (stack_idx = 0; stack_idx < stacks_to_allocate;
1274 		    stack_idx++)
1275 			stack_destroy(st[stack_idx]);
1276 		for (stack_idx = 0; stack_idx < stacks_to_allocate;
1277 		    stack_idx++)
1278 			sbuf_delete(td_infos[stack_idx]);
1279 		free(st, M_TEMP);
1280 		free(td_infos, M_TEMP);
1281 		stacks_to_allocate *= 10;
1282 	}
1283 
1284 	if (!finished && error == 0)
1285 		error = ENOMEM;
1286 
1287 	return (error);
1288 }
1289 #endif
1290 
1291 #ifdef SLEEPQUEUE_PROFILING
1292 #define	SLEEPQ_PROF_LOCATIONS	1024
1293 #define	SLEEPQ_SBUFSIZE		512
1294 struct sleepq_prof {
1295 	LIST_ENTRY(sleepq_prof) sp_link;
1296 	const char	*sp_wmesg;
1297 	long		sp_count;
1298 };
1299 
1300 LIST_HEAD(sqphead, sleepq_prof);
1301 
1302 struct sqphead sleepq_prof_free;
1303 struct sqphead sleepq_hash[SC_TABLESIZE];
1304 static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
1305 static struct mtx sleepq_prof_lock;
1306 MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);
1307 
1308 static void
1309 sleepq_profile(const char *wmesg)
1310 {
1311 	struct sleepq_prof *sp;
1312 
1313 	mtx_lock_spin(&sleepq_prof_lock);
1314 	if (prof_enabled == 0)
1315 		goto unlock;
1316 	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
1317 		if (sp->sp_wmesg == wmesg)
1318 			goto done;
1319 	sp = LIST_FIRST(&sleepq_prof_free);
1320 	if (sp == NULL)
1321 		goto unlock;
1322 	sp->sp_wmesg = wmesg;
1323 	LIST_REMOVE(sp, sp_link);
1324 	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
1325 done:
1326 	sp->sp_count++;
1327 unlock:
1328 	mtx_unlock_spin(&sleepq_prof_lock);
1329 	return;
1330 }
1331 
1332 static void
1333 sleepq_prof_reset(void)
1334 {
1335 	struct sleepq_prof *sp;
1336 	int enabled;
1337 	int i;
1338 
1339 	mtx_lock_spin(&sleepq_prof_lock);
1340 	enabled = prof_enabled;
1341 	prof_enabled = 0;
1342 	for (i = 0; i < SC_TABLESIZE; i++)
1343 		LIST_INIT(&sleepq_hash[i]);
1344 	LIST_INIT(&sleepq_prof_free);
1345 	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
1346 		sp = &sleepq_profent[i];
1347 		sp->sp_wmesg = NULL;
1348 		sp->sp_count = 0;
1349 		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
1350 	}
1351 	prof_enabled = enabled;
1352 	mtx_unlock_spin(&sleepq_prof_lock);
1353 }
1354 
1355 static int
1356 enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
1357 {
1358 	int error, v;
1359 
1360 	v = prof_enabled;
1361 	error = sysctl_handle_int(oidp, &v, v, req);
1362 	if (error)
1363 		return (error);
1364 	if (req->newptr == NULL)
1365 		return (error);
1366 	if (v == prof_enabled)
1367 		return (0);
1368 	if (v == 1)
1369 		sleepq_prof_reset();
1370 	mtx_lock_spin(&sleepq_prof_lock);
1371 	prof_enabled = !!v;
1372 	mtx_unlock_spin(&sleepq_prof_lock);
1373 
1374 	return (0);
1375 }
1376 
1377 static int
1378 reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
1379 {
1380 	int error, v;
1381 
1382 	v = 0;
1383 	error = sysctl_handle_int(oidp, &v, 0, req);
1384 	if (error)
1385 		return (error);
1386 	if (req->newptr == NULL)
1387 		return (error);
1388 	if (v == 0)
1389 		return (0);
1390 	sleepq_prof_reset();
1391 
1392 	return (0);
1393 }
1394 
1395 static int
1396 dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
1397 {
1398 	struct sleepq_prof *sp;
1399 	struct sbuf *sb;
1400 	int enabled;
1401 	int error;
1402 	int i;
1403 
1404 	error = sysctl_wire_old_buffer(req, 0);
1405 	if (error != 0)
1406 		return (error);
1407 	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
1408 	sbuf_cat(sb, "\nwmesg\tcount\n");
1409 	enabled = prof_enabled;
1410 	mtx_lock_spin(&sleepq_prof_lock);
1411 	prof_enabled = 0;
1412 	mtx_unlock_spin(&sleepq_prof_lock);
1413 	for (i = 0; i < SC_TABLESIZE; i++) {
1414 		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
1415 			sbuf_printf(sb, "%s\t%ld\n",
1416 			    sp->sp_wmesg, sp->sp_count);
1417 		}
1418 	}
1419 	mtx_lock_spin(&sleepq_prof_lock);
1420 	prof_enabled = enabled;
1421 	mtx_unlock_spin(&sleepq_prof_lock);
1422 
1423 	error = sbuf_finish(sb);
1424 	sbuf_delete(sb);
1425 	return (error);
1426 }
1427 
1428 SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats,
1429     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0,
1430     dump_sleepq_prof_stats, "A",
1431     "Sleepqueue profiling statistics");
1432 SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset,
1433     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
1434     reset_sleepq_prof_stats, "I",
1435     "Reset sleepqueue profiling statistics");
1436 SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable,
1437     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
1438     enable_sleepq_prof, "I",
1439     "Enable sleepqueue profiling");
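
/*
 * From userland, the knobs above are driven with sysctl(8) on a kernel
 * built with SLEEPQUEUE_PROFILING, e.g.:
 *
 *	sysctl debug.sleepq.enable=1
 *	(run the workload of interest)
 *	sysctl debug.sleepq.stats
 *	sysctl debug.sleepq.reset=1
 */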
1440 #endif
1441 
1442 #ifdef DDB
1443 DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
1444 {
1445 	struct sleepqueue_chain *sc;
1446 	struct sleepqueue *sq;
1447 #ifdef INVARIANTS
1448 	struct lock_object *lock;
1449 #endif
1450 	struct thread *td;
1451 	void *wchan;
1452 	int i;
1453 
1454 	if (!have_addr)
1455 		return;
1456 
1457 	/*
1458 	 * First, see if there is an active sleep queue for the wait channel
1459 	 * indicated by the address.
1460 	 */
1461 	wchan = (void *)addr;
1462 	sc = SC_LOOKUP(wchan);
1463 	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
1464 		if (sq->sq_wchan == wchan)
1465 			goto found;
1466 
1467 	/*
1468 	 * Second, see if there is an active sleep queue at the address
1469 	 * indicated.
1470 	 */
1471 	for (i = 0; i < SC_TABLESIZE; i++)
1472 		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
1473 			if (sq == (struct sleepqueue *)addr)
1474 				goto found;
1475 		}
1476 
1477 	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
1478 	return;
1479 found:
1480 	db_printf("Wait channel: %p\n", sq->sq_wchan);
1481 	db_printf("Queue type: %d\n", sq->sq_type);
1482 #ifdef INVARIANTS
1483 	if (sq->sq_lock) {
1484 		lock = sq->sq_lock;
1485 		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
1486 		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
1487 	}
1488 #endif
1489 	db_printf("Blocked threads:\n");
1490 	for (i = 0; i < NR_SLEEPQS; i++) {
1491 		db_printf("\nQueue[%d]:\n", i);
1492 		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
1493 			db_printf("\tempty\n");
1494 		else
1495 			TAILQ_FOREACH(td, &sq->sq_blocked[i],
1496 				      td_slpq) {
1497 				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
1498 					  td->td_tid, td->td_proc->p_pid,
1499 					  td->td_name);
1500 			}
1501 		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
1502 	}
1503 }
1504 
1505 /* Alias 'show sleepqueue' to 'show sleepq'. */
1506 DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
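
/*
 * From the ddb(4) prompt the command is invoked as, e.g.:
 *
 *	db> show sleepq 0xfffff80012345678
 *
 * where the address (a hypothetical one here) may be either a wait
 * channel or the sleepqueue structure itself, per the two lookups above.
 */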
1507 #endif
1508