xref: /freebsd/sys/kern/subr_sleepqueue.c (revision 8b238f4126d32df3e70056bc32536b7248ebffa0)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 /*
29  * Implementation of sleep queues used to hold the queue of threads blocked on
30  * a wait channel.  Sleep queues are different from turnstiles in that wait
31  * channels are not owned by anyone, so there is no priority propagation.
32  * Sleep queues can also provide a timeout and can be interrupted by
33  * signals.  That said, there are several similarities between the turnstile
34  * and sleep queue implementations.  (Note: turnstiles were implemented
35  * first.)  For example, both use a hash table of the same size where each
36  * bucket is referred to as a "chain" that contains both a spin lock and
37  * a linked list of queues.  An individual queue is located by using a hash
38  * to pick a chain, locking the chain, and then walking the chain searching
39  * for the queue.  This means that a wait channel object does not need to
40  * embed its queue head just as locks do not embed their turnstile queue
41  * head.  Threads also carry around a sleep queue that they lend to the
42  * wait channel when blocking.  Just as in turnstiles, the queue includes
43  * a free list of the sleep queues of other threads blocked on the same
44  * wait channel in the case of multiple waiters.
45  *
46  * Some additional functionality provided by sleep queues includes the
47  * ability to set a timeout.  The timeout is managed using a per-thread
48  * callout that resumes a thread if it is asleep.  A thread may also
49  * catch signals while it is asleep (aka an interruptible sleep).  The
50  * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
51  * sleep queues also provide some extra assertions.  One is not allowed to
52  * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
53  * must consistently use the same lock to synchronize with a wait channel,
54  * though this check is currently only a warning for sleep/wakeup due to
55  * pre-existing abuse of that API.  The same lock must also be held when
56  * awakening threads, though that is currently only enforced for condition
57  * variables.
58  */
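/*
 * Example (editor's illustrative sketch): a consumer such as _sleep() in
 * kern_synch.c drives this API roughly as follows on the sleeping side:
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, &lock->lock_object, wmesg, SLEEPQ_SLEEP, 0);
 *	sleepq_wait(wchan, pri);
 *
 * An interruptible sleep additionally passes SLEEPQ_INTERRUPTIBLE to
 * sleepq_add() and blocks via one of the *_sig() wait routines.  The
 * sleepq_wait*() routines consume the chain lock acquired by
 * sleepq_lock(), so the blocking path does not call sleepq_release().
 */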
59 
60 #include <sys/cdefs.h>
61 __FBSDID("$FreeBSD$");
62 
63 #include "opt_sleepqueue_profiling.h"
64 #include "opt_ddb.h"
65 #include "opt_sched.h"
66 #include "opt_stack.h"
67 
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/lock.h>
71 #include <sys/kernel.h>
72 #include <sys/ktr.h>
73 #include <sys/mutex.h>
74 #include <sys/proc.h>
75 #include <sys/sbuf.h>
76 #include <sys/sched.h>
77 #include <sys/sdt.h>
78 #include <sys/signalvar.h>
79 #include <sys/sleepqueue.h>
80 #include <sys/stack.h>
81 #include <sys/sysctl.h>
82 #include <sys/time.h>
83 #ifdef EPOCH_TRACE
84 #include <sys/epoch.h>
85 #endif
86 
87 #include <machine/atomic.h>
88 
89 #include <vm/uma.h>
90 
91 #ifdef DDB
92 #include <ddb/ddb.h>
93 #endif
94 
95 
96 /*
97  * Constants for the hash table of sleep queue chains.
98  * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
99  */
100 #ifndef SC_TABLESIZE
101 #define	SC_TABLESIZE	256
102 #endif
103 CTASSERT(powerof2(SC_TABLESIZE));
104 #define	SC_MASK		(SC_TABLESIZE - 1)
105 #define	SC_SHIFT	8
106 #define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
107 			    SC_MASK)
108 #define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
109 #define NR_SLEEPQS      2
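/*
 * Worked example of SC_HASH (editor's sketch): with SC_SHIFT == 8 and
 * SC_TABLESIZE == 256, only the low two bytes of the wait channel
 * address contribute.  For wc == 0xffffffff80123456:
 *
 *	wc >> SC_SHIFT  == 0xffffffffff801234
 *	(wc >> 8) ^ wc  -> low byte 0x34 ^ 0x56 == 0x62
 *	... & SC_MASK   -> chain index 0x62
 *
 * Folding the second byte into the first spreads out wait channels that
 * differ only in their low-order address bits, such as adjacent fields
 * of a single structure.
 */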
110 /*
111  * There are two different lists of sleep queues.  Both lists are connected
112  * via the sq_hash entries.  The first list is the sleep queue chain list
113  * that a sleep queue is on when it is attached to a wait channel.  The
114  * second list is the free list hung off of a sleep queue that is attached
115  * to a wait channel.
116  *
117  * Each sleep queue also contains the wait channel it is attached to, the
118  * list of threads blocked on that wait channel, flags specific to the
119  * wait channel, and the lock used to synchronize with a wait channel.
120  * The flags are used to catch mismatches between the various consumers
121  * of the sleep queue API (e.g. sleep/wakeup and condition variables).
122  * The lock pointer is only used when invariants are enabled for various
123  * debugging checks.
124  *
125  * Locking key:
126  *  c - sleep queue chain lock
127  */
128 struct sleepqueue {
129 	struct threadqueue sq_blocked[NR_SLEEPQS]; /* (c) Blocked threads. */
130 	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
131 	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
132 	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
133 	void	*sq_wchan;			/* (c) Wait channel. */
134 	int	sq_type;			/* (c) Queue type. */
135 #ifdef INVARIANTS
136 	struct lock_object *sq_lock;		/* (c) Associated lock. */
137 #endif
138 };
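/*
 * Editor's sketch of the containment described above: with threads T1,
 * T2, and T3 all blocked on wait channel W (T1 arrived first, so its
 * sleep queue is the one attached to the chain):
 *
 *	sc_queues -> sq(T1): sq_wchan = W
 *	                     sq_blocked[q]: T1 -> T2 -> T3
 *	                     sq_free: sq(T2) -> sq(T3)
 *
 * Each thread that is resumed leaves with some queue; the final waiter
 * to be resumed reclaims the attached queue sq(T1) itself and removes
 * it from the chain.
 */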
139 
140 struct sleepqueue_chain {
141 	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
142 	struct mtx sc_lock;			/* Spin lock for this chain. */
143 #ifdef SLEEPQUEUE_PROFILING
144 	u_int	sc_depth;			/* Length of sc_queues. */
145 	u_int	sc_max_depth;			/* Max length of sc_queues. */
146 #endif
147 } __aligned(CACHE_LINE_SIZE);
148 
149 #ifdef SLEEPQUEUE_PROFILING
150 u_int sleepq_max_depth;
151 static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
152 static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
153     "sleepq chain stats");
154 SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
155     0, "maxmimum depth achieved of a single chain");
156 
157 static void	sleepq_profile(const char *wmesg);
158 static int	prof_enabled;
159 #endif
160 static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
161 static uma_zone_t sleepq_zone;
162 
163 /*
164  * Prototypes for non-exported routines.
165  */
166 static int	sleepq_catch_signals(void *wchan, int pri);
167 static int	sleepq_check_signals(void);
168 static int	sleepq_check_timeout(void);
169 #ifdef INVARIANTS
170 static void	sleepq_dtor(void *mem, int size, void *arg);
171 #endif
172 static int	sleepq_init(void *mem, int size, int flags);
173 static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
174 		    int pri);
175 static void	sleepq_switch(void *wchan, int pri);
176 static void	sleepq_timeout(void *arg);
177 
178 SDT_PROBE_DECLARE(sched, , , sleep);
179 SDT_PROBE_DECLARE(sched, , , wakeup);
180 
181 /*
182  * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes.
183  * Note that it must happen after sleepinit() has been fully executed, so
184  * it must happen after SI_SUB_KMEM SYSINIT() subsystem setup.
185  */
186 #ifdef SLEEPQUEUE_PROFILING
187 static void
188 init_sleepqueue_profiling(void)
189 {
190 	char chain_name[10];
191 	struct sysctl_oid *chain_oid;
192 	u_int i;
193 
194 	for (i = 0; i < SC_TABLESIZE; i++) {
195 		snprintf(chain_name, sizeof(chain_name), "%u", i);
196 		chain_oid = SYSCTL_ADD_NODE(NULL,
197 		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
198 		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
199 		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
200 		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
201 		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
202 		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
203 		    NULL);
204 	}
205 }
206 
207 SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
208     init_sleepqueue_profiling, NULL);
209 #endif
210 
211 /*
212  * Early initialization of sleep queues that is called from the sleepinit()
213  * SYSINIT.
214  */
215 void
216 init_sleepqueues(void)
217 {
218 	int i;
219 
220 	for (i = 0; i < SC_TABLESIZE; i++) {
221 		LIST_INIT(&sleepq_chains[i].sc_queues);
222 		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
223 		    MTX_SPIN | MTX_RECURSE);
224 	}
225 	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
226 #ifdef INVARIANTS
227 	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
228 #else
229 	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
230 #endif
231 
232 	thread0.td_sleepqueue = sleepq_alloc();
233 }
234 
235 /*
236  * Get a sleep queue for a new thread.
237  */
238 struct sleepqueue *
239 sleepq_alloc(void)
240 {
241 
242 	return (uma_zalloc(sleepq_zone, M_WAITOK));
243 }
244 
245 /*
246  * Free a sleep queue when a thread is destroyed.
247  */
248 void
249 sleepq_free(struct sleepqueue *sq)
250 {
251 
252 	uma_zfree(sleepq_zone, sq);
253 }
254 
255 /*
256  * Lock the sleep queue chain associated with the specified wait channel.
257  */
258 void
259 sleepq_lock(void *wchan)
260 {
261 	struct sleepqueue_chain *sc;
262 
263 	sc = SC_LOOKUP(wchan);
264 	mtx_lock_spin(&sc->sc_lock);
265 }
266 
267 /*
268  * Look up the sleep queue associated with a given wait channel in the hash
269  * table locking the associated sleep queue chain.  If no queue is found in
270  * the table, NULL is returned.
271  */
272 struct sleepqueue *
273 sleepq_lookup(void *wchan)
274 {
275 	struct sleepqueue_chain *sc;
276 	struct sleepqueue *sq;
277 
278 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
279 	sc = SC_LOOKUP(wchan);
280 	mtx_assert(&sc->sc_lock, MA_OWNED);
281 	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
282 		if (sq->sq_wchan == wchan)
283 			return (sq);
284 	return (NULL);
285 }
286 
287 /*
288  * Unlock the sleep queue chain associated with a given wait channel.
289  */
290 void
291 sleepq_release(void *wchan)
292 {
293 	struct sleepqueue_chain *sc;
294 
295 	sc = SC_LOOKUP(wchan);
296 	mtx_unlock_spin(&sc->sc_lock);
297 }
298 
299 /*
300  * Places the current thread on the sleep queue for the specified wait
301  * channel.  If INVARIANTS is enabled, then it associates the passed in
302  * lock with the sleepq to make sure it is held when that sleep queue is
303  * woken up.
304  */
305 void
306 sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
307     int queue)
308 {
309 	struct sleepqueue_chain *sc;
310 	struct sleepqueue *sq;
311 	struct thread *td;
312 
313 	td = curthread;
314 	sc = SC_LOOKUP(wchan);
315 	mtx_assert(&sc->sc_lock, MA_OWNED);
316 	MPASS(td->td_sleepqueue != NULL);
317 	MPASS(wchan != NULL);
318 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
319 
320 	/* If this thread is not allowed to sleep, die a horrible death. */
321 	if (__predict_false(!THREAD_CAN_SLEEP())) {
322 #ifdef EPOCH_TRACE
323 		epoch_trace_list(curthread);
324 #endif
325 		KASSERT(0,
326 		    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
327 		    __func__, td, wchan));
328 	}
329 
330 	/* Look up the sleep queue associated with the wait channel 'wchan'. */
331 	sq = sleepq_lookup(wchan);
332 
333 	/*
334 	 * If the wait channel does not already have a sleep queue, use
335 	 * this thread's sleep queue.  Otherwise, insert the current thread
336 	 * into the sleep queue already in use by this wait channel.
337 	 */
338 	if (sq == NULL) {
339 #ifdef INVARIANTS
340 		int i;
341 
342 		sq = td->td_sleepqueue;
343 		for (i = 0; i < NR_SLEEPQS; i++) {
344 			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
345 			    ("thread's sleep queue %d is not empty", i));
346 			KASSERT(sq->sq_blockedcnt[i] == 0,
347 			    ("thread's sleep queue %d count mismatches", i));
348 		}
349 		KASSERT(LIST_EMPTY(&sq->sq_free),
350 		    ("thread's sleep queue has a non-empty free list"));
351 		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
352 		sq->sq_lock = lock;
353 #endif
354 #ifdef SLEEPQUEUE_PROFILING
355 		sc->sc_depth++;
356 		if (sc->sc_depth > sc->sc_max_depth) {
357 			sc->sc_max_depth = sc->sc_depth;
358 			if (sc->sc_max_depth > sleepq_max_depth)
359 				sleepq_max_depth = sc->sc_max_depth;
360 		}
361 #endif
362 		sq = td->td_sleepqueue;
363 		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
364 		sq->sq_wchan = wchan;
365 		sq->sq_type = flags & SLEEPQ_TYPE;
366 	} else {
367 		MPASS(wchan == sq->sq_wchan);
368 		MPASS(lock == sq->sq_lock);
369 		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
370 		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
371 	}
372 	thread_lock(td);
373 	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
374 	sq->sq_blockedcnt[queue]++;
375 	td->td_sleepqueue = NULL;
376 	td->td_sqqueue = queue;
377 	td->td_wchan = wchan;
378 	td->td_wmesg = wmesg;
379 	if (flags & SLEEPQ_INTERRUPTIBLE) {
380 		td->td_flags |= TDF_SINTR;
381 		td->td_flags &= ~TDF_SLEEPABORT;
382 	}
383 	thread_unlock(td);
384 }
385 
386 /*
387  * Sets a timeout that will remove the current thread from the specified sleep
388  * queue when the sbt timeout expires if the thread has not already been awakened.
389  */
390 void
391 sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
392     int flags)
393 {
394 	struct sleepqueue_chain *sc __unused;
395 	struct thread *td;
396 	sbintime_t pr1;
397 
398 	td = curthread;
399 	sc = SC_LOOKUP(wchan);
400 	mtx_assert(&sc->sc_lock, MA_OWNED);
401 	MPASS(TD_ON_SLEEPQ(td));
402 	MPASS(td->td_sleepqueue == NULL);
403 	MPASS(wchan != NULL);
404 	if (cold && td == &thread0)
405 		panic("timed sleep before timers are working");
406 	KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx",
407 	    td->td_tid, td, (uintmax_t)td->td_sleeptimo));
408 	thread_lock(td);
409 	callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
410 	thread_unlock(td);
411 	callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
412 	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC |
413 	    C_DIRECT_EXEC);
414 }
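/*
 * Example (editor's sketch): a timed, uninterruptible sleep of roughly
 * one second.  The chain lock must be held, as asserted above:
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, NULL, "example", SLEEPQ_SLEEP, 0);
 *	sleepq_set_timeout_sbt(wchan, SBT_1S, 0, C_HARDCLOCK);
 *	error = sleepq_timedwait(wchan, 0);
 *
 * error is EWOULDBLOCK if the callout fired before a wakeup arrived.
 */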
415 
416 /*
417  * Return the number of actual sleepers for the specified queue.
418  */
419 u_int
420 sleepq_sleepcnt(void *wchan, int queue)
421 {
422 	struct sleepqueue *sq;
423 
424 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
425 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
426 	sq = sleepq_lookup(wchan);
427 	if (sq == NULL)
428 		return (0);
429 	return (sq->sq_blockedcnt[queue]);
430 }
431 
432 /*
433  * Marks the pending sleep of the current thread as interruptible and
434  * makes an initial check for pending signals before putting a thread
435  * to sleep. Enters and exits with the thread lock held.  Thread lock
436  * may have transitioned from the sleepq lock to a run lock.
437  */
438 static int
439 sleepq_catch_signals(void *wchan, int pri)
440 {
441 	struct sleepqueue_chain *sc;
442 	struct sleepqueue *sq;
443 	struct thread *td;
444 	struct proc *p;
445 	struct sigacts *ps;
446 	int sig, ret;
447 
448 	ret = 0;
449 	td = curthread;
450 	p = curproc;
451 	sc = SC_LOOKUP(wchan);
452 	mtx_assert(&sc->sc_lock, MA_OWNED);
453 	MPASS(wchan != NULL);
454 	if ((td->td_pflags & TDP_WAKEUP) != 0) {
455 		td->td_pflags &= ~TDP_WAKEUP;
456 		ret = EINTR;
457 		thread_lock(td);
458 		goto out;
459 	}
460 
461 	/*
462 	 * See if there are any pending signals or suspension requests for this
463 	 * thread.  If not, we can switch immediately.
464 	 */
465 	thread_lock(td);
466 	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) != 0) {
467 		thread_unlock(td);
468 		mtx_unlock_spin(&sc->sc_lock);
469 		CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
470 			(void *)td, (long)p->p_pid, td->td_name);
471 		PROC_LOCK(p);
472 		/*
473 		 * Check for suspension first. Checking for signals and then
474 		 * suspending could result in a missed signal, since a signal
475 		 * can be delivered while this thread is suspended.
476 		 */
477 		if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
478 			ret = thread_suspend_check(1);
479 			MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
480 			if (ret != 0) {
481 				PROC_UNLOCK(p);
482 				mtx_lock_spin(&sc->sc_lock);
483 				thread_lock(td);
484 				goto out;
485 			}
486 		}
487 		if ((td->td_flags & TDF_NEEDSIGCHK) != 0) {
488 			ps = p->p_sigacts;
489 			mtx_lock(&ps->ps_mtx);
490 			sig = cursig(td);
491 			if (sig == -1) {
492 				mtx_unlock(&ps->ps_mtx);
493 				KASSERT((td->td_flags & TDF_SBDRY) != 0,
494 				    ("lost TDF_SBDRY"));
495 				    ("lost TDF_SERESTART or TDF_SEINTR"));
496 				    ("lost TDF_SERESTART of TDF_SEINTR"));
497 				KASSERT((td->td_flags &
498 				    (TDF_SEINTR | TDF_SERESTART)) !=
499 				    (TDF_SEINTR | TDF_SERESTART),
500 				    ("both TDF_SEINTR and TDF_SERESTART"));
501 				ret = TD_SBDRY_ERRNO(td);
502 			} else if (sig != 0) {
503 				ret = SIGISMEMBER(ps->ps_sigintr, sig) ?
504 				    EINTR : ERESTART;
505 				mtx_unlock(&ps->ps_mtx);
506 			} else {
507 				mtx_unlock(&ps->ps_mtx);
508 			}
509 
510 			/*
511 			 * Do not go into sleep if this thread was the
512 			 * ptrace(2) attach leader.  cursig() consumed
513 			 * SIGSTOP from PT_ATTACH, but we usually act
514 			 * on the signal by interrupting sleep, and
515 			 * should do that here as well.
516 			 */
517 			if ((td->td_dbgflags & TDB_FSTP) != 0) {
518 				if (ret == 0)
519 					ret = EINTR;
520 				td->td_dbgflags &= ~TDB_FSTP;
521 			}
522 		}
523 		/*
524 		 * Lock the per-process spinlock prior to dropping the PROC_LOCK
525 		 * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
526 		 * thread_lock() are currently held in tdsendsignal().
527 		 */
528 		PROC_SLOCK(p);
529 		mtx_lock_spin(&sc->sc_lock);
530 		PROC_UNLOCK(p);
531 		thread_lock(td);
532 		PROC_SUNLOCK(p);
533 	}
534 	if (ret == 0) {
535 		sleepq_switch(wchan, pri);
536 		return (0);
537 	}
538 out:
539 	/*
540 	 * There were pending signals and this thread is still
541 	 * on the sleep queue; remove it from the sleep queue.
542 	 */
543 	if (TD_ON_SLEEPQ(td)) {
544 		sq = sleepq_lookup(wchan);
545 		if (sleepq_resume_thread(sq, td, 0)) {
546 #ifdef INVARIANTS
547 			/*
548 			 * This thread hasn't gone to sleep yet, so it
549 			 * should not be swapped out.
550 			 */
551 			panic("not waking up swapper");
552 #endif
553 		}
554 	}
555 	mtx_unlock_spin(&sc->sc_lock);
556 	MPASS(td->td_lock != &sc->sc_lock);
557 	return (ret);
558 }
559 
560 /*
561  * Switches to another thread if we are still asleep on a sleep queue.
562  * Returns with thread lock.
563  */
564 static void
565 sleepq_switch(void *wchan, int pri)
566 {
567 	struct sleepqueue_chain *sc;
568 	struct sleepqueue *sq;
569 	struct thread *td;
570 	bool rtc_changed;
571 
572 	td = curthread;
573 	sc = SC_LOOKUP(wchan);
574 	mtx_assert(&sc->sc_lock, MA_OWNED);
575 	THREAD_LOCK_ASSERT(td, MA_OWNED);
576 
577 	/*
578 	 * If we have a sleep queue, then we've already been woken up, so
579 	 * just return.
580 	 */
581 	if (td->td_sleepqueue != NULL) {
582 		mtx_unlock_spin(&sc->sc_lock);
583 		return;
584 	}
585 
586 	/*
587 	 * If TDF_TIMEOUT is set, then our sleep has been timed out
588 	 * already but we are still on the sleep queue, so dequeue the
589 	 * thread and return.
590 	 *
591 	 * Do the same if the real-time clock has been adjusted since this
592 	 * thread calculated its timeout based on that clock.  This handles
593 	 * the following race:
594 	 * - The Ts thread needs to sleep until an absolute real-clock time.
595 	 *   It copies the global rtc_generation into curthread->td_rtcgen,
596 	 *   reads the RTC, and calculates a sleep duration based on that time.
597 	 *   See umtxq_sleep() for an example.
598 	 * - The Tc thread adjusts the RTC, bumps rtc_generation, and wakes
599 	 *   threads that are sleeping until an absolute real-clock time.
600 	 *   See tc_setclock() and the POSIX specification of clock_settime().
601 	 * - Ts reaches the code below.  It holds the sleepqueue chain lock,
602 	 *   so Tc has finished waking, so this thread must test td_rtcgen.
603 	 * (The declaration of td_rtcgen refers to this comment.)
604 	 */
605 	rtc_changed = td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation;
606 	if ((td->td_flags & TDF_TIMEOUT) || rtc_changed) {
607 		if (rtc_changed) {
608 			td->td_rtcgen = 0;
609 		}
610 		MPASS(TD_ON_SLEEPQ(td));
611 		sq = sleepq_lookup(wchan);
612 		if (sleepq_resume_thread(sq, td, 0)) {
613 #ifdef INVARIANTS
614 			/*
615 			 * This thread hasn't gone to sleep yet, so it
616 			 * should not be swapped out.
617 			 */
618 			panic("not waking up swapper");
619 #endif
620 		}
621 		mtx_unlock_spin(&sc->sc_lock);
622 		return;
623 	}
624 #ifdef SLEEPQUEUE_PROFILING
625 	if (prof_enabled)
626 		sleepq_profile(td->td_wmesg);
627 #endif
628 	MPASS(td->td_sleepqueue == NULL);
629 	sched_sleep(td, pri);
630 	thread_lock_set(td, &sc->sc_lock);
631 	SDT_PROBE0(sched, , , sleep);
632 	TD_SET_SLEEPING(td);
633 	mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
634 	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
635 	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
636 	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
637 }
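/*
 * Editor's sketch of the td_rtcgen protocol described above, loosely
 * modeled on umtxq_sleep() (see kern_umtx.c for the real version):
 *
 *	td->td_rtcgen = atomic_load_acq_int(&rtc_generation);
 *	sbt = ...;		(duration derived from the current RTC)
 *	error = sleepq_timedwait(wchan, 0);
 *	td->td_rtcgen = 0;
 *
 * A wakeup caused only by an RTC adjustment returns without error, so
 * the caller re-reads the clock and sleeps again if its absolute
 * deadline has not actually passed.
 */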
638 
639 /*
640  * Check to see if we timed out.
641  */
642 static int
643 sleepq_check_timeout(void)
644 {
645 	struct thread *td;
646 	int res;
647 
648 	td = curthread;
649 	THREAD_LOCK_ASSERT(td, MA_OWNED);
650 
651 	/*
652 	 * If TDF_TIMEOUT is set, we timed out.  But recheck
653 	 * td_sleeptimo anyway.
654 	 */
655 	res = 0;
656 	if (td->td_sleeptimo != 0) {
657 		if (td->td_sleeptimo <= sbinuptime())
658 			res = EWOULDBLOCK;
659 		td->td_sleeptimo = 0;
660 	}
661 	if (td->td_flags & TDF_TIMEOUT)
662 		td->td_flags &= ~TDF_TIMEOUT;
663 	else
664 		/*
665 		 * We ignore the situation where timeout subsystem was
666 		 * unable to stop our callout.  The struct thread is
667 		 * type-stable, the callout will use the correct
668 		 * memory when running.  The checks of the
669 		 * td_sleeptimo value in this function and in
670 		 * sleepq_timeout() ensure that the thread does not
671 		 * get spurious wakeups, even if the callout was reset
672 		 * or thread reused.
673 		 */
674 		callout_stop(&td->td_slpcallout);
675 	return (res);
676 }
677 
678 /*
679  * Check to see if we were awoken by a signal.
680  */
681 static int
682 sleepq_check_signals(void)
683 {
684 	struct thread *td;
685 
686 	td = curthread;
687 	THREAD_LOCK_ASSERT(td, MA_OWNED);
688 
689 	/* We are no longer in an interruptible sleep. */
690 	if (td->td_flags & TDF_SINTR)
691 		td->td_flags &= ~TDF_SINTR;
692 
693 	if (td->td_flags & TDF_SLEEPABORT) {
694 		td->td_flags &= ~TDF_SLEEPABORT;
695 		return (td->td_intrval);
696 	}
697 
698 	return (0);
699 }
700 
701 /*
702  * Block the current thread until it is awakened from its sleep queue.
703  */
704 void
705 sleepq_wait(void *wchan, int pri)
706 {
707 	struct thread *td;
708 
709 	td = curthread;
710 	MPASS(!(td->td_flags & TDF_SINTR));
711 	thread_lock(td);
712 	sleepq_switch(wchan, pri);
713 	thread_unlock(td);
714 }
715 
716 /*
717  * Block the current thread until it is awakened from its sleep queue
718  * or it is interrupted by a signal.
719  */
720 int
721 sleepq_wait_sig(void *wchan, int pri)
722 {
723 	int rcatch;
724 	int rval;
725 
726 	rcatch = sleepq_catch_signals(wchan, pri);
727 	rval = sleepq_check_signals();
728 	thread_unlock(curthread);
729 	if (rcatch)
730 		return (rcatch);
731 	return (rval);
732 }
733 
734 /*
735  * Block the current thread until it is awakened from its sleep queue
736  * or it times out while waiting.
737  */
738 int
739 sleepq_timedwait(void *wchan, int pri)
740 {
741 	struct thread *td;
742 	int rval;
743 
744 	td = curthread;
745 	MPASS(!(td->td_flags & TDF_SINTR));
746 	thread_lock(td);
747 	sleepq_switch(wchan, pri);
748 	rval = sleepq_check_timeout();
749 	thread_unlock(td);
750 
751 	return (rval);
752 }
753 
754 /*
755  * Block the current thread until it is awakened from its sleep queue,
756  * it is interrupted by a signal, or it times out waiting to be awakened.
757  */
758 int
759 sleepq_timedwait_sig(void *wchan, int pri)
760 {
761 	int rcatch, rvalt, rvals;
762 
763 	rcatch = sleepq_catch_signals(wchan, pri);
764 	rvalt = sleepq_check_timeout();
765 	rvals = sleepq_check_signals();
766 	thread_unlock(curthread);
767 	if (rcatch)
768 		return (rcatch);
769 	if (rvals)
770 		return (rvals);
771 	return (rvalt);
772 }
773 
774 /*
775  * Returns the type of sleep queue given a wait channel.
776  */
777 int
778 sleepq_type(void *wchan)
779 {
780 	struct sleepqueue *sq;
781 	int type;
782 
783 	MPASS(wchan != NULL);
784 
785 	sleepq_lock(wchan);
786 	sq = sleepq_lookup(wchan);
787 	if (sq == NULL) {
788 		sleepq_release(wchan);
789 		return (-1);
790 	}
791 	type = sq->sq_type;
792 	sleepq_release(wchan);
793 	return (type);
794 }
795 
796 /*
797  * Removes a thread from a sleep queue and makes it
798  * runnable.
799  */
800 static int
801 sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
802 {
803 	struct sleepqueue_chain *sc __unused;
804 
805 	MPASS(td != NULL);
806 	MPASS(sq->sq_wchan != NULL);
807 	MPASS(td->td_wchan == sq->sq_wchan);
808 	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
809 	THREAD_LOCK_ASSERT(td, MA_OWNED);
810 	sc = SC_LOOKUP(sq->sq_wchan);
811 	mtx_assert(&sc->sc_lock, MA_OWNED);
812 
813 	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);
814 
815 	/* Remove the thread from the queue. */
816 	sq->sq_blockedcnt[td->td_sqqueue]--;
817 	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);
818 
819 	/*
820 	 * Get a sleep queue for this thread.  If this is the last waiter,
821 	 * use the queue itself and take it out of the chain, otherwise,
822 	 * remove a queue from the free list.
823 	 */
824 	if (LIST_EMPTY(&sq->sq_free)) {
825 		td->td_sleepqueue = sq;
826 #ifdef INVARIANTS
827 		sq->sq_wchan = NULL;
828 #endif
829 #ifdef SLEEPQUEUE_PROFILING
830 		sc->sc_depth--;
831 #endif
832 	} else
833 		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
834 	LIST_REMOVE(td->td_sleepqueue, sq_hash);
835 
836 	td->td_wmesg = NULL;
837 	td->td_wchan = NULL;
838 	td->td_flags &= ~TDF_SINTR;
839 
840 	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
841 	    (void *)td, (long)td->td_proc->p_pid, td->td_name);
842 
843 	/* Adjust priority if requested. */
844 	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
845 	if (pri != 0 && td->td_priority > pri &&
846 	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
847 		sched_prio(td, pri);
848 
849 	/*
850 	 * Note that thread td might not be sleeping if it is running
851 	 * sleepq_catch_signals() on another CPU or is blocked on its
852 	 * proc lock to check signals.  There's no need to mark the
853 	 * thread runnable in that case.
854 	 */
855 	if (TD_IS_SLEEPING(td)) {
856 		TD_CLR_SLEEPING(td);
857 		return (setrunnable(td));
858 	}
859 	return (0);
860 }
861 
862 #ifdef INVARIANTS
863 /*
864  * UMA zone item deallocator.
865  */
866 static void
867 sleepq_dtor(void *mem, int size, void *arg)
868 {
869 	struct sleepqueue *sq;
870 	int i;
871 
872 	sq = mem;
873 	for (i = 0; i < NR_SLEEPQS; i++) {
874 		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
875 		MPASS(sq->sq_blockedcnt[i] == 0);
876 	}
877 }
878 #endif
879 
880 /*
881  * UMA zone item initializer.
882  */
883 static int
884 sleepq_init(void *mem, int size, int flags)
885 {
886 	struct sleepqueue *sq;
887 	int i;
888 
889 	bzero(mem, size);
890 	sq = mem;
891 	for (i = 0; i < NR_SLEEPQS; i++) {
892 		TAILQ_INIT(&sq->sq_blocked[i]);
893 		sq->sq_blockedcnt[i] = 0;
894 	}
895 	LIST_INIT(&sq->sq_free);
896 	return (0);
897 }
898 
899 /*
900  * Find a thread sleeping on a wait channel and resume it.
901  */
902 int
903 sleepq_signal(void *wchan, int flags, int pri, int queue)
904 {
905 	struct sleepqueue_chain *sc;
906 	struct sleepqueue *sq;
907 	struct threadqueue *head;
908 	struct thread *td, *besttd;
909 	int wakeup_swapper;
910 
911 	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
912 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
913 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
914 	sq = sleepq_lookup(wchan);
915 	if (sq == NULL)
916 		return (0);
917 	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
918 	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
919 
920 	head = &sq->sq_blocked[queue];
921 	if (flags & SLEEPQ_UNFAIR) {
922 		/*
923 		 * Find the most recently sleeping thread, but try to
924 		 * skip threads still in process of context switch to
925 		 * avoid spinning on the thread lock.
926 		 */
927 		sc = SC_LOOKUP(wchan);
928 		besttd = TAILQ_LAST_FAST(head, thread, td_slpq);
929 		while (besttd->td_lock != &sc->sc_lock) {
930 			td = TAILQ_PREV_FAST(besttd, head, thread, td_slpq);
931 			if (td == NULL)
932 				break;
933 			besttd = td;
934 		}
935 	} else {
936 		/*
937 		 * Find the highest priority thread on the queue.  If there
938 		 * is a tie, use the thread that first appears in the queue
939 		 * as it has been sleeping the longest since threads are
940 		 * always added to the tail of sleep queues.
941 		 */
942 		besttd = td = TAILQ_FIRST(head);
943 		while ((td = TAILQ_NEXT(td, td_slpq)) != NULL) {
944 			if (td->td_priority < besttd->td_priority)
945 				besttd = td;
946 		}
947 	}
948 	MPASS(besttd != NULL);
949 	thread_lock(besttd);
950 	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
951 	thread_unlock(besttd);
952 	return (wakeup_swapper);
953 }
954 
955 static bool
956 match_any(struct thread *td __unused)
957 {
958 
959 	return (true);
960 }
961 
962 /*
963  * Resume all threads sleeping on a specified wait channel.
964  */
965 int
966 sleepq_broadcast(void *wchan, int flags, int pri, int queue)
967 {
968 	struct sleepqueue *sq;
969 
970 	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
971 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
972 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
973 	sq = sleepq_lookup(wchan);
974 	if (sq == NULL)
975 		return (0);
976 	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
977 	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
978 
979 	return (sleepq_remove_matching(sq, queue, match_any, pri));
980 }
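/*
 * Example (editor's sketch): wakeup() and wakeup_one() in kern_synch.c
 * are thin wrappers around the two routines above, approximately:
 *
 *	sleepq_lock(ident);
 *	wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(ident);
 *	if (wakeup_swapper)
 *		kick_proc0();
 *
 * wakeup_one() substitutes sleepq_signal() to resume a single thread.
 */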
981 
982 /*
983  * Resume threads on the sleep queue that match the given predicate.
984  */
985 int
986 sleepq_remove_matching(struct sleepqueue *sq, int queue,
987     bool (*matches)(struct thread *), int pri)
988 {
989 	struct thread *td, *tdn;
990 	int wakeup_swapper;
991 
992 	/*
993 	 * The last thread will be given ownership of sq and may
994 	 * re-enqueue itself before sleepq_resume_thread() returns,
995 	 * so we must cache the "next" queue item at the beginning
996 	 * of the final iteration.
997 	 */
998 	wakeup_swapper = 0;
999 	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
1000 		thread_lock(td);
1001 		if (matches(td))
1002 			wakeup_swapper |= sleepq_resume_thread(sq, td, pri);
1003 		thread_unlock(td);
1004 	}
1005 
1006 	return (wakeup_swapper);
1007 }
1008 
1009 /*
1010  * Time sleeping threads out.  When the timeout expires, the thread is
1011  * removed from the sleep queue and made runnable if it is still asleep.
1012  */
1013 static void
1014 sleepq_timeout(void *arg)
1015 {
1016 	struct sleepqueue_chain *sc __unused;
1017 	struct sleepqueue *sq;
1018 	struct thread *td;
1019 	void *wchan;
1020 	int wakeup_swapper;
1021 
1022 	td = arg;
1023 	wakeup_swapper = 0;
1024 	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
1025 	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
1026 
1027 	thread_lock(td);
1028 
1029 	if (td->td_sleeptimo > sbinuptime() || td->td_sleeptimo == 0) {
1030 		/*
1031 		 * The thread does not want a timeout (yet).
1032 		 */
1033 	} else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
1034 		/*
1035 		 * See if the thread is asleep and get the wait
1036 		 * channel if it is.
1037 		 */
1038 		wchan = td->td_wchan;
1039 		sc = SC_LOOKUP(wchan);
1040 		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
1041 		sq = sleepq_lookup(wchan);
1042 		MPASS(sq != NULL);
1043 		td->td_flags |= TDF_TIMEOUT;
1044 		wakeup_swapper = sleepq_resume_thread(sq, td, 0);
1045 	} else if (TD_ON_SLEEPQ(td)) {
1046 		/*
1047 		 * If the thread is on the SLEEPQ but isn't sleeping
1048 		 * yet, it can either be on another CPU in between
1049 		 * sleepq_add() and one of the sleepq_*wait*()
1050 		 * routines or it can be in sleepq_catch_signals().
1051 		 */
1052 		td->td_flags |= TDF_TIMEOUT;
1053 	}
1054 
1055 	thread_unlock(td);
1056 	if (wakeup_swapper)
1057 		kick_proc0();
1058 }
1059 
1060 /*
1061  * Resumes a specific thread from the sleep queue associated with a specific
1062  * wait channel if it is on that queue.
1063  */
1064 void
1065 sleepq_remove(struct thread *td, void *wchan)
1066 {
1067 	struct sleepqueue *sq;
1068 	int wakeup_swapper;
1069 
1070 	/*
1071 	 * Look up the sleep queue for this wait channel, then re-check
1072 	 * that the thread is asleep on that channel; if it is not, then
1073 	 * bail.
1074 	 */
1075 	MPASS(wchan != NULL);
1076 	sleepq_lock(wchan);
1077 	sq = sleepq_lookup(wchan);
1078 	/*
1079 	 * We cannot lock the thread here as it may be sleeping on a
1080 	 * different sleepq.  However, holding the sleepq lock for this
1081 	 * wchan can guarantee that we do not miss a wakeup for this
1082 	 * channel.  The asserts below will catch any false positives.
1083 	 */
1084 	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
1085 		sleepq_release(wchan);
1086 		return;
1087 	}
1088 	/* Thread is asleep on sleep queue sq, so wake it up. */
1089 	thread_lock(td);
1090 	MPASS(sq != NULL);
1091 	MPASS(td->td_wchan == wchan);
1092 	wakeup_swapper = sleepq_resume_thread(sq, td, 0);
1093 	thread_unlock(td);
1094 	sleepq_release(wchan);
1095 	if (wakeup_swapper)
1096 		kick_proc0();
1097 }
1098 
1099 /*
1100  * Abort a thread as if an interrupt had occurred.  Only abort
1101  * interruptible waits (unfortunately it isn't safe to abort others).
1102  */
1103 int
1104 sleepq_abort(struct thread *td, int intrval)
1105 {
1106 	struct sleepqueue *sq;
1107 	void *wchan;
1108 
1109 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1110 	MPASS(TD_ON_SLEEPQ(td));
1111 	MPASS(td->td_flags & TDF_SINTR);
1112 	MPASS(intrval == EINTR || intrval == ERESTART);
1113 
1114 	/*
1115 	 * If the TDF_TIMEOUT flag is set, just leave. A
1116 	 * timeout is scheduled anyhow.
1117 	 */
1118 	if (td->td_flags & TDF_TIMEOUT)
1119 		return (0);
1120 
1121 	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
1122 	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
1123 	td->td_intrval = intrval;
1124 	td->td_flags |= TDF_SLEEPABORT;
1125 	/*
1126 	 * If the thread has not slept yet it will find the signal in
1127 	 * sleepq_catch_signals() and call sleepq_resume_thread.  Otherwise
1128 	 * we have to do it here.
1129 	 */
1130 	if (!TD_IS_SLEEPING(td))
1131 		return (0);
1132 	wchan = td->td_wchan;
1133 	MPASS(wchan != NULL);
1134 	sq = sleepq_lookup(wchan);
1135 	MPASS(sq != NULL);
1136 
1137 	/* Thread is asleep on sleep queue sq, so wake it up. */
1138 	return (sleepq_resume_thread(sq, td, 0));
1139 }
1140 
1141 void
1142 sleepq_chains_remove_matching(bool (*matches)(struct thread *))
1143 {
1144 	struct sleepqueue_chain *sc;
1145 	struct sleepqueue *sq, *sq1;
1146 	int i, wakeup_swapper;
1147 
1148 	wakeup_swapper = 0;
1149 	for (sc = &sleepq_chains[0]; sc < sleepq_chains + SC_TABLESIZE; ++sc) {
1150 		if (LIST_EMPTY(&sc->sc_queues)) {
1151 			continue;
1152 		}
1153 		mtx_lock_spin(&sc->sc_lock);
1154 		LIST_FOREACH_SAFE(sq, &sc->sc_queues, sq_hash, sq1) {
1155 			for (i = 0; i < NR_SLEEPQS; ++i) {
1156 				wakeup_swapper |= sleepq_remove_matching(sq, i,
1157 				    matches, 0);
1158 			}
1159 		}
1160 		mtx_unlock_spin(&sc->sc_lock);
1161 	}
1162 	if (wakeup_swapper) {
1163 		kick_proc0();
1164 	}
1165 }
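/*
 * Example (editor's sketch): sweeping every chain with a predicate.
 * The predicate below is hypothetical; note that matches() is invoked
 * with the thread lock held, so it must not sleep:
 *
 *	static bool
 *	sleeping_in_pid1(struct thread *td)
 *	{
 *		return (td->td_proc->p_pid == 1);
 *	}
 *
 *	sleepq_chains_remove_matching(sleeping_in_pid1);
 */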
1166 
1167 /*
1168  * Prints the stacks of all threads presently sleeping on wchan/queue to
1169  * the sbuf sb.  Sets count_stacks_printed to the number of stacks actually
1170  * printed.  Typically, this will equal the number of threads sleeping on the
1171  * queue, but may be less if sb overflowed before all stacks were printed.
1172  */
1173 #ifdef STACK
1174 int
1175 sleepq_sbuf_print_stacks(struct sbuf *sb, void *wchan, int queue,
1176     int *count_stacks_printed)
1177 {
1178 	struct thread *td, *td_next;
1179 	struct sleepqueue *sq;
1180 	struct stack **st;
1181 	struct sbuf **td_infos;
1182 	int i, stack_idx, error, stacks_to_allocate;
1183 	bool finished;
1184 
1185 	error = 0;
1186 	finished = false;
1187 
1188 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
1189 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
1190 
1191 	stacks_to_allocate = 10;
1192 	for (i = 0; i < 3 && !finished; i++) {
1193 		/* We cannot malloc while holding the queue's spinlock, so
1194 		 * we do our mallocs now, and hope it is enough.  If it
1195 		 * isn't, we will free these, drop the lock, malloc more,
1196 		 * and try again, up to a point.  After that point we will
1197 		 * give up and report ENOMEM. We also cannot write to sb
1198 		 * during this time since the client may have set the
1199 		 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a
1200 		 * malloc as we print to it.  So we defer actually printing
1201 		 * to sb until after we drop the spinlock.
1202 		 */
1203 
1204 		/* Where we will store the stacks. */
1205 		st = malloc(sizeof(struct stack *) * stacks_to_allocate,
1206 		    M_TEMP, M_WAITOK);
1207 		for (stack_idx = 0; stack_idx < stacks_to_allocate;
1208 		    stack_idx++)
1209 			st[stack_idx] = stack_create(M_WAITOK);
1210 
1211 		/* Where we will store the td name, tid, etc. */
1212 		td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate,
1213 		    M_TEMP, M_WAITOK);
1214 		for (stack_idx = 0; stack_idx < stacks_to_allocate;
1215 		    stack_idx++)
1216 			td_infos[stack_idx] = sbuf_new(NULL, NULL,
1217 			    MAXCOMLEN + sizeof(struct thread *) * 2 + 40,
1218 			    SBUF_FIXEDLEN);
1219 
1220 		sleepq_lock(wchan);
1221 		sq = sleepq_lookup(wchan);
1222 		if (sq == NULL) {
1223 			/* This sleepq does not exist; exit and return ENOENT. */
1224 			error = ENOENT;
1225 			finished = true;
1226 			sleepq_release(wchan);
1227 			goto loop_end;
1228 		}
1229 
1230 		stack_idx = 0;
1231 		/* Save thread info */
1232 		TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq,
1233 		    td_next) {
1234 			if (stack_idx >= stacks_to_allocate)
1235 				goto loop_end;
1236 
1237 			/* Note the td_lock is equal to the sleepq_lock here. */
1238 			stack_save_td(st[stack_idx], td);
1239 
1240 			sbuf_printf(td_infos[stack_idx], "%d: %s %p",
1241 			    td->td_tid, td->td_name, td);
1242 
1243 			++stack_idx;
1244 		}
1245 
1246 		finished = true;
1247 		sleepq_release(wchan);
1248 
1249 		/* Print the stacks */
1250 		for (i = 0; i < stack_idx; i++) {
1251 			sbuf_finish(td_infos[i]);
1252 			sbuf_printf(sb, "--- thread %s: ---\n", sbuf_data(td_infos[i]));
1253 			stack_sbuf_print(sb, st[i]);
1254 			sbuf_printf(sb, "\n");
1255 
1256 			error = sbuf_error(sb);
1257 			if (error == 0)
1258 				*count_stacks_printed = stack_idx;
1259 		}
1260 
1261 loop_end:
1262 		if (!finished)
1263 			sleepq_release(wchan);
1264 		for (stack_idx = 0; stack_idx < stacks_to_allocate;
1265 		    stack_idx++)
1266 			stack_destroy(st[stack_idx]);
1267 		for (stack_idx = 0; stack_idx < stacks_to_allocate;
1268 		    stack_idx++)
1269 			sbuf_delete(td_infos[stack_idx]);
1270 		free(st, M_TEMP);
1271 		free(td_infos, M_TEMP);
1272 		stacks_to_allocate *= 10;
1273 	}
1274 
1275 	if (!finished && error == 0)
1276 		error = ENOMEM;
1277 
1278 	return (error);
1279 }
1280 #endif
1281 
1282 #ifdef SLEEPQUEUE_PROFILING
1283 #define	SLEEPQ_PROF_LOCATIONS	1024
1284 #define	SLEEPQ_SBUFSIZE		512
1285 struct sleepq_prof {
1286 	LIST_ENTRY(sleepq_prof) sp_link;
1287 	const char	*sp_wmesg;
1288 	long		sp_count;
1289 };
1290 
1291 LIST_HEAD(sqphead, sleepq_prof);
1292 
1293 struct sqphead sleepq_prof_free;
1294 struct sqphead sleepq_hash[SC_TABLESIZE];
1295 static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
1296 static struct mtx sleepq_prof_lock;
1297 MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);
1298 
1299 static void
1300 sleepq_profile(const char *wmesg)
1301 {
1302 	struct sleepq_prof *sp;
1303 
1304 	mtx_lock_spin(&sleepq_prof_lock);
1305 	if (prof_enabled == 0)
1306 		goto unlock;
1307 	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
1308 		if (sp->sp_wmesg == wmesg)
1309 			goto done;
1310 	sp = LIST_FIRST(&sleepq_prof_free);
1311 	if (sp == NULL)
1312 		goto unlock;
1313 	sp->sp_wmesg = wmesg;
1314 	LIST_REMOVE(sp, sp_link);
1315 	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
1316 done:
1317 	sp->sp_count++;
1318 unlock:
1319 	mtx_unlock_spin(&sleepq_prof_lock);
1320 	return;
1321 }
1322 
1323 static void
1324 sleepq_prof_reset(void)
1325 {
1326 	struct sleepq_prof *sp;
1327 	int enabled;
1328 	int i;
1329 
1330 	mtx_lock_spin(&sleepq_prof_lock);
1331 	enabled = prof_enabled;
1332 	prof_enabled = 0;
1333 	for (i = 0; i < SC_TABLESIZE; i++)
1334 		LIST_INIT(&sleepq_hash[i]);
1335 	LIST_INIT(&sleepq_prof_free);
1336 	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
1337 		sp = &sleepq_profent[i];
1338 		sp->sp_wmesg = NULL;
1339 		sp->sp_count = 0;
1340 		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
1341 	}
1342 	prof_enabled = enabled;
1343 	mtx_unlock_spin(&sleepq_prof_lock);
1344 }
1345 
1346 static int
1347 enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
1348 {
1349 	int error, v;
1350 
1351 	v = prof_enabled;
1352 	error = sysctl_handle_int(oidp, &v, v, req);
1353 	if (error)
1354 		return (error);
1355 	if (req->newptr == NULL)
1356 		return (error);
1357 	if (v == prof_enabled)
1358 		return (0);
1359 	if (v == 1)
1360 		sleepq_prof_reset();
1361 	mtx_lock_spin(&sleepq_prof_lock);
1362 	prof_enabled = !!v;
1363 	mtx_unlock_spin(&sleepq_prof_lock);
1364 
1365 	return (0);
1366 }
1367 
1368 static int
1369 reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
1370 {
1371 	int error, v;
1372 
1373 	v = 0;
1374 	error = sysctl_handle_int(oidp, &v, 0, req);
1375 	if (error)
1376 		return (error);
1377 	if (req->newptr == NULL)
1378 		return (error);
1379 	if (v == 0)
1380 		return (0);
1381 	sleepq_prof_reset();
1382 
1383 	return (0);
1384 }
1385 
1386 static int
1387 dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
1388 {
1389 	struct sleepq_prof *sp;
1390 	struct sbuf *sb;
1391 	int enabled;
1392 	int error;
1393 	int i;
1394 
1395 	error = sysctl_wire_old_buffer(req, 0);
1396 	if (error != 0)
1397 		return (error);
1398 	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
1399 	sbuf_printf(sb, "\nwmesg\tcount\n");
1400 	enabled = prof_enabled;
1401 	mtx_lock_spin(&sleepq_prof_lock);
1402 	prof_enabled = 0;
1403 	mtx_unlock_spin(&sleepq_prof_lock);
1404 	for (i = 0; i < SC_TABLESIZE; i++) {
1405 		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
1406 			sbuf_printf(sb, "%s\t%ld\n",
1407 			    sp->sp_wmesg, sp->sp_count);
1408 		}
1409 	}
1410 	mtx_lock_spin(&sleepq_prof_lock);
1411 	prof_enabled = enabled;
1412 	mtx_unlock_spin(&sleepq_prof_lock);
1413 
1414 	error = sbuf_finish(sb);
1415 	sbuf_delete(sb);
1416 	return (error);
1417 }
1418 
1419 SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
1420     NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
1421 SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
1422     NULL, 0, reset_sleepq_prof_stats, "I",
1423     "Reset sleepqueue profiling statistics");
1424 SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
1425     NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
1426 #endif
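/*
 * With the SLEEPQUEUE_PROFILING kernel option configured, the knobs
 * above are driven from userland roughly as follows (editor's sketch):
 *
 *	sysctl debug.sleepq.enable=1	(start collecting)
 *	sysctl debug.sleepq.stats	(dump wmesg/count pairs)
 *	sysctl debug.sleepq.reset=1	(discard accumulated counts)
 */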
1427 
1428 #ifdef DDB
1429 DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
1430 {
1431 	struct sleepqueue_chain *sc;
1432 	struct sleepqueue *sq;
1433 #ifdef INVARIANTS
1434 	struct lock_object *lock;
1435 #endif
1436 	struct thread *td;
1437 	void *wchan;
1438 	int i;
1439 
1440 	if (!have_addr)
1441 		return;
1442 
1443 	/*
1444 	 * First, see if there is an active sleep queue for the wait channel
1445 	 * indicated by the address.
1446 	 */
1447 	wchan = (void *)addr;
1448 	sc = SC_LOOKUP(wchan);
1449 	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
1450 		if (sq->sq_wchan == wchan)
1451 			goto found;
1452 
1453 	/*
1454 	 * Second, see if there is an active sleep queue at the address
1455 	 * indicated.
1456 	 */
1457 	for (i = 0; i < SC_TABLESIZE; i++)
1458 		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
1459 			if (sq == (struct sleepqueue *)addr)
1460 				goto found;
1461 		}
1462 
1463 	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
1464 	return;
1465 found:
1466 	db_printf("Wait channel: %p\n", sq->sq_wchan);
1467 	db_printf("Queue type: %d\n", sq->sq_type);
1468 #ifdef INVARIANTS
1469 	if (sq->sq_lock) {
1470 		lock = sq->sq_lock;
1471 		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
1472 		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
1473 	}
1474 #endif
1475 	db_printf("Blocked threads:\n");
1476 	for (i = 0; i < NR_SLEEPQS; i++) {
1477 		db_printf("\nQueue[%d]:\n", i);
1478 		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
1479 			db_printf("\tempty\n");
1480 		else
1481 			TAILQ_FOREACH(td, &sq->sq_blocked[i],
1482 				      td_slpq) {
1483 				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
1484 					  td->td_tid, td->td_proc->p_pid,
1485 					  td->td_name);
1486 			}
1487 		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
1488 	}
1489 }
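/*
 * Example ddb session (editor's sketch; the addresses and thread are
 * hypothetical):
 *
 *	db> show sleepq 0xfffff80003c1e500
 *	Wait channel: 0xfffff80003c1e500
 *	Queue type: 0
 *	Blocked threads:
 *
 *	Queue[0]:
 *		0xfffff80003d40000 (tid 100077, pid 731, "bufdaemon")
 *	(expected: 1)
 */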
1490 
1491 /* Alias 'show sleepqueue' to 'show sleepq'. */
1492 DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
1493 #endif
1494