xref: /freebsd/sys/kern/subr_sleepqueue.c (revision ca987d4641cdcd7f27e153db17c5bf064934faf5)
/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold queues of threads blocked
 * on a wait channel.  Sleep queues are different from turnstiles in that
 * wait channels are not owned by anyone, so there is no priority
 * propagation.  Sleep queues can also provide a timeout and can also be
 * interrupted by signals.  That said, there are several similarities
 * between the turnstile and sleep queue implementations.  (Note:
 * turnstiles were implemented first.)  For example, both use a hash table
 * of the same size where each bucket is referred to as a "chain" that
 * contains both a spin lock and a linked list of queues.  An individual
 * queue is located by using a hash to pick a chain, locking the chain,
 * and then walking the chain searching for the queue.  This means that a
 * wait channel object does not need to embed its queue head just as locks
 * do not embed their turnstile queue head.  Threads also carry around a
 * sleep queue that they lend to the wait channel when blocking.  Just as
 * in turnstiles, the queue includes a free list of the sleep queues of
 * other threads blocked on the same wait channel in the case of multiple
 * waiters.
 *
 * Additional functionality provided by sleep queues includes the ability
 * to set a timeout.  The timeout is managed using a per-thread callout
 * that resumes a thread if it is asleep.  A thread may also catch signals
 * while it is asleep (aka an interruptible sleep).  The signal code uses
 * sleepq_abort() to interrupt a sleeping thread.  Finally, sleep queues
 * also provide some extra assertions.  One is not allowed to mix the
 * sleep/wakeup and cv APIs for a given wait channel.  Also, one must
 * consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
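
/*
 * A minimal sketch of the sleeper-side protocol built on this API
 * (the names "chan" and "m" are hypothetical; see _sleep() in
 * kern_synch.c for the canonical consumer).  Any kernel address can
 * serve as a wait channel:
 *
 *	sleepq_lock(&chan);
 *	mtx_unlock(&m);
 *	sleepq_add(&chan, &m.lock_object, "example", SLEEPQ_SLEEP, 0);
 *	sleepq_wait(&chan, 0);
 *
 * Dropping the interlock "m" only after sleepq_lock() closes the race
 * with a concurrent wakeup on &chan; the chain lock itself is dropped
 * inside sleepq_wait() once the thread is committed to sleeping.
 */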

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_sched.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <machine/atomic.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.
 * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#ifndef SC_TABLESIZE
#define	SC_TABLESIZE	256
#endif
CTASSERT(powerof2(SC_TABLESIZE));
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
			    SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
#define	NR_SLEEPQS	2
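
/*
 * Worked example of the hash (the address is made up): with
 * SC_SHIFT == 8 and SC_TABLESIZE == 256, a wait channel at 0x12345678
 * hashes to ((0x12345678 >> 8) ^ 0x12345678) & 0xff == 0x1226622e &
 * 0xff == 0x2e, i.e. chain 46.  XOR-folding the next-higher byte into
 * the low byte spreads objects that share low-order address bits
 * across different chains.
 */
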
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS];	/* (c) Blocked threads. */
	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
	int	sq_type;			/* (c) Queue type. */
#ifdef INVARIANTS
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
} __aligned(CACHE_LINE_SIZE);

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(void *wchan, int pri);
static int	sleepq_check_signals(void);
static int	sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);
static void	sleepq_switch(void *wchan, int pri);
static void	sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);

/*
 * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes.
 * Note that it must happen after sleepinit() has been fully executed, so
 * it must happen after SI_SUB_KMEM SYSINIT() subsystem setup.
 */
#ifdef SLEEPQUEUE_PROFILING
static void
init_sleepqueue_profiling(void)
{
	char chain_name[10];
	struct sysctl_oid *chain_oid;
	u_int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%u", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
	}
}

SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_sleepqueue_profiling, NULL);
#endif

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN | MTX_RECURSE);
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The associated sleep queue chain must already be locked.  If no
 * queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed-in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	KASSERT(td->td_no_sleeping == 0,
	    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
	    __func__, td, wchan));

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++) {
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
			KASSERT(sq->sq_blockedcnt[i] == 0,
			    ("thread's sleep queue %d count mismatches", i));
		}
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	sq->sq_blockedcnt[queue]++;
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_flags |= TDF_SINTR;
		td->td_flags &= ~TDF_SLEEPABORT;
	}
	thread_unlock(td);
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after the given sleep time has elapsed if the thread has
 * not already been awakened.
 */
void
sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
    int flags)
{
	struct sleepqueue_chain *sc;
	struct thread *td;
	sbintime_t pr1;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	if (cold && td == &thread0)
		panic("timed sleep before timers are working");
	KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx",
	    td->td_tid, td, (uintmax_t)td->td_sleeptimo));
	thread_lock(td);
	callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
	thread_unlock(td);
	callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC |
	    C_DIRECT_EXEC);
}
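
/*
 * A sketch of how a timed, interruptible sleep composes the calls
 * above (hypothetical names; compare _sleep() in kern_synch.c):
 *
 *	sleepq_lock(&chan);
 *	sleepq_add(&chan, &m.lock_object, "example",
 *	    SLEEPQ_SLEEP | SLEEPQ_INTERRUPTIBLE, 0);
 *	sleepq_set_timeout_sbt(&chan, SBT_1S, 0, C_HARDCLOCK);
 *	error = sleepq_timedwait_sig(&chan, 0);
 *
 * error is then 0, EWOULDBLOCK, EINTR, or ERESTART.
 */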

/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(void *wchan, int queue)
{
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	return (sq->sq_blockedcnt[queue]);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters with the sleep queue chain lock held and exits
 * with the thread lock held.  The thread lock may have transitioned
 * from the sleepq chain lock to a run lock.
 */
static int
sleepq_catch_signals(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	struct sigacts *ps;
	int sig, ret;

	ret = 0;
	td = curthread;
	p = curproc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	if ((td->td_pflags & TDP_WAKEUP) != 0) {
		td->td_pflags &= ~TDP_WAKEUP;
		ret = EINTR;
		thread_lock(td);
		goto out;
	}

	/*
	 * See if there are any pending signals or suspension requests for this
	 * thread.  If not, we can switch immediately.
	 */
	thread_lock(td);
	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) != 0) {
		thread_unlock(td);
		mtx_unlock_spin(&sc->sc_lock);
		CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
			(void *)td, (long)p->p_pid, td->td_name);
		PROC_LOCK(p);
		/*
		 * Check for suspension first.  Checking for signals and then
		 * suspending could result in a missed signal, since a signal
		 * can be delivered while this thread is suspended.
		 */
		if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
			ret = thread_suspend_check(1);
			MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
			if (ret != 0) {
				PROC_UNLOCK(p);
				mtx_lock_spin(&sc->sc_lock);
				thread_lock(td);
				goto out;
			}
		}
		if ((td->td_flags & TDF_NEEDSIGCHK) != 0) {
			ps = p->p_sigacts;
			mtx_lock(&ps->ps_mtx);
			sig = cursig(td);
			if (sig == -1) {
				mtx_unlock(&ps->ps_mtx);
				KASSERT((td->td_flags & TDF_SBDRY) != 0,
				    ("lost TDF_SBDRY"));
				KASSERT(TD_SBDRY_INTR(td),
				    ("lost TDF_SERESTART or TDF_SEINTR"));
				KASSERT((td->td_flags &
				    (TDF_SEINTR | TDF_SERESTART)) !=
				    (TDF_SEINTR | TDF_SERESTART),
				    ("both TDF_SEINTR and TDF_SERESTART"));
				ret = TD_SBDRY_ERRNO(td);
			} else if (sig != 0) {
				ret = SIGISMEMBER(ps->ps_sigintr, sig) ?
				    EINTR : ERESTART;
				mtx_unlock(&ps->ps_mtx);
			} else {
				mtx_unlock(&ps->ps_mtx);
			}
		}
		/*
		 * Lock the per-process spinlock prior to dropping the PROC_LOCK
		 * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
		 * thread_lock() are currently held in tdsendsignal().
		 */
		PROC_SLOCK(p);
		mtx_lock_spin(&sc->sc_lock);
		PROC_UNLOCK(p);
		thread_lock(td);
		PROC_SUNLOCK(p);
	}
	if (ret == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
out:
	/*
	 * There were pending signals and this thread is still
	 * on the sleep queue, so remove it from the sleep queue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
	}
	mtx_unlock_spin(&sc->sc_lock);
	MPASS(td->td_lock != &sc->sc_lock);
	return (ret);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with the thread lock held.
 */
static void
sleepq_switch(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	bool rtc_changed;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 *
	 * Do the same if the real-time clock has been adjusted since this
	 * thread calculated its timeout based on that clock.  This handles
	 * the following race:
	 * - The Ts thread needs to sleep until an absolute real-clock time.
	 *   It copies the global rtc_generation into curthread->td_rtcgen,
	 *   reads the RTC, and calculates a sleep duration based on that time.
	 *   See umtxq_sleep() for an example.
	 * - The Tc thread adjusts the RTC, bumps rtc_generation, and wakes
	 *   threads that are sleeping until an absolute real-clock time.
	 *   See tc_setclock() and the POSIX specification of clock_settime().
	 * - Ts reaches the code below.  Because it holds the sleepqueue
	 *   chain lock, Tc has already finished waking threads, so Ts must
	 *   test td_rtcgen itself.
	 * (The declaration of td_rtcgen refers to this comment.)
	 */
	rtc_changed = td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation;
	if ((td->td_flags & TDF_TIMEOUT) || rtc_changed) {
		if (rtc_changed) {
			td->td_rtcgen = 0;
		}
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	SDT_PROBE0(sched, , , sleep);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;
	int res;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If TDF_TIMEOUT is set, we timed out.  But recheck
	 * td_sleeptimo anyway.
	 */
	res = 0;
	if (td->td_sleeptimo != 0) {
		if (td->td_sleeptimo <= sbinuptime())
			res = EWOULDBLOCK;
		td->td_sleeptimo = 0;
	}
	if (td->td_flags & TDF_TIMEOUT)
		td->td_flags &= ~TDF_TIMEOUT;
	else
		/*
		 * We ignore the situation where the timeout subsystem was
		 * unable to stop our callout.  The struct thread is
		 * type-stable, so the callout will use the correct
		 * memory when running.  The checks of the
		 * td_sleeptimo value in this function and in
		 * sleepq_timeout() ensure that the thread does not
		 * get spurious wakeups, even if the callout was reset
		 * or the thread reused.
		 */
		callout_stop(&td->td_slpcallout);
	return (res);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/* We are no longer in an interruptible sleep. */
	if (td->td_flags & TDF_SINTR)
		td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_SLEEPABORT) {
		td->td_flags &= ~TDF_SLEEPABORT;
		return (td->td_intrval);
	}

	return (0);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	thread_unlock(td);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan, int pri)
{
	int rcatch;
	int rval;

	rcatch = sleepq_catch_signals(wchan, pri);
	rval = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int pri)
{
	struct thread *td;
	int rval;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	rval = sleepq_check_timeout();
	thread_unlock(td);

	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}

/*
 * Returns the type of sleep queue given a wait channel.
 */
int
sleepq_type(void *wchan)
{
	struct sleepqueue *sq;
	int type;

	MPASS(wchan != NULL);

	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return (-1);
	}
	type = sq->sq_type;
	sleepq_release(wchan);
	return (type);
}

/*
 * Removes a thread from a sleep queue and makes it runnable.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	td->td_flags &= ~TDF_SINTR;

	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_name);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri &&
	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		TD_CLR_SLEEPING(td);
		return (setrunnable(td));
	}
	return (0);
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
	struct sleepqueue *sq;
	int i;

	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
		MPASS(sq->sq_blockedcnt[i] == 0);
	}
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
	struct sleepqueue *sq;
	int i;

	bzero(mem, size);
	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		TAILQ_INIT(&sq->sq_blocked[i]);
		sq->sq_blockedcnt[i] = 0;
	}
	LIST_INIT(&sq->sq_free);
	return (0);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *besttd;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue, as it has
	 * been sleeping the longest: threads are always added to the tail
	 * of sleep queues.
	 */
	besttd = TAILQ_FIRST(&sq->sq_blocked[queue]);
	TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
		if (td->td_priority < besttd->td_priority)
			besttd = td;
	}
	MPASS(besttd != NULL);
	thread_lock(besttd);
	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
	thread_unlock(besttd);
	return (wakeup_swapper);
}
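
/*
 * A sketch of the waker-side protocol (hypothetical channel; compare
 * wakeup_one() in kern_synch.c).  The caller locks the chain, signals,
 * releases, and only then kicks the swapper if asked to:
 *
 *	sleepq_lock(&chan);
 *	wakeup_swapper = sleepq_signal(&chan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(&chan);
 *	if (wakeup_swapper)
 *		kick_proc0();
 */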
913 
static bool
match_any(struct thread *td __unused)
{

	return (true);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	return (sleepq_remove_matching(sq, queue, match_any, pri));
}

/*
 * Resume threads on the sleep queue that match the given predicate.
 */
int
sleepq_remove_matching(struct sleepqueue *sq, int queue,
    bool (*matches)(struct thread *), int pri)
{
	struct thread *td, *tdn;
	int wakeup_swapper;

	/*
	 * The last thread will be given ownership of sq and may
	 * re-enqueue itself before sleepq_resume_thread() returns,
	 * so we must cache the "next" queue item at the beginning
	 * of the final iteration.
	 */
	wakeup_swapper = 0;
	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
		thread_lock(td);
		if (matches(td))
			wakeup_swapper |= sleepq_resume_thread(sq, td, pri);
		thread_unlock(td);
	}

	return (wakeup_swapper);
}
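
/*
 * A sketch of a custom predicate for the matching interface above
 * ("example_proc" is a hypothetical global): resume only the sleepers
 * belonging to one process.  Such a function can be passed to
 * sleepq_remove_matching() or sleepq_chains_remove_matching() in place
 * of match_any():
 *
 *	static bool
 *	match_example_proc(struct thread *td)
 *	{
 *
 *		return (td->td_proc == example_proc);
 *	}
 */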

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;
	int wakeup_swapper;

	td = arg;
	wakeup_swapper = 0;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

	thread_lock(td);

	if (td->td_sleeptimo > sbinuptime() || td->td_sleeptimo == 0) {
		/*
		 * The thread does not want a timeout (yet).
		 */
	} else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
		/*
		 * See if the thread is asleep and get the wait
		 * channel if it is.
		 */
		wchan = td->td_wchan;
		sc = SC_LOOKUP(wchan);
		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
		sq = sleepq_lookup(wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	} else if (TD_ON_SLEEPQ(td)) {
		/*
		 * If the thread is on the SLEEPQ but isn't sleeping
		 * yet, it can either be on another CPU in between
		 * sleepq_add() and one of the sleepq_*wait*()
		 * routines or it can be in sleepq_catch_signals().
		 */
		td->td_flags |= TDF_TIMEOUT;
	}

	thread_unlock(td);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;
	int wakeup_swapper;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not, bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	/*
	 * We cannot lock the thread here as it may be sleeping on a
	 * different sleepq.  However, holding the sleepq lock for this
	 * wchan guarantees that we do not miss a wakeup for this
	 * channel.  The asserts below will catch any false positives.
	 */
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		sleepq_release(wchan);
		return;
	}
	/* Thread is asleep on sleep queue sq, so wake it up. */
	thread_lock(td);
	MPASS(sq != NULL);
	MPASS(td->td_wchan == wchan);
	wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	thread_unlock(td);
	sleepq_release(wchan);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 */
int
sleepq_abort(struct thread *td, int intrval)
{
	struct sleepqueue *sq;
	void *wchan;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return (0);

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
	td->td_intrval = intrval;
	td->td_flags |= TDF_SLEEPABORT;
	/*
	 * If the thread has not slept yet it will find the signal in
	 * sleepq_catch_signals() and call sleepq_resume_thread().
	 * Otherwise we have to do it here.
	 */
	if (!TD_IS_SLEEPING(td))
		return (0);
	wchan = td->td_wchan;
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	return (sleepq_resume_thread(sq, td, 0));
}

void
sleepq_chains_remove_matching(bool (*matches)(struct thread *))
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	int i, wakeup_swapper;

	wakeup_swapper = 0;
	for (sc = &sleepq_chains[0]; sc < sleepq_chains + SC_TABLESIZE; ++sc) {
		if (LIST_EMPTY(&sc->sc_queues)) {
			continue;
		}
		mtx_lock_spin(&sc->sc_lock);
		LIST_FOREACH(sq, &sc->sc_queues, sq_hash) {
			for (i = 0; i < NR_SLEEPQS; ++i) {
				wakeup_swapper |= sleepq_remove_matching(sq, i,
				    matches, 0);
			}
		}
		mtx_unlock_spin(&sc->sc_lock);
	}
	if (wakeup_swapper) {
		kick_proc0();
	}
}

/*
 * Prints the stacks of all threads presently sleeping on wchan/queue to
 * the sbuf sb.  Sets count_stacks_printed to the number of stacks actually
 * printed.  Typically, this will equal the number of threads sleeping on the
 * queue, but may be less if sb overflowed before all stacks were printed.
 */
#ifdef STACK
int
sleepq_sbuf_print_stacks(struct sbuf *sb, void *wchan, int queue,
    int *count_stacks_printed)
{
	struct thread *td, *td_next;
	struct sleepqueue *sq;
	struct stack **st;
	struct sbuf **td_infos;
	int i, stack_idx, error, stacks_to_allocate;
	bool finished, partial_print;

	error = 0;
	finished = false;
	partial_print = false;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	stacks_to_allocate = 10;
	for (i = 0; i < 3 && !finished; i++) {
		/*
		 * We cannot malloc while holding the queue's spinlock, so
		 * we do our mallocs now, and hope it is enough.  If it
		 * isn't, we will free these, drop the lock, malloc more,
		 * and try again, up to a point.  After that point we will
		 * give up and report ENOMEM.  We also cannot write to sb
		 * during this time since the client may have set the
		 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a
		 * malloc as we print to it.  So we defer actually printing
		 * to sb until after we drop the spinlock.
		 */

		/* Where we will store the stacks. */
		st = malloc(sizeof(struct stack *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			st[stack_idx] = stack_create(M_WAITOK);

		/* Where we will store the td name, tid, etc. */
		td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			td_infos[stack_idx] = sbuf_new(NULL, NULL,
			    MAXCOMLEN + sizeof(struct thread *) * 2 + 40,
			    SBUF_FIXEDLEN);

		sleepq_lock(wchan);
		sq = sleepq_lookup(wchan);
		if (sq == NULL) {
			/* This sleepq does not exist; exit and return ENOENT. */
			error = ENOENT;
			finished = true;
			sleepq_release(wchan);
			goto loop_end;
		}

		stack_idx = 0;
		/* Save thread info */
		TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq,
		    td_next) {
			if (stack_idx >= stacks_to_allocate)
				goto loop_end;

			/* Note the td_lock is equal to the sleepq_lock here. */
			stack_save_td(st[stack_idx], td);

			sbuf_printf(td_infos[stack_idx], "%d: %s %p",
			    td->td_tid, td->td_name, td);

			++stack_idx;
		}

		finished = true;
		sleepq_release(wchan);

		/* Print the stacks */
		for (i = 0; i < stack_idx; i++) {
			sbuf_finish(td_infos[i]);
			sbuf_printf(sb, "--- thread %s: ---\n", sbuf_data(td_infos[i]));
			stack_sbuf_print(sb, st[i]);
			sbuf_printf(sb, "\n");

			error = sbuf_error(sb);
			if (error == 0)
				*count_stacks_printed = stack_idx;
		}

loop_end:
		if (!finished)
			sleepq_release(wchan);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			stack_destroy(st[stack_idx]);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			sbuf_delete(td_infos[stack_idx]);
		free(st, M_TEMP);
		free(td_infos, M_TEMP);
		stacks_to_allocate *= 10;
	}

	if (!finished && error == 0)
		error = ENOMEM;

	return (error);
}
#endif

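/*
 * A sketch of a hypothetical caller of the STACK helper above (names
 * invented): collect the stacks of everything asleep on &chan into an
 * auto-extending sbuf and print the result.
 *
 *	struct sbuf *sb = sbuf_new_auto();
 *	int printed, error;
 *
 *	error = sleepq_sbuf_print_stacks(sb, &chan, 0, &printed);
 *	if (error == 0 && sbuf_finish(sb) == 0)
 *		printf("%s", sbuf_data(sb));
 *	sbuf_delete(sb);
 */
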
#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		512
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
	return;
}

static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}

static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
	sbuf_printf(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
#endif

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
	db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i],
				      td_slpq) {
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
					  td->td_tid, td->td_proc->p_pid,
					  td->td_name);
			}
		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif