xref: /freebsd/sys/kern/subr_sleepqueue.c (revision 1456f0f9681bbd7fdae7b683553f6c7491508c4e)
1 /*-
2  * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /*
28  * Implementation of sleep queues used to hold a queue of threads blocked on
29  * a wait channel.  Sleep queues differ from turnstiles in that wait
30  * channels are not owned by anyone, so there is no priority propagation.
31  * Sleep queues can also provide a timeout and can be interrupted by
32  * signals.  That said, there are several similarities between the turnstile
33  * and sleep queue implementations.  (Note: turnstiles were implemented
34  * first.)  For example, both use a hash table of the same size where each
35  * bucket is referred to as a "chain" that contains both a spin lock and
36  * a linked list of queues.  An individual queue is located by using a hash
37  * to pick a chain, locking the chain, and then walking the chain searching
38  * for the queue.  This means that a wait channel object does not need to
39  * embed its queue head just as locks do not embed their turnstile queue
40  * head.  Threads also carry around a sleep queue that they lend to the
41  * wait channel when blocking.  Just as in turnstiles, the queue includes
42  * a free list of the sleep queues of other threads blocked on the same
43  * wait channel in the case of multiple waiters.
44  *
45  * Some additional functionality provided by sleep queues includes the
46  * ability to set a timeout.  The timeout is managed using a per-thread
47  * callout that resumes a thread if it is asleep.  A thread may also
48  * catch signals while it is asleep (aka an interruptible sleep).  The
49  * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
50  * sleep queues also provide some extra assertions.  One is not allowed to
51  * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
52  * must consistently use the same lock to synchronize with a wait channel,
53  * though this check is currently only a warning for sleep/wakeup due to
54  * pre-existing abuse of that API.  The same lock must also be held when
55  * awakening threads, though that is currently only enforced for condition
56  * variables.
57  */
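
/*
 * Illustrative sketch, not part of the implementation: a minimal
 * consumer of this API, simplified from what the sleep/wakeup and
 * condition variable code do.  The sleeper lends its sleep queue and
 * blocks:
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, NULL, "example", SLEEPQ_SLEEP, 0);
 *	sleepq_wait(wchan, 0);
 *
 * Note that sleepq_wait() returns with the chain lock already dropped.
 * A waker then resumes every thread blocked on the same channel:
 *
 *	sleepq_lock(wchan);
 *	wakeup_swapper = sleepq_broadcast(wchan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(wchan);
 *	if (wakeup_swapper)
 *		kick_proc0();
 */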
58 
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
61 
62 #include "opt_sleepqueue_profiling.h"
63 #include "opt_ddb.h"
64 #include "opt_sched.h"
65 #include "opt_stack.h"
66 
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/lock.h>
70 #include <sys/kernel.h>
71 #include <sys/ktr.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/sbuf.h>
75 #include <sys/sched.h>
76 #include <sys/sdt.h>
77 #include <sys/signalvar.h>
78 #include <sys/sleepqueue.h>
79 #include <sys/stack.h>
80 #include <sys/sysctl.h>
81 
82 #include <vm/uma.h>
83 
84 #ifdef DDB
85 #include <ddb/ddb.h>
86 #endif
87 
88 
89 /*
90  * Constants for the hash table of sleep queue chains.
91  * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
92  */
93 #define	SC_TABLESIZE	256			/* Must be power of 2. */
94 #define	SC_MASK		(SC_TABLESIZE - 1)
95 #define	SC_SHIFT	8
96 #define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
97 			    SC_MASK)
98 #define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
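
/*
 * Worked example of the hash above: a wait channel at address
 * 0x12345678 gives ((0x12345678 >> 8) ^ 0x12345678) & SC_MASK =
 * 0x1226622e & 0xff = 0x2e, i.e. chain 46.  XOR-folding the second
 * byte into the first spreads out wait channels that share low-order
 * address bits.
 */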
99 #define NR_SLEEPQS      2
100 /*
101  * There are two different lists of sleep queues.  Both lists are connected
102  * via the sq_hash entries.  The first list is the sleep queue chain list
103  * that a sleep queue is on when it is attached to a wait channel.  The
104  * second list is the free list hung off of a sleep queue that is attached
105  * to a wait channel.
106  *
107  * Each sleep queue also contains the wait channel it is attached to, the
108  * list of threads blocked on that wait channel, flags specific to the
109  * wait channel, and the lock used to synchronize with a wait channel.
110  * The flags are used to catch mismatches between the various consumers
111  * of the sleep queue API (e.g. sleep/wakeup and condition variables).
112  * The lock pointer is only used when invariants are enabled for various
113  * debugging checks.
114  *
115  * Locking key:
116  *  c - sleep queue chain lock
117  */
118 struct sleepqueue {
119 	TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS];	/* (c) Blocked threads. */
120 	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
121 	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
122 	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
123 	void	*sq_wchan;			/* (c) Wait channel. */
124 	int	sq_type;			/* (c) Queue type. */
125 #ifdef INVARIANTS
126 	struct lock_object *sq_lock;		/* (c) Associated lock. */
127 #endif
128 };
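
/*
 * An illustrative snapshot, assuming two threads td1 and td2 asleep on
 * the same wait channel wc: the first sleeper's queue is hooked on the
 * chain and holds both threads, while the second sleeper's now-spare
 * queue hangs off the free list:
 *
 *	sleepq_chains[SC_HASH(wc)].sc_queues
 *	    -> sq1 { sq_wchan = wc, sq_blocked[q] = { td1, td2 },
 *	             sq_free = { sq2 } }
 */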
129 
130 struct sleepqueue_chain {
131 	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
132 	struct mtx sc_lock;			/* Spin lock for this chain. */
133 #ifdef SLEEPQUEUE_PROFILING
134 	u_int	sc_depth;			/* Length of sc_queues. */
135 	u_int	sc_max_depth;			/* Max length of sc_queues. */
136 #endif
137 };
138 
139 #ifdef SLEEPQUEUE_PROFILING
140 u_int sleepq_max_depth;
141 static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
142 static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
143     "sleepq chain stats");
144 SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
145     0, "maximum depth achieved by a single chain");
146 
147 static void	sleepq_profile(const char *wmesg);
148 static int	prof_enabled;
149 #endif
150 static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
151 static uma_zone_t sleepq_zone;
152 
153 /*
154  * Prototypes for non-exported routines.
155  */
156 static int	sleepq_catch_signals(void *wchan, int pri);
157 static int	sleepq_check_signals(void);
158 static int	sleepq_check_timeout(void);
159 #ifdef INVARIANTS
160 static void	sleepq_dtor(void *mem, int size, void *arg);
161 #endif
162 static int	sleepq_init(void *mem, int size, int flags);
163 static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
164 		    int pri);
165 static void	sleepq_switch(void *wchan, int pri);
166 static void	sleepq_timeout(void *arg);
167 
168 SDT_PROBE_DECLARE(sched, , , sleep);
169 SDT_PROBE_DECLARE(sched, , , wakeup);
170 
171 /*
172  * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes.
173  * Note that it must happen after sleepinit() has been fully executed, so
174  * it must run after the SI_SUB_KMEM SYSINIT() stage has completed.
175  */
176 #ifdef SLEEPQUEUE_PROFILING
177 static void
178 init_sleepqueue_profiling(void)
179 {
180 	char chain_name[10];
181 	struct sysctl_oid *chain_oid;
182 	u_int i;
183 
184 	for (i = 0; i < SC_TABLESIZE; i++) {
185 		snprintf(chain_name, sizeof(chain_name), "%u", i);
186 		chain_oid = SYSCTL_ADD_NODE(NULL,
187 		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
188 		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
189 		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
190 		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
191 		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
192 		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
193 		    NULL);
194 	}
195 }
196 
197 SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
198     init_sleepqueue_profiling, NULL);
199 #endif
200 
201 /*
202  * Early initialization of sleep queues that is called from the sleepinit()
203  * SYSINIT.
204  */
205 void
206 init_sleepqueues(void)
207 {
208 	int i;
209 
210 	for (i = 0; i < SC_TABLESIZE; i++) {
211 		LIST_INIT(&sleepq_chains[i].sc_queues);
212 		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
213 		    MTX_SPIN | MTX_RECURSE);
214 	}
215 	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
216 #ifdef INVARIANTS
217 	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
218 #else
219 	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
220 #endif
221 
222 	thread0.td_sleepqueue = sleepq_alloc();
223 }
224 
225 /*
226  * Get a sleep queue for a new thread.
227  */
228 struct sleepqueue *
229 sleepq_alloc(void)
230 {
231 
232 	return (uma_zalloc(sleepq_zone, M_WAITOK));
233 }
234 
235 /*
236  * Free a sleep queue when a thread is destroyed.
237  */
238 void
239 sleepq_free(struct sleepqueue *sq)
240 {
241 
242 	uma_zfree(sleepq_zone, sq);
243 }
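
/*
 * A sketch of the intended lifecycle: every thread owns a sleep queue
 * from creation until destruction, except while it is blocked and has
 * lent the queue to a wait channel.  thread0's queue is allocated in
 * init_sleepqueues() above; for other threads the thread code is
 * assumed to do the equivalent of:
 *
 *	td->td_sleepqueue = sleepq_alloc();	(at thread creation)
 *	...
 *	sleepq_free(td->td_sleepqueue);		(at thread destruction)
 */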
244 
245 /*
246  * Lock the sleep queue chain associated with the specified wait channel.
247  */
248 void
249 sleepq_lock(void *wchan)
250 {
251 	struct sleepqueue_chain *sc;
252 
253 	sc = SC_LOOKUP(wchan);
254 	mtx_lock_spin(&sc->sc_lock);
255 }
256 
257 /*
258  * Look up the sleep queue associated with a given wait channel in the hash
259  * table.  The caller must hold the associated sleep queue chain lock.  If
260  * no queue is found in the table, NULL is returned.
261  */
262 struct sleepqueue *
263 sleepq_lookup(void *wchan)
264 {
265 	struct sleepqueue_chain *sc;
266 	struct sleepqueue *sq;
267 
268 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
269 	sc = SC_LOOKUP(wchan);
270 	mtx_assert(&sc->sc_lock, MA_OWNED);
271 	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
272 		if (sq->sq_wchan == wchan)
273 			return (sq);
274 	return (NULL);
275 }
276 
277 /*
278  * Unlock the sleep queue chain associated with a given wait channel.
279  */
280 void
281 sleepq_release(void *wchan)
282 {
283 	struct sleepqueue_chain *sc;
284 
285 	sc = SC_LOOKUP(wchan);
286 	mtx_unlock_spin(&sc->sc_lock);
287 }
288 
289 /*
290  * Places the current thread on the sleep queue for the specified wait
291  * channel.  If INVARIANTS is enabled, then it associates the passed-in
292  * lock with the sleepq to make sure it is held when that sleep queue is
293  * woken up.
294  */
295 void
296 sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
297     int queue)
298 {
299 	struct sleepqueue_chain *sc;
300 	struct sleepqueue *sq;
301 	struct thread *td;
302 
303 	td = curthread;
304 	sc = SC_LOOKUP(wchan);
305 	mtx_assert(&sc->sc_lock, MA_OWNED);
306 	MPASS(td->td_sleepqueue != NULL);
307 	MPASS(wchan != NULL);
308 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
309 
310 	/* If this thread is not allowed to sleep, die a horrible death. */
311 	KASSERT(td->td_no_sleeping == 0,
312 	    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
313 	    __func__, td, wchan));
314 
315 	/* Look up the sleep queue associated with the wait channel 'wchan'. */
316 	sq = sleepq_lookup(wchan);
317 
318 	/*
319 	 * If the wait channel does not already have a sleep queue, use
320 	 * this thread's sleep queue.  Otherwise, insert the current thread
321 	 * into the sleep queue already in use by this wait channel.
322 	 */
323 	if (sq == NULL) {
324 #ifdef INVARIANTS
325 		int i;
326 
327 		sq = td->td_sleepqueue;
328 		for (i = 0; i < NR_SLEEPQS; i++) {
329 			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
330 			    ("thread's sleep queue %d is not empty", i));
331 			KASSERT(sq->sq_blockedcnt[i] == 0,
332 			    ("thread's sleep queue %d count mismatches", i));
333 		}
334 		KASSERT(LIST_EMPTY(&sq->sq_free),
335 		    ("thread's sleep queue has a non-empty free list"));
336 		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
337 		sq->sq_lock = lock;
338 #endif
339 #ifdef SLEEPQUEUE_PROFILING
340 		sc->sc_depth++;
341 		if (sc->sc_depth > sc->sc_max_depth) {
342 			sc->sc_max_depth = sc->sc_depth;
343 			if (sc->sc_max_depth > sleepq_max_depth)
344 				sleepq_max_depth = sc->sc_max_depth;
345 		}
346 #endif
347 		sq = td->td_sleepqueue;
348 		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
349 		sq->sq_wchan = wchan;
350 		sq->sq_type = flags & SLEEPQ_TYPE;
351 	} else {
352 		MPASS(wchan == sq->sq_wchan);
353 		MPASS(lock == sq->sq_lock);
354 		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
355 		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
356 	}
357 	thread_lock(td);
358 	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
359 	sq->sq_blockedcnt[queue]++;
360 	td->td_sleepqueue = NULL;
361 	td->td_sqqueue = queue;
362 	td->td_wchan = wchan;
363 	td->td_wmesg = wmesg;
364 	if (flags & SLEEPQ_INTERRUPTIBLE) {
365 		td->td_flags |= TDF_SINTR;
366 		td->td_flags &= ~TDF_SLEEPABORT;
367 	}
368 	thread_unlock(td);
369 }
370 
371 /*
372  * Sets a timeout that will remove the current thread from the specified
373  * sleep queue after the timeout given by sbt expires, unless awakened first.
374  */
375 void
376 sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
377     int flags)
378 {
379 	struct sleepqueue_chain *sc;
380 	struct thread *td;
381 	sbintime_t pr1;
382 
383 	td = curthread;
384 	sc = SC_LOOKUP(wchan);
385 	mtx_assert(&sc->sc_lock, MA_OWNED);
386 	MPASS(TD_ON_SLEEPQ(td));
387 	MPASS(td->td_sleepqueue == NULL);
388 	MPASS(wchan != NULL);
389 	if (cold && td == &thread0)
390 		panic("timed sleep before timers are working");
391 	KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx",
392 	    td->td_tid, td, (uintmax_t)td->td_sleeptimo));
393 	thread_lock(td);
394 	callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
395 	thread_unlock(td);
396 	callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
397 	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC |
398 	    C_DIRECT_EXEC);
399 }
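
/*
 * Tick-based callers reach this function through a wrapper macro; in
 * sys/sleepqueue.h the traditional interface expands to roughly the
 * following (an approximation, not quoted from this revision):
 *
 *	#define	sleepq_set_timeout(wchan, timo)				\
 *		sleepq_set_timeout_sbt((wchan), tick_sbt * (timo),	\
 *		    0, C_HARDCLOCK)
 */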
400 
401 /*
402  * Return the number of actual sleepers for the specified queue.
403  */
404 u_int
405 sleepq_sleepcnt(void *wchan, int queue)
406 {
407 	struct sleepqueue *sq;
408 
409 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
410 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
411 	sq = sleepq_lookup(wchan);
412 	if (sq == NULL)
413 		return (0);
414 	return (sq->sq_blockedcnt[queue]);
415 }
416 
417 /*
418  * Marks the pending sleep of the current thread as interruptible and
419  * makes an initial check for pending signals before putting a thread
420  * to sleep.  Enters with the sleep queue chain lock held and exits with
421  * the thread lock held; it may have transitioned to a run lock.
422  */
423 static int
424 sleepq_catch_signals(void *wchan, int pri)
425 {
426 	struct sleepqueue_chain *sc;
427 	struct sleepqueue *sq;
428 	struct thread *td;
429 	struct proc *p;
430 	struct sigacts *ps;
431 	int sig, ret;
432 
433 	td = curthread;
434 	p = curproc;
435 	sc = SC_LOOKUP(wchan);
436 	mtx_assert(&sc->sc_lock, MA_OWNED);
437 	MPASS(wchan != NULL);
438 	if ((td->td_pflags & TDP_WAKEUP) != 0) {
439 		td->td_pflags &= ~TDP_WAKEUP;
440 		ret = EINTR;
441 		thread_lock(td);
442 		goto out;
443 	}
444 
445 	/*
446 	 * See if there are any pending signals for this thread.  If not,
447 	 * we can switch immediately.  Otherwise, do the signal processing
448 	 * directly.
449 	 */
450 	thread_lock(td);
451 	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0) {
452 		sleepq_switch(wchan, pri);
453 		return (0);
454 	}
455 	thread_unlock(td);
456 	mtx_unlock_spin(&sc->sc_lock);
457 	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
458 		(void *)td, (long)p->p_pid, td->td_name);
459 	PROC_LOCK(p);
460 	ps = p->p_sigacts;
461 	mtx_lock(&ps->ps_mtx);
462 	sig = cursig(td);
463 	if (sig == -1) {
464 		mtx_unlock(&ps->ps_mtx);
465 		KASSERT((td->td_flags & TDF_SBDRY) != 0, ("lost TDF_SBDRY"));
466 		KASSERT(TD_SBDRY_INTR(td),
467 		    ("lost TDF_SERESTART or TDF_SEINTR"));
468 		KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
469 		    (TDF_SEINTR | TDF_SERESTART),
470 		    ("both TDF_SEINTR and TDF_SERESTART"));
471 		ret = TD_SBDRY_ERRNO(td);
472 	} else if (sig == 0) {
473 		mtx_unlock(&ps->ps_mtx);
474 		ret = thread_suspend_check(1);
475 		MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
476 	} else {
477 		if (SIGISMEMBER(ps->ps_sigintr, sig))
478 			ret = EINTR;
479 		else
480 			ret = ERESTART;
481 		mtx_unlock(&ps->ps_mtx);
482 	}
483 	/*
484 	 * Lock the per-process spinlock prior to dropping the PROC_LOCK
485 	 * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
486 	 * thread_lock() are currently held in tdsendsignal().
487 	 */
488 	PROC_SLOCK(p);
489 	mtx_lock_spin(&sc->sc_lock);
490 	PROC_UNLOCK(p);
491 	thread_lock(td);
492 	PROC_SUNLOCK(p);
493 	if (ret == 0) {
494 		sleepq_switch(wchan, pri);
495 		return (0);
496 	}
497 out:
498 	/*
499 	 * There were pending signals and this thread is still
500 	 * on the sleep queue, so remove it from the sleep queue.
501 	 */
502 	if (TD_ON_SLEEPQ(td)) {
503 		sq = sleepq_lookup(wchan);
504 		if (sleepq_resume_thread(sq, td, 0)) {
505 #ifdef INVARIANTS
506 			/*
507 			 * This thread hasn't gone to sleep yet, so it
508 			 * should not be swapped out.
509 			 */
510 			panic("not waking up swapper");
511 #endif
512 		}
513 	}
514 	mtx_unlock_spin(&sc->sc_lock);
515 	MPASS(td->td_lock != &sc->sc_lock);
516 	return (ret);
517 }
518 
519 /*
520  * Switches to another thread if we are still asleep on a sleep queue.
521  * Returns with the thread lock held.
522  */
523 static void
524 sleepq_switch(void *wchan, int pri)
525 {
526 	struct sleepqueue_chain *sc;
527 	struct sleepqueue *sq;
528 	struct thread *td;
529 
530 	td = curthread;
531 	sc = SC_LOOKUP(wchan);
532 	mtx_assert(&sc->sc_lock, MA_OWNED);
533 	THREAD_LOCK_ASSERT(td, MA_OWNED);
534 
535 	/*
536 	 * If we have a sleep queue, then we've already been woken up, so
537 	 * just return.
538 	 */
539 	if (td->td_sleepqueue != NULL) {
540 		mtx_unlock_spin(&sc->sc_lock);
541 		return;
542 	}
543 
544 	/*
545 	 * If TDF_TIMEOUT is set, then our sleep has been timed out
546 	 * already but we are still on the sleep queue, so dequeue the
547 	 * thread and return.
548 	 */
549 	if (td->td_flags & TDF_TIMEOUT) {
550 		MPASS(TD_ON_SLEEPQ(td));
551 		sq = sleepq_lookup(wchan);
552 		if (sleepq_resume_thread(sq, td, 0)) {
553 #ifdef INVARIANTS
554 			/*
555 			 * This thread hasn't gone to sleep yet, so it
556 			 * should not be swapped out.
557 			 */
558 			panic("not waking up swapper");
559 #endif
560 		}
561 		mtx_unlock_spin(&sc->sc_lock);
562 		return;
563 	}
564 #ifdef SLEEPQUEUE_PROFILING
565 	if (prof_enabled)
566 		sleepq_profile(td->td_wmesg);
567 #endif
568 	MPASS(td->td_sleepqueue == NULL);
569 	sched_sleep(td, pri);
570 	thread_lock_set(td, &sc->sc_lock);
571 	SDT_PROBE0(sched, , , sleep);
572 	TD_SET_SLEEPING(td);
573 	mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
574 	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
575 	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
576 	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
577 }
578 
579 /*
580  * Check to see if we timed out.
581  */
582 static int
583 sleepq_check_timeout(void)
584 {
585 	struct thread *td;
586 	int res;
587 
588 	td = curthread;
589 	THREAD_LOCK_ASSERT(td, MA_OWNED);
590 
591 	/*
592 	 * If TDF_TIMEOUT is set, we timed out.  But recheck
593 	 * td_sleeptimo anyway.
594 	 */
595 	res = 0;
596 	if (td->td_sleeptimo != 0) {
597 		if (td->td_sleeptimo <= sbinuptime())
598 			res = EWOULDBLOCK;
599 		td->td_sleeptimo = 0;
600 	}
601 	if (td->td_flags & TDF_TIMEOUT)
602 		td->td_flags &= ~TDF_TIMEOUT;
603 	else
604 		/*
605 		 * We ignore the situation where timeout subsystem was
606 		 * unable to stop our callout.  The struct thread is
607 		 * type-stable, the callout will use the correct
608 		 * memory when running.  The checks of the
609 		 * td_sleeptimo value in this function and in
610 		 * sleepq_timeout() ensure that the thread does not
611 		 * get spurious wakeups, even if the callout was reset
612 		 * or thread reused.
613 		 */
614 		callout_stop(&td->td_slpcallout);
615 	return (res);
616 }
617 
618 /*
619  * Check to see if we were awoken by a signal.
620  */
621 static int
622 sleepq_check_signals(void)
623 {
624 	struct thread *td;
625 
626 	td = curthread;
627 	THREAD_LOCK_ASSERT(td, MA_OWNED);
628 
629 	/* We are no longer in an interruptible sleep. */
630 	if (td->td_flags & TDF_SINTR)
631 		td->td_flags &= ~TDF_SINTR;
632 
633 	if (td->td_flags & TDF_SLEEPABORT) {
634 		td->td_flags &= ~TDF_SLEEPABORT;
635 		return (td->td_intrval);
636 	}
637 
638 	return (0);
639 }
640 
641 /*
642  * Block the current thread until it is awakened from its sleep queue.
643  */
644 void
645 sleepq_wait(void *wchan, int pri)
646 {
647 	struct thread *td;
648 
649 	td = curthread;
650 	MPASS(!(td->td_flags & TDF_SINTR));
651 	thread_lock(td);
652 	sleepq_switch(wchan, pri);
653 	thread_unlock(td);
654 }
655 
656 /*
657  * Block the current thread until it is awakened from its sleep queue
658  * or it is interrupted by a signal.
659  */
660 int
661 sleepq_wait_sig(void *wchan, int pri)
662 {
663 	int rcatch;
664 	int rval;
665 
666 	rcatch = sleepq_catch_signals(wchan, pri);
667 	rval = sleepq_check_signals();
668 	thread_unlock(curthread);
669 	if (rcatch)
670 		return (rcatch);
671 	return (rval);
672 }
673 
674 /*
675  * Block the current thread until it is awakened from its sleep queue
676  * or it times out while waiting.
677  */
678 int
679 sleepq_timedwait(void *wchan, int pri)
680 {
681 	struct thread *td;
682 	int rval;
683 
684 	td = curthread;
685 	MPASS(!(td->td_flags & TDF_SINTR));
686 	thread_lock(td);
687 	sleepq_switch(wchan, pri);
688 	rval = sleepq_check_timeout();
689 	thread_unlock(td);
690 
691 	return (rval);
692 }
693 
694 /*
695  * Block the current thread until it is awakened from its sleep queue,
696  * it is interrupted by a signal, or it times out waiting to be awakened.
697  */
698 int
699 sleepq_timedwait_sig(void *wchan, int pri)
700 {
701 	int rcatch, rvalt, rvals;
702 
703 	rcatch = sleepq_catch_signals(wchan, pri);
704 	rvalt = sleepq_check_timeout();
705 	rvals = sleepq_check_signals();
706 	thread_unlock(curthread);
707 	if (rcatch)
708 		return (rcatch);
709 	if (rvals)
710 		return (rvals);
711 	return (rvalt);
712 }
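
/*
 * To summarize the four wait variants above: sleepq_wait() returns
 * nothing, sleepq_wait_sig() returns 0 or the signal error (EINTR or
 * ERESTART), and the timed variants may additionally return
 * EWOULDBLOCK on timeout.  When a signal and a timeout race, the
 * signal error wins, per the ordering of the checks in
 * sleepq_timedwait_sig().
 */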
713 
714 /*
715  * Returns the type of sleep queue given a wait channel.
716  */
717 int
718 sleepq_type(void *wchan)
719 {
720 	struct sleepqueue *sq;
721 	int type;
722 
723 	MPASS(wchan != NULL);
724 
725 	sleepq_lock(wchan);
726 	sq = sleepq_lookup(wchan);
727 	if (sq == NULL) {
728 		sleepq_release(wchan);
729 		return (-1);
730 	}
731 	type = sq->sq_type;
732 	sleepq_release(wchan);
733 	return (type);
734 }
735 
736 /*
737  * Removes a thread from a sleep queue and makes it runnable.  Returns
738  * non-zero if the swapper process needs to be awakened.
739  */
740 static int
741 sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
742 {
743 	struct sleepqueue_chain *sc;
744 
745 	MPASS(td != NULL);
746 	MPASS(sq->sq_wchan != NULL);
747 	MPASS(td->td_wchan == sq->sq_wchan);
748 	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
749 	THREAD_LOCK_ASSERT(td, MA_OWNED);
750 	sc = SC_LOOKUP(sq->sq_wchan);
751 	mtx_assert(&sc->sc_lock, MA_OWNED);
752 
753 	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);
754 
755 	/* Remove the thread from the queue. */
756 	sq->sq_blockedcnt[td->td_sqqueue]--;
757 	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);
758 
759 	/*
760 	 * Get a sleep queue for this thread.  If this is the last waiter,
761 	 * use the queue itself and take it out of the chain, otherwise,
762 	 * remove a queue from the free list.
763 	 */
764 	if (LIST_EMPTY(&sq->sq_free)) {
765 		td->td_sleepqueue = sq;
766 #ifdef INVARIANTS
767 		sq->sq_wchan = NULL;
768 #endif
769 #ifdef SLEEPQUEUE_PROFILING
770 		sc->sc_depth--;
771 #endif
772 	} else
773 		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
774 	LIST_REMOVE(td->td_sleepqueue, sq_hash);
775 
776 	td->td_wmesg = NULL;
777 	td->td_wchan = NULL;
778 	td->td_flags &= ~TDF_SINTR;
779 
780 	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
781 	    (void *)td, (long)td->td_proc->p_pid, td->td_name);
782 
783 	/* Adjust priority if requested. */
784 	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
785 	if (pri != 0 && td->td_priority > pri &&
786 	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
787 		sched_prio(td, pri);
788 
789 	/*
790 	 * Note that thread td might not be sleeping if it is running
791 	 * sleepq_catch_signals() on another CPU or is blocked on its
792 	 * proc lock to check signals.  There's no need to mark the
793 	 * thread runnable in that case.
794 	 */
795 	if (TD_IS_SLEEPING(td)) {
796 		TD_CLR_SLEEPING(td);
797 		return (setrunnable(td));
798 	}
799 	return (0);
800 }
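
/*
 * Hypothetical example of the priority hand-off above: a waker calling
 * sleepq_signal(wchan, SLEEPQ_SLEEP, PSOCK, 0) boosts a timesharing
 * thread sleeping at a numerically larger (weaker) priority up to
 * PSOCK before it runs; a pri of 0 requests no adjustment.
 */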
801 
802 #ifdef INVARIANTS
803 /*
804  * UMA zone item deallocator.
805  */
806 static void
807 sleepq_dtor(void *mem, int size, void *arg)
808 {
809 	struct sleepqueue *sq;
810 	int i;
811 
812 	sq = mem;
813 	for (i = 0; i < NR_SLEEPQS; i++) {
814 		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
815 		MPASS(sq->sq_blockedcnt[i] == 0);
816 	}
817 }
818 #endif
819 
820 /*
821  * UMA zone item initializer.
822  */
823 static int
824 sleepq_init(void *mem, int size, int flags)
825 {
826 	struct sleepqueue *sq;
827 	int i;
828 
829 	bzero(mem, size);
830 	sq = mem;
831 	for (i = 0; i < NR_SLEEPQS; i++) {
832 		TAILQ_INIT(&sq->sq_blocked[i]);
833 		sq->sq_blockedcnt[i] = 0;
834 	}
835 	LIST_INIT(&sq->sq_free);
836 	return (0);
837 }
838 
839 /*
840  * Find the highest priority thread sleeping on a wait channel and resume it.
841  */
842 int
843 sleepq_signal(void *wchan, int flags, int pri, int queue)
844 {
845 	struct sleepqueue *sq;
846 	struct thread *td, *besttd;
847 	int wakeup_swapper;
848 
849 	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
850 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
851 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
852 	sq = sleepq_lookup(wchan);
853 	if (sq == NULL)
854 		return (0);
855 	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
856 	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
857 
858 	/*
859 	 * Find the highest priority thread on the queue.  If there is a
860 	 * tie, use the thread that first appears in the queue as it has
861 	 * been sleeping the longest since threads are always added to
862 	 * the tail of sleep queues.
863 	 */
864 	besttd = TAILQ_FIRST(&sq->sq_blocked[queue]);
865 	TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
866 		if (td->td_priority < besttd->td_priority)
867 			besttd = td;
868 	}
869 	MPASS(besttd != NULL);
870 	thread_lock(besttd);
871 	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
872 	thread_unlock(besttd);
873 	return (wakeup_swapper);
874 }
875 
876 /*
877  * Resume all threads sleeping on a specified wait channel.
878  */
879 int
880 sleepq_broadcast(void *wchan, int flags, int pri, int queue)
881 {
882 	struct sleepqueue *sq;
883 	struct thread *td, *tdn;
884 	int wakeup_swapper;
885 
886 	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
887 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
888 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
889 	sq = sleepq_lookup(wchan);
890 	if (sq == NULL)
891 		return (0);
892 	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
893 	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
894 
895 	/*
896 	 * Resume all blocked threads on the sleep queue.  The last thread will
897 	 * be given ownership of sq and may re-enqueue itself before
898 	 * sleepq_resume_thread() returns, so we must cache the "next" queue
899 	 * item at the beginning of the final iteration.
900 	 */
901 	wakeup_swapper = 0;
902 	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
903 		thread_lock(td);
904 		wakeup_swapper |= sleepq_resume_thread(sq, td, pri);
905 		thread_unlock(td);
906 	}
907 	return (wakeup_swapper);
908 }
909 
910 /*
911  * Time sleeping threads out.  When the timeout expires, the thread is
912  * removed from the sleep queue and made runnable if it is still asleep.
913  */
914 static void
915 sleepq_timeout(void *arg)
916 {
917 	struct sleepqueue_chain *sc;
918 	struct sleepqueue *sq;
919 	struct thread *td;
920 	void *wchan;
921 	int wakeup_swapper;
922 
923 	td = arg;
924 	wakeup_swapper = 0;
925 	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
926 	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
927 
928 	thread_lock(td);
929 
930 	if (td->td_sleeptimo > sbinuptime() || td->td_sleeptimo == 0) {
931 		/*
932 		 * The thread does not want a timeout (yet).
933 		 */
934 	} else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
935 		/*
936 		 * See if the thread is asleep and get the wait
937 		 * channel if it is.
938 		 */
939 		wchan = td->td_wchan;
940 		sc = SC_LOOKUP(wchan);
941 		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
942 		sq = sleepq_lookup(wchan);
943 		MPASS(sq != NULL);
944 		td->td_flags |= TDF_TIMEOUT;
945 		wakeup_swapper = sleepq_resume_thread(sq, td, 0);
946 	} else if (TD_ON_SLEEPQ(td)) {
947 		/*
948 		 * If the thread is on the SLEEPQ but isn't sleeping
949 		 * yet, it can either be on another CPU in between
950 		 * sleepq_add() and one of the sleepq_*wait*()
951 		 * routines or it can be in sleepq_catch_signals().
952 		 */
953 		td->td_flags |= TDF_TIMEOUT;
954 	}
955 
956 	thread_unlock(td);
957 	if (wakeup_swapper)
958 		kick_proc0();
959 }
960 
961 /*
962  * Resumes a specific thread from the sleep queue associated with a specific
963  * wait channel if it is on that queue.
964  */
965 void
966 sleepq_remove(struct thread *td, void *wchan)
967 {
968 	struct sleepqueue *sq;
969 	int wakeup_swapper;
970 
971 	/*
972 	 * Look up the sleep queue for this wait channel, then re-check
973 	 * that the thread is asleep on that channel; if it is not, then
974 	 * bail.
975 	 */
976 	MPASS(wchan != NULL);
977 	sleepq_lock(wchan);
978 	sq = sleepq_lookup(wchan);
979 	/*
980 	 * We cannot lock the thread here as it may be sleeping on a
981 	 * different sleepq.  However, holding the sleepq lock for this
982 	 * wchan can guarantee that we do not miss a wakeup for this
983 	 * channel.  The asserts below will catch any false positives.
984 	 */
985 	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
986 		sleepq_release(wchan);
987 		return;
988 	}
989 	/* Thread is asleep on sleep queue sq, so wake it up. */
990 	thread_lock(td);
991 	MPASS(sq != NULL);
992 	MPASS(td->td_wchan == wchan);
993 	wakeup_swapper = sleepq_resume_thread(sq, td, 0);
994 	thread_unlock(td);
995 	sleepq_release(wchan);
996 	if (wakeup_swapper)
997 		kick_proc0();
998 }
999 
1000 /*
1001  * Abort a thread as if an interrupt had occurred.  Only abort
1002  * interruptible waits (unfortunately it isn't safe to abort others).
1003  */
1004 int
1005 sleepq_abort(struct thread *td, int intrval)
1006 {
1007 	struct sleepqueue *sq;
1008 	void *wchan;
1009 
1010 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1011 	MPASS(TD_ON_SLEEPQ(td));
1012 	MPASS(td->td_flags & TDF_SINTR);
1013 	MPASS(intrval == EINTR || intrval == ERESTART);
1014 
1015 	/*
1016 	 * If the TDF_TIMEOUT flag is set, just leave. A
1017 	 * timeout is scheduled anyhow.
1018 	 */
1019 	if (td->td_flags & TDF_TIMEOUT)
1020 		return (0);
1021 
1022 	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
1023 	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
1024 	td->td_intrval = intrval;
1025 	td->td_flags |= TDF_SLEEPABORT;
1026 	/*
1027 	 * If the thread has not slept yet it will find the signal in
1028 	 * sleepq_catch_signals() and call sleepq_resume_thread().  Otherwise
1029 	 * we have to do it here.
1030 	 */
1031 	if (!TD_IS_SLEEPING(td))
1032 		return (0);
1033 	wchan = td->td_wchan;
1034 	MPASS(wchan != NULL);
1035 	sq = sleepq_lookup(wchan);
1036 	MPASS(sq != NULL);
1037 
1038 	/* Thread is asleep on sleep queue sq, so wake it up. */
1039 	return (sleepq_resume_thread(sq, td, 0));
1040 }
1041 
1042 /*
1043  * Prints the stacks of all threads presently sleeping on wchan/queue to
1044  * the sbuf sb.  Sets count_stacks_printed to the number of stacks actually
1045  * printed.  Typically, this will equal the number of threads sleeping on the
1046  * queue, but may be less if sb overflowed before all stacks were printed.
1047  */
1048 #ifdef STACK
1049 int
1050 sleepq_sbuf_print_stacks(struct sbuf *sb, void *wchan, int queue,
1051     int *count_stacks_printed)
1052 {
1053 	struct thread *td, *td_next;
1054 	struct sleepqueue *sq;
1055 	struct stack **st;
1056 	struct sbuf **td_infos;
1057 	int i, stack_idx, error, stacks_to_allocate;
1058 	bool finished, partial_print;
1059 
1060 	error = 0;
1061 	finished = false;
1062 	partial_print = false;
1063 
1064 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
1065 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
1066 
1067 	stacks_to_allocate = 10;
1068 	for (i = 0; i < 3 && !finished; i++) {
1069 		/* We cannot malloc while holding the queue's spinlock, so
1070 		 * we do our mallocs now, and hope it is enough.  If it
1071 		 * isn't, we will free these, drop the lock, malloc more,
1072 		 * and try again, up to a point.  After that point we will
1073 		 * give up and report ENOMEM. We also cannot write to sb
1074 		 * during this time since the client may have set the
1075 		 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a
1076 		 * malloc as we print to it.  So we defer actually printing
1077 		 * to sb until after we drop the spinlock.
1078 		 */
1079 
1080 		/* Where we will store the stacks. */
1081 		st = malloc(sizeof(struct stack *) * stacks_to_allocate,
1082 		    M_TEMP, M_WAITOK);
1083 		for (stack_idx = 0; stack_idx < stacks_to_allocate;
1084 		    stack_idx++)
1085 			st[stack_idx] = stack_create();
1086 
1087 		/* Where we will store the td name, tid, etc. */
1088 		td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate,
1089 		    M_TEMP, M_WAITOK);
1090 		for (stack_idx = 0; stack_idx < stacks_to_allocate;
1091 		    stack_idx++)
1092 			td_infos[stack_idx] = sbuf_new(NULL, NULL,
1093 			    MAXCOMLEN + sizeof(struct thread *) * 2 + 40,
1094 			    SBUF_FIXEDLEN);
1095 
1096 		sleepq_lock(wchan);
1097 		sq = sleepq_lookup(wchan);
1098 		if (sq == NULL) {
1099 			/* This sleepq does not exist; exit and return ENOENT. */
1100 			error = ENOENT;
1101 			finished = true;
1102 			sleepq_release(wchan);
1103 			goto loop_end;
1104 		}
1105 
1106 		stack_idx = 0;
1107 		/* Save thread info */
1108 		TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq,
1109 		    td_next) {
1110 			if (stack_idx >= stacks_to_allocate)
1111 				goto loop_end;
1112 
1113 			/* Note the td_lock is equal to the sleepq_lock here. */
1114 			stack_save_td(st[stack_idx], td);
1115 
1116 			sbuf_printf(td_infos[stack_idx], "%d: %s %p",
1117 			    td->td_tid, td->td_name, td);
1118 
1119 			++stack_idx;
1120 		}
1121 
1122 		finished = true;
1123 		sleepq_release(wchan);
1124 
1125 		/* Print the stacks */
1126 		for (i = 0; i < stack_idx; i++) {
1127 			sbuf_finish(td_infos[i]);
1128 			sbuf_printf(sb, "--- thread %s: ---\n", sbuf_data(td_infos[i]));
1129 			stack_sbuf_print(sb, st[i]);
1130 			sbuf_printf(sb, "\n");
1131 
1132 			error = sbuf_error(sb);
1133 			if (error == 0)
1134 				*count_stacks_printed = stack_idx;
1135 		}
1136 
1137 loop_end:
1138 		if (!finished)
1139 			sleepq_release(wchan);
1140 		for (stack_idx = 0; stack_idx < stacks_to_allocate;
1141 		    stack_idx++)
1142 			stack_destroy(st[stack_idx]);
1143 		for (stack_idx = 0; stack_idx < stacks_to_allocate;
1144 		    stack_idx++)
1145 			sbuf_delete(td_infos[stack_idx]);
1146 		free(st, M_TEMP);
1147 		free(td_infos, M_TEMP);
1148 		stacks_to_allocate *= 10;
1149 	}
1150 
1151 	if (!finished && error == 0)
1152 		error = ENOMEM;
1153 
1154 	return (error);
1155 }
1156 #endif
1157 
1158 #ifdef SLEEPQUEUE_PROFILING
1159 #define	SLEEPQ_PROF_LOCATIONS	1024
1160 #define	SLEEPQ_SBUFSIZE		512
1161 struct sleepq_prof {
1162 	LIST_ENTRY(sleepq_prof) sp_link;
1163 	const char	*sp_wmesg;
1164 	long		sp_count;
1165 };
1166 
1167 LIST_HEAD(sqphead, sleepq_prof);
1168 
1169 struct sqphead sleepq_prof_free;
1170 struct sqphead sleepq_hash[SC_TABLESIZE];
1171 static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
1172 static struct mtx sleepq_prof_lock;
1173 MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);
1174 
1175 static void
1176 sleepq_profile(const char *wmesg)
1177 {
1178 	struct sleepq_prof *sp;
1179 
1180 	mtx_lock_spin(&sleepq_prof_lock);
1181 	if (prof_enabled == 0)
1182 		goto unlock;
1183 	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
1184 		if (sp->sp_wmesg == wmesg)
1185 			goto done;
1186 	sp = LIST_FIRST(&sleepq_prof_free);
1187 	if (sp == NULL)
1188 		goto unlock;
1189 	sp->sp_wmesg = wmesg;
1190 	LIST_REMOVE(sp, sp_link);
1191 	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
1192 done:
1193 	sp->sp_count++;
1194 unlock:
1195 	mtx_unlock_spin(&sleepq_prof_lock);
1196 	return;
1197 }
1198 
1199 static void
1200 sleepq_prof_reset(void)
1201 {
1202 	struct sleepq_prof *sp;
1203 	int enabled;
1204 	int i;
1205 
1206 	mtx_lock_spin(&sleepq_prof_lock);
1207 	enabled = prof_enabled;
1208 	prof_enabled = 0;
1209 	for (i = 0; i < SC_TABLESIZE; i++)
1210 		LIST_INIT(&sleepq_hash[i]);
1211 	LIST_INIT(&sleepq_prof_free);
1212 	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
1213 		sp = &sleepq_profent[i];
1214 		sp->sp_wmesg = NULL;
1215 		sp->sp_count = 0;
1216 		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
1217 	}
1218 	prof_enabled = enabled;
1219 	mtx_unlock_spin(&sleepq_prof_lock);
1220 }
1221 
1222 static int
1223 enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
1224 {
1225 	int error, v;
1226 
1227 	v = prof_enabled;
1228 	error = sysctl_handle_int(oidp, &v, v, req);
1229 	if (error)
1230 		return (error);
1231 	if (req->newptr == NULL)
1232 		return (error);
1233 	if (v == prof_enabled)
1234 		return (0);
1235 	if (v == 1)
1236 		sleepq_prof_reset();
1237 	mtx_lock_spin(&sleepq_prof_lock);
1238 	prof_enabled = !!v;
1239 	mtx_unlock_spin(&sleepq_prof_lock);
1240 
1241 	return (0);
1242 }
1243 
1244 static int
1245 reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
1246 {
1247 	int error, v;
1248 
1249 	v = 0;
1250 	error = sysctl_handle_int(oidp, &v, 0, req);
1251 	if (error)
1252 		return (error);
1253 	if (req->newptr == NULL)
1254 		return (error);
1255 	if (v == 0)
1256 		return (0);
1257 	sleepq_prof_reset();
1258 
1259 	return (0);
1260 }
1261 
1262 static int
1263 dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
1264 {
1265 	struct sleepq_prof *sp;
1266 	struct sbuf *sb;
1267 	int enabled;
1268 	int error;
1269 	int i;
1270 
1271 	error = sysctl_wire_old_buffer(req, 0);
1272 	if (error != 0)
1273 		return (error);
1274 	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
1275 	sbuf_printf(sb, "\nwmesg\tcount\n");
1276 	enabled = prof_enabled;
1277 	mtx_lock_spin(&sleepq_prof_lock);
1278 	prof_enabled = 0;
1279 	mtx_unlock_spin(&sleepq_prof_lock);
1280 	for (i = 0; i < SC_TABLESIZE; i++) {
1281 		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
1282 			sbuf_printf(sb, "%s\t%ld\n",
1283 			    sp->sp_wmesg, sp->sp_count);
1284 		}
1285 	}
1286 	mtx_lock_spin(&sleepq_prof_lock);
1287 	prof_enabled = enabled;
1288 	mtx_unlock_spin(&sleepq_prof_lock);
1289 
1290 	error = sbuf_finish(sb);
1291 	sbuf_delete(sb);
1292 	return (error);
1293 }
1294 
1295 SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
1296     NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
1297 SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
1298     NULL, 0, reset_sleepq_prof_stats, "I",
1299     "Reset sleepqueue profiling statistics");
1300 SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
1301     NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
1302 #endif
1303 
1304 #ifdef DDB
1305 DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
1306 {
1307 	struct sleepqueue_chain *sc;
1308 	struct sleepqueue *sq;
1309 #ifdef INVARIANTS
1310 	struct lock_object *lock;
1311 #endif
1312 	struct thread *td;
1313 	void *wchan;
1314 	int i;
1315 
1316 	if (!have_addr)
1317 		return;
1318 
1319 	/*
1320 	 * First, see if there is an active sleep queue for the wait channel
1321 	 * indicated by the address.
1322 	 */
1323 	wchan = (void *)addr;
1324 	sc = SC_LOOKUP(wchan);
1325 	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
1326 		if (sq->sq_wchan == wchan)
1327 			goto found;
1328 
1329 	/*
1330 	 * Second, see if there is an active sleep queue at the address
1331 	 * indicated.
1332 	 */
1333 	for (i = 0; i < SC_TABLESIZE; i++)
1334 		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
1335 			if (sq == (struct sleepqueue *)addr)
1336 				goto found;
1337 		}
1338 
1339 	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
1340 	return;
1341 found:
1342 	db_printf("Wait channel: %p\n", sq->sq_wchan);
1343 	db_printf("Queue type: %d\n", sq->sq_type);
1344 #ifdef INVARIANTS
1345 	if (sq->sq_lock) {
1346 		lock = sq->sq_lock;
1347 		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
1348 		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
1349 	}
1350 #endif
1351 	db_printf("Blocked threads:\n");
1352 	for (i = 0; i < NR_SLEEPQS; i++) {
1353 		db_printf("\nQueue[%d]:\n", i);
1354 		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
1355 			db_printf("\tempty\n");
1356 		else
1357 			TAILQ_FOREACH(td, &sq->sq_blocked[i],
1358 				      td_slpq) {
1359 				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
1360 					  td->td_tid, td->td_proc->p_pid,
1361 					  td->td_name);
1362 			}
1363 		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
1364 	}
1365 }
1366 
1367 /* Alias 'show sleepqueue' to 'show sleepq'. */
1368 DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
1369 #endif
1370