/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold a queue of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues also provide a timeout and can be interrupted by signals.
 * That said, there are several similarities between the turnstile and
 * sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Additional functionality provided by sleep queues includes the ability
 * to set a timeout.  The timeout is managed using a per-thread callout
 * that resumes a thread if it is asleep.  A thread may also catch signals
 * while it is asleep (an interruptible sleep).  The signal code uses
 * sleepq_abort() to interrupt a sleeping thread.  Finally, sleep queues
 * also provide some extra assertions.  One is not allowed to mix the
 * sleep/wakeup and cv APIs for a given wait channel.  Also, one must
 * consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
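
/*
 * As an illustration of how the functions below compose, here is a
 * minimal sketch of an interruptible, timed sleep in the style of a
 * caller such as msleep().  The wrapper name 'example_sleep' and its
 * arguments are hypothetical, timo is assumed to be non-zero, and the
 * interlock handling of a real msleep() is omitted:
 *
 *	static int
 *	example_sleep(void *wchan, struct mtx *lock, int timo)
 *	{
 *		int sig, rval;
 *
 *		sleepq_lock(wchan);
 *		sleepq_add(wchan, lock, "example", SLEEPQ_INTERRUPTIBLE);
 *		sleepq_set_timeout(wchan, timo);
 *		sig = sleepq_catch_signals(wchan);
 *		rval = sleepq_timedwait_sig(wchan, sig != 0);
 *		if (rval == 0)
 *			rval = sleepq_calc_signal_retval(sig);
 *		return (rval);
 *	}
 */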

#include "opt_sleepqueue_profiling.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

/*
 * Constants for the hash table of sleep queue chains.  These constants are
 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
 * Basically, we ignore the lower 8 bits of the address since most wait
 * channel pointers are aligned, and we only look at the next 7 bits for
 * the hash.  SC_TABLESIZE must be a power of two for SC_MASK to work
 * properly.
 */
#define	SC_TABLESIZE	128			/* Must be power of 2. */
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	(((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]

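/*
 * For example, for a (hypothetical) wait channel at address 0x12345678,
 * SC_HASH() discards the low SC_SHIFT (8) bits, giving 0x123456, and
 * masks the result with SC_MASK (0x7f), selecting chain 0x56 (86) of
 * the SC_TABLESIZE (128) chains.
 */
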
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked;	/* (c) Blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
#ifdef INVARIANTS
	int	sq_type;			/* (c) Queue type. */
	struct mtx *sq_lock;			/* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
};

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];

static MALLOC_DEFINE(M_SLEEPQUEUE, "sleep queues", "sleep queues");

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_check_timeout(void);
static void	sleepq_switch(void *wchan);
static void	sleepq_timeout(void *arg);
static void	sleepq_remove_thread(struct sleepqueue *sq, struct thread *td);
static void	sleepq_resume_thread(struct thread *td, int pri);

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
#ifdef SLEEPQUEUE_PROFILING
	struct sysctl_oid *chain_oid;
	char chain_name[10];
#endif
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN);
#ifdef SLEEPQUEUE_PROFILING
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
#endif
	}
	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Malloc and initialize a new sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{
	struct sleepqueue *sq;

	sq = malloc(sizeof(struct sleepqueue), M_SLEEPQUEUE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&sq->sq_blocked);
	LIST_INIT(&sq->sq_free);
	return (sq);
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	MPASS(sq != NULL);
	MPASS(TAILQ_EMPTY(&sq->sq_blocked));
	free(sq, M_SLEEPQUEUE);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the
 * hash table.  The associated sleep queue chain must already be locked
 * by the caller.  If no queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed-in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct mtx *lock, const char *wmesg, int flags)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		KASSERT(TAILQ_EMPTY(&sq->sq_blocked),
		    ("thread's sleep queue has a non-empty queue"));
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_wchan = wchan;
#ifdef INVARIANTS
		sq->sq_lock = lock;
		sq->sq_type = flags & SLEEPQ_TYPE;
#endif
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
	td->td_sleepqueue = NULL;
	mtx_lock_spin(&sched_lock);
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE)
		td->td_flags |= TDF_SINTR;
	mtx_unlock_spin(&sched_lock);
}
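
/*
 * To make the queue-lending scheme above concrete: if threads A and B
 * block on the same (hypothetical) wait channel in that order, A's sleep
 * queue is inserted on the chain and holds both threads on its sq_blocked
 * list, while B's now-unused queue is parked on that queue's sq_free
 * list.  A resumed thread reclaims a queue in sleepq_remove_thread():
 * the attached queue itself if it is the last waiter, or one from
 * sq_free otherwise.
 */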

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout(void *wchan, int timo)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	callout_reset(&td->td_slpcallout, timo, sleepq_timeout, td);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting the thread
 * to sleep.
 */
int
sleepq_catch_signals(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	int do_upcall;
	int sig;

	do_upcall = 0;
	td = curthread;
	p = td->td_proc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
	    (void *)td, (long)p->p_pid, p->p_comm);

	/* Mark thread as being in an interruptible sleep. */
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(TD_ON_SLEEPQ(td));
	sleepq_release(wchan);

	/* See if there are any pending signals for this thread. */
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	sig = cursig(td);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (sig == 0 && thread_suspend_check(1))
		sig = SIGSTOP;
	else
		do_upcall = thread_upcall_check(td);
	PROC_UNLOCK(p);

	/*
	 * If there were pending signals and this thread is still on
	 * the sleep queue, remove it from the sleep queue.  If the
	 * thread was removed from the sleep queue while we were blocked
	 * above, then clear TDF_SINTR before returning.
	 */
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td) && (sig != 0 || do_upcall != 0)) {
		mtx_unlock_spin(&sched_lock);
		sleepq_remove_thread(sq, td);
	} else {
		if (!TD_ON_SLEEPQ(td) && sig == 0)
			td->td_flags &= ~TDF_SINTR;
		mtx_unlock_spin(&sched_lock);
	}
	return (sig);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue and
 * drops the lock on the sleep queue chain.  Returns with sched_lock held.
 */
static void
sleepq_switch(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		MPASS(!TD_ON_SLEEPQ(td));
		mtx_unlock_spin(&sc->sc_lock);
		mtx_lock_spin(&sched_lock);
		return;
	}

	/*
	 * Otherwise, actually go to sleep.
	 */
	mtx_lock_spin(&sched_lock);
	mtx_unlock_spin(&sc->sc_lock);

	sched_sleep(td);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	td = curthread;

	/*
	 * If TDF_TIMEOUT is set, we timed out.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		return (EWOULDBLOCK);
	}

	/*
	 * If TDF_TIMOFAIL is set, the timeout ran after we had
	 * already been woken up.
	 */
	if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;

	/*
	 * If callout_stop() fails, then the timeout is running on
	 * another CPU, so synchronize with it to avoid having it
	 * accidentally wake up a subsequent sleep.
	 */
	else if (callout_stop(&td->td_slpcallout) == 0) {
		td->td_flags |= TDF_TIMEOUT;
		TD_SET_SLEEPING(td);
		mi_switch(SW_INVOL, NULL);
	}
	return (0);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	td = curthread;

	/*
	 * If TDF_SINTR is clear, then we were awakened while executing
	 * sleepq_catch_signals().
	 */
	if (!(td->td_flags & TDF_SINTR))
		return (0);

	/* We are no longer in an interruptible sleep. */
	td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_INTERRUPT)
		return (td->td_intrval);
	return (0);
}

/*
 * If we were in an interruptible sleep and we weren't interrupted and
 * didn't time out, check to see if there are any pending signals and
 * which return value we should use if so.  The return value from an
 * earlier call to sleepq_catch_signals() should be passed in as the
 * argument.
 */
int
sleepq_calc_signal_retval(int sig)
{
	struct thread *td;
	struct proc *p;
	int rval;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	/* XXX: Should we always be calling cursig()? */
	if (sig == 0)
		sig = cursig(td);
	if (sig != 0) {
		if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
			rval = EINTR;
		else
			rval = ERESTART;
	} else
		rval = 0;
	mtx_unlock(&p->p_sigacts->ps_mtx);
	PROC_UNLOCK(p);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan)
{

	MPASS(!(curthread->td_flags & TDF_SINTR));
	sleepq_switch(wchan);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan)
{
	int rval;

	sleepq_switch(wchan);
	rval = sleepq_check_signals();
	mtx_unlock_spin(&sched_lock);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan)
{
	int rval;

	MPASS(!(curthread->td_flags & TDF_SINTR));
	sleepq_switch(wchan);
	rval = sleepq_check_timeout();
	mtx_unlock_spin(&sched_lock);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int signal_caught)
{
	int rvalt, rvals;

	sleepq_switch(wchan);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	mtx_unlock_spin(&sched_lock);
	if (signal_caught || rvalt == 0)
		return (rvals);
	else
		return (rvalt);
}
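
/*
 * A sketch of how a caller chooses among the four wait variants above,
 * in the style of msleep(); the 'catch', 'timo', and 'sig' variables
 * are assumptions of this example:
 *
 *	if (catch && timo)
 *		rval = sleepq_timedwait_sig(wchan, sig != 0);
 *	else if (catch)
 *		rval = sleepq_wait_sig(wchan);
 *	else if (timo)
 *		rval = sleepq_timedwait(wchan);
 *	else {
 *		sleepq_wait(wchan);
 *		rval = 0;
 *	}
 */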

/*
 * Removes a thread from a sleep queue.
 */
static void
sleepq_remove_thread(struct sleepqueue *sq, struct thread *td)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	/* Remove the thread from the queue. */
	TAILQ_REMOVE(&sq->sq_blocked, td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain; otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	mtx_lock_spin(&sched_lock);
	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Resumes a thread that was asleep on a queue.
 */
static void
sleepq_resume_thread(struct thread *td, int pri)
{

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on
	 * its proc lock to check signals.  It doesn't hurt to clear
	 * the sleeping flag if it isn't set, though, so we just always
	 * do it.  However, we can't assert that it is set.
	 */
	mtx_lock_spin(&sched_lock);
	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_proc->p_comm);
	TD_CLR_SLEEPING(td);

	/* Adjust priority if requested. */
	MPASS(pri == -1 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != -1 && td->td_priority > pri)
		sched_prio(td, pri);
	setrunnable(td);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
void
sleepq_signal(void *wchan, int flags, int pri)
{
	struct sleepqueue *sq;
	struct thread *td, *besttd;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return;
	}
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue, as it has
	 * been sleeping the longest since threads are always added to
	 * the tail of sleep queues.
	 */
	besttd = NULL;
	TAILQ_FOREACH(td, &sq->sq_blocked, td_slpq) {
		if (besttd == NULL || td->td_priority < besttd->td_priority)
			besttd = td;
	}
	MPASS(besttd != NULL);
	sleepq_remove_thread(sq, besttd);
	sleepq_release(wchan);
	sleepq_resume_thread(besttd, pri);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
void
sleepq_broadcast(void *wchan, int flags, int pri)
{
	TAILQ_HEAD(, thread) list;
	struct sleepqueue *sq;
	struct thread *td;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return;
	}
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/* Move blocked threads from the sleep queue to a temporary list. */
	TAILQ_INIT(&list);
	while (!TAILQ_EMPTY(&sq->sq_blocked)) {
		td = TAILQ_FIRST(&sq->sq_blocked);
		sleepq_remove_thread(sq, td);
		TAILQ_INSERT_TAIL(&list, td, td_slpq);
	}
	sleepq_release(wchan);

	/* Resume all the threads on the temporary list. */
	while (!TAILQ_EMPTY(&list)) {
		td = TAILQ_FIRST(&list);
		TAILQ_REMOVE(&list, td, td_slpq);
		sleepq_resume_thread(td, pri);
	}
}

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;

	td = arg;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);

	/*
	 * First, see if the thread is asleep and get the wait channel if
	 * it is.
	 */
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td)) {
		wchan = td->td_wchan;
		mtx_unlock_spin(&sched_lock);
		sleepq_lock(wchan);
		sq = sleepq_lookup(wchan);
		mtx_lock_spin(&sched_lock);
	} else {
		wchan = NULL;
		sq = NULL;
	}

	/*
	 * At this point, if the thread is still on the sleep queue, we
	 * have that sleep queue locked, as it cannot migrate sleep queues
	 * while we dropped sched_lock.  If it had resumed and was on
	 * another CPU while the lock was dropped, it would have seen that
	 * TDF_TIMEOUT and TDF_TIMOFAIL are clear, and its call to
	 * callout_stop() (to stop this routine) would have failed, meaning
	 * that it would have already set TDF_TIMEOUT to synchronize with
	 * this function.
	 */
	if (TD_ON_SLEEPQ(td)) {
		MPASS(td->td_wchan == wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		mtx_unlock_spin(&sched_lock);
		sleepq_remove_thread(sq, td);
		sleepq_release(wchan);
		sleepq_resume_thread(td, -1);
		return;
	} else if (wchan != NULL)
		sleepq_release(wchan);

	/*
	 * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
	 * then the other thread has already yielded to us, so clear
	 * the flag and resume it.  If TDF_TIMEOUT is not set, then we
	 * know that the other thread is not on a sleep queue, but it
	 * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
	 * to let it know that the timeout has already run and doesn't
	 * need to be canceled.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_IS_SLEEPING(td));
		td->td_flags &= ~TDF_TIMEOUT;
		TD_CLR_SLEEPING(td);
		setrunnable(td);
	} else
		td->td_flags |= TDF_TIMOFAIL;
	mtx_unlock_spin(&sched_lock);
}
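
/*
 * A (hypothetical) two-CPU timeline summarizing how sleepq_timeout() and
 * sleepq_check_timeout() use TDF_TIMEOUT and TDF_TIMOFAIL to synchronize,
 * offered as an illustrative sketch of the comments above:
 *
 *	CPU 0 (sleeper)				CPU 1 (callout)
 *	wakes up normally and returns
 *	from mi_switch()			sleepq_timeout() begins
 *	callout_stop() fails (callout
 *	is running), so it sets
 *	TDF_TIMEOUT and switches out
 *						sees TDF_TIMEOUT set, clears
 *						it, and makes the thread
 *						runnable again
 *
 * Alternatively, if the callout runs to completion before the awakened
 * thread reaches sleepq_check_timeout(), it sets TDF_TIMOFAIL so that
 * sleepq_check_timeout() knows the timeout already ran and needs no
 * cancellation.
 */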

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not, bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	mtx_lock_spin(&sched_lock);
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		mtx_unlock_spin(&sched_lock);
		sleepq_release(wchan);
		return;
	}
	mtx_unlock_spin(&sched_lock);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	sleepq_remove_thread(sq, td);
	sleepq_release(wchan);
	sleepq_resume_thread(td, -1);
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 *
 * XXX: What in the world does the comment below mean?
 * Also, whatever the signal code does...
 */
void
sleepq_abort(struct thread *td)
{
	void *wchan;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return;

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
	wchan = td->td_wchan;
	mtx_unlock_spin(&sched_lock);
	sleepq_remove(td, wchan);
	mtx_lock_spin(&sched_lock);
}