/*
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold the queue of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.
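 * (These checks are implemented below via the sq_flags and sq_lock fields
 * that sleepq_add() records in the queue and that sleepq_signal() and
 * sleepq_broadcast() assert against.)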
 * The same lock must also be held when awakening threads, though that is
 * currently only enforced for condition variables.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>

/*
 * Constants for the hash table of sleep queue chains.  These constants are
 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
 * Basically, we ignore the lower 8 bits of the address since most wait
 * channel pointers are aligned and only look at the next 7 bits for the
 * hash.  SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define	SC_TABLESIZE	128			/* Must be power of 2. */
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	(((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]

/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked;	/* (c) Blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
	int	sq_flags;			/* (c) Flags. */
#ifdef INVARIANTS
	struct mtx *sq_lock;			/* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
};

static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];

MALLOC_DEFINE(M_SLEEPQUEUE, "sleep queues", "sleep queues");

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_check_timeout(void);
static void	sleepq_switch(void *wchan);
static void	sleepq_timeout(void *arg);
static void	sleepq_remove_thread(struct sleepqueue *sq, struct thread *td);
static void	sleepq_resume_thread(struct thread *td, int pri);

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN);
	}
	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Malloc and initialize a new sleep queue for a new thread.
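 *
 * (A hedged illustration, not copied from the thread code: thread creation
 * and destruction are expected to pair this with sleepq_free(), much as
 * init_sleepqueues() above does for thread0:
 *
 *	newtd->td_sleepqueue = sleepq_alloc();
 *	...
 *	sleepq_free(td->td_sleepqueue);
 *
 * where newtd is just a placeholder name for the thread being created.)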
 */
struct sleepqueue *
sleepq_alloc(void)
{
	struct sleepqueue *sq;

	sq = malloc(sizeof(struct sleepqueue), M_SLEEPQUEUE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&sq->sq_blocked);
	LIST_INIT(&sq->sq_free);
	return (sq);
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	MPASS(sq != NULL);
	MPASS(TAILQ_EMPTY(&sq->sq_blocked));
	free(sq, M_SLEEPQUEUE);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table, locking the associated sleep queue chain.  Return holding the sleep
 * queue chain lock.  If no queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(struct sleepqueue *sq, void *wchan, struct mtx *lock,
    const char *wmesg, int flags)
{
	struct sleepqueue_chain *sc;
	struct thread *td, *td1;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);

	/* If the passed in sleep queue is NULL, use this thread's queue. */
	if (sq == NULL) {
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		KASSERT(TAILQ_EMPTY(&sq->sq_blocked),
		    ("thread's sleep queue has a non-empty queue"));
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_wchan = wchan;
#ifdef INVARIANTS
		sq->sq_lock = lock;
#endif
		sq->sq_flags = flags;
		TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		TAILQ_FOREACH(td1, &sq->sq_blocked, td_slpq)
			if (td1->td_priority > td->td_priority)
				break;
		if (td1 != NULL)
			TAILQ_INSERT_BEFORE(td1, td, td_slpq);
		else
			TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	td->td_sleepqueue = NULL;
	mtx_lock_spin(&sched_lock);
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
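 *
 * (A hedged sketch of how the routines in this file combine, based only on
 * the functions defined here and not copied from any real caller; wchan,
 * lock, wmesg, flags, and timo are placeholders.  A timed, uninterruptible
 * sleep would look roughly like:
 *
 *	sq = sleepq_lookup(wchan);	(returns with the chain locked)
 *	sleepq_add(sq, wchan, lock, wmesg, flags);
 *	sleepq_set_timeout(wchan, timo);
 *	error = sleepq_timedwait(wchan, 0);
 *
 * where error is 0 if the thread was awakened and EWOULDBLOCK if the
 * timeout expired first.)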
 */
void
sleepq_set_timeout(void *wchan, int timo)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	callout_reset(&td->td_slpcallout, timo, sleepq_timeout, td);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.
 */
int
sleepq_catch_signals(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	int do_upcall;
	int sig;

	do_upcall = 0;
	td = curthread;
	p = td->td_proc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %d, %s)", td,
	    p->p_pid, p->p_comm);

	/* Mark thread as being in an interruptible sleep. */
	mtx_lock_spin(&sched_lock);
	MPASS(TD_ON_SLEEPQ(td));
	td->td_flags |= TDF_SINTR;
	mtx_unlock_spin(&sched_lock);
	sleepq_release(wchan);

	/* See if there are any pending signals for this thread. */
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	sig = cursig(td);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (sig == 0 && thread_suspend_check(1))
		sig = SIGSTOP;
	else
		do_upcall = thread_upcall_check(td);
	PROC_UNLOCK(p);

	/*
	 * If there were pending signals and this thread is still on
	 * the sleep queue, remove it from the sleep queue.
	 */
	sq = sleepq_lookup(wchan);
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td) && (sig != 0 || do_upcall != 0)) {
		mtx_unlock_spin(&sched_lock);
		sleepq_remove_thread(sq, td);
	} else
		mtx_unlock_spin(&sched_lock);
	return (sig);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue and
 * drops the lock on the sleep queue chain.  Returns with sched_lock held.
 */
static void
sleepq_switch(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		MPASS(!TD_ON_SLEEPQ(td));
		mtx_unlock_spin(&sc->sc_lock);
		mtx_lock_spin(&sched_lock);
		return;
	}

	/*
	 * Otherwise, actually go to sleep.
	 */
	mtx_lock_spin(&sched_lock);
	mtx_unlock_spin(&sc->sc_lock);

	sched_sleep(td);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %d, %s)", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	td = curthread;

	/*
	 * If TDF_TIMEOUT is set, we timed out.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		return (EWOULDBLOCK);
	}

	/*
	 * If TDF_TIMOFAIL is set, the timeout ran after we had
	 * already been woken up.
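	 * (sleepq_timeout() sets TDF_TIMOFAIL when the callout fires after
	 * this thread has already been removed from its sleep queue, so the
	 * timeout has already run and there is nothing left to cancel; all
	 * we do is clear the flag.  See the edge-case handling at the end
	 * of sleepq_timeout() below.)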
	 */
	if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;

	/*
	 * If callout_stop() fails, then the timeout is running on
	 * another CPU, so synchronize with it to avoid having it
	 * accidentally wake up a subsequent sleep.
	 */
	else if (callout_stop(&td->td_slpcallout) == 0) {
		td->td_flags |= TDF_TIMEOUT;
		TD_SET_SLEEPING(td);
		mi_switch(SW_INVOL);
	}
	return (0);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	td = curthread;

	/* We are no longer in an interruptible sleep. */
	td->td_flags &= ~TDF_SINTR;

	/* If we were interrupted, return td_intrval. */
	if (td->td_flags & TDF_INTERRUPT)
		return (td->td_intrval);
	return (0);
}

/*
 * If we were in an interruptible sleep and we weren't interrupted and
 * didn't timeout, check to see if there are any pending signals and
 * which return value we should use if so.  The return value from an
 * earlier call to sleepq_catch_signals() should be passed in as the
 * argument.
 */
int
sleepq_calc_signal_retval(int sig)
{
	struct thread *td;
	struct proc *p;
	int rval;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	/* XXX: Should we always be calling cursig()? */
	if (sig == 0)
		sig = cursig(td);
	if (sig != 0) {
		if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
			rval = EINTR;
		else
			rval = ERESTART;
	} else
		rval = 0;
	mtx_unlock(&p->p_sigacts->ps_mtx);
	PROC_UNLOCK(p);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan)
{

	sleepq_switch(wchan);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan)
{
	int rval;

	sleepq_switch(wchan);
	rval = sleepq_check_signals();
	mtx_unlock_spin(&sched_lock);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int signal_caught)
{
	int rval;

	sleepq_switch(wchan);
	rval = sleepq_check_timeout();
	mtx_unlock_spin(&sched_lock);
	if (signal_caught)
		return (0);
	else
		return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int signal_caught)
{
	int rvalt, rvals;

	sleepq_switch(wchan);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	mtx_unlock_spin(&sched_lock);
	if (signal_caught || rvalt == 0)
		return (rvals);
	else
		return (rvalt);
}

/*
 * Removes a thread from a sleep queue.
 */
static void
sleepq_remove_thread(struct sleepqueue *sq, struct thread *td)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	/* Remove the thread from the queue. */
	TAILQ_REMOVE(&sq->sq_blocked, td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain; otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	mtx_lock_spin(&sched_lock);
	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Resumes a thread that was asleep on a queue.
 */
static void
sleepq_resume_thread(struct thread *td, int pri)
{

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on
	 * its proc lock to check signals.  It doesn't hurt to clear
	 * the sleeping flag if it isn't set though, so we just always
	 * do it.  However, we can't assert that it is set.
	 */
	mtx_lock_spin(&sched_lock);
	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %d, %s)", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	TD_CLR_SLEEPING(td);

	/* Adjust priority if requested. */
	MPASS(pri == -1 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != -1 && td->td_priority > pri)
		td->td_priority = pri;
	setrunnable(td);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
void
sleepq_signal(void *wchan, int flags, int pri)
{
	struct sleepqueue *sq;
	struct thread *td;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return;
	}
	KASSERT(sq->sq_flags == flags,
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
	/* XXX: Do for all sleep queues eventually. */
	if (flags & SLEEPQ_CONDVAR)
		mtx_assert(sq->sq_lock, MA_OWNED);

	/* Remove first thread from queue and awaken it. */
	td = TAILQ_FIRST(&sq->sq_blocked);
	sleepq_remove_thread(sq, td);
	sleepq_release(wchan);
	sleepq_resume_thread(td, pri);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
void
sleepq_broadcast(void *wchan, int flags, int pri)
{
	TAILQ_HEAD(, thread) list;
	struct sleepqueue *sq;
	struct thread *td;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return;
	}
	KASSERT(sq->sq_flags == flags,
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
	/* XXX: Do for all sleep queues eventually. */
	if (flags & SLEEPQ_CONDVAR)
		mtx_assert(sq->sq_lock, MA_OWNED);

	/* Move blocked threads from the sleep queue to a temporary list. */
	TAILQ_INIT(&list);
	while (!TAILQ_EMPTY(&sq->sq_blocked)) {
		td = TAILQ_FIRST(&sq->sq_blocked);
		sleepq_remove_thread(sq, td);
		TAILQ_INSERT_TAIL(&list, td, td_slpq);
	}
	sleepq_release(wchan);

	/* Resume all the threads on the temporary list. */
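	/*
	 * (The chain lock was already dropped by sleepq_release() above;
	 * sleepq_resume_thread() takes sched_lock and calls setrunnable()
	 * on each thread on the list.)
	 */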
	while (!TAILQ_EMPTY(&list)) {
		td = TAILQ_FIRST(&list);
		TAILQ_REMOVE(&list, td, td_slpq);
		sleepq_resume_thread(td, pri);
	}
}

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;

	td = (struct thread *)arg;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %d, %s)",
	    td, td->td_proc->p_pid, td->td_proc->p_comm);

	/*
	 * First, see if the thread is asleep and get the wait channel if
	 * it is.
	 */
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td)) {
		wchan = td->td_wchan;
		mtx_unlock_spin(&sched_lock);
		sq = sleepq_lookup(wchan);
		mtx_lock_spin(&sched_lock);
	} else {
		wchan = NULL;
		sq = NULL;
	}

	/*
	 * At this point, if the thread is still on the sleep queue,
	 * we have that sleep queue locked as it cannot migrate sleep
	 * queues while we dropped sched_lock.  If it had resumed and
	 * was on another CPU while the lock was dropped, it would have
	 * seen that TDF_TIMEOUT and TDF_TIMOFAIL are clear and the
	 * call to callout_stop() to stop this routine would have failed,
	 * meaning that it would have already set TDF_TIMEOUT to
	 * synchronize with this function.
	 */
	if (TD_ON_SLEEPQ(td)) {
		MPASS(td->td_wchan == wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		mtx_unlock_spin(&sched_lock);
		sleepq_remove_thread(sq, td);
		sleepq_release(wchan);
		sleepq_resume_thread(td, -1);
		return;
	} else if (wchan != NULL)
		sleepq_release(wchan);

	/*
	 * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
	 * then the other thread has already yielded to us, so clear
	 * the flag and resume it.  If TDF_TIMEOUT is not set, then we
	 * know that the other thread is not on a sleep queue, but it
	 * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
	 * to let it know that the timeout has already run and doesn't
	 * need to be canceled.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_IS_SLEEPING(td));
		td->td_flags &= ~TDF_TIMEOUT;
		TD_CLR_SLEEPING(td);
		setrunnable(td);
	} else
		td->td_flags |= TDF_TIMOFAIL;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not, then
	 * bail.
	 */
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	mtx_lock_spin(&sched_lock);
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		mtx_unlock_spin(&sched_lock);
		sleepq_release(wchan);
		return;
	}
	mtx_unlock_spin(&sched_lock);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	sleepq_remove_thread(sq, td);
	sleepq_release(wchan);
	sleepq_resume_thread(td, -1);
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 *
 * XXX: What in the world does the comment below mean?
 * Also, whatever the signal code does...
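 *
 * (Judging only from the assertions below, not from any particular caller:
 * sched_lock must be held and the target thread must be in an interruptible
 * sleep, so a caller would look roughly like
 *
 *	mtx_lock_spin(&sched_lock);
 *	if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
 *		sleepq_abort(td);
 *	mtx_unlock_spin(&sched_lock);
 *
 * with td being the thread to interrupt.)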
 */
void
sleepq_abort(struct thread *td)
{
	void *wchan;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return;

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %d, %s)", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	wchan = td->td_wchan;
	mtx_unlock_spin(&sched_lock);
	sleepq_remove(td, wchan);
	mtx_lock_spin(&sched_lock);
}
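
/*
 * A hedged sketch of the wakeup side of the API above (not taken from the
 * sleep/wakeup or condition variable code): unlike the sleep side, the
 * waker does not call sleepq_lookup() itself, since sleepq_signal() and
 * sleepq_broadcast() look up the queue and lock the chain internally and
 * drop the chain lock before returning.  Waking all sleepers on a wait
 * channel is therefore roughly
 *
 *	sleepq_broadcast(wchan, flags, -1);
 *
 * where a pri argument of -1 leaves the awakened threads' priorities
 * untouched, and flags must match the flags passed to sleepq_add().
 */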