/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold a queue of threads blocked
 * on a wait channel.  Sleep queues are different from turnstiles in that
 * wait channels are not owned by anyone, so there is no priority
 * propagation.  Sleep queues can also provide a timeout and can also be
 * interrupted by signals.  That said, there are several similarities
 * between the turnstile and sleep queue implementations.  (Note:
 * turnstiles were implemented first.)  For example, both use a hash table
 * of the same size where each bucket is referred to as a "chain" that
 * contains both a spin lock and a linked list of queues.  An individual
 * queue is located by using a hash to pick a chain, locking the chain,
 * and then walking the chain searching for the queue.  This means that a
 * wait channel object does not need to embed its queue head just as locks
 * do not embed their turnstile queue head.  Threads also carry around a
 * sleep queue that they lend to the wait channel when blocking.  Just as
 * in turnstiles, the queue includes a free list of the sleep queues of
 * other threads blocked on the same wait channel in the case of multiple
 * waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
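/*
 * A minimal consumer sketch (the authoritative users are _sleep() and
 * wakeup() in kern_synch.c; "wchan", "lock", and "example" below are
 * placeholders supplied by the caller).  A thread blocks with:
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, lock, "example", SLEEPQ_SLEEP, 0);
 *	sleepq_wait(wchan, 0);			(consumes the chain lock)
 *
 * and another thread later wakes all sleepers with:
 *
 *	sleepq_lock(wchan);
 *	wakeup_swapper = sleepq_broadcast(wchan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(wchan);
 *	if (wakeup_swapper)
 *		kick_proc0();
 */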
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_sched.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#ifdef EPOCH_TRACE
#include <sys/epoch.h>
#endif

#include <machine/atomic.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.
 * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#ifndef SC_TABLESIZE
#define	SC_TABLESIZE	256
#endif
CTASSERT(powerof2(SC_TABLESIZE));
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
			    SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
#define	NR_SLEEPQS	2
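/*
 * Worked example of the hash, assuming the default SC_TABLESIZE of 256
 * (so SC_SHIFT == 8 and SC_MASK == 0xff) and a hypothetical wait channel
 * address.  Only the low byte survives the mask, so the XOR folds in the
 * next byte up to spread nearby channels across chains:
 *
 *	wc		= 0xffffffff8123a940
 *	wc >> SC_SHIFT	= 0x00ffffffff8123a9
 *	xor, low byte	= 0x40 ^ 0xa9 = 0xe9
 *	SC_HASH(wc)	= 0xe9, i.e. &sleepq_chains[233]
 */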
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	struct threadqueue sq_blocked[NR_SLEEPQS]; /* (c) Blocked threads. */
	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	const void	*sq_wchan;		/* (c) Wait channel. */
	int		sq_type;		/* (c) Queue type. */
#ifdef INVARIANTS
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
} __aligned(CACHE_LINE_SIZE);

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(const void *wchan, int pri);
static inline int sleepq_check_signals(void);
static inline int sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri, int srqflags);
static void	sleepq_remove_thread(struct sleepqueue *sq, struct thread *td);
static void	sleepq_switch(const void *wchan, int pri);
static void	sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);

/*
 * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes.
 * Note that it must happen after sleepinit() has been fully executed, so
 * it must happen after SI_SUB_KMEM SYSINIT() subsystem setup.
 */
#ifdef SLEEPQUEUE_PROFILING
static void
init_sleepqueue_profiling(void)
{
	char chain_name[10];
	struct sysctl_oid *chain_oid;
	u_int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%u", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
	}
}

SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_sleepqueue_profiling, NULL);
#endif

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN);
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}
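/*
 * A thread owns exactly one sleep queue between these two calls.  A
 * sketch of the pairing (kern_thread.c holds the authoritative callers):
 *
 *	td->td_sleepqueue = sleepq_alloc();	(at thread creation)
 *	...
 *	sleepq_free(td->td_sleepqueue);		(at thread destruction)
 */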
/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(const void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The caller must hold the lock on the associated sleep queue
 * chain.  If no queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(const void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(const void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}
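/*
 * The three functions above combine into the usual inspection pattern
 * (a sketch; "wchan" stands for whatever channel the caller cares about):
 *
 *	sleepq_lock(wchan);
 *	sq = sleepq_lookup(wchan);
 *	if (sq != NULL) {
 *		...			(examine the queue)
 *	}
 *	sleepq_release(wchan);
 */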
/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(const void *wchan, struct lock_object *lock, const char *wmesg,
    int flags, int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	if (__predict_false(!THREAD_CAN_SLEEP())) {
#ifdef EPOCH_TRACE
		epoch_trace_list(curthread);
#endif
		KASSERT(0,
		    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
		    __func__, td, wchan));
	}

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++) {
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
			KASSERT(sq->sq_blockedcnt[i] == 0,
			    ("thread's sleep queue %d count mismatches", i));
		}
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	sq->sq_blockedcnt[queue]++;
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_intrval = 0;
		td->td_flags |= TDF_SINTR;
	}
	td->td_flags &= ~TDF_TIMEOUT;
	thread_unlock(td);
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue once the timeout expires, if the thread has not already
 * been awakened.
 */
void
sleepq_set_timeout_sbt(const void *wchan, sbintime_t sbt, sbintime_t pr,
    int flags)
{
	struct sleepqueue_chain *sc __unused;
	struct thread *td;
	sbintime_t pr1;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	if (cold && td == &thread0)
		panic("timed sleep before timers are working");
	KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx",
	    td->td_tid, td, (uintmax_t)td->td_sleeptimo));
	thread_lock(td);
	callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
	thread_unlock(td);
	callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC |
	    C_DIRECT_EXEC);
}
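/*
 * Combined with sleepq_timedwait() below, this yields a bounded sleep
 * (a sketch; "sbt", "pr", and "flags" are the caller's callout
 * parameters, e.g. SBT_1S, 0, and 0 for a plain one-second bound):
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, lock, "example", SLEEPQ_SLEEP, 0);
 *	sleepq_set_timeout_sbt(wchan, sbt, pr, flags);
 *	error = sleepq_timedwait(wchan, 0);	(0 or EWOULDBLOCK)
 */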
/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(const void *wchan, int queue)
{
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	return (sq->sq_blockedcnt[queue]);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters with the sleep queue chain lock held; exits with
 * both the chain lock and the thread lock released.
 */
static int
sleepq_catch_signals(const void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	struct sigacts *ps;
	int sig, ret;

	ret = 0;
	td = curthread;
	p = curproc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	if ((td->td_pflags & TDP_WAKEUP) != 0) {
		td->td_pflags &= ~TDP_WAKEUP;
		ret = EINTR;
		thread_lock(td);
		goto out;
	}

	/*
	 * See if there are any pending signals or suspension requests for this
	 * thread.  If not, we can switch immediately.
	 */
	thread_lock(td);
	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) != 0) {
		thread_unlock(td);
		mtx_unlock_spin(&sc->sc_lock);
		CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
		    (void *)td, (long)p->p_pid, td->td_name);
		PROC_LOCK(p);
		/*
		 * Check for suspension first.  Checking for signals and then
		 * suspending could result in a missed signal, since a signal
		 * can be delivered while this thread is suspended.
		 */
		if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
			ret = thread_suspend_check(1);
			MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
			if (ret != 0) {
				PROC_UNLOCK(p);
				mtx_lock_spin(&sc->sc_lock);
				thread_lock(td);
				goto out;
			}
		}
		if ((td->td_flags & TDF_NEEDSIGCHK) != 0) {
			ps = p->p_sigacts;
			mtx_lock(&ps->ps_mtx);
			sig = cursig(td);
			if (sig == -1) {
				mtx_unlock(&ps->ps_mtx);
				KASSERT((td->td_flags & TDF_SBDRY) != 0,
				    ("lost TDF_SBDRY"));
				KASSERT(TD_SBDRY_INTR(td),
				    ("lost TDF_SERESTART or TDF_SEINTR"));
				KASSERT((td->td_flags &
				    (TDF_SEINTR | TDF_SERESTART)) !=
				    (TDF_SEINTR | TDF_SERESTART),
				    ("both TDF_SEINTR and TDF_SERESTART"));
				ret = TD_SBDRY_ERRNO(td);
			} else if (sig != 0) {
				ret = SIGISMEMBER(ps->ps_sigintr, sig) ?
				    EINTR : ERESTART;
				mtx_unlock(&ps->ps_mtx);
			} else {
				mtx_unlock(&ps->ps_mtx);
			}

			/*
			 * Do not go into sleep if this thread was the
			 * ptrace(2) attach leader.  cursig() consumed
			 * SIGSTOP from PT_ATTACH, but we usually act
			 * on the signal by interrupting sleep, and
			 * should do that here as well.
			 */
			if ((td->td_dbgflags & TDB_FSTP) != 0) {
				if (ret == 0)
					ret = EINTR;
				td->td_dbgflags &= ~TDB_FSTP;
			}
		}
		/*
		 * Lock the per-process spinlock prior to dropping the
		 * PROC_LOCK to avoid a signal delivery race.
		 * PROC_LOCK, PROC_SLOCK, and thread_lock() are
		 * currently held in tdsendsignal().
		 */
		PROC_SLOCK(p);
		mtx_lock_spin(&sc->sc_lock);
		PROC_UNLOCK(p);
		thread_lock(td);
		PROC_SUNLOCK(p);
	}
	if (ret == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
out:
	/*
	 * There were pending signals.  If this thread is still on the
	 * sleep queue, remove it from the sleep queue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		sq = sleepq_lookup(wchan);
		sleepq_remove_thread(sq, td);
	}
	MPASS(td->td_lock != &sc->sc_lock);
	mtx_unlock_spin(&sc->sc_lock);
	thread_unlock(td);

	return (ret);
}
/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns without the thread lock held: the lock is consumed either by
 * mi_switch() or by one of the early-return paths below.
 */
static void
sleepq_switch(const void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	bool rtc_changed;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		thread_unlock(td);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 *
	 * Do the same if the real-time clock has been adjusted since this
	 * thread calculated its timeout based on that clock.  This handles
	 * the following race:
	 * - The Ts thread needs to sleep until an absolute real-clock time.
	 *   It copies the global rtc_generation into curthread->td_rtcgen,
	 *   reads the RTC, and calculates a sleep duration based on that time.
	 *   See umtxq_sleep() for an example.
	 * - The Tc thread adjusts the RTC, bumps rtc_generation, and wakes
	 *   threads that are sleeping until an absolute real-clock time.
	 *   See tc_setclock() and the POSIX specification of clock_settime().
	 * - Ts reaches the code below.  It holds the sleepqueue chain lock,
	 *   so Tc has finished waking, so this thread must test td_rtcgen.
	 * (The declaration of td_rtcgen refers to this comment.)
	 */
	rtc_changed = td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation;
	if ((td->td_flags & TDF_TIMEOUT) || rtc_changed) {
		if (rtc_changed) {
			td->td_rtcgen = 0;
		}
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		sleepq_remove_thread(sq, td);
		mtx_unlock_spin(&sc->sc_lock);
		thread_unlock(td);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	SDT_PROBE0(sched, , , sleep);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}

/*
 * Check to see if we timed out.
 */
static inline int
sleepq_check_timeout(void)
{
	struct thread *td;
	int res;

	res = 0;
	td = curthread;
	if (td->td_sleeptimo != 0) {
		if (td->td_sleeptimo <= sbinuptime())
			res = EWOULDBLOCK;
		td->td_sleeptimo = 0;
	}
	return (res);
}

/*
 * Check to see if we were awoken by a signal.
 */
static inline int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	KASSERT((td->td_flags & TDF_SINTR) == 0,
	    ("thread %p still in interruptible sleep?", td));

	return (td->td_intrval);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(const void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
}
/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(const void *wchan, int pri)
{
	int rcatch;

	rcatch = sleepq_catch_signals(wchan, pri);
	if (rcatch)
		return (rcatch);
	return (sleepq_check_signals());
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(const void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));

	thread_lock(td);
	sleepq_switch(wchan, pri);

	return (sleepq_check_timeout());
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(const void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	/* We must always call check_timeout() to clear sleeptimo. */
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}
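/*
 * For an interruptible sleep the channel must be added with
 * SLEEPQ_INTERRUPTIBLE (a sketch; _sleep() in kern_synch.c is the
 * authoritative caller):
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, lock, "example",
 *	    SLEEPQ_SLEEP | SLEEPQ_INTERRUPTIBLE, 0);
 *	error = sleepq_wait_sig(wchan, 0);	(0, EINTR, or ERESTART)
 *
 * sleepq_timedwait_sig() combines this with the timeout handling above,
 * adding EWOULDBLOCK to the possible return values.
 */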
/*
 * Returns the type of sleep queue given a wait channel.
 */
int
sleepq_type(const void *wchan)
{
	struct sleepqueue *sq;
	int type;

	MPASS(wchan != NULL);

	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (-1);
	type = sq->sq_type;

	return (type);
}

/*
 * Removes a thread from a sleep queue and makes it runnable.
 *
 * Requires the sc chain locked on entry.  If SRQ_HOLD is specified it will
 * be locked on return.  Returns without the thread lock held.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri,
    int srqflags)
{
	struct sleepqueue_chain *sc;
	bool drop;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);

	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	/*
	 * Avoid recursing on the chain lock.  If the locks don't match we
	 * need to acquire the thread lock which setrunnable will drop for
	 * us.  In this case we need to drop the chain lock afterwards.
	 *
	 * There is no race that will make td_lock equal to sc_lock because
	 * we hold sc_lock.
	 */
	drop = false;
	if (!TD_IS_SLEEPING(td)) {
		thread_lock(td);
		drop = true;
	} else
		thread_lock_block_wait(td);

	/* Remove thread from the sleepq. */
	sleepq_remove_thread(sq, td);

	/* If we're done with the sleepqueue release it. */
	if ((srqflags & SRQ_HOLD) == 0 && drop)
		mtx_unlock_spin(&sc->sc_lock);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri &&
	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		MPASS(!drop);
		TD_CLR_SLEEPING(td);
		return (setrunnable(td, srqflags));
	}
	MPASS(drop);
	thread_unlock(td);

	return (0);
}

static void
sleepq_remove_thread(struct sleepqueue *sq, struct thread *td)
{
	struct sleepqueue_chain *sc __unused;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	if ((td->td_flags & TDF_TIMEOUT) == 0 && td->td_sleeptimo != 0)
		/*
		 * We ignore the situation where timeout subsystem was
		 * unable to stop our callout.  The struct thread is
		 * type-stable, the callout will use the correct
		 * memory when running.  The checks of the
		 * td_sleeptimo value in this function and in
		 * sleepq_timeout() ensure that the thread does not
		 * get spurious wakeups, even if the callout was reset
		 * or thread reused.
		 */
		callout_stop(&td->td_slpcallout);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	td->td_flags &= ~(TDF_SINTR | TDF_TIMEOUT);

	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_name);
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
	struct sleepqueue *sq;
	int i;

	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
		MPASS(sq->sq_blockedcnt[i] == 0);
	}
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
	struct sleepqueue *sq;
	int i;

	bzero(mem, size);
	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		TAILQ_INIT(&sq->sq_blocked[i]);
		sq->sq_blockedcnt[i] = 0;
	}
	LIST_INIT(&sq->sq_free);
	return (0);
}
/*
 * Find a thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(const void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct threadqueue *head;
	struct thread *td, *besttd;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	head = &sq->sq_blocked[queue];
	if (flags & SLEEPQ_UNFAIR) {
		/*
		 * Find the most recently sleeping thread, but try to
		 * skip threads still in process of context switch to
		 * avoid spinning on the thread lock.
		 */
		sc = SC_LOOKUP(wchan);
		besttd = TAILQ_LAST_FAST(head, thread, td_slpq);
		while (besttd->td_lock != &sc->sc_lock) {
			td = TAILQ_PREV_FAST(besttd, head, thread, td_slpq);
			if (td == NULL)
				break;
			besttd = td;
		}
	} else {
		/*
		 * Find the highest priority thread on the queue.  If there
		 * is a tie, use the thread that first appears in the queue
		 * as it has been sleeping the longest since threads are
		 * always added to the tail of sleep queues.
		 */
		besttd = td = TAILQ_FIRST(head);
		while ((td = TAILQ_NEXT(td, td_slpq)) != NULL) {
			if (td->td_priority < besttd->td_priority)
				besttd = td;
		}
	}
	MPASS(besttd != NULL);
	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri, SRQ_HOLD);
	return (wakeup_swapper);
}

static bool
match_any(struct thread *td __unused)
{

	return (true);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(const void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	return (sleepq_remove_matching(sq, queue, match_any, pri));
}

/*
 * Resume threads on the sleep queue that match the given predicate.
 */
int
sleepq_remove_matching(struct sleepqueue *sq, int queue,
    bool (*matches)(struct thread *), int pri)
{
	struct thread *td, *tdn;
	int wakeup_swapper;

	/*
	 * The last thread will be given ownership of sq and may
	 * re-enqueue itself before sleepq_resume_thread() returns,
	 * so we must cache the "next" queue item at the beginning
	 * of the final iteration.
	 */
	wakeup_swapper = 0;
	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
		if (matches(td))
			wakeup_swapper |= sleepq_resume_thread(sq, td, pri,
			    SRQ_HOLD);
	}

	return (wakeup_swapper);
}
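/*
 * A hypothetical matches() predicate, selecting only the threads of one
 * process ("p" is a placeholder captured by the caller):
 *
 *	static bool
 *	match_my_proc(struct thread *td)
 *	{
 *
 *		return (td->td_proc == p);
 *	}
 *
 * sleepq_remove_matching() applies such a predicate to a single queue
 * with the chain already locked; sleepq_chains_remove_matching() (below)
 * sweeps every chain in the hash table with it.
 */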
/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue_chain *sc __unused;
	struct sleepqueue *sq;
	struct thread *td;
	const void *wchan;
	int wakeup_swapper;

	td = arg;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

	thread_lock(td);
	if (td->td_sleeptimo == 0 || td->td_sleeptimo > sbinuptime()) {
		/*
		 * The thread does not want a timeout (yet).
		 */
	} else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
		/*
		 * See if the thread is asleep and get the wait
		 * channel if it is.
		 */
		wchan = td->td_wchan;
		sc = SC_LOOKUP(wchan);
		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
		sq = sleepq_lookup(wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		wakeup_swapper = sleepq_resume_thread(sq, td, 0, 0);
		if (wakeup_swapper)
			kick_proc0();
		return;
	} else if (TD_ON_SLEEPQ(td)) {
		/*
		 * If the thread is on the SLEEPQ but isn't sleeping
		 * yet, it can either be on another CPU in between
		 * sleepq_add() and one of the sleepq_*wait*()
		 * routines or it can be in sleepq_catch_signals().
		 */
		td->td_flags |= TDF_TIMEOUT;
	}
	thread_unlock(td);
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, const void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	int wakeup_swapper;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not, bail.
	 */
	MPASS(wchan != NULL);
	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
	/*
	 * We cannot lock the thread here as it may be sleeping on a
	 * different sleepq.  However, holding the sleepq lock for this
	 * wchan can guarantee that we do not miss a wakeup for this
	 * channel.  The asserts below will catch any false positives.
	 */
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/* Thread is asleep on sleep queue sq, so wake it up. */
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);
	MPASS(td->td_wchan == wchan);
	wakeup_swapper = sleepq_resume_thread(sq, td, 0, 0);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 *
 * Requires thread lock on entry, releases on return.
 */
int
sleepq_abort(struct thread *td, int intrval)
{
	struct sleepqueue *sq;
	const void *wchan;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		thread_unlock(td);
		return (0);
	}

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
	td->td_intrval = intrval;

	/*
	 * If the thread has not slept yet it will find the signal in
	 * sleepq_catch_signals() and call sleepq_resume_thread().  Otherwise
	 * we have to do it here.
	 */
	if (!TD_IS_SLEEPING(td)) {
		thread_unlock(td);
		return (0);
	}
	wchan = td->td_wchan;
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	return (sleepq_resume_thread(sq, td, 0, 0));
}
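/*
 * A sketch of the caller's side of sleepq_abort() (the authoritative
 * callers live in the signal code, e.g. tdsigwakeup() in kern_sig.c):
 * the aborting thread holds the target's thread lock and lets
 * sleepq_abort() consume it:
 *
 *	wakeup_swapper = 0;
 *	thread_lock(td);
 *	if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR) != 0)
 *		wakeup_swapper = sleepq_abort(td, EINTR);
 *	else
 *		thread_unlock(td);
 *	if (wakeup_swapper)
 *		kick_proc0();
 */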
void
sleepq_chains_remove_matching(bool (*matches)(struct thread *))
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq, *sq1;
	int i, wakeup_swapper;

	wakeup_swapper = 0;
	for (sc = &sleepq_chains[0]; sc < sleepq_chains + SC_TABLESIZE; ++sc) {
		if (LIST_EMPTY(&sc->sc_queues)) {
			continue;
		}
		mtx_lock_spin(&sc->sc_lock);
		LIST_FOREACH_SAFE(sq, &sc->sc_queues, sq_hash, sq1) {
			for (i = 0; i < NR_SLEEPQS; ++i) {
				wakeup_swapper |= sleepq_remove_matching(sq, i,
				    matches, 0);
			}
		}
		mtx_unlock_spin(&sc->sc_lock);
	}
	if (wakeup_swapper) {
		kick_proc0();
	}
}
/*
 * Prints the stacks of all threads presently sleeping on wchan/queue to
 * the sbuf sb.  Sets count_stacks_printed to the number of stacks actually
 * printed.  Typically, this will equal the number of threads sleeping on the
 * queue, but may be less if sb overflowed before all stacks were printed.
 */
#ifdef STACK
int
sleepq_sbuf_print_stacks(struct sbuf *sb, const void *wchan, int queue,
    int *count_stacks_printed)
{
	struct thread *td, *td_next;
	struct sleepqueue *sq;
	struct stack **st;
	struct sbuf **td_infos;
	int i, stack_idx, error, stacks_to_allocate;
	bool finished;

	error = 0;
	finished = false;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	stacks_to_allocate = 10;
	for (i = 0; i < 3 && !finished; i++) {
		/*
		 * We cannot malloc while holding the queue's spinlock, so
		 * we do our mallocs now, and hope it is enough.  If it
		 * isn't, we will free these, drop the lock, malloc more,
		 * and try again, up to a point.  After that point we will
		 * give up and report ENOMEM.  We also cannot write to sb
		 * during this time since the client may have set the
		 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a
		 * malloc as we print to it.  So we defer actually printing
		 * to sb until after we drop the spinlock.
		 */

		/* Where we will store the stacks. */
		st = malloc(sizeof(struct stack *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			st[stack_idx] = stack_create(M_WAITOK);

		/* Where we will store the td name, tid, etc. */
		td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			td_infos[stack_idx] = sbuf_new(NULL, NULL,
			    MAXCOMLEN + sizeof(struct thread *) * 2 + 40,
			    SBUF_FIXEDLEN);

		sleepq_lock(wchan);
		sq = sleepq_lookup(wchan);
		if (sq == NULL) {
			/* This sleepq does not exist; exit and return ENOENT. */
			error = ENOENT;
			finished = true;
			sleepq_release(wchan);
			goto loop_end;
		}

		stack_idx = 0;
		/* Save thread info. */
		TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq,
		    td_next) {
			if (stack_idx >= stacks_to_allocate)
				goto loop_end;

			/* Note the td_lock is equal to the sleepq_lock here. */
			(void)stack_save_td(st[stack_idx], td);

			sbuf_printf(td_infos[stack_idx], "%d: %s %p",
			    td->td_tid, td->td_name, td);

			++stack_idx;
		}

		finished = true;
		sleepq_release(wchan);

		/* Print the stacks. */
		for (i = 0; i < stack_idx; i++) {
			sbuf_finish(td_infos[i]);
			sbuf_printf(sb, "--- thread %s: ---\n",
			    sbuf_data(td_infos[i]));
			stack_sbuf_print(sb, st[i]);
			sbuf_printf(sb, "\n");

			error = sbuf_error(sb);
			if (error == 0)
				*count_stacks_printed = stack_idx;
		}

loop_end:
		if (!finished)
			sleepq_release(wchan);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			stack_destroy(st[stack_idx]);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			sbuf_delete(td_infos[stack_idx]);
		free(st, M_TEMP);
		free(td_infos, M_TEMP);
		stacks_to_allocate *= 10;
	}

	if (!finished && error == 0)
		error = ENOMEM;

	return (error);
}
#endif

#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		512
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
	return;
}

static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}
static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
	sbuf_printf(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0,
    dump_sleepq_prof_stats, "A",
    "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    enable_sleepq_prof, "I",
    "Enable sleepqueue profiling");
#endif
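/*
 * With SLEEPQUEUE_PROFILING compiled in, the knobs defined above are
 * driven from userland via sysctl(8), e.g.:
 *
 *	sysctl debug.sleepq.enable=1
 *	...  run the workload of interest  ...
 *	sysctl debug.sleepq.stats
 *	sysctl debug.sleepq.reset=1
 */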
1474 */ 1475 for (i = 0; i < SC_TABLESIZE; i++) 1476 LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) { 1477 if (sq == (struct sleepqueue *)addr) 1478 goto found; 1479 } 1480 1481 db_printf("Unable to locate a sleep queue via %p\n", (void *)addr); 1482 return; 1483 found: 1484 db_printf("Wait channel: %p\n", sq->sq_wchan); 1485 db_printf("Queue type: %d\n", sq->sq_type); 1486 #ifdef INVARIANTS 1487 if (sq->sq_lock) { 1488 lock = sq->sq_lock; 1489 db_printf("Associated Interlock: %p - (%s) %s\n", lock, 1490 LOCK_CLASS(lock)->lc_name, lock->lo_name); 1491 } 1492 #endif 1493 db_printf("Blocked threads:\n"); 1494 for (i = 0; i < NR_SLEEPQS; i++) { 1495 db_printf("\nQueue[%d]:\n", i); 1496 if (TAILQ_EMPTY(&sq->sq_blocked[i])) 1497 db_printf("\tempty\n"); 1498 else 1499 TAILQ_FOREACH(td, &sq->sq_blocked[i], 1500 td_slpq) { 1501 db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td, 1502 td->td_tid, td->td_proc->p_pid, 1503 td->td_name); 1504 } 1505 db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]); 1506 } 1507 } 1508 1509 /* Alias 'show sleepqueue' to 'show sleepq'. */ 1510 DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue); 1511 #endif 1512