/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold the queue of threads blocked
 * on a wait channel.  Sleep queues are different from turnstiles in that
 * wait channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
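
/*
 * Sketch of typical consumer usage (a hedged example; "chan" is a
 * hypothetical wait channel pointer, and real consumers such as _sleep()
 * and wakeup() in kern_synch.c add interlock handling around this core):
 *
 * Sleeping side:
 *	sleepq_lock(chan);
 *	sleepq_add(chan, NULL, "chanwt", SLEEPQ_SLEEP, 0);
 *	sleepq_wait(chan, 0);		(also releases the chain lock)
 *
 * Waking side:
 *	sleepq_lock(chan);
 *	wakeup_swapper = sleepq_broadcast(chan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(chan);
 *	if (wakeup_swapper)
 *		kick_proc0();
 */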

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_sched.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <machine/atomic.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.
 * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#ifndef SC_TABLESIZE
#define	SC_TABLESIZE	256
#endif
CTASSERT(powerof2(SC_TABLESIZE));
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
			    SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
#define	NR_SLEEPQS	2
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS];	/* (c) Blocked threads. */
	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
	int	sq_type;			/* (c) Queue type. */
#ifdef INVARIANTS
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
} __aligned(CACHE_LINE_SIZE);

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(void *wchan, int pri);
static int	sleepq_check_signals(void);
static int	sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);
static void	sleepq_switch(void *wchan, int pri);
static void	sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);

/*
 * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes.
 * Note that it must happen after sleepinit() has been fully executed, so
 * it must happen after SI_SUB_KMEM SYSINIT() subsystem setup.
 */
#ifdef SLEEPQUEUE_PROFILING
static void
init_sleepqueue_profiling(void)
{
	char chain_name[10];
	struct sysctl_oid *chain_oid;
	u_int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%u", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
	}
}

SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_sleepqueue_profiling, NULL);
#endif

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN | MTX_RECURSE);
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table locking the associated sleep queue chain.  If no queue is found in
 * the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}
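
/*
 * Sketch of the lock/lookup/release inspection pattern (a hedged example;
 * "chan" is a hypothetical wait channel; sleepq_type() below uses the
 * same sequence):
 *
 *	sleepq_lock(chan);
 *	sq = sleepq_lookup(chan);
 *	if (sq != NULL)
 *		... examine sq while the chain lock is held ...
 *	sleepq_release(chan);
 */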

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	KASSERT(td->td_no_sleeping == 0,
	    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
	    __func__, td, wchan));

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++) {
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
			KASSERT(sq->sq_blockedcnt[i] == 0,
			    ("thread's sleep queue %d count mismatches", i));
		}
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	sq->sq_blockedcnt[queue]++;
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_flags |= TDF_SINTR;
		td->td_flags &= ~TDF_SLEEPABORT;
	}
	thread_unlock(td);
}
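
/*
 * Sketch of a timed, uninterruptible sleep built on sleepq_add() and the
 * timeout setter below (a hedged example; "chan" is a hypothetical wait
 * channel, the interlock handling of a real consumer is omitted, and a
 * NULL interlock mirrors what pause_sbt() passes to _sleep()):
 *
 *	sleepq_lock(chan);
 *	sleepq_add(chan, NULL, "chanwt", SLEEPQ_SLEEP, 0);
 *	sleepq_set_timeout_sbt(chan, SBT_1S, 0, 0);
 *	error = sleepq_timedwait(chan, 0);	(EWOULDBLOCK on timeout)
 */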

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
    int flags)
{
	struct sleepqueue_chain *sc __unused;
	struct thread *td;
	sbintime_t pr1;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	if (cold && td == &thread0)
		panic("timed sleep before timers are working");
	KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx",
	    td->td_tid, td, (uintmax_t)td->td_sleeptimo));
	thread_lock(td);
	callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
	thread_unlock(td);
	callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC |
	    C_DIRECT_EXEC);
}

/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(void *wchan, int queue)
{
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	return (sq->sq_blockedcnt[queue]);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters and exits with the thread lock held.  Thread lock
 * may have transitioned from the sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	struct sigacts *ps;
	int sig, ret;

	ret = 0;
	td = curthread;
	p = curproc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	if ((td->td_pflags & TDP_WAKEUP) != 0) {
		td->td_pflags &= ~TDP_WAKEUP;
		ret = EINTR;
		thread_lock(td);
		goto out;
	}

	/*
	 * See if there are any pending signals or suspension requests for this
	 * thread.  If not, we can switch immediately.
	 */
	thread_lock(td);
	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) != 0) {
		thread_unlock(td);
		mtx_unlock_spin(&sc->sc_lock);
		CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
		    (void *)td, (long)p->p_pid, td->td_name);
		PROC_LOCK(p);
		/*
		 * Check for suspension first.  Checking for signals and then
		 * suspending could result in a missed signal, since a signal
		 * can be delivered while this thread is suspended.
		 */
		if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
			ret = thread_suspend_check(1);
			MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
			if (ret != 0) {
				PROC_UNLOCK(p);
				mtx_lock_spin(&sc->sc_lock);
				thread_lock(td);
				goto out;
			}
		}
		if ((td->td_flags & TDF_NEEDSIGCHK) != 0) {
			ps = p->p_sigacts;
			mtx_lock(&ps->ps_mtx);
			sig = cursig(td);
			if (sig == -1) {
				mtx_unlock(&ps->ps_mtx);
				KASSERT((td->td_flags & TDF_SBDRY) != 0,
				    ("lost TDF_SBDRY"));
				KASSERT(TD_SBDRY_INTR(td),
				    ("lost TDF_SERESTART or TDF_SEINTR"));
				KASSERT((td->td_flags &
				    (TDF_SEINTR | TDF_SERESTART)) !=
				    (TDF_SEINTR | TDF_SERESTART),
				    ("both TDF_SEINTR and TDF_SERESTART"));
				ret = TD_SBDRY_ERRNO(td);
			} else if (sig != 0) {
				ret = SIGISMEMBER(ps->ps_sigintr, sig) ?
				    EINTR : ERESTART;
				mtx_unlock(&ps->ps_mtx);
			} else {
				mtx_unlock(&ps->ps_mtx);
			}
		}
		/*
		 * Lock the per-process spinlock prior to dropping the PROC_LOCK
		 * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
		 * thread_lock() are currently held in tdsendsignal().
		 */
		PROC_SLOCK(p);
		mtx_lock_spin(&sc->sc_lock);
		PROC_UNLOCK(p);
		thread_lock(td);
		PROC_SUNLOCK(p);
	}
	if (ret == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
out:
	/*
	 * There were pending signals and this thread is still
	 * on the sleep queue, remove it from the sleep queue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
	}
	mtx_unlock_spin(&sc->sc_lock);
	MPASS(td->td_lock != &sc->sc_lock);
	return (ret);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with thread lock.
 */
static void
sleepq_switch(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	bool rtc_changed;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 *
	 * Do the same if the real-time clock has been adjusted since this
	 * thread calculated its timeout based on that clock.  This handles
	 * the following race:
	 * - The Ts thread needs to sleep until an absolute real-clock time.
	 *   It copies the global rtc_generation into curthread->td_rtcgen,
	 *   reads the RTC, and calculates a sleep duration based on that time.
	 *   See umtxq_sleep() for an example.
	 * - The Tc thread adjusts the RTC, bumps rtc_generation, and wakes
	 *   threads that are sleeping until an absolute real-clock time.
	 *   See tc_setclock() and the POSIX specification of clock_settime().
	 * - Ts reaches the code below.  It holds the sleepqueue chain lock,
	 *   so Tc has finished waking, so this thread must test td_rtcgen.
	 * (The declaration of td_rtcgen refers to this comment.)
	 */
	rtc_changed = td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation;
	if ((td->td_flags & TDF_TIMEOUT) || rtc_changed) {
		if (rtc_changed) {
			td->td_rtcgen = 0;
		}
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	SDT_PROBE0(sched, , , sleep);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;
	int res;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If TDF_TIMEOUT is set, we timed out.  But recheck
	 * td_sleeptimo anyway.
	 */
	res = 0;
	if (td->td_sleeptimo != 0) {
		if (td->td_sleeptimo <= sbinuptime())
			res = EWOULDBLOCK;
		td->td_sleeptimo = 0;
	}
	if (td->td_flags & TDF_TIMEOUT)
		td->td_flags &= ~TDF_TIMEOUT;
	else
		/*
		 * We ignore the situation where timeout subsystem was
		 * unable to stop our callout.  The struct thread is
		 * type-stable, the callout will use the correct
		 * memory when running.  The checks of the
		 * td_sleeptimo value in this function and in
		 * sleepq_timeout() ensure that the thread does not
		 * get spurious wakeups, even if the callout was reset
		 * or thread reused.
		 */
		callout_stop(&td->td_slpcallout);
	return (res);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/* We are no longer in an interruptible sleep. */
	if (td->td_flags & TDF_SINTR)
		td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_SLEEPABORT) {
		td->td_flags &= ~TDF_SLEEPABORT;
		return (td->td_intrval);
	}

	return (0);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	thread_unlock(td);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan, int pri)
{
	int rcatch;
	int rval;

	rcatch = sleepq_catch_signals(wchan, pri);
	rval = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int pri)
{
	struct thread *td;
	int rval;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	rval = sleepq_check_timeout();
	thread_unlock(td);

	return (rval);
}
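
/*
 * Sketch of an interruptible sleep (a hedged example; "chan" is a
 * hypothetical wait channel).  SLEEPQ_INTERRUPTIBLE makes the wait
 * abortable by the signal code via sleepq_abort():
 *
 *	sleepq_lock(chan);
 *	sleepq_add(chan, NULL, "chanwt",
 *	    SLEEPQ_SLEEP | SLEEPQ_INTERRUPTIBLE, 0);
 *	error = sleepq_wait_sig(chan, 0);	(0, EINTR, or ERESTART)
 */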

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}

/*
 * Returns the type of sleepqueue given a waitchannel.
 */
int
sleepq_type(void *wchan)
{
	struct sleepqueue *sq;
	int type;

	MPASS(wchan != NULL);

	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return (-1);
	}
	type = sq->sq_type;
	sleepq_release(wchan);
	return (type);
}

/*
 * Removes a thread from a sleep queue and makes it
 * runnable.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc __unused;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	td->td_flags &= ~TDF_SINTR;

	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_name);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri &&
	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		TD_CLR_SLEEPING(td);
		return (setrunnable(td));
	}
	return (0);
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
	struct sleepqueue *sq;
	int i;

	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
		MPASS(sq->sq_blockedcnt[i] == 0);
	}
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
	struct sleepqueue *sq;
	int i;

	bzero(mem, size);
	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		TAILQ_INIT(&sq->sq_blocked[i]);
		sq->sq_blockedcnt[i] = 0;
	}
	LIST_INIT(&sq->sq_free);
	return (0);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *besttd;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue as it has
	 * been sleeping the longest since threads are always added to
	 * the tail of sleep queues.
	 */
	besttd = TAILQ_FIRST(&sq->sq_blocked[queue]);
	TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
		if (td->td_priority < besttd->td_priority)
			besttd = td;
	}
	MPASS(besttd != NULL);
	thread_lock(besttd);
	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
	thread_unlock(besttd);
	return (wakeup_swapper);
}

static bool
match_any(struct thread *td __unused)
{

	return (true);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	return (sleepq_remove_matching(sq, queue, match_any, pri));
}

/*
 * Resume threads on the sleep queue that match the given predicate.
 */
int
sleepq_remove_matching(struct sleepqueue *sq, int queue,
    bool (*matches)(struct thread *), int pri)
{
	struct thread *td, *tdn;
	int wakeup_swapper;

	/*
	 * The last thread will be given ownership of sq and may
	 * re-enqueue itself before sleepq_resume_thread() returns,
	 * so we must cache the "next" queue item at the beginning
	 * of the final iteration.
	 */
	wakeup_swapper = 0;
	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
		thread_lock(td);
		if (matches(td))
			wakeup_swapper |= sleepq_resume_thread(sq, td, pri);
		thread_unlock(td);
	}

	return (wakeup_swapper);
}
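
/*
 * Sketch of waking a single waiter with sleepq_signal() (a hedged example;
 * "chan" is a hypothetical wait channel; wakeup_one() in kern_synch.c
 * follows a similar pattern):
 *
 *	sleepq_lock(chan);
 *	wakeup_swapper = sleepq_signal(chan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(chan);
 *	if (wakeup_swapper)
 *		kick_proc0();
 */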

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue_chain *sc __unused;
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;
	int wakeup_swapper;

	td = arg;
	wakeup_swapper = 0;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

	thread_lock(td);

	if (td->td_sleeptimo > sbinuptime() || td->td_sleeptimo == 0) {
		/*
		 * The thread does not want a timeout (yet).
		 */
	} else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
		/*
		 * See if the thread is asleep and get the wait
		 * channel if it is.
		 */
		wchan = td->td_wchan;
		sc = SC_LOOKUP(wchan);
		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
		sq = sleepq_lookup(wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	} else if (TD_ON_SLEEPQ(td)) {
		/*
		 * If the thread is on the SLEEPQ but isn't sleeping
		 * yet, it can either be on another CPU in between
		 * sleepq_add() and one of the sleepq_*wait*()
		 * routines or it can be in sleepq_catch_signals().
		 */
		td->td_flags |= TDF_TIMEOUT;
	}

	thread_unlock(td);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;
	int wakeup_swapper;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel, if it is not, then
	 * bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	/*
	 * We can not lock the thread here as it may be sleeping on a
	 * different sleepq.  However, holding the sleepq lock for this
	 * wchan can guarantee that we do not miss a wakeup for this
	 * channel.  The asserts below will catch any false positives.
	 */
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		sleepq_release(wchan);
		return;
	}
	/* Thread is asleep on sleep queue sq, so wake it up. */
	thread_lock(td);
	MPASS(sq != NULL);
	MPASS(td->td_wchan == wchan);
	wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	thread_unlock(td);
	sleepq_release(wchan);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 */
int
sleepq_abort(struct thread *td, int intrval)
{
	struct sleepqueue *sq;
	void *wchan;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return (0);

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
	td->td_intrval = intrval;
	td->td_flags |= TDF_SLEEPABORT;
	/*
	 * If the thread has not slept yet it will find the signal in
	 * sleepq_catch_signals() and call sleepq_resume_thread.  Otherwise
	 * we have to do it here.
	 */
	if (!TD_IS_SLEEPING(td))
		return (0);
	wchan = td->td_wchan;
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	return (sleepq_resume_thread(sq, td, 0));
}

void
sleepq_chains_remove_matching(bool (*matches)(struct thread *))
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq, *sq1;
	int i, wakeup_swapper;

	wakeup_swapper = 0;
	for (sc = &sleepq_chains[0]; sc < sleepq_chains + SC_TABLESIZE; ++sc) {
		if (LIST_EMPTY(&sc->sc_queues)) {
			continue;
		}
		mtx_lock_spin(&sc->sc_lock);
		LIST_FOREACH_SAFE(sq, &sc->sc_queues, sq_hash, sq1) {
			for (i = 0; i < NR_SLEEPQS; ++i) {
				wakeup_swapper |= sleepq_remove_matching(sq, i,
				    matches, 0);
			}
		}
		mtx_unlock_spin(&sc->sc_lock);
	}
	if (wakeup_swapper) {
		kick_proc0();
	}
}

/*
 * Prints the stacks of all threads presently sleeping on wchan/queue to
 * the sbuf sb.  Sets count_stacks_printed to the number of stacks actually
 * printed.  Typically, this will equal the number of threads sleeping on the
 * queue, but may be less if sb overflowed before all stacks were printed.
 */
#ifdef STACK
int
sleepq_sbuf_print_stacks(struct sbuf *sb, void *wchan, int queue,
    int *count_stacks_printed)
{
	struct thread *td, *td_next;
	struct sleepqueue *sq;
	struct stack **st;
	struct sbuf **td_infos;
	int i, stack_idx, error, stacks_to_allocate;
	bool finished;

	error = 0;
	finished = false;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	stacks_to_allocate = 10;
	for (i = 0; i < 3 && !finished ; i++) {
		/* We cannot malloc while holding the queue's spinlock, so
		 * we do our mallocs now, and hope it is enough.  If it
		 * isn't, we will free these, drop the lock, malloc more,
		 * and try again, up to a point.  After that point we will
		 * give up and report ENOMEM.  We also cannot write to sb
		 * during this time since the client may have set the
		 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a
		 * malloc as we print to it.  So we defer actually printing
		 * to sb until after we drop the spinlock.
		 */

		/* Where we will store the stacks. */
		st = malloc(sizeof(struct stack *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			st[stack_idx] = stack_create(M_WAITOK);

		/* Where we will store the td name, tid, etc. */
		td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			td_infos[stack_idx] = sbuf_new(NULL, NULL,
			    MAXCOMLEN + sizeof(struct thread *) * 2 + 40,
			    SBUF_FIXEDLEN);

		sleepq_lock(wchan);
		sq = sleepq_lookup(wchan);
		if (sq == NULL) {
			/* This sleepq does not exist; exit and return ENOENT. */
			error = ENOENT;
			finished = true;
			sleepq_release(wchan);
			goto loop_end;
		}

		stack_idx = 0;
		/* Save thread info */
		TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq,
		    td_next) {
			if (stack_idx >= stacks_to_allocate)
				goto loop_end;

			/* Note the td_lock is equal to the sleepq_lock here. */
			stack_save_td(st[stack_idx], td);

			sbuf_printf(td_infos[stack_idx], "%d: %s %p",
			    td->td_tid, td->td_name, td);

			++stack_idx;
		}

		finished = true;
		sleepq_release(wchan);

		/* Print the stacks */
		for (i = 0; i < stack_idx; i++) {
			sbuf_finish(td_infos[i]);
			sbuf_printf(sb, "--- thread %s: ---\n", sbuf_data(td_infos[i]));
			stack_sbuf_print(sb, st[i]);
			sbuf_printf(sb, "\n");

			error = sbuf_error(sb);
			if (error == 0)
				*count_stacks_printed = stack_idx;
		}

loop_end:
		if (!finished)
			sleepq_release(wchan);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			stack_destroy(st[stack_idx]);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			sbuf_delete(td_infos[stack_idx]);
		free(st, M_TEMP);
		free(td_infos, M_TEMP);
		stacks_to_allocate *= 10;
	}

	if (!finished && error == 0)
		error = ENOMEM;

	return (error);
}
#endif

#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		512
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
	return;
}

static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}

static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
	sbuf_printf(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
#endif

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
	db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i],
			    td_slpq) {
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
				    td->td_tid, td->td_proc->p_pid,
				    td->td_name);
			}
		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif