/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold queues of threads blocked on
 * a wait channel.  Sleep queues are different from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
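
/*
 * A minimal sketch of how a consumer is expected to drive this API, in the
 * style of _sleep() and the cv code.  "chan" and "interlock" are
 * hypothetical names used only for illustration; the timeout call is
 * optional and mirrors sleepq_set_timeout_sbt() below.
 */
#if 0
	int error;

	sleepq_lock(chan);
	sleepq_add(chan, &interlock->lock_object, "examp", SLEEPQ_SLEEP, 0);
	sleepq_set_timeout_sbt(chan, SBT_1S, 0, C_HARDCLOCK);	/* optional */
	mtx_unlock(interlock);		/* drop the interlock before switching */
	error = sleepq_timedwait(chan, 0);	/* 0, or EWOULDBLOCK */
	mtx_lock(interlock);
#endif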

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_sched.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#ifdef EPOCH_TRACE
#include <sys/epoch.h>
#endif

#include <machine/atomic.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.
 * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#ifndef SC_TABLESIZE
#define	SC_TABLESIZE	256
#endif
CTASSERT(powerof2(SC_TABLESIZE));
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
			    SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
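
/*
 * As a worked example of SC_HASH with the default SC_TABLESIZE of 256 (so
 * SC_MASK is 0xff): a hypothetical wait channel address 0x12345678 hashes
 * to ((0x12345678 >> 8) ^ 0x12345678) & 0xff = 0x2e, i.e. chain 46.
 * XORing the shifted address back in folds higher-order bits into the
 * index so that channels sharing low-order bits do not all collide.
 */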
#define	NR_SLEEPQS	2

/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	struct threadqueue sq_blocked[NR_SLEEPQS]; /* (c) Blocked threads. */
	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	const void	*sq_wchan;		/* (c) Wait channel. */
	int	sq_type;			/* (c) Queue type. */
#ifdef INVARIANTS
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
} __aligned(CACHE_LINE_SIZE);

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(const void *wchan, int pri);
static inline int sleepq_check_signals(void);
static inline int sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri, int srqflags);
static void	sleepq_remove_thread(struct sleepqueue *sq, struct thread *td);
static void	sleepq_switch(const void *wchan, int pri);
static void	sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);

/*
 * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes.
 * Note that it must happen after sleepinit() has been fully executed, so
 * it must happen after SI_SUB_KMEM SYSINIT() subsystem setup.
 */
#ifdef SLEEPQUEUE_PROFILING
static void
init_sleepqueue_profiling(void)
{
	char chain_name[10];
	struct sysctl_oid *chain_oid;
	u_int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%u", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
	}
}

SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_sleepqueue_profiling, NULL);
#endif

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN);
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}
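
/*
 * (sleepq_alloc() and sleepq_free() are expected to pair up over a thread's
 * lifetime: the thread code allocates td_sleepqueue when a thread is created
 * and frees it when the thread is destroyed, mirroring the thread0 setup in
 * init_sleepqueues() above.)
 */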

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(const void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The caller must hold the associated sleep queue chain lock.  If
 * no queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(const void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(const void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}
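
/*
 * A sketch of the chain-lock protocol the functions above imply, here on
 * the wakeup side roughly as wakeup_one(9) drives it ("chan" is again a
 * hypothetical name; sleepq_signal() is implemented later in this file):
 */
#if 0
	int wakeup_swapper;

	sleepq_lock(chan);
	wakeup_swapper = sleepq_signal(chan, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(chan);
	if (wakeup_swapper)
		kick_proc0();
#endif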

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(const void *wchan, struct lock_object *lock, const char *wmesg,
    int flags, int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	if (__predict_false(!THREAD_CAN_SLEEP())) {
#ifdef EPOCH_TRACE
		epoch_trace_list(curthread);
#endif
		KASSERT(0,
		    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
		    __func__, td, wchan));
	}

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++) {
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
			KASSERT(sq->sq_blockedcnt[i] == 0,
			    ("thread's sleep queue %d count mismatches", i));
		}
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	sq->sq_blockedcnt[queue]++;
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_intrval = 0;
		td->td_flags |= TDF_SINTR;
	}
	td->td_flags &= ~TDF_TIMEOUT;
	thread_unlock(td);
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after the given timeout expires if the thread has not
 * already been awakened.
 */
void
sleepq_set_timeout_sbt(const void *wchan, sbintime_t sbt, sbintime_t pr,
    int flags)
{
	struct sleepqueue_chain *sc __unused;
	struct thread *td;
	sbintime_t pr1;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	if (cold && td == &thread0)
		panic("timed sleep before timers are working");
	KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx",
	    td->td_tid, td, (uintmax_t)td->td_sleeptimo));
	thread_lock(td);
	callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
	thread_unlock(td);
	callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC |
	    C_DIRECT_EXEC);
}

/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(const void *wchan, int queue)
{
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	return (sq->sq_blockedcnt[queue]);
}

static int
sleepq_check_ast_sc_locked(struct thread *td, struct sleepqueue_chain *sc)
{
	struct proc *p;
	int ret;

	mtx_assert(&sc->sc_lock, MA_OWNED);

	ret = 0;
	if ((td->td_pflags & TDP_WAKEUP) != 0) {
		td->td_pflags &= ~TDP_WAKEUP;
		ret = EINTR;
		thread_lock(td);
		return (ret);
	}

	/*
	 * See if there are any pending signals or suspension requests for this
	 * thread.  If not, we can switch immediately.
	 */
	thread_lock(td);
	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0)
		return (0);

	thread_unlock(td);
	mtx_unlock_spin(&sc->sc_lock);

	p = td->td_proc;
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
	    (void *)td, (long)p->p_pid, td->td_name);
	PROC_LOCK(p);

	/*
	 * Check for suspension first.  Checking for signals and then
	 * suspending could result in a missed signal, since a signal
	 * can be delivered while this thread is suspended.
	 */
	ret = sig_ast_checksusp(td);
	if (ret != 0) {
		PROC_UNLOCK(p);
		mtx_lock_spin(&sc->sc_lock);
		thread_lock(td);
		return (ret);
	}

	ret = sig_ast_needsigchk(td);

	/*
	 * Lock the per-process spinlock prior to dropping the
	 * PROC_LOCK to avoid a signal delivery race.
	 * PROC_LOCK, PROC_SLOCK, and thread_lock() are
	 * currently held in tdsendsignal().
	 */
	PROC_SLOCK(p);
	mtx_lock_spin(&sc->sc_lock);
	PROC_UNLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	return (ret);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters and exits with the thread lock held.  Thread lock
 * may have transitioned from the sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(const void *wchan, int pri)
{
	struct thread *td;
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	int ret;

	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	td = curthread;

	ret = sleepq_check_ast_sc_locked(td, sc);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	if (ret == 0) {
		/*
		 * No pending signals and no suspension requests found.
		 * Switch the thread off the cpu.
		 */
		sleepq_switch(wchan, pri);
	} else {
		/*
		 * There were pending signals and this thread is still
		 * on the sleep queue, so remove it from the sleep queue.
		 */
		if (TD_ON_SLEEPQ(td)) {
			sq = sleepq_lookup(wchan);
			sleepq_remove_thread(sq, td);
		}
		MPASS(td->td_lock != &sc->sc_lock);
		mtx_unlock_spin(&sc->sc_lock);
		thread_unlock(td);
	}
	return (ret);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with thread lock.
 */
static void
sleepq_switch(const void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	bool rtc_changed;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		thread_unlock(td);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 *
	 * Do the same if the real-time clock has been adjusted since this
	 * thread calculated its timeout based on that clock.  This handles
	 * the following race:
	 * - The Ts thread needs to sleep until an absolute real-clock time.
	 *   It copies the global rtc_generation into curthread->td_rtcgen,
	 *   reads the RTC, and calculates a sleep duration based on that time.
	 *   See umtxq_sleep() for an example.
	 * - The Tc thread adjusts the RTC, bumps rtc_generation, and wakes
	 *   threads that are sleeping until an absolute real-clock time.
	 *   See tc_setclock() and the POSIX specification of clock_settime().
	 * - Ts reaches the code below.  It holds the sleepqueue chain lock,
	 *   so Tc has finished waking, so this thread must test td_rtcgen.
	 * (The declaration of td_rtcgen refers to this comment.)
	 */
	rtc_changed = td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation;
	if ((td->td_flags & TDF_TIMEOUT) || rtc_changed) {
		if (rtc_changed) {
			td->td_rtcgen = 0;
		}
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		sleepq_remove_thread(sq, td);
		mtx_unlock_spin(&sc->sc_lock);
		thread_unlock(td);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	SDT_PROBE0(sched, , , sleep);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}

/*
 * Check to see if we timed out.
 */
static inline int
sleepq_check_timeout(void)
{
	struct thread *td;
	int res;

	res = 0;
	td = curthread;
	if (td->td_sleeptimo != 0) {
		if (td->td_sleeptimo <= sbinuptime())
			res = EWOULDBLOCK;
		td->td_sleeptimo = 0;
	}
	return (res);
}

/*
 * Check to see if we were awoken by a signal.
 */
static inline int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	KASSERT((td->td_flags & TDF_SINTR) == 0,
	    ("thread %p still in interruptible sleep?", td));

	return (td->td_intrval);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(const void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(const void *wchan, int pri)
{
	int rcatch;

	rcatch = sleepq_catch_signals(wchan, pri);
	if (rcatch)
		return (rcatch);
	return (sleepq_check_signals());
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(const void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));

	thread_lock(td);
	sleepq_switch(wchan, pri);

	return (sleepq_check_timeout());
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(const void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	/* We must always call check_timeout() to clear sleeptimo. */
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}

/*
 * Returns the type of sleepqueue given a wait channel.
 */
int
sleepq_type(const void *wchan)
{
	struct sleepqueue *sq;
	int type;

	MPASS(wchan != NULL);

	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (-1);
	type = sq->sq_type;

	return (type);
}

/*
 * Removes a thread from a sleep queue and makes it runnable.
 *
 * Requires the sc chain locked on entry.  If SRQ_HOLD is specified it will
 * be locked on return.  Returns without the thread lock held.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri,
    int srqflags)
{
	struct sleepqueue_chain *sc;
	bool drop;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);

	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	/*
	 * Avoid recursing on the chain lock.  If the locks don't match we
	 * need to acquire the thread lock which setrunnable will drop for
	 * us.  In this case we need to drop the chain lock afterwards.
	 *
	 * There is no race that will make td_lock equal to sc_lock because
	 * we hold sc_lock.
	 */
	drop = false;
	if (!TD_IS_SLEEPING(td)) {
		thread_lock(td);
		drop = true;
	} else
		thread_lock_block_wait(td);

	/* Remove thread from the sleepq. */
	sleepq_remove_thread(sq, td);

	/* If we're done with the sleepqueue release it. */
	if ((srqflags & SRQ_HOLD) == 0 && drop)
		mtx_unlock_spin(&sc->sc_lock);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri &&
	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		MPASS(!drop);
		TD_CLR_SLEEPING(td);
		return (setrunnable(td, srqflags));
	}
	MPASS(drop);
	thread_unlock(td);

	return (0);
}

static void
sleepq_remove_thread(struct sleepqueue *sq, struct thread *td)
{
	struct sleepqueue_chain *sc __unused;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
825 */ 826 if (LIST_EMPTY(&sq->sq_free)) { 827 td->td_sleepqueue = sq; 828 #ifdef INVARIANTS 829 sq->sq_wchan = NULL; 830 #endif 831 #ifdef SLEEPQUEUE_PROFILING 832 sc->sc_depth--; 833 #endif 834 } else 835 td->td_sleepqueue = LIST_FIRST(&sq->sq_free); 836 LIST_REMOVE(td->td_sleepqueue, sq_hash); 837 838 if ((td->td_flags & TDF_TIMEOUT) == 0 && td->td_sleeptimo != 0) 839 /* 840 * We ignore the situation where timeout subsystem was 841 * unable to stop our callout. The struct thread is 842 * type-stable, the callout will use the correct 843 * memory when running. The checks of the 844 * td_sleeptimo value in this function and in 845 * sleepq_timeout() ensure that the thread does not 846 * get spurious wakeups, even if the callout was reset 847 * or thread reused. 848 */ 849 callout_stop(&td->td_slpcallout); 850 851 td->td_wmesg = NULL; 852 td->td_wchan = NULL; 853 td->td_flags &= ~(TDF_SINTR | TDF_TIMEOUT); 854 855 CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)", 856 (void *)td, (long)td->td_proc->p_pid, td->td_name); 857 } 858 859 #ifdef INVARIANTS 860 /* 861 * UMA zone item deallocator. 862 */ 863 static void 864 sleepq_dtor(void *mem, int size, void *arg) 865 { 866 struct sleepqueue *sq; 867 int i; 868 869 sq = mem; 870 for (i = 0; i < NR_SLEEPQS; i++) { 871 MPASS(TAILQ_EMPTY(&sq->sq_blocked[i])); 872 MPASS(sq->sq_blockedcnt[i] == 0); 873 } 874 } 875 #endif 876 877 /* 878 * UMA zone item initializer. 879 */ 880 static int 881 sleepq_init(void *mem, int size, int flags) 882 { 883 struct sleepqueue *sq; 884 int i; 885 886 bzero(mem, size); 887 sq = mem; 888 for (i = 0; i < NR_SLEEPQS; i++) { 889 TAILQ_INIT(&sq->sq_blocked[i]); 890 sq->sq_blockedcnt[i] = 0; 891 } 892 LIST_INIT(&sq->sq_free); 893 return (0); 894 } 895 896 /* 897 * Find thread sleeping on a wait channel and resume it. 898 */ 899 int 900 sleepq_signal(const void *wchan, int flags, int pri, int queue) 901 { 902 struct sleepqueue_chain *sc; 903 struct sleepqueue *sq; 904 struct threadqueue *head; 905 struct thread *td, *besttd; 906 int wakeup_swapper; 907 908 CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags); 909 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); 910 MPASS((queue >= 0) && (queue < NR_SLEEPQS)); 911 sq = sleepq_lookup(wchan); 912 if (sq == NULL) 913 return (0); 914 KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE), 915 ("%s: mismatch between sleep/wakeup and cv_*", __func__)); 916 917 head = &sq->sq_blocked[queue]; 918 if (flags & SLEEPQ_UNFAIR) { 919 /* 920 * Find the most recently sleeping thread, but try to 921 * skip threads still in process of context switch to 922 * avoid spinning on the thread lock. 923 */ 924 sc = SC_LOOKUP(wchan); 925 besttd = TAILQ_LAST_FAST(head, thread, td_slpq); 926 while (besttd->td_lock != &sc->sc_lock) { 927 td = TAILQ_PREV_FAST(besttd, head, thread, td_slpq); 928 if (td == NULL) 929 break; 930 besttd = td; 931 } 932 } else { 933 /* 934 * Find the highest priority thread on the queue. If there 935 * is a tie, use the thread that first appears in the queue 936 * as it has been sleeping the longest since threads are 937 * always added to the tail of sleep queues. 
938 */ 939 besttd = td = TAILQ_FIRST(head); 940 while ((td = TAILQ_NEXT(td, td_slpq)) != NULL) { 941 if (td->td_priority < besttd->td_priority) 942 besttd = td; 943 } 944 } 945 MPASS(besttd != NULL); 946 wakeup_swapper = sleepq_resume_thread(sq, besttd, pri, SRQ_HOLD); 947 return (wakeup_swapper); 948 } 949 950 static bool 951 match_any(struct thread *td __unused) 952 { 953 954 return (true); 955 } 956 957 /* 958 * Resume all threads sleeping on a specified wait channel. 959 */ 960 int 961 sleepq_broadcast(const void *wchan, int flags, int pri, int queue) 962 { 963 struct sleepqueue *sq; 964 965 CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags); 966 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); 967 MPASS((queue >= 0) && (queue < NR_SLEEPQS)); 968 sq = sleepq_lookup(wchan); 969 if (sq == NULL) 970 return (0); 971 KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE), 972 ("%s: mismatch between sleep/wakeup and cv_*", __func__)); 973 974 return (sleepq_remove_matching(sq, queue, match_any, pri)); 975 } 976 977 /* 978 * Resume threads on the sleep queue that match the given predicate. 979 */ 980 int 981 sleepq_remove_matching(struct sleepqueue *sq, int queue, 982 bool (*matches)(struct thread *), int pri) 983 { 984 struct thread *td, *tdn; 985 int wakeup_swapper; 986 987 /* 988 * The last thread will be given ownership of sq and may 989 * re-enqueue itself before sleepq_resume_thread() returns, 990 * so we must cache the "next" queue item at the beginning 991 * of the final iteration. 992 */ 993 wakeup_swapper = 0; 994 TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) { 995 if (matches(td)) 996 wakeup_swapper |= sleepq_resume_thread(sq, td, pri, 997 SRQ_HOLD); 998 } 999 1000 return (wakeup_swapper); 1001 } 1002 1003 /* 1004 * Time sleeping threads out. When the timeout expires, the thread is 1005 * removed from the sleep queue and made runnable if it is still asleep. 1006 */ 1007 static void 1008 sleepq_timeout(void *arg) 1009 { 1010 struct sleepqueue_chain *sc __unused; 1011 struct sleepqueue *sq; 1012 struct thread *td; 1013 const void *wchan; 1014 int wakeup_swapper; 1015 1016 td = arg; 1017 CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)", 1018 (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name); 1019 1020 thread_lock(td); 1021 if (td->td_sleeptimo == 0 || td->td_sleeptimo > sbinuptime()) { 1022 /* 1023 * The thread does not want a timeout (yet). 1024 */ 1025 } else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) { 1026 /* 1027 * See if the thread is asleep and get the wait 1028 * channel if it is. 1029 */ 1030 wchan = td->td_wchan; 1031 sc = SC_LOOKUP(wchan); 1032 THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock); 1033 sq = sleepq_lookup(wchan); 1034 MPASS(sq != NULL); 1035 td->td_flags |= TDF_TIMEOUT; 1036 wakeup_swapper = sleepq_resume_thread(sq, td, 0, 0); 1037 if (wakeup_swapper) 1038 kick_proc0(); 1039 return; 1040 } else if (TD_ON_SLEEPQ(td)) { 1041 /* 1042 * If the thread is on the SLEEPQ but isn't sleeping 1043 * yet, it can either be on another CPU in between 1044 * sleepq_add() and one of the sleepq_*wait*() 1045 * routines or it can be in sleepq_catch_signals(). 1046 */ 1047 td->td_flags |= TDF_TIMEOUT; 1048 } 1049 thread_unlock(td); 1050 } 1051 1052 /* 1053 * Resumes a specific thread from the sleep queue associated with a specific 1054 * wait channel if it is on that queue. 
1055 */ 1056 void 1057 sleepq_remove(struct thread *td, const void *wchan) 1058 { 1059 struct sleepqueue_chain *sc; 1060 struct sleepqueue *sq; 1061 int wakeup_swapper; 1062 1063 /* 1064 * Look up the sleep queue for this wait channel, then re-check 1065 * that the thread is asleep on that channel, if it is not, then 1066 * bail. 1067 */ 1068 MPASS(wchan != NULL); 1069 sc = SC_LOOKUP(wchan); 1070 mtx_lock_spin(&sc->sc_lock); 1071 /* 1072 * We can not lock the thread here as it may be sleeping on a 1073 * different sleepq. However, holding the sleepq lock for this 1074 * wchan can guarantee that we do not miss a wakeup for this 1075 * channel. The asserts below will catch any false positives. 1076 */ 1077 if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) { 1078 mtx_unlock_spin(&sc->sc_lock); 1079 return; 1080 } 1081 1082 /* Thread is asleep on sleep queue sq, so wake it up. */ 1083 sq = sleepq_lookup(wchan); 1084 MPASS(sq != NULL); 1085 MPASS(td->td_wchan == wchan); 1086 wakeup_swapper = sleepq_resume_thread(sq, td, 0, 0); 1087 if (wakeup_swapper) 1088 kick_proc0(); 1089 } 1090 1091 /* 1092 * Abort a thread as if an interrupt had occurred. Only abort 1093 * interruptible waits (unfortunately it isn't safe to abort others). 1094 * 1095 * Requires thread lock on entry, releases on return. 1096 */ 1097 int 1098 sleepq_abort(struct thread *td, int intrval) 1099 { 1100 struct sleepqueue *sq; 1101 const void *wchan; 1102 1103 THREAD_LOCK_ASSERT(td, MA_OWNED); 1104 MPASS(TD_ON_SLEEPQ(td)); 1105 MPASS(td->td_flags & TDF_SINTR); 1106 MPASS(intrval == EINTR || intrval == ERESTART); 1107 1108 /* 1109 * If the TDF_TIMEOUT flag is set, just leave. A 1110 * timeout is scheduled anyhow. 1111 */ 1112 if (td->td_flags & TDF_TIMEOUT) { 1113 thread_unlock(td); 1114 return (0); 1115 } 1116 1117 CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)", 1118 (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name); 1119 td->td_intrval = intrval; 1120 1121 /* 1122 * If the thread has not slept yet it will find the signal in 1123 * sleepq_catch_signals() and call sleepq_resume_thread. Otherwise 1124 * we have to do it here. 1125 */ 1126 if (!TD_IS_SLEEPING(td)) { 1127 thread_unlock(td); 1128 return (0); 1129 } 1130 wchan = td->td_wchan; 1131 MPASS(wchan != NULL); 1132 sq = sleepq_lookup(wchan); 1133 MPASS(sq != NULL); 1134 1135 /* Thread is asleep on sleep queue sq, so wake it up. */ 1136 return (sleepq_resume_thread(sq, td, 0, 0)); 1137 } 1138 1139 void 1140 sleepq_chains_remove_matching(bool (*matches)(struct thread *)) 1141 { 1142 struct sleepqueue_chain *sc; 1143 struct sleepqueue *sq, *sq1; 1144 int i, wakeup_swapper; 1145 1146 wakeup_swapper = 0; 1147 for (sc = &sleepq_chains[0]; sc < sleepq_chains + SC_TABLESIZE; ++sc) { 1148 if (LIST_EMPTY(&sc->sc_queues)) { 1149 continue; 1150 } 1151 mtx_lock_spin(&sc->sc_lock); 1152 LIST_FOREACH_SAFE(sq, &sc->sc_queues, sq_hash, sq1) { 1153 for (i = 0; i < NR_SLEEPQS; ++i) { 1154 wakeup_swapper |= sleepq_remove_matching(sq, i, 1155 matches, 0); 1156 } 1157 } 1158 mtx_unlock_spin(&sc->sc_lock); 1159 } 1160 if (wakeup_swapper) { 1161 kick_proc0(); 1162 } 1163 } 1164 1165 /* 1166 * Prints the stacks of all threads presently sleeping on wchan/queue to 1167 * the sbuf sb. Sets count_stacks_printed to the number of stacks actually 1168 * printed. Typically, this will equal the number of threads sleeping on the 1169 * queue, but may be less if sb overflowed before all stacks were printed. 
1170 */ 1171 #ifdef STACK 1172 int 1173 sleepq_sbuf_print_stacks(struct sbuf *sb, const void *wchan, int queue, 1174 int *count_stacks_printed) 1175 { 1176 struct thread *td, *td_next; 1177 struct sleepqueue *sq; 1178 struct stack **st; 1179 struct sbuf **td_infos; 1180 int i, stack_idx, error, stacks_to_allocate; 1181 bool finished; 1182 1183 error = 0; 1184 finished = false; 1185 1186 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); 1187 MPASS((queue >= 0) && (queue < NR_SLEEPQS)); 1188 1189 stacks_to_allocate = 10; 1190 for (i = 0; i < 3 && !finished ; i++) { 1191 /* We cannot malloc while holding the queue's spinlock, so 1192 * we do our mallocs now, and hope it is enough. If it 1193 * isn't, we will free these, drop the lock, malloc more, 1194 * and try again, up to a point. After that point we will 1195 * give up and report ENOMEM. We also cannot write to sb 1196 * during this time since the client may have set the 1197 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a 1198 * malloc as we print to it. So we defer actually printing 1199 * to sb until after we drop the spinlock. 1200 */ 1201 1202 /* Where we will store the stacks. */ 1203 st = malloc(sizeof(struct stack *) * stacks_to_allocate, 1204 M_TEMP, M_WAITOK); 1205 for (stack_idx = 0; stack_idx < stacks_to_allocate; 1206 stack_idx++) 1207 st[stack_idx] = stack_create(M_WAITOK); 1208 1209 /* Where we will store the td name, tid, etc. */ 1210 td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate, 1211 M_TEMP, M_WAITOK); 1212 for (stack_idx = 0; stack_idx < stacks_to_allocate; 1213 stack_idx++) 1214 td_infos[stack_idx] = sbuf_new(NULL, NULL, 1215 MAXCOMLEN + sizeof(struct thread *) * 2 + 40, 1216 SBUF_FIXEDLEN); 1217 1218 sleepq_lock(wchan); 1219 sq = sleepq_lookup(wchan); 1220 if (sq == NULL) { 1221 /* This sleepq does not exist; exit and return ENOENT. */ 1222 error = ENOENT; 1223 finished = true; 1224 sleepq_release(wchan); 1225 goto loop_end; 1226 } 1227 1228 stack_idx = 0; 1229 /* Save thread info */ 1230 TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, 1231 td_next) { 1232 if (stack_idx >= stacks_to_allocate) 1233 goto loop_end; 1234 1235 /* Note the td_lock is equal to the sleepq_lock here. 
			(void)stack_save_td(st[stack_idx], td);

			sbuf_printf(td_infos[stack_idx], "%d: %s %p",
			    td->td_tid, td->td_name, td);

			++stack_idx;
		}

		finished = true;
		sleepq_release(wchan);

		/* Print the stacks. */
		for (i = 0; i < stack_idx; i++) {
			sbuf_finish(td_infos[i]);
			sbuf_printf(sb, "--- thread %s: ---\n",
			    sbuf_data(td_infos[i]));
			stack_sbuf_print(sb, st[i]);
			sbuf_printf(sb, "\n");

			error = sbuf_error(sb);
			if (error == 0)
				*count_stacks_printed = stack_idx;
		}

loop_end:
		if (!finished)
			sleepq_release(wchan);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			stack_destroy(st[stack_idx]);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			sbuf_delete(td_infos[stack_idx]);
		free(st, M_TEMP);
		free(td_infos, M_TEMP);
		stacks_to_allocate *= 10;
	}

	if (!finished && error == 0)
		error = ENOMEM;

	return (error);
}
#endif

#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		512
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
	return;
}

static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}

static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
	sbuf_printf(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0,
    dump_sleepq_prof_stats, "A",
    "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    enable_sleepq_prof, "I",
    "Enable sleepqueue profiling");
#endif

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
	db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i],
			    td_slpq) {
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
				    td->td_tid, td->td_proc->p_pid,
				    td->td_name);
			}
		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif
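
/*
 * Example usage of the command above from the ddb(4) prompt, with a
 * hypothetical wait channel address:
 *
 *	db> show sleepq 0xfffff80003ab6400
 *	db> show sleepqueue 0xfffff80003ab6400
 */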