/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold queues of threads blocked on
 * a wait channel.  Sleep queues are different from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
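/*
 * A minimal sketch of the consumer-side protocol, in the style of the
 * sleepqueue(9) manual page (wchan, lk, and wmesg stand for a caller's
 * wait channel, interlock, and wait message; they are not defined here):
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, &lk.lock_object, wmesg, SLEEPQ_SLEEP, 0);
 *	sleepq_wait(wchan, 0);			(releases the chain lock)
 *
 * and the matching wakeup side:
 *
 *	sleepq_lock(wchan);
 *	sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);	(or sleepq_broadcast())
 *	sleepq_release(wchan);
 *
 * Callers that use an interlock typically drop it after sleepq_add() and
 * before blocking.  The higher-level sleep/wakeup and condition variable
 * code is built on top of this sequence.
 */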

#include <sys/cdefs.h>
#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_sched.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#ifdef EPOCH_TRACE
#include <sys/epoch.h>
#endif

#include <machine/atomic.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.
 * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#ifndef SC_TABLESIZE
#define	SC_TABLESIZE	256
#endif
CTASSERT(powerof2(SC_TABLESIZE));
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
			    SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
#define	NR_SLEEPQS	2

/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	struct threadqueue sq_blocked[NR_SLEEPQS]; /* (c) Blocked threads. */
	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	const void	*sq_wchan;		/* (c) Wait channel. */
	int	sq_type;			/* (c) Queue type. */
#ifdef INVARIANTS
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};
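
/*
 * A rough picture of the relationships described above (illustrative only;
 * a single chain bucket is shown):
 *
 *	sleepq_chains[SC_HASH(wc)]
 *	    sc_queues -> sleepqueue(wc1) -> sleepqueue(wc2) -> ...
 *	                   | sq_blocked[]: threads asleep on wc1
 *	                   | sq_free: spare queues donated by those waiters
 *
 * The first thread to block on a wait channel lends its td_sleepqueue to
 * that channel; later waiters park their queues on sq_free until they are
 * resumed and reclaim one.
 */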

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
} __aligned(CACHE_LINE_SIZE);

#ifdef SLEEPQUEUE_PROFILING
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "sleepq chain stats");
static u_int sleepq_max_depth;
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(const void *wchan, int pri);
static inline int sleepq_check_signals(void);
static inline int sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static void	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri, int srqflags);
static void	sleepq_remove_thread(struct sleepqueue *sq, struct thread *td);
static void	sleepq_switch(const void *wchan, int pri);
static void	sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);

/*
 * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes.
 * Note that it must happen after sleepinit() has been fully executed, so
 * it must happen after SI_SUB_KMEM SYSINIT() subsystem setup.
 */
#ifdef SLEEPQUEUE_PROFILING
static void
init_sleepqueue_profiling(void)
{
	char chain_name[10];
	struct sysctl_oid *chain_oid;
	u_int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%u", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
	}
}

SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_sleepqueue_profiling, NULL);
#endif

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * SYSINIT.
 */
void
init_sleepqueues(void)
{
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN);
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(const void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table, locking the associated sleep queue chain.  If no queue is found in
 * the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(const void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(const void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(const void *wchan, struct lock_object *lock, const char *wmesg,
    int flags, int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	if (__predict_false(!THREAD_CAN_SLEEP())) {
#ifdef EPOCH_TRACE
		epoch_trace_list(curthread);
#endif
		KASSERT(0,
		    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
		    __func__, td, wchan));
	}

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++) {
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
			KASSERT(sq->sq_blockedcnt[i] == 0,
			    ("thread's sleep queue %d count mismatches", i));
		}
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	sq->sq_blockedcnt[queue]++;
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_intrval = 0;
		td->td_flags |= TDF_SINTR;
	}
	td->td_flags &= ~TDF_TIMEOUT;
	thread_unlock(td);
}

/*
 * Sets a timeout that will remove the current thread from the
 * specified sleep queue at the specified time if the thread has not
 * already been awakened.  Flags are from C_* (callout) namespace.
 */
void
sleepq_set_timeout_sbt(const void *wchan, sbintime_t sbt, sbintime_t pr,
    int flags)
{
	struct sleepqueue_chain *sc __unused;
	struct thread *td;
	sbintime_t pr1;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	if (cold && td == &thread0)
		panic("timed sleep before timers are working");
	KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx",
	    td->td_tid, td, (uintmax_t)td->td_sleeptimo));
	thread_lock(td);
	callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
	thread_unlock(td);
	callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC |
	    C_DIRECT_EXEC);
}
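
/*
 * A minimal sketch of a timed sleep built from the routines above (the
 * wait channel, interlock, wait message, and one second bound are
 * illustrative placeholders):
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, &lk.lock_object, "example", SLEEPQ_SLEEP, 0);
 *	sleepq_set_timeout_sbt(wchan, SBT_1S, 0, C_HARDCLOCK);
 *	error = sleepq_timedwait(wchan, 0);
 *
 * where an error of EWOULDBLOCK indicates that the timeout expired before
 * the thread was awakened.  Note that the timeout must be set after
 * sleepq_add(), since the thread must already be on the sleep queue.
 */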

/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(const void *wchan, int queue)
{
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	return (sq->sq_blockedcnt[queue]);
}

static int
sleepq_check_ast_sc_locked(struct thread *td, struct sleepqueue_chain *sc)
{
	struct proc *p;
	int ret;

	mtx_assert(&sc->sc_lock, MA_OWNED);

	if ((td->td_pflags & TDP_WAKEUP) != 0) {
		td->td_pflags &= ~TDP_WAKEUP;
		thread_lock(td);
		return (EINTR);
	}

	/*
	 * See if there are any pending signals or suspension requests for this
	 * thread.  If not, we can switch immediately.
	 */
	thread_lock(td);
	if (!td_ast_pending(td, TDA_SIG) && !td_ast_pending(td, TDA_SUSPEND))
		return (0);

	thread_unlock(td);
	mtx_unlock_spin(&sc->sc_lock);

	p = td->td_proc;
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
	    (void *)td, (long)p->p_pid, td->td_name);
	PROC_LOCK(p);

	/*
	 * Check for suspension first.  Checking for signals and then
	 * suspending could result in a missed signal, since a signal
	 * can be delivered while this thread is suspended.
	 */
	ret = sig_ast_checksusp(td);
	if (ret != 0) {
		PROC_UNLOCK(p);
		mtx_lock_spin(&sc->sc_lock);
		thread_lock(td);
		return (ret);
	}

	ret = sig_ast_needsigchk(td);

	/*
	 * Lock the per-process spinlock prior to dropping the
	 * PROC_LOCK to avoid a signal delivery race.
	 * PROC_LOCK, PROC_SLOCK, and thread_lock() are
	 * currently held in tdsendsignal() and thread_single().
	 */
	PROC_SLOCK(p);
	mtx_lock_spin(&sc->sc_lock);
	PROC_UNLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);

	return (ret);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters and exits with the thread lock held.  Thread lock
 * may have transitioned from the sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(const void *wchan, int pri)
{
	struct thread *td;
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	int ret;

	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	td = curthread;

	ret = sleepq_check_ast_sc_locked(td, sc);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	if (ret == 0) {
		/*
		 * No pending signals and no suspension requests found.
		 * Switch the thread off the cpu.
		 */
		sleepq_switch(wchan, pri);
	} else {
		/*
		 * There were pending signals and this thread is still
		 * on the sleep queue, remove it from the sleep queue.
		 */
		if (TD_ON_SLEEPQ(td)) {
			sq = sleepq_lookup(wchan);
			sleepq_remove_thread(sq, td);
		}
		MPASS(td->td_lock != &sc->sc_lock);
		mtx_unlock_spin(&sc->sc_lock);
		thread_unlock(td);
	}
	return (ret);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue.
 *
 * The thread lock is required on entry and is no longer held on return.
 */
static void
sleepq_switch(const void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	bool rtc_changed;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		thread_unlock(td);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 *
	 * Do the same if the real-time clock has been adjusted since this
	 * thread calculated its timeout based on that clock.  This handles
	 * the following race:
	 * - The Ts thread needs to sleep until an absolute real-clock time.
	 *   It copies the global rtc_generation into curthread->td_rtcgen,
	 *   reads the RTC, and calculates a sleep duration based on that time.
	 *   See umtxq_sleep() for an example.
	 * - The Tc thread adjusts the RTC, bumps rtc_generation, and wakes
	 *   threads that are sleeping until an absolute real-clock time.
	 *   See tc_setclock() and the POSIX specification of clock_settime().
	 * - Ts reaches the code below.  It holds the sleepqueue chain lock,
	 *   so Tc has finished waking, so this thread must test td_rtcgen.
	 * (The declaration of td_rtcgen refers to this comment.)
	 */
	rtc_changed = td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation;
	if ((td->td_flags & TDF_TIMEOUT) || rtc_changed) {
		if (rtc_changed) {
			td->td_rtcgen = 0;
		}
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		sleepq_remove_thread(sq, td);
		mtx_unlock_spin(&sc->sc_lock);
		thread_unlock(td);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	SDT_PROBE0(sched, , , sleep);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}

/*
 * Check to see if we timed out.
 */
static inline int
sleepq_check_timeout(void)
{
	struct thread *td;
	int res;

	res = 0;
	td = curthread;
	if (td->td_sleeptimo != 0) {
		if (td->td_sleeptimo <= sbinuptime())
			res = EWOULDBLOCK;
		td->td_sleeptimo = 0;
	}
	return (res);
}

/*
 * Check to see if we were awoken by a signal.
 */
static inline int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	KASSERT((td->td_flags & TDF_SINTR) == 0,
	    ("thread %p still in interruptible sleep?", td));

	return (td->td_intrval);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(const void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(const void *wchan, int pri)
{
	int rcatch;

	rcatch = sleepq_catch_signals(wchan, pri);
	if (rcatch)
		return (rcatch);
	return (sleepq_check_signals());
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(const void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));

	thread_lock(td);
	sleepq_switch(wchan, pri);

	return (sleepq_check_timeout());
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(const void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	/* We must always call check_timeout() to clear sleeptimo. */
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}

/*
 * Returns the type of sleepqueue given a wait channel.
 */
int
sleepq_type(const void *wchan)
{
	struct sleepqueue *sq;
	int type;

	MPASS(wchan != NULL);

	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (-1);
	type = sq->sq_type;

	return (type);
}

/*
 * Removes a thread from a sleep queue and makes it runnable.
 *
 * Requires the sc chain locked on entry.  If SRQ_HOLD is specified it will
 * be locked on return.  Returns without the thread lock held.
 */
static void
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri,
    int srqflags)
{
	struct sleepqueue_chain *sc;
	bool drop;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);

	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	/*
	 * Avoid recursing on the chain lock.  If the locks don't match we
	 * need to acquire the thread lock which setrunnable will drop for
	 * us.  In this case we need to drop the chain lock afterwards.
	 *
	 * There is no race that will make td_lock equal to sc_lock because
	 * we hold sc_lock.
	 */
	drop = false;
	if (!TD_IS_SLEEPING(td)) {
		thread_lock(td);
		drop = true;
	} else
		thread_lock_block_wait(td);

	/* Remove thread from the sleepq. */
	sleepq_remove_thread(sq, td);

	/* If we're done with the sleepqueue release it. */
	if ((srqflags & SRQ_HOLD) == 0 && drop)
		mtx_unlock_spin(&sc->sc_lock);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri &&
	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		MPASS(!drop);
		TD_CLR_SLEEPING(td);
		setrunnable(td, srqflags);
	} else {
		MPASS(drop);
		thread_unlock(td);
	}
}

static void
sleepq_remove_thread(struct sleepqueue *sq, struct thread *td)
{
	struct sleepqueue_chain *sc __unused;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
821 */ 822 if (LIST_EMPTY(&sq->sq_free)) { 823 td->td_sleepqueue = sq; 824 #ifdef INVARIANTS 825 sq->sq_wchan = NULL; 826 #endif 827 #ifdef SLEEPQUEUE_PROFILING 828 sc->sc_depth--; 829 #endif 830 } else 831 td->td_sleepqueue = LIST_FIRST(&sq->sq_free); 832 LIST_REMOVE(td->td_sleepqueue, sq_hash); 833 834 if ((td->td_flags & TDF_TIMEOUT) == 0 && td->td_sleeptimo != 0 && 835 td->td_lock == &sc->sc_lock) { 836 /* 837 * We ignore the situation where timeout subsystem was 838 * unable to stop our callout. The struct thread is 839 * type-stable, the callout will use the correct 840 * memory when running. The checks of the 841 * td_sleeptimo value in this function and in 842 * sleepq_timeout() ensure that the thread does not 843 * get spurious wakeups, even if the callout was reset 844 * or thread reused. 845 * 846 * We also cannot safely stop the callout if a scheduler 847 * lock is held since softclock_thread() forces a lock 848 * order of callout lock -> scheduler lock. The thread 849 * lock will be a scheduler lock only if the thread is 850 * preparing to go to sleep, so this is hopefully a rare 851 * scenario. 852 */ 853 callout_stop(&td->td_slpcallout); 854 } 855 856 td->td_wmesg = NULL; 857 td->td_wchan = NULL; 858 td->td_flags &= ~(TDF_SINTR | TDF_TIMEOUT); 859 860 CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)", 861 (void *)td, (long)td->td_proc->p_pid, td->td_name); 862 } 863 864 void 865 sleepq_remove_nested(struct thread *td) 866 { 867 struct sleepqueue_chain *sc; 868 struct sleepqueue *sq; 869 const void *wchan; 870 871 MPASS(TD_ON_SLEEPQ(td)); 872 873 wchan = td->td_wchan; 874 sc = SC_LOOKUP(wchan); 875 mtx_lock_spin(&sc->sc_lock); 876 sq = sleepq_lookup(wchan); 877 MPASS(sq != NULL); 878 thread_lock(td); 879 sleepq_remove_thread(sq, td); 880 mtx_unlock_spin(&sc->sc_lock); 881 /* Returns with the thread lock owned. */ 882 } 883 884 #ifdef INVARIANTS 885 /* 886 * UMA zone item deallocator. 887 */ 888 static void 889 sleepq_dtor(void *mem, int size, void *arg) 890 { 891 struct sleepqueue *sq; 892 int i; 893 894 sq = mem; 895 for (i = 0; i < NR_SLEEPQS; i++) { 896 MPASS(TAILQ_EMPTY(&sq->sq_blocked[i])); 897 MPASS(sq->sq_blockedcnt[i] == 0); 898 } 899 } 900 #endif 901 902 /* 903 * UMA zone item initializer. 904 */ 905 static int 906 sleepq_init(void *mem, int size, int flags) 907 { 908 struct sleepqueue *sq; 909 int i; 910 911 bzero(mem, size); 912 sq = mem; 913 for (i = 0; i < NR_SLEEPQS; i++) { 914 TAILQ_INIT(&sq->sq_blocked[i]); 915 sq->sq_blockedcnt[i] = 0; 916 } 917 LIST_INIT(&sq->sq_free); 918 return (0); 919 } 920 921 /* 922 * Find thread sleeping on a wait channel and resume it. 923 */ 924 void 925 sleepq_signal(const void *wchan, int flags, int pri, int queue) 926 { 927 struct sleepqueue_chain *sc; 928 struct sleepqueue *sq; 929 struct threadqueue *head; 930 struct thread *td, *besttd; 931 932 CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags); 933 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); 934 MPASS((queue >= 0) && (queue < NR_SLEEPQS)); 935 sq = sleepq_lookup(wchan); 936 if (sq == NULL) { 937 if (flags & SLEEPQ_DROP) 938 sleepq_release(wchan); 939 return; 940 } 941 KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE), 942 ("%s: mismatch between sleep/wakeup and cv_*", __func__)); 943 944 head = &sq->sq_blocked[queue]; 945 if (flags & SLEEPQ_UNFAIR) { 946 /* 947 * Find the most recently sleeping thread, but try to 948 * skip threads still in process of context switch to 949 * avoid spinning on the thread lock. 
950 */ 951 sc = SC_LOOKUP(wchan); 952 besttd = TAILQ_LAST_FAST(head, thread, td_slpq); 953 while (besttd->td_lock != &sc->sc_lock) { 954 td = TAILQ_PREV_FAST(besttd, head, thread, td_slpq); 955 if (td == NULL) 956 break; 957 besttd = td; 958 } 959 } else { 960 /* 961 * Find the highest priority thread on the queue. If there 962 * is a tie, use the thread that first appears in the queue 963 * as it has been sleeping the longest since threads are 964 * always added to the tail of sleep queues. 965 */ 966 besttd = td = TAILQ_FIRST(head); 967 while ((td = TAILQ_NEXT(td, td_slpq)) != NULL) { 968 if (td->td_priority < besttd->td_priority) 969 besttd = td; 970 } 971 } 972 MPASS(besttd != NULL); 973 sleepq_resume_thread(sq, besttd, pri, 974 (flags & SLEEPQ_DROP) ? 0 : SRQ_HOLD); 975 } 976 977 static bool 978 match_any(struct thread *td __unused) 979 { 980 981 return (true); 982 } 983 984 /* 985 * Resume all threads sleeping on a specified wait channel. 986 */ 987 void 988 sleepq_broadcast(const void *wchan, int flags, int pri, int queue) 989 { 990 struct sleepqueue *sq; 991 992 CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags); 993 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); 994 MPASS((queue >= 0) && (queue < NR_SLEEPQS)); 995 sq = sleepq_lookup(wchan); 996 if (sq != NULL) { 997 KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE), 998 ("%s: mismatch between sleep/wakeup and cv_*", __func__)); 999 1000 sleepq_remove_matching(sq, queue, match_any, pri); 1001 } 1002 } 1003 1004 /* 1005 * Resume threads on the sleep queue that match the given predicate. 1006 */ 1007 void 1008 sleepq_remove_matching(struct sleepqueue *sq, int queue, 1009 bool (*matches)(struct thread *), int pri) 1010 { 1011 struct thread *td, *tdn; 1012 1013 /* 1014 * The last thread will be given ownership of sq and may 1015 * re-enqueue itself before sleepq_resume_thread() returns, 1016 * so we must cache the "next" queue item at the beginning 1017 * of the final iteration. 1018 */ 1019 TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) { 1020 if (matches(td)) 1021 sleepq_resume_thread(sq, td, pri, SRQ_HOLD); 1022 } 1023 } 1024 1025 /* 1026 * Time sleeping threads out. When the timeout expires, the thread is 1027 * removed from the sleep queue and made runnable if it is still asleep. 1028 */ 1029 static void 1030 sleepq_timeout(void *arg) 1031 { 1032 struct sleepqueue_chain *sc __unused; 1033 struct sleepqueue *sq; 1034 struct thread *td; 1035 const void *wchan; 1036 1037 td = arg; 1038 CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)", 1039 (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name); 1040 1041 thread_lock(td); 1042 if (td->td_sleeptimo == 0 || 1043 td->td_sleeptimo > td->td_slpcallout.c_time) { 1044 /* 1045 * The thread does not want a timeout (yet). 1046 */ 1047 } else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) { 1048 /* 1049 * See if the thread is asleep and get the wait 1050 * channel if it is. 1051 */ 1052 wchan = td->td_wchan; 1053 sc = SC_LOOKUP(wchan); 1054 THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock); 1055 sq = sleepq_lookup(wchan); 1056 MPASS(sq != NULL); 1057 td->td_flags |= TDF_TIMEOUT; 1058 sleepq_resume_thread(sq, td, 0, 0); 1059 return; 1060 } else if (TD_ON_SLEEPQ(td)) { 1061 /* 1062 * If the thread is on the SLEEPQ but isn't sleeping 1063 * yet, it can either be on another CPU in between 1064 * sleepq_add() and one of the sleepq_*wait*() 1065 * routines or it can be in sleepq_catch_signals(). 
1066 */ 1067 td->td_flags |= TDF_TIMEOUT; 1068 } 1069 thread_unlock(td); 1070 } 1071 1072 /* 1073 * Resumes a specific thread from the sleep queue associated with a specific 1074 * wait channel if it is on that queue. 1075 */ 1076 void 1077 sleepq_remove(struct thread *td, const void *wchan) 1078 { 1079 struct sleepqueue_chain *sc; 1080 struct sleepqueue *sq; 1081 1082 /* 1083 * Look up the sleep queue for this wait channel, then re-check 1084 * that the thread is asleep on that channel, if it is not, then 1085 * bail. 1086 */ 1087 MPASS(wchan != NULL); 1088 sc = SC_LOOKUP(wchan); 1089 mtx_lock_spin(&sc->sc_lock); 1090 /* 1091 * We can not lock the thread here as it may be sleeping on a 1092 * different sleepq. However, holding the sleepq lock for this 1093 * wchan can guarantee that we do not miss a wakeup for this 1094 * channel. The asserts below will catch any false positives. 1095 */ 1096 if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) { 1097 mtx_unlock_spin(&sc->sc_lock); 1098 return; 1099 } 1100 1101 /* Thread is asleep on sleep queue sq, so wake it up. */ 1102 sq = sleepq_lookup(wchan); 1103 MPASS(sq != NULL); 1104 MPASS(td->td_wchan == wchan); 1105 sleepq_resume_thread(sq, td, 0, 0); 1106 } 1107 1108 /* 1109 * Abort a thread as if an interrupt had occurred. Only abort 1110 * interruptible waits (unfortunately it isn't safe to abort others). 1111 * 1112 * Requires thread lock on entry, releases on return. 1113 */ 1114 void 1115 sleepq_abort(struct thread *td, int intrval) 1116 { 1117 struct sleepqueue *sq; 1118 const void *wchan; 1119 1120 THREAD_LOCK_ASSERT(td, MA_OWNED); 1121 MPASS(TD_ON_SLEEPQ(td)); 1122 MPASS(td->td_flags & TDF_SINTR); 1123 MPASS((intrval == 0 && (td->td_flags & TDF_SIGWAIT) != 0) || 1124 intrval == EINTR || intrval == ERESTART); 1125 1126 /* 1127 * If the TDF_TIMEOUT flag is set, just leave. A 1128 * timeout is scheduled anyhow. 1129 */ 1130 if (td->td_flags & TDF_TIMEOUT) { 1131 thread_unlock(td); 1132 return; 1133 } 1134 1135 CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)", 1136 (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name); 1137 td->td_intrval = intrval; 1138 1139 /* 1140 * If the thread has not slept yet it will find the signal in 1141 * sleepq_catch_signals() and call sleepq_resume_thread. Otherwise 1142 * we have to do it here. 1143 */ 1144 if (!TD_IS_SLEEPING(td)) { 1145 thread_unlock(td); 1146 return; 1147 } 1148 wchan = td->td_wchan; 1149 MPASS(wchan != NULL); 1150 sq = sleepq_lookup(wchan); 1151 MPASS(sq != NULL); 1152 1153 /* Thread is asleep on sleep queue sq, so wake it up. */ 1154 sleepq_resume_thread(sq, td, 0, 0); 1155 } 1156 1157 void 1158 sleepq_chains_remove_matching(bool (*matches)(struct thread *)) 1159 { 1160 struct sleepqueue_chain *sc; 1161 struct sleepqueue *sq, *sq1; 1162 int i; 1163 1164 for (sc = &sleepq_chains[0]; sc < sleepq_chains + SC_TABLESIZE; ++sc) { 1165 if (LIST_EMPTY(&sc->sc_queues)) { 1166 continue; 1167 } 1168 mtx_lock_spin(&sc->sc_lock); 1169 LIST_FOREACH_SAFE(sq, &sc->sc_queues, sq_hash, sq1) { 1170 for (i = 0; i < NR_SLEEPQS; ++i) 1171 sleepq_remove_matching(sq, i, matches, 0); 1172 } 1173 mtx_unlock_spin(&sc->sc_lock); 1174 } 1175 } 1176 1177 /* 1178 * Prints the stacks of all threads presently sleeping on wchan/queue to 1179 * the sbuf sb. Sets count_stacks_printed to the number of stacks actually 1180 * printed. Typically, this will equal the number of threads sleeping on the 1181 * queue, but may be less if sb overflowed before all stacks were printed. 
1182 */ 1183 #ifdef STACK 1184 int 1185 sleepq_sbuf_print_stacks(struct sbuf *sb, const void *wchan, int queue, 1186 int *count_stacks_printed) 1187 { 1188 struct thread *td, *td_next; 1189 struct sleepqueue *sq; 1190 struct stack **st; 1191 struct sbuf **td_infos; 1192 int i, stack_idx, error, stacks_to_allocate; 1193 bool finished; 1194 1195 error = 0; 1196 finished = false; 1197 1198 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); 1199 MPASS((queue >= 0) && (queue < NR_SLEEPQS)); 1200 1201 stacks_to_allocate = 10; 1202 for (i = 0; i < 3 && !finished ; i++) { 1203 /* We cannot malloc while holding the queue's spinlock, so 1204 * we do our mallocs now, and hope it is enough. If it 1205 * isn't, we will free these, drop the lock, malloc more, 1206 * and try again, up to a point. After that point we will 1207 * give up and report ENOMEM. We also cannot write to sb 1208 * during this time since the client may have set the 1209 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a 1210 * malloc as we print to it. So we defer actually printing 1211 * to sb until after we drop the spinlock. 1212 */ 1213 1214 /* Where we will store the stacks. */ 1215 st = malloc(sizeof(struct stack *) * stacks_to_allocate, 1216 M_TEMP, M_WAITOK); 1217 for (stack_idx = 0; stack_idx < stacks_to_allocate; 1218 stack_idx++) 1219 st[stack_idx] = stack_create(M_WAITOK); 1220 1221 /* Where we will store the td name, tid, etc. */ 1222 td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate, 1223 M_TEMP, M_WAITOK); 1224 for (stack_idx = 0; stack_idx < stacks_to_allocate; 1225 stack_idx++) 1226 td_infos[stack_idx] = sbuf_new(NULL, NULL, 1227 MAXCOMLEN + sizeof(struct thread *) * 2 + 40, 1228 SBUF_FIXEDLEN); 1229 1230 sleepq_lock(wchan); 1231 sq = sleepq_lookup(wchan); 1232 if (sq == NULL) { 1233 /* This sleepq does not exist; exit and return ENOENT. */ 1234 error = ENOENT; 1235 finished = true; 1236 sleepq_release(wchan); 1237 goto loop_end; 1238 } 1239 1240 stack_idx = 0; 1241 /* Save thread info */ 1242 TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, 1243 td_next) { 1244 if (stack_idx >= stacks_to_allocate) 1245 goto loop_end; 1246 1247 /* Note the td_lock is equal to the sleepq_lock here. 
			(void)stack_save_td(st[stack_idx], td);

			sbuf_printf(td_infos[stack_idx], "%d: %s %p",
			    td->td_tid, td->td_name, td);

			++stack_idx;
		}

		finished = true;
		sleepq_release(wchan);

		/* Print the stacks */
		for (i = 0; i < stack_idx; i++) {
			sbuf_finish(td_infos[i]);
			sbuf_printf(sb, "--- thread %s: ---\n", sbuf_data(td_infos[i]));
			stack_sbuf_print(sb, st[i]);
			sbuf_putc(sb, '\n');

			error = sbuf_error(sb);
			if (error == 0)
				*count_stacks_printed = stack_idx;
		}

loop_end:
		if (!finished)
			sleepq_release(wchan);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			stack_destroy(st[stack_idx]);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			sbuf_delete(td_infos[stack_idx]);
		free(st, M_TEMP);
		free(td_infos, M_TEMP);
		stacks_to_allocate *= 10;
	}

	if (!finished && error == 0)
		error = ENOMEM;

	return (error);
}
#endif

#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		512
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
	return;
}

static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}
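
/*
 * With SLEEPQUEUE_PROFILING compiled in, the sysctl handlers below are
 * driven from userland roughly as follows (an illustrative shell session):
 *
 *	sysctl debug.sleepq.enable=1	(start counting sleeps per wmesg)
 *	sysctl debug.sleepq.stats	(dump wmesg/count pairs)
 *	sysctl debug.sleepq.reset=1	(clear the accumulated counts)
 */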

static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
	sbuf_cat(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0,
    dump_sleepq_prof_stats, "A",
    "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    enable_sleepq_prof, "I",
    "Enable sleepqueue profiling");
#endif

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
	db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i],
			    td_slpq) {
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
				    td->td_tid, td->td_proc->p_pid,
				    td->td_name);
			}
		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif