/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
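
/*
 * For illustration: the mtx(9) macros hand these routines the address of
 * the mtx_lock word itself as the lock cookie, so mtxlock2mtx() simply
 * recovers the enclosing mutex.  With a hypothetical mutex "foo_mtx":
 *
 *	volatile uintptr_t *c = &foo_mtx.mtx_lock;
 *	MPASS(mtxlock2mtx(c) == &foo_mtx);
 */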

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

#ifdef ADAPTIVE_MUTEXES
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging");

static struct lock_delay_config __read_frequently mtx_delay;

SYSCTL_INT(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
#endif

static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin, CTLFLAG_RD, NULL,
    "mtx spin debugging");

static struct lock_delay_config __read_frequently mtx_spin_delay;

SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
    &mtx_spin_delay.base, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
    &mtx_spin_delay.max, 0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx __exclusive_cache_line Giant;

void
assert_mtx(const struct lock_object *lock, int what)
{

	mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	panic("spin locks can only use msleep_spin");
}

uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

uintptr_t
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m;
	uintptr_t x;

	m = (const struct mtx *)lock;
	x = m->mtx_lock;
	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
	return (*owner != NULL);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_sleep(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
		    m, 0, 0, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

#ifdef LOCK_PROFILING
	__mtx_unlock_sleep(c, (uintptr_t)curthread, opts, file, line);
#else
	__mtx_unlock(m, curthread, opts, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}

void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;
#ifdef SMP
	uintptr_t tid, v;
#endif

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
		    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
#ifdef SMP
	spinlock_enter();
	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_spin(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,
		    m, 0, 0, file, line);
#else
	__mtx_lock_spin(m, curthread, opts, file, line);
#endif
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	rval = 1;
	recursed = false;
	v = MTX_UNOWNED;
	for (;;) {
		if (_mtx_obtain_lock_fetch(m, &v, tid))
			break;
		if (v == MTX_UNOWNED)
			continue;
		if (v == tid &&
		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0)) {
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}

int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);
	return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG));
}
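
/*
 * For illustration, mtx_trylock() is the non-blocking acquire built on the
 * routine above: it either takes the mutex (recursively, if the mutex and
 * the caller allow it) and returns non-zero, or returns zero without ever
 * sleeping.  A hypothetical caller that must not sleep might do:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		foo_count++;
 *		mtx_unlock(&foo_mtx);
 *	}
 *
 * where "foo_mtx" and "foo_count" are stand-in names.
 */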

/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
#if LOCK_DEBUG > 0
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file,
    int line)
#else
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct thread *td;
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;
	struct thread *owner;
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof;
#endif
	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return;

#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	m = mtxlock2mtx(c);
	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(lv_mtx_owner(v) == td)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
		    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
#if LOCK_DEBUG > 0
		opts &= ~MTX_RECURSE;
#endif
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
#if LOCK_DEBUG > 0
	opts &= ~MTX_RECURSE;
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		all_time -= lockstat_nsecs(&m->lock_object);
#endif

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&m->lock_object, 0))
				CTR3(KTR_LOCK,
				    "%s: spinning on %p held by %p",
				    __func__, m, owner);
			KTR_STATE1(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "spinning", "lockname:\"%s\"",
			    m->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = MTX_READ_VALUE(m);
				owner = lv_mtx_owner(v);
			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "running");
			continue;
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = MTX_READ_VALUE(m);

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			v = MTX_READ_VALUE(m);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
#ifndef ADAPTIVE_MUTEXES
		owner = mtx_owner(m);
#endif
		MPASS(owner == mtx_owner(m));
		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
		v = MTX_READ_VALUE(m);
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
#endif
}

static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf("spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td, printf);
#endif
	panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
#if LOCK_DEBUG > 0
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct lock_delay_arg lda;
	uintptr_t tid;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof;
#endif

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v == tid)) {
		m->mtx_recurse++;
		return;
	}

	if (SCHEDULER_STOPPED())
		return;

	lock_delay_arg_init(&lda, &mtx_spin_delay);

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&m->lock_object);
#endif
	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		do {
			if (lda.spin_cnt < 10000000) {
				lock_delay(&lda);
			} else {
				lda.spin_cnt++;
				if (lda.spin_cnt < 60000000 || kdb_active ||
				    panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
			}
			v = MTX_READ_VALUE(m);
		} while (v != MTX_UNOWNED);
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
#endif
}
#endif /* SMP */

#ifdef INVARIANTS
static void
thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
{

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("thread_lock() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object,
	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
}
#else
#define	thread_lock_validate(m, opts, file, line) do { } while (0)
#endif

#ifndef LOCK_PROFILING
#if LOCK_DEBUG > 0
void
_thread_lock(struct thread *td, int opts, const char *file, int line)
#else
void
_thread_lock(struct thread *td)
#endif
{
	struct mtx *m;
	uintptr_t tid, v;

	tid = (uintptr_t)curthread;

	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire)))
		goto slowpath_noirq;
	spinlock_enter();
	m = td->td_lock;
	thread_lock_validate(m, 0, file, line);
	v = MTX_READ_VALUE(m);
	if (__predict_true(v == MTX_UNOWNED)) {
		if (__predict_false(!_mtx_obtain_lock(m, tid)))
			goto slowpath_unlocked;
	} else if (v == tid) {
		m->mtx_recurse++;
	} else
		goto slowpath_unlocked;
	if (__predict_true(m == td->td_lock)) {
		WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
		return;
	}
	if (m->mtx_recurse != 0)
		m->mtx_recurse--;
	else
		_mtx_release_lock_quick(m);
slowpath_unlocked:
	spinlock_exit();
slowpath_noirq:
#if LOCK_DEBUG > 0
	thread_lock_flags_(td, opts, file, line);
#else
	thread_lock_flags_(td, 0, 0, 0);
#endif
}
#endif

void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 1;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	for (;;) {
retry:
		v = MTX_UNOWNED;
		spinlock_enter();
		m = td->td_lock;
		thread_lock_validate(m, opts, file, line);
		for (;;) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			if (v == MTX_UNOWNED)
				continue;
			if (v == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			do {
				if (lda.spin_cnt < 10000000) {
					lock_delay(&lda);
				} else {
					lda.spin_cnt++;
					if (lda.spin_cnt < 60000000 ||
					    kdb_active || panicstr != NULL)
						DELAY(1);
					else
						_mtx_lock_spin_failed(m);
					cpu_spinwait();
				}
				if (m != td->td_lock)
					goto retry;
				v = MTX_READ_VALUE(m);
			} while (v != MTX_UNOWNED);
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
	}
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
		    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}

/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed, contested (i.e. we
 * need to wake up a blocked thread) or a lockstat probe is active.
 */
#if LOCK_DEBUG > 0
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

	if (__predict_false(v == tid))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v & MTX_RECURSED)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
	if (v == tid && _mtx_release_lock(m, tid))
		return;

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	_mtx_release_lock_quick(m);
	ts = turnstile_lookup(&m->lock_object);
	MPASS(ts != NULL);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (panicstr != NULL || dumping || SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' of type contained in
 * `opts' with options contained in `opts' and name `name.'  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (opts & MTX_NEW)
		flags |= LO_NEW;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}
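
/*
 * For illustration, the usual consumer-side life cycle built on _mtx_init()
 * above and _mtx_destroy() below (the names "foo_mtx" and "foo_count" are
 * hypothetical):
 *
 *	static struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo lock", NULL, MTX_DEF);
 *	...
 *	mtx_lock(&foo_mtx);
 *	foo_count++;
 *	mtx_unlock(&foo_mtx);
 *	...
 *	mtx_destroy(&foo_mtx);
 *
 * Long-lived mutexes may instead be registered with MTX_SYSINIT(), which
 * arranges for mtx_sysinit() above to perform the mtx_init() at boot.
 */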

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			TD_LOCKS_DEC(curthread);

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif