/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
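/*
 * An illustrative sketch (not a definition from this file): the mutex
 * macros in sys/mutex.h pass &m->mtx_lock as the lock cookie, so the
 * function versions below can recover the mutex with mtxlock2mtx():
 *
 *	struct mtx *m = ...;
 *	volatile uintptr_t *c = &m->mtx_lock;
 *	MPASS(mtxlock2mtx(c) == m);
 */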
/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

#ifdef ADAPTIVE_MUTEXES
#ifdef MUTEX_CUSTOM_BACKOFF
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging");

static struct lock_delay_config __read_frequently mtx_delay;

SYSCTL_U16(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
    0, "");
SYSCTL_U16(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
#else
#define	mtx_delay	locks_delay
#endif
#endif

#ifdef MUTEX_SPIN_CUSTOM_BACKOFF
static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin, CTLFLAG_RD, NULL,
    "mtx spin debugging");

static struct lock_delay_config __read_frequently mtx_spin_delay;

SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
    &mtx_spin_delay.base, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
    &mtx_spin_delay.max, 0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);
#else
#define	mtx_spin_delay	locks_delay
#endif

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx __exclusive_cache_line Giant;

static void _mtx_lock_indefinite_check(struct mtx *, struct lock_delay_arg *);
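/*
 * When the custom backoff knobs above are compiled in, the delay
 * parameters are runtime tunable; an illustrative sketch (the sysctl
 * names follow from the SYSCTL_* declarations above):
 *
 *	# sysctl debug.mtx.delay_base
 *	# sysctl debug.mtx_spin.delay_max=<value>
 */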
void
assert_mtx(const struct lock_object *lock, int what)
{

	/*
	 * Treat LA_LOCKED as if LA_XLOCKED was asserted.
	 *
	 * Some callers of lc_assert use LA_LOCKED to indicate that either
	 * a shared lock or write lock was held, while other callers use
	 * the stricter LA_XLOCKED (used as MA_OWNED).
	 *
	 * Mutex is the only lock class that cannot be shared, so we can
	 * reasonably assume the caller really intends LA_XLOCKED when
	 * asserting LA_LOCKED on a mutex object.
	 */
	if (what & LA_LOCKED) {
		what &= ~LA_LOCKED;
		what |= LA_XLOCKED;
	}
	mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	panic("spin locks can only use msleep_spin");
}

uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

uintptr_t
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m;
	uintptr_t x;

	m = (const struct mtx *)lock;
	x = m->mtx_lock;
	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
	return (*owner != NULL);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_sleep(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
		    m, 0, 0, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

#ifdef LOCK_PROFILING
	__mtx_unlock_sleep(c, (uintptr_t)curthread, opts, file, line);
#else
	__mtx_unlock(m, curthread, opts, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}
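/*
 * An illustrative sketch (sc and its field are hypothetical): module
 * code uses the mtx_lock()/mtx_unlock() macros, which resolve to the
 * function versions above when not inlined:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_busy = 1;
 *	mtx_unlock(&sc->sc_mtx);
 */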
void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;
#ifdef SMP
	uintptr_t tid, v;
#endif

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
#ifdef SMP
	spinlock_enter();
	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_spin(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,
		    m, 0, 0, file, line);
#else
	__mtx_lock_spin(m, curthread, opts, file, line);
#endif
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}
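/*
 * An illustrative sketch (the softc and its mutex are hypothetical):
 * spin mutexes disable interrupts on the local CPU via spinlock_enter(),
 * so they should only protect very short critical sections:
 *
 *	mtx_lock_spin(&sc->sc_intr_mtx);
 *	... touch state shared with an interrupt handler ...
 *	mtx_unlock_spin(&sc->sc_intr_mtx);
 */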
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	rval = 1;
	recursed = false;
	v = MTX_UNOWNED;
	for (;;) {
		if (_mtx_obtain_lock_fetch(m, &v, tid))
			break;
		if (v == MTX_UNOWNED)
			continue;
		if (v == tid &&
		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0)) {
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}

int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);
	return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG));
}
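/*
 * An illustrative sketch (names hypothetical): mtx_trylock() is useful
 * when blocking is not acceptable, e.g. when the caller already holds a
 * lock that must not be held while sleeping on this one:
 *
 *	if (mtx_trylock(&sc->sc_mtx)) {
 *		... do the work immediately ...
 *		mtx_unlock(&sc->sc_mtx);
 *	} else {
 *		... defer the work ...
 *	}
 */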
/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
#if LOCK_DEBUG > 0
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file,
    int line)
#else
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct thread *td;
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;
	struct thread *owner;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 0;
#endif

	td = curthread;
	tid = (uintptr_t)td;
	m = mtxlock2mtx(c);

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
		while (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				goto out_lockstat;
		}
		doing_lockprof = 1;
		all_time -= lockstat_nsecs(&m->lock_object);
	}
#endif
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#endif

	if (SCHEDULER_STOPPED_TD(td))
		return;

#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(lv_mtx_owner(v) == td)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
#if LOCK_DEBUG > 0
		opts &= ~MTX_RECURSE;
#endif
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
#if LOCK_DEBUG > 0
	opts &= ~MTX_RECURSE;
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&m->lock_object, 0))
				CTR3(KTR_LOCK,
				    "%s: spinning on %p held by %p",
				    __func__, m, owner);
			KTR_STATE1(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "spinning", "lockname:\"%s\"",
			    m->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = MTX_READ_VALUE(m);
				owner = lv_mtx_owner(v);
			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "running");
			continue;
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = MTX_READ_VALUE(m);
retry_turnstile:

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) {
			goto retry_turnstile;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
#ifndef ADAPTIVE_MUTEXES
		owner = mtx_owner(m);
#endif
		MPASS(owner == mtx_owner(m));
		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
		v = MTX_READ_VALUE(m);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
out_lockstat:
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
}
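/*
 * The LOCKSTAT_RECORD1() calls above feed the DTrace lockstat provider.
 * An illustrative sketch (assuming KDTRACE_HOOKS; the aggregation key is
 * arbitrary): block times for contested sleep mutexes can be observed
 * from userland with something like
 *
 *	dtrace -n 'lockstat:::adaptive-block { @[arg0] = quantize(arg1); }'
 */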
#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
#if LOCK_DEBUG > 0
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct lock_delay_arg lda;
	uintptr_t tid;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 0;
#endif

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
		while (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				goto out_lockstat;
		}
		doing_lockprof = 1;
		spin_time -= lockstat_nsecs(&m->lock_object);
	}
#endif
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#endif

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v == tid)) {
		m->mtx_recurse++;
		return;
	}

	if (SCHEDULER_STOPPED())
		return;

	lock_delay_arg_init(&lda, &mtx_spin_delay);

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		do {
			if (__predict_true(lda.spin_cnt < 10000000)) {
				lock_delay(&lda);
			} else {
				_mtx_lock_indefinite_check(m, &lda);
			}
			v = MTX_READ_VALUE(m);
		} while (v != MTX_UNOWNED);
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
out_lockstat:
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
}
#endif /* SMP */

#ifdef INVARIANTS
static void
thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
{

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("thread_lock() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) == 0,
	    ("thread_lock: got a recursive mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object,
	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
}
#else
#define	thread_lock_validate(m, opts, file, line) do { } while (0)
#endif
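/*
 * An illustrative sketch: a thread's scheduler state is protected by
 * whatever spin mutex td_lock currently points at, accessed through the
 * thread_lock()/thread_unlock() macros:
 *
 *	thread_lock(td);
 *	... examine or modify td's scheduler state ...
 *	thread_unlock(td);
 *
 * The containing lock can change while a thread migrates between run
 * queues, which is why the implementations below re-check td->td_lock
 * after acquiring it.
 */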
#ifndef LOCK_PROFILING
#if LOCK_DEBUG > 0
void
_thread_lock(struct thread *td, int opts, const char *file, int line)
#else
void
_thread_lock(struct thread *td)
#endif
{
	struct mtx *m;
	uintptr_t tid;

	tid = (uintptr_t)curthread;

	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire)))
		goto slowpath_noirq;
	spinlock_enter();
	m = td->td_lock;
	thread_lock_validate(m, 0, file, line);
	if (__predict_false(m == &blocked_lock))
		goto slowpath_unlocked;
	if (__predict_false(!_mtx_obtain_lock(m, tid)))
		goto slowpath_unlocked;
	if (__predict_true(m == td->td_lock)) {
		WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
		return;
	}
	_mtx_release_lock_quick(m);
slowpath_unlocked:
	spinlock_exit();
slowpath_noirq:
#if LOCK_DEBUG > 0
	thread_lock_flags_(td, opts, file, line);
#else
	thread_lock_flags_(td, 0, 0, 0);
#endif
}
#endif

void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 1;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	spinlock_enter();

	for (;;) {
retry:
		m = td->td_lock;
		thread_lock_validate(m, opts, file, line);
		v = MTX_READ_VALUE(m);
		for (;;) {
			if (v == MTX_UNOWNED) {
				if (_mtx_obtain_lock_fetch(m, &v, tid))
					break;
				continue;
			}
			MPASS(v != tid);
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			do {
				if (__predict_true(lda.spin_cnt < 10000000)) {
					lock_delay(&lda);
				} else {
					_mtx_lock_indefinite_check(m, &lda);
				}
				if (m != td->td_lock) {
					spinlock_enter();
					goto retry;
				}
				v = MTX_READ_VALUE(m);
			} while (v != MTX_UNOWNED);
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		_mtx_release_lock_quick(m);
	}
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	lock = td->td_lock;
	mtx_assert(lock, MA_OWNED);
	td->td_lock = &blocked_lock;

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{

	mtx_assert(new, MA_OWNED);
	KASSERT(td->td_lock == &blocked_lock,
	    ("thread %p lock %p not blocked_lock %p",
	    td, td->td_lock, &blocked_lock));
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_block_wait(struct thread *td)
{

	while (td->td_lock == &blocked_lock)
		cpu_spinwait();

	/* Acquire fence to be certain that all thread state is visible. */
	atomic_thread_fence_acq();
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	lock = td->td_lock;
	mtx_assert(lock, MA_OWNED);
	td->td_lock = new;
	mtx_unlock_spin(lock);
}
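/*
 * An illustrative sketch of the hand-off protocol above (as used by the
 * scheduler when migrating a thread; details vary by scheduler):
 *
 *	mtx = thread_lock_block(td);	-- td_lock now points at blocked_lock
 *	... move td to another run queue ...
 *	thread_lock_unblock(td, mtx);	-- publish td's (possibly new) lock
 *
 * Any contender that sampled td_lock in between spins on blocked_lock,
 * which is never released, until the new pointer is stored.
 */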
/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed, contested (i.e. we
 * need to wake up a blocked thread) or lockstat probe is active.
 */
#if LOCK_DEBUG > 0
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

	if (__predict_false(v == tid))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v & MTX_RECURSED)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
	if (v == tid && _mtx_release_lock(m, tid))
		return;

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	_mtx_release_lock_quick(m);
	ts = turnstile_lookup(&m->lock_object);
	MPASS(ts != NULL);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (KERNEL_PANICKED() || dumping || SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' with the options
 * contained in `opts' and name `name.'  The optional lock type `type'
 * is used as a general lock category name for use with witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (opts & MTX_NEW)
		flags |= LO_NEW;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}
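/*
 * An illustrative sketch ("foo" names are hypothetical).  A statically
 * allocated mutex is usually set up with MTX_SYSINIT(), which routes
 * through mtx_sysinit() above:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo mutex", MTX_DEF);
 *
 * while dynamically allocated locks call mtx_init() directly, e.g.:
 *
 *	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF | MTX_RECURSE);
 */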
/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			TD_LOCKS_DEC(curthread);

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

static void __noinline
_mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
{
	struct thread *td;

	ldap->spin_cnt++;
	if (ldap->spin_cnt < 60000000 || kdb_active || KERNEL_PANICKED())
		cpu_lock_delay();
	else {
		td = mtx_owner(m);

		/* If the mutex is unlocked, try again. */
		if (td == NULL)
			return;

		printf("spin lock %p (%s) held by %p (tid %d) too long\n",
		    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
		witness_display_spinlock(&m->lock_object, td, printf);
#endif
		panic("spin lock held too long");
	}
	cpu_spinwait();
}

void
mtx_spin_wait_unlocked(struct mtx *m)
{
	struct lock_delay_arg lda;

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("%s() of destroyed mutex %p", __func__, m));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("%s() of sleep mutex %p (%s)", __func__, m,
	    m->lock_object.lo_name));
	KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)",
	    __func__, m, m->lock_object.lo_name));

	lda.spin_cnt = 0;

	while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) {
		if (__predict_true(lda.spin_cnt < 10000000)) {
			cpu_spinwait();
			lda.spin_cnt++;
		} else {
			_mtx_lock_indefinite_check(m, &lda);
		}
	}
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif
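/*
 * An illustrative sketch: the handler above is reached from the DDB
 * prompt through the generic lock pretty-printer, e.g.
 *
 *	db> show lock <address of a struct mtx>
 *
 * which prints the lock class and name followed by the flags, state and
 * owner lines emitted by db_show_mtx().
 */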