/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
static int	trylock_mtx(struct lock_object *lock, uintptr_t how);
static int	trylock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_trylock = trylock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_trylock = trylock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

#ifdef ADAPTIVE_MUTEXES
#ifdef MUTEX_CUSTOM_BACKOFF
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "mtx debugging");

static struct lock_delay_config __read_frequently mtx_delay;

SYSCTL_U16(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
    0, "");
SYSCTL_U16(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
#else
#define	mtx_delay	locks_delay
#endif
#endif

#ifdef MUTEX_SPIN_CUSTOM_BACKOFF
static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin,
    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "mtx spin debugging");

static struct lock_delay_config __read_frequently mtx_spin_delay;

SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
    &mtx_spin_delay.base, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
    &mtx_spin_delay.max, 0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);
#else
#define	mtx_spin_delay	locks_delay
#endif

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx __exclusive_cache_line Giant;

static void _mtx_lock_indefinite_check(struct mtx *, struct lock_delay_arg *);

static void
assert_mtx(const struct lock_object *lock, int what)
{

	/*
	 * Treat LA_LOCKED as if LA_XLOCKED was asserted.
	 *
	 * Some callers of lc_assert use LA_LOCKED to indicate that either
	 * a shared lock or write lock was held, while other callers use
	 * the more strict LA_XLOCKED (used as MA_OWNED).
	 *
	 * Mutex is the only lock class that cannot be shared; as a result,
	 * we can reasonably consider the caller really intends to assert
	 * LA_XLOCKED when they are asserting LA_LOCKED on a mutex object.
	 */
	if (what & LA_LOCKED) {
		what &= ~LA_LOCKED;
		what |= LA_XLOCKED;
	}
	mtx_assert((const struct mtx *)lock, what);
}

static void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}

static void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	mtx_lock_spin((struct mtx *)lock);
}

static int
trylock_mtx(struct lock_object *lock, uintptr_t how)
{

	return (mtx_trylock((struct mtx *)lock));
}

static int
trylock_spin(struct lock_object *lock, uintptr_t how)
{

	return (mtx_trylock_spin((struct mtx *)lock));
}

static uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

static uintptr_t
unlock_spin(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock_spin(m);
	return (0);
}

#ifdef KDTRACE_HOOKS
static int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m;
	uintptr_t x;

	m = (const struct mtx *)lock;
	x = m->mtx_lock;
	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
	return (*owner != NULL);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on mutex %p @ %s:%d",
	    curthread, m, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex %p @ %s:%d", m, file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_spin,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_sleep(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
		    m, 0, 0, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex %p @ %s:%d", m, file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_spin,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

#ifdef LOCK_PROFILING
	__mtx_unlock_sleep(c, (uintptr_t)curthread, opts, file, line);
#else
	__mtx_unlock(m, curthread, opts, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}

void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;
#ifdef SMP
	uintptr_t tid, v;
#endif

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex %p @ %s:%d", m, file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_sleep,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
		    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
#ifdef SMP
	spinlock_enter();
	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_spin(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire,
		    m, 0, 0, file, line);
#else
	__mtx_lock_spin(m, curthread, opts, file, line);
#endif
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex %p @ %s:%d", m, file,
	    line));
	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_sleep,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex %p @ %s:%d", m, file,
	    line));
	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_sleep,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}

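/*
 * Illustrative sketch of how the spin mutex interfaces above are typically
 * consumed; the driver softc and lock name here are hypothetical, not taken
 * from this file:
 *
 *	mtx_init(&sc->sc_intr_mtx, "foo intr", NULL, MTX_SPIN);
 *	...
 *	mtx_lock_spin(&sc->sc_intr_mtx);
 *	(touch state shared with an interrupt filter)
 *	mtx_unlock_spin(&sc->sc_intr_mtx);
 */
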
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("mtx_trylock() by idle thread %p on mutex %p @ %s:%d",
	    curthread, m, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex %p @ %s:%d", m, file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_spin,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	rval = 1;
	recursed = false;
	v = MTX_UNOWNED;
	for (;;) {
		if (_mtx_obtain_lock_fetch(m, &v, tid))
			break;
		if (v == MTX_UNOWNED)
			continue;
		if (v == tid &&
		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0)) {
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}

int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);
	return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG));
}

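/*
 * Illustrative sketch of the usual trylock pattern; the softc and lock
 * below are hypothetical.  A caller that cannot afford to block may take
 * the lock opportunistically and defer its work otherwise:
 *
 *	if (mtx_trylock(&sc->sc_mtx)) {
 *		(fast path while the lock is held)
 *		mtx_unlock(&sc->sc_mtx);
 *	} else {
 *		(queue the work for later)
 *	}
 */
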
/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We get here if lock profiling is enabled, the lock is already held by
 * someone else or we are recursing on it.
 */
#if LOCK_DEBUG > 0
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file,
    int line)
#else
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct thread *td;
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;
	struct thread *owner;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 0;
#endif

	td = curthread;
	tid = (uintptr_t)td;
	m = mtxlock2mtx(c);

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
		while (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				goto out_lockstat;
		}
		doing_lockprof = 1;
		all_time -= lockstat_nsecs(&m->lock_object);
	}
#endif
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#endif

	if (SCHEDULER_STOPPED())
		return;

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(lv_mtx_owner(v) == td)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
		    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
#if LOCK_DEBUG > 0
		opts &= ~MTX_RECURSE;
#endif
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
#if LOCK_DEBUG > 0
	opts &= ~MTX_RECURSE;
#endif

#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init_noadapt(&lda);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, false,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	THREAD_CONTENDS_ON_LOCK(&m->lock_object);

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&m->lock_object, 0))
				CTR3(KTR_LOCK,
				    "%s: spinning on %p held by %p",
				    __func__, m, owner);
			KTR_STATE1(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "spinning", "lockname:\"%s\"",
			    m->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = MTX_READ_VALUE(m);
				owner = lv_mtx_owner(v);
			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "running");
			continue;
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = MTX_READ_VALUE(m);
retry_turnstile:

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		if ((v & MTX_WAITERS) == 0 &&
		    !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_WAITERS)) {
			goto retry_turnstile;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
#ifndef ADAPTIVE_MUTEXES
		owner = mtx_owner(m);
#endif
		MPASS(owner == mtx_owner(m));
		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
		v = MTX_READ_VALUE(m);
	}
	THREAD_CONTENTION_DONE(&m->lock_object);
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
out_lockstat:
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
}

#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
#if LOCK_DEBUG > 0
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct lock_delay_arg lda;
	uintptr_t tid;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 0;
#endif

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
		while (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				goto out_lockstat;
		}
		doing_lockprof = 1;
		spin_time -= lockstat_nsecs(&m->lock_object);
	}
#endif
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#endif

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v == tid)) {
		m->mtx_recurse++;
		return;
	}

	if (SCHEDULER_STOPPED())
		return;

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, true, &contested, &waittime);

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		do {
			if (__predict_true(lda.spin_cnt < 10000000)) {
				lock_delay(&lda);
			} else {
				_mtx_lock_indefinite_check(m, &lda);
			}
			v = MTX_READ_VALUE(m);
		} while (v != MTX_UNOWNED);
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
out_lockstat:
#endif
	LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
}
#endif /* SMP */

#ifdef INVARIANTS
static void
thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
{

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("thread_lock() of destroyed mutex %p @ %s:%d", m, file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_sleep,
	    ("thread_lock() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) == 0,
	    ("thread_lock: got a recursive mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object,
	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
}
#else
#define	thread_lock_validate(m, opts, file, line) do { } while (0)
#endif

#ifndef LOCK_PROFILING
#if LOCK_DEBUG > 0
void
_thread_lock(struct thread *td, int opts, const char *file, int line)
#else
void
_thread_lock(struct thread *td)
#endif
{
	struct mtx *m;
	uintptr_t tid;

	tid = (uintptr_t)curthread;

	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire)))
		goto slowpath_noirq;
	spinlock_enter();
	m = td->td_lock;
	thread_lock_validate(m, 0, file, line);
	if (__predict_false(m == &blocked_lock))
		goto slowpath_unlocked;
	if (__predict_false(!_mtx_obtain_lock(m, tid)))
		goto slowpath_unlocked;
	if (__predict_true(m == td->td_lock)) {
		WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
		return;
	}
	atomic_store_rel_ptr(&m->mtx_lock, MTX_UNOWNED);
slowpath_unlocked:
	spinlock_exit();
slowpath_noirq:
#if LOCK_DEBUG > 0
	thread_lock_flags_(td, opts, file, line);
#else
	thread_lock_flags_(td, 0, 0, 0);
#endif
}
#endif

void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 1;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
#endif
#ifdef KDTRACE_HOOKS
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	spinlock_enter();

	for (;;) {
retry:
		m = td->td_lock;
		thread_lock_validate(m, opts, file, line);
		v = MTX_READ_VALUE(m);
		for (;;) {
			if (v == MTX_UNOWNED) {
				if (_mtx_obtain_lock_fetch(m, &v, tid))
					break;
				continue;
			}
			MPASS(v != tid);
			lock_profile_obtain_lock_failed(&m->lock_object, true,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			do {
				if (__predict_true(lda.spin_cnt < 10000000)) {
					lock_delay(&lda);
				} else {
					_mtx_lock_indefinite_check(m, &lda);
				}
				if (m != td->td_lock) {
					spinlock_enter();
					goto retry;
				}
				v = MTX_READ_VALUE(m);
			} while (v != MTX_UNOWNED);
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		atomic_store_rel_ptr(&m->mtx_lock, MTX_UNOWNED);
	}
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	lock = td->td_lock;
	mtx_assert(lock, MA_OWNED);
	td->td_lock = &blocked_lock;

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{

	mtx_assert(new, MA_OWNED);
	KASSERT(td->td_lock == &blocked_lock,
	    ("thread %p lock %p not blocked_lock %p",
	    td, td->td_lock, &blocked_lock));
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_block_wait(struct thread *td)
{

	while (td->td_lock == &blocked_lock)
		cpu_spinwait();

	/* Acquire fence to be certain that all thread state is visible. */
	atomic_thread_fence_acq();
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	lock = td->td_lock;
	mtx_assert(lock, MA_OWNED);
	td->td_lock = new;
	mtx_unlock_spin(lock);
}

/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We get here if lock profiling is enabled, the lock is already held by
 * someone else or we are recursing on it.
 */
#if LOCK_DEBUG > 0
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

	if (__predict_false(v == tid))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v & MTX_RECURSED)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
	if (v == tid && _mtx_release_lock(m, tid))
		return;

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	atomic_store_rel_ptr(&m->mtx_lock, MTX_UNOWNED);
	ts = turnstile_lookup(&m->lock_object);
	MPASS(ts != NULL);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (KERNEL_PANICKED() || dumping || SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(const void *arg)
{
	const struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' with options contained
 * in `opts' and name `name.'  The optional lock type `type' is used as a
 * general lock category name for use with witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (opts & MTX_NEW)
		flags |= LO_NEW;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}

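/*
 * Illustrative sketch of how _mtx_init() is typically reached through the
 * mtx_init() and MTX_SYSINIT() wrappers; the softc, fields and names below
 * are hypothetical:
 *
 *	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_count++;
 *	mtx_unlock(&sc->sc_mtx);
 *	...
 *	mtx_destroy(&sc->sc_mtx);
 *
 * A file-scope mutex can instead be registered for initialization at boot;
 * the macro expands to a SYSINIT that calls mtx_sysinit() above:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo global", MTX_DEF);
 */
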
/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_WAITERS)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) {
			lock_profile_release_lock(&m->lock_object, true);
			spinlock_exit();
		} else {
			TD_LOCKS_DEC(curthread);
			lock_profile_release_lock(&m->lock_object, false);
		}

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

static void __noinline
_mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
{
	struct thread *td;

	ldap->spin_cnt++;
	if (ldap->spin_cnt < 60000000 || kdb_active || KERNEL_PANICKED())
		cpu_lock_delay();
	else {
		td = mtx_owner(m);

		/* If the mutex is unlocked, try again. */
		if (td == NULL)
			return;

		printf("spin lock %p (%s) held by %p (tid %d) too long\n",
		    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
		witness_display_spinlock(&m->lock_object, td, printf);
#endif
		panic("spin lock held too long");
	}
	cpu_spinwait();
}

void
mtx_spin_wait_unlocked(struct mtx *m)
{
	struct lock_delay_arg lda;

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("%s() of destroyed mutex %p", __func__, m));
	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_sleep,
	    ("%s() of sleep mutex %p (%s)", __func__, m,
	    m->lock_object.lo_name));
	KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
	    m->lock_object.lo_name));

	lda.spin_cnt = 0;

	while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) {
		if (__predict_true(lda.spin_cnt < 10000000)) {
			cpu_spinwait();
			lda.spin_cnt++;
		} else {
			_mtx_lock_indefinite_check(m, &lda);
		}
	}
}

void
mtx_wait_unlocked(struct mtx *m)
{
	struct thread *owner;
	uintptr_t v;

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("%s() of destroyed mutex %p", __func__, m));
	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_spin,
	    ("%s() of spin mutex %p (%s)", __func__, m,
	    m->lock_object.lo_name));
	KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
	    m->lock_object.lo_name));

	for (;;) {
		v = atomic_load_acq_ptr(&m->mtx_lock);
		if (v == MTX_UNOWNED) {
			break;
		}
		owner = lv_mtx_owner(v);
		if (!TD_IS_RUNNING(owner)) {
			mtx_lock(m);
			mtx_unlock(m);
			break;
		}
		cpu_spinwait();
	}
}

#ifdef DDB
static void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_WAITERS)
			db_printf(", WAITERS");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif

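/*
 * Illustrative sketch of the DDB output produced by db_show_mtx() above for
 * an owned, recursed default mutex; the pointer, tid, pid and process name
 * values are hypothetical:
 *
 *	 flags: {DEF, RECURSE}
 *	 state: {OWNED, RECURSED}
 *	 owner: 0xfffff80003a1c000 (tid 100001, pid 1, "init")
 *	 recursed: 1
 */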