/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
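
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the mtx_lock()/mtx_unlock() macros pass a pointer to the mtx_lock word
 * around as a "cookie", and mtxlock2mtx() recovers the enclosing mutex
 * from it via __containerof():
 *
 *	struct mtx m;
 *	volatile uintptr_t *c = &m.mtx_lock;
 *	struct mtx *mp = mtxlock2mtx(c);	(mp == &m)
 */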

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

#ifdef ADAPTIVE_MUTEXES
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging");

static struct lock_delay_config __read_frequently mtx_delay;

SYSCTL_INT(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
#endif

static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin, CTLFLAG_RD, NULL,
    "mtx spin debugging");

static struct lock_delay_config __read_frequently mtx_spin_delay;

SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
    &mtx_spin_delay.base, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
    &mtx_spin_delay.max, 0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx __exclusive_cache_line Giant;

void
assert_mtx(const struct lock_object *lock, int what)
{

	mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	panic("spin locks can only use msleep_spin");
}

uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

uintptr_t
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m;
	uintptr_t x;

	m = (const struct mtx *)lock;
	x = m->mtx_lock;
	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
	return (*owner != NULL);
}
#endif
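
/*
 * Added example (illustrative only; the "foo" names are hypothetical):
 * the consumer-facing KPI that ultimately lands in the functions below
 * when inlining is not used:
 *
 *	static struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo lock", NULL, MTX_DEF);
 *	mtx_lock(&foo_mtx);
 *	...critical section...
 *	mtx_unlock(&foo_mtx);
 *	mtx_destroy(&foo_mtx);
 */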

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_sleep(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
		    m, 0, 0, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

#ifdef LOCK_PROFILING
	__mtx_unlock_sleep(c, opts, file, line);
#else
	__mtx_unlock(m, curthread, opts, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}

void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;
#ifdef SMP
	uintptr_t tid, v;
#endif

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
#ifdef SMP
	spinlock_enter();
	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_spin(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,
		    m, 0, 0, file, line);
#else
	__mtx_lock_spin(m, curthread, opts, file, line);
#endif
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
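
/*
 * Added example (illustrative only; names are hypothetical): spin
 * mutexes use the _spin variants and keep interrupts disabled on the
 * local CPU via spinlock_enter() for as long as they are held:
 *
 *	static struct mtx foo_spin;
 *
 *	mtx_init(&foo_spin, "foo spin", NULL, MTX_SPIN);
 *	mtx_lock_spin(&foo_spin);
 *	...short, non-sleeping critical section...
 *	mtx_unlock_spin(&foo_spin);
 */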

int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	struct thread *td;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	rval = 1;
	recursed = false;
	v = MTX_UNOWNED;
	for (;;) {
		if (_mtx_obtain_lock_fetch(m, &v, tid))
			break;
		if (v == MTX_UNOWNED)
			continue;
		if (v == tid &&
		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0)) {
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}
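
/*
 * Added example (illustrative sketch; names are hypothetical): the usual
 * pattern for callers that must not block, e.g. when taking the lock in
 * the established order would deadlock:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		...we own foo_mtx without having slept...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		...back off, drop other locks, or defer the work...
 *	}
 */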

/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
#if LOCK_DEBUG > 0
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file,
    int line)
#else
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct thread *td;
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof;
#endif

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return;

#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	m = mtxlock2mtx(c);
	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(lv_mtx_owner(v) == td)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
#if LOCK_DEBUG > 0
		opts &= ~MTX_RECURSE;
#endif
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
#if LOCK_DEBUG > 0
	opts &= ~MTX_RECURSE;
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		all_time -= lockstat_nsecs(&m->lock_object);
#endif

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&m->lock_object, 0))
				CTR3(KTR_LOCK,
				    "%s: spinning on %p held by %p",
				    __func__, m, owner);
			KTR_STATE1(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "spinning", "lockname:\"%s\"",
			    m->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = MTX_READ_VALUE(m);
				owner = lv_mtx_owner(v);
			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "running");
			continue;
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = MTX_READ_VALUE(m);

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			v = MTX_READ_VALUE(m);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
		v = MTX_READ_VALUE(m);
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
#endif
}
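
/*
 * Added summary (commentary, not in the original file): the mtx_lock
 * word is either MTX_UNOWNED or the owning thread pointer ORed with
 * flag bits in MTX_FLAGMASK:
 *
 *	v == MTX_UNOWNED	the lock is free
 *	v == tid		owned by curthread, uncontested
 *	v & MTX_RECURSED	the owner has recursed on the lock
 *	v & MTX_CONTESTED	waiters are queued on the turnstile
 *
 * The loop above re-reads the word with MTX_READ_VALUE() after every
 * failed transition because each of these states can change while the
 * turnstile chain lock is being acquired.
 */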

static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf("spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td, printf);
#endif
	panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
#if LOCK_DEBUG > 0
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct lock_delay_arg lda;
	uintptr_t tid;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof;
#endif

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v == tid)) {
		m->mtx_recurse++;
		return;
	}

	if (SCHEDULER_STOPPED())
		return;

	lock_delay_arg_init(&lda, &mtx_spin_delay);

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&m->lock_object);
#endif
	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		do {
			if (lda.spin_cnt < 10000000) {
				lock_delay(&lda);
			} else {
				lda.spin_cnt++;
				if (lda.spin_cnt < 60000000 || kdb_active ||
				    panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
			}
			v = MTX_READ_VALUE(m);
		} while (v != MTX_UNOWNED);
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
#endif
}
#endif /* SMP */
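
/*
 * Added note (commentary, not in the original file): the spin loop above
 * acts as its own watchdog.  The first ~10,000,000 iterations back off
 * via lock_delay(); after that the waiter polls with DELAY(1) until
 * ~60,000,000 iterations, and finally _mtx_lock_spin_failed() panics
 * with the owner's identity, so a wedged spin lock holder is reported
 * rather than hanging the CPU silently.
 */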

#ifdef INVARIANTS
static void
thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
{

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("thread_lock() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object,
	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
}
#else
#define	thread_lock_validate(m, opts, file, line) do { } while (0)
#endif

#ifndef LOCK_PROFILING
#if LOCK_DEBUG > 0
void
_thread_lock(struct thread *td, int opts, const char *file, int line)
#else
void
_thread_lock(struct thread *td)
#endif
{
	struct mtx *m;
	uintptr_t tid, v;

	tid = (uintptr_t)curthread;

	spinlock_enter();
	m = td->td_lock;
	thread_lock_validate(m, 0, file, line);
	v = MTX_READ_VALUE(m);
	if (__predict_true(v == MTX_UNOWNED)) {
		if (__predict_false(!_mtx_obtain_lock(m, tid)))
			goto slowpath_unlocked;
	} else if (v == tid) {
		m->mtx_recurse++;
	} else
		goto slowpath_unlocked;
	if (__predict_true(m == td->td_lock)) {
		WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
		return;
	}
	if (m->mtx_recurse != 0)
		m->mtx_recurse--;
	else
		_mtx_release_lock_quick(m);
slowpath_unlocked:
	spinlock_exit();
	thread_lock_flags_(td, 0, 0, 0);
}
#endif
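
/*
 * Added commentary (not in the original file): td_lock does not name a
 * fixed mutex.  The scheduler re-points it at the spin lock protecting
 * whatever container the thread currently occupies (a run queue, sleep
 * queue, or turnstile).  This is why the fast path above re-checks
 * m == td->td_lock after acquiring m, backs the acquisition out on a
 * mismatch, and retries through thread_lock_flags_().
 */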

void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 1;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	for (;;) {
retry:
		v = MTX_UNOWNED;
		spinlock_enter();
		m = td->td_lock;
		thread_lock_validate(m, opts, file, line);
		for (;;) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			if (v == MTX_UNOWNED)
				continue;
			if (v == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			do {
				if (lda.spin_cnt < 10000000) {
					lock_delay(&lda);
				} else {
					lda.spin_cnt++;
					if (lda.spin_cnt < 60000000 ||
					    kdb_active || panicstr != NULL)
						DELAY(1);
					else
						_mtx_lock_spin_failed(m);
					cpu_spinwait();
				}
				if (m != td->td_lock)
					goto retry;
				v = MTX_READ_VALUE(m);
			} while (v != MTX_UNOWNED);
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
	}
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
		    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}
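
/*
 * Added example (illustrative sketch, not from the original file): how
 * context-switch code pairs thread_lock_block() with
 * thread_lock_unblock() while migrating a thread; "new_lock" is a
 * hypothetical name for the destination container's spin lock:
 *
 *	struct mtx *lock;
 *
 *	lock = thread_lock_block(td);	// td->td_lock now points at
 *					// blocked_lock; acquirers spin
 *	...move td to its new run queue, sleep queue, or turnstile...
 *	thread_lock_unblock(td, new_lock);
 */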

/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed, contested (i.e. we
 * need to wake up a blocked thread) or a lockstat probe is active.
 */
#if LOCK_DEBUG > 0
void
__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
#else
void
__mtx_unlock_sleep(volatile uintptr_t *c)
#endif
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid, v;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);
	v = MTX_READ_VALUE(m);

	if (v & MTX_RECURSED) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
	if (v == tid && _mtx_release_lock(m, tid))
		return;

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_mtx_release_lock_quick(m);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}
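
/*
 * Added note (commentary, not in the original file): the contested
 * release path above does not hand the lock to a particular waiter.
 * turnstile_broadcast() wakes every thread on the exclusive queue and
 * _mtx_release_lock_quick() clears the lock word, so the woken threads
 * re-contend for the mutex in __mtx_lock_sleep().
 */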

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (panicstr != NULL || dumping || SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' with the type and
 * options contained in `opts' and name `name.'  The optional lock type
 * `type' is used as a general lock category name for use with witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (opts & MTX_NEW)
		flags |= LO_NEW;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}
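
/*
 * Added example (illustrative only; the "foo" names are hypothetical):
 * MTX_SYSINIT() arranges for mtx_sysinit() above to run during boot, so
 * a subsystem can declare a mutex without an explicit init call:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx_init, &foo_mtx, "foo lock", MTX_DEF);
 */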

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			TD_LOCKS_DEC(curthread);

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif