/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
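
/*
 * Illustrative note: the _mtx_*_flags() entry points below take the
 * address of the mtx_lock word (the "lock cookie") rather than the
 * mutex itself, so callers need not depend on the full layout of
 * struct mtx; mtxlock2mtx() recovers the enclosing mutex, e.g.:
 *
 *	struct mtx *m = mtxlock2mtx(&foo_mtx.mtx_lock);	   (m == &foo_mtx)
 */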

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(const struct lock_object *lock, int what)
{

	mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	panic("spin locks can only use msleep_spin");
}

uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

uintptr_t
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m = (const struct mtx *)lock;

	*owner = mtx_owner(m);
	return (mtx_unowned(m) == 0);
}
#endif
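
/*
 * Typical consumer usage of this API (illustrative sketch only;
 * `foo_mtx' is hypothetical, see mutex(9) for the authoritative
 * interface):
 *
 *	static struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	mtx_lock(&foo_mtx);
 *	...critical section...
 *	mtx_unlock(&foo_mtx);
 *	mtx_destroy(&foo_mtx);
 */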

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	__mtx_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	curthread->td_locks++;
}

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock(m, curthread, opts, file, line);
	curthread->td_locks--;
}

void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	__mtx_lock_spin(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}
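
/*
 * Illustrative mtx_trylock() pattern (sketch; `foo_mtx' is a
 * hypothetical mutex): callers that must not block can probe the lock
 * and fall back on failure:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		...critical section...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		...defer the work or retry later...
 *	}
 */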

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock (provided the
 * mutex is recursable).
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
	    (opts & MTX_RECURSE) != 0)) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
			    m, contested, waittime, file, line);
	}

	return (rval);
}

/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		opts &= ~MTX_RECURSE;
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
	opts &= ~MTX_RECURSE;

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
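
	/*
	 * Acquisition loop: retry the atomic fast path and, on failure,
	 * either spin adaptively while the owner is running on another
	 * CPU or block on the mutex's turnstile.
	 */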
	while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		v = m->mtx_lock;
		if (v != MTX_UNOWNED) {
			owner = (struct thread *)(v & ~MTX_FLAGMASK);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&m->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, m, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname((struct thread *)tid),
				    "spinning", "lockname:\"%s\"",
				    m->lock_object.lo_name);
				while (mtx_owner(m) == owner &&
				    TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname((struct thread *)tid),
				    "running");
				continue;
			}
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs();
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs();
		sleep_cnt++;
#endif
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (spin_cnt - sleep_cnt));
#endif
}

static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf("spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td, printf);
#endif
	panic("spin lock held too long");
}
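
/*
 * The spin loops below use two thresholds: roughly 10,000,000 polls of
 * pure cpu_spinwait(), after which each further poll is throttled with
 * DELAY(1), and roughly 60,000,000 polls, after which the lock is
 * declared held too long and _mtx_lock_spin_failed() panics (unless
 * the debugger is active or the system is already panicking).
 */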

#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
	struct mtx *m;
	int i = 0;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
	while (!_mtx_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else
				_mtx_lock_spin_failed(m);
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
	    contested, waittime, (file), (line));
	LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i);
}
#endif /* SMP */
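
/*
 * thread_lock() operates on td->td_lock, a pointer to a spin mutex that
 * the scheduler may change out from under us while the thread migrates;
 * the loop below therefore revalidates td->td_lock after every
 * acquisition and retries until the mutex it obtained is still the
 * thread's lock.
 */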
void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	int i;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
#endif

	i = 0;
	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED())
		return;

	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
			spin_cnt++;
#endif
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 10000000)
					cpu_spinwait();
				else if (i < 60000000 ||
				    kdb_active || panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
	}
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
		    m, contested, waittime, (file), (line));
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
}

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}

/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_mtx_release_lock_quick(m);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */
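
/*
 * Illustrative mtx_assert() usage (sketch; `foo_mtx' is hypothetical):
 * code that requires a mutex to be held can state the requirement
 * explicitly, and INVARIANTS kernels will verify it:
 *
 *	mtx_assert(&foo_mtx, MA_OWNED);
 */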

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (panicstr != NULL || dumping)
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can re-enable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' with name `name',
 * using the lock class and options contained in `opts'.  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}
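
/*
 * Illustrative MTX_SYSINIT() usage (sketch; `foo' and `foo_mtx' are
 * hypothetical): the macro arranges for mtx_sysinit() above to run
 * during boot, so the mutex needs no explicit mtx_init() call:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo mutex", MTX_DEF);
 */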

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Set up turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif