/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Internal utility macros.
 */
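/*
 * The macros below operate on the mtx_lock word: when a mutex is free the
 * word holds MTX_UNOWNED, and once acquired it holds the owning thread
 * pointer with state bits such as MTX_RECURSED and MTX_CONTESTED OR'd into
 * the low bits covered by MTX_FLAGMASK.  A minimal sketch of reading the
 * owner out of such a word (illustrative only, mirroring mtx_owner()):
 *
 *	uintptr_t v = m->mtx_lock;
 *
 *	if (v != MTX_UNOWNED && v != MTX_DESTROYED)
 *		owner = (struct thread *)(v & ~MTX_FLAGMASK);
 */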
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, int how);
static void	lock_spin(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static int	unlock_mtx(struct lock_object *lock);
static int	unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(const struct lock_object *lock, int what)
{

	mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, int how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, int how)
{

	panic("spin locks can only use msleep_spin");
}

int
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

int
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m = (const struct mtx *)lock;

	*owner = mtx_owner(m);
	return (mtx_unowned(m) == 0);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
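/*
 * Typical sleep mutex usage, shown as an illustrative sketch only (the "foo"
 * lock is hypothetical and not part of this file).  The mtx_lock() and
 * mtx_unlock() macros normally expand to the inlined fast path; callers such
 * as modules that do not use the inlines end up in the *_flags() functions
 * below instead:
 *
 *	static struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	mtx_lock(&foo_mtx);
 *	... modify data protected by foo_mtx ...
 *	mtx_unlock(&foo_mtx);
 */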
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(!TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	__mtx_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	curthread->td_locks--;
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
	__mtx_unlock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	__mtx_lock_spin(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
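/*
 * Illustrative sketch of the usual mtx_trylock() pattern (hypothetical "foo"
 * lock, not part of this file).  A non-zero return means the lock was
 * obtained, possibly recursively; on failure the protected data must not be
 * touched:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		... fast path with foo_mtx held ...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		... fall back without the lock ...
 *	}
 */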
int
mtx_trylock_flags_(struct mtx *m, int opts, const char *file, int line)
{
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(!TD_IS_IDLETHREAD(curthread),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
			    m, contested, waittime, file, line);

	}

	return (rval);
}

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	struct turnstile *ts;
	uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
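		/*
		 * The low bits of the lock word may carry MTX_RECURSED or
		 * MTX_CONTESTED, so the snapshot taken below is masked with
		 * ~MTX_FLAGMASK before it is treated as the owning thread
		 * pointer and that thread's run state is examined.
		 */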
		v = m->mtx_lock;
		if (v != MTX_UNOWNED) {
			owner = (struct thread *)(v & ~MTX_FLAGMASK);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&m->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, m, owner);
				while (mtx_owner(m) == owner &&
				    TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				continue;
			}
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs();
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs();
		sleep_cnt++;
#endif
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (spin_cnt - sleep_cnt));
#endif
}

static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf("spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td, printf);
#endif
	panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
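/*
 * Illustrative sketch of spin mutex usage (hypothetical "foo" lock, not part
 * of this file).  Acquiring a spin mutex enters a spinlock section, so
 * interrupts stay disabled on the local CPU until the matching unlock and
 * the critical section must be kept short:
 *
 *	static struct mtx foo_spin_mtx;
 *
 *	mtx_init(&foo_spin_mtx, "foo spin", NULL, MTX_SPIN);
 *	mtx_lock_spin(&foo_spin_mtx);
 *	... brief critical section ...
 *	mtx_unlock_spin(&foo_spin_mtx);
 */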
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	int i = 0;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
	while (!_mtx_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else
				_mtx_lock_spin_failed(m);
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
	    contested, waittime, (file), (line));
	LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i);
}
#endif /* SMP */

void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	int i;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
#endif

	i = 0;
	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED())
		return;

	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
			    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
			spin_cnt++;
#endif
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 10000000)
					cpu_spinwait();
				else if (i < 60000000 ||
				    kdb_active || panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
	}
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
		    m, contested, waittime, (file), (line));
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
}

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;

	if (SCHEDULER_STOPPED())
		return;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_mtx_release_lock_quick(m);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */
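/*
 * Illustrative sketch of how callers normally use mtx_assert() (hypothetical
 * function and lock, not part of this file).  With INVARIANTS the check is
 * backed by _mtx_assert() below; otherwise it typically compiles away:
 *
 *	static void
 *	foo_modify(struct foo_softc *sc)
 *	{
 *
 *		mtx_assert(&sc->sc_mtx, MA_OWNED);
 *		... callers must hold sc_mtx here ...
 *	}
 */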
731 */ 732 733 /* 734 * The backing function for the INVARIANTS-enabled mtx_assert() 735 */ 736 #ifdef INVARIANT_SUPPORT 737 void 738 _mtx_assert(const struct mtx *m, int what, const char *file, int line) 739 { 740 741 if (panicstr != NULL || dumping) 742 return; 743 switch (what) { 744 case MA_OWNED: 745 case MA_OWNED | MA_RECURSED: 746 case MA_OWNED | MA_NOTRECURSED: 747 if (!mtx_owned(m)) 748 panic("mutex %s not owned at %s:%d", 749 m->lock_object.lo_name, file, line); 750 if (mtx_recursed(m)) { 751 if ((what & MA_NOTRECURSED) != 0) 752 panic("mutex %s recursed at %s:%d", 753 m->lock_object.lo_name, file, line); 754 } else if ((what & MA_RECURSED) != 0) { 755 panic("mutex %s unrecursed at %s:%d", 756 m->lock_object.lo_name, file, line); 757 } 758 break; 759 case MA_NOTOWNED: 760 if (mtx_owned(m)) 761 panic("mutex %s owned at %s:%d", 762 m->lock_object.lo_name, file, line); 763 break; 764 default: 765 panic("unknown mtx_assert at %s:%d", file, line); 766 } 767 } 768 #endif 769 770 /* 771 * The MUTEX_DEBUG-enabled mtx_validate() 772 * 773 * Most of these checks have been moved off into the LO_INITIALIZED flag 774 * maintained by the witness code. 775 */ 776 #ifdef MUTEX_DEBUG 777 778 void mtx_validate(struct mtx *); 779 780 void 781 mtx_validate(struct mtx *m) 782 { 783 784 /* 785 * XXX: When kernacc() does not require Giant we can reenable this check 786 */ 787 #ifdef notyet 788 /* 789 * Can't call kernacc() from early init386(), especially when 790 * initializing Giant mutex, because some stuff in kernacc() 791 * requires Giant itself. 792 */ 793 if (!cold) 794 if (!kernacc((caddr_t)m, sizeof(m), 795 VM_PROT_READ | VM_PROT_WRITE)) 796 panic("Can't read and write to mutex %p", m); 797 #endif 798 } 799 #endif 800 801 /* 802 * General init routine used by the MTX_SYSINIT() macro. 803 */ 804 void 805 mtx_sysinit(void *arg) 806 { 807 struct mtx_args *margs = arg; 808 809 mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts); 810 } 811 812 /* 813 * Mutex initialization routine; initialize lock `m' of type contained in 814 * `opts' with options contained in `opts' and name `name.' The optional 815 * lock type `type' is used as a general lock category name for use with 816 * witness. 817 */ 818 void 819 mtx_init(struct mtx *m, const char *name, const char *type, int opts) 820 { 821 struct lock_class *class; 822 int flags; 823 824 MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE | 825 MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0); 826 ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock, 827 ("%s: mtx_lock not aligned for %s: %p", __func__, name, 828 &m->mtx_lock)); 829 830 #ifdef MUTEX_DEBUG 831 /* Diagnostic and error correction */ 832 mtx_validate(m); 833 #endif 834 835 /* Determine lock class and lock flags. */ 836 if (opts & MTX_SPIN) 837 class = &lock_class_mtx_spin; 838 else 839 class = &lock_class_mtx_sleep; 840 flags = 0; 841 if (opts & MTX_QUIET) 842 flags |= LO_QUIET; 843 if (opts & MTX_RECURSE) 844 flags |= LO_RECURSABLE; 845 if ((opts & MTX_NOWITNESS) == 0) 846 flags |= LO_WITNESS; 847 if (opts & MTX_DUPOK) 848 flags |= LO_DUPOK; 849 if (opts & MTX_NOPROFILE) 850 flags |= LO_NOPROFILE; 851 852 /* Initialize mutex. */ 853 m->mtx_lock = MTX_UNOWNED; 854 m->mtx_recurse = 0; 855 856 lock_init(&m->lock_object, class, name, type, flags); 857 } 858 859 /* 860 * Remove lock `m' from all_mtx queue. We don't allow MTX_QUIET to be 861 * passed in as a flag here because if the corresponding mtx_init() was 862 * called with MTX_QUIET set, then it will already be set in the mutex's 863 * flags. 
void
mtx_destroy(struct mtx *m)
{

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif