/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_mutex_wake_all.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Force MUTEX_WAKE_ALL for now.
 * single thread wakeup needs fixes to avoid race conditions with
 * priority inheritance.
 */
#ifndef MUTEX_WAKE_ALL
#define	MUTEX_WAKE_ALL
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))

#ifdef DDB
static void	db_show_mtx(struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, int how);
static void	lock_spin(struct lock_object *lock, int how);
static int	unlock_mtx(struct lock_object *lock);
static int	unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

#ifdef LOCK_PROFILING
static inline void lock_profile_init(void)
{
	int i;
	/* Initialize the mutex profiling locks */
	for (i = 0; i < LPROF_LOCK_SIZE; i++) {
		mtx_init(&lprof_locks[i], "mprof lock",
		    NULL, MTX_SPIN|MTX_QUIET|MTX_NOPROFILE);
	}
}
#else
static inline void lock_profile_init(void) {;}
#endif

void
lock_mtx(struct lock_object *lock, int how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, int how)
{

	panic("spin locks can only use msleep_spin");
}

int
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

int
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);

	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{
	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	curthread->td_locks--;
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	if (m->mtx_recurse == 0)
		lock_profile_release_lock(&m->lock_object);
	_rel_sleep_lock(m, curthread, opts, file, line);
}
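
/*
 * Usage sketch: consumers go through the mtx_lock()/mtx_unlock() macros,
 * which (per the comment above) resolve to these function versions for
 * modules.  The `foo_softc' structure and its fields are hypothetical and
 * only illustrate the usual pattern of protecting per-instance data:
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		int		sc_busy;
 *	};
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_busy = 1;
 *	mtx_unlock(&sc->sc_mtx);
 */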

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	_rel_spin_lock(m);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval, contested = 0;
	uint64_t waittime = 0;

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			lock_profile_obtain_lock_success(&m->lock_object, contested,
			    waittime, file, line);

	}

	return (rval);
}
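
/*
 * Usage sketch for the try-lock path: callers use the mtx_trylock() macro
 * and must handle failure, since the lock is not acquired when 0 is
 * returned.  The `sc' pointer below is hypothetical:
 *
 *	if (mtx_trylock(&sc->sc_mtx) == 0)
 *		return (EBUSY);
 *	sc->sc_busy = 1;
 *	mtx_unlock(&sc->sc_mtx);
 */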

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
	int contested = 0;
	uint64_t waittime = 0;
	uintptr_t v;

	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, tid)) {
		turnstile_lock(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_release(&m->lock_object);
			cpu_spinwait();
			continue;
		}

#ifdef MUTEX_WAKE_ALL
		MPASS(v != MTX_CONTESTED);
#else
		/*
		 * The mutex was marked contested on release. This means that
		 * there are other threads blocked on it.  Grab ownership of
		 * it and propagate its priority to the current thread if
		 * necessary.
		 */
		if (v == MTX_CONTESTED) {
			m->mtx_lock = tid | MTX_CONTESTED;
			turnstile_claim(&m->lock_object);
			break;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_release(&m->lock_object);
			cpu_spinwait();
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
		if (TD_IS_RUNNING(owner))
#else
		if (m != &Giant && TD_IS_RUNNING(owner))
#endif
		{
			turnstile_release(&m->lock_object);
			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
				cpu_spinwait();
			}
			continue;
		}
#endif	/* ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
		turnstile_wait(&m->lock_object, mtx_owner(m),
		    TS_EXCLUSIVE_QUEUE);
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	lock_profile_obtain_lock_success(&m->lock_object, contested,
	    waittime, (file), (line));
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	int i = 0, contested = 0;
	struct thread *td;
	uint64_t waittime = 0;

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
	while (!_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else {
				td = mtx_owner(m);

				/* If the mutex is unlocked, try again. */
				if (td == NULL)
					continue;
				printf(
			"spin lock %p (%s) held by %p (tid %d) too long\n",
				    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
				witness_display_spinlock(&m->lock_object, td);
#endif
				panic("spin lock held too long");
			}
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	lock_profile_obtain_lock_success(&m->lock_object, contested,
	    waittime, (file), (line));

}
#endif /* SMP */

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;
#ifndef PREEMPTION
	struct thread *td, *td1;
#endif

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	turnstile_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#ifdef ADAPTIVE_MUTEXES
	if (ts == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		turnstile_release(&m->lock_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif
#ifndef PREEMPTION
	/* XXX */
	td1 = turnstile_head(ts, TS_EXCLUSIVE_QUEUE);
#endif
#ifdef MUTEX_WAKE_ALL
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_release_lock_quick(m);
#else
	if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE)) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else {
		m->mtx_lock = MTX_CONTESTED;
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
			    m);
	}
#endif
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);

#ifndef PREEMPTION
	/*
	 * XXX: This is just a hack until preemption is done.  However,
	 * once preemption is done we need to either wrap the
	 * turnstile_signal() and release of the actual lock in an
	 * extra critical section or change the preemption code to
	 * always just set a flag and never do instant-preempts.
	 */
	td = curthread;
	if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
		return;

	mtx_lock_spin(&sched_lock);
	if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->lock_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		mi_switch(SW_INVOL, NULL);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}
	mtx_unlock_spin(&sched_lock);
#endif
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */
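
/*
 * Usage sketch for spin mutexes: acquisition and release go through the
 * mtx_lock_spin()/mtx_unlock_spin() macros, and the lock should only be
 * held briefly, since interrupts are disabled on the local CPU via
 * spinlock_enter() while it is held.  The `foo_intr_mtx' name and
 * foo_handle_event() are hypothetical:
 *
 *	mtx_lock_spin(&foo_intr_mtx);
 *	foo_handle_event(sc);
 *	mtx_unlock_spin(&foo_intr_mtx);
 */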

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL || dumping)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' with the type and
 * options contained in `opts' and name `name.'  The optional lock type
 * `type' is used as a general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_class *class;
	int flags;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;

	/* Initialize mutex. */
	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;

	lock_profile_object_init(&m->lock_object, class, name);
	lock_init(&m->lock_object, class, name, type, flags);
}
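
/*
 * Usage sketch: a subsystem either calls mtx_init() from its own
 * initialization path or uses MTX_SYSINIT() to have mtx_sysinit() do it
 * at boot.  The `foo' names below are hypothetical:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo mutex", MTX_DEF);
 *
 * or, explicitly:
 *
 *	mtx_init(&foo_mtx, "foo mutex", NULL, MTX_DEF);
 *	...
 *	mtx_destroy(&foo_mtx);
 */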

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_profile_object_destroy(&m->lock_object);
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Set up turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);

	lock_profile_init();
}

#ifdef DDB
void
db_show_mtx(struct lock_object *lock)
{
	struct thread *td;
	struct mtx *m;

	m = (struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif