/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_mutex_wake_all.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Force MUTEX_WAKE_ALL for now.
 * single thread wakeup needs fixes to avoid race conditions with
 * priority inheritance.
 */
#ifndef MUTEX_WAKE_ALL
#define	MUTEX_WAKE_ALL
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))

#ifdef DDB
static void	db_show_mtx(struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, int how);
static void	lock_spin(struct lock_object *lock, int how);
static int	unlock_mtx(struct lock_object *lock);
static int	unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
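 *
 * Each class also supplies the generic lc_lock/lc_unlock hooks that the
 * sleep/wakeup code uses to drop and reacquire a lock around a sleep; the
 * spin mutex hooks below deliberately panic, since only msleep_spin() is
 * expected to be used with a spin mutex.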
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

#ifdef LOCK_PROFILING
static inline void lock_profile_init(void)
{
	int i;
	/* Initialize the mutex profiling locks */
	for (i = 0; i < LPROF_LOCK_SIZE; i++) {
		mtx_init(&lprof_locks[i], "mprof lock",
		    NULL, MTX_SPIN|MTX_QUIET|MTX_NOPROFILE);
	}
}
#else
static inline void lock_profile_init(void) {;}
#endif

void
lock_mtx(struct lock_object *lock, int how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, int how)
{

	panic("spin locks can only use msleep_spin");
}

int
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

int
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
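 *
 * A rough sketch of how callers normally reach these functions (the exact
 * expansion lives in sys/mutex.h and varies with kernel options such as
 * LOCK_DEBUG, so treat this as illustrative only):
 *
 *	mtx_lock(m)	-> _mtx_lock_flags((m), 0, LOCK_FILE, LOCK_LINE)
 *	mtx_unlock(m)	-> _mtx_unlock_flags((m), 0, LOCK_FILE, LOCK_LINE)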
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);

	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{
	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	curthread->td_locks--;
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	if (m->mtx_recurse == 0)
		lock_profile_release_lock(&m->lock_object);
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	_rel_spin_lock(m);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval, contested = 0;
	uint64_t waittime = 0;

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			lock_profile_obtain_lock_success(&m->lock_object, contested,
			    waittime, file, line);

	}

	return (rval);
}

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
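 *
 * In outline: recursion is handled up front; otherwise we loop taking the
 * turnstile chain lock, re-checking the lock word, setting MTX_CONTESTED,
 * optionally spinning while the owner runs on another CPU (ADAPTIVE_MUTEXES),
 * and blocking on the turnstile.  The outer loop retries until
 * _obtain_lock() succeeds.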
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
	int contested = 0;
	uint64_t waittime = 0;
	uintptr_t v;

	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, tid)) {
		turnstile_lock(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_release(&m->lock_object);
			cpu_spinwait();
			continue;
		}

#ifdef MUTEX_WAKE_ALL
		MPASS(v != MTX_CONTESTED);
#else
		/*
		 * The mutex was marked contested on release. This means that
		 * there are other threads blocked on it.  Grab ownership of
		 * it and propagate its priority to the current thread if
		 * necessary.
		 */
		if (v == MTX_CONTESTED) {
			m->mtx_lock = tid | MTX_CONTESTED;
			turnstile_claim(&m->lock_object);
			break;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_release(&m->lock_object);
			cpu_spinwait();
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
		if (TD_IS_RUNNING(owner))
#else
		if (m != &Giant && TD_IS_RUNNING(owner))
#endif
		{
			turnstile_release(&m->lock_object);
			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
				cpu_spinwait();
			}
			continue;
		}
#endif	/* ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
		turnstile_wait(&m->lock_object, mtx_owner(m),
		    TS_EXCLUSIVE_QUEUE);
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	lock_profile_obtain_lock_success(&m->lock_object, contested,
	    waittime, (file), (line));
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
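 *
 * The wait loop below leaves the spinlock critical section via
 * spinlock_exit() so that interrupts can be serviced while we spin, and
 * panics if the lock appears to be held forever (unless the debugger is
 * active or the system is already panicking).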
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	int i = 0, contested = 0;
	struct thread *td;
	uint64_t waittime = 0;

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
	while (!_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else {
				td = mtx_owner(m);

				/* If the mutex is unlocked, try again. */
				if (td == NULL)
					continue;
				printf(
			"spin lock %p (%s) held by %p (tid %d) too long\n",
				    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
				witness_display_spinlock(&m->lock_object, td);
#endif
				panic("spin lock held too long");
			}
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	lock_profile_obtain_lock_success(&m->lock_object, contested,
	    waittime, (file), (line));

}
#endif /* SMP */

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
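 *
 * After stripping one level of recursion, the lock's turnstile is looked
 * up and either every waiter is awakened (MUTEX_WAKE_ALL, currently forced
 * on above) or a single waiter is signalled, leaving MTX_CONTESTED set
 * when more threads remain blocked.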
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;
#ifndef PREEMPTION
	struct thread *td, *td1;
#endif

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	turnstile_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#ifdef ADAPTIVE_MUTEXES
	if (ts == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		turnstile_release(&m->lock_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif
#ifndef PREEMPTION
	/* XXX */
	td1 = turnstile_head(ts, TS_EXCLUSIVE_QUEUE);
#endif
#ifdef MUTEX_WAKE_ALL
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_release_lock_quick(m);
#else
	if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE)) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else {
		m->mtx_lock = MTX_CONTESTED;
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
			    m);
	}
#endif
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);

#ifndef PREEMPTION
	/*
	 * XXX: This is just a hack until preemption is done.  However,
	 * once preemption is done we need to either wrap the
	 * turnstile_signal() and release of the actual lock in an
	 * extra critical section or change the preemption code to
	 * always just set a flag and never do instant-preempts.
	 */
	td = curthread;
	if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
		return;

	mtx_lock_spin(&sched_lock);
	if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->lock_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		mi_switch(SW_INVOL, NULL);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}
	mtx_unlock_spin(&sched_lock);
#endif
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL || dumping)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
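
/*
 * Illustrative use of the assertion above (a hedged sketch; "foo_mtx" is a
 * made-up lock name, not part of this file):
 *
 *	mtx_assert(&foo_mtx, MA_OWNED);
 *
 * at the top of a routine documents and enforces that callers must hold
 * foo_mtx; the check is compiled in only for INVARIANTS kernels.
 */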

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' with options contained
 * in `opts' and name `name.'  The optional lock type `type' is used as a
 * general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_class *class;
	int flags;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;

	/* Initialize mutex. */
	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;

	lock_profile_object_init(&m->lock_object, class, name);
	lock_init(&m->lock_object, class, name, type, flags);
}

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_profile_object_destroy(&m->lock_object);
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
761c53c013bSJohn Baldwin */ 7620c88508aSJohn Baldwin mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE); 7630c88508aSJohn Baldwin mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE); 7640c88508aSJohn Baldwin mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK); 7658c4b6380SJohn Baldwin mtx_init(&devmtx, "cdev", NULL, MTX_DEF); 766c53c013bSJohn Baldwin mtx_lock(&Giant); 7677c0435b9SKip Macy 7687c0435b9SKip Macy lock_profile_init(); 769c53c013bSJohn Baldwin } 770d272fe53SJohn Baldwin 771d272fe53SJohn Baldwin #ifdef DDB 772d272fe53SJohn Baldwin void 773d272fe53SJohn Baldwin db_show_mtx(struct lock_object *lock) 774d272fe53SJohn Baldwin { 775d272fe53SJohn Baldwin struct thread *td; 776d272fe53SJohn Baldwin struct mtx *m; 777d272fe53SJohn Baldwin 778d272fe53SJohn Baldwin m = (struct mtx *)lock; 779d272fe53SJohn Baldwin 780d272fe53SJohn Baldwin db_printf(" flags: {"); 78183a81bcbSJohn Baldwin if (LOCK_CLASS(lock) == &lock_class_mtx_spin) 782d272fe53SJohn Baldwin db_printf("SPIN"); 783d272fe53SJohn Baldwin else 784d272fe53SJohn Baldwin db_printf("DEF"); 785aa89d8cdSJohn Baldwin if (m->lock_object.lo_flags & LO_RECURSABLE) 786d272fe53SJohn Baldwin db_printf(", RECURSE"); 787aa89d8cdSJohn Baldwin if (m->lock_object.lo_flags & LO_DUPOK) 788d272fe53SJohn Baldwin db_printf(", DUPOK"); 789d272fe53SJohn Baldwin db_printf("}\n"); 790d272fe53SJohn Baldwin db_printf(" state: {"); 791d272fe53SJohn Baldwin if (mtx_unowned(m)) 792d272fe53SJohn Baldwin db_printf("UNOWNED"); 793d272fe53SJohn Baldwin else { 794d272fe53SJohn Baldwin db_printf("OWNED"); 795d272fe53SJohn Baldwin if (m->mtx_lock & MTX_CONTESTED) 796d272fe53SJohn Baldwin db_printf(", CONTESTED"); 797d272fe53SJohn Baldwin if (m->mtx_lock & MTX_RECURSED) 798d272fe53SJohn Baldwin db_printf(", RECURSED"); 799d272fe53SJohn Baldwin } 800d272fe53SJohn Baldwin db_printf("}\n"); 801d272fe53SJohn Baldwin if (!mtx_unowned(m)) { 802d272fe53SJohn Baldwin td = mtx_owner(m); 803d272fe53SJohn Baldwin db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td, 804d272fe53SJohn Baldwin td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm); 805d272fe53SJohn Baldwin if (mtx_recursed(m)) 806d272fe53SJohn Baldwin db_printf(" recursed: %d\n", m->mtx_recurse); 807d272fe53SJohn Baldwin } 808d272fe53SJohn Baldwin } 809d272fe53SJohn Baldwin #endif 810