/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *    and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)    ((m)->mtx_lock == MTX_UNOWNED)

#define mtx_destroyed(m)  ((m)->mtx_lock == MTX_DESTROYED)

#define mtx_owner(m)      ((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
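/*
 * As the macros above rely on, the lock word (mtx_lock) either holds one
 * of the distinguished values MTX_UNOWNED or MTX_DESTROYED, or it holds
 * the owning thread pointer with the low MTX_FLAGMASK bits reused for
 * per-lock state such as MTX_RECURSED and MTX_CONTESTED; mtx_owner()
 * therefore masks those bits off before treating the word as a pointer.
 */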

static void    assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void    db_show_mtx(const struct lock_object *lock);
#endif
static void    lock_mtx(struct lock_object *lock, int how);
static void    lock_spin(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int     owner_mtx(const struct lock_object *lock,
                   struct thread **owner);
#endif
static int     unlock_mtx(struct lock_object *lock);
static int     unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
    .lc_name = "sleep mutex",
    .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
    .lc_assert = assert_mtx,
#ifdef DDB
    .lc_ddb_show = db_show_mtx,
#endif
    .lc_lock = lock_mtx,
    .lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
    .lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
    .lc_name = "spin mutex",
    .lc_flags = LC_SPINLOCK | LC_RECURSABLE,
    .lc_assert = assert_mtx,
#ifdef DDB
    .lc_ddb_show = db_show_mtx,
#endif
    .lc_lock = lock_spin,
    .lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
    .lc_owner = owner_mtx,
#endif
};

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(const struct lock_object *lock, int what)
{

    mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, int how)
{

    mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, int how)
{

    panic("spin locks can only use msleep_spin");
}

int
unlock_mtx(struct lock_object *lock)
{
    struct mtx *m;

    m = (struct mtx *)lock;
    mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
    mtx_unlock(m);
    return (0);
}

int
unlock_spin(struct lock_object *lock)
{

    panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
    const struct mtx *m = (const struct mtx *)lock;

    *owner = mtx_owner(m);
    return (mtx_unowned(m) == 0);
}
#endif
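/*
 * The lc_lock/lc_unlock methods above let generic sleep code drop and
 * reacquire a mutex by lock class (as when a sleep primitive is handed a
 * mutex as its interlock).  Spin mutexes have no usable methods here; the
 * only sleep interface that accepts one is msleep_spin(), so lock_spin()
 * and unlock_spin() simply panic if they are ever reached.
 */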

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

    if (SCHEDULER_STOPPED())
        return;
    MPASS(curthread != NULL);
    KASSERT(m->mtx_lock != MTX_DESTROYED,
        ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
    KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
        ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
        file, line));
    WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
        file, line, NULL);

    __mtx_lock(m, curthread, opts, file, line);
    LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
        line);
    WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
    curthread->td_locks++;
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

    if (SCHEDULER_STOPPED())
        return;
    MPASS(curthread != NULL);
    KASSERT(m->mtx_lock != MTX_DESTROYED,
        ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
    KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
        ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
        file, line));
    curthread->td_locks--;
    WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
    LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
        line);
    mtx_assert(m, MA_OWNED);

    if (m->mtx_recurse == 0)
        LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
    __mtx_unlock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

    if (SCHEDULER_STOPPED())
        return;
    MPASS(curthread != NULL);
    KASSERT(m->mtx_lock != MTX_DESTROYED,
        ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
    KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
        ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
        m->lock_object.lo_name, file, line));
    if (mtx_owned(m))
        KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
            ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
            m->lock_object.lo_name, file, line));
    WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
        file, line, NULL);
    __mtx_lock_spin(m, curthread, opts, file, line);
    LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
        line);
    WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

    if (SCHEDULER_STOPPED())
        return;
    MPASS(curthread != NULL);
    KASSERT(m->mtx_lock != MTX_DESTROYED,
        ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
    KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
        ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
        m->lock_object.lo_name, file, line));
    WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
    LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
        line);
    mtx_assert(m, MA_OWNED);

    __mtx_unlock_spin(m);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
mtx_trylock_flags_(struct mtx *m, int opts, const char *file, int line)
{
#ifdef LOCK_PROFILING
    uint64_t waittime = 0;
    int contested = 0;
#endif
    int rval;

    if (SCHEDULER_STOPPED())
        return (1);

    MPASS(curthread != NULL);
    KASSERT(m->mtx_lock != MTX_DESTROYED,
        ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
    KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
        ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
        file, line));

    if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
        m->mtx_recurse++;
        atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
        rval = 1;
    } else
        rval = _mtx_obtain_lock(m, (uintptr_t)curthread);

    LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
    if (rval) {
        WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
            file, line);
        curthread->td_locks++;
        if (m->mtx_recurse == 0)
            LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
                m, contested, waittime, file, line);

    }

    return (rval);
}
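/*
 * Illustrative example (not part of the original source): mtx_trylock()
 * is commonly used to take two sleep mutexes in the reverse of their
 * normal order without risking a lock order reversal, e.g.:
 *
 *    mtx_lock(&inner);
 *    if (!mtx_trylock(&outer)) {
 *        mtx_unlock(&inner);
 *        mtx_lock(&outer);
 *        mtx_lock(&inner);
 *    }
 *
 * where `inner' and `outer' are hypothetical locks named only for the
 * example.
 */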

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
    struct turnstile *ts;
    uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
    volatile struct thread *owner;
#endif
#ifdef KTR
    int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
    int contested = 0;
    uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
    uint64_t spin_cnt = 0;
    uint64_t sleep_cnt = 0;
    int64_t sleep_time = 0;
#endif

    if (SCHEDULER_STOPPED())
        return;

    if (mtx_owned(m)) {
        KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
            ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
            m->lock_object.lo_name, file, line));
        m->mtx_recurse++;
        atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
        if (LOCK_LOG_TEST(&m->lock_object, opts))
            CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
        return;
    }

#ifdef HWPMC_HOOKS
    PMC_SOFT_CALL( , , lock, failed);
#endif
    lock_profile_obtain_lock_failed(&m->lock_object,
        &contested, &waittime);
    if (LOCK_LOG_TEST(&m->lock_object, opts))
        CTR4(KTR_LOCK,
            "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
            m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

    while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
        spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
        /*
         * If the owner is running on another CPU, spin until the
         * owner stops running or the state of the lock changes.
         */
        v = m->mtx_lock;
        if (v != MTX_UNOWNED) {
            owner = (struct thread *)(v & ~MTX_FLAGMASK);
            if (TD_IS_RUNNING(owner)) {
                if (LOCK_LOG_TEST(&m->lock_object, 0))
                    CTR3(KTR_LOCK,
                        "%s: spinning on %p held by %p",
                        __func__, m, owner);
                while (mtx_owner(m) == owner &&
                    TD_IS_RUNNING(owner)) {
                    cpu_spinwait();
#ifdef KDTRACE_HOOKS
                    spin_cnt++;
#endif
                }
                continue;
            }
        }
#endif

        ts = turnstile_trywait(&m->lock_object);
        v = m->mtx_lock;

        /*
         * Check if the lock has been released while spinning for
         * the turnstile chain lock.
         */
        if (v == MTX_UNOWNED) {
            turnstile_cancel(ts);
            continue;
        }

#ifdef ADAPTIVE_MUTEXES
        /*
         * The current lock owner might have started executing
         * on another CPU (or the lock could have changed
         * owners) while we were waiting on the turnstile
         * chain lock.  If so, drop the turnstile lock and try
         * again.
         */
        owner = (struct thread *)(v & ~MTX_FLAGMASK);
        if (TD_IS_RUNNING(owner)) {
            turnstile_cancel(ts);
            continue;
        }
#endif

        /*
         * If the mutex isn't already contested and a failure occurs
         * setting the contested bit, the mutex was either released
         * or the state of the MTX_RECURSED bit changed.
         */
        if ((v & MTX_CONTESTED) == 0 &&
            !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
            turnstile_cancel(ts);
            continue;
        }

        /*
         * We definitely must sleep for this lock.
         */
        mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
        if (!cont_logged) {
            CTR6(KTR_CONTENTION,
                "contention: %p at %s:%d wants %s, taken by %s:%d",
                (void *)tid, file, line, m->lock_object.lo_name,
                WITNESS_FILE(&m->lock_object),
                WITNESS_LINE(&m->lock_object));
            cont_logged = 1;
        }
#endif

        /*
         * Block on the turnstile.
         */
#ifdef KDTRACE_HOOKS
        sleep_time -= lockstat_nsecs();
#endif
        turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
        sleep_time += lockstat_nsecs();
        sleep_cnt++;
#endif
    }
#ifdef KTR
    if (cont_logged) {
        CTR4(KTR_CONTENTION,
            "contention end: %s acquired by %p at %s:%d",
            m->lock_object.lo_name, (void *)tid, file, line);
    }
#endif
    LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
        waittime, file, line);
#ifdef KDTRACE_HOOKS
    if (sleep_time)
        LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);

    /*
     * Only record the loops spinning and not sleeping.
     */
    if (spin_cnt > sleep_cnt)
        LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (spin_cnt - sleep_cnt));
#endif
}

static void
_mtx_lock_spin_failed(struct mtx *m)
{
    struct thread *td;

    td = mtx_owner(m);

    /* If the mutex is unlocked, try again. */
    if (td == NULL)
        return;

    printf("spin lock %p (%s) held by %p (tid %d) too long\n",
        m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
    witness_display_spinlock(&m->lock_object, td, printf);
#endif
    panic("spin lock held too long");
}
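/*
 * Both the spin loop below and the one in thread_lock_flags_() spin with
 * interrupts enabled (via spinlock_exit()), start interleaving DELAY(1)
 * after roughly 10 million iterations, and after roughly 60 million
 * iterations treat the spin as a hang and call _mtx_lock_spin_failed()
 * unless the debugger is active or the system is already panicking.
 */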

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
    int i = 0;
#ifdef LOCK_PROFILING
    int contested = 0;
    uint64_t waittime = 0;
#endif

    if (SCHEDULER_STOPPED())
        return;

    if (LOCK_LOG_TEST(&m->lock_object, opts))
        CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

#ifdef HWPMC_HOOKS
    PMC_SOFT_CALL( , , lock, failed);
#endif
    lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
    while (!_mtx_obtain_lock(m, tid)) {

        /* Give interrupts a chance while we spin. */
        spinlock_exit();
        while (m->mtx_lock != MTX_UNOWNED) {
            if (i++ < 10000000) {
                cpu_spinwait();
                continue;
            }
            if (i < 60000000 || kdb_active || panicstr != NULL)
                DELAY(1);
            else
                _mtx_lock_spin_failed(m);
            cpu_spinwait();
        }
        spinlock_enter();
    }

    if (LOCK_LOG_TEST(&m->lock_object, opts))
        CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

    LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
        contested, waittime, (file), (line));
    LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i);
}
#endif /* SMP */
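/*
 * thread_lock() acquires whichever spin mutex td->td_lock currently
 * points at.  Because the scheduler can migrate a thread's lock pointer
 * (including parking it on blocked_lock, which always appears held), the
 * loop below re-checks td->td_lock after every acquisition attempt and
 * retries until the mutex it obtained is still the thread's lock.
 */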

void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
    struct mtx *m;
    uintptr_t tid;
    int i;
#ifdef LOCK_PROFILING
    int contested = 0;
    uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
    uint64_t spin_cnt = 0;
#endif

    i = 0;
    tid = (uintptr_t)curthread;

    if (SCHEDULER_STOPPED())
        return;

    for (;;) {
retry:
        spinlock_enter();
        m = td->td_lock;
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("thread_lock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
            ("thread_lock() of sleep mutex %s @ %s:%d",
            m->lock_object.lo_name, file, line));
        if (mtx_owned(m))
            KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
                ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
                m->lock_object.lo_name, file, line));
        WITNESS_CHECKORDER(&m->lock_object,
            opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
        while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
            spin_cnt++;
#endif
            if (m->mtx_lock == tid) {
                m->mtx_recurse++;
                break;
            }
#ifdef HWPMC_HOOKS
            PMC_SOFT_CALL( , , lock, failed);
#endif
            lock_profile_obtain_lock_failed(&m->lock_object,
                &contested, &waittime);
            /* Give interrupts a chance while we spin. */
            spinlock_exit();
            while (m->mtx_lock != MTX_UNOWNED) {
                if (i++ < 10000000)
                    cpu_spinwait();
                else if (i < 60000000 ||
                    kdb_active || panicstr != NULL)
                    DELAY(1);
                else
                    _mtx_lock_spin_failed(m);
                cpu_spinwait();
                if (m != td->td_lock)
                    goto retry;
            }
            spinlock_enter();
        }
        if (m == td->td_lock)
            break;
        __mtx_unlock_spin(m);    /* does spinlock_exit() */
#ifdef KDTRACE_HOOKS
        spin_cnt++;
#endif
    }
    if (m->mtx_recurse == 0)
        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
            m, contested, waittime, (file), (line));
    LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
        line);
    WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
    LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
}

struct mtx *
thread_lock_block(struct thread *td)
{
    struct mtx *lock;

    THREAD_LOCK_ASSERT(td, MA_OWNED);
    lock = td->td_lock;
    td->td_lock = &blocked_lock;
    mtx_unlock_spin(lock);

    return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
    mtx_assert(new, MA_OWNED);
    MPASS(td->td_lock == &blocked_lock);
    atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
    struct mtx *lock;

    mtx_assert(new, MA_OWNED);
    THREAD_LOCK_ASSERT(td, MA_OWNED);
    lock = td->td_lock;
    td->td_lock = new;
    mtx_unlock_spin(lock);
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
    struct turnstile *ts;

    if (SCHEDULER_STOPPED())
        return;

    if (mtx_recursed(m)) {
        if (--(m->mtx_recurse) == 0)
            atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
        if (LOCK_LOG_TEST(&m->lock_object, opts))
            CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
        return;
    }

    /*
     * We have to lock the chain before the turnstile so this turnstile
     * can be removed from the hash list if it is empty.
     */
    turnstile_chain_lock(&m->lock_object);
    ts = turnstile_lookup(&m->lock_object);
    if (LOCK_LOG_TEST(&m->lock_object, opts))
        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
    MPASS(ts != NULL);
    turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
    _mtx_release_lock_quick(m);

    /*
     * This turnstile is now no longer associated with the mutex.  We can
     * unlock the chain lock so a new turnstile may take its place.
     */
    turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
    turnstile_chain_unlock(&m->lock_object);
}
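/*
 * Note that the unlock path above is not a direct hand-off: every thread
 * blocked on the turnstile is awakened by turnstile_broadcast() and the
 * lock word is simply cleared with _mtx_release_lock_quick(), so the
 * woken threads race to reacquire the mutex in _mtx_lock_sleep().
 */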

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(const struct mtx *m, int what, const char *file, int line)
{

    if (panicstr != NULL || dumping)
        return;
    switch (what) {
    case MA_OWNED:
    case MA_OWNED | MA_RECURSED:
    case MA_OWNED | MA_NOTRECURSED:
        if (!mtx_owned(m))
            panic("mutex %s not owned at %s:%d",
                m->lock_object.lo_name, file, line);
        if (mtx_recursed(m)) {
            if ((what & MA_NOTRECURSED) != 0)
                panic("mutex %s recursed at %s:%d",
                    m->lock_object.lo_name, file, line);
        } else if ((what & MA_RECURSED) != 0) {
            panic("mutex %s unrecursed at %s:%d",
                m->lock_object.lo_name, file, line);
        }
        break;
    case MA_NOTOWNED:
        if (mtx_owned(m))
            panic("mutex %s owned at %s:%d",
                m->lock_object.lo_name, file, line);
        break;
    default:
        panic("unknown mtx_assert at %s:%d", file, line);
    }
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void    mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
    /*
     * Can't call kernacc() from early init386(), especially when
     * initializing Giant mutex, because some stuff in kernacc()
     * requires Giant itself.
     */
    if (!cold)
        if (!kernacc((caddr_t)m, sizeof(m),
            VM_PROT_READ | VM_PROT_WRITE))
            panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
    struct mtx_args *margs = arg;

    mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' with options contained
 * in `opts' and name `name.'  The optional lock type `type' is used as a
 * general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
    struct lock_class *class;
    int flags;

    MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
        MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
    ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
        ("%s: mtx_lock not aligned for %s: %p", __func__, name,
        &m->mtx_lock));

#ifdef MUTEX_DEBUG
    /* Diagnostic and error correction */
    mtx_validate(m);
#endif

    /* Determine lock class and lock flags. */
    if (opts & MTX_SPIN)
        class = &lock_class_mtx_spin;
    else
        class = &lock_class_mtx_sleep;
    flags = 0;
    if (opts & MTX_QUIET)
        flags |= LO_QUIET;
    if (opts & MTX_RECURSE)
        flags |= LO_RECURSABLE;
    if ((opts & MTX_NOWITNESS) == 0)
        flags |= LO_WITNESS;
    if (opts & MTX_DUPOK)
        flags |= LO_DUPOK;
    if (opts & MTX_NOPROFILE)
        flags |= LO_NOPROFILE;

    /* Initialize mutex. */
    m->mtx_lock = MTX_UNOWNED;
    m->mtx_recurse = 0;

    lock_init(&m->lock_object, class, name, type, flags);
}
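/*
 * Illustrative example (not part of the original source): typical
 * consumer usage of the interface implemented in this file, with
 * `foo_mtx' as a hypothetical lock:
 *
 *    static struct mtx foo_mtx;
 *    MTX_SYSINIT(foo_mtx, &foo_mtx, "foo mutex", MTX_DEF);
 *
 *    mtx_lock(&foo_mtx);
 *    ...
 *    mtx_assert(&foo_mtx, MA_OWNED);
 *    mtx_unlock(&foo_mtx);
 *
 * or, without SYSINIT, an explicit mtx_init(&foo_mtx, "foo mutex", NULL,
 * MTX_DEF) paired with mtx_destroy(&foo_mtx) at teardown.
 */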

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

    if (!mtx_owned(m))
        MPASS(mtx_unowned(m));
    else {
        MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

        /* Perform the non-mtx related part of mtx_unlock_spin(). */
        if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
            spinlock_exit();
        else
            curthread->td_locks--;

        lock_profile_release_lock(&m->lock_object);
        /* Tell witness this isn't locked to make it happy. */
        WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
            __LINE__);
    }

    m->mtx_lock = MTX_DESTROYED;
    lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

    /* Setup turnstiles so that sleep mutexes work. */
    init_turnstiles();

    /*
     * Initialize mutexes.
     */
    mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
    mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
    blocked_lock.mtx_lock = 0xdeadc0de;    /* Always blocked. */
    mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
    mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
    mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
    mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
    struct thread *td;
    const struct mtx *m;

    m = (const struct mtx *)lock;

    db_printf(" flags: {");
    if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
        db_printf("SPIN");
    else
        db_printf("DEF");
    if (m->lock_object.lo_flags & LO_RECURSABLE)
        db_printf(", RECURSE");
    if (m->lock_object.lo_flags & LO_DUPOK)
        db_printf(", DUPOK");
    db_printf("}\n");
    db_printf(" state: {");
    if (mtx_unowned(m))
        db_printf("UNOWNED");
    else if (mtx_destroyed(m))
        db_printf("DESTROYED");
    else {
        db_printf("OWNED");
        if (m->mtx_lock & MTX_CONTESTED)
            db_printf(", CONTESTED");
        if (m->mtx_lock & MTX_RECURSED)
            db_printf(", RECURSED");
    }
    db_printf("}\n");
    if (!mtx_unowned(m) && !mtx_destroyed(m)) {
        td = mtx_owner(m);
        db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
            td->td_tid, td->td_proc->p_pid, td->td_name);
        if (mtx_recursed(m))
            db_printf(" recursed: %d\n", m->mtx_recurse);
    }
}
#endif