/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *      and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
 */
#define mtxlock2mtx(c)  (__containerof(c, struct mtx, mtx_lock))

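/*
 * Illustrative sketch (assumed caller behaviour, not taken from this file):
 * the mtx_lock_flags()-style macros in <sys/mutex.h> are expected to pass
 * the address of the lock word itself as the "cookie", e.g.
 *
 *      __mtx_lock_flags(&(m)->mtx_lock, opts, file, line);
 *
 * and mtxlock2mtx() recovers the enclosing struct mtx via __containerof(),
 * so this file does not depend on the caller's struct mtx layout.
 */
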
/*
 * Internal utility macros.
 */
#define mtx_unowned(m)  ((m)->mtx_lock == MTX_UNOWNED)

#define mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

#define mtx_owner(m)    ((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))

static void     assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void     db_show_mtx(const struct lock_object *lock);
#endif
static void     lock_mtx(struct lock_object *lock, uintptr_t how);
static void     lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int      owner_mtx(const struct lock_object *lock,
                    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
        .lc_name = "sleep mutex",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
        .lc_assert = assert_mtx,
#ifdef DDB
        .lc_ddb_show = db_show_mtx,
#endif
        .lc_lock = lock_mtx,
        .lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
        .lc_name = "spin mutex",
        .lc_flags = LC_SPINLOCK | LC_RECURSABLE,
        .lc_assert = assert_mtx,
#ifdef DDB
        .lc_ddb_show = db_show_mtx,
#endif
        .lc_lock = lock_spin,
        .lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_mtx,
#endif
};

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(const struct lock_object *lock, int what)
{

        mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

        mtx_lock((struct mtx *)lock);
}

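/*
 * Editorial note (inferred from the lock_class wiring above): lc_lock and
 * lc_unlock are the hooks the generic sleep code uses to drop and reacquire
 * a mutex a thread sleeps on, e.g. (made-up names):
 *
 *      mtx_sleep(&sc->sc_flag, &sc->sc_mtx, PZERO, "fooslp", hz);
 *
 * That works for sleep mutexes through lock_mtx()/unlock_mtx(); the
 * spin-mutex hooks below panic instead, because only msleep_spin() may be
 * used to sleep while holding a spin mutex.
 */
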
use msleep_spin"); 1666e21afd4SJohn Baldwin } 1676e21afd4SJohn Baldwin 1687faf4d90SDavide Italiano uintptr_t 1696e21afd4SJohn Baldwin unlock_mtx(struct lock_object *lock) 1706e21afd4SJohn Baldwin { 1716e21afd4SJohn Baldwin struct mtx *m; 1726e21afd4SJohn Baldwin 1736e21afd4SJohn Baldwin m = (struct mtx *)lock; 1746e21afd4SJohn Baldwin mtx_assert(m, MA_OWNED | MA_NOTRECURSED); 1756e21afd4SJohn Baldwin mtx_unlock(m); 1766e21afd4SJohn Baldwin return (0); 1776e21afd4SJohn Baldwin } 1786e21afd4SJohn Baldwin 1797faf4d90SDavide Italiano uintptr_t 1806e21afd4SJohn Baldwin unlock_spin(struct lock_object *lock) 1816e21afd4SJohn Baldwin { 1826e21afd4SJohn Baldwin 1836e21afd4SJohn Baldwin panic("spin locks can only use msleep_spin"); 1846e21afd4SJohn Baldwin } 1856e21afd4SJohn Baldwin 186a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 187a5aedd68SStacey Son int 188d576deedSPawel Jakub Dawidek owner_mtx(const struct lock_object *lock, struct thread **owner) 189a5aedd68SStacey Son { 190d576deedSPawel Jakub Dawidek const struct mtx *m = (const struct mtx *)lock; 191a5aedd68SStacey Son 192a5aedd68SStacey Son *owner = mtx_owner(m); 193a5aedd68SStacey Son return (mtx_unowned(m) == 0); 194a5aedd68SStacey Son } 195a5aedd68SStacey Son #endif 196a5aedd68SStacey Son 1970cde2e34SJason Evans /* 1986283b7d0SJohn Baldwin * Function versions of the inlined __mtx_* macros. These are used by 1996283b7d0SJohn Baldwin * modules and can also be called from assembly language if needed. 2006283b7d0SJohn Baldwin */ 2016283b7d0SJohn Baldwin void 2027f44c618SAttilio Rao __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line) 2036283b7d0SJohn Baldwin { 2047f44c618SAttilio Rao struct mtx *m; 2056283b7d0SJohn Baldwin 20635370593SAndriy Gapon if (SCHEDULER_STOPPED()) 20735370593SAndriy Gapon return; 2087f44c618SAttilio Rao 2097f44c618SAttilio Rao m = mtxlock2mtx(c); 2107f44c618SAttilio Rao 211cd2fe4e6SAttilio Rao KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), 212e3ae0dfeSAttilio Rao ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d", 213e3ae0dfeSAttilio Rao curthread, m->lock_object.lo_name, file, line)); 214186abbd7SJohn Baldwin KASSERT(m->mtx_lock != MTX_DESTROYED, 215186abbd7SJohn Baldwin ("mtx_lock() of destroyed mutex @ %s:%d", file, line)); 216aa89d8cdSJohn Baldwin KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, 217aa89d8cdSJohn Baldwin ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, 2180d975d63SJohn Baldwin file, line)); 219ac6b769bSAttilio Rao WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) | 220ac6b769bSAttilio Rao LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); 2217c0435b9SKip Macy 222961135eaSJohn Baldwin __mtx_lock(m, curthread, opts, file, line); 223aa89d8cdSJohn Baldwin LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, 224dde96c99SJohn Baldwin line); 225ac6b769bSAttilio Rao WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE, 226ac6b769bSAttilio Rao file, line); 227764e4d54SJohn Baldwin curthread->td_locks++; 2286283b7d0SJohn Baldwin } 2296283b7d0SJohn Baldwin 2306283b7d0SJohn Baldwin void 2317f44c618SAttilio Rao __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line) 2326283b7d0SJohn Baldwin { 2337f44c618SAttilio Rao struct mtx *m; 23435370593SAndriy Gapon 23535370593SAndriy Gapon if (SCHEDULER_STOPPED()) 23635370593SAndriy Gapon return; 2377f44c618SAttilio Rao 2387f44c618SAttilio Rao m = mtxlock2mtx(c); 2397f44c618SAttilio Rao 240186abbd7SJohn Baldwin KASSERT(m->mtx_lock != 
void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
        struct mtx *m;

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
            ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
            m->lock_object.lo_name, file, line));
        if (mtx_owned(m))
                KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
                    (opts & MTX_RECURSE) != 0,
            ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->lock_object.lo_name, file, line));
        opts &= ~MTX_RECURSE;
        WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line, NULL);
        __mtx_lock_spin(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
        struct mtx *m;

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
            ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
            m->lock_object.lo_name, file, line));
        WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);

        __mtx_unlock_spin(m);
}

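/*
 * Illustrative sketch (made-up names): spin mutexes follow the same pattern
 * but use the _spin variants, which also disable interrupts on the local
 * CPU via spinlock_enter()/spinlock_exit():
 *
 *      struct mtx intr_mtx;
 *
 *      mtx_init(&intr_mtx, "example spin lock", NULL, MTX_SPIN);
 *      mtx_lock_spin(&intr_mtx);
 *      ... (short critical section, no sleeping) ...
 *      mtx_unlock_spin(&intr_mtx);
 */
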
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock as long as the
 * mutex is marked recursable.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
        struct mtx *m;
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        int rval;

        if (SCHEDULER_STOPPED())
                return (1);

        m = mtxlock2mtx(c);

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
            curthread, m->lock_object.lo_name, file, line));
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
            ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
            file, line));

        if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
            (opts & MTX_RECURSE) != 0)) {
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                rval = 1;
        } else
                rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
        opts &= ~MTX_RECURSE;

        LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                curthread->td_locks++;
                if (m->mtx_recurse == 0)
                        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
                            m, contested, waittime, file, line);

        }

        return (rval);
}

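/*
 * Illustrative sketch of the mtx_trylock() pattern (made-up names): callers
 * must check the return value, since a zero return means the mutex is not
 * held on return:
 *
 *      if (mtx_trylock(&sc_mtx)) {
 *              ... (work that needs the lock but must not block) ...
 *              mtx_unlock(&sc_mtx);
 *      } else {
 *              ... (defer the work, or fall back to mtx_lock(&sc_mtx)) ...
 *      }
 */
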
/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
        struct mtx *m;
        struct turnstile *ts;
        uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
        volatile struct thread *owner;
#endif
#ifdef KTR
        int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
        int contested = 0;
        uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
        uint64_t spin_cnt = 0;
        uint64_t sleep_cnt = 0;
        int64_t sleep_time = 0;
#endif

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        if (mtx_owned(m)) {
                KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
                    (opts & MTX_RECURSE) != 0,
            ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->lock_object.lo_name, file, line));
                opts &= ~MTX_RECURSE;
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->lock_object, opts))
                        CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
                return;
        }
        opts &= ~MTX_RECURSE;

#ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);
#endif
        lock_profile_obtain_lock_failed(&m->lock_object,
            &contested, &waittime);
        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR4(KTR_LOCK,
                    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

        while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
                spin_cnt++;
#endif
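                /*
                 * Editorial summary of the adaptive path below (not original
                 * commentary): briefly busy-waiting while the current owner
                 * is on-CPU is a bet that mutex hold times are short, so the
                 * turnstile sleep and the context switch it implies can
                 * often be avoided.
                 */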
#ifdef ADAPTIVE_MUTEXES
                /*
                 * If the owner is running on another CPU, spin until the
                 * owner stops running or the state of the lock changes.
                 */
                v = m->mtx_lock;
                if (v != MTX_UNOWNED) {
                        owner = (struct thread *)(v & ~MTX_FLAGMASK);
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&m->lock_object, 0))
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, m, owner);
                                KTR_STATE1(KTR_SCHED, "thread",
                                    sched_tdname((struct thread *)tid),
                                    "spinning", "lockname:\"%s\"",
                                    m->lock_object.lo_name);
                                while (mtx_owner(m) == owner &&
                                    TD_IS_RUNNING(owner)) {
                                        cpu_spinwait();
#ifdef KDTRACE_HOOKS
                                        spin_cnt++;
#endif
                                }
                                KTR_STATE0(KTR_SCHED, "thread",
                                    sched_tdname((struct thread *)tid),
                                    "running");
                                continue;
                        }
                }
#endif

                ts = turnstile_trywait(&m->lock_object);
                v = m->mtx_lock;

                /*
                 * Check if the lock has been released while spinning for
                 * the turnstile chain lock.
                 */
                if (v == MTX_UNOWNED) {
                        turnstile_cancel(ts);
                        continue;
                }

#ifdef ADAPTIVE_MUTEXES
                /*
                 * The current lock owner might have started executing
                 * on another CPU (or the lock could have changed
                 * owners) while we were waiting on the turnstile
                 * chain lock.  If so, drop the turnstile lock and try
                 * again.
                 */
                owner = (struct thread *)(v & ~MTX_FLAGMASK);
                if (TD_IS_RUNNING(owner)) {
                        turnstile_cancel(ts);
                        continue;
                }
#endif

                /*
                 * If the mutex isn't already contested and a failure occurs
                 * setting the contested bit, the mutex was either released
                 * or the state of the MTX_RECURSED bit changed.
                 */
                if ((v & MTX_CONTESTED) == 0 &&
                    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
                        turnstile_cancel(ts);
                        continue;
                }

                /*
                 * We definitely must sleep for this lock.
                 */
                mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
                if (!cont_logged) {
                        CTR6(KTR_CONTENTION,
                            "contention: %p at %s:%d wants %s, taken by %s:%d",
                            (void *)tid, file, line, m->lock_object.lo_name,
                            WITNESS_FILE(&m->lock_object),
                            WITNESS_LINE(&m->lock_object));
                        cont_logged = 1;
                }
#endif

                /*
                 * Block on the turnstile.
                 */
#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs();
#endif
                turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs();
                sleep_cnt++;
#endif
        }
#ifdef KTR
        if (cont_logged) {
                CTR4(KTR_CONTENTION,
                    "contention end: %s acquired by %p at %s:%d",
                    m->lock_object.lo_name, (void *)tid, file, line);
        }
#endif
        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
            waittime, file, line);
#ifdef KDTRACE_HOOKS
        if (sleep_time)
                LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);

        /*
         * Only record the loops spinning and not sleeping.
         */
        if (spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (spin_cnt - sleep_cnt));
#endif
}

static void
_mtx_lock_spin_failed(struct mtx *m)
{
        struct thread *td;

        td = mtx_owner(m);

        /* If the mutex is unlocked, try again. */
        if (td == NULL)
                return;

        printf( "spin lock %p (%s) held by %p (tid %d) too long\n",
            m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
        witness_display_spinlock(&m->lock_object, td, printf);
#endif
        panic("spin lock held too long");
}

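/*
 * Editorial summary of the spin loops below and in thread_lock_flags_():
 * a waiter spins with cpu_spinwait() for roughly the first 10,000,000
 * iterations, then throttles itself with DELAY(1) until about 60,000,000
 * iterations, after which it calls _mtx_lock_spin_failed() above, which
 * panics unless the lock has been released in the meantime.
 */
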
#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
        struct mtx *m;
        int i = 0;
#ifdef LOCK_PROFILING
        int contested = 0;
        uint64_t waittime = 0;
#endif

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
        KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
            "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);
#endif
        lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
        while (!_mtx_obtain_lock(m, tid)) {

                /* Give interrupts a chance while we spin. */
                spinlock_exit();
                while (m->mtx_lock != MTX_UNOWNED) {
                        if (i++ < 10000000) {
                                cpu_spinwait();
                                continue;
                        }
                        if (i < 60000000 || kdb_active || panicstr != NULL)
                                DELAY(1);
                        else
                                _mtx_lock_spin_failed(m);
                        cpu_spinwait();
                }
                spinlock_enter();
        }

        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
        KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
            "running");

        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
            contested, waittime, (file), (line));
        LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i);
}
#endif /* SMP */

void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
        struct mtx *m;
        uintptr_t tid;
        int i;
#ifdef LOCK_PROFILING
        int contested = 0;
        uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
        uint64_t spin_cnt = 0;
#endif

        i = 0;
        tid = (uintptr_t)curthread;

        if (SCHEDULER_STOPPED())
                return;

        for (;;) {
retry:
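                /*
                 * Descriptive comment (added editorially): td_lock is a
                 * pointer that may be re-pointed at a different spin lock
                 * while we wait (e.g. when the thread migrates), so each
                 * pass re-reads td->td_lock and the spin loop below jumps
                 * back here whenever the lock identity changes.
                 */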
                spinlock_enter();
                m = td->td_lock;
                KASSERT(m->mtx_lock != MTX_DESTROYED,
                    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
                KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
                    ("thread_lock() of sleep mutex %s @ %s:%d",
                    m->lock_object.lo_name, file, line));
                if (mtx_owned(m))
                        KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
            ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
                            m->lock_object.lo_name, file, line));
                WITNESS_CHECKORDER(&m->lock_object,
                    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
                while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
                        spin_cnt++;
#endif
                        if (m->mtx_lock == tid) {
                                m->mtx_recurse++;
                                break;
                        }
#ifdef HWPMC_HOOKS
                        PMC_SOFT_CALL( , , lock, failed);
#endif
                        lock_profile_obtain_lock_failed(&m->lock_object,
                            &contested, &waittime);
                        /* Give interrupts a chance while we spin. */
                        spinlock_exit();
                        while (m->mtx_lock != MTX_UNOWNED) {
                                if (i++ < 10000000)
                                        cpu_spinwait();
                                else if (i < 60000000 ||
                                    kdb_active || panicstr != NULL)
                                        DELAY(1);
                                else
                                        _mtx_lock_spin_failed(m);
                                cpu_spinwait();
                                if (m != td->td_lock)
                                        goto retry;
                        }
                        spinlock_enter();
                }
                if (m == td->td_lock)
                        break;
                __mtx_unlock_spin(m);   /* does spinlock_exit() */
#ifdef KDTRACE_HOOKS
                spin_cnt++;
#endif
        }
        if (m->mtx_recurse == 0)
                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
                    m, contested, waittime, (file), (line));
        LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
        LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
}

struct mtx *
thread_lock_block(struct thread *td)
{
        struct mtx *lock;

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        lock = td->td_lock;
        td->td_lock = &blocked_lock;
        mtx_unlock_spin(lock);

        return (lock);
}

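/*
 * Editorial note (inferred from the code above and below, not original
 * commentary): thread_lock_block() parks td_lock on &blocked_lock, whose
 * lock word is initialized in mutex_init() to a value that can never be
 * acquired, so any thread_lock() caller spins in thread_lock_flags_()
 * until thread_lock_unblock() installs the new lock with a releasing store.
 */
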
void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
        mtx_assert(new, MA_OWNED);
        MPASS(td->td_lock == &blocked_lock);
        atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
        struct mtx *lock;

        mtx_assert(new, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        lock = td->td_lock;
        td->td_lock = new;
        mtx_unlock_spin(lock);
}

/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
{
        struct mtx *m;
        struct turnstile *ts;

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        if (mtx_recursed(m)) {
                if (--(m->mtx_recurse) == 0)
                        atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->lock_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
                return;
        }

        /*
         * We have to lock the chain before the turnstile so this turnstile
         * can be removed from the hash list if it is empty.
         */
        turnstile_chain_lock(&m->lock_object);
        ts = turnstile_lookup(&m->lock_object);
        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
        MPASS(ts != NULL);
        turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
        _mtx_release_lock_quick(m);

        /*
         * This turnstile is now no longer associated with the mutex.  We can
         * unlock the chain lock so a new turnstile may take its place.
         */
        turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
        turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
        const struct mtx *m;

        if (panicstr != NULL || dumping)
                return;

        m = mtxlock2mtx(c);

        switch (what) {
        case MA_OWNED:
        case MA_OWNED | MA_RECURSED:
        case MA_OWNED | MA_NOTRECURSED:
                if (!mtx_owned(m))
                        panic("mutex %s not owned at %s:%d",
                            m->lock_object.lo_name, file, line);
                if (mtx_recursed(m)) {
                        if ((what & MA_NOTRECURSED) != 0)
                                panic("mutex %s recursed at %s:%d",
                                    m->lock_object.lo_name, file, line);
                } else if ((what & MA_RECURSED) != 0) {
                        panic("mutex %s unrecursed at %s:%d",
                            m->lock_object.lo_name, file, line);
                }
                break;
        case MA_NOTOWNED:
                if (mtx_owned(m))
                        panic("mutex %s owned at %s:%d",
                            m->lock_object.lo_name, file, line);
                break;
        default:
                panic("unknown mtx_assert at %s:%d", file, line);
        }
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void    mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
        /*
         * Can't call kernacc() from early init386(), especially when
         * initializing Giant mutex, because some stuff in kernacc()
         * requires Giant itself.
         */
        if (!cold)
                if (!kernacc((caddr_t)m, sizeof(m),
                    VM_PROT_READ | VM_PROT_WRITE))
                        panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
        struct mtx_args *margs = arg;

        mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
            margs->ma_opts);
}

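/*
 * Illustrative sketch (made-up names): MTX_SYSINIT() arranges for
 * mtx_sysinit() above to run during boot, so a subsystem can declare a
 * statically allocated, automatically initialized mutex:
 *
 *      static struct mtx foo_mtx;
 *      MTX_SYSINIT(foo_mtx, &foo_mtx, "foo subsystem lock", MTX_DEF);
 */
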
/*
 * Mutex initialization routine; initialize lock `m' with options contained
 * in `opts' and name `name.'  The optional lock type `type' is used as a
 * general lock category name for use with witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
        struct mtx *m;
        struct lock_class *class;
        int flags;

        m = mtxlock2mtx(c);

        MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
            MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
        ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
            ("%s: mtx_lock not aligned for %s: %p", __func__, name,
            &m->mtx_lock));

#ifdef MUTEX_DEBUG
        /* Diagnostic and error correction */
        mtx_validate(m);
#endif

        /* Determine lock class and lock flags. */
        if (opts & MTX_SPIN)
                class = &lock_class_mtx_spin;
        else
                class = &lock_class_mtx_sleep;
        flags = 0;
        if (opts & MTX_QUIET)
                flags |= LO_QUIET;
        if (opts & MTX_RECURSE)
                flags |= LO_RECURSABLE;
        if ((opts & MTX_NOWITNESS) == 0)
                flags |= LO_WITNESS;
        if (opts & MTX_DUPOK)
                flags |= LO_DUPOK;
        if (opts & MTX_NOPROFILE)
                flags |= LO_NOPROFILE;
        if (opts & MTX_NEW)
                flags |= LO_NEW;

        /* Initialize mutex. */
        lock_init(&m->lock_object, class, name, type, flags);

        m->mtx_lock = MTX_UNOWNED;
        m->mtx_recurse = 0;
}

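/*
 * Illustrative sketch of option handling (made-up names): the MTX_* flags
 * accepted above map onto LO_* lock_object flags, e.g.
 *
 *      mtx_init(&foo_mtx, "foo", NULL, MTX_DEF | MTX_RECURSE | MTX_DUPOK);
 *
 * creates a sleep mutex that may be acquired recursively (LO_RECURSABLE)
 * and that witness will not flag for duplicate acquisition (LO_DUPOK).
 */
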
/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
        struct mtx *m;

        m = mtxlock2mtx(c);

        if (!mtx_owned(m))
                MPASS(mtx_unowned(m));
        else {
                MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

                /* Perform the non-mtx related part of mtx_unlock_spin(). */
                if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
                        spinlock_exit();
                else
                        curthread->td_locks--;

                lock_profile_release_lock(&m->lock_object);
                /* Tell witness this isn't locked to make it happy. */
                WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
                    __LINE__);
        }

        m->mtx_lock = MTX_DESTROYED;
        lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

        /* Setup turnstiles so that sleep mutexes work. */
        init_turnstiles();

        /*
         * Initialize mutexes.
         */
        mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
        mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
        blocked_lock.mtx_lock = 0xdeadc0de;     /* Always blocked. */
        mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
        mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
        mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
        mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
        mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
        mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
        mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
        struct thread *td;
        const struct mtx *m;

        m = (const struct mtx *)lock;

        db_printf(" flags: {");
        if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
                db_printf("SPIN");
        else
                db_printf("DEF");
        if (m->lock_object.lo_flags & LO_RECURSABLE)
                db_printf(", RECURSE");
        if (m->lock_object.lo_flags & LO_DUPOK)
                db_printf(", DUPOK");
        db_printf("}\n");
        db_printf(" state: {");
        if (mtx_unowned(m))
                db_printf("UNOWNED");
        else if (mtx_destroyed(m))
                db_printf("DESTROYED");
        else {
                db_printf("OWNED");
                if (m->mtx_lock & MTX_CONTESTED)
                        db_printf(", CONTESTED");
                if (m->mtx_lock & MTX_RECURSED)
                        db_printf(", RECURSED");
        }
        db_printf("}\n");
        if (!mtx_unowned(m) && !mtx_destroyed(m)) {
                td = mtx_owner(m);
                db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_name);
                if (mtx_recursed(m))
                        db_printf(" recursed: %d\n", m->mtx_recurse);
        }
}
#endif
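
/*
 * Editorial note: db_show_mtx() above is installed as the lock classes'
 * lc_ddb_show hook earlier in this file, which is believed to be what DDB's
 * "show lock <address>" command reaches for mutexes (assumption, not stated
 * in this file).  Output shape, reconstructed from the db_printf() calls:
 *
 *       flags: {DEF, RECURSE}
 *       state: {OWNED, CONTESTED}
 *       owner: 0xfffff80003c1e000 (tid 100042, pid 1234, "ifconfig")
 */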