/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
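
/*
 * Illustration: the mtx_lock() family of macros in <sys/mutex.h> passes
 * &m->mtx_lock as the lock cookie, and mtxlock2mtx() recovers `m' from it
 * via __containerof().
 */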

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, int how);
static void	lock_spin(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static int	unlock_mtx(struct lock_object *lock);
static int	unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(const struct lock_object *lock, int what)
{

	mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, int how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, int how)
{

	panic("spin locks can only use msleep_spin");
}
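
/*
 * lc_unlock method for sleep mutexes.  The generic sleep code uses this to
 * drop the interlock around a sleep, which is why the mutex must be owned
 * and not recursed at this point.
 */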

int
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

int
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m = (const struct mtx *)lock;

	*owner = mtx_owner(m);
	return (mtx_unowned(m) == 0);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	__mtx_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
	__mtx_unlock(m, curthread, opts, file, line);
	curthread->td_locks--;
}

void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	__mtx_lock_spin(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
			    m, contested, waittime, file, line);

	}

	return (rval);
}
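
/*
 * Illustrative use of the try-lock interface (sc_mtx is a hypothetical
 * driver mutex):
 *
 *	if (mtx_trylock(&sc->sc_mtx)) {
 *		... touch the protected state ...
 *		mtx_unlock(&sc->sc_mtx);
 *	}
 *
 * mtx_trylock() never sleeps and returns non-zero only when the lock was
 * acquired (or recursed, for a MTX_RECURSE mutex already owned by curthread).
 */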

/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		v = m->mtx_lock;
		if (v != MTX_UNOWNED) {
			owner = (struct thread *)(v & ~MTX_FLAGMASK);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&m->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, m, owner);
				while (mtx_owner(m) == owner &&
				    TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				continue;
			}
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs();
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs();
		sleep_cnt++;
#endif
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (spin_cnt - sleep_cnt));
#endif
}
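
/*
 * Report a spin mutex that appears to be stuck.  Called from the spin loops
 * below once their iteration limits have been exceeded; panics unless the
 * lock turns out to have been released in the meantime.
 */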
static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf( "spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td, printf);
#endif
	panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
	struct mtx *m;
	int i = 0;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
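
	/*
	 * Spin strategy: roughly the first 10,000,000 iterations use pure
	 * cpu_spinwait(), the next ones add a DELAY(1) per iteration, and
	 * once the count reaches 60,000,000 we give up via
	 * _mtx_lock_spin_failed() unless the debugger is active or the
	 * system is already panicking.
	 */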
	while (!_mtx_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else
				_mtx_lock_spin_failed(m);
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
	    contested, waittime, (file), (line));
	LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i);
}
#endif /* SMP */
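
/*
 * Acquire the spin mutex currently designated as `td's thread lock.  Since
 * td_lock can change while we spin (e.g. while the thread is migrated
 * between run queues), the loop below re-reads td->td_lock and starts over
 * whenever the mutex it is spinning on stops being the thread's lock.
 */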
void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	int i;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
#endif

	i = 0;
	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED())
		return;

	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
			    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
			spin_cnt++;
#endif
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 10000000)
					cpu_spinwait();
				else if (i < 60000000 ||
				    kdb_active || panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
	}
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
		    m, contested, waittime, (file), (line));
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
}
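
/*
 * Helpers for code (such as the schedulers) that needs to change which
 * spin mutex serves as a thread's lock: thread_lock_block() detaches
 * td_lock and points it at blocked_lock, thread_lock_unblock() installs a
 * new lock, and thread_lock_set() switches td_lock while the old lock is
 * still held.
 */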
struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}

/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_mtx_release_lock_quick(m);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (panicstr != NULL || dumping)
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}
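
/*
 * Illustrative MTX_SYSINIT() usage (foo_mtx is a hypothetical subsystem
 * lock); the macro arranges for mtx_sysinit() to run during boot:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo", MTX_DEF);
 */
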
/*
 * Mutex initialization routine; initialize lock `m' with options contained
 * in `opts' and name `name.'  The optional lock type `type' is used as a
 * general lock category name for use with witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}
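
/*
 * The usual way to reach _mtx_init() is through the mtx_init() macro, e.g.
 * (sc being a hypothetical softc):
 *
 *	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
 */
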
/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
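	/*
	 * blocked_lock is never really acquired: thread_lock_block() points
	 * a thread's td_lock at it while the thread is being moved, and the
	 * bogus owner value above makes any concurrent thread_lock() spin
	 * until td_lock is updated again.
	 */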
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif