/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD$
 */

/*
 * Machine independent bits of the mutex implementation and implementation
 * of the `witness' structure & related debugging routines.
 */

/*
 * Main Entry: witness
 * Pronunciation: 'wit-n&s
 * Function: noun
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 *    testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 *    a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 *    testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 *   b : public affirmation by word or example of usually
 *    religious faith or conviction <the heroic witness to divine
 *    life -- Pilot>
 * 6 capitalized : a member of the Jehovah's Witnesses
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

#define SET_PRIO(td, pri)	(td)->td_ksegrp->kg_pri.pri_level = (pri)

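/*
 * Added illustration (commentary only): the encoding the macros above rely
 * on.  For an owned, uncontested, unrecursed mutex the lock word is simply
 * the owning thread pointer, i.e. after curthread acquires `m':
 *
 *	m->mtx_lock == (uintptr_t)curthread
 *
 * and mtx_owner() recovers that pointer by masking off the low flag bits
 * (MTX_CONTESTED, MTX_RECURSED) with MTX_FLAGMASK.
 */
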
/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
	struct ksegrp *kg = td->td_ksegrp;
	int pri = kg->kg_pri.pri_level;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right.  We really ought
			 * to bump the priority of the thread that next
			 * acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(td->td_proc->p_stat != SSLEEP,
		    ("sleeping thread owns a mutex"));
		if (kg->kg_pri.pri_level <= pri)	/* lower is higher priority */
			return;

		/*
		 * Bump this thread's priority.
		 */
		SET_PRIO(td, pri);

		/*
		 * If the lock holder is actually running, just bump its
		 * priority.
		 */
		/* XXXKSE this test is not sufficient */
		if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
			MPASS(td->td_proc->p_stat == SRUN ||
			    td->td_proc->p_stat == SZOMB ||
			    td->td_proc->p_stat == SSTOP);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this should
		 * never happen, however, as it would mean we are in a
		 * deadlock).
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If the thread is on a run queue, move it to the new run
		 * queue, and quit.  XXXKSE this gets a lot more complicated
		 * under threads, but try anyhow.
		 */
		if (td->td_proc->p_stat == SRUN) {
			MPASS(td->td_blocked == NULL);
			remrunqueue(td);
			setrunqueue(td);
			return;
		}

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(td->td_proc->p_stat == SMTX, (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain.
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_ksegrp->kg_pri.pri_level <= pri) {
			continue;
		}

		/*
		 * Remove the thread from the blocked chain and determine
		 * where it should be moved up to.  Since we know that td1
		 * has a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that td1
		 * will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_ksegrp->kg_pri.pri_level > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}

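/*
 * Added illustration with hypothetical priority values: suppose thread A
 * (pri_level 40) blocks on a mutex owned by thread B (pri_level 120) while
 * an unrelated thread C (pri_level 80) is runnable.  Without propagation,
 * C would keep preempting B and thereby starve the higher-priority A.
 * propagate_priority() walks from A to the owner B and sets B's pri_level
 * to 40 (a lower value is a higher priority), so B runs, releases the
 * mutex, and _mtx_unlock_sleep() later restores B's native priority.
 */
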
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_lock_flags(m, opts, file, line);
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_unlock_flags(m, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_lock_spin_flags(m, opts, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_unlock_spin_flags(m, opts, file, line);
}

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  We do NOT handle recursion here; we assume
 * that if we're called, it's because we know we don't already own this
 * lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	/*
	 * _mtx_trylock does not accept the MTX_NOSWITCH option.
	 */
	KASSERT((opts & MTX_NOSWITCH) == 0,
	    ("mtx_trylock() called with invalid option flag(s) %d", opts));

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}

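/*
 * Example usage (added illustration; `foo_mtx', `foo_count' and foo_defer()
 * are hypothetical):
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		foo_count++;
 *		mtx_unlock(&foo_mtx);
 *	} else
 *		foo_defer();
 *
 * The caller must not already own foo_mtx, per the note above.
 */
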
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
	struct ksegrp *kg = td->td_ksegrp;

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * The mutex was marked contested on release.  This means
		 * that there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_ksegrp->kg_pri.pri_level <
			    kg->kg_pri.pri_level)
				SET_PRIO(td, td1->td_ksegrp->kg_pri.pri_level);
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
		    (void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = (struct thread *)(m->mtx_lock & MTX_FLAGMASK);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_ksegrp->kg_pri.pri_level >
				    kg->kg_pri.pri_level)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		td->td_proc->p_stat = SMTX;
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			    td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

	return;
}

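/*
 * Summary of the sleep mutex lock word transitions implemented above
 * (added commentary):
 *
 *	MTX_UNOWNED ---- _obtain_lock(td) ----------> (uintptr_t)td
 *	(uintptr_t)td -- waiter sets contested bit -> (uintptr_t)td | MTX_CONTESTED
 *	release with waiters remaining -------------> MTX_CONTESTED
 *	waiter claims a contested lock -------------> (uintptr_t)td | MTX_CONTESTED
 */
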
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
    int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit(mtx_crit);
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 1000000)
				continue;
			if (i++ < 6000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
				panic("spin lock %s held by %p for > 5 seconds",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
		}
		mtx_crit = critical_enter();
	}

	m->mtx_savecrit = mtx_crit;
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

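/*
 * Example usage (added illustration): spin mutexes protect data shared
 * with contexts that may not sleep.  This file itself uses one,
 * sched_lock, to cover scheduler state changes such as:
 *
 *	mtx_lock_spin(&sched_lock);
 *	td->td_proc->p_stat = SRUN;
 *	mtx_unlock_spin(&sched_lock);
 */
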
/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_ksegrp->kg_pri.pri_level;
		if (cp < pri)
			pri = cp;
	}

	if (pri > kg->kg_pri.pri_native)
		pri = kg->kg_pri.pri_native;
	SET_PRIO(td, pri);

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	td1->td_proc->p_stat = SRUN;
	setrunqueue(td1);

	if ((opts & MTX_NOSWITCH) == 0 &&
	    td1->td_ksegrp->kg_pri.pri_level < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		setrunqueue(td);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

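/*
 * Example usage (added illustration; `struct foo' and its mutex are
 * hypothetical): a subsystem can enforce its locking protocol by asserting
 * the expected lock state on entry to internal routines:
 *
 *	static void
 *	foo_modify(struct foo *f)
 *	{
 *
 *		mtx_assert(&f->f_mtx, MA_OWNED);
 *		f->f_count++;
 *	}
 */
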
/*
 * The MUTEX_DEBUG-enabled mtx_validate().
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate __P((struct mtx *));

void
mtx_validate(struct mtx *m)
{

	/*
	 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory
	 * properly we can re-enable the kernacc() checks.
	 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * Mutex initialization routine; initialize lock `m' with type and options
 * contained in `opts' and description `description.'
 */
void
mtx_init(struct mtx *m, const char *description, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	bzero(m, sizeof(*m));
	lock = &m->mtx_object;
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = description;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}

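/*
 * Example usage (added illustration; `foo_mtx' and `foo_count' are
 * hypothetical; MTX_DEF requests an ordinary sleep mutex):
 *
 *	static struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo lock", MTX_DEF);
 *	mtx_lock(&foo_mtx);
 *	foo_count++;
 *	mtx_unlock(&foo_mtx);
 */
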
/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED | MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE | LOP_NOSWITCH,
		    __FILE__, __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

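/*
 * Example teardown (added illustration, continuing the hypothetical
 * `foo_mtx' above): destroy a mutex only once nothing can still contend
 * on it, typically while tearing down the data it protects:
 *
 *	mtx_lock(&foo_mtx);
 *	foo_count = 0;
 *	mtx_unlock(&foo_mtx);
 *	mtx_destroy(&foo_mtx);
 */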