/*-
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#define	COUNT(td, x)	if ((td)) (td)->td_locks += (x)
#define	LK_ALL		(LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
			 LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock **lkpp, int extflags, int wanted);
static int acquiredrain(struct lock *lkp, int extflags);

static __inline void
sharelock(struct thread *td, struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
	COUNT(td, incr);
}

static __inline void
shareunlock(struct thread *td, struct lock *lkp, int decr) {

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	COUNT(td, -decr);
	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

static int
acquire(struct lock **lkpp, int extflags, int wanted)
{
	struct lock *lkp = *lkpp;
	int error;
	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
		return EBUSY;
	error = 0;
	while ((lkp->lk_flags & wanted) != 0) {
		CTR2(KTR_LOCK,
		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
		    lkp, lkp->lk_flags);
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		lkp->lk_waitcount--;
		if (lkp->lk_waitcount == 0)
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	mtx_assert(lkp->lk_interlock, MA_OWNED);
	return (error);
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, interlkp, td)
	struct lock *lkp;
	u_int flags;
	struct mtx *interlkp;
	struct thread *td;
{
	int error;
	struct thread *thr;
	int extflags, lockflags;

	error = 0;
	if (td == NULL)
		thr = LK_KERNPROC;
	else
		thr = td;

	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
	    "td == %p", lkp, lkp->lk_wmesg, lkp->lk_lockholder,
	    lkp->lk_exclusivecount, flags, td);
#ifdef DEBUG_LOCKS
	{
		struct stack stack; /* XXX */
		stack_save(&stack);
		CTRSTACK(KTR_LOCK, &stack, 0, 1);
	}
#endif

	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->mtx_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}
	if ((lkp->lk_flags & LK_NOSHARE) &&
	    (flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDP_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != thr) {
			lockflags = LK_HAVE_EXCL;
			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, lockflags);
			if (error)
				break;
			sharelock(td, lkp, 1);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(td, lkp, 1);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
		    ("lockmgr: not holding exclusive lock "
		    "(owner thread (%p) != thread (%p), exclcnt (%d) != 0)",
		    lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
		sharelock(td, lkp, lkp->lk_exclusivecount);
		COUNT(td, -lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(td, lkp, 1);
			error = EBUSY;
			break;
		}
		/* FALLTHROUGH normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the lock
		 * will always have been released.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: upgrade exclusive lock");
		if (lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade without shared");
		shareunlock(td, lkp, 1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error) {
				if ((lkp->lk_flags & (LK_WANT_EXCL | LK_WAIT_NONZERO)) ==
				    (LK_WANT_EXCL | LK_WAIT_NONZERO))
					wakeup((void *)lkp);
				break;
			}
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = thr;
			lkp->lk_exclusivecount = 1;
			COUNT(td, 1);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * Someone else has requested upgrade.  Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		    LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags,
		    LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			if (lkp->lk_flags & LK_WAIT_NONZERO)
				wakeup((void *)lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != thr &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    thr, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC)
				COUNT(td, -1);
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(td, lkp, 1);
		else {
			printf("lockmgr: thread %p unlocking unheld lock\n",
			    thr);
			kdb_backtrace();
		}

		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	    LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}

static int
acquiredrain(struct lock *lkp, int extflags) {
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}
	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

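/*
 * Example usage (illustrative sketch only, not a verbatim excerpt from any
 * caller): the common caller-side pattern for the lockmgr() interface above.
 * The lock variable and wait message below are made-up placeholders; any
 * valid sleep priority (PVFS is a common choice in filesystem code) and the
 * caller's thread pointer (typically curthread) can be used, and no
 * interlock mutex is passed because LK_INTERLOCK is not requested.
 *
 *	struct lock examplelk;
 *
 *	lockinit(&examplelk, PVFS, "exmplk", 0, 0);
 *	...
 *	if (lockmgr(&examplelk, LK_EXCLUSIVE, NULL, curthread) == 0) {
 *		... access the object protected by examplelk ...
 *		lockmgr(&examplelk, LK_RELEASE, NULL, curthread);
 *	}
 */
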
/*
 * Transfer any waiting processes from one lock to another.
 */
void
transferlockers(from, to)
	struct lock *from;
	struct lock *to;
{

	KASSERT(from != to, ("lock transfer to self"));
	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0, ("transfer draining lock"));

	mtx_lock(from->lk_interlock);
	if (from->lk_waitcount == 0) {
		mtx_unlock(from->lk_interlock);
		return;
	}
	from->lk_newlock = to;
	wakeup((void *)from);
	msleep(&from->lk_newlock, from->lk_interlock, from->lk_prio,
	    "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0, ("active lock"));
	mtx_unlock(from->lk_interlock);
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{
	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
#ifdef DEBUG_LOCKS
	stack_zero(&lkp->lk_stack);
#endif
}

/*
 * Destroy a lock.
 */
void
lockdestroy(lkp)
	struct lock *lkp;
{
	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
}

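/*
 * Example teardown (illustrative sketch only, not a verbatim excerpt from
 * any caller): one plausible way to retire a lock before freeing the
 * structure that embeds it.  LK_DRAIN sleeps until no other thread holds or
 * waits for the lock and leaves the caller holding it exclusively, after
 * which lockdestroy() is called.  "examplelk" is a made-up placeholder.
 *
 *	lockmgr(&examplelk, LK_DRAIN, NULL, curthread);
 *	lockdestroy(&examplelk);
 *	... free or reuse the storage containing examplelk ...
 */
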
/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp, td)
	struct lock *lkp;
	struct thread *td;
{
	int lock_type = 0;
	int interlocked;

	if (!kdb_active) {
		interlocked = 1;
		mtx_lock(lkp->lk_interlock);
	} else
		interlocked = 0;
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	if (interlocked)
		mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

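/*
 * Example assertion (illustrative sketch only): lockstatus() is typically
 * used in debugging checks that the caller owns a lock in the expected mode.
 * "examplelk" is a made-up placeholder; curthread identifies the caller.
 *
 *	KASSERT(lockstatus(&examplelk, curthread) == LK_EXCLUSIVE,
 *	    ("examplelk not exclusively locked by curthread"));
 */
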
/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
		    lkp->lk_wmesg, lkp->lk_exclusivecount,
		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
#ifdef DEBUG_LOCKS
	stack_print(&lkp->lk_stack);
#endif
}

#ifdef DDB
DB_SHOW_COMMAND(lockmgr, db_show_lockmgr)
{
	struct thread *td;
	struct lock *lkp;

	if (!have_addr)
		return;
	lkp = (struct lock *)addr;

	db_printf("lock type: %s\n", lkp->lk_wmesg);
	db_printf("state: ");
	if (lkp->lk_sharecount)
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		td = lkp->lk_lockholder;
		db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
		db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
		    td->td_proc->p_pid, td->td_proc->p_comm);
	} else
		db_printf("UNLOCKED\n");
	if (lkp->lk_waitcount > 0)
		db_printf("waiters: %d\n", lkp->lk_waitcount);
}
#endif