/*-
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#include <sys/sysctl.h>
#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#define	COUNT(td, x)	if ((td)) (td)->td_locks += (x)
#define	LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock **lkpp, int extflags, int wanted);
static int acquiredrain(struct lock *lkp, int extflags);

static __inline void
sharelock(struct thread *td, struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
	COUNT(td, incr);
}

static __inline void
shareunlock(struct thread *td, struct lock *lkp, int decr) {

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	COUNT(td, -decr);
	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}
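
/*
 * Sleep until none of the flag bits in "wanted" remain set on the lock.
 * Called with the interlock held; msleep() drops and reacquires it while
 * we wait.  If the lock is being handed over to a replacement lock (see
 * transferlockers() below), follow lk_newlock and update the caller's
 * lock pointer.  Returns 0 on success, EBUSY for a LK_NOWAIT request that
 * would block, ENOLCK for LK_SLEEPFAIL, or the error from msleep().
 */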
static int
acquire(struct lock **lkpp, int extflags, int wanted)
{
	struct lock *lkp = *lkpp;
	int error;
	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
		return EBUSY;
	error = 0;
	while ((lkp->lk_flags & wanted) != 0) {
		CTR2(KTR_LOCK,
		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
		    lkp, lkp->lk_flags);
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		lkp->lk_waitcount--;
		if (lkp->lk_waitcount == 0)
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	mtx_assert(lkp->lk_interlock, MA_OWNED);
	return (error);
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
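/*
 * Illustrative usage (a sketch, not code compiled in this file): a caller
 * typically brackets its critical section with an exclusive request and a
 * release on a lock previously set up with lockinit() below.  The name
 * "examplk" and the PVFS priority are arbitrary choices for this sketch.
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplk", 0, 0);
 *	(void) lockmgr(&lk, LK_EXCLUSIVE, NULL, curthread);
 *	... exclusive access ...
 *	(void) lockmgr(&lk, LK_RELEASE, NULL, curthread);
 *
 * A return value of 0 means the request was granted; EBUSY is returned for
 * a LK_NOWAIT request that would have blocked, ENOLCK when LK_SLEEPFAIL is
 * set and the thread had to sleep, and any other error comes from msleep().
 */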
int
lockmgr(lkp, flags, interlkp, td)
	struct lock *lkp;
	u_int flags;
	struct mtx *interlkp;
	struct thread *td;
{
	int error;
	struct thread *thr;
	int extflags, lockflags;

	error = 0;
	if (td == NULL)
		thr = LK_KERNPROC;
	else
		thr = td;

	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
	    "td == %p", lkp, lkp->lk_wmesg, lkp->lk_lockholder,
	    lkp->lk_exclusivecount, flags, td);
#ifdef DEBUG_LOCKS
	{
		struct stack stack; /* XXX */
		stack_save(&stack);
		CTRSTACK(KTR_LOCK, &stack, 1);
	}
#endif

	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->mtx_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}
	if ((lkp->lk_flags & LK_NOSHARE) &&
	    (flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDP_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests ( but not the exclusive
		 * lock itself ).
		 */
		if (lkp->lk_lockholder != thr) {
			lockflags = LK_HAVE_EXCL;
			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, lockflags);
			if (error)
				break;
			sharelock(td, lkp, 1);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->stack);
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(td, lkp, 1);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
			("lockmgr: not holding exclusive lock "
			"(owner thread (%p) != thread (%p), exlcnt (%d) != 0",
			lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
		sharelock(td, lkp, lkp->lk_exclusivecount);
		COUNT(td, -lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(td, lkp, 1);
			error = EBUSY;
			break;
		}
		/* FALLTHROUGH normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: upgrade exclusive lock");
		if (lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade without shared");
		shareunlock(td, lkp, 1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error) {
				if ((lkp->lk_flags & (LK_WANT_EXCL | LK_WAIT_NONZERO)) ==
				    (LK_WANT_EXCL | LK_WAIT_NONZERO))
					wakeup((void *)lkp);
				break;
			}
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = thr;
			lkp->lk_exclusivecount = 1;
			COUNT(td, 1);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->stack);
#endif
			break;
		}
		/*
		 * Someone else has requested upgrade.  Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
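		/*
		 * Exclusive request: wait for LK_WANT_EXCL to be free, claim
		 * it to stop new shared requests, then wait for the existing
		 * shared holders and any pending upgrade to drain before
		 * taking ownership.  Recursion by the current holder is only
		 * permitted with LK_CANRECURSE.
		 */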
		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		    LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags,
		    LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			if (lkp->lk_flags & LK_WAIT_NONZERO)
				wakeup((void *)lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->stack);
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != thr &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    thr, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC)
				COUNT(td, -1);
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(td, lkp, 1);
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->stack);
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}
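
/*
 * Wait until the lock is completely idle -- no holder, no waiters, and no
 * pending exclusive or upgrade request -- on behalf of LK_DRAIN.  Like
 * acquire(), this runs with the interlock held and may sleep; the sleep is
 * on lk_flags and is ended by the LK_WAITDRAIN wakeup in lockmgr() above.
 */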
static int
acquiredrain(struct lock *lkp, int extflags) {
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}
	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Transfer any waiting processes from one lock to another.
 */
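/*
 * Waiters asleep in acquire() notice the handover through lk_newlock: they
 * are woken here, switch to the new lock's interlock, retry against the new
 * lock, and the last one to leave wakes us back up.  The source lock must
 * not have a drain pending (asserted below).
 */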
void
transferlockers(from, to)
	struct lock *from;
	struct lock *to;
{

	KASSERT(from != to, ("lock transfer to self"));
	KASSERT((from->lk_flags&LK_WAITDRAIN) == 0, ("transfer draining lock"));

	mtx_lock(from->lk_interlock);
	if (from->lk_waitcount == 0) {
		mtx_unlock(from->lk_interlock);
		return;
	}
	from->lk_newlock = to;
	wakeup((void *)from);
	msleep(&from->lk_newlock, from->lk_interlock, from->lk_prio,
	    "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0, ("active lock"));
	mtx_unlock(from->lk_interlock);
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{
	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
#ifdef DEBUG_LOCKS
	stack_zero(&lkp->stack);
#endif
}
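
/*
 * Note that the interlock above comes from a shared mutex pool
 * (mtxpool_lockbuilder) rather than being allocated per lock, so there is
 * no per-lock resource for lockdestroy() to release.
 */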
/*
 * Destroy a lock.
 */
void
lockdestroy(lkp)
	struct lock *lkp;
{
	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp, td)
	struct lock *lkp;
	struct thread *td;
{
	int lock_type = 0;

	mtx_lock(lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
		    lkp->lk_wmesg, lkp->lk_exclusivecount,
		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}