/*-
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_global.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/lock_profile.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif

static void	assert_lockmgr(struct lock_object *lock, int what);
#ifdef DDB
#include <ddb/ddb.h>
static void	db_show_lockmgr(struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
};

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
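
/*
 * A minimal usage sketch, for orientation only.  The lockmgr() wrapper
 * macro and the PVFS sleep priority shown below are assumed to be provided
 * by <sys/lockmgr.h> and <sys/priority.h>; the snippet is illustrative and
 * is not part of the definitions in this file:
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, 0);
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL, curthread);
 *	... exclusive critical section ...
 *	lockmgr(&lk, LK_RELEASE, NULL, curthread);
 *	lockdestroy(&lk);
 */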

void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#define	COUNT(td, x)	if ((td)) (td)->td_locks += (x)
#define	LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *waittime);
static int acquiredrain(struct lock *lkp, int extflags);

static __inline void
sharelock(struct thread *td, struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
	COUNT(td, incr);
}

static __inline void
shareunlock(struct thread *td, struct lock *lkp, int decr) {

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	COUNT(td, -decr);
	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

static int
acquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *waittime)
{
	struct lock *lkp = *lkpp;
	int error;
	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
		return EBUSY;
	error = 0;
	if ((lkp->lk_flags & wanted) != 0)
		lock_profile_obtain_lock_failed(&lkp->lk_object, contested, waittime);

	while ((lkp->lk_flags & wanted) != 0) {
		CTR2(KTR_LOCK,
		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
		    lkp, lkp->lk_flags);
		lkp->lk_flags |= LK_WAIT_NONZERO;
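		/*
		 * Record that at least one thread is sleeping on this lock;
		 * the release paths test LK_WAIT_NONZERO and lk_waitcount
		 * to decide whether a wakeup() is needed.
		 */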
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		lkp->lk_waitcount--;
		if (lkp->lk_waitcount == 0)
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	mtx_assert(lkp->lk_interlock, MA_OWNED);
	return (error);
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
_lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
    struct thread *td, char *file, int line)
{
	int error;
	struct thread *thr;
	int extflags, lockflags;
	int contested = 0;
	uint64_t waitstart = 0;

	error = 0;
	if (td == NULL)
		thr = LK_KERNPROC;
	else
		thr = td;

	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
	    "td == %p", lkp, lkp->lk_wmesg, lkp->lk_lockholder,
	    lkp->lk_exclusivecount, flags, td);
#ifdef DEBUG_LOCKS
	{
		struct stack stack; /* XXX */
		stack_save(&stack);
		CTRSTACK(KTR_LOCK, &stack, 0, 1);
	}
#endif

	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->lock_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

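	/*
	 * After a panic, lock operations succeed trivially so that the
	 * debugger and crash-dump code are not blocked by held locks.
	 */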
	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}
	if ((lkp->lk_flags & LK_NOSHARE) &&
	    (flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDP_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != thr) {
			lockflags = LK_HAVE_EXCL;
			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, lockflags, &contested, &waitstart);
			if (error)
				break;
			sharelock(td, lkp, 1);
			if (lkp->lk_sharecount == 1)
				lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);

#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(td, lkp, 1);
		if (lkp->lk_sharecount == 1)
			lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
		    ("lockmgr: not holding exclusive lock "
		    "(owner thread (%p) != thread (%p), exlcnt (%d) != 0",
		    lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
		sharelock(td, lkp, lkp->lk_exclusivecount);
		COUNT(td, -lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(td, lkp, 1);
			error = EBUSY;
			break;
		}
		/* FALLTHROUGH normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: upgrade exclusive lock");
		if (lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade without shared");
		shareunlock(td, lkp, 1);
		if (lkp->lk_sharecount == 0)
			lock_profile_release_lock(&lkp->lk_object);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO, &contested, &waitstart);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error) {
				if ((lkp->lk_flags & (LK_WANT_EXCL | LK_WAIT_NONZERO)) == (LK_WANT_EXCL | LK_WAIT_NONZERO))
					wakeup((void *)lkp);
				break;
			}
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = thr;
			lkp->lk_exclusivecount = 1;
			COUNT(td, 1);
			lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL), &contested, &waitstart);
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags, LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO, &contested, &waitstart);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			if (lkp->lk_flags & LK_WAIT_NONZERO)
				wakeup((void *)lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
		lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != thr &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    thr, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC)
				COUNT(td, -1);
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
				lock_profile_release_lock(&lkp->lk_object);
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(td, lkp, 1);
		else {
			printf("lockmgr: thread %p unlocking unheld lock\n",
			    thr);
			kdb_backtrace();
		}

		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
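		/*
		 * Draining waits until every shared and exclusive holder and
		 * every waiter has gone away, then takes the lock exclusively
		 * with LK_DRAINING set; it is typically the last operation
		 * performed before the lock's containing object is freed.
		 */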
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	    LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}

static int
acquiredrain(struct lock *lkp, int extflags) {
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}
	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Transfer any waiting processes from one lock to another.
 */
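/*
 * The hand-off cooperates with acquire(): setting lk_newlock and waking the
 * waiters makes each of them re-queue on the new lock, and the last waiter
 * to leave wakes us via &from->lk_newlock so the field can be cleared again.
 */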
void
transferlockers(from, to)
	struct lock *from;
	struct lock *to;
{

	KASSERT(from != to, ("lock transfer to self"));
	KASSERT((from->lk_flags&LK_WAITDRAIN) == 0, ("transfer draining lock"));

	mtx_lock(from->lk_interlock);
	if (from->lk_waitcount == 0) {
		mtx_unlock(from->lk_interlock);
		return;
	}
	from->lk_newlock = to;
	wakeup((void *)from);
	msleep(&from->lk_newlock, from->lk_interlock, from->lk_prio,
	    "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0, ("active lock"));
	mtx_unlock(from->lk_interlock);
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{
	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
#ifdef DEBUG_LOCKS
	stack_zero(&lkp->lk_stack);
#endif
	lock_init(&lkp->lk_object, &lock_class_lockmgr, wmesg, NULL,
	    LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE);
}

/*
 * Destroy a lock.
 */
void
lockdestroy(lkp)
	struct lock *lkp;
{

	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
	lock_destroy(&lkp->lk_object);
}

/*
 * Determine the status of a lock.
 */
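/*
 * Returns LK_EXCLUSIVE if the lock is held exclusively by td (or by anyone
 * when td is NULL), LK_EXCLOTHER if some other thread holds it exclusively,
 * LK_SHARED if it is held shared, and 0 if it is not held at all.
 */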
int
lockstatus(lkp, td)
	struct lock *lkp;
	struct thread *td;
{
	int lock_type = 0;
	int interlocked;

	if (!kdb_active) {
		interlocked = 1;
		mtx_lock(lkp->lk_interlock);
	} else
		interlocked = 0;
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	if (interlocked)
		mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Determine the number of waiters on a lock.
 */
int
lockwaiters(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_waitcount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
		    lkp->lk_wmesg, lkp->lk_exclusivecount,
		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
#ifdef DEBUG_LOCKS
	stack_print(&lkp->lk_stack);
#endif
}

#ifdef DDB
/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on a 'struct lock'. If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lkp;

	lkp = td->td_wchan;

	/* Simple test to see if wchan points to a lockmgr lock. */
	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
	    lkp->lk_wmesg == td->td_wmesg)
		goto ok;

	/*
	 * If this thread is doing a DRAIN, then it would be asleep on
	 * &lkp->lk_flags rather than lkp.
	 */
	lkp = (struct lock *)((char *)td->td_wchan -
	    offsetof(struct lock, lk_flags));
	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
	    lkp->lk_wmesg == td->td_wmesg && (lkp->lk_flags & LK_WAITDRAIN))
		goto ok;

	/* Doesn't seem to be a lockmgr lock. */
	return (0);

ok:
	/* Ok, we think we have a lockmgr lock, so output some details. */
	db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
	if (lkp->lk_sharecount) {
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
		*ownerp = NULL;
	} else {
		db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount);
		*ownerp = lkp->lk_lockholder;
	}
	return (1);
}

void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lkp;

	lkp = (struct lock *)lock;

	db_printf(" lock type: %s\n", lkp->lk_wmesg);
	db_printf(" state: ");
	if (lkp->lk_sharecount)
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		td = lkp->lk_lockholder;
		db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
		db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
		    td->td_proc->p_pid, td->td_name);
	} else
		db_printf("UNLOCKED\n");
	if (lkp->lk_waitcount > 0)
		db_printf(" waiters: %d\n", lkp->lk_waitcount);
}
#endif