/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $Id: kern_lock.c,v 1.11 1997/08/22 07:16:46 phk Exp $
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>

#ifdef SMP
#include <machine/smp.h>
#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
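
/*
 * Illustrative usage sketch (not compiled in): a lock embedded in a
 * larger object, taken shared by readers and exclusively by writers.
 * The structure, the function names and the "demolk" wait message are
 * hypothetical; PVM is just one reasonable sleep priority.
 */
#if 0
struct demo_object {
	struct lock	d_lock;
	int		d_value;
};

static void
demo_setup(struct demo_object *dp)
{
	/* Must be done once before the lock is first used. */
	lockinit(&dp->d_lock, PVM, "demolk", 0, 0);
}

static int
demo_get(struct demo_object *dp, struct proc *p)
{
	int v;

	/* Any number of readers may hold the lock shared at once. */
	(void) lockmgr(&dp->d_lock, LK_SHARED, NULL, p);
	v = dp->d_value;
	(void) lockmgr(&dp->d_lock, LK_RELEASE, NULL, p);
	return (v);
}

static void
demo_set(struct demo_object *dp, struct proc *p, int v)
{
	/* A writer waits for readers and other writers to finish. */
	(void) lockmgr(&dp->d_lock, LK_EXCLUSIVE, NULL, p);
	dp->d_value = v;
	(void) lockmgr(&dp->d_lock, LK_RELEASE, NULL, p);
}
#endif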

#ifdef SIMPLELOCK_DEBUG
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);
static int apause(struct lock *lkp, int flags);
static int acquiredrain(struct lock *lkp, int extflags);

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

static LOCK_INLINE void
shareunlock(struct lock *lkp, int decr) {
#if defined(DIAGNOSTIC)
	if (lkp->lk_sharecount < decr)
#if defined(DDB)
		Debugger("shareunlock: count < decr");
#else
		panic("shareunlock: count < decr");
#endif
#endif

	lkp->lk_sharecount -= decr;
	if (lkp->lk_sharecount == 0)
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
}

/*
 * This is the waitloop optimization: spin for a while rechecking the
 * flags before the caller falls back to sleeping.  Note that for this
 * to work, simple_lock and simple_unlock must be real subroutines;
 * otherwise compiler optimization may cache lk_flags and defeat the
 * spin.
 */
static int
apause(struct lock *lkp, int flags) {
	int lock_wait;

	lock_wait = LOCK_WAIT_TIME;
	for (; lock_wait > 0; lock_wait--) {
		int i;
		if ((lkp->lk_flags & flags) == 0)
			return 0;
		simple_unlock(&lkp->lk_interlock);
		for (i = LOCK_SAMPLE_WAIT; i > 0; i--) {
			if ((lkp->lk_flags & flags) == 0) {
				simple_lock(&lkp->lk_interlock);
				if ((lkp->lk_flags & flags) == 0)
					return 0;
				break;
			}
		}
	}
	return 1;
}

static int
acquire(struct lock *lkp, int extflags, int wanted) {
	int error;

	/* A polling caller never sleeps; fail immediately instead. */
	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	if (((lkp->lk_flags | extflags) & LK_NOPAUSE) == 0) {
		error = apause(lkp, wanted);
		if (error == 0)
			return 0;
	}

	/* Sleep until all of the wanted flags have cleared. */
	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		simple_unlock(&lkp->lk_interlock);
		error = tsleep(lkp, lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo);
		simple_lock(&lkp->lk_interlock);
		lkp->lk_waitcount--;
		if (lkp->lk_waitcount == 0)
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}
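
/*
 * Illustrative sketch (not compiled in): the LK_NOWAIT and LK_SLEEPFAIL
 * external flags control the behaviour of acquire() above.  LK_NOWAIT
 * turns a would-be sleep into an immediate EBUSY; LK_SLEEPFAIL returns
 * ENOLCK instead of retrying after a sleep.  The function name below is
 * hypothetical.
 */
#if 0
static int
demo_poll(struct lock *lkp, struct proc *p)
{
	int error;

	/* Try for the exclusive lock, but never sleep for it. */
	error = lockmgr(lkp, LK_EXCLUSIVE | LK_NOWAIT, NULL, p);
	if (error)
		return (error);		/* typically EBUSY: someone holds it */
	/* ... use the resource ... */
	(void) lockmgr(lkp, LK_RELEASE, NULL, p);
	return (0);
}
#endif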

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, interlkp, p)
	struct lock *lkp;
	u_int flags;
	struct simplelock *interlkp;
	struct proc *p;
{
	int error;
	pid_t pid;
	int extflags;

	error = 0;
	if (p == NULL)
		pid = LK_KERNPROC;
	else
		pid = p->p_pid;

	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			error = acquire(lkp, extflags,
				LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE);
			if (error)
				break;
			sharelock(lkp, 1);
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(lkp, 1);
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the lock
		 * will always be released.
		 */
		if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
			panic("lockmgr: upgrade exclusive lock");
		shareunlock(lkp, 1);
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared count
			 * to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
			LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(lkp, extflags, LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (pid != lkp->lk_lockholder)
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			lkp->lk_exclusivecount--;
			COUNT(p, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
			shareunlock(lkp, 1);
			COUNT(p, -1);
		}
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	/* Wake a drainer waiting in acquiredrain() once all activity ceases. */
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}

static int
acquiredrain(struct lock *lkp, int extflags) {
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}

	error = apause(lkp, LK_ALL);
	if (error == 0)
		return 0;

	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		simple_unlock(&lkp->lk_interlock);
		error = tsleep(&lkp->lk_flags, lkp->lk_prio,
			lkp->lk_wmesg, lkp->lk_timo);
		simple_lock(&lkp->lk_interlock);
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{

	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
}
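
/*
 * Illustrative sketch (not compiled in): upgrading a shared hold to an
 * exclusive one.  As noted in the LK_UPGRADE case above, a failed
 * upgrade leaves the lock entirely released, so the caller must start
 * over.  The function name is hypothetical.
 */
#if 0
static void
demo_modify(struct lock *lkp, struct proc *p)
{
	(void) lockmgr(lkp, LK_SHARED, NULL, p);
	/* ... inspect the object, decide it must be changed ... */
	if (lockmgr(lkp, LK_UPGRADE, NULL, p) != 0) {
		/* The shared hold is gone; reacquire exclusively. */
		(void) lockmgr(lkp, LK_EXCLUSIVE, NULL, p);
	}
	/* ... modify the object ... */
	(void) lockmgr(lkp, LK_RELEASE, NULL, p);
}
#endif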

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	simple_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	simple_unlock(&lkp->lk_interlock);
	return (lock_type);
}

/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display the status of contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(SIMPLELOCK_DEBUG) && NCPUS == 1
#include <sys/kernel.h>
#include <sys/sysctl.h>

static int lockpausetime = 0;
SYSCTL_INT(_debug, OID_AUTO, lockpausetime, CTLFLAG_RW, &lockpausetime, 0, "");

int simplelockrecurse;

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(alp)
	struct simplelock *alp;
{

	alp->lock_data = 0;
}

void
_simple_lock(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 1) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_lock: lock held", id, l);
		printf("%s:%d: simple_lock: lock held\n", id, l);
		if (lockpausetime == 1) {
			Debugger("simple_lock");
			/*BACKTRACE(curproc); */
		} else if (lockpausetime > 1) {
			printf("%s:%d: simple_lock: lock held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 1;
	if (curproc)
		curproc->p_simple_locks++;
}

int
_simple_lock_try(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (alp->lock_data)
		return (0);
	if (simplelockrecurse)
		return (1);
	alp->lock_data = 1;
	if (curproc)
		curproc->p_simple_locks++;
	return (1);
}

void
_simple_unlock(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 0) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_unlock: lock not held", id, l);
		printf("%s:%d: simple_unlock: lock not held\n", id, l);
		if (lockpausetime == 1) {
			Debugger("simple_unlock");
			/* BACKTRACE(curproc); */
		} else if (lockpausetime > 1) {
			printf("%s:%d: simple_unlock: lock not held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 0;
	if (curproc)
		curproc->p_simple_locks--;
}
#endif /* SIMPLELOCK_DEBUG && NCPUS == 1 */
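
/*
 * Illustrative sketch (not compiled in): on a uniprocessor debug kernel
 * the simple_lock()/simple_unlock()/simple_lock_try() macros presumably
 * expand to the _simple_lock() family above, passing the caller's file
 * and line for the diagnostics.  The names below are hypothetical.
 */
#if 0
static struct simplelock demo_slock;

static void
demo_simple(void)
{
	simple_lock_init(&demo_slock);
	if (simple_lock_try(&demo_slock)) {
		/* The lock was free; we now hold it. */
		simple_unlock(&demo_slock);
	}
}
#endif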