/* xref: /freebsd/sys/kern/kern_lock.c (revision 53bf4bb2cfca3f5e68b6ce23e80dd6856f9e04b2) */
/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <machine/cpu.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef DEBUG
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif

#if NCPUS > 1

/*
 * For a multiprocessor system, try the spin lock first.
 *
 * This should be expanded inline below, but we cannot have #if
 * inside a multiline define.
 */
int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
		if (lock_wait_time > 0) {				\
			int i;						\
									\
			simple_unlock(&lkp->lk_interlock);		\
			for (i = lock_wait_time; i > 0; i--)		\
				if (!(wanted))				\
					break;				\
			simple_lock(&lkp->lk_interlock);		\
		}							\
		if (!(wanted))						\
			break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_waitcount++;					\
		simple_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		simple_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_waitcount--;					\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}
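
/*
 * Illustrative sketch, not compiled: the sleep protocol that ACQUIRE
 * encodes, written out as a plain loop.  The interlock is dropped around
 * tsleep() so that the thread releasing the lock can take it to issue the
 * wakeup(), and the "wanted" condition is re-tested after every wakeup
 * because tsleep() makes no guarantee about who ran first.  The predicate
 * name still_wanted() is hypothetical.
 */
#if 0
	while (still_wanted(lkp)) {
		lkp->lk_waitcount++;
		simple_unlock(&lkp->lk_interlock);
		error = tsleep((void *)lkp, lkp->lk_prio, lkp->lk_wmesg,
		    lkp->lk_timo);
		simple_lock(&lkp->lk_interlock);
		lkp->lk_waitcount--;
		if (error)		/* interrupted or timed out */
			break;
	}
#endif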

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{

	bzero(lkp, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}
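
/*
 * Illustrative usage sketch, not compiled: initializing a lock embedded in
 * a hypothetical structure.  The structure, the wait message "exlock", and
 * the choice of PLOCK from <sys/param.h> as the sleep priority are examples
 * only, not part of this file.
 */
#if 0
struct example_object {
	struct lock	eo_lock;
	/* ... other fields ... */
};

static void
example_object_init(eo)
	struct example_object *eo;
{

	/* No external flags; waiters sleep at PLOCK and never time out. */
	lockinit(&eo->eo_lock, PLOCK, "exlock", 0, 0);
}
#endif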

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	simple_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	simple_unlock(&lkp->lk_interlock);
	return (lock_type);
}
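
/*
 * Illustrative sketch, not compiled, continuing the hypothetical
 * example_object above: lockstatus() reports only a snapshot taken under
 * the interlock, so it is suited to assertions and diagnostics rather than
 * to making locking decisions.
 */
#if 0
static void
example_object_check(eo)
	struct example_object *eo;
{

	if (lockstatus(&eo->eo_lock) != LK_EXCLUSIVE)
		panic("example_object_check: lock not exclusively held");
}
#endif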

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, interlkp, p)
	__volatile struct lock *lkp;
	u_int flags;
	struct simplelock *interlkp;
	struct proc *p;
{
	int error;
	pid_t pid;
	int extflags;

	error = 0;
	if (p)
		pid = p->p_pid;
	else
		pid = LK_KERNPROC;
	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
#ifdef DIAGNOSTIC
	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    lkp->lk_lockholder != pid)
			panic("lockmgr: non-release on draining lock: %d\n",
			    flags & LK_TYPE_MASK);
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}
#endif /* DIAGNOSTIC */

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 *	Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
			lkp->lk_exclusivecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		       (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (pid != lkp->lk_lockholder)
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			lkp->lk_exclusivecount--;
			COUNT(p, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
		}
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		PAUSE(lkp, ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
		for (error = 0; ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
			lkp->lk_flags |= LK_WAITDRAIN;
			simple_unlock(&lkp->lk_interlock);
			if ((error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
			    lkp->lk_wmesg, lkp->lk_timo)) != 0)
				return (error);
			if ((extflags) & LK_SLEEPFAIL)
				return (ENOLCK);
			simple_lock(&lkp->lk_interlock);
		}
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
	     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}
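
/*
 * Illustrative usage sketch, not compiled, continuing the hypothetical
 * example_object above: a shared acquire, a non-blocking attempt to
 * upgrade, and the matching release.  As the upgrade comment above notes,
 * a failed upgrade leaves the caller with no hold on the lock at all.
 */
#if 0
static int
example_object_update(eo, p)
	struct example_object *eo;
	struct proc *p;
{
	int error;

	/*
	 * Acquire a shared hold; this sleeps until there is no exclusive
	 * holder and no pending exclusive or upgrade request.
	 */
	error = lockmgr(&eo->eo_lock, LK_SHARED, NULL, p);
	if (error)
		return (error);
	/*
	 * Try to upgrade without sleeping behind another upgrader.  On
	 * failure the shared hold has already been given up.
	 */
	error = lockmgr(&eo->eo_lock, LK_EXCLUPGRADE | LK_NOWAIT, NULL, p);
	if (error)
		return (error);

	/* ... modify the object while holding it exclusively ... */

	return (lockmgr(&eo->eo_lock, LK_RELEASE, NULL, p));
}
#endif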

/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display the status of contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
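
/*
 * Illustrative sketch, not compiled, continuing the hypothetical
 * example_object above: how a VOP_PRINT-style routine might report the
 * state of an embedded lock.
 */
#if 0
static void
example_object_print(eo)
	struct example_object *eo;
{

	printf("example object");
	lockmgr_printinfo(&eo->eo_lock);
	printf("\n");
}
#endif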

#if defined(DEBUG) && NCPUS == 1
#include <sys/kernel.h>
#include <vm/vm.h>
#include <sys/sysctl.h>
int lockpausetime = 0;
struct ctldebug debug2 = { "lockpausetime", &lockpausetime };
int simplelockrecurse;
/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(alp)
	struct simplelock *alp;
{

	alp->lock_data = 0;
}

void
_simple_lock(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 1) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_lock: lock held", id, l);
		printf("%s:%d: simple_lock: lock held\n", id, l);
		if (lockpausetime == 1) {
			BACKTRACE(curproc);
		} else if (lockpausetime > 1) {
			printf("%s:%d: simple_lock: lock held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 1;
	if (curproc)
		curproc->p_simple_locks++;
}

int
_simple_lock_try(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{

	if (alp->lock_data)
		return (0);
	if (simplelockrecurse)
		return (1);
	alp->lock_data = 1;
	if (curproc)
		curproc->p_simple_locks++;
	return (1);
}

void
_simple_unlock(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 0) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_unlock: lock not held", id, l);
		printf("%s:%d: simple_unlock: lock not held\n", id, l);
		if (lockpausetime == 1) {
			BACKTRACE(curproc);
		} else if (lockpausetime > 1) {
			printf("%s:%d: simple_unlock: lock not held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 0;
	if (curproc)
		curproc->p_simple_locks--;
}
#endif /* DEBUG && NCPUS == 1 */
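
/*
 * Illustrative sketch, not compiled: the simple_lock() interface used
 * throughout this file, as a caller would use it.  The lock and function
 * names are hypothetical, and the lock must first be set up with
 * simple_lock_init().  A simple lock protects short, non-sleeping critical
 * sections; on a uniprocessor DEBUG kernel the calls land in the checking
 * routines above, while the spin behaviour only matters when NCPUS > 1.
 */
#if 0
static struct simplelock example_slock;

static void
example_critical_section()
{

	simple_lock(&example_slock);
	/* ... touch data protected by example_slock; do not sleep here ... */
	simple_unlock(&example_slock);
}
#endif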