/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

CTASSERT(((LK_CANRECURSE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_CANRECURSE | LK_NOSHARE));
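
/*
 * lockmgr puts its waiters to sleep on two separate sleepqueue queues:
 * one for threads waiting for an exclusive grant and one for shared
 * waiters.
 */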
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1
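
/*
 * Per-thread lock accounting: under INVARIANTS, td_locks tracks how many
 * locks the thread currently holds, while td_lk_slocks counts shared
 * lockmgr locks and drives the deadlock avoidance in LK_CAN_SHARE().
 */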
#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)
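
/*
 * A shared request may be granted only if the lock is actually in shared
 * mode and, in addition, either no exclusive waiter is queued or curthread
 * already holds shared lockmgr locks (blocking in the latter case could
 * deadlock against ourselves).
 */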
#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	curthread->td_lk_slocks))
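
/*
 * lk_lock holds either the exclusive owner's thread pointer or the count
 * of shared holders, alongside the LK_* flag bits.  Masking off every flag
 * except LK_SHARE exposes the bare holder value for comparison against
 * LK_KERNPROC or curthread.
 */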
#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

static void	 assert_lockmgr(struct lock_object *lock, int how);
#ifdef DDB
static void	 db_show_lockmgr(struct lock_object *lock);
#endif
static void	 lock_lockmgr(struct lock_object *lock, int how);
static int	 unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr
};

static __inline struct thread *
lockmgr_xholder(struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * This function assumes the sleepqueue chain lock is held on entry and
 * returns with it released.  It also assumes the generic interlock is sane
 * and has already been checked by the caller.
 * If LK_INTERLOCK is specified, the interlock is not reacquired after the
 * sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = (pri) ? (pri & PCATCH) : 0;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	DROP_GIANT();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Pick the sleep primitive matching the timeout and catch settings.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	PICKUP_GIANT();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}

static __inline void
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	int queue;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS(x == LK_SHARERS_LOCK(1));
			if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1),
			    LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & LK_ALL_WAITERS;
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with newly arriving shared
		 * requests.
		 */
		if (x & LK_EXCLUSIVE_WAITERS) {
			queue = SQ_EXCLUSIVE_QUEUE;
			v |= (x & LK_SHARED_WAITERS);
		} else {
			MPASS(x == LK_SHARED_WAITERS);
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
}

static void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);

	iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	iflags |= flags & (LK_CANRECURSE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	STACK_ZERO(lk);
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	lock_destroy(&lk->lock_object);
}
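
/*
 * Example life cycle (an illustrative sketch; PVFS and the lockmgr()
 * convenience macro from <sys/lockmgr.h> are assumed here, not defined
 * in this file):
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, 0);
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&lk, LK_RELEASE, NULL);
 *	lockdestroy(&lk);
 */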

int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	uint64_t waittime;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op;
	int contested, error, ipri, itimo, queue;

	contested = 0;
	error = 0;
	waittime = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags for the operation requested @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
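
	/*
	 * After a panic, every request is served as a trivial success so
	 * that the debugger and crash dump paths cannot hang on a lock.
	 */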
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}
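
	/*
	 * Locks initialized with LK_NOSHARE serve every shared request as
	 * an exclusive one.
	 */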
	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
		op = LK_EXCLUSIVE;

	switch (op) {
	case LK_SHARED:
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the operation cannot sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * shared lock and the shared waiters flag is set,
			 * we will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		x = lk->lk_lock & LK_ALL_WAITERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * We have been unable to succeed in upgrading, so just
		 * give up the shared lock.
		 */
		wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LK_CANRECURSE) == 0) {

				/*
				 * If the operation must not panic, just
				 * give up and return.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non-recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation cannot sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = x & LK_ALL_WAITERS;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one that successfully acquires
			 * it, claim lock ownership and return, preserving
			 * waiters flags.
			 */
			if (x == (LK_UNLOCKED | v)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock & LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x)) {
				LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object,
				    0, 0, file, line);
				TD_SLOCKS_INC(curthread);
				break;
			}
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first pass, treat the lock as if it had no
			 * waiters.
			 * Fix up the tid variable if the lock has been
			 * disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else
				TD_LOCKS_DEC(curthread);
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is recursed also, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock & LK_ALL_WAITERS;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * newly arriving shared requests.
			 */
			if (x & LK_EXCLUSIVE_WAITERS) {
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				MPASS(x == LK_SHARED_WAITERS);
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeupshlk(lk, file, line);
		break;
	case LK_DRAIN:
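
		/*
		 * LK_DRAIN waits until all activity on the lock comes to an
		 * end before granting it exclusively to the caller; it is
		 * typically used just before a lock is torn down.
		 */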

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation cannot sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = x & LK_ALL_WAITERS;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			if (x == (LK_UNLOCKED | v)) {
				v = x;
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {
					MPASS(v & LK_SHARED_WAITERS);
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
				    0, queue);

				/*
				 * If shared waiters have been woken up, we
				 * need to wait for one of them to acquire the
				 * lock before setting the exclusive waiters
				 * flag, in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			DROP_GIANT();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			PICKUP_GIANT();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	/*
	 * We could have exited from the switch without reacquiring the
	 * interlock, so we need to check for the interlock ownership.
	 */
	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);

	return (error);
}
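
/*
 * Pass ownership of an exclusively held lock to LK_KERNPROC, so that the
 * lock can later be released by a thread other than the one that acquired
 * it.
 */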
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock & LK_ALL_WAITERS;
		if (atomic_cmpset_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x)) {
			LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file,
			    line);
			TD_LOCKS_DEC(curthread);
			return;
		}
		cpu_spinwait();
	}
}

void
lockmgr_printinfo(struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf(" lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf(" lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		printf(" lock type %s: EXCL by thread %p (pid %d)\n",
		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");

	STACK_PRINT(lk);
}
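
/*
 * Report the state of a lockmgr lock: LK_EXCLUSIVE if curthread (or
 * LK_KERNPROC) owns it exclusively, LK_EXCLOTHER if another thread does,
 * LK_SHARED if it is held in shared mode and 0 if it is unlocked.
 */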
int
lockstatus(struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif

void
_lockmgr_assert(struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
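		/* FALLTHROUGH */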
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif

#ifdef DDB
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}

static void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lk;

	lk = (struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
}
#endif