xref: /freebsd/sys/kern/kern_lock.c (revision de5b19526b7350b9c608ae4bf0bd80b91e51a5df)
19454b2d8SWarner Losh /*-
2047dd67eSAttilio Rao  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3047dd67eSAttilio Rao  * All rights reserved.
453bf4bb2SPeter Wemm  *
553bf4bb2SPeter Wemm  * Redistribution and use in source and binary forms, with or without
653bf4bb2SPeter Wemm  * modification, are permitted provided that the following conditions
753bf4bb2SPeter Wemm  * are met:
853bf4bb2SPeter Wemm  * 1. Redistributions of source code must retain the above copyright
9047dd67eSAttilio Rao  *    notice(s), this list of conditions and the following disclaimer as
10047dd67eSAttilio Rao  *    the first lines of this file unmodified other than the possible
11047dd67eSAttilio Rao  *    addition of one or more copyright notices.
1253bf4bb2SPeter Wemm  * 2. Redistributions in binary form must reproduce the above copyright
13047dd67eSAttilio Rao  *    notice(s), this list of conditions and the following disclaimer in the
1453bf4bb2SPeter Wemm  *    documentation and/or other materials provided with the distribution.
1553bf4bb2SPeter Wemm  *
16047dd67eSAttilio Rao  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17047dd67eSAttilio Rao  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18047dd67eSAttilio Rao  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19047dd67eSAttilio Rao  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20047dd67eSAttilio Rao  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21047dd67eSAttilio Rao  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22047dd67eSAttilio Rao  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23047dd67eSAttilio Rao  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2453bf4bb2SPeter Wemm  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25047dd67eSAttilio Rao  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26047dd67eSAttilio Rao  * DAMAGE.
2753bf4bb2SPeter Wemm  */
2853bf4bb2SPeter Wemm 
29651175c9SAttilio Rao #include "opt_adaptive_lockmgrs.h"
30047dd67eSAttilio Rao #include "opt_ddb.h"
31a5aedd68SStacey Son #include "opt_kdtrace.h"
32047dd67eSAttilio Rao 
33677b542eSDavid E. O'Brien #include <sys/cdefs.h>
34677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
35677b542eSDavid E. O'Brien 
3653bf4bb2SPeter Wemm #include <sys/param.h>
3761d80e90SJohn Baldwin #include <sys/ktr.h>
3853bf4bb2SPeter Wemm #include <sys/lock.h>
39047dd67eSAttilio Rao #include <sys/lock_profile.h>
408302d183SBruce Evans #include <sys/lockmgr.h>
41d8881ca3SJohn Baldwin #include <sys/mutex.h>
428302d183SBruce Evans #include <sys/proc.h>
43047dd67eSAttilio Rao #include <sys/sleepqueue.h>
44e8ddb61dSJeff Roberson #ifdef DEBUG_LOCKS
45e8ddb61dSJeff Roberson #include <sys/stack.h>
46e8ddb61dSJeff Roberson #endif
47651175c9SAttilio Rao #include <sys/sysctl.h>
48047dd67eSAttilio Rao #include <sys/systm.h>
4953bf4bb2SPeter Wemm 
50047dd67eSAttilio Rao #include <machine/cpu.h>
516efc8a16SAttilio Rao 
52be6847d7SJohn Baldwin #ifdef DDB
53be6847d7SJohn Baldwin #include <ddb/ddb.h>
54047dd67eSAttilio Rao #endif
55047dd67eSAttilio Rao 
56651175c9SAttilio Rao CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
57651175c9SAttilio Rao     (LK_ADAPTIVE | LK_NOSHARE));
58651175c9SAttilio Rao CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
59651175c9SAttilio Rao     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
60047dd67eSAttilio Rao 
61047dd67eSAttilio Rao #define	SQ_EXCLUSIVE_QUEUE	0
62047dd67eSAttilio Rao #define	SQ_SHARED_QUEUE		1
63047dd67eSAttilio Rao 
64047dd67eSAttilio Rao #ifndef INVARIANTS
65047dd67eSAttilio Rao #define	_lockmgr_assert(lk, what, file, line)
66047dd67eSAttilio Rao #define	TD_LOCKS_INC(td)
67047dd67eSAttilio Rao #define	TD_LOCKS_DEC(td)
68047dd67eSAttilio Rao #else
69047dd67eSAttilio Rao #define	TD_LOCKS_INC(td)	((td)->td_locks++)
70047dd67eSAttilio Rao #define	TD_LOCKS_DEC(td)	((td)->td_locks--)
71047dd67eSAttilio Rao #endif
72047dd67eSAttilio Rao #define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
73047dd67eSAttilio Rao #define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
74047dd67eSAttilio Rao 
75047dd67eSAttilio Rao #ifndef DEBUG_LOCKS
76047dd67eSAttilio Rao #define	STACK_PRINT(lk)
77047dd67eSAttilio Rao #define	STACK_SAVE(lk)
78047dd67eSAttilio Rao #define	STACK_ZERO(lk)
79047dd67eSAttilio Rao #else
80047dd67eSAttilio Rao #define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
81047dd67eSAttilio Rao #define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
82047dd67eSAttilio Rao #define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
83047dd67eSAttilio Rao #endif
84047dd67eSAttilio Rao 
85047dd67eSAttilio Rao #define	LOCK_LOG2(lk, string, arg1, arg2)				\
86047dd67eSAttilio Rao 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
87047dd67eSAttilio Rao 		CTR2(KTR_LOCK, (string), (arg1), (arg2))
88047dd67eSAttilio Rao #define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
89047dd67eSAttilio Rao 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
90047dd67eSAttilio Rao 		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
91047dd67eSAttilio Rao 
92e5f94314SAttilio Rao #define	GIANT_DECLARE							\
93e5f94314SAttilio Rao 	int _i = 0;							\
94e5f94314SAttilio Rao 	WITNESS_SAVE_DECL(Giant)
95e5f94314SAttilio Rao #define	GIANT_RESTORE() do {						\
96e5f94314SAttilio Rao 	if (_i > 0) {							\
97e5f94314SAttilio Rao 		while (_i--)						\
98e5f94314SAttilio Rao 			mtx_lock(&Giant);				\
99e5f94314SAttilio Rao 		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
100e5f94314SAttilio Rao 	}								\
101e5f94314SAttilio Rao } while (0)
102e5f94314SAttilio Rao #define	GIANT_SAVE() do {						\
103e5f94314SAttilio Rao 	if (mtx_owned(&Giant)) {					\
104e5f94314SAttilio Rao 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
105e5f94314SAttilio Rao 		while (mtx_owned(&Giant)) {				\
106e5f94314SAttilio Rao 			_i++;						\
107e5f94314SAttilio Rao 			mtx_unlock(&Giant);				\
108e5f94314SAttilio Rao 		}							\
109e5f94314SAttilio Rao 	}								\
110e5f94314SAttilio Rao } while (0)
111e5f94314SAttilio Rao 
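/*
 * A minimal usage sketch for the GIANT_*() helpers above, mirroring how
 * sleeplk() below employs them (illustrative only, not a drop-in
 * function; "example_sleep" is a hypothetical name):
 *
 *	static int
 *	example_sleep(struct lock *lk, int pri)
 *	{
 *		GIANT_DECLARE;
 *		int error;
 *
 *		sleepq_lock(&lk->lock_object);
 *		GIANT_SAVE();
 *		sleepq_add(&lk->lock_object, NULL, "example", SLEEPQ_LK,
 *		    SQ_SHARED_QUEUE);
 *		error = sleepq_wait_sig(&lk->lock_object, pri);
 *		GIANT_RESTORE();
 *		return (error);
 *	}
 */
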
112047dd67eSAttilio Rao #define	LK_CAN_SHARE(x)							\
113047dd67eSAttilio Rao 	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
114651175c9SAttilio Rao 	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
115e0f62984SAttilio Rao 	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
116e5f94314SAttilio Rao #define	LK_TRYOP(x)							\
117e5f94314SAttilio Rao 	((x) & LK_NOWAIT)
118e5f94314SAttilio Rao 
119e5f94314SAttilio Rao #define	LK_CAN_WITNESS(x)						\
120e5f94314SAttilio Rao 	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
121e5f94314SAttilio Rao #define	LK_TRYWIT(x)							\
122e5f94314SAttilio Rao 	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
123047dd67eSAttilio Rao 
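/*
 * Note on LK_CAN_SHARE() as written above: the exclusive-waiters and
 * exclusive-spinners tests are OR'ed, so a shared request is refused only
 * when both LK_EXCLUSIVE_WAITERS and LK_EXCLUSIVE_SPINNERS are set, unless
 * curthread already holds shared lockmgr locks (td_lk_slocks != 0) or runs
 * with TDP_DEADLKTREAT, in which case it may share anyway.
 */
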
124651175c9SAttilio Rao #define	LK_CAN_ADAPT(lk, f)						\
125651175c9SAttilio Rao 	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
126651175c9SAttilio Rao 	((f) & LK_SLEEPFAIL) == 0)
127651175c9SAttilio Rao 
128047dd67eSAttilio Rao #define	lockmgr_disowned(lk)						\
129047dd67eSAttilio Rao 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
130047dd67eSAttilio Rao 
131047dd67eSAttilio Rao #define	lockmgr_xlocked(lk)						\
132047dd67eSAttilio Rao 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
133047dd67eSAttilio Rao 
134047dd67eSAttilio Rao static void	 assert_lockmgr(struct lock_object *lock, int how);
135047dd67eSAttilio Rao #ifdef DDB
13661bd5e21SKip Macy static void	 db_show_lockmgr(struct lock_object *lock);
137be6847d7SJohn Baldwin #endif
1386e21afd4SJohn Baldwin static void	 lock_lockmgr(struct lock_object *lock, int how);
139a5aedd68SStacey Son #ifdef KDTRACE_HOOKS
140a5aedd68SStacey Son static int	 owner_lockmgr(struct lock_object *lock, struct thread **owner);
141a5aedd68SStacey Son #endif
1426e21afd4SJohn Baldwin static int	 unlock_lockmgr(struct lock_object *lock);
14361bd5e21SKip Macy 
14461bd5e21SKip Macy struct lock_class lock_class_lockmgr = {
1453ff6d229SJohn Baldwin 	.lc_name = "lockmgr",
146047dd67eSAttilio Rao 	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
147f9721b43SAttilio Rao 	.lc_assert = assert_lockmgr,
14861bd5e21SKip Macy #ifdef DDB
1496e21afd4SJohn Baldwin 	.lc_ddb_show = db_show_lockmgr,
15061bd5e21SKip Macy #endif
1516e21afd4SJohn Baldwin 	.lc_lock = lock_lockmgr,
152a5aedd68SStacey Son 	.lc_unlock = unlock_lockmgr,
153a5aedd68SStacey Son #ifdef KDTRACE_HOOKS
154a5aedd68SStacey Son 	.lc_owner = owner_lockmgr,
155a5aedd68SStacey Son #endif
15661bd5e21SKip Macy };
15761bd5e21SKip Macy 
158651175c9SAttilio Rao #ifdef ADAPTIVE_LOCKMGRS
159651175c9SAttilio Rao static u_int alk_retries = 10;
160651175c9SAttilio Rao static u_int alk_loops = 10000;
161651175c9SAttilio Rao SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
162651175c9SAttilio Rao SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
163651175c9SAttilio Rao SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
164651175c9SAttilio Rao #endif
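
/*
 * Under ADAPTIVE_LOCKMGRS, alk_retries bounds how many adaptive-spinning
 * rounds a thread attempts against a share-locked lock, and alk_loops
 * bounds the cpu_spinwait() iterations within each round (see the
 * spintries/i loops in __lockmgr_args() below).  Both are runtime-tunable
 * through the debug.lockmgr sysctl tree declared above.
 */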
165651175c9SAttilio Rao 
166047dd67eSAttilio Rao static __inline struct thread *
167047dd67eSAttilio Rao lockmgr_xholder(struct lock *lk)
168047dd67eSAttilio Rao {
169047dd67eSAttilio Rao 	uintptr_t x;
170047dd67eSAttilio Rao 
171047dd67eSAttilio Rao 	x = lk->lk_lock;
172047dd67eSAttilio Rao 	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
173047dd67eSAttilio Rao }
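
/*
 * Note on the lock word layout implied by lockmgr_xholder() above and the
 * lockmgr_disowned()/lockmgr_xlocked() macros: when LK_SHARE is set,
 * lk_lock carries a count of shared holders; otherwise it carries the
 * owning thread pointer (LK_KERNPROC for a disowned lock) together with
 * the flag bits covered by LK_FLAGMASK.
 */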
17484887fa3SAttilio Rao 
17553bf4bb2SPeter Wemm /*
176047dd67eSAttilio Rao  * It assumes the sleepq_lock is held on entry and returns with it released.
177047dd67eSAttilio Rao  * It also assumes the generic interlock is sane and has already been checked.
178047dd67eSAttilio Rao  * If LK_INTERLOCK is specified the interlock is not reacquired after the
179047dd67eSAttilio Rao  * sleep.
18053bf4bb2SPeter Wemm  */
181047dd67eSAttilio Rao static __inline int
182047dd67eSAttilio Rao sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
183047dd67eSAttilio Rao     const char *wmesg, int pri, int timo, int queue)
184047dd67eSAttilio Rao {
185e5f94314SAttilio Rao 	GIANT_DECLARE;
186047dd67eSAttilio Rao 	struct lock_class *class;
187047dd67eSAttilio Rao 	int catch, error;
18853bf4bb2SPeter Wemm 
189047dd67eSAttilio Rao 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
1905047a8fdSAttilio Rao 	catch = pri & PCATCH;
191047dd67eSAttilio Rao 	pri &= PRIMASK;
192047dd67eSAttilio Rao 	error = 0;
193047dd67eSAttilio Rao 
194047dd67eSAttilio Rao 	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
195047dd67eSAttilio Rao 	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
196047dd67eSAttilio Rao 
197047dd67eSAttilio Rao 	if (flags & LK_INTERLOCK)
198047dd67eSAttilio Rao 		class->lc_unlock(ilk);
1992028867dSAttilio Rao 	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
2002028867dSAttilio Rao 		lk->lk_exslpfail++;
201e5f94314SAttilio Rao 	GIANT_SAVE();
202047dd67eSAttilio Rao 	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
203047dd67eSAttilio Rao 	    SLEEPQ_INTERRUPTIBLE : 0), queue);
204047dd67eSAttilio Rao 	if ((flags & LK_TIMELOCK) && timo)
205047dd67eSAttilio Rao 		sleepq_set_timeout(&lk->lock_object, timo);
206047dd67eSAttilio Rao 
207047dd67eSAttilio Rao 	/*
208047dd67eSAttilio Rao 	 * Choose the sleep primitive based on the timeout and catch flags.
209047dd67eSAttilio Rao 	 */
210047dd67eSAttilio Rao 	if ((flags & LK_TIMELOCK) && timo && catch)
211047dd67eSAttilio Rao 		error = sleepq_timedwait_sig(&lk->lock_object, pri);
212047dd67eSAttilio Rao 	else if ((flags & LK_TIMELOCK) && timo)
213047dd67eSAttilio Rao 		error = sleepq_timedwait(&lk->lock_object, pri);
214047dd67eSAttilio Rao 	else if (catch)
215047dd67eSAttilio Rao 		error = sleepq_wait_sig(&lk->lock_object, pri);
216047dd67eSAttilio Rao 	else
217047dd67eSAttilio Rao 		sleepq_wait(&lk->lock_object, pri);
218e5f94314SAttilio Rao 	GIANT_RESTORE();
219047dd67eSAttilio Rao 	if ((flags & LK_SLEEPFAIL) && error == 0)
220047dd67eSAttilio Rao 		error = ENOLCK;
221047dd67eSAttilio Rao 
222047dd67eSAttilio Rao 	return (error);
223047dd67eSAttilio Rao }
224047dd67eSAttilio Rao 
225da7bbd2cSJohn Baldwin static __inline int
226047dd67eSAttilio Rao wakeupshlk(struct lock *lk, const char *file, int line)
227047dd67eSAttilio Rao {
228047dd67eSAttilio Rao 	uintptr_t v, x;
2292028867dSAttilio Rao 	u_int realexslp;
230da7bbd2cSJohn Baldwin 	int queue, wakeup_swapper;
231047dd67eSAttilio Rao 
232047dd67eSAttilio Rao 	TD_LOCKS_DEC(curthread);
233047dd67eSAttilio Rao 	TD_SLOCKS_DEC(curthread);
234e5f94314SAttilio Rao 	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
235047dd67eSAttilio Rao 	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
236047dd67eSAttilio Rao 
237da7bbd2cSJohn Baldwin 	wakeup_swapper = 0;
238047dd67eSAttilio Rao 	for (;;) {
239047dd67eSAttilio Rao 		x = lk->lk_lock;
240047dd67eSAttilio Rao 
241047dd67eSAttilio Rao 		/*
242047dd67eSAttilio Rao 		 * If there is more than one shared lock held, just drop one
243047dd67eSAttilio Rao 		 * and return.
244047dd67eSAttilio Rao 		 */
245047dd67eSAttilio Rao 		if (LK_SHARERS(x) > 1) {
2467f9f80ceSAttilio Rao 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
247047dd67eSAttilio Rao 			    x - LK_ONE_SHARER))
248047dd67eSAttilio Rao 				break;
249047dd67eSAttilio Rao 			continue;
250047dd67eSAttilio Rao 		}
251047dd67eSAttilio Rao 
252047dd67eSAttilio Rao 		/*
253047dd67eSAttilio Rao 		 * If there are no waiters on the exclusive queue, drop the
254047dd67eSAttilio Rao 		 * lock quickly.
255047dd67eSAttilio Rao 		 */
256047dd67eSAttilio Rao 		if ((x & LK_ALL_WAITERS) == 0) {
257651175c9SAttilio Rao 			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
258651175c9SAttilio Rao 			    LK_SHARERS_LOCK(1));
2597f9f80ceSAttilio Rao 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
260047dd67eSAttilio Rao 				break;
261047dd67eSAttilio Rao 			continue;
262047dd67eSAttilio Rao 		}
263047dd67eSAttilio Rao 
264047dd67eSAttilio Rao 		/*
265047dd67eSAttilio Rao 		 * We should have a sharer with waiters, so enter the hard
266047dd67eSAttilio Rao 		 * path in order to handle wakeups correctly.
267047dd67eSAttilio Rao 		 */
268047dd67eSAttilio Rao 		sleepq_lock(&lk->lock_object);
269651175c9SAttilio Rao 		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
270047dd67eSAttilio Rao 		v = LK_UNLOCKED;
271047dd67eSAttilio Rao 
272047dd67eSAttilio Rao 		/*
273047dd67eSAttilio Rao 		 * If the lock has exclusive waiters, give them preference in
274047dd67eSAttilio Rao 		 * order to avoid a deadlock with incoming shared acquirers.
2752028867dSAttilio Rao 		 * If interruptible sleeps left the exclusive queue empty,
2762028867dSAttilio Rao 		 * avoid starvation of the threads sleeping on the shared
2772028867dSAttilio Rao 		 * queue by giving them precedence and clearing the
2782028867dSAttilio Rao 		 * exclusive waiters bit anyway.
279c636ba83SAttilio Rao 		 * Please note that the lk_exslpfail count may overstate
280c636ba83SAttilio Rao 		 * the real number of waiters with the LK_SLEEPFAIL flag on
281c636ba83SAttilio Rao 		 * because they may be used in conjunction with interruptible
282aab9c8c2SAttilio Rao 		 * sleeps, so lk_exslpfail should be considered an upper
283aab9c8c2SAttilio Rao 		 * bound, including the edge cases.
284047dd67eSAttilio Rao 		 */
2852028867dSAttilio Rao 		realexslp = sleepq_sleepcnt(&lk->lock_object,
2862028867dSAttilio Rao 		    SQ_EXCLUSIVE_QUEUE);
2872028867dSAttilio Rao 		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
2882028867dSAttilio Rao 			if (lk->lk_exslpfail < realexslp) {
2892028867dSAttilio Rao 				lk->lk_exslpfail = 0;
290047dd67eSAttilio Rao 				queue = SQ_EXCLUSIVE_QUEUE;
291047dd67eSAttilio Rao 				v |= (x & LK_SHARED_WAITERS);
292047dd67eSAttilio Rao 			} else {
2932028867dSAttilio Rao 				lk->lk_exslpfail = 0;
2942028867dSAttilio Rao 				LOCK_LOG2(lk,
2952028867dSAttilio Rao 				    "%s: %p has only LK_SLEEPFAIL sleepers",
2962028867dSAttilio Rao 				    __func__, lk);
2972028867dSAttilio Rao 				LOCK_LOG2(lk,
2982028867dSAttilio Rao 			    "%s: %p waking up threads on the exclusive queue",
2992028867dSAttilio Rao 				    __func__, lk);
3002028867dSAttilio Rao 				wakeup_swapper =
3012028867dSAttilio Rao 				    sleepq_broadcast(&lk->lock_object,
3022028867dSAttilio Rao 				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
3032028867dSAttilio Rao 				queue = SQ_SHARED_QUEUE;
3042028867dSAttilio Rao 			}
3052028867dSAttilio Rao 
3062028867dSAttilio Rao 		} else {
3079dbf7a62SAttilio Rao 
3089dbf7a62SAttilio Rao 			/*
3099dbf7a62SAttilio Rao 			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
3109dbf7a62SAttilio Rao 			 * and using interruptible sleeps/timeout may have
3109dbf7a62SAttilio Rao 			 * left spurious lk_exslpfail counts behind, so clean
3119dbf7a62SAttilio Rao 			 * them up anyway.
3139dbf7a62SAttilio Rao 			 */
3149dbf7a62SAttilio Rao 			lk->lk_exslpfail = 0;
315047dd67eSAttilio Rao 			queue = SQ_SHARED_QUEUE;
316047dd67eSAttilio Rao 		}
317047dd67eSAttilio Rao 
3187f9f80ceSAttilio Rao 		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
319047dd67eSAttilio Rao 		    v)) {
320047dd67eSAttilio Rao 			sleepq_release(&lk->lock_object);
321047dd67eSAttilio Rao 			continue;
322047dd67eSAttilio Rao 		}
323047dd67eSAttilio Rao 		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
324047dd67eSAttilio Rao 		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
325047dd67eSAttilio Rao 		    "exclusive");
3262028867dSAttilio Rao 		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
327da7bbd2cSJohn Baldwin 		    0, queue);
328047dd67eSAttilio Rao 		sleepq_release(&lk->lock_object);
329047dd67eSAttilio Rao 		break;
330047dd67eSAttilio Rao 	}
331047dd67eSAttilio Rao 
332047dd67eSAttilio Rao 	lock_profile_release_lock(&lk->lock_object);
333da7bbd2cSJohn Baldwin 	return (wakeup_swapper);
334047dd67eSAttilio Rao }
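
/*
 * The wakeup_swapper value returned above (and accumulated by
 * __lockmgr_args() below) records whether sleepq_broadcast() woke a
 * swapped-out thread; the outermost caller is expected to kick the
 * swapper once every sleepqueue lock has been dropped (presumably via
 * kick_proc0(), as elsewhere in the kernel).
 */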
335047dd67eSAttilio Rao 
336047dd67eSAttilio Rao static void
337f9721b43SAttilio Rao assert_lockmgr(struct lock_object *lock, int what)
338f9721b43SAttilio Rao {
339f9721b43SAttilio Rao 
340f9721b43SAttilio Rao 	panic("lockmgr locks do not support assertions");
341f9721b43SAttilio Rao }
342f9721b43SAttilio Rao 
343047dd67eSAttilio Rao static void
3446e21afd4SJohn Baldwin lock_lockmgr(struct lock_object *lock, int how)
3456e21afd4SJohn Baldwin {
3466e21afd4SJohn Baldwin 
3476e21afd4SJohn Baldwin 	panic("lockmgr locks do not support sleep interlocking");
3486e21afd4SJohn Baldwin }
3496e21afd4SJohn Baldwin 
350047dd67eSAttilio Rao static int
3516e21afd4SJohn Baldwin unlock_lockmgr(struct lock_object *lock)
3526e21afd4SJohn Baldwin {
3536e21afd4SJohn Baldwin 
3546e21afd4SJohn Baldwin 	panic("lockmgr locks do not support sleep interlocking");
3556e21afd4SJohn Baldwin }
3566e21afd4SJohn Baldwin 
357a5aedd68SStacey Son #ifdef KDTRACE_HOOKS
358a5aedd68SStacey Son static int
359a5aedd68SStacey Son owner_lockmgr(struct lock_object *lock, struct thread **owner)
360a5aedd68SStacey Son {
361a5aedd68SStacey Son 
362a5aedd68SStacey Son 	panic("lockmgr locks do not support owner inquiring");
363a5aedd68SStacey Son }
364a5aedd68SStacey Son #endif
365a5aedd68SStacey Son 
36699448ed1SJohn Dyson void
367047dd67eSAttilio Rao lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
36899448ed1SJohn Dyson {
3696efc8a16SAttilio Rao 	int iflags;
3706efc8a16SAttilio Rao 
371047dd67eSAttilio Rao 	MPASS((flags & ~LK_INIT_MASK) == 0);
372353998acSAttilio Rao 	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
373353998acSAttilio Rao             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
374353998acSAttilio Rao             &lk->lk_lock));
37599448ed1SJohn Dyson 
376f0830182SAttilio Rao 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
377f0830182SAttilio Rao 	if (flags & LK_CANRECURSE)
378f0830182SAttilio Rao 		iflags |= LO_RECURSABLE;
379047dd67eSAttilio Rao 	if ((flags & LK_NODUP) == 0)
3806efc8a16SAttilio Rao 		iflags |= LO_DUPOK;
3817fbfba7bSAttilio Rao 	if (flags & LK_NOPROFILE)
3827fbfba7bSAttilio Rao 		iflags |= LO_NOPROFILE;
383047dd67eSAttilio Rao 	if ((flags & LK_NOWITNESS) == 0)
3846efc8a16SAttilio Rao 		iflags |= LO_WITNESS;
3857fbfba7bSAttilio Rao 	if (flags & LK_QUIET)
3867fbfba7bSAttilio Rao 		iflags |= LO_QUIET;
387651175c9SAttilio Rao 	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
388047dd67eSAttilio Rao 
389047dd67eSAttilio Rao 	lk->lk_lock = LK_UNLOCKED;
390047dd67eSAttilio Rao 	lk->lk_recurse = 0;
3912028867dSAttilio Rao 	lk->lk_exslpfail = 0;
392047dd67eSAttilio Rao 	lk->lk_timo = timo;
393047dd67eSAttilio Rao 	lk->lk_pri = pri;
394047dd67eSAttilio Rao 	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
395047dd67eSAttilio Rao 	STACK_ZERO(lk);
39699448ed1SJohn Dyson }
39799448ed1SJohn Dyson 
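/*
 * Example lifecycle for the initialization above (a hypothetical sketch;
 * "foo_lock", the "foolk" wmesg and the chosen flags are illustrative,
 * not taken from a real consumer):
 *
 *	struct lock foo_lock;
 *
 *	lockinit(&foo_lock, PVFS, "foolk", 0, LK_CANRECURSE);
 *	lockmgr(&foo_lock, LK_EXCLUSIVE, NULL);
 *	... exclusive section, may recurse via another LK_EXCLUSIVE ...
 *	lockmgr(&foo_lock, LK_RELEASE, NULL);
 *	lockdestroy(&foo_lock);
 */
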
3983634d5b2SJohn Baldwin /*
3993634d5b2SJohn Baldwin  * XXX: Gross hacks to manipulate external lock flags after
4003634d5b2SJohn Baldwin  * initialization.  Used for certain vnode and buf locks.
4013634d5b2SJohn Baldwin  */
4023634d5b2SJohn Baldwin void
4033634d5b2SJohn Baldwin lockallowshare(struct lock *lk)
4043634d5b2SJohn Baldwin {
4053634d5b2SJohn Baldwin 
4063634d5b2SJohn Baldwin 	lockmgr_assert(lk, KA_XLOCKED);
4073634d5b2SJohn Baldwin 	lk->lock_object.lo_flags &= ~LK_NOSHARE;
4083634d5b2SJohn Baldwin }
4093634d5b2SJohn Baldwin 
4103634d5b2SJohn Baldwin void
4113634d5b2SJohn Baldwin lockallowrecurse(struct lock *lk)
4123634d5b2SJohn Baldwin {
4133634d5b2SJohn Baldwin 
4143634d5b2SJohn Baldwin 	lockmgr_assert(lk, KA_XLOCKED);
4153634d5b2SJohn Baldwin 	lk->lock_object.lo_flags |= LO_RECURSABLE;
4163634d5b2SJohn Baldwin }
4173634d5b2SJohn Baldwin 
4183634d5b2SJohn Baldwin void
4193634d5b2SJohn Baldwin lockdisablerecurse(struct lock *lk)
4203634d5b2SJohn Baldwin {
4213634d5b2SJohn Baldwin 
4223634d5b2SJohn Baldwin 	lockmgr_assert(lk, KA_XLOCKED);
4233634d5b2SJohn Baldwin 	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
4243634d5b2SJohn Baldwin }
4253634d5b2SJohn Baldwin 
426a18b1f1dSJason Evans void
427047dd67eSAttilio Rao lockdestroy(struct lock *lk)
428a18b1f1dSJason Evans {
429c91fcee7SJohn Baldwin 
430047dd67eSAttilio Rao 	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
431047dd67eSAttilio Rao 	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
4322028867dSAttilio Rao 	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
433047dd67eSAttilio Rao 	lock_destroy(&lk->lock_object);
434047dd67eSAttilio Rao }
435047dd67eSAttilio Rao 
436047dd67eSAttilio Rao int
437047dd67eSAttilio Rao __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
438047dd67eSAttilio Rao     const char *wmesg, int pri, int timo, const char *file, int line)
439047dd67eSAttilio Rao {
440e5f94314SAttilio Rao 	GIANT_DECLARE;
441047dd67eSAttilio Rao 	struct lock_class *class;
442047dd67eSAttilio Rao 	const char *iwmesg;
443047dd67eSAttilio Rao 	uintptr_t tid, v, x;
4442028867dSAttilio Rao 	u_int op, realexslp;
4451723a064SJeff Roberson 	int error, ipri, itimo, queue, wakeup_swapper;
4461723a064SJeff Roberson #ifdef LOCK_PROFILING
4471723a064SJeff Roberson 	uint64_t waittime = 0;
4481723a064SJeff Roberson 	int contested = 0;
4491723a064SJeff Roberson #endif
450651175c9SAttilio Rao #ifdef ADAPTIVE_LOCKMGRS
451651175c9SAttilio Rao 	volatile struct thread *owner;
452651175c9SAttilio Rao 	u_int i, spintries = 0;
453651175c9SAttilio Rao #endif
454047dd67eSAttilio Rao 
455047dd67eSAttilio Rao 	error = 0;
456047dd67eSAttilio Rao 	tid = (uintptr_t)curthread;
457047dd67eSAttilio Rao 	op = (flags & LK_TYPE_MASK);
458047dd67eSAttilio Rao 	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
459047dd67eSAttilio Rao 	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
460047dd67eSAttilio Rao 	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
461047dd67eSAttilio Rao 
462047dd67eSAttilio Rao 	MPASS((flags & ~LK_TOTAL_MASK) == 0);
463872b7289SAttilio Rao 	KASSERT((op & (op - 1)) == 0,
464872b7289SAttilio Rao 	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
465047dd67eSAttilio Rao 	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
466047dd67eSAttilio Rao 	    (op != LK_DOWNGRADE && op != LK_RELEASE),
467047dd67eSAttilio Rao 	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
468047dd67eSAttilio Rao 	    __func__, file, line));
469047dd67eSAttilio Rao 	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
470047dd67eSAttilio Rao 	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
471047dd67eSAttilio Rao 	    __func__, file, line));
472047dd67eSAttilio Rao 
473047dd67eSAttilio Rao 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
474047dd67eSAttilio Rao 	if (panicstr != NULL) {
475047dd67eSAttilio Rao 		if (flags & LK_INTERLOCK)
476047dd67eSAttilio Rao 			class->lc_unlock(ilk);
477047dd67eSAttilio Rao 		return (0);
478047dd67eSAttilio Rao 	}
479047dd67eSAttilio Rao 
480047dd67eSAttilio Rao 	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
481047dd67eSAttilio Rao 		op = LK_EXCLUSIVE;
482047dd67eSAttilio Rao 
483da7bbd2cSJohn Baldwin 	wakeup_swapper = 0;
484047dd67eSAttilio Rao 	switch (op) {
485047dd67eSAttilio Rao 	case LK_SHARED:
486e5f94314SAttilio Rao 		if (LK_CAN_WITNESS(flags))
487e5f94314SAttilio Rao 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
48841313430SJohn Baldwin 			    file, line, ilk);
489047dd67eSAttilio Rao 		for (;;) {
490047dd67eSAttilio Rao 			x = lk->lk_lock;
491047dd67eSAttilio Rao 
492047dd67eSAttilio Rao 			/*
493047dd67eSAttilio Rao 			 * If no other thread has an exclusive lock, or
494047dd67eSAttilio Rao 			 * no exclusive waiter is present, bump the count of
495047dd67eSAttilio Rao 			 * sharers.  Since we have to preserve the state of
496047dd67eSAttilio Rao 			 * waiters, if we fail to acquire the shared lock
497047dd67eSAttilio Rao 			 * loop back and retry.
498047dd67eSAttilio Rao 			 */
499047dd67eSAttilio Rao 			if (LK_CAN_SHARE(x)) {
500047dd67eSAttilio Rao 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
501047dd67eSAttilio Rao 				    x + LK_ONE_SHARER))
502047dd67eSAttilio Rao 					break;
503047dd67eSAttilio Rao 				continue;
504047dd67eSAttilio Rao 			}
505047dd67eSAttilio Rao 			lock_profile_obtain_lock_failed(&lk->lock_object,
506047dd67eSAttilio Rao 			    &contested, &waittime);
507047dd67eSAttilio Rao 
508047dd67eSAttilio Rao 			/*
50996f1567fSKonstantin Belousov 			 * If the lock is already held by curthread in
510047dd67eSAttilio Rao 			 * exclusive mode, avoid a deadlock.
511047dd67eSAttilio Rao 			 */
512047dd67eSAttilio Rao 			if (LK_HOLDER(x) == tid) {
513047dd67eSAttilio Rao 				LOCK_LOG2(lk,
51496f1567fSKonstantin Belousov 				    "%s: %p already held in exclusive mode",
515047dd67eSAttilio Rao 				    __func__, lk);
516047dd67eSAttilio Rao 				error = EDEADLK;
517047dd67eSAttilio Rao 				break;
518a18b1f1dSJason Evans 			}
519a18b1f1dSJason Evans 
520a18b1f1dSJason Evans 			/*
521047dd67eSAttilio Rao 			 * If the operation is not allowed to sleep, just give up
522047dd67eSAttilio Rao 			 * and return.
523d7a7e179SAttilio Rao 			 */
524047dd67eSAttilio Rao 			if (LK_TRYOP(flags)) {
525047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p fails the try operation",
526047dd67eSAttilio Rao 				    __func__, lk);
527047dd67eSAttilio Rao 				error = EBUSY;
528047dd67eSAttilio Rao 				break;
529047dd67eSAttilio Rao 			}
530047dd67eSAttilio Rao 
531651175c9SAttilio Rao #ifdef ADAPTIVE_LOCKMGRS
532651175c9SAttilio Rao 			/*
533651175c9SAttilio Rao 			 * If the owner is running on another CPU, spin until
534651175c9SAttilio Rao 			 * the owner stops running or the state of the lock
5358d3635c4SAttilio Rao 			 * changes.  We need to handle both states here
5368d3635c4SAttilio Rao 			 * because for a failed acquisition the lock can be
5378d3635c4SAttilio Rao 			 * either held in exclusive mode or shared mode
5388d3635c4SAttilio Rao 			 * (for the writer starvation avoidance technique).
539651175c9SAttilio Rao 			 */
540651175c9SAttilio Rao 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
541651175c9SAttilio Rao 			    LK_HOLDER(x) != LK_KERNPROC) {
542651175c9SAttilio Rao 				owner = (struct thread *)LK_HOLDER(x);
543651175c9SAttilio Rao 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
544651175c9SAttilio Rao 					CTR3(KTR_LOCK,
545651175c9SAttilio Rao 					    "%s: spinning on %p held by %p",
546651175c9SAttilio Rao 					    __func__, lk, owner);
547651175c9SAttilio Rao 
548651175c9SAttilio Rao 				/*
549651175c9SAttilio Rao 				 * If we are holding also an interlock drop it
550651175c9SAttilio Rao 				 * in order to avoid a deadlock if the lockmgr
551651175c9SAttilio Rao 				 * owner is adaptively spinning on the
552651175c9SAttilio Rao 				 * interlock itself.
553651175c9SAttilio Rao 				 */
554651175c9SAttilio Rao 				if (flags & LK_INTERLOCK) {
555651175c9SAttilio Rao 					class->lc_unlock(ilk);
556651175c9SAttilio Rao 					flags &= ~LK_INTERLOCK;
557651175c9SAttilio Rao 				}
558651175c9SAttilio Rao 				GIANT_SAVE();
559651175c9SAttilio Rao 				while (LK_HOLDER(lk->lk_lock) ==
560651175c9SAttilio Rao 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
561651175c9SAttilio Rao 					cpu_spinwait();
5628d3635c4SAttilio Rao 				GIANT_RESTORE();
5638d3635c4SAttilio Rao 				continue;
564651175c9SAttilio Rao 			} else if (LK_CAN_ADAPT(lk, flags) &&
565651175c9SAttilio Rao 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
566651175c9SAttilio Rao 			    spintries < alk_retries) {
567651175c9SAttilio Rao 				if (flags & LK_INTERLOCK) {
568651175c9SAttilio Rao 					class->lc_unlock(ilk);
569651175c9SAttilio Rao 					flags &= ~LK_INTERLOCK;
570651175c9SAttilio Rao 				}
571651175c9SAttilio Rao 				GIANT_SAVE();
572651175c9SAttilio Rao 				spintries++;
573651175c9SAttilio Rao 				for (i = 0; i < alk_loops; i++) {
574651175c9SAttilio Rao 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
575651175c9SAttilio Rao 						CTR4(KTR_LOCK,
576651175c9SAttilio Rao 				    "%s: shared spinning on %p with %u and %u",
577651175c9SAttilio Rao 						    __func__, lk, spintries, i);
578651175c9SAttilio Rao 					x = lk->lk_lock;
579651175c9SAttilio Rao 					if ((x & LK_SHARE) == 0 ||
580651175c9SAttilio Rao 					    LK_CAN_SHARE(x) != 0)
581651175c9SAttilio Rao 						break;
582651175c9SAttilio Rao 					cpu_spinwait();
583651175c9SAttilio Rao 				}
5848d3635c4SAttilio Rao 				GIANT_RESTORE();
585651175c9SAttilio Rao 				if (i != alk_loops)
586651175c9SAttilio Rao 					continue;
587651175c9SAttilio Rao 			}
588651175c9SAttilio Rao #endif
589651175c9SAttilio Rao 
590047dd67eSAttilio Rao 			/*
591047dd67eSAttilio Rao 			 * Acquire the sleepqueue chain lock because we
592047dd67eSAttilio Rao 			 * probably will need to manipulate waiters flags.
593047dd67eSAttilio Rao 			 */
594047dd67eSAttilio Rao 			sleepq_lock(&lk->lock_object);
595047dd67eSAttilio Rao 			x = lk->lk_lock;
596047dd67eSAttilio Rao 
597047dd67eSAttilio Rao 			/*
598047dd67eSAttilio Rao 			 * If the lock can be acquired in shared mode, try
599047dd67eSAttilio Rao 			 * again.
600047dd67eSAttilio Rao 			 */
601047dd67eSAttilio Rao 			if (LK_CAN_SHARE(x)) {
602047dd67eSAttilio Rao 				sleepq_release(&lk->lock_object);
603047dd67eSAttilio Rao 				continue;
604047dd67eSAttilio Rao 			}
605047dd67eSAttilio Rao 
606651175c9SAttilio Rao #ifdef ADAPTIVE_LOCKMGRS
607651175c9SAttilio Rao 			/*
608651175c9SAttilio Rao 			 * The current lock owner might have started executing
609651175c9SAttilio Rao 			 * on another CPU (or the lock could have changed
610651175c9SAttilio Rao 			 * owner) while we were waiting on the sleepqueue
611651175c9SAttilio Rao 			 * chain lock.  If so, drop the sleepqueue lock and try
612651175c9SAttilio Rao 			 * again.
613651175c9SAttilio Rao 			 */
614651175c9SAttilio Rao 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
615651175c9SAttilio Rao 			    LK_HOLDER(x) != LK_KERNPROC) {
616651175c9SAttilio Rao 				owner = (struct thread *)LK_HOLDER(x);
617651175c9SAttilio Rao 				if (TD_IS_RUNNING(owner)) {
618651175c9SAttilio Rao 					sleepq_release(&lk->lock_object);
619651175c9SAttilio Rao 					continue;
620651175c9SAttilio Rao 				}
621651175c9SAttilio Rao 			}
622651175c9SAttilio Rao #endif
623651175c9SAttilio Rao 
624047dd67eSAttilio Rao 			/*
625047dd67eSAttilio Rao 			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
626047dd67eSAttilio Rao 			 * loop back and retry.
627047dd67eSAttilio Rao 			 */
628047dd67eSAttilio Rao 			if ((x & LK_SHARED_WAITERS) == 0) {
629047dd67eSAttilio Rao 				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
630047dd67eSAttilio Rao 				    x | LK_SHARED_WAITERS)) {
631047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
632047dd67eSAttilio Rao 					continue;
633047dd67eSAttilio Rao 				}
634047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
635047dd67eSAttilio Rao 				    __func__, lk);
636047dd67eSAttilio Rao 			}
637047dd67eSAttilio Rao 
638047dd67eSAttilio Rao 			/*
639047dd67eSAttilio Rao 			 * Since we have been unable to acquire the
640047dd67eSAttilio Rao 			 * shared lock and the shared waiters flag is set,
641047dd67eSAttilio Rao 			 * we will sleep.
642047dd67eSAttilio Rao 			 */
643047dd67eSAttilio Rao 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
644047dd67eSAttilio Rao 			    SQ_SHARED_QUEUE);
645047dd67eSAttilio Rao 			flags &= ~LK_INTERLOCK;
646047dd67eSAttilio Rao 			if (error) {
647047dd67eSAttilio Rao 				LOCK_LOG3(lk,
648047dd67eSAttilio Rao 				    "%s: interrupted sleep for %p with %d",
649047dd67eSAttilio Rao 				    __func__, lk, error);
650047dd67eSAttilio Rao 				break;
651047dd67eSAttilio Rao 			}
652047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
653047dd67eSAttilio Rao 			    __func__, lk);
654047dd67eSAttilio Rao 		}
655047dd67eSAttilio Rao 		if (error == 0) {
656047dd67eSAttilio Rao 			lock_profile_obtain_lock_success(&lk->lock_object,
657047dd67eSAttilio Rao 			    contested, waittime, file, line);
658047dd67eSAttilio Rao 			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
659047dd67eSAttilio Rao 			    line);
660e5f94314SAttilio Rao 			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
661e5f94314SAttilio Rao 			    line);
662047dd67eSAttilio Rao 			TD_LOCKS_INC(curthread);
663047dd67eSAttilio Rao 			TD_SLOCKS_INC(curthread);
664047dd67eSAttilio Rao 			STACK_SAVE(lk);
665047dd67eSAttilio Rao 		}
666047dd67eSAttilio Rao 		break;
667047dd67eSAttilio Rao 	case LK_UPGRADE:
668047dd67eSAttilio Rao 		_lockmgr_assert(lk, KA_SLOCKED, file, line);
669651175c9SAttilio Rao 		v = lk->lk_lock;
670651175c9SAttilio Rao 		x = v & LK_ALL_WAITERS;
671651175c9SAttilio Rao 		v &= LK_EXCLUSIVE_SPINNERS;
672047dd67eSAttilio Rao 
673047dd67eSAttilio Rao 		/*
674047dd67eSAttilio Rao 		 * Try to switch from one shared lock to an exclusive one.
675047dd67eSAttilio Rao 		 * We need to preserve waiters flags during the operation.
676047dd67eSAttilio Rao 		 */
677651175c9SAttilio Rao 		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
678047dd67eSAttilio Rao 		    tid | x)) {
679047dd67eSAttilio Rao 			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
680047dd67eSAttilio Rao 			    line);
681e5f94314SAttilio Rao 			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
682e5f94314SAttilio Rao 			    LK_TRYWIT(flags), file, line);
683047dd67eSAttilio Rao 			TD_SLOCKS_DEC(curthread);
684047dd67eSAttilio Rao 			break;
685047dd67eSAttilio Rao 		}
686047dd67eSAttilio Rao 
687047dd67eSAttilio Rao 		/*
688047dd67eSAttilio Rao 		 * We have been unable to upgrade, so just
689047dd67eSAttilio Rao 		 * give up the shared lock.
690047dd67eSAttilio Rao 		 */
691814f26daSJohn Baldwin 		wakeup_swapper |= wakeupshlk(lk, file, line);
692047dd67eSAttilio Rao 
693047dd67eSAttilio Rao 		/* FALLTHROUGH */
694047dd67eSAttilio Rao 	case LK_EXCLUSIVE:
695e5f94314SAttilio Rao 		if (LK_CAN_WITNESS(flags))
696e5f94314SAttilio Rao 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
69741313430SJohn Baldwin 			    LOP_EXCLUSIVE, file, line, ilk);
698047dd67eSAttilio Rao 
699047dd67eSAttilio Rao 		/*
70096f1567fSKonstantin Belousov 		 * If curthread already holds the lock and this one is
701047dd67eSAttilio Rao 		 * allowed to recurse, simply recurse on it.
702047dd67eSAttilio Rao 		 */
703047dd67eSAttilio Rao 		if (lockmgr_xlocked(lk)) {
704047dd67eSAttilio Rao 			if ((flags & LK_CANRECURSE) == 0 &&
705f0830182SAttilio Rao 			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
706047dd67eSAttilio Rao 
707047dd67eSAttilio Rao 				/*
708047dd67eSAttilio Rao 				 * If this is a try operation, just give up
709047dd67eSAttilio Rao 				 * and return EBUSY instead of panicking.
710047dd67eSAttilio Rao 				 */
711047dd67eSAttilio Rao 				if (LK_TRYOP(flags)) {
712047dd67eSAttilio Rao 					LOCK_LOG2(lk,
713047dd67eSAttilio Rao 					    "%s: %p fails the try operation",
714047dd67eSAttilio Rao 					    __func__, lk);
715047dd67eSAttilio Rao 					error = EBUSY;
716047dd67eSAttilio Rao 					break;
717047dd67eSAttilio Rao 				}
718047dd67eSAttilio Rao 				if (flags & LK_INTERLOCK)
719047dd67eSAttilio Rao 					class->lc_unlock(ilk);
720047dd67eSAttilio Rao 		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
721047dd67eSAttilio Rao 				    __func__, iwmesg, file, line);
722047dd67eSAttilio Rao 			}
723047dd67eSAttilio Rao 			lk->lk_recurse++;
724047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
725047dd67eSAttilio Rao 			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
726047dd67eSAttilio Rao 			    lk->lk_recurse, file, line);
727e5f94314SAttilio Rao 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
728e5f94314SAttilio Rao 			    LK_TRYWIT(flags), file, line);
729047dd67eSAttilio Rao 			TD_LOCKS_INC(curthread);
730047dd67eSAttilio Rao 			break;
731047dd67eSAttilio Rao 		}
732047dd67eSAttilio Rao 
733047dd67eSAttilio Rao 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
734047dd67eSAttilio Rao 		    tid)) {
735047dd67eSAttilio Rao 			lock_profile_obtain_lock_failed(&lk->lock_object,
736047dd67eSAttilio Rao 			    &contested, &waittime);
737047dd67eSAttilio Rao 
738047dd67eSAttilio Rao 			/*
739047dd67eSAttilio Rao 			 * If the operation is not allowed to sleep, just give up
740047dd67eSAttilio Rao 			 * and return.
741047dd67eSAttilio Rao 			 */
742047dd67eSAttilio Rao 			if (LK_TRYOP(flags)) {
743047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p fails the try operation",
744047dd67eSAttilio Rao 				    __func__, lk);
745047dd67eSAttilio Rao 				error = EBUSY;
746047dd67eSAttilio Rao 				break;
747047dd67eSAttilio Rao 			}
748047dd67eSAttilio Rao 
749651175c9SAttilio Rao #ifdef ADAPTIVE_LOCKMGRS
750651175c9SAttilio Rao 			/*
751651175c9SAttilio Rao 			 * If the owner is running on another CPU, spin until
752651175c9SAttilio Rao 			 * the owner stops running or the state of the lock
753651175c9SAttilio Rao 			 * changes.
754651175c9SAttilio Rao 			 */
755651175c9SAttilio Rao 			x = lk->lk_lock;
756651175c9SAttilio Rao 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
757651175c9SAttilio Rao 			    LK_HOLDER(x) != LK_KERNPROC) {
758651175c9SAttilio Rao 				owner = (struct thread *)LK_HOLDER(x);
759651175c9SAttilio Rao 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
760651175c9SAttilio Rao 					CTR3(KTR_LOCK,
761651175c9SAttilio Rao 					    "%s: spinning on %p held by %p",
762651175c9SAttilio Rao 					    __func__, lk, owner);
763651175c9SAttilio Rao 
764651175c9SAttilio Rao 				/*
765651175c9SAttilio Rao 				 * If we are holding also an interlock drop it
766651175c9SAttilio Rao 				 * in order to avoid a deadlock if the lockmgr
767651175c9SAttilio Rao 				 * owner is adaptively spinning on the
768651175c9SAttilio Rao 				 * interlock itself.
769651175c9SAttilio Rao 				 */
770651175c9SAttilio Rao 				if (flags & LK_INTERLOCK) {
771651175c9SAttilio Rao 					class->lc_unlock(ilk);
772651175c9SAttilio Rao 					flags &= ~LK_INTERLOCK;
773651175c9SAttilio Rao 				}
774651175c9SAttilio Rao 				GIANT_SAVE();
775651175c9SAttilio Rao 				while (LK_HOLDER(lk->lk_lock) ==
776651175c9SAttilio Rao 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
777651175c9SAttilio Rao 					cpu_spinwait();
7788d3635c4SAttilio Rao 				GIANT_RESTORE();
7798d3635c4SAttilio Rao 				continue;
780651175c9SAttilio Rao 			} else if (LK_CAN_ADAPT(lk, flags) &&
781651175c9SAttilio Rao 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
782651175c9SAttilio Rao 			    spintries < alk_retries) {
783651175c9SAttilio Rao 				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
784651175c9SAttilio Rao 				    !atomic_cmpset_ptr(&lk->lk_lock, x,
785651175c9SAttilio Rao 				    x | LK_EXCLUSIVE_SPINNERS))
786651175c9SAttilio Rao 					continue;
787651175c9SAttilio Rao 				if (flags & LK_INTERLOCK) {
788651175c9SAttilio Rao 					class->lc_unlock(ilk);
789651175c9SAttilio Rao 					flags &= ~LK_INTERLOCK;
790651175c9SAttilio Rao 				}
791651175c9SAttilio Rao 				GIANT_SAVE();
792651175c9SAttilio Rao 				spintries++;
793651175c9SAttilio Rao 				for (i = 0; i < alk_loops; i++) {
794651175c9SAttilio Rao 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
795651175c9SAttilio Rao 						CTR4(KTR_LOCK,
796651175c9SAttilio Rao 				    "%s: shared spinning on %p with %u and %u",
797651175c9SAttilio Rao 						    __func__, lk, spintries, i);
798651175c9SAttilio Rao 					if ((lk->lk_lock &
799651175c9SAttilio Rao 					    LK_EXCLUSIVE_SPINNERS) == 0)
800651175c9SAttilio Rao 						break;
801651175c9SAttilio Rao 					cpu_spinwait();
802651175c9SAttilio Rao 				}
8038d3635c4SAttilio Rao 				GIANT_RESTORE();
804651175c9SAttilio Rao 				if (i != alk_loops)
805651175c9SAttilio Rao 					continue;
806651175c9SAttilio Rao 			}
807651175c9SAttilio Rao #endif
808651175c9SAttilio Rao 
809047dd67eSAttilio Rao 			/*
810047dd67eSAttilio Rao 			 * Acquire the sleepqueue chain lock because we
811047dd67eSAttilio Rao 			 * probably will need to manipulate waiters flags.
812047dd67eSAttilio Rao 			 */
813047dd67eSAttilio Rao 			sleepq_lock(&lk->lock_object);
814047dd67eSAttilio Rao 			x = lk->lk_lock;
815047dd67eSAttilio Rao 
816047dd67eSAttilio Rao 			/*
817047dd67eSAttilio Rao 			 * If the lock has been released while we spun on
818047dd67eSAttilio Rao 			 * the sleepqueue chain lock, just try again.
819047dd67eSAttilio Rao 			 */
820047dd67eSAttilio Rao 			if (x == LK_UNLOCKED) {
821047dd67eSAttilio Rao 				sleepq_release(&lk->lock_object);
822047dd67eSAttilio Rao 				continue;
823047dd67eSAttilio Rao 			}
824047dd67eSAttilio Rao 
825651175c9SAttilio Rao #ifdef ADAPTIVE_LOCKMGRS
826651175c9SAttilio Rao 			/*
827651175c9SAttilio Rao 			 * The current lock owner might have started executing
828651175c9SAttilio Rao 			 * on another CPU (or the lock could have changed
829651175c9SAttilio Rao 			 * owner) while we were waiting on the sleepqueue
830651175c9SAttilio Rao 			 * chain lock.  If so, drop the sleepqueue lock and try
831651175c9SAttilio Rao 			 * again.
832651175c9SAttilio Rao 			 */
833651175c9SAttilio Rao 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
834651175c9SAttilio Rao 			    LK_HOLDER(x) != LK_KERNPROC) {
835651175c9SAttilio Rao 				owner = (struct thread *)LK_HOLDER(x);
836651175c9SAttilio Rao 				if (TD_IS_RUNNING(owner)) {
837651175c9SAttilio Rao 					sleepq_release(&lk->lock_object);
838651175c9SAttilio Rao 					continue;
839651175c9SAttilio Rao 				}
840651175c9SAttilio Rao 			}
841651175c9SAttilio Rao #endif
842651175c9SAttilio Rao 
843047dd67eSAttilio Rao 			/*
844047dd67eSAttilio Rao 			 * The lock can be in the state where there is a
845047dd67eSAttilio Rao 			 * pending queue of waiters, but still no owner.
846047dd67eSAttilio Rao 			 * This happens when the lock is contested and an
847047dd67eSAttilio Rao 			 * owner is going to claim the lock.
848047dd67eSAttilio Rao 			 * If curthread is the one that successfully acquires it,
849047dd67eSAttilio Rao 			 * claim lock ownership and return, preserving the waiters
850047dd67eSAttilio Rao 			 * flags.
851047dd67eSAttilio Rao 			 */
852651175c9SAttilio Rao 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
853651175c9SAttilio Rao 			if ((x & ~v) == LK_UNLOCKED) {
854651175c9SAttilio Rao 				v &= ~LK_EXCLUSIVE_SPINNERS;
855047dd67eSAttilio Rao 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
856047dd67eSAttilio Rao 				    tid | v)) {
857047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
858047dd67eSAttilio Rao 					LOCK_LOG2(lk,
859047dd67eSAttilio Rao 					    "%s: %p claimed by a new writer",
860047dd67eSAttilio Rao 					    __func__, lk);
861047dd67eSAttilio Rao 					break;
862047dd67eSAttilio Rao 				}
863047dd67eSAttilio Rao 				sleepq_release(&lk->lock_object);
864047dd67eSAttilio Rao 				continue;
865047dd67eSAttilio Rao 			}
866047dd67eSAttilio Rao 
867047dd67eSAttilio Rao 			/*
868047dd67eSAttilio Rao 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
869047dd67eSAttilio Rao 			 * fail, loop back and retry.
870047dd67eSAttilio Rao 			 */
871047dd67eSAttilio Rao 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
872047dd67eSAttilio Rao 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
873047dd67eSAttilio Rao 				    x | LK_EXCLUSIVE_WAITERS)) {
874047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
875047dd67eSAttilio Rao 					continue;
876047dd67eSAttilio Rao 				}
877047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
878047dd67eSAttilio Rao 				    __func__, lk);
879047dd67eSAttilio Rao 			}
880047dd67eSAttilio Rao 
881047dd67eSAttilio Rao 			/*
882047dd67eSAttilio Rao 			 * Since we have been unable to acquire the
883047dd67eSAttilio Rao 			 * exclusive lock and the exclusive waiters flag
884047dd67eSAttilio Rao 			 * is set, we will sleep.
885047dd67eSAttilio Rao 			 */
886047dd67eSAttilio Rao 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
887047dd67eSAttilio Rao 			    SQ_EXCLUSIVE_QUEUE);
888047dd67eSAttilio Rao 			flags &= ~LK_INTERLOCK;
889047dd67eSAttilio Rao 			if (error) {
890047dd67eSAttilio Rao 				LOCK_LOG3(lk,
891047dd67eSAttilio Rao 				    "%s: interrupted sleep for %p with %d",
892047dd67eSAttilio Rao 				    __func__, lk, error);
893047dd67eSAttilio Rao 				break;
894047dd67eSAttilio Rao 			}
895047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
896047dd67eSAttilio Rao 			    __func__, lk);
897047dd67eSAttilio Rao 		}
898047dd67eSAttilio Rao 		if (error == 0) {
899047dd67eSAttilio Rao 			lock_profile_obtain_lock_success(&lk->lock_object,
900047dd67eSAttilio Rao 			    contested, waittime, file, line);
901047dd67eSAttilio Rao 			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
902047dd67eSAttilio Rao 			    lk->lk_recurse, file, line);
903e5f94314SAttilio Rao 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
904e5f94314SAttilio Rao 			    LK_TRYWIT(flags), file, line);
905047dd67eSAttilio Rao 			TD_LOCKS_INC(curthread);
906047dd67eSAttilio Rao 			STACK_SAVE(lk);
907047dd67eSAttilio Rao 		}
908047dd67eSAttilio Rao 		break;
909047dd67eSAttilio Rao 	case LK_DOWNGRADE:
910047dd67eSAttilio Rao 		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
911e5f94314SAttilio Rao 		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
912e5f94314SAttilio Rao 		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
913e5f94314SAttilio Rao 		TD_SLOCKS_INC(curthread);
914047dd67eSAttilio Rao 
915047dd67eSAttilio Rao 		/*
916047dd67eSAttilio Rao 		 * In order to preserve waiters flags, just spin.
917047dd67eSAttilio Rao 		 */
918047dd67eSAttilio Rao 		for (;;) {
919651175c9SAttilio Rao 			x = lk->lk_lock;
920651175c9SAttilio Rao 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
921651175c9SAttilio Rao 			x &= LK_ALL_WAITERS;
922047dd67eSAttilio Rao 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
923e5f94314SAttilio Rao 			    LK_SHARERS_LOCK(1) | x))
924047dd67eSAttilio Rao 				break;
925047dd67eSAttilio Rao 			cpu_spinwait();
926047dd67eSAttilio Rao 		}
927047dd67eSAttilio Rao 		break;
928047dd67eSAttilio Rao 	case LK_RELEASE:
929047dd67eSAttilio Rao 		_lockmgr_assert(lk, KA_LOCKED, file, line);
930047dd67eSAttilio Rao 		x = lk->lk_lock;
931047dd67eSAttilio Rao 
932047dd67eSAttilio Rao 		if ((x & LK_SHARE) == 0) {
933047dd67eSAttilio Rao 
934047dd67eSAttilio Rao 			 * As a first option, treat the lock as if it has no
935047dd67eSAttilio Rao 			 * waiters.
936047dd67eSAttilio Rao 			 * Fix up the tid variable if the lock has been disowned.
937047dd67eSAttilio Rao 			 * Fix-up the tid var if the lock has been disowned.
938047dd67eSAttilio Rao 			 */
939047dd67eSAttilio Rao 			if (LK_HOLDER(x) == LK_KERNPROC)
940047dd67eSAttilio Rao 				tid = LK_KERNPROC;
941e5f94314SAttilio Rao 			else {
942e5f94314SAttilio Rao 				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
943e5f94314SAttilio Rao 				    file, line);
944047dd67eSAttilio Rao 				TD_LOCKS_DEC(curthread);
945e5f94314SAttilio Rao 			}
946047dd67eSAttilio Rao 			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
947047dd67eSAttilio Rao 			    lk->lk_recurse, file, line);
948047dd67eSAttilio Rao 
949047dd67eSAttilio Rao 			/*
950047dd67eSAttilio Rao 			 * The lock is held in exclusive mode.
951047dd67eSAttilio Rao 			 * If the lock is also recursed, then unrecurse it.
952047dd67eSAttilio Rao 			 */
953047dd67eSAttilio Rao 			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
954047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
955047dd67eSAttilio Rao 				    lk);
956047dd67eSAttilio Rao 				lk->lk_recurse--;
957047dd67eSAttilio Rao 				break;
958047dd67eSAttilio Rao 			}
95904a28689SJeff Roberson 			if (tid != LK_KERNPROC)
960047dd67eSAttilio Rao 				lock_profile_release_lock(&lk->lock_object);
961047dd67eSAttilio Rao 
962047dd67eSAttilio Rao 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
963047dd67eSAttilio Rao 			    LK_UNLOCKED))
964047dd67eSAttilio Rao 				break;
965047dd67eSAttilio Rao 
966047dd67eSAttilio Rao 			sleepq_lock(&lk->lock_object);
967651175c9SAttilio Rao 			x = lk->lk_lock;
968047dd67eSAttilio Rao 			v = LK_UNLOCKED;
969047dd67eSAttilio Rao 
970047dd67eSAttilio Rao 			/*
971047dd67eSAttilio Rao 			 * If the lock has exclusive waiters, give them
972047dd67eSAttilio Rao 			 * preference in order to avoid a deadlock with
973047dd67eSAttilio Rao 			 * incoming shared acquirers.
9742028867dSAttilio Rao 			 * If interruptible sleeps left the exclusive queue
9752028867dSAttilio Rao 			 * empty, avoid starvation of the threads sleeping
9762028867dSAttilio Rao 			 * on the shared queue by giving them precedence
9772028867dSAttilio Rao 			 * and clearing the exclusive waiters bit anyway.
978c636ba83SAttilio Rao 			 * Please note that the lk_exslpfail count may
979c636ba83SAttilio Rao 			 * overstate the real number of waiters with the
980c636ba83SAttilio Rao 			 * LK_SLEEPFAIL flag on because they may be used in
981c636ba83SAttilio Rao 			 * conjunction with interruptible sleeps, so
982aab9c8c2SAttilio Rao 			 * lk_exslpfail should be considered an upper
983aab9c8c2SAttilio Rao 			 * bound, including the edge cases.
984047dd67eSAttilio Rao 			 */
985651175c9SAttilio Rao 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
9862028867dSAttilio Rao 			realexslp = sleepq_sleepcnt(&lk->lock_object,
9872028867dSAttilio Rao 			    SQ_EXCLUSIVE_QUEUE);
9882028867dSAttilio Rao 			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
9892028867dSAttilio Rao 				if (lk->lk_exslpfail < realexslp) {
9902028867dSAttilio Rao 					lk->lk_exslpfail = 0;
991047dd67eSAttilio Rao 					queue = SQ_EXCLUSIVE_QUEUE;
992047dd67eSAttilio Rao 					v |= (x & LK_SHARED_WAITERS);
993047dd67eSAttilio Rao 				} else {
9942028867dSAttilio Rao 					lk->lk_exslpfail = 0;
9952028867dSAttilio Rao 					LOCK_LOG2(lk,
9962028867dSAttilio Rao 					"%s: %p has only LK_SLEEPFAIL sleepers",
9972028867dSAttilio Rao 					    __func__, lk);
9982028867dSAttilio Rao 					LOCK_LOG2(lk,
9992028867dSAttilio Rao 			"%s: %p waking up threads on the exclusive queue",
10002028867dSAttilio Rao 					    __func__, lk);
10012028867dSAttilio Rao 					wakeup_swapper =
10022028867dSAttilio Rao 					    sleepq_broadcast(&lk->lock_object,
10032028867dSAttilio Rao 					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
10042028867dSAttilio Rao 					queue = SQ_SHARED_QUEUE;
10052028867dSAttilio Rao 				}
10062028867dSAttilio Rao 			} else {
10079dbf7a62SAttilio Rao 
10089dbf7a62SAttilio Rao 				/*
10099dbf7a62SAttilio Rao 				 * Exclusive waiters sleeping with LK_SLEEPFAIL
10109dbf7a62SAttilio Rao 				 * on and using interruptible sleeps/timeout
10119dbf7a62SAttilio Rao 				 * may have left spurious lk_exslpfail counts
10129dbf7a62SAttilio Rao 				 * behind, so clean them up anyway.
10139dbf7a62SAttilio Rao 				 */
10149dbf7a62SAttilio Rao 				lk->lk_exslpfail = 0;
1015047dd67eSAttilio Rao 				queue = SQ_SHARED_QUEUE;
1016047dd67eSAttilio Rao 			}
1017047dd67eSAttilio Rao 
1018047dd67eSAttilio Rao 			LOCK_LOG3(lk,
1019047dd67eSAttilio Rao 			    "%s: %p waking up threads on the %s queue",
1020047dd67eSAttilio Rao 			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1021047dd67eSAttilio Rao 			    "exclusive");
1022047dd67eSAttilio Rao 			atomic_store_rel_ptr(&lk->lk_lock, v);
10232028867dSAttilio Rao 			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
1024da7bbd2cSJohn Baldwin 			    SLEEPQ_LK, 0, queue);
1025047dd67eSAttilio Rao 			sleepq_release(&lk->lock_object);
1026047dd67eSAttilio Rao 			break;
1027047dd67eSAttilio Rao 		} else
1028da7bbd2cSJohn Baldwin 			wakeup_swapper = wakeupshlk(lk, file, line);
1029047dd67eSAttilio Rao 		break;
1030047dd67eSAttilio Rao 	case LK_DRAIN:
1031e5f94314SAttilio Rao 		if (LK_CAN_WITNESS(flags))
1032e5f94314SAttilio Rao 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
103341313430SJohn Baldwin 			    LOP_EXCLUSIVE, file, line, ilk);
1034047dd67eSAttilio Rao 
1035047dd67eSAttilio Rao 		/*
103696f1567fSKonstantin Belousov 		 * Trying to drain a lock we already own will result in a
1037047dd67eSAttilio Rao 		 * deadlock.
1038047dd67eSAttilio Rao 		 */
1039047dd67eSAttilio Rao 		if (lockmgr_xlocked(lk)) {
1040047dd67eSAttilio Rao 			if (flags & LK_INTERLOCK)
1041047dd67eSAttilio Rao 				class->lc_unlock(ilk);
1042047dd67eSAttilio Rao 			panic("%s: draining %s with the lock held @ %s:%d\n",
1043047dd67eSAttilio Rao 			    __func__, iwmesg, file, line);
1044047dd67eSAttilio Rao 		}
1045047dd67eSAttilio Rao 
1046047dd67eSAttilio Rao 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1047047dd67eSAttilio Rao 			lock_profile_obtain_lock_failed(&lk->lock_object,
1048047dd67eSAttilio Rao 			    &contested, &waittime);
1049047dd67eSAttilio Rao 
1050047dd67eSAttilio Rao 			/*
1051047dd67eSAttilio Rao 			 * If the operation is not allowed to sleep, just give up
1052047dd67eSAttilio Rao 			 * and return.
1053047dd67eSAttilio Rao 			 */
1054047dd67eSAttilio Rao 			if (LK_TRYOP(flags)) {
1055047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p fails the try operation",
1056047dd67eSAttilio Rao 				    __func__, lk);
1057047dd67eSAttilio Rao 				error = EBUSY;
1058047dd67eSAttilio Rao 				break;
1059047dd67eSAttilio Rao 			}
1060047dd67eSAttilio Rao 
1061047dd67eSAttilio Rao 			/*
1062047dd67eSAttilio Rao 			 * Acquire the sleepqueue chain lock because we
1063047dd67eSAttilio Rao 			 * probably will need to manipulate waiters flags.
1064047dd67eSAttilio Rao 			 */
1065047dd67eSAttilio Rao 			sleepq_lock(&lk->lock_object);
1066047dd67eSAttilio Rao 			x = lk->lk_lock;
1067047dd67eSAttilio Rao 
1068047dd67eSAttilio Rao 			/*
1069047dd67eSAttilio Rao 			 * If the lock has been released while we spun on
1070047dd67eSAttilio Rao 			 * the sleepqueue chain lock, just try again.
1071047dd67eSAttilio Rao 			 */
1072047dd67eSAttilio Rao 			if (x == LK_UNLOCKED) {
1073047dd67eSAttilio Rao 				sleepq_release(&lk->lock_object);
1074047dd67eSAttilio Rao 				continue;
1075047dd67eSAttilio Rao 			}
1076047dd67eSAttilio Rao 
1077651175c9SAttilio Rao 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1078651175c9SAttilio Rao 			if ((x & ~v) == LK_UNLOCKED) {
1079651175c9SAttilio Rao 				v = (x & ~LK_EXCLUSIVE_SPINNERS);
10802028867dSAttilio Rao 
10812028867dSAttilio Rao 				/*
10822028867dSAttilio Rao 				 * If interruptible sleeps left the exclusive
10832028867dSAttilio Rao 				 * queue empty, avoid starvation of the
10842028867dSAttilio Rao 				 * threads sleeping on the shared queue by
10852028867dSAttilio Rao 				 * giving them precedence and clearing the
10862028867dSAttilio Rao 				 * exclusive waiters bit anyway.
1087c636ba83SAttilio Rao 				 * Please note that the lk_exslpfail count may
1088c636ba83SAttilio Rao 				 * overstate the real number of waiters with
1089c636ba83SAttilio Rao 				 * the LK_SLEEPFAIL flag on because they may
1090c636ba83SAttilio Rao 				 * be used in conjunction with interruptible
1091aab9c8c2SAttilio Rao 				 * sleeps, so lk_exslpfail should be considered
1092aab9c8c2SAttilio Rao 				 * an upper bound, including the edge
1093c636ba83SAttilio Rao 				 * cases.
10942028867dSAttilio Rao 				 */
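				/*
				 * Illustrative scenario (editor's note, not
				 * from the original source): suppose two
				 * threads slept with LK_SLEEPFAIL and one
				 * was then interrupted out of its sleep.
				 * The sleepqueue now holds one thread while
				 * lk_exslpfail still reads 2, so the
				 * lk_exslpfail >= realexslp comparison
				 * below is what actually decides whether
				 * waking the exclusive queue can be useful.
				 */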
1095047dd67eSAttilio Rao 				if (v & LK_EXCLUSIVE_WAITERS) {
1096047dd67eSAttilio Rao 					queue = SQ_EXCLUSIVE_QUEUE;
1097047dd67eSAttilio Rao 					v &= ~LK_EXCLUSIVE_WAITERS;
1098047dd67eSAttilio Rao 				} else {
10999dbf7a62SAttilio Rao 
11009dbf7a62SAttilio Rao 					/*
11019dbf7a62SAttilio Rao 					 * Exclusive waiters sleeping with
11029dbf7a62SAttilio Rao 					 * LK_SLEEPFAIL on and using
11039dbf7a62SAttilio Rao 					 * interruptible sleeps/timeouts may
11049dbf7a62SAttilio Rao 					 * have left spurious lk_exslpfail
11059dbf7a62SAttilio Rao 					 * counts behind, so clean them up anyway.
11069dbf7a62SAttilio Rao 					 */
1107047dd67eSAttilio Rao 					MPASS(v & LK_SHARED_WAITERS);
11089dbf7a62SAttilio Rao 					lk->lk_exslpfail = 0;
1109047dd67eSAttilio Rao 					queue = SQ_SHARED_QUEUE;
1110047dd67eSAttilio Rao 					v &= ~LK_SHARED_WAITERS;
1111047dd67eSAttilio Rao 				}
11122028867dSAttilio Rao 				if (queue == SQ_EXCLUSIVE_QUEUE) {
11132028867dSAttilio Rao 					realexslp =
11142028867dSAttilio Rao 					    sleepq_sleepcnt(&lk->lock_object,
11152028867dSAttilio Rao 					    SQ_EXCLUSIVE_QUEUE);
11162028867dSAttilio Rao 					if (lk->lk_exslpfail >= realexslp) {
11172028867dSAttilio Rao 						lk->lk_exslpfail = 0;
11182028867dSAttilio Rao 						queue = SQ_SHARED_QUEUE;
11192028867dSAttilio Rao 						v &= ~LK_SHARED_WAITERS;
11202028867dSAttilio Rao 						if (realexslp != 0) {
11212028867dSAttilio Rao 							LOCK_LOG2(lk,
11222028867dSAttilio Rao 					"%s: %p has only LK_SLEEPFAIL sleepers",
11232028867dSAttilio Rao 							    __func__, lk);
11242028867dSAttilio Rao 							LOCK_LOG2(lk,
11252028867dSAttilio Rao 			"%s: %p waking up threads on the exclusive queue",
11262028867dSAttilio Rao 							    __func__, lk);
11272028867dSAttilio Rao 							wakeup_swapper =
11282028867dSAttilio Rao 							    sleepq_broadcast(
11292028867dSAttilio Rao 							    &lk->lock_object,
11302028867dSAttilio Rao 							    SLEEPQ_LK, 0,
11312028867dSAttilio Rao 							    SQ_EXCLUSIVE_QUEUE);
11322028867dSAttilio Rao 						}
11332028867dSAttilio Rao 					} else
11342028867dSAttilio Rao 						lk->lk_exslpfail = 0;
11352028867dSAttilio Rao 				}
1136047dd67eSAttilio Rao 				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1137047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
1138047dd67eSAttilio Rao 					continue;
1139047dd67eSAttilio Rao 				}
1140047dd67eSAttilio Rao 				LOCK_LOG3(lk,
1141047dd67eSAttilio Rao 				"%s: %p waking up all threads on the %s queue",
1142047dd67eSAttilio Rao 				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1143047dd67eSAttilio Rao 				    "shared" : "exclusive");
1144814f26daSJohn Baldwin 				wakeup_swapper |= sleepq_broadcast(
1145da7bbd2cSJohn Baldwin 				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1146047dd67eSAttilio Rao 
1147047dd67eSAttilio Rao 				/*
1148047dd67eSAttilio Rao 				 * If shared waiters have been woken up, we
1149047dd67eSAttilio Rao 				 * need to wait for one of them to acquire the
1150047dd67eSAttilio Rao 				 * lock before setting the exclusive waiters
1151047dd67eSAttilio Rao 				 * flag, in order to avoid a deadlock.
1152047dd67eSAttilio Rao 				 */
1153047dd67eSAttilio Rao 				if (queue == SQ_SHARED_QUEUE) {
1154047dd67eSAttilio Rao 					for (v = lk->lk_lock;
1155047dd67eSAttilio Rao 					    (v & LK_SHARE) && !LK_SHARERS(v);
1156047dd67eSAttilio Rao 					    v = lk->lk_lock)
1157047dd67eSAttilio Rao 						cpu_spinwait();
1158047dd67eSAttilio Rao 				}
1159047dd67eSAttilio Rao 			}
1160047dd67eSAttilio Rao 
1161047dd67eSAttilio Rao 			/*
1162047dd67eSAttilio Rao 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1163047dd67eSAttilio Rao 			 * fail, loop back and retry.
1164047dd67eSAttilio Rao 			 */
1165047dd67eSAttilio Rao 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1166047dd67eSAttilio Rao 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1167047dd67eSAttilio Rao 				    x | LK_EXCLUSIVE_WAITERS)) {
1168047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
1169047dd67eSAttilio Rao 					continue;
1170047dd67eSAttilio Rao 				}
1171047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1172047dd67eSAttilio Rao 				    __func__, lk);
1173047dd67eSAttilio Rao 			}
1174047dd67eSAttilio Rao 
1175047dd67eSAttilio Rao 			/*
1176047dd67eSAttilio Rao 			 * As long as we are unable to acquire the
1177047dd67eSAttilio Rao 			 * exclusive lock and the exclusive waiters flag
1178047dd67eSAttilio Rao 			 * is set, we will sleep.
1179047dd67eSAttilio Rao 			 */
1180047dd67eSAttilio Rao 			if (flags & LK_INTERLOCK) {
1181047dd67eSAttilio Rao 				class->lc_unlock(ilk);
1182047dd67eSAttilio Rao 				flags &= ~LK_INTERLOCK;
1183047dd67eSAttilio Rao 			}
1184e5f94314SAttilio Rao 			GIANT_SAVE();
1185047dd67eSAttilio Rao 			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1186047dd67eSAttilio Rao 			    SQ_EXCLUSIVE_QUEUE);
1187047dd67eSAttilio Rao 			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1188e5f94314SAttilio Rao 			GIANT_RESTORE();
1189047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1190047dd67eSAttilio Rao 			    __func__, lk);
1191047dd67eSAttilio Rao 		}
1192047dd67eSAttilio Rao 
1193047dd67eSAttilio Rao 		if (error == 0) {
1194047dd67eSAttilio Rao 			lock_profile_obtain_lock_success(&lk->lock_object,
1195047dd67eSAttilio Rao 			    contested, waittime, file, line);
1196047dd67eSAttilio Rao 			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1197047dd67eSAttilio Rao 			    lk->lk_recurse, file, line);
1198e5f94314SAttilio Rao 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1199e5f94314SAttilio Rao 			    LK_TRYWIT(flags), file, line);
1200047dd67eSAttilio Rao 			TD_LOCKS_INC(curthread);
1201047dd67eSAttilio Rao 			STACK_SAVE(lk);
1202047dd67eSAttilio Rao 		}
1203047dd67eSAttilio Rao 		break;
1204047dd67eSAttilio Rao 	default:
1205047dd67eSAttilio Rao 		if (flags & LK_INTERLOCK)
1206047dd67eSAttilio Rao 			class->lc_unlock(ilk);
1207047dd67eSAttilio Rao 		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1208047dd67eSAttilio Rao 	}
1209047dd67eSAttilio Rao 
1210047dd67eSAttilio Rao 	if (flags & LK_INTERLOCK)
1211047dd67eSAttilio Rao 		class->lc_unlock(ilk);
1212da7bbd2cSJohn Baldwin 	if (wakeup_swapper)
1213da7bbd2cSJohn Baldwin 		kick_proc0();
1214047dd67eSAttilio Rao 
1215047dd67eSAttilio Rao 	return (error);
1216047dd67eSAttilio Rao }
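
/*
 * Editor's usage sketch (not part of the original source): the LK_DRAIN
 * path above is normally reached through the lockmgr(9) wrapper when a
 * lock is about to be torn down.  The "sc_lock" member and the "foolk"
 * wait message below are hypothetical; the pattern, assuming the
 * standard lockmgr(9) interface, is roughly:
 *
 *	lockinit(&sc->sc_lock, PVFS, "foolk", 0, 0);
 *	...
 *	lockmgr(&sc->sc_lock, LK_DRAIN, NULL);
 *	lockmgr(&sc->sc_lock, LK_RELEASE, NULL);
 *	lockdestroy(&sc->sc_lock);
 *
 * A successful LK_DRAIN returns with the lock held exclusively (see the
 * error == 0 block above), hence the LK_RELEASE before lockdestroy().
 */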
1217047dd67eSAttilio Rao 
1218d7a7e179SAttilio Rao void
1219047dd67eSAttilio Rao _lockmgr_disown(struct lock *lk, const char *file, int line)
1220047dd67eSAttilio Rao {
1221047dd67eSAttilio Rao 	uintptr_t tid, x;
1222047dd67eSAttilio Rao 
1223047dd67eSAttilio Rao 	tid = (uintptr_t)curthread;
1224047dd67eSAttilio Rao 	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
1225047dd67eSAttilio Rao 
1226047dd67eSAttilio Rao 	/*
122796f1567fSKonstantin Belousov 	 * If the owner is already LK_KERNPROC, just skip the whole operation.
1228047dd67eSAttilio Rao 	 */
1229047dd67eSAttilio Rao 	if (LK_HOLDER(lk->lk_lock) != tid)
1230047dd67eSAttilio Rao 		return;
123104a28689SJeff Roberson 	lock_profile_release_lock(&lk->lock_object);
1232e5f94314SAttilio Rao 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1233e5f94314SAttilio Rao 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1234e5f94314SAttilio Rao 	TD_LOCKS_DEC(curthread);
1235337c5ff4SAttilio Rao 	STACK_SAVE(lk);
1236047dd67eSAttilio Rao 
1237047dd67eSAttilio Rao 	/*
1238047dd67eSAttilio Rao 	 * In order to preserve waiters flags, just spin.
1239047dd67eSAttilio Rao 	 */
1240047dd67eSAttilio Rao 	for (;;) {
1241651175c9SAttilio Rao 		x = lk->lk_lock;
1242651175c9SAttilio Rao 		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1243651175c9SAttilio Rao 		x &= LK_ALL_WAITERS;
124422dd228dSAttilio Rao 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1245e5f94314SAttilio Rao 		    LK_KERNPROC | x))
1246047dd67eSAttilio Rao 			return;
1247047dd67eSAttilio Rao 		cpu_spinwait();
1248047dd67eSAttilio Rao 	}
1249047dd67eSAttilio Rao }
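
/*
 * Editor's illustration (not from the original source): disowning is the
 * mechanism behind the buffer cache's BUF_KERNPROC(), which hands a
 * locked buffer over to the kernel so that whichever thread completes
 * the asynchronous I/O may legally unlock it.  Assuming the buf(9)
 * wrappers:
 *
 *	BUF_LOCK(bp, LK_EXCLUSIVE, NULL);
 *	(start the asynchronous write)
 *	BUF_KERNPROC(bp);		(owner becomes LK_KERNPROC)
 *	...
 *	BUF_UNLOCK(bp);			(from the I/O completion path)
 */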
1250047dd67eSAttilio Rao 
1251047dd67eSAttilio Rao void
1252047dd67eSAttilio Rao lockmgr_printinfo(struct lock *lk)
1253d7a7e179SAttilio Rao {
1254d7a7e179SAttilio Rao 	struct thread *td;
1255047dd67eSAttilio Rao 	uintptr_t x;
1256d7a7e179SAttilio Rao 
1257047dd67eSAttilio Rao 	if (lk->lk_lock == LK_UNLOCKED)
1258047dd67eSAttilio Rao 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1259047dd67eSAttilio Rao 	else if (lk->lk_lock & LK_SHARE)
1260047dd67eSAttilio Rao 		printf("lock type %s: SHARED (count %ju)\n",
1261047dd67eSAttilio Rao 		    lk->lock_object.lo_name,
1262047dd67eSAttilio Rao 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1263047dd67eSAttilio Rao 	else {
1264047dd67eSAttilio Rao 		td = lockmgr_xholder(lk);
		/*
		 * Guard against dereferencing the LK_KERNPROC sentinel,
		 * which is not a real thread pointer.
		 */
		if (td == (struct thread *)LK_KERNPROC)
			printf("lock type %s: EXCL (disowned)\n",
			    lk->lock_object.lo_name);
		else
			printf("lock type %s: EXCL by thread %p (pid %d)\n",
			    lk->lock_object.lo_name, td, td->td_proc->p_pid);
1267d7a7e179SAttilio Rao 	}
1268d7a7e179SAttilio Rao 
1269047dd67eSAttilio Rao 	x = lk->lk_lock;
1270047dd67eSAttilio Rao 	if (x & LK_EXCLUSIVE_WAITERS)
1271047dd67eSAttilio Rao 		printf(" with exclusive waiters pending\n");
1272047dd67eSAttilio Rao 	if (x & LK_SHARED_WAITERS)
1273047dd67eSAttilio Rao 		printf(" with shared waiters pending\n");
1274651175c9SAttilio Rao 	if (x & LK_EXCLUSIVE_SPINNERS)
1275651175c9SAttilio Rao 		printf(" with exclusive spinners pending\n");
1276047dd67eSAttilio Rao 
1277047dd67eSAttilio Rao 	STACK_PRINT(lk);
1278047dd67eSAttilio Rao }
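
/*
 * For reference, lockmgr_printinfo() output for a contended exclusive
 * lock reads as follows (thread pointer and pid invented for
 * illustration):
 *
 *	lock type bufwait: EXCL by thread 0xc4a8e000 (pid 42)
 *	 with exclusive waiters pending
 */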
1279047dd67eSAttilio Rao 
128099448ed1SJohn Dyson int
1281047dd67eSAttilio Rao lockstatus(struct lock *lk)
128299448ed1SJohn Dyson {
1283047dd67eSAttilio Rao 	uintptr_t v, x;
1284047dd67eSAttilio Rao 	int ret;
128599448ed1SJohn Dyson 
1286047dd67eSAttilio Rao 	ret = LK_SHARED;
1287047dd67eSAttilio Rao 	x = lk->lk_lock;
1288047dd67eSAttilio Rao 	v = LK_HOLDER(x);
12890e9eb108SAttilio Rao 
1290047dd67eSAttilio Rao 	if ((x & LK_SHARE) == 0) {
1291047dd67eSAttilio Rao 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1292047dd67eSAttilio Rao 			ret = LK_EXCLUSIVE;
12936bdfe06aSEivind Eklund 		else
1294047dd67eSAttilio Rao 			ret = LK_EXCLOTHER;
1295047dd67eSAttilio Rao 	} else if (x == LK_UNLOCKED)
1296047dd67eSAttilio Rao 		ret = 0;
129799448ed1SJohn Dyson 
1298047dd67eSAttilio Rao 	return (ret);
129953bf4bb2SPeter Wemm }
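
/*
 * Editor's sketch of a typical lockstatus() consumer (the vnode example
 * is illustrative only): the return value is mostly used in assertions.
 * Note that a lock disowned to LK_KERNPROC reports LK_EXCLUSIVE, while
 * LK_EXCLOTHER means another thread is the exclusive owner.
 *
 *	KASSERT(lockstatus(vp->v_vnlock) == LK_EXCLUSIVE,
 *	    ("vnode %p not exclusively locked", vp));
 */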
1300be6847d7SJohn Baldwin 
130184887fa3SAttilio Rao #ifdef INVARIANT_SUPPORT
1302*de5b1952SAlexander Leidinger 
1303*de5b1952SAlexander Leidinger FEATURE(invariant_support,
1304*de5b1952SAlexander Leidinger     "Support for modules compiled with INVARIANTS option");
1305*de5b1952SAlexander Leidinger 
130684887fa3SAttilio Rao #ifndef INVARIANTS
130784887fa3SAttilio Rao #undef	_lockmgr_assert
130884887fa3SAttilio Rao #endif
130984887fa3SAttilio Rao 
131084887fa3SAttilio Rao void
1311047dd67eSAttilio Rao _lockmgr_assert(struct lock *lk, int what, const char *file, int line)
131284887fa3SAttilio Rao {
131384887fa3SAttilio Rao 	int slocked = 0;
131484887fa3SAttilio Rao 
131584887fa3SAttilio Rao 	if (panicstr != NULL)
131684887fa3SAttilio Rao 		return;
131784887fa3SAttilio Rao 	switch (what) {
131884887fa3SAttilio Rao 	case KA_SLOCKED:
131984887fa3SAttilio Rao 	case KA_SLOCKED | KA_NOTRECURSED:
132084887fa3SAttilio Rao 	case KA_SLOCKED | KA_RECURSED:
132184887fa3SAttilio Rao 		slocked = 1;
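		/* FALLTHROUGH */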
132284887fa3SAttilio Rao 	case KA_LOCKED:
132384887fa3SAttilio Rao 	case KA_LOCKED | KA_NOTRECURSED:
132484887fa3SAttilio Rao 	case KA_LOCKED | KA_RECURSED:
1325e5f94314SAttilio Rao #ifdef WITNESS
1326e5f94314SAttilio Rao 
1327e5f94314SAttilio Rao 		/*
1328e5f94314SAttilio Rao 		 * We cannot trust WITNESS if the lock is held in exclusive
1329e5f94314SAttilio Rao 		 * mode and a call to lockmgr_disown() has happened.
1330e5f94314SAttilio Rao 		 * Work around this by skipping the check if the lock is held
1331e5f94314SAttilio Rao 		 * in exclusive mode, even for the KA_LOCKED case.
1332e5f94314SAttilio Rao 		 */
1333e5f94314SAttilio Rao 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1334e5f94314SAttilio Rao 			witness_assert(&lk->lock_object, what, file, line);
1335e5f94314SAttilio Rao 			break;
1336e5f94314SAttilio Rao 		}
1337e5f94314SAttilio Rao #endif
1338047dd67eSAttilio Rao 		if (lk->lk_lock == LK_UNLOCKED ||
1339047dd67eSAttilio Rao 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1340047dd67eSAttilio Rao 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
134184887fa3SAttilio Rao 			panic("Lock %s not %slocked @ %s:%d\n",
1342047dd67eSAttilio Rao 			    lk->lock_object.lo_name, slocked ? "share" : "",
134384887fa3SAttilio Rao 			    file, line);
1344047dd67eSAttilio Rao 
1345047dd67eSAttilio Rao 		if ((lk->lk_lock & LK_SHARE) == 0) {
1346047dd67eSAttilio Rao 			if (lockmgr_recursed(lk)) {
134784887fa3SAttilio Rao 				if (what & KA_NOTRECURSED)
134884887fa3SAttilio Rao 					panic("Lock %s recursed @ %s:%d\n",
1349047dd67eSAttilio Rao 					    lk->lock_object.lo_name, file,
1350047dd67eSAttilio Rao 					    line);
135184887fa3SAttilio Rao 			} else if (what & KA_RECURSED)
135284887fa3SAttilio Rao 				panic("Lock %s not recursed @ %s:%d\n",
1353047dd67eSAttilio Rao 				    lk->lock_object.lo_name, file, line);
135484887fa3SAttilio Rao 		}
135584887fa3SAttilio Rao 		break;
135684887fa3SAttilio Rao 	case KA_XLOCKED:
135784887fa3SAttilio Rao 	case KA_XLOCKED | KA_NOTRECURSED:
135884887fa3SAttilio Rao 	case KA_XLOCKED | KA_RECURSED:
1359047dd67eSAttilio Rao 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
136084887fa3SAttilio Rao 			panic("Lock %s not exclusively locked @ %s:%d\n",
1361047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
1362047dd67eSAttilio Rao 		if (lockmgr_recursed(lk)) {
136384887fa3SAttilio Rao 			if (what & KA_NOTRECURSED)
136484887fa3SAttilio Rao 				panic("Lock %s recursed @ %s:%d\n",
1365047dd67eSAttilio Rao 				    lk->lock_object.lo_name, file, line);
136684887fa3SAttilio Rao 		} else if (what & KA_RECURSED)
136784887fa3SAttilio Rao 			panic("Lock %s not recursed @ %s:%d\n",
1368047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
136984887fa3SAttilio Rao 		break;
137084887fa3SAttilio Rao 	case KA_UNLOCKED:
1371047dd67eSAttilio Rao 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
137284887fa3SAttilio Rao 			panic("Lock %s exclusively locked @ %s:%d\n",
1373047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
137484887fa3SAttilio Rao 		break;
137584887fa3SAttilio Rao 	default:
1376047dd67eSAttilio Rao 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1377047dd67eSAttilio Rao 		    line);
137884887fa3SAttilio Rao 	}
137984887fa3SAttilio Rao }
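
/*
 * Editor's sketch: these assertions are normally reached through the
 * lockmgr_assert() macro in <sys/lockmgr.h>, which supplies the
 * caller's file and line under INVARIANTS, e.g.:
 *
 *	lockmgr_assert(&sc->sc_lock, KA_XLOCKED | KA_NOTRECURSED);
 *
 * ("sc_lock" is a hypothetical embedded lock, used for illustration.)
 */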
1380047dd67eSAttilio Rao #endif
138184887fa3SAttilio Rao 
1382be6847d7SJohn Baldwin #ifdef DDB
1383462a7addSJohn Baldwin int
1384462a7addSJohn Baldwin lockmgr_chain(struct thread *td, struct thread **ownerp)
1385462a7addSJohn Baldwin {
1386047dd67eSAttilio Rao 	struct lock *lk;
1387462a7addSJohn Baldwin 
1388047dd67eSAttilio Rao 	lk = td->td_wchan;
1389462a7addSJohn Baldwin 
1390047dd67eSAttilio Rao 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1391462a7addSJohn Baldwin 		return (0);
1392047dd67eSAttilio Rao 	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1393047dd67eSAttilio Rao 	if (lk->lk_lock & LK_SHARE)
1394047dd67eSAttilio Rao 		db_printf("SHARED (count %ju)\n",
1395047dd67eSAttilio Rao 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1396047dd67eSAttilio Rao 	else
1397047dd67eSAttilio Rao 		db_printf("EXCL\n");
1398047dd67eSAttilio Rao 	*ownerp = lockmgr_xholder(lk);
1399462a7addSJohn Baldwin 
1400462a7addSJohn Baldwin 	return (1);
1401462a7addSJohn Baldwin }
1402462a7addSJohn Baldwin 
1403047dd67eSAttilio Rao static void
140461bd5e21SKip Macy db_show_lockmgr(struct lock_object *lock)
1405be6847d7SJohn Baldwin {
1406be6847d7SJohn Baldwin 	struct thread *td;
1407047dd67eSAttilio Rao 	struct lock *lk;
1408be6847d7SJohn Baldwin 
1409047dd67eSAttilio Rao 	lk = (struct lock *)lock;
1410be6847d7SJohn Baldwin 
1411be6847d7SJohn Baldwin 	db_printf(" state: ");
1412047dd67eSAttilio Rao 	if (lk->lk_lock == LK_UNLOCKED)
1413be6847d7SJohn Baldwin 		db_printf("UNLOCKED\n");
1414047dd67eSAttilio Rao 	else if (lk->lk_lock & LK_SHARE)
1415047dd67eSAttilio Rao 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1416047dd67eSAttilio Rao 	else {
1417047dd67eSAttilio Rao 		td = lockmgr_xholder(lk);
1418047dd67eSAttilio Rao 		if (td == (struct thread *)LK_KERNPROC)
1419047dd67eSAttilio Rao 			db_printf("XLOCK: LK_KERNPROC\n");
1420047dd67eSAttilio Rao 		else
1421047dd67eSAttilio Rao 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1422047dd67eSAttilio Rao 			    td->td_tid, td->td_proc->p_pid,
1423047dd67eSAttilio Rao 			    td->td_proc->p_comm);
1424047dd67eSAttilio Rao 		if (lockmgr_recursed(lk))
1425047dd67eSAttilio Rao 			db_printf(" recursed: %d\n", lk->lk_recurse);
1426047dd67eSAttilio Rao 	}
1427047dd67eSAttilio Rao 	db_printf(" waiters: ");
1428047dd67eSAttilio Rao 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1429047dd67eSAttilio Rao 	case LK_SHARED_WAITERS:
1430047dd67eSAttilio Rao 		db_printf("shared\n");
1431e5023dd9SEdward Tomasz Napierala 		break;
1432047dd67eSAttilio Rao 	case LK_EXCLUSIVE_WAITERS:
1433047dd67eSAttilio Rao 		db_printf("exclusive\n");
1434047dd67eSAttilio Rao 		break;
1435047dd67eSAttilio Rao 	case LK_ALL_WAITERS:
1436047dd67eSAttilio Rao 		db_printf("shared and exclusive\n");
1437047dd67eSAttilio Rao 		break;
1438047dd67eSAttilio Rao 	default:
1439047dd67eSAttilio Rao 		db_printf("none\n");
1440047dd67eSAttilio Rao 	}
1441651175c9SAttilio Rao 	db_printf(" spinners: ");
1442651175c9SAttilio Rao 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1443651175c9SAttilio Rao 		db_printf("exclusive\n");
1444651175c9SAttilio Rao 	else
1445651175c9SAttilio Rao 		db_printf("none\n");
1446be6847d7SJohn Baldwin }
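
/*
 * Sample output of the function above for a share-locked lock with
 * exclusive waiters (counts illustrative):
 *
 *	 state: SLOCK: 2
 *	 waiters: exclusive
 *	 spinners: none
 */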
1447be6847d7SJohn Baldwin #endif
1448