xref: /freebsd/sys/kern/kern_lock.c (revision c4a48867f1bbaf4ebfa7309700174c36e7f74fe4)
/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

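/*
 * Blocked threads are parked on two separate sleepqueue queues, one for
 * exclusive waiters and one for shared waiters, so that each class can be
 * woken up independently.
 */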
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#endif

#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

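/*
 * A shared lock request may be granted only while the lock is held in
 * shared mode and, unless the requesting thread is exempt from the writer
 * starvation avoidance (it already holds shared locks and did not pass
 * LK_NODDLKTREAT, or it runs with TDP_DEADLKTREAT set), no exclusive
 * waiter or spinner is pending.
 */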
#define	LK_CAN_SHARE(x, flags)						\
	(((x) & LK_SHARE) &&						\
	(((x) & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 0 ||	\
	(curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||	\
	(curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

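/*
 * The lk_lock word encodes both the lock state and its owner: the bits
 * covered by LK_FLAGMASK hold the share bit and the waiters/spinners
 * flags, while the remaining bits hold either the count of shared holders
 * or the owning thread pointer.  The helpers below mask the flag bits off
 * before comparing owners.
 */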
#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
    "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif

static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
    int flags);
static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t x);

static void
lockmgr_note_shared_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

	lock_profile_obtain_lock_success(&lk->lock_object, contested, waittime,
	    file, line);
	LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
	TD_LOCKS_INC(curthread);
	TD_SLOCKS_INC(curthread);
	STACK_SAVE(lk);
}

static void
lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
{

	lock_profile_release_lock(&lk->lock_object);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
}

static void
lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

	lock_profile_obtain_lock_success(&lk->lock_object, contested, waittime,
	    file, line);
	LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
	WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
	    line);
	TD_LOCKS_INC(curthread);
	STACK_SAVE(lk);
}

static void
lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
{

	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
	    line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
}

static void
lockmgr_note_exclusive_upgrade(struct lock *lk, const char *file, int line,
    int flags)
{

	LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
	    line);
	WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
	    LK_TRYWIT(flags), file, line);
	TD_SLOCKS_DEC(curthread);
}

static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * This function assumes the sleepqueue chain lock is held on entry and
 * returns with it unheld.  It also assumes the generic interlock is sane
 * and has been previously checked.  If LK_INTERLOCK is specified, the
 * interlock is not reacquired after the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Choose the sleeping primitive according to the timeout and the
	 * need to catch signals.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}

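/*
 * Drop a shared lock reference, entering the hard path only when waiters
 * have to be woken up.  The return value is non-zero whenever the swapper
 * process must be awakened on behalf of sleepq_broadcast().
 */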
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	u_int realexslp;
	int queue, wakeup_swapper;

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;
		if (lockmgr_sunlock_try(lk, x))
			break;

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid a deadlock with the shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starving the threads sleeping on the shared queue by
		 * giving them precedence and cleaning up the exclusive
		 * waiters bit anyway.
		 * Please note that the lk_exslpfail count may overstate the
		 * real number of waiters with the LK_SLEEPFAIL flag on,
		 * because they may also be used in conjunction with
		 * interruptible sleeps, so lk_exslpfail should be considered
		 * an upper bound, including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}

		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeouts may have
			 * left spurious lk_exslpfail counts on, so clean
			 * them up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lockmgr_note_shared_release(lk, file, line);
	return (wakeup_swapper);
}

static void
assert_lockmgr(const struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, uintptr_t how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static uintptr_t
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif

void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
	    &lk->lk_lock));

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	if (flags & LK_IS_VNODE)
		iflags |= LO_IS_VNODE;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_exslpfail = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	STACK_ZERO(lk);
}

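/*
 * A minimal usage sketch of this KPI via the lockmgr() wrapper macro from
 * <sys/lockmgr.h> (the lock and wmesg names below are illustrative only):
 *
 *	struct lock example_lk;
 *
 *	lockinit(&example_lk, PVFS, "examplelk", 0, 0);
 *	lockmgr(&example_lk, LK_EXCLUSIVE, NULL);
 *	... exclusive section ...
 *	lockmgr(&example_lk, LK_RELEASE, NULL);
 *	lockdestroy(&example_lk);
 */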
/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockdisableshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}

static bool __always_inline
lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags)
{

	/*
	 * If no other thread has an exclusive lock and
	 * no exclusive waiter is present, bump the count of
	 * sharers.  Since we have to preserve the state of
	 * waiters, if we fail to acquire the shared lock
	 * loop back and retry.
	 */
	*xp = lk->lk_lock;
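	/*
	 * On failure atomic_fcmpset_acq_ptr() reloads the observed lock
	 * word into *xp, so each iteration re-evaluates LK_CAN_SHARE()
	 * against fresh state without an extra explicit read.
	 */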
	while (LK_CAN_SHARE(*xp, flags)) {
		if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
		    *xp + LK_ONE_SHARER)) {
			return (true);
		}
	}
	return (false);
}

static bool __always_inline
lockmgr_sunlock_try(struct lock *lk, uintptr_t x)
{

	for (;;) {
		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &x,
			    x - LK_ONE_SHARER))
				return (true);
			continue;
		}

		/*
		 * If there are no waiters at all, drop the lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &x,
			    LK_UNLOCKED))
				return (true);
			continue;
		}
		break;
	}
	return (false);
}

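/*
 * Fast path for the common, uncontested acquisition cases (shared,
 * exclusive and upgrade).  Any case that cannot be completed with a
 * single atomic operation falls back to the fully featured
 * __lockmgr_args() slow path.
 */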
int
lockmgr_lock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
	struct lock_class *class;
	uintptr_t x, v, tid;
	u_int op;
	bool locked;

	op = flags & LK_TYPE_MASK;
	locked = false;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
		if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
			break;
		if (lockmgr_slock_try(lk, &x, flags)) {
			lockmgr_note_shared_acquire(lk, 0, 0,
			    file, line, flags);
			locked = true;
		}
		break;
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);
		tid = (uintptr_t)curthread;
		if (lk->lk_lock == LK_UNLOCKED &&
		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
			    flags);
			locked = true;
		}
		break;
	case LK_UPGRADE:
	case LK_TRYUPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		tid = (uintptr_t)curthread;
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			lockmgr_note_exclusive_upgrade(lk, file, line, flags);
			locked = true;
		}
		break;
	default:
		break;
	}
	if (__predict_true(locked)) {
		if (__predict_false(flags & LK_INTERLOCK)) {
			class = LOCK_CLASS(ilk);
			class->lc_unlock(ilk);
		}
		return (0);
	} else {
		return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
		    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
	}
}

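/*
 * Fast path for the uncontested release cases: dropping a shared
 * reference with no waiters pending, or an unrecursed exclusive lock.
 * Everything else is punted to the __lockmgr_args() slow path with
 * LK_RELEASE set.
 */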
int
lockmgr_unlock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk)
{
	struct lock_class *class;
	uintptr_t x, tid;
	bool unlocked;
	const char *file;
	int line;

	file = __FILE__;
	line = __LINE__;

	_lockmgr_assert(lk, KA_LOCKED, file, line);
	unlocked = false;
	x = lk->lk_lock;
	if (__predict_true((x & LK_SHARE) != 0)) {
		if (lockmgr_sunlock_try(lk, x)) {
			lockmgr_note_shared_release(lk, file, line);
			unlocked = true;
		}
	} else {
		tid = (uintptr_t)curthread;
		if (!lockmgr_recursed(lk) &&
		    atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
			lockmgr_note_exclusive_release(lk, file, line);
			unlocked = true;
		}
	}
	if (__predict_true(unlocked)) {
		if (__predict_false(flags & LK_INTERLOCK)) {
			class = LOCK_CLASS(ilk);
			class->lc_unlock(ilk);
		}
		return (0);
	} else {
		return (__lockmgr_args(lk, flags | LK_RELEASE, ilk, LK_WMESG_DEFAULT,
		    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE));
	}
}

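/*
 * Full-blown lock operation path: implements every LK_* operation with
 * optional interlock handling, adaptive spinning and sleeping.  The
 * wmesg/pri/timo arguments fall back to the values recorded by lockinit()
 * when the LK_*_DEFAULT sentinels are passed.
 */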
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
	    lk->lock_object.lo_name, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		switch (op) {
		case LK_SHARED:
			op = LK_EXCLUSIVE;
			break;
		case LK_UPGRADE:
		case LK_TRYUPGRADE:
		case LK_DOWNGRADE:
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			    file, line);
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			return (0);
		}
	}

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
		for (;;) {
			if (lockmgr_slock_try(lk, &x, flags))
				break;
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the lock is expected not to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  We need a double-state handle here
			 * because for a failed acquisition the lock can be
			 * either held in exclusive mode or shared mode
			 * (for the writer starvation avoidance technique).
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", lk->lock_object.lo_name);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", lk->lock_object.lo_name);
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x, flags) != 0)
						break;
					cpu_spinwait();
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate the waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x, flags)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and try
			 * again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
#ifdef LOCK_PROFILING
			lockmgr_note_shared_acquire(lk, contested, waittime,
			    file, line, flags);
#else
			lockmgr_note_shared_acquire(lk, 0, 0, file, line,
			    flags);
#endif
		}
		break;
	case LK_UPGRADE:
	case LK_TRYUPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * In LK_TRYUPGRADE mode, do not drop the lock,
		 * returning EBUSY instead.
		 */
		if (op == LK_TRYUPGRADE) {
			LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
			    __func__, lk);
			error = EBUSY;
			break;
		}

		/*
		 * We have been unable to upgrade, so just give up the
		 * shared lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * If the caller requested a try operation,
				 * just give up and return EBUSY instead of
				 * panicking.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		for (;;) {
			if (lk->lk_lock == LK_UNLOCKED &&
			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
				break;
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is expected not to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", lk->lock_object.lo_name);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", lk->lock_object.lo_name);
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate the waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and try
			 * again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif
1098651175c9SAttilio Rao 
1099047dd67eSAttilio Rao 			/*
1100047dd67eSAttilio Rao 			 * The lock can be in the state where there is a
1101047dd67eSAttilio Rao 			 * pending queue of waiters, but still no owner.
1102047dd67eSAttilio Rao 			 * This happens when the lock is contested and an
1103047dd67eSAttilio Rao 			 * owner is going to claim the lock.
1104047dd67eSAttilio Rao 			 * If curthread is the one that successfully acquires
1105047dd67eSAttilio Rao 			 * it, claim lock ownership and return, preserving the
1106047dd67eSAttilio Rao 			 * waiters flags.
1107047dd67eSAttilio Rao 			 */
1108651175c9SAttilio Rao 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1109651175c9SAttilio Rao 			if ((x & ~v) == LK_UNLOCKED) {
1110651175c9SAttilio Rao 				v &= ~LK_EXCLUSIVE_SPINNERS;
1111047dd67eSAttilio Rao 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
1112047dd67eSAttilio Rao 				    tid | v)) {
1113047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
1114047dd67eSAttilio Rao 					LOCK_LOG2(lk,
1115047dd67eSAttilio Rao 					    "%s: %p claimed by a new writer",
1116047dd67eSAttilio Rao 					    __func__, lk);
1117047dd67eSAttilio Rao 					break;
1118047dd67eSAttilio Rao 				}
1119047dd67eSAttilio Rao 				sleepq_release(&lk->lock_object);
1120047dd67eSAttilio Rao 				continue;
1121047dd67eSAttilio Rao 			}
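			/*
			 * Illustrative sketch for the claim above (symbolic;
			 * the lk_lock bit layout is assumed from
			 * sys/lockmgr.h): after an exclusive release that left
			 * waiters behind, the lock word reads
			 *
			 *	x == LK_UNLOCKED | LK_EXCLUSIVE_WAITERS
			 *	v == LK_EXCLUSIVE_WAITERS
			 *	(x & ~v) == LK_UNLOCKED		-> claimable
			 *
			 * so the CAS swaps in (tid | v): curthread becomes
			 * the owner while the waiters bits, minus the
			 * spinners bit, survive the transition.
			 */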
1122047dd67eSAttilio Rao 
1123047dd67eSAttilio Rao 			/*
1124047dd67eSAttilio Rao 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1125047dd67eSAttilio Rao 			 * fail, loop back and retry.
1126047dd67eSAttilio Rao 			 */
1127047dd67eSAttilio Rao 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1128047dd67eSAttilio Rao 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1129047dd67eSAttilio Rao 				    x | LK_EXCLUSIVE_WAITERS)) {
1130047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
1131047dd67eSAttilio Rao 					continue;
1132047dd67eSAttilio Rao 				}
1133047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
1134047dd67eSAttilio Rao 				    __func__, lk);
1135047dd67eSAttilio Rao 			}
1136047dd67eSAttilio Rao 
1137047dd67eSAttilio Rao 			/*
1138047dd67eSAttilio Rao 			 * As long as we have been unable to acquire the
1139047dd67eSAttilio Rao 			 * exclusive lock and the exclusive waiters flag
1140047dd67eSAttilio Rao 			 * is set, we will sleep.
1141047dd67eSAttilio Rao 			 */
1142047dd67eSAttilio Rao 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
1143047dd67eSAttilio Rao 			    SQ_EXCLUSIVE_QUEUE);
1144047dd67eSAttilio Rao 			flags &= ~LK_INTERLOCK;
1145047dd67eSAttilio Rao 			if (error) {
1146047dd67eSAttilio Rao 				LOCK_LOG3(lk,
1147047dd67eSAttilio Rao 				    "%s: interrupted sleep for %p with %d",
1148047dd67eSAttilio Rao 				    __func__, lk, error);
1149047dd67eSAttilio Rao 				break;
1150047dd67eSAttilio Rao 			}
1151047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1152047dd67eSAttilio Rao 			    __func__, lk);
1153047dd67eSAttilio Rao 		}
1154047dd67eSAttilio Rao 		if (error == 0) {
1155*c4a48867SMateusz Guzik #ifdef LOCK_PROFILING
1156*c4a48867SMateusz Guzik 			lockmgr_note_exclusive_acquire(lk, contested, waittime,
1157*c4a48867SMateusz Guzik 			    file, line, flags);
1158*c4a48867SMateusz Guzik #else
1159*c4a48867SMateusz Guzik 			lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1160*c4a48867SMateusz Guzik 			    flags);
1161*c4a48867SMateusz Guzik #endif
1162047dd67eSAttilio Rao 		}
1163047dd67eSAttilio Rao 		break;
1164047dd67eSAttilio Rao 	case LK_DOWNGRADE:
11651c7d98d0SAttilio Rao 		_lockmgr_assert(lk, KA_XLOCKED, file, line);
1166e5f94314SAttilio Rao 		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
1167e5f94314SAttilio Rao 		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
11681c7d98d0SAttilio Rao 
11691c7d98d0SAttilio Rao 		/*
11701c7d98d0SAttilio Rao 		 * Panic if the lock is recursed.
11711c7d98d0SAttilio Rao 		 */
11721c7d98d0SAttilio Rao 		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
11731c7d98d0SAttilio Rao 			if (flags & LK_INTERLOCK)
11741c7d98d0SAttilio Rao 				class->lc_unlock(ilk);
11751c7d98d0SAttilio Rao 			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
11761c7d98d0SAttilio Rao 			    __func__, iwmesg, file, line);
11771c7d98d0SAttilio Rao 		}
1178e5f94314SAttilio Rao 		TD_SLOCKS_INC(curthread);
1179047dd67eSAttilio Rao 
1180047dd67eSAttilio Rao 		/*
1181047dd67eSAttilio Rao 		 * In order to preserve waiters flags, just spin.
1182047dd67eSAttilio Rao 		 */
1183047dd67eSAttilio Rao 		for (;;) {
1184651175c9SAttilio Rao 			x = lk->lk_lock;
1185651175c9SAttilio Rao 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1186651175c9SAttilio Rao 			x &= LK_ALL_WAITERS;
1187047dd67eSAttilio Rao 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1188e5f94314SAttilio Rao 			    LK_SHARERS_LOCK(1) | x))
1189047dd67eSAttilio Rao 				break;
1190047dd67eSAttilio Rao 			cpu_spinwait();
1191047dd67eSAttilio Rao 		}
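		/*
		 * Illustrative transition for the CAS above (tid stands for
		 * the curthread pointer; bit names as in sys/lockmgr.h):
		 *
		 *	tid | LK_SHARED_WAITERS  ->
		 *	    LK_SHARERS_LOCK(1) | LK_SHARED_WAITERS
		 *
		 * i.e. the exclusive owner is atomically replaced by a share
		 * count of one while the waiters bits are carried over, so no
		 * queued thread is lost.
		 */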
1192047dd67eSAttilio Rao 		break;
1193047dd67eSAttilio Rao 	case LK_RELEASE:
1194047dd67eSAttilio Rao 		_lockmgr_assert(lk, KA_LOCKED, file, line);
1195047dd67eSAttilio Rao 		x = lk->lk_lock;
1196047dd67eSAttilio Rao 
1197047dd67eSAttilio Rao 		if ((x & LK_SHARE) == 0) {
1198047dd67eSAttilio Rao 
1199047dd67eSAttilio Rao 			/*
1200047dd67eSAttilio Rao 			 * As a first option, treat the lock as if it has no
1201047dd67eSAttilio Rao 			 * waiters.
1202047dd67eSAttilio Rao 			 * Fix up the tid variable if the lock has been disowned.
1203047dd67eSAttilio Rao 			 */
1204047dd67eSAttilio Rao 			if (LK_HOLDER(x) == LK_KERNPROC)
1205047dd67eSAttilio Rao 				tid = LK_KERNPROC;
1206e5f94314SAttilio Rao 			else {
1207e5f94314SAttilio Rao 				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
1208e5f94314SAttilio Rao 				    file, line);
1209047dd67eSAttilio Rao 				TD_LOCKS_DEC(curthread);
1210e5f94314SAttilio Rao 			}
1211047dd67eSAttilio Rao 			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
1212047dd67eSAttilio Rao 			    lk->lk_recurse, file, line);
1213047dd67eSAttilio Rao 
1214047dd67eSAttilio Rao 			/*
1215047dd67eSAttilio Rao 			 * The lock is held in exclusive mode.
1216047dd67eSAttilio Rao 			 * If the lock is recursed also, then unrecurse it.
1217047dd67eSAttilio Rao 			 */
1218047dd67eSAttilio Rao 			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1219047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
1220047dd67eSAttilio Rao 				    lk);
1221047dd67eSAttilio Rao 				lk->lk_recurse--;
1222047dd67eSAttilio Rao 				break;
1223047dd67eSAttilio Rao 			}
122404a28689SJeff Roberson 			if (tid != LK_KERNPROC)
1225047dd67eSAttilio Rao 				lock_profile_release_lock(&lk->lock_object);
1226047dd67eSAttilio Rao 
1227047dd67eSAttilio Rao 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
1228047dd67eSAttilio Rao 			    LK_UNLOCKED))
1229047dd67eSAttilio Rao 				break;
1230047dd67eSAttilio Rao 
1231047dd67eSAttilio Rao 			sleepq_lock(&lk->lock_object);
1232651175c9SAttilio Rao 			x = lk->lk_lock;
1233047dd67eSAttilio Rao 			v = LK_UNLOCKED;
1234047dd67eSAttilio Rao 
1235047dd67eSAttilio Rao 			/*
1236047dd67eSAttilio Rao 			 * If the lock has exclusive waiters, give them
1237047dd67eSAttilio Rao 			 * preference in order to avoid deadlock with
1238047dd67eSAttilio Rao 			 * shared runners-up.
12392028867dSAttilio Rao 			 * If interruptible sleeps left the exclusive queue
12402028867dSAttilio Rao 			 * empty, avoid starvation for the threads sleeping
12412028867dSAttilio Rao 			 * on the shared queue by giving them precedence
12422028867dSAttilio Rao 			 * and cleaning up the exclusive waiters bit anyway.
1243c636ba83SAttilio Rao 			 * Please note that the lk_exslpfail count may be
1244c636ba83SAttilio Rao 			 * lying about the real number of waiters with the
1245c636ba83SAttilio Rao 			 * LK_SLEEPFAIL flag on because they may be used in
1246e3043798SPedro F. Giffuni 			 * conjunction with interruptible sleeps, so
1247aab9c8c2SAttilio Rao 			 * lk_exslpfail should be considered an upper
1248aab9c8c2SAttilio Rao 			 * bound, including the edge cases.
1249047dd67eSAttilio Rao 			 */
1250651175c9SAttilio Rao 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
12512028867dSAttilio Rao 			realexslp = sleepq_sleepcnt(&lk->lock_object,
12522028867dSAttilio Rao 			    SQ_EXCLUSIVE_QUEUE);
12532028867dSAttilio Rao 			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
12542028867dSAttilio Rao 				if (lk->lk_exslpfail < realexslp) {
12552028867dSAttilio Rao 					lk->lk_exslpfail = 0;
1256047dd67eSAttilio Rao 					queue = SQ_EXCLUSIVE_QUEUE;
1257047dd67eSAttilio Rao 					v |= (x & LK_SHARED_WAITERS);
1258047dd67eSAttilio Rao 				} else {
12592028867dSAttilio Rao 					lk->lk_exslpfail = 0;
12602028867dSAttilio Rao 					LOCK_LOG2(lk,
12612028867dSAttilio Rao 					"%s: %p has only LK_SLEEPFAIL sleepers",
12622028867dSAttilio Rao 					    __func__, lk);
12632028867dSAttilio Rao 					LOCK_LOG2(lk,
12642028867dSAttilio Rao 			"%s: %p waking up threads on the exclusive queue",
12652028867dSAttilio Rao 					    __func__, lk);
12662028867dSAttilio Rao 					wakeup_swapper =
12672028867dSAttilio Rao 					    sleepq_broadcast(&lk->lock_object,
12682028867dSAttilio Rao 					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
12692028867dSAttilio Rao 					queue = SQ_SHARED_QUEUE;
12702028867dSAttilio Rao 				}
12712028867dSAttilio Rao 			} else {
12729dbf7a62SAttilio Rao 
12739dbf7a62SAttilio Rao 				/*
12749dbf7a62SAttilio Rao 				 * Exclusive waiters sleeping with LK_SLEEPFAIL
12759dbf7a62SAttilio Rao 				 * on and using interruptible sleeps/timeouts
12769dbf7a62SAttilio Rao 				 * may have left spurious lk_exslpfail counts
12779dbf7a62SAttilio Rao 				 * on, so clean them up anyway.
12789dbf7a62SAttilio Rao 				 */
12799dbf7a62SAttilio Rao 				lk->lk_exslpfail = 0;
1280047dd67eSAttilio Rao 				queue = SQ_SHARED_QUEUE;
1281047dd67eSAttilio Rao 			}
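			/*
			 * Worked example for the branch above (made-up
			 * numbers): with realexslp == 3 and lk_exslpfail == 3
			 * every exclusive sleeper is an LK_SLEEPFAIL one, so
			 * waking them would only make them fail; they are
			 * broadcast anyway and the shared queue inherits the
			 * preference.  With lk_exslpfail == 1 instead, at
			 * least one exclusive waiter can really take the lock
			 * and the exclusive queue keeps its precedence.
			 */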
1282047dd67eSAttilio Rao 
1283047dd67eSAttilio Rao 			LOCK_LOG3(lk,
1284047dd67eSAttilio Rao 			    "%s: %p waking up threads on the %s queue",
1285047dd67eSAttilio Rao 			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1286047dd67eSAttilio Rao 			    "exclusive");
1287047dd67eSAttilio Rao 			atomic_store_rel_ptr(&lk->lk_lock, v);
12882028867dSAttilio Rao 			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
1289da7bbd2cSJohn Baldwin 			    SLEEPQ_LK, 0, queue);
1290047dd67eSAttilio Rao 			sleepq_release(&lk->lock_object);
1291047dd67eSAttilio Rao 			break;
1292047dd67eSAttilio Rao 		} else
1293da7bbd2cSJohn Baldwin 			wakeup_swapper = wakeupshlk(lk, file, line);
1294047dd67eSAttilio Rao 		break;
1295047dd67eSAttilio Rao 	case LK_DRAIN:
1296e5f94314SAttilio Rao 		if (LK_CAN_WITNESS(flags))
1297e5f94314SAttilio Rao 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
129824150d37SJohn Baldwin 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
129924150d37SJohn Baldwin 			    ilk : NULL);
1300047dd67eSAttilio Rao 
1301047dd67eSAttilio Rao 		/*
130296f1567fSKonstantin Belousov 		 * Trying to drain a lock we already own will result in a
1303047dd67eSAttilio Rao 		 * deadlock.
1304047dd67eSAttilio Rao 		 */
1305047dd67eSAttilio Rao 		if (lockmgr_xlocked(lk)) {
1306047dd67eSAttilio Rao 			if (flags & LK_INTERLOCK)
1307047dd67eSAttilio Rao 				class->lc_unlock(ilk);
1308047dd67eSAttilio Rao 			panic("%s: draining %s with the lock held @ %s:%d\n",
1309047dd67eSAttilio Rao 			    __func__, iwmesg, file, line);
1310047dd67eSAttilio Rao 		}
1311047dd67eSAttilio Rao 
1312fc4f686dSMateusz Guzik 		for (;;) {
1313fc4f686dSMateusz Guzik 			if (lk->lk_lock == LK_UNLOCKED &&
1314fc4f686dSMateusz Guzik 			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1315fc4f686dSMateusz Guzik 				break;
1316fc4f686dSMateusz Guzik 
1317f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
1318f5f9340bSFabien Thomas 			PMC_SOFT_CALL( , , lock, failed);
1319f5f9340bSFabien Thomas #endif
1320047dd67eSAttilio Rao 			lock_profile_obtain_lock_failed(&lk->lock_object,
1321047dd67eSAttilio Rao 			    &contested, &waittime);
1322047dd67eSAttilio Rao 
1323047dd67eSAttilio Rao 			/*
1324047dd67eSAttilio Rao 			 * If the operation is not expected to sleep, just
1325047dd67eSAttilio Rao 			 * give up and return.
1326047dd67eSAttilio Rao 			 */
1327047dd67eSAttilio Rao 			if (LK_TRYOP(flags)) {
1328047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p fails the try operation",
1329047dd67eSAttilio Rao 				    __func__, lk);
1330047dd67eSAttilio Rao 				error = EBUSY;
1331047dd67eSAttilio Rao 				break;
1332047dd67eSAttilio Rao 			}
1333047dd67eSAttilio Rao 
1334047dd67eSAttilio Rao 			/*
1335047dd67eSAttilio Rao 			 * Acquire the sleepqueue chain lock because we
1336047dd67eSAttilio Rao 			 * probably will need to manipulate waiters flags.
1337047dd67eSAttilio Rao 			 */
1338047dd67eSAttilio Rao 			sleepq_lock(&lk->lock_object);
1339047dd67eSAttilio Rao 			x = lk->lk_lock;
1340047dd67eSAttilio Rao 
1341047dd67eSAttilio Rao 			/*
1342047dd67eSAttilio Rao 			 * If the lock has been released while we spun on
1343047dd67eSAttilio Rao 			 * the sleepqueue chain lock, just try again.
1344047dd67eSAttilio Rao 			 */
1345047dd67eSAttilio Rao 			if (x == LK_UNLOCKED) {
1346047dd67eSAttilio Rao 				sleepq_release(&lk->lock_object);
1347047dd67eSAttilio Rao 				continue;
1348047dd67eSAttilio Rao 			}
1349047dd67eSAttilio Rao 
1350651175c9SAttilio Rao 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1351651175c9SAttilio Rao 			if ((x & ~v) == LK_UNLOCKED) {
1352651175c9SAttilio Rao 				v = (x & ~LK_EXCLUSIVE_SPINNERS);
13532028867dSAttilio Rao 
13542028867dSAttilio Rao 				/*
13552028867dSAttilio Rao 				 * If interruptible sleeps left the exclusive
13562028867dSAttilio Rao 				 * queue empty, avoid starvation for the
13572028867dSAttilio Rao 				 * threads sleeping on the shared queue by
13582028867dSAttilio Rao 				 * giving them precedence and cleaning up the
13592028867dSAttilio Rao 				 * exclusive waiters bit anyway.
1360c636ba83SAttilio Rao 				 * Please note that the lk_exslpfail count may
1361c636ba83SAttilio Rao 				 * be lying about the real number of waiters
1362c636ba83SAttilio Rao 				 * with the LK_SLEEPFAIL flag on because they
1363e3043798SPedro F. Giffuni 				 * may be used in conjunction with interruptible
1364aab9c8c2SAttilio Rao 				 * sleeps, so lk_exslpfail should be considered
1365aab9c8c2SAttilio Rao 				 * an upper bound, including the edge
1366c636ba83SAttilio Rao 				 * cases.
13672028867dSAttilio Rao 				 */
1368047dd67eSAttilio Rao 				if (v & LK_EXCLUSIVE_WAITERS) {
1369047dd67eSAttilio Rao 					queue = SQ_EXCLUSIVE_QUEUE;
1370047dd67eSAttilio Rao 					v &= ~LK_EXCLUSIVE_WAITERS;
1371047dd67eSAttilio Rao 				} else {
13729dbf7a62SAttilio Rao 
13739dbf7a62SAttilio Rao 					/*
13749dbf7a62SAttilio Rao 					 * Exclusive waiters sleeping with
13759dbf7a62SAttilio Rao 					 * LK_SLEEPFAIL on and using
13769dbf7a62SAttilio Rao 					 * interruptible sleeps/timeouts may
13779dbf7a62SAttilio Rao 					 * have left spurious lk_exslpfail
13789dbf7a62SAttilio Rao 					 * counts on, so clean them up anyway.
13799dbf7a62SAttilio Rao 					 */
1380047dd67eSAttilio Rao 					MPASS(v & LK_SHARED_WAITERS);
13819dbf7a62SAttilio Rao 					lk->lk_exslpfail = 0;
1382047dd67eSAttilio Rao 					queue = SQ_SHARED_QUEUE;
1383047dd67eSAttilio Rao 					v &= ~LK_SHARED_WAITERS;
1384047dd67eSAttilio Rao 				}
13852028867dSAttilio Rao 				if (queue == SQ_EXCLUSIVE_QUEUE) {
13862028867dSAttilio Rao 					realexslp =
13872028867dSAttilio Rao 					    sleepq_sleepcnt(&lk->lock_object,
13882028867dSAttilio Rao 					    SQ_EXCLUSIVE_QUEUE);
13892028867dSAttilio Rao 					if (lk->lk_exslpfail >= realexslp) {
13902028867dSAttilio Rao 						lk->lk_exslpfail = 0;
13912028867dSAttilio Rao 						queue = SQ_SHARED_QUEUE;
13922028867dSAttilio Rao 						v &= ~LK_SHARED_WAITERS;
13932028867dSAttilio Rao 						if (realexslp != 0) {
13942028867dSAttilio Rao 							LOCK_LOG2(lk,
13952028867dSAttilio Rao 					"%s: %p has only LK_SLEEPFAIL sleepers",
13962028867dSAttilio Rao 							    __func__, lk);
13972028867dSAttilio Rao 							LOCK_LOG2(lk,
13982028867dSAttilio Rao 			"%s: %p waking up threads on the exclusive queue",
13992028867dSAttilio Rao 							    __func__, lk);
14002028867dSAttilio Rao 							wakeup_swapper =
14012028867dSAttilio Rao 							    sleepq_broadcast(
14022028867dSAttilio Rao 							    &lk->lock_object,
14032028867dSAttilio Rao 							    SLEEPQ_LK, 0,
14042028867dSAttilio Rao 							    SQ_EXCLUSIVE_QUEUE);
14052028867dSAttilio Rao 						}
14062028867dSAttilio Rao 					} else
14072028867dSAttilio Rao 						lk->lk_exslpfail = 0;
14082028867dSAttilio Rao 				}
1409047dd67eSAttilio Rao 				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1410047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
1411047dd67eSAttilio Rao 					continue;
1412047dd67eSAttilio Rao 				}
1413047dd67eSAttilio Rao 				LOCK_LOG3(lk,
1414047dd67eSAttilio Rao 				"%s: %p waking up all threads on the %s queue",
1415047dd67eSAttilio Rao 				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1416047dd67eSAttilio Rao 				    "shared" : "exclusive");
1417814f26daSJohn Baldwin 				wakeup_swapper |= sleepq_broadcast(
1418da7bbd2cSJohn Baldwin 				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1419047dd67eSAttilio Rao 
1420047dd67eSAttilio Rao 				/*
1421047dd67eSAttilio Rao 				 * If shared waiters have been woken up, we need
1422047dd67eSAttilio Rao 				 * to wait for one of them to acquire the lock
1423047dd67eSAttilio Rao 				 * before setting the exclusive waiters flag, in
1424047dd67eSAttilio Rao 				 * order to avoid a deadlock.
1425047dd67eSAttilio Rao 				 */
1426047dd67eSAttilio Rao 				if (queue == SQ_SHARED_QUEUE) {
1427047dd67eSAttilio Rao 					for (v = lk->lk_lock;
1428047dd67eSAttilio Rao 					    (v & LK_SHARE) && !LK_SHARERS(v);
1429047dd67eSAttilio Rao 					    v = lk->lk_lock)
1430047dd67eSAttilio Rao 						cpu_spinwait();
1431047dd67eSAttilio Rao 				}
1432047dd67eSAttilio Rao 			}
1433047dd67eSAttilio Rao 
1434047dd67eSAttilio Rao 			/*
1435047dd67eSAttilio Rao 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1436047dd67eSAttilio Rao 			 * fail, loop back and retry.
1437047dd67eSAttilio Rao 			 */
1438047dd67eSAttilio Rao 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1439047dd67eSAttilio Rao 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1440047dd67eSAttilio Rao 				    x | LK_EXCLUSIVE_WAITERS)) {
1441047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
1442047dd67eSAttilio Rao 					continue;
1443047dd67eSAttilio Rao 				}
1444047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1445047dd67eSAttilio Rao 				    __func__, lk);
1446047dd67eSAttilio Rao 			}
1447047dd67eSAttilio Rao 
1448047dd67eSAttilio Rao 			/*
1449047dd67eSAttilio Rao 			 * As long as we have been unable to acquire the
1450047dd67eSAttilio Rao 			 * exclusive lock and the exclusive waiters flag
1451047dd67eSAttilio Rao 			 * is set, we will sleep.
1452047dd67eSAttilio Rao 			 */
1453047dd67eSAttilio Rao 			if (flags & LK_INTERLOCK) {
1454047dd67eSAttilio Rao 				class->lc_unlock(ilk);
1455047dd67eSAttilio Rao 				flags &= ~LK_INTERLOCK;
1456047dd67eSAttilio Rao 			}
1457e5f94314SAttilio Rao 			GIANT_SAVE();
1458047dd67eSAttilio Rao 			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1459047dd67eSAttilio Rao 			    SQ_EXCLUSIVE_QUEUE);
1460047dd67eSAttilio Rao 			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1461e5f94314SAttilio Rao 			GIANT_RESTORE();
1462047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1463047dd67eSAttilio Rao 			    __func__, lk);
1464047dd67eSAttilio Rao 		}
1465047dd67eSAttilio Rao 
1466047dd67eSAttilio Rao 		if (error == 0) {
1467047dd67eSAttilio Rao 			lock_profile_obtain_lock_success(&lk->lock_object,
1468047dd67eSAttilio Rao 			    contested, waittime, file, line);
1469047dd67eSAttilio Rao 			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1470047dd67eSAttilio Rao 			    lk->lk_recurse, file, line);
1471e5f94314SAttilio Rao 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1472e5f94314SAttilio Rao 			    LK_TRYWIT(flags), file, line);
1473047dd67eSAttilio Rao 			TD_LOCKS_INC(curthread);
1474047dd67eSAttilio Rao 			STACK_SAVE(lk);
1475047dd67eSAttilio Rao 		}
1476047dd67eSAttilio Rao 		break;
1477047dd67eSAttilio Rao 	default:
1478047dd67eSAttilio Rao 		if (flags & LK_INTERLOCK)
1479047dd67eSAttilio Rao 			class->lc_unlock(ilk);
1480047dd67eSAttilio Rao 		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1481047dd67eSAttilio Rao 	}
1482047dd67eSAttilio Rao 
1483047dd67eSAttilio Rao 	if (flags & LK_INTERLOCK)
1484047dd67eSAttilio Rao 		class->lc_unlock(ilk);
1485da7bbd2cSJohn Baldwin 	if (wakeup_swapper)
1486da7bbd2cSJohn Baldwin 		kick_proc0();
1487047dd67eSAttilio Rao 
1488047dd67eSAttilio Rao 	return (error);
1489047dd67eSAttilio Rao }
1490047dd67eSAttilio Rao 
1491d7a7e179SAttilio Rao void
1492047dd67eSAttilio Rao _lockmgr_disown(struct lock *lk, const char *file, int line)
1493047dd67eSAttilio Rao {
1494047dd67eSAttilio Rao 	uintptr_t tid, x;
1495047dd67eSAttilio Rao 
149635370593SAndriy Gapon 	if (SCHEDULER_STOPPED())
149735370593SAndriy Gapon 		return;
149835370593SAndriy Gapon 
1499047dd67eSAttilio Rao 	tid = (uintptr_t)curthread;
15001c7d98d0SAttilio Rao 	_lockmgr_assert(lk, KA_XLOCKED, file, line);
15011c7d98d0SAttilio Rao 
15021c7d98d0SAttilio Rao 	/*
15031c7d98d0SAttilio Rao 	 * Panic if the lock is recursed.
15041c7d98d0SAttilio Rao 	 */
15051c7d98d0SAttilio Rao 	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
15061c7d98d0SAttilio Rao 		panic("%s: disown a recursed lockmgr @ %s:%d\n",
15071c7d98d0SAttilio Rao 		    __func__,  file, line);
1508047dd67eSAttilio Rao 
1509047dd67eSAttilio Rao 	/*
151096f1567fSKonstantin Belousov 	 * If the owner is already LK_KERNPROC, just skip the whole operation.
1511047dd67eSAttilio Rao 	 */
1512047dd67eSAttilio Rao 	if (LK_HOLDER(lk->lk_lock) != tid)
1513047dd67eSAttilio Rao 		return;
151404a28689SJeff Roberson 	lock_profile_release_lock(&lk->lock_object);
1515e5f94314SAttilio Rao 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1516e5f94314SAttilio Rao 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1517e5f94314SAttilio Rao 	TD_LOCKS_DEC(curthread);
1518337c5ff4SAttilio Rao 	STACK_SAVE(lk);
1519047dd67eSAttilio Rao 
1520047dd67eSAttilio Rao 	/*
1521047dd67eSAttilio Rao 	 * In order to preserve waiters flags, just spin.
1522047dd67eSAttilio Rao 	 */
1523047dd67eSAttilio Rao 	for (;;) {
1524651175c9SAttilio Rao 		x = lk->lk_lock;
1525651175c9SAttilio Rao 		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1526651175c9SAttilio Rao 		x &= LK_ALL_WAITERS;
152722dd228dSAttilio Rao 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1528e5f94314SAttilio Rao 		    LK_KERNPROC | x))
1529047dd67eSAttilio Rao 			return;
1530047dd67eSAttilio Rao 		cpu_spinwait();
1531047dd67eSAttilio Rao 	}
1532047dd67eSAttilio Rao }
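
/*
 * Usage sketch (hypothetical caller, not part of this file): a thread may
 * hand a locked buffer over to the kernel so another context can release it
 * later on its behalf.  The lockmgr_disown() wrapper is assumed to pass
 * LOCK_FILE/LOCK_LINE, as the other lockmgr entry points do:
 *
 *	lockmgr(&bp->b_lock, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr_disown(&bp->b_lock);	(owner becomes LK_KERNPROC)
 */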
1533047dd67eSAttilio Rao 
1534047dd67eSAttilio Rao void
1535d576deedSPawel Jakub Dawidek lockmgr_printinfo(const struct lock *lk)
1536d7a7e179SAttilio Rao {
1537d7a7e179SAttilio Rao 	struct thread *td;
1538047dd67eSAttilio Rao 	uintptr_t x;
1539d7a7e179SAttilio Rao 
1540047dd67eSAttilio Rao 	if (lk->lk_lock == LK_UNLOCKED)
1541047dd67eSAttilio Rao 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1542047dd67eSAttilio Rao 	else if (lk->lk_lock & LK_SHARE)
1543047dd67eSAttilio Rao 		printf("lock type %s: SHARED (count %ju)\n",
1544047dd67eSAttilio Rao 		    lk->lock_object.lo_name,
1545047dd67eSAttilio Rao 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1546047dd67eSAttilio Rao 	else {
1547047dd67eSAttilio Rao 		td = lockmgr_xholder(lk);
1548e64b4fa8SKonstantin Belousov 		if (td == (struct thread *)LK_KERNPROC)
1549e64b4fa8SKonstantin Belousov 			printf("lock type %s: EXCL by KERNPROC\n",
1550e64b4fa8SKonstantin Belousov 			    lk->lock_object.lo_name);
1551e64b4fa8SKonstantin Belousov 		else
15522573ea5fSIvan Voras 			printf("lock type %s: EXCL by thread %p "
1553e64b4fa8SKonstantin Belousov 			    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1554e64b4fa8SKonstantin Belousov 			    td, td->td_proc->p_pid, td->td_proc->p_comm,
1555e64b4fa8SKonstantin Belousov 			    td->td_tid);
1556d7a7e179SAttilio Rao 	}
1557d7a7e179SAttilio Rao 
1558047dd67eSAttilio Rao 	x = lk->lk_lock;
1559047dd67eSAttilio Rao 	if (x & LK_EXCLUSIVE_WAITERS)
1560047dd67eSAttilio Rao 		printf(" with exclusive waiters pending\n");
1561047dd67eSAttilio Rao 	if (x & LK_SHARED_WAITERS)
1562047dd67eSAttilio Rao 		printf(" with shared waiters pending\n");
1563651175c9SAttilio Rao 	if (x & LK_EXCLUSIVE_SPINNERS)
1564651175c9SAttilio Rao 		printf(" with exclusive spinners pending\n");
1565047dd67eSAttilio Rao 
1566047dd67eSAttilio Rao 	STACK_PRINT(lk);
1567047dd67eSAttilio Rao }
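
/*
 * Example output (the format mirrors the printf() calls above; the names and
 * values are made up):
 *
 *	lock type bufwait: EXCL by thread 0xfffff80003bb9000 (pid 42, syncer, tid 100077)
 *	 with exclusive waiters pending
 */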
1568047dd67eSAttilio Rao 
156999448ed1SJohn Dyson int
1570d576deedSPawel Jakub Dawidek lockstatus(const struct lock *lk)
157199448ed1SJohn Dyson {
1572047dd67eSAttilio Rao 	uintptr_t v, x;
1573047dd67eSAttilio Rao 	int ret;
157499448ed1SJohn Dyson 
1575047dd67eSAttilio Rao 	ret = LK_SHARED;
1576047dd67eSAttilio Rao 	x = lk->lk_lock;
1577047dd67eSAttilio Rao 	v = LK_HOLDER(x);
15780e9eb108SAttilio Rao 
1579047dd67eSAttilio Rao 	if ((x & LK_SHARE) == 0) {
1580047dd67eSAttilio Rao 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1581047dd67eSAttilio Rao 			ret = LK_EXCLUSIVE;
15826bdfe06aSEivind Eklund 		else
1583047dd67eSAttilio Rao 			ret = LK_EXCLOTHER;
1584047dd67eSAttilio Rao 	} else if (x == LK_UNLOCKED)
1585047dd67eSAttilio Rao 		ret = 0;
158699448ed1SJohn Dyson 
1587047dd67eSAttilio Rao 	return (ret);
158853bf4bb2SPeter Wemm }
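
/*
 * Usage sketch (hypothetical): a caller can verify that curthread holds a
 * lockmgr-backed vnode lock exclusively before modifying the vnode; treating
 * v_vnlock as a lockmgr lock is an assumption of this example:
 *
 *	if (lockstatus(vp->v_vnlock) != LK_EXCLUSIVE)
 *		panic("vnode %p not exclusively locked", vp);
 */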
1589be6847d7SJohn Baldwin 
159084887fa3SAttilio Rao #ifdef INVARIANT_SUPPORT
1591de5b1952SAlexander Leidinger 
1592de5b1952SAlexander Leidinger FEATURE(invariant_support,
1593de5b1952SAlexander Leidinger     "Support for modules compiled with INVARIANTS option");
1594de5b1952SAlexander Leidinger 
159584887fa3SAttilio Rao #ifndef INVARIANTS
159684887fa3SAttilio Rao #undef	_lockmgr_assert
159784887fa3SAttilio Rao #endif
159884887fa3SAttilio Rao 
159984887fa3SAttilio Rao void
1600d576deedSPawel Jakub Dawidek _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
160184887fa3SAttilio Rao {
160284887fa3SAttilio Rao 	int slocked = 0;
160384887fa3SAttilio Rao 
160484887fa3SAttilio Rao 	if (panicstr != NULL)
160584887fa3SAttilio Rao 		return;
160684887fa3SAttilio Rao 	switch (what) {
160784887fa3SAttilio Rao 	case KA_SLOCKED:
160884887fa3SAttilio Rao 	case KA_SLOCKED | KA_NOTRECURSED:
160984887fa3SAttilio Rao 	case KA_SLOCKED | KA_RECURSED:
161084887fa3SAttilio Rao 		slocked = 1;
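		/* FALLTHROUGH */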
161184887fa3SAttilio Rao 	case KA_LOCKED:
161284887fa3SAttilio Rao 	case KA_LOCKED | KA_NOTRECURSED:
161384887fa3SAttilio Rao 	case KA_LOCKED | KA_RECURSED:
1614e5f94314SAttilio Rao #ifdef WITNESS
1615e5f94314SAttilio Rao 
1616e5f94314SAttilio Rao 		/*
1617e5f94314SAttilio Rao 		 * We cannot trust WITNESS if the lock is held in exclusive
1618e5f94314SAttilio Rao 		 * mode and a call to lockmgr_disown() happened.
1619e5f94314SAttilio Rao 		 * Work around this by skipping the check if the lock is held
1620e5f94314SAttilio Rao 		 * in exclusive mode even for the KA_LOCKED case.
1621e5f94314SAttilio Rao 		 */
1622e5f94314SAttilio Rao 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1623e5f94314SAttilio Rao 			witness_assert(&lk->lock_object, what, file, line);
1624e5f94314SAttilio Rao 			break;
1625e5f94314SAttilio Rao 		}
1626e5f94314SAttilio Rao #endif
1627047dd67eSAttilio Rao 		if (lk->lk_lock == LK_UNLOCKED ||
1628047dd67eSAttilio Rao 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1629047dd67eSAttilio Rao 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
163084887fa3SAttilio Rao 			panic("Lock %s not %slocked @ %s:%d\n",
1631047dd67eSAttilio Rao 			    lk->lock_object.lo_name, slocked ? "share" : "",
163284887fa3SAttilio Rao 			    file, line);
1633047dd67eSAttilio Rao 
1634047dd67eSAttilio Rao 		if ((lk->lk_lock & LK_SHARE) == 0) {
1635047dd67eSAttilio Rao 			if (lockmgr_recursed(lk)) {
163684887fa3SAttilio Rao 				if (what & KA_NOTRECURSED)
163784887fa3SAttilio Rao 					panic("Lock %s recursed @ %s:%d\n",
1638047dd67eSAttilio Rao 					    lk->lock_object.lo_name, file,
1639047dd67eSAttilio Rao 					    line);
164084887fa3SAttilio Rao 			} else if (what & KA_RECURSED)
164184887fa3SAttilio Rao 				panic("Lock %s not recursed @ %s:%d\n",
1642047dd67eSAttilio Rao 				    lk->lock_object.lo_name, file, line);
164384887fa3SAttilio Rao 		}
164484887fa3SAttilio Rao 		break;
164584887fa3SAttilio Rao 	case KA_XLOCKED:
164684887fa3SAttilio Rao 	case KA_XLOCKED | KA_NOTRECURSED:
164784887fa3SAttilio Rao 	case KA_XLOCKED | KA_RECURSED:
1648047dd67eSAttilio Rao 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
164984887fa3SAttilio Rao 			panic("Lock %s not exclusively locked @ %s:%d\n",
1650047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
1651047dd67eSAttilio Rao 		if (lockmgr_recursed(lk)) {
165284887fa3SAttilio Rao 			if (what & KA_NOTRECURSED)
165384887fa3SAttilio Rao 				panic("Lock %s recursed @ %s:%d\n",
1654047dd67eSAttilio Rao 				    lk->lock_object.lo_name, file, line);
165584887fa3SAttilio Rao 		} else if (what & KA_RECURSED)
165684887fa3SAttilio Rao 			panic("Lock %s not recursed @ %s:%d\n",
1657047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
165884887fa3SAttilio Rao 		break;
165984887fa3SAttilio Rao 	case KA_UNLOCKED:
1660047dd67eSAttilio Rao 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
166184887fa3SAttilio Rao 			panic("Lock %s exclusively locked @ %s:%d\n",
1662047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
166384887fa3SAttilio Rao 		break;
166484887fa3SAttilio Rao 	default:
1665047dd67eSAttilio Rao 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1666047dd67eSAttilio Rao 		    line);
166784887fa3SAttilio Rao 	}
166884887fa3SAttilio Rao }
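
/*
 * Usage sketch (hypothetical caller): under INVARIANTS the lockmgr_assert()
 * wrapper, assumed to supply LOCK_FILE/LOCK_LINE like the other lockmgr
 * macros, lets code state its locking contract explicitly:
 *
 *	lockmgr_assert(&lk, KA_XLOCKED | KA_NOTRECURSED);
 */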
1669047dd67eSAttilio Rao #endif
167084887fa3SAttilio Rao 
1671be6847d7SJohn Baldwin #ifdef DDB
1672462a7addSJohn Baldwin int
1673462a7addSJohn Baldwin lockmgr_chain(struct thread *td, struct thread **ownerp)
1674462a7addSJohn Baldwin {
1675047dd67eSAttilio Rao 	struct lock *lk;
1676462a7addSJohn Baldwin 
1677047dd67eSAttilio Rao 	lk = td->td_wchan;
1678462a7addSJohn Baldwin 
1679047dd67eSAttilio Rao 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1680462a7addSJohn Baldwin 		return (0);
1681047dd67eSAttilio Rao 	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1682047dd67eSAttilio Rao 	if (lk->lk_lock & LK_SHARE)
1683047dd67eSAttilio Rao 		db_printf("SHARED (count %ju)\n",
1684047dd67eSAttilio Rao 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1685047dd67eSAttilio Rao 	else
1686047dd67eSAttilio Rao 		db_printf("EXCL\n");
1687047dd67eSAttilio Rao 	*ownerp = lockmgr_xholder(lk);
1688462a7addSJohn Baldwin 
1689462a7addSJohn Baldwin 	return (1);
1690462a7addSJohn Baldwin }
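
/*
 * lockmgr_chain() is meant to be driven by a DDB blocking-chain walker (the
 * "show sleepchain" command is assumed to be its caller): for a thread
 * blocked on a lockmgr lock it prints the lock state and hands back the
 * exclusive owner, if any, so the walker can follow the chain of sleeping
 * threads.
 */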
1691462a7addSJohn Baldwin 
1692047dd67eSAttilio Rao static void
1693d576deedSPawel Jakub Dawidek db_show_lockmgr(const struct lock_object *lock)
1694be6847d7SJohn Baldwin {
1695be6847d7SJohn Baldwin 	struct thread *td;
1696d576deedSPawel Jakub Dawidek 	const struct lock *lk;
1697be6847d7SJohn Baldwin 
1698d576deedSPawel Jakub Dawidek 	lk = (const struct lock *)lock;
1699be6847d7SJohn Baldwin 
1700be6847d7SJohn Baldwin 	db_printf(" state: ");
1701047dd67eSAttilio Rao 	if (lk->lk_lock == LK_UNLOCKED)
1702be6847d7SJohn Baldwin 		db_printf("UNLOCKED\n");
1703047dd67eSAttilio Rao 	else if (lk->lk_lock & LK_SHARE)
1704047dd67eSAttilio Rao 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1705047dd67eSAttilio Rao 	else {
1706047dd67eSAttilio Rao 		td = lockmgr_xholder(lk);
1707047dd67eSAttilio Rao 		if (td == (struct thread *)LK_KERNPROC)
1708047dd67eSAttilio Rao 			db_printf("XLOCK: LK_KERNPROC\n");
1709047dd67eSAttilio Rao 		else
1710047dd67eSAttilio Rao 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1711047dd67eSAttilio Rao 			    td->td_tid, td->td_proc->p_pid,
1712047dd67eSAttilio Rao 			    td->td_proc->p_comm);
1713047dd67eSAttilio Rao 		if (lockmgr_recursed(lk))
1714047dd67eSAttilio Rao 			db_printf(" recursed: %d\n", lk->lk_recurse);
1715047dd67eSAttilio Rao 	}
1716047dd67eSAttilio Rao 	db_printf(" waiters: ");
1717047dd67eSAttilio Rao 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1718047dd67eSAttilio Rao 	case LK_SHARED_WAITERS:
1719047dd67eSAttilio Rao 		db_printf("shared\n");
1720e5023dd9SEdward Tomasz Napierala 		break;
1721047dd67eSAttilio Rao 	case LK_EXCLUSIVE_WAITERS:
1722047dd67eSAttilio Rao 		db_printf("exclusive\n");
1723047dd67eSAttilio Rao 		break;
1724047dd67eSAttilio Rao 	case LK_ALL_WAITERS:
1725047dd67eSAttilio Rao 		db_printf("shared and exclusive\n");
1726047dd67eSAttilio Rao 		break;
1727047dd67eSAttilio Rao 	default:
1728047dd67eSAttilio Rao 		db_printf("none\n");
1729047dd67eSAttilio Rao 	}
1730651175c9SAttilio Rao 	db_printf(" spinners: ");
1731651175c9SAttilio Rao 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1732651175c9SAttilio Rao 		db_printf("exclusive\n");
1733651175c9SAttilio Rao 	else
1734651175c9SAttilio Rao 		db_printf("none\n");
1735be6847d7SJohn Baldwin }
1736be6847d7SJohn Baldwin #endif
1737