xref: /freebsd/sys/kern/kern_lock.c (revision f5f9340b9807d44d200658ba1bbbbbb57ab72e07)
/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)
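
/*
 * The GIANT_*() macros above implement the usual pattern for sleeping
 * while Giant may be (recursively) held: GIANT_SAVE() records the
 * recursion depth in _i and drops every level before blocking, while
 * GIANT_RESTORE() retakes Giant the same number of times afterwards.
 * A minimal usage sketch, mirroring what sleeplk() does below:
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();
 *	sleepq_wait(&lk->lock_object, pri);
 *	GIANT_RESTORE();
 */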

#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
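
/*
 * The whole lock state is packed into the single word lk->lk_lock.  In
 * broad strokes (see sys/lockmgr.h for the authoritative encoding):
 *
 *	LK_SHARE set:	LK_SHARERS(x) gives the count of shared holders,
 *			alongside the waiters/spinners flag bits.
 *	LK_SHARE clear:	LK_HOLDER(x) is the owning thread pointer, or
 *			LK_KERNPROC if the lock has been disowned,
 *			alongside the waiters/spinners flag bits.
 *
 * lockmgr_disowned() and lockmgr_xlocked() above simply mask away the
 * flag bits and compare the remainder against LK_KERNPROC and
 * curthread respectively.
 */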

static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
    "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif

static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * This function is called with the sleepqueue chain lock held and
 * returns with it released.  It also assumes the generic interlock is
 * sane and previously checked.
 * If LK_INTERLOCK is specified the interlock is not reacquired after the
 * sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Pick the sleep primitive according to the timeout and signal
	 * catching settings.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}
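
/*
 * A sketch of how the acquisition paths below drive sleeplk(): the
 * caller takes the sleepqueue chain lock, rechecks the lock word, sets
 * the relevant waiters flag and only then sleeps:
 *
 *	sleepq_lock(&lk->lock_object);
 *	x = lk->lk_lock;
 *	... recheck x and set LK_*_WAITERS via atomic_cmpset ...
 *	error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, queue);
 *
 * On return the sleepqueue chain lock has been released and, if
 * LK_INTERLOCK was passed, the interlock has been dropped and is not
 * reacquired.
 */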

static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	u_int realexslp;
	int queue, wakeup_swapper;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starvation of the threads sleeping on the shared
		 * queue by giving them precedence and cleaning up the
		 * exclusive waiters bit anyway.
		 * Please note that the lk_exslpfail count may be lying about
		 * the real number of waiters with the LK_SLEEPFAIL flag on
		 * because they may be used in conjunction with interruptible
		 * sleeps, so lk_exslpfail should be considered an upper
		 * bound, including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}

		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * it up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	return (wakeup_swapper);
}

static void
assert_lockmgr(const struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif

void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
            ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
            &lk->lk_lock));

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_exslpfail = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	STACK_ZERO(lk);
}
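
/*
 * A typical lockmgr life cycle as seen through the lockmgr(9) API;
 * the priority, wmesg and flags values here are only illustrative:
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, LK_CANRECURSE);
 *	...
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&lk, LK_RELEASE, NULL);
 *	...
 *	lockdestroy(&lk);
 */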

/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}
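
/*
 * For example, a vnode or buf lock created with LK_NOSHARE can later be
 * opened up to shared acquisitions with lockallowshare(), and recursion
 * can be toggled with lockallowrecurse()/lockdisablerecurse().  The
 * KA_XLOCKED assertions require the caller to hold the lock exclusively
 * while flipping these flags.
 */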
4333634d5b2SJohn Baldwin 
434a18b1f1dSJason Evans void
435047dd67eSAttilio Rao lockdestroy(struct lock *lk)
436a18b1f1dSJason Evans {
437c91fcee7SJohn Baldwin 
438047dd67eSAttilio Rao 	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
439047dd67eSAttilio Rao 	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
4402028867dSAttilio Rao 	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
441047dd67eSAttilio Rao 	lock_destroy(&lk->lock_object);
442047dd67eSAttilio Rao }

int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		switch (op) {
		case LK_SHARED:
			op = LK_EXCLUSIVE;
			break;
		case LK_UPGRADE:
		case LK_DOWNGRADE:
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			    file, line);
			return (0);
		}
	}

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, ilk);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, fail in order to avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the operation is not allowed to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  We need to handle two states here
			 * because, after a failed acquisition, the lock can
			 * be found held either in exclusive mode or in
			 * shared mode (for the writer starvation avoidance
			 * technique).
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x) != 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * We have been unable to succeed in upgrading, so just
		 * give up the shared lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * If the operation is a try, just give up
				 * and return instead of panicking.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation is not allowed to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it,
			 * claim lock ownership and return, preserving waiters
			 * flags.
			 */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the exclusive
			 * lock and the exclusive waiters flag is set, we
			 * will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it had no
			 * waiters.
			 * Fix up the tid var if the lock has been disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is recursed also, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = LK_UNLOCKED;

			/*
		 	 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners up.
			 * If interruptible sleeps left the exclusive queue
			 * empty, avoid starvation of the threads sleeping
			 * on the shared queue by giving them precedence
			 * and cleaning up the exclusive waiters bit anyway.
			 * Please note that the lk_exslpfail count may be
			 * lying about the real number of waiters with the
			 * LK_SLEEPFAIL flag on because they may be used in
			 * conjunction with interruptible sleeps, so
			 * lk_exslpfail should be considered an upper bound,
			 * including the edge cases.
			 */
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			realexslp = sleepq_sleepcnt(&lk->lock_object,
			    SQ_EXCLUSIVE_QUEUE);
			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
				if (lk->lk_exslpfail < realexslp) {
					lk->lk_exslpfail = 0;
					queue = SQ_EXCLUSIVE_QUEUE;
					v |= (x & LK_SHARED_WAITERS);
				} else {
					lk->lk_exslpfail = 0;
					LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
					    __func__, lk);
					LOCK_LOG2(lk,
			"%s: %p waking up threads on the exclusive queue",
					    __func__, lk);
					wakeup_swapper =
					    sleepq_broadcast(&lk->lock_object,
					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
					queue = SQ_SHARED_QUEUE;
				}
			} else {

				/*
				 * Exclusive waiters sleeping with LK_SLEEPFAIL
				 * on and using interruptible sleeps/timeout
				 * may have left spurious lk_exslpfail counts
				 * on, so clean it up anyway.
				 */
				lk->lk_exslpfail = 0;
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
1054047dd67eSAttilio Rao 	case LK_DRAIN:
1055e5f94314SAttilio Rao 		if (LK_CAN_WITNESS(flags))
1056e5f94314SAttilio Rao 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
105741313430SJohn Baldwin 			    LOP_EXCLUSIVE, file, line, ilk);
1058047dd67eSAttilio Rao 
1059047dd67eSAttilio Rao 		/*
106096f1567fSKonstantin Belousov 		 * Trying to drain a lock we already own will result in a
1061047dd67eSAttilio Rao 		 * deadlock.
1062047dd67eSAttilio Rao 		 */
1063047dd67eSAttilio Rao 		if (lockmgr_xlocked(lk)) {
1064047dd67eSAttilio Rao 			if (flags & LK_INTERLOCK)
1065047dd67eSAttilio Rao 				class->lc_unlock(ilk);
1066047dd67eSAttilio Rao 			panic("%s: draining %s with the lock held @ %s:%d\n",
1067047dd67eSAttilio Rao 			    __func__, iwmesg, file, line);
1068047dd67eSAttilio Rao 		}
1069047dd67eSAttilio Rao 
1070047dd67eSAttilio Rao 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1071*f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
1072*f5f9340bSFabien Thomas 			PMC_SOFT_CALL( , , lock, failed);
1073*f5f9340bSFabien Thomas #endif
1074047dd67eSAttilio Rao 			lock_profile_obtain_lock_failed(&lk->lock_object,
1075047dd67eSAttilio Rao 			    &contested, &waittime);
1076047dd67eSAttilio Rao 
1077047dd67eSAttilio Rao 			/*
1078047dd67eSAttilio Rao 			 * If the lock is expected to not sleep just give up
1079047dd67eSAttilio Rao 			 * and return.
1080047dd67eSAttilio Rao 			 */
1081047dd67eSAttilio Rao 			if (LK_TRYOP(flags)) {
1082047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p fails the try operation",
1083047dd67eSAttilio Rao 				    __func__, lk);
1084047dd67eSAttilio Rao 				error = EBUSY;
1085047dd67eSAttilio Rao 				break;
1086047dd67eSAttilio Rao 			}
1087047dd67eSAttilio Rao 
1088047dd67eSAttilio Rao 			/*
1089047dd67eSAttilio Rao 			 * Acquire the sleepqueue chain lock because we
1090047dd67eSAttilio Rao 			 * probabilly will need to manipulate waiters flags.
1091047dd67eSAttilio Rao 			 */
1092047dd67eSAttilio Rao 			sleepq_lock(&lk->lock_object);
1093047dd67eSAttilio Rao 			x = lk->lk_lock;
1094047dd67eSAttilio Rao 
1095047dd67eSAttilio Rao 			/*
1096047dd67eSAttilio Rao 			 * If the lock has been released while we spun on
1097047dd67eSAttilio Rao 			 * the sleepqueue chain lock, just try again.
1098047dd67eSAttilio Rao 			 */
1099047dd67eSAttilio Rao 			if (x == LK_UNLOCKED) {
1100047dd67eSAttilio Rao 				sleepq_release(&lk->lock_object);
1101047dd67eSAttilio Rao 				continue;
1102047dd67eSAttilio Rao 			}
1103047dd67eSAttilio Rao 
1104651175c9SAttilio Rao 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1105651175c9SAttilio Rao 			if ((x & ~v) == LK_UNLOCKED) {
1106651175c9SAttilio Rao 				v = (x & ~LK_EXCLUSIVE_SPINNERS);
11072028867dSAttilio Rao 
11082028867dSAttilio Rao 				/*
11092028867dSAttilio Rao 				 * If interruptible sleeps left the exclusive
11102028867dSAttilio Rao 				 * queue empty, avoid starving the threads
11112028867dSAttilio Rao 				 * sleeping on the shared queue by giving them
11122028867dSAttilio Rao 				 * precedence and cleaning up the exclusive
11132028867dSAttilio Rao 				 * waiters bit anyway.
1114c636ba83SAttilio Rao 				 * Please note that the lk_exslpfail count may
1115c636ba83SAttilio Rao 				 * overstate the real number of waiters with the
1116c636ba83SAttilio Rao 				 * LK_SLEEPFAIL flag on, because such waiters
1117c636ba83SAttilio Rao 				 * may also be using interruptible sleeps, so
1118aab9c8c2SAttilio Rao 				 * lk_exslpfail should be treated as an upper
1119aab9c8c2SAttilio Rao 				 * bound, including the edge cases.
11212028867dSAttilio Rao 				 */
1122047dd67eSAttilio Rao 				if (v & LK_EXCLUSIVE_WAITERS) {
1123047dd67eSAttilio Rao 					queue = SQ_EXCLUSIVE_QUEUE;
1124047dd67eSAttilio Rao 					v &= ~LK_EXCLUSIVE_WAITERS;
1125047dd67eSAttilio Rao 				} else {
11269dbf7a62SAttilio Rao 
11279dbf7a62SAttilio Rao 					/*
11289dbf7a62SAttilio Rao 					 * Exclusive waiters sleeping with
11299dbf7a62SAttilio Rao 					 * LK_SLEEPFAIL set and using
11309dbf7a62SAttilio Rao 					 * interruptible sleeps/timeouts may
11319dbf7a62SAttilio Rao 					 * have left spurious lk_exslpfail
11329dbf7a62SAttilio Rao 					 * counts behind, so clean them up.
11339dbf7a62SAttilio Rao 					 */
1134047dd67eSAttilio Rao 					MPASS(v & LK_SHARED_WAITERS);
11359dbf7a62SAttilio Rao 					lk->lk_exslpfail = 0;
1136047dd67eSAttilio Rao 					queue = SQ_SHARED_QUEUE;
1137047dd67eSAttilio Rao 					v &= ~LK_SHARED_WAITERS;
1138047dd67eSAttilio Rao 				}
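				/*
				 * If the exclusive queue holds only
				 * LK_SLEEPFAIL sleepers, wake them up (their
				 * sleeps will fail) and serve the shared
				 * queue instead.
				 */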
11392028867dSAttilio Rao 				if (queue == SQ_EXCLUSIVE_QUEUE) {
11402028867dSAttilio Rao 					realexslp =
11412028867dSAttilio Rao 					    sleepq_sleepcnt(&lk->lock_object,
11422028867dSAttilio Rao 					    SQ_EXCLUSIVE_QUEUE);
11432028867dSAttilio Rao 					if (lk->lk_exslpfail >= realexslp) {
11442028867dSAttilio Rao 						lk->lk_exslpfail = 0;
11452028867dSAttilio Rao 						queue = SQ_SHARED_QUEUE;
11462028867dSAttilio Rao 						v &= ~LK_SHARED_WAITERS;
11472028867dSAttilio Rao 						if (realexslp != 0) {
11482028867dSAttilio Rao 							LOCK_LOG2(lk,
11492028867dSAttilio Rao 					"%s: %p has only LK_SLEEPFAIL sleepers",
11502028867dSAttilio Rao 							    __func__, lk);
11512028867dSAttilio Rao 							LOCK_LOG2(lk,
11522028867dSAttilio Rao 			"%s: %p waking up threads on the exclusive queue",
11532028867dSAttilio Rao 							    __func__, lk);
11542028867dSAttilio Rao 							wakeup_swapper =
11552028867dSAttilio Rao 							    sleepq_broadcast(
11562028867dSAttilio Rao 							    &lk->lock_object,
11572028867dSAttilio Rao 							    SLEEPQ_LK, 0,
11582028867dSAttilio Rao 							    SQ_EXCLUSIVE_QUEUE);
11592028867dSAttilio Rao 						}
11602028867dSAttilio Rao 					} else
11612028867dSAttilio Rao 						lk->lk_exslpfail = 0;
11622028867dSAttilio Rao 				}
1163047dd67eSAttilio Rao 				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1164047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
1165047dd67eSAttilio Rao 					continue;
1166047dd67eSAttilio Rao 				}
1167047dd67eSAttilio Rao 				LOCK_LOG3(lk,
1168047dd67eSAttilio Rao 				"%s: %p waking up all threads on the %s queue",
1169047dd67eSAttilio Rao 				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1170047dd67eSAttilio Rao 				    "shared" : "exclusive");
1171814f26daSJohn Baldwin 				wakeup_swapper |= sleepq_broadcast(
1172da7bbd2cSJohn Baldwin 				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1173047dd67eSAttilio Rao 
1174047dd67eSAttilio Rao 				/*
1175047dd67eSAttilio Rao 				 * If shared waiters have been woken up, we need
1176047dd67eSAttilio Rao 				 * to wait for one of them to acquire the lock
1177047dd67eSAttilio Rao 				 * before setting the exclusive waiters flag, in
1178047dd67eSAttilio Rao 				 * order to avoid a deadlock.
1179047dd67eSAttilio Rao 				 */
1180047dd67eSAttilio Rao 				if (queue == SQ_SHARED_QUEUE) {
1181047dd67eSAttilio Rao 					for (v = lk->lk_lock;
1182047dd67eSAttilio Rao 					    (v & LK_SHARE) && !LK_SHARERS(v);
1183047dd67eSAttilio Rao 					    v = lk->lk_lock)
1184047dd67eSAttilio Rao 						cpu_spinwait();
1185047dd67eSAttilio Rao 				}
1186047dd67eSAttilio Rao 			}
1187047dd67eSAttilio Rao 
1188047dd67eSAttilio Rao 			/*
1189047dd67eSAttilio Rao 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1190047dd67eSAttilio Rao 			 * fail, loop back and retry.
1191047dd67eSAttilio Rao 			 */
1192047dd67eSAttilio Rao 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1193047dd67eSAttilio Rao 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1194047dd67eSAttilio Rao 				    x | LK_EXCLUSIVE_WAITERS)) {
1195047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
1196047dd67eSAttilio Rao 					continue;
1197047dd67eSAttilio Rao 				}
1198047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1199047dd67eSAttilio Rao 				    __func__, lk);
1200047dd67eSAttilio Rao 			}
1201047dd67eSAttilio Rao 
1202047dd67eSAttilio Rao 			/*
1203047dd67eSAttilio Rao 			 * Since we have been unable to acquire the
1204047dd67eSAttilio Rao 			 * exclusive lock and the exclusive waiters flag
1205047dd67eSAttilio Rao 			 * is set, we will sleep.
1206047dd67eSAttilio Rao 			 */
1207047dd67eSAttilio Rao 			if (flags & LK_INTERLOCK) {
1208047dd67eSAttilio Rao 				class->lc_unlock(ilk);
1209047dd67eSAttilio Rao 				flags &= ~LK_INTERLOCK;
1210047dd67eSAttilio Rao 			}
1211e5f94314SAttilio Rao 			GIANT_SAVE();
1212047dd67eSAttilio Rao 			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1213047dd67eSAttilio Rao 			    SQ_EXCLUSIVE_QUEUE);
1214047dd67eSAttilio Rao 			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1215e5f94314SAttilio Rao 			GIANT_RESTORE();
1216047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1217047dd67eSAttilio Rao 			    __func__, lk);
1218047dd67eSAttilio Rao 		}
1219047dd67eSAttilio Rao 
1220047dd67eSAttilio Rao 		if (error == 0) {
1221047dd67eSAttilio Rao 			lock_profile_obtain_lock_success(&lk->lock_object,
1222047dd67eSAttilio Rao 			    contested, waittime, file, line);
1223047dd67eSAttilio Rao 			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1224047dd67eSAttilio Rao 			    lk->lk_recurse, file, line);
1225e5f94314SAttilio Rao 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1226e5f94314SAttilio Rao 			    LK_TRYWIT(flags), file, line);
1227047dd67eSAttilio Rao 			TD_LOCKS_INC(curthread);
1228047dd67eSAttilio Rao 			STACK_SAVE(lk);
1229047dd67eSAttilio Rao 		}
1230047dd67eSAttilio Rao 		break;
1231047dd67eSAttilio Rao 	default:
1232047dd67eSAttilio Rao 		if (flags & LK_INTERLOCK)
1233047dd67eSAttilio Rao 			class->lc_unlock(ilk);
1234047dd67eSAttilio Rao 		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1235047dd67eSAttilio Rao 	}
1236047dd67eSAttilio Rao 
1237047dd67eSAttilio Rao 	if (flags & LK_INTERLOCK)
1238047dd67eSAttilio Rao 		class->lc_unlock(ilk);
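	/*
	 * sleepq_broadcast() reports whether it awakened a swapped-out
	 * thread; in that case wake up proc0 (the swapper) so that the
	 * thread can be swapped back in.
	 */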
1239da7bbd2cSJohn Baldwin 	if (wakeup_swapper)
1240da7bbd2cSJohn Baldwin 		kick_proc0();
1241047dd67eSAttilio Rao 
1242047dd67eSAttilio Rao 	return (error);
1243047dd67eSAttilio Rao }
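
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): a typical LK_DRAIN consumer quiesces every waiter before
 * tearing the lock down.  The "struct foo" container and the M_TEMP
 * malloc type below are hypothetical.
 *
 *	static void
 *	foo_destroy(struct foo *fp)
 *	{
 *
 *		lockmgr(&fp->f_lock, LK_DRAIN, NULL);
 *		lockmgr(&fp->f_lock, LK_RELEASE, NULL);
 *		lockdestroy(&fp->f_lock);
 *		free(fp, M_TEMP);
 *	}
 */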
1244047dd67eSAttilio Rao 
1245d7a7e179SAttilio Rao void
1246047dd67eSAttilio Rao _lockmgr_disown(struct lock *lk, const char *file, int line)
1247047dd67eSAttilio Rao {
1248047dd67eSAttilio Rao 	uintptr_t tid, x;
1249047dd67eSAttilio Rao 
125035370593SAndriy Gapon 	if (SCHEDULER_STOPPED())
125135370593SAndriy Gapon 		return;
125235370593SAndriy Gapon 
1253047dd67eSAttilio Rao 	tid = (uintptr_t)curthread;
1254047dd67eSAttilio Rao 	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
1255047dd67eSAttilio Rao 
1256047dd67eSAttilio Rao 	/*
125796f1567fSKonstantin Belousov 	 * If the owner is already LK_KERNPROC, just skip the whole operation.
1258047dd67eSAttilio Rao 	 */
1259047dd67eSAttilio Rao 	if (LK_HOLDER(lk->lk_lock) != tid)
1260047dd67eSAttilio Rao 		return;
126104a28689SJeff Roberson 	lock_profile_release_lock(&lk->lock_object);
1262e5f94314SAttilio Rao 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1263e5f94314SAttilio Rao 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1264e5f94314SAttilio Rao 	TD_LOCKS_DEC(curthread);
1265337c5ff4SAttilio Rao 	STACK_SAVE(lk);
1266047dd67eSAttilio Rao 
1267047dd67eSAttilio Rao 	/*
1268047dd67eSAttilio Rao 	 * In order to preserve the waiters flags, just spin.
1269047dd67eSAttilio Rao 	 */
1270047dd67eSAttilio Rao 	for (;;) {
1271651175c9SAttilio Rao 		x = lk->lk_lock;
1272651175c9SAttilio Rao 		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1273651175c9SAttilio Rao 		x &= LK_ALL_WAITERS;
127422dd228dSAttilio Rao 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1275e5f94314SAttilio Rao 		    LK_KERNPROC | x))
1276047dd67eSAttilio Rao 			return;
1277047dd67eSAttilio Rao 		cpu_spinwait();
1278047dd67eSAttilio Rao 	}
1279047dd67eSAttilio Rao }
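
/*
 * Illustrative sketch (editorial addition): disowning hands an
 * exclusively held lock over to LK_KERNPROC so that a thread other than
 * the original owner may release it later, e.g. from an asynchronous
 * completion path (compare BUF_KERNPROC() in the buffer cache).  The
 * "bp"/"b_lock" names below are used loosely, as a sketch:
 *
 *	lockmgr(&bp->b_lock, LK_EXCLUSIVE, NULL);
 *	// ... hand "bp" off for asynchronous processing ...
 *	lockmgr_disown(&bp->b_lock);
 *
 *	// Later, in the thread that completes the processing:
 *	lockmgr(&bp->b_lock, LK_RELEASE, NULL);
 */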
1280047dd67eSAttilio Rao 
1281047dd67eSAttilio Rao void
1282d576deedSPawel Jakub Dawidek lockmgr_printinfo(const struct lock *lk)
1283d7a7e179SAttilio Rao {
1284d7a7e179SAttilio Rao 	struct thread *td;
1285047dd67eSAttilio Rao 	uintptr_t x;
1286d7a7e179SAttilio Rao 
1287047dd67eSAttilio Rao 	if (lk->lk_lock == LK_UNLOCKED)
1288047dd67eSAttilio Rao 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1289047dd67eSAttilio Rao 	else if (lk->lk_lock & LK_SHARE)
1290047dd67eSAttilio Rao 		printf("lock type %s: SHARED (count %ju)\n",
1291047dd67eSAttilio Rao 		    lk->lock_object.lo_name,
1292047dd67eSAttilio Rao 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1293047dd67eSAttilio Rao 	else {
1294047dd67eSAttilio Rao 		td = lockmgr_xholder(lk);
1295047dd67eSAttilio Rao 		if (td == (struct thread *)LK_KERNPROC)
1295047dd67eSAttilio Rao 			printf("lock type %s: EXCL by KERNPROC\n",
1295047dd67eSAttilio Rao 			    lk->lock_object.lo_name);
12952573ea5fSIvan Voras 		else
12962573ea5fSIvan Voras 			printf("lock type %s: EXCL by thread %p "
12962573ea5fSIvan Voras 			    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
12972573ea5fSIvan Voras 			    td, td->td_proc->p_pid, td->td_proc->p_comm,
12972573ea5fSIvan Voras 			    td->td_tid);
1298d7a7e179SAttilio Rao 	}
1299d7a7e179SAttilio Rao 
1300047dd67eSAttilio Rao 	x = lk->lk_lock;
1301047dd67eSAttilio Rao 	if (x & LK_EXCLUSIVE_WAITERS)
1302047dd67eSAttilio Rao 		printf(" with exclusive waiters pending\n");
1303047dd67eSAttilio Rao 	if (x & LK_SHARED_WAITERS)
1304047dd67eSAttilio Rao 		printf(" with shared waiters pending\n");
1305651175c9SAttilio Rao 	if (x & LK_EXCLUSIVE_SPINNERS)
1306651175c9SAttilio Rao 		printf(" with exclusive spinners pending\n");
1307047dd67eSAttilio Rao 
1308047dd67eSAttilio Rao 	STACK_PRINT(lk);
1309047dd67eSAttilio Rao }
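
/*
 * Example output (editorial addition; the shape follows the printfs
 * above, all values hypothetical):
 *
 *	lock type bufwait: EXCL by thread 0xfffff80012345000 (pid 42, syncer, tid 100042)
 *	 with exclusive waiters pending
 */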
1310047dd67eSAttilio Rao 
131199448ed1SJohn Dyson int
1312d576deedSPawel Jakub Dawidek lockstatus(const struct lock *lk)
131399448ed1SJohn Dyson {
1314047dd67eSAttilio Rao 	uintptr_t v, x;
1315047dd67eSAttilio Rao 	int ret;
131699448ed1SJohn Dyson 
1317047dd67eSAttilio Rao 	ret = LK_SHARED;
1318047dd67eSAttilio Rao 	x = lk->lk_lock;
1319047dd67eSAttilio Rao 	v = LK_HOLDER(x);
13200e9eb108SAttilio Rao 
1321047dd67eSAttilio Rao 	if ((x & LK_SHARE) == 0) {
1322047dd67eSAttilio Rao 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1323047dd67eSAttilio Rao 			ret = LK_EXCLUSIVE;
13246bdfe06aSEivind Eklund 		else
1325047dd67eSAttilio Rao 			ret = LK_EXCLOTHER;
1326047dd67eSAttilio Rao 	} else if (x == LK_UNLOCKED)
1327047dd67eSAttilio Rao 		ret = 0;
132899448ed1SJohn Dyson 
1329047dd67eSAttilio Rao 	return (ret);
133053bf4bb2SPeter Wemm }
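
/*
 * Illustrative sketch (editorial addition): callers branch on the four
 * possible return values; the "vp"/"v_lock" names are hypothetical.
 *
 *	switch (lockstatus(&vp->v_lock)) {
 *	case LK_EXCLUSIVE:
 *		// Held exclusively by curthread (or disowned to
 *		// LK_KERNPROC).
 *		break;
 *	case LK_EXCLOTHER:
 *		// Held exclusively by another thread.
 *		break;
 *	case LK_SHARED:
 *		// Held in shared mode.
 *		break;
 *	case 0:
 *		// Unlocked.
 *		break;
 *	}
 */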
1331be6847d7SJohn Baldwin 
133284887fa3SAttilio Rao #ifdef INVARIANT_SUPPORT
1333de5b1952SAlexander Leidinger 
1334de5b1952SAlexander Leidinger FEATURE(invariant_support,
1335de5b1952SAlexander Leidinger     "Support for modules compiled with INVARIANTS option");
1336de5b1952SAlexander Leidinger 
133784887fa3SAttilio Rao #ifndef INVARIANTS
133884887fa3SAttilio Rao #undef	_lockmgr_assert
133984887fa3SAttilio Rao #endif
134084887fa3SAttilio Rao 
134184887fa3SAttilio Rao void
1342d576deedSPawel Jakub Dawidek _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
134384887fa3SAttilio Rao {
134484887fa3SAttilio Rao 	int slocked = 0;
134584887fa3SAttilio Rao 
134684887fa3SAttilio Rao 	if (panicstr != NULL)
134784887fa3SAttilio Rao 		return;
134884887fa3SAttilio Rao 	switch (what) {
134984887fa3SAttilio Rao 	case KA_SLOCKED:
135084887fa3SAttilio Rao 	case KA_SLOCKED | KA_NOTRECURSED:
135184887fa3SAttilio Rao 	case KA_SLOCKED | KA_RECURSED:
135284887fa3SAttilio Rao 		slocked = 1;
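		/* FALLTHROUGH */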
135384887fa3SAttilio Rao 	case KA_LOCKED:
135484887fa3SAttilio Rao 	case KA_LOCKED | KA_NOTRECURSED:
135584887fa3SAttilio Rao 	case KA_LOCKED | KA_RECURSED:
1356e5f94314SAttilio Rao #ifdef WITNESS
1357e5f94314SAttilio Rao 
1358e5f94314SAttilio Rao 		/*
1359e5f94314SAttilio Rao 		 * We cannot trust WITNESS if the lock is held in exclusive
1360e5f94314SAttilio Rao 		 * mode and a call to lockmgr_disown() happened.
1361e5f94314SAttilio Rao 		 * Work around this by skipping the check if the lock is held
1362e5f94314SAttilio Rao 		 * in exclusive mode, even for the KA_LOCKED case.
1363e5f94314SAttilio Rao 		 */
1364e5f94314SAttilio Rao 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1365e5f94314SAttilio Rao 			witness_assert(&lk->lock_object, what, file, line);
1366e5f94314SAttilio Rao 			break;
1367e5f94314SAttilio Rao 		}
1368e5f94314SAttilio Rao #endif
1369047dd67eSAttilio Rao 		if (lk->lk_lock == LK_UNLOCKED ||
1370047dd67eSAttilio Rao 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1371047dd67eSAttilio Rao 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
137284887fa3SAttilio Rao 			panic("Lock %s not %slocked @ %s:%d\n",
1373047dd67eSAttilio Rao 			    lk->lock_object.lo_name, slocked ? "share" : "",
137484887fa3SAttilio Rao 			    file, line);
1375047dd67eSAttilio Rao 
1376047dd67eSAttilio Rao 		if ((lk->lk_lock & LK_SHARE) == 0) {
1377047dd67eSAttilio Rao 			if (lockmgr_recursed(lk)) {
137884887fa3SAttilio Rao 				if (what & KA_NOTRECURSED)
137984887fa3SAttilio Rao 					panic("Lock %s recursed @ %s:%d\n",
1380047dd67eSAttilio Rao 					    lk->lock_object.lo_name, file,
1381047dd67eSAttilio Rao 					    line);
138284887fa3SAttilio Rao 			} else if (what & KA_RECURSED)
138384887fa3SAttilio Rao 				panic("Lock %s not recursed @ %s:%d\n",
1384047dd67eSAttilio Rao 				    lk->lock_object.lo_name, file, line);
138584887fa3SAttilio Rao 		}
138684887fa3SAttilio Rao 		break;
138784887fa3SAttilio Rao 	case KA_XLOCKED:
138884887fa3SAttilio Rao 	case KA_XLOCKED | KA_NOTRECURSED:
138984887fa3SAttilio Rao 	case KA_XLOCKED | KA_RECURSED:
1390047dd67eSAttilio Rao 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
139184887fa3SAttilio Rao 			panic("Lock %s not exclusively locked @ %s:%d\n",
1392047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
1393047dd67eSAttilio Rao 		if (lockmgr_recursed(lk)) {
139484887fa3SAttilio Rao 			if (what & KA_NOTRECURSED)
139584887fa3SAttilio Rao 				panic("Lock %s recursed @ %s:%d\n",
1396047dd67eSAttilio Rao 				    lk->lock_object.lo_name, file, line);
139784887fa3SAttilio Rao 		} else if (what & KA_RECURSED)
139884887fa3SAttilio Rao 			panic("Lock %s not recursed @ %s:%d\n",
1399047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
140084887fa3SAttilio Rao 		break;
140184887fa3SAttilio Rao 	case KA_UNLOCKED:
1402047dd67eSAttilio Rao 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
140384887fa3SAttilio Rao 			panic("Lock %s exclusively locked @ %s:%d\n",
1404047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
140584887fa3SAttilio Rao 		break;
140684887fa3SAttilio Rao 	default:
1407047dd67eSAttilio Rao 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1408047dd67eSAttilio Rao 		    line);
140984887fa3SAttilio Rao 	}
141084887fa3SAttilio Rao }
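
/*
 * Illustrative sketch (editorial addition): consumers normally reach
 * this routine through the lockmgr_assert() wrapper macro declared in
 * <sys/lockmgr.h>, for example:
 *
 *	lockmgr_assert(&vp->v_lock, KA_XLOCKED);
 *
 * which panics unless the lock is exclusively owned by curthread or has
 * been disowned to LK_KERNPROC.
 */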
1411047dd67eSAttilio Rao #endif
141284887fa3SAttilio Rao 
1413be6847d7SJohn Baldwin #ifdef DDB
1414462a7addSJohn Baldwin int
1415462a7addSJohn Baldwin lockmgr_chain(struct thread *td, struct thread **ownerp)
1416462a7addSJohn Baldwin {
1417047dd67eSAttilio Rao 	struct lock *lk;
1418462a7addSJohn Baldwin 
1419047dd67eSAttilio Rao 	lk = td->td_wchan;
1420462a7addSJohn Baldwin 
1421047dd67eSAttilio Rao 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1422462a7addSJohn Baldwin 		return (0);
1423047dd67eSAttilio Rao 	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1424047dd67eSAttilio Rao 	if (lk->lk_lock & LK_SHARE)
1425047dd67eSAttilio Rao 		db_printf("SHARED (count %ju)\n",
1426047dd67eSAttilio Rao 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1427047dd67eSAttilio Rao 	else
1428047dd67eSAttilio Rao 		db_printf("EXCL\n");
1429047dd67eSAttilio Rao 	*ownerp = lockmgr_xholder(lk);
1430462a7addSJohn Baldwin 
1431462a7addSJohn Baldwin 	return (1);
1432462a7addSJohn Baldwin }
1433462a7addSJohn Baldwin 
1434047dd67eSAttilio Rao static void
1435d576deedSPawel Jakub Dawidek db_show_lockmgr(const struct lock_object *lock)
1436be6847d7SJohn Baldwin {
1437be6847d7SJohn Baldwin 	struct thread *td;
1438d576deedSPawel Jakub Dawidek 	const struct lock *lk;
1439be6847d7SJohn Baldwin 
1440d576deedSPawel Jakub Dawidek 	lk = (const struct lock *)lock;
1441be6847d7SJohn Baldwin 
1442be6847d7SJohn Baldwin 	db_printf(" state: ");
1443047dd67eSAttilio Rao 	if (lk->lk_lock == LK_UNLOCKED)
1444be6847d7SJohn Baldwin 		db_printf("UNLOCKED\n");
1445047dd67eSAttilio Rao 	else if (lk->lk_lock & LK_SHARE)
1446047dd67eSAttilio Rao 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1447047dd67eSAttilio Rao 	else {
1448047dd67eSAttilio Rao 		td = lockmgr_xholder(lk);
1449047dd67eSAttilio Rao 		if (td == (struct thread *)LK_KERNPROC)
1450047dd67eSAttilio Rao 			db_printf("XLOCK: LK_KERNPROC\n");
1451047dd67eSAttilio Rao 		else
1452047dd67eSAttilio Rao 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1453047dd67eSAttilio Rao 			    td->td_tid, td->td_proc->p_pid,
1454047dd67eSAttilio Rao 			    td->td_proc->p_comm);
1455047dd67eSAttilio Rao 		if (lockmgr_recursed(lk))
1456047dd67eSAttilio Rao 			db_printf(" recursed: %d\n", lk->lk_recurse);
1457047dd67eSAttilio Rao 	}
1458047dd67eSAttilio Rao 	db_printf(" waiters: ");
1459047dd67eSAttilio Rao 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1460047dd67eSAttilio Rao 	case LK_SHARED_WAITERS:
1461047dd67eSAttilio Rao 		db_printf("shared\n");
1462e5023dd9SEdward Tomasz Napierala 		break;
1463047dd67eSAttilio Rao 	case LK_EXCLUSIVE_WAITERS:
1464047dd67eSAttilio Rao 		db_printf("exclusive\n");
1465047dd67eSAttilio Rao 		break;
1466047dd67eSAttilio Rao 	case LK_ALL_WAITERS:
1467047dd67eSAttilio Rao 		db_printf("shared and exclusive\n");
1468047dd67eSAttilio Rao 		break;
1469047dd67eSAttilio Rao 	default:
1470047dd67eSAttilio Rao 		db_printf("none\n");
1471047dd67eSAttilio Rao 	}
1472651175c9SAttilio Rao 	db_printf(" spinners: ");
1473651175c9SAttilio Rao 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1474651175c9SAttilio Rao 		db_printf("exclusive\n");
1475651175c9SAttilio Rao 	else
1476651175c9SAttilio Rao 		db_printf("none\n");
1477be6847d7SJohn Baldwin }
1478be6847d7SJohn Baldwin #endif
1479