xref: /freebsd/sys/kern/kern_lock.c (revision 1c6987ebc5f9272620172b2d31245e0cad428a63)
19454b2d8SWarner Losh /*-
28a36da99SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
38a36da99SPedro F. Giffuni  *
4047dd67eSAttilio Rao  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
5047dd67eSAttilio Rao  * All rights reserved.
653bf4bb2SPeter Wemm  *
753bf4bb2SPeter Wemm  * Redistribution and use in source and binary forms, with or without
853bf4bb2SPeter Wemm  * modification, are permitted provided that the following conditions
953bf4bb2SPeter Wemm  * are met:
1053bf4bb2SPeter Wemm  * 1. Redistributions of source code must retain the above copyright
11047dd67eSAttilio Rao  *    notice(s), this list of conditions and the following disclaimer as
12047dd67eSAttilio Rao  *    the first lines of this file unmodified other than the possible
13047dd67eSAttilio Rao  *    addition of one or more copyright notices.
1453bf4bb2SPeter Wemm  * 2. Redistributions in binary form must reproduce the above copyright
15047dd67eSAttilio Rao  *    notice(s), this list of conditions and the following disclaimer in the
1653bf4bb2SPeter Wemm  *    documentation and/or other materials provided with the distribution.
1753bf4bb2SPeter Wemm  *
18047dd67eSAttilio Rao  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19047dd67eSAttilio Rao  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20047dd67eSAttilio Rao  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21047dd67eSAttilio Rao  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22047dd67eSAttilio Rao  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23047dd67eSAttilio Rao  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24047dd67eSAttilio Rao  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25047dd67eSAttilio Rao  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2653bf4bb2SPeter Wemm  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27047dd67eSAttilio Rao  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28047dd67eSAttilio Rao  * DAMAGE.
2953bf4bb2SPeter Wemm  */
3053bf4bb2SPeter Wemm 
31047dd67eSAttilio Rao #include "opt_ddb.h"
32f5f9340bSFabien Thomas #include "opt_hwpmc_hooks.h"
33047dd67eSAttilio Rao 
34677b542eSDavid E. O'Brien #include <sys/cdefs.h>
35677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
36677b542eSDavid E. O'Brien 
3753bf4bb2SPeter Wemm #include <sys/param.h>
38cd2fe4e6SAttilio Rao #include <sys/kdb.h>
3961d80e90SJohn Baldwin #include <sys/ktr.h>
4053bf4bb2SPeter Wemm #include <sys/lock.h>
41047dd67eSAttilio Rao #include <sys/lock_profile.h>
428302d183SBruce Evans #include <sys/lockmgr.h>
43d8881ca3SJohn Baldwin #include <sys/mutex.h>
448302d183SBruce Evans #include <sys/proc.h>
45047dd67eSAttilio Rao #include <sys/sleepqueue.h>
46e8ddb61dSJeff Roberson #ifdef DEBUG_LOCKS
47e8ddb61dSJeff Roberson #include <sys/stack.h>
48e8ddb61dSJeff Roberson #endif
49651175c9SAttilio Rao #include <sys/sysctl.h>
50047dd67eSAttilio Rao #include <sys/systm.h>
5153bf4bb2SPeter Wemm 
52047dd67eSAttilio Rao #include <machine/cpu.h>
536efc8a16SAttilio Rao 
54be6847d7SJohn Baldwin #ifdef DDB
55be6847d7SJohn Baldwin #include <ddb/ddb.h>
56047dd67eSAttilio Rao #endif
57047dd67eSAttilio Rao 
58f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
59f5f9340bSFabien Thomas #include <sys/pmckern.h>
60f5f9340bSFabien Thomas PMC_SOFT_DECLARE( , , lock, failed);
61f5f9340bSFabien Thomas #endif
62f5f9340bSFabien Thomas 
63651175c9SAttilio Rao CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
64651175c9SAttilio Rao     (LK_ADAPTIVE | LK_NOSHARE));
65651175c9SAttilio Rao CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
66651175c9SAttilio Rao     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
67047dd67eSAttilio Rao 
68047dd67eSAttilio Rao #define	SQ_EXCLUSIVE_QUEUE	0
69047dd67eSAttilio Rao #define	SQ_SHARED_QUEUE		1
70047dd67eSAttilio Rao 
71047dd67eSAttilio Rao #ifndef INVARIANTS
72047dd67eSAttilio Rao #define	_lockmgr_assert(lk, what, file, line)
73047dd67eSAttilio Rao #endif
74ce1c953eSMark Johnston 
75047dd67eSAttilio Rao #define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
76047dd67eSAttilio Rao #define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
77047dd67eSAttilio Rao 
78047dd67eSAttilio Rao #ifndef DEBUG_LOCKS
79047dd67eSAttilio Rao #define	STACK_PRINT(lk)
80047dd67eSAttilio Rao #define	STACK_SAVE(lk)
81047dd67eSAttilio Rao #define	STACK_ZERO(lk)
82047dd67eSAttilio Rao #else
83047dd67eSAttilio Rao #define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
84047dd67eSAttilio Rao #define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
85047dd67eSAttilio Rao #define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
86047dd67eSAttilio Rao #endif
87047dd67eSAttilio Rao 
88047dd67eSAttilio Rao #define	LOCK_LOG2(lk, string, arg1, arg2)				\
89047dd67eSAttilio Rao 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
90047dd67eSAttilio Rao 		CTR2(KTR_LOCK, (string), (arg1), (arg2))
91047dd67eSAttilio Rao #define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
92047dd67eSAttilio Rao 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
93047dd67eSAttilio Rao 		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
94047dd67eSAttilio Rao 
95e5f94314SAttilio Rao #define	GIANT_DECLARE							\
96e5f94314SAttilio Rao 	int _i = 0;							\
97e5f94314SAttilio Rao 	WITNESS_SAVE_DECL(Giant)
98e5f94314SAttilio Rao #define	GIANT_RESTORE() do {						\
99e5f94314SAttilio Rao 	if (_i > 0) {							\
100e5f94314SAttilio Rao 		while (_i--)						\
101e5f94314SAttilio Rao 			mtx_lock(&Giant);				\
102e5f94314SAttilio Rao 		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
103e5f94314SAttilio Rao 	}								\
104e5f94314SAttilio Rao } while (0)
105e5f94314SAttilio Rao #define	GIANT_SAVE() do {						\
106e5f94314SAttilio Rao 	if (mtx_owned(&Giant)) {					\
107e5f94314SAttilio Rao 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
108e5f94314SAttilio Rao 		while (mtx_owned(&Giant)) {				\
109e5f94314SAttilio Rao 			_i++;						\
110e5f94314SAttilio Rao 			mtx_unlock(&Giant);				\
111e5f94314SAttilio Rao 		}							\
112e5f94314SAttilio Rao 	}								\
113e5f94314SAttilio Rao } while (0)
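
/*
 * A minimal usage sketch of the GIANT_*() macros above (illustrative
 * only; the actual caller in this file is sleeplk()).  Giant may be held
 * recursively, so GIANT_SAVE() drops it completely while recording the
 * recursion depth in _i, and GIANT_RESTORE() reacquires it that many
 * times, with the WITNESS state preserved across the gap:
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();
 *	error = sleepq_wait_sig(&lk->lock_object, pri);
 *	GIANT_RESTORE();
 */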
114e5f94314SAttilio Rao 
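/*
 * LK_CAN_SHARE() grants a shared request only when the lock is already
 * shared and either no exclusive waiter or spinner is queued, or the
 * requesting thread is exempt from writer preference: it already holds
 * shared locks (unless LK_NODDLKTREAT was passed) or it is marked with
 * TDP_DEADLKTREAT.  Both exemptions exist to avoid self-deadlock.
 */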
11572ba3c08SKonstantin Belousov #define	LK_CAN_SHARE(x, flags)						\
11672ba3c08SKonstantin Belousov 	(((x) & LK_SHARE) &&						\
11772ba3c08SKonstantin Belousov 	(((x) & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 0 ||	\
11872ba3c08SKonstantin Belousov 	(curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||	\
11972ba3c08SKonstantin Belousov 	(curthread->td_pflags & TDP_DEADLKTREAT)))
120e5f94314SAttilio Rao #define	LK_TRYOP(x)							\
121e5f94314SAttilio Rao 	((x) & LK_NOWAIT)
122e5f94314SAttilio Rao 
123e5f94314SAttilio Rao #define	LK_CAN_WITNESS(x)						\
124e5f94314SAttilio Rao 	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
125e5f94314SAttilio Rao #define	LK_TRYWIT(x)							\
126e5f94314SAttilio Rao 	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
127047dd67eSAttilio Rao 
128651175c9SAttilio Rao #define	LK_CAN_ADAPT(lk, f)						\
129651175c9SAttilio Rao 	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
130651175c9SAttilio Rao 	((f) & LK_SLEEPFAIL) == 0)
131651175c9SAttilio Rao 
132047dd67eSAttilio Rao #define	lockmgr_disowned(lk)						\
133047dd67eSAttilio Rao 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
134047dd67eSAttilio Rao 
135047dd67eSAttilio Rao #define	lockmgr_xlocked(lk)						\
136047dd67eSAttilio Rao 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
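
/*
 * Layout of the lk_lock word assumed by the two predicates above and by
 * lockmgr_xholder() below: the low bits carry LK_SHARE plus the waiters
 * and spinners flags (LK_FLAGMASK), while the remaining bits hold either
 * the owning thread (exclusive mode) or the sharers count (shared mode).
 * A sketch of decoding it:
 *
 *	x = lk->lk_lock;
 *	if (x & LK_SHARE)
 *		sharers = LK_SHARERS(x);
 *	else
 *		owner = (struct thread *)LK_HOLDER(x);
 */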
137047dd67eSAttilio Rao 
138d576deedSPawel Jakub Dawidek static void	assert_lockmgr(const struct lock_object *lock, int how);
139047dd67eSAttilio Rao #ifdef DDB
140d576deedSPawel Jakub Dawidek static void	db_show_lockmgr(const struct lock_object *lock);
141be6847d7SJohn Baldwin #endif
1427faf4d90SDavide Italiano static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
143a5aedd68SStacey Son #ifdef KDTRACE_HOOKS
144d576deedSPawel Jakub Dawidek static int	owner_lockmgr(const struct lock_object *lock,
145d576deedSPawel Jakub Dawidek 		    struct thread **owner);
146a5aedd68SStacey Son #endif
1477faf4d90SDavide Italiano static uintptr_t unlock_lockmgr(struct lock_object *lock);
14861bd5e21SKip Macy 
14961bd5e21SKip Macy struct lock_class lock_class_lockmgr = {
1503ff6d229SJohn Baldwin 	.lc_name = "lockmgr",
151047dd67eSAttilio Rao 	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
152f9721b43SAttilio Rao 	.lc_assert = assert_lockmgr,
15361bd5e21SKip Macy #ifdef DDB
1546e21afd4SJohn Baldwin 	.lc_ddb_show = db_show_lockmgr,
15561bd5e21SKip Macy #endif
1566e21afd4SJohn Baldwin 	.lc_lock = lock_lockmgr,
157a5aedd68SStacey Son 	.lc_unlock = unlock_lockmgr,
158a5aedd68SStacey Son #ifdef KDTRACE_HOOKS
159a5aedd68SStacey Son 	.lc_owner = owner_lockmgr,
160a5aedd68SStacey Son #endif
16161bd5e21SKip Macy };
16261bd5e21SKip Macy 
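/*
 * Carries the sleep parameters (wait message, priority and timeout) from
 * __lockmgr_args() into the out-of-line hard paths; a NULL pointer makes
 * a hard path fall back to the defaults stored in the lock itself.
 */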
163*1c6987ebSMateusz Guzik struct lockmgr_wait {
164*1c6987ebSMateusz Guzik 	const char *iwmesg;
165*1c6987ebSMateusz Guzik 	int ipri;
166*1c6987ebSMateusz Guzik 	int itimo;
167*1c6987ebSMateusz Guzik };
168*1c6987ebSMateusz Guzik 
169c4a48867SMateusz Guzik static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
170c4a48867SMateusz Guzik     int flags);
171*1c6987ebSMateusz Guzik static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
172*1c6987ebSMateusz Guzik 
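/*
 * Common exit path for the functions below: drop the interlock if the
 * caller passed LK_INTERLOCK and wake up the swapper if a sleepqueue
 * broadcast made a swapped-out thread runnable.
 */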
173*1c6987ebSMateusz Guzik static void
174*1c6987ebSMateusz Guzik lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
175*1c6987ebSMateusz Guzik {
176*1c6987ebSMateusz Guzik 	struct lock_class *class;
177*1c6987ebSMateusz Guzik 
178*1c6987ebSMateusz Guzik 	if (flags & LK_INTERLOCK) {
179*1c6987ebSMateusz Guzik 		class = LOCK_CLASS(ilk);
180*1c6987ebSMateusz Guzik 		class->lc_unlock(ilk);
181*1c6987ebSMateusz Guzik 	}
182*1c6987ebSMateusz Guzik 
183*1c6987ebSMateusz Guzik 	if (__predict_false(wakeup_swapper))
184*1c6987ebSMateusz Guzik 		kick_proc0();
185*1c6987ebSMateusz Guzik }
186c4a48867SMateusz Guzik 
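/*
 * The lockmgr_note_*() helpers below centralize the bookkeeping done on
 * every acquire and release: lock profiling, KTR logging, WITNESS
 * tracking and the per-thread lock counters used for deadlock avoidance.
 */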
187c4a48867SMateusz Guzik static void
188c4a48867SMateusz Guzik lockmgr_note_shared_acquire(struct lock *lk, int contested,
189c4a48867SMateusz Guzik     uint64_t waittime, const char *file, int line, int flags)
190c4a48867SMateusz Guzik {
191c4a48867SMateusz Guzik 
192c4a48867SMateusz Guzik 	lock_profile_obtain_lock_success(&lk->lock_object, contested, waittime,
193c4a48867SMateusz Guzik 	    file, line);
194c4a48867SMateusz Guzik 	LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
195c4a48867SMateusz Guzik 	WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
196c4a48867SMateusz Guzik 	TD_LOCKS_INC(curthread);
197c4a48867SMateusz Guzik 	TD_SLOCKS_INC(curthread);
198c4a48867SMateusz Guzik 	STACK_SAVE(lk);
199c4a48867SMateusz Guzik }
200c4a48867SMateusz Guzik 
201c4a48867SMateusz Guzik static void
202c4a48867SMateusz Guzik lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
203c4a48867SMateusz Guzik {
204c4a48867SMateusz Guzik 
205c4a48867SMateusz Guzik 	lock_profile_release_lock(&lk->lock_object);
206c4a48867SMateusz Guzik 	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
207c4a48867SMateusz Guzik 	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
208c4a48867SMateusz Guzik 	TD_LOCKS_DEC(curthread);
209c4a48867SMateusz Guzik 	TD_SLOCKS_DEC(curthread);
210c4a48867SMateusz Guzik }
211c4a48867SMateusz Guzik 
212c4a48867SMateusz Guzik static void
213c4a48867SMateusz Guzik lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
214c4a48867SMateusz Guzik     uint64_t waittime, const char *file, int line, int flags)
215c4a48867SMateusz Guzik {
216c4a48867SMateusz Guzik 
217c4a48867SMateusz Guzik 	lock_profile_obtain_lock_success(&lk->lock_object, contested, waittime,
218c4a48867SMateusz Guzik 	    file, line);
219c4a48867SMateusz Guzik 	LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
220c4a48867SMateusz Guzik 	WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
221c4a48867SMateusz Guzik 	    line);
222c4a48867SMateusz Guzik 	TD_LOCKS_INC(curthread);
223c4a48867SMateusz Guzik 	STACK_SAVE(lk);
224c4a48867SMateusz Guzik }
225c4a48867SMateusz Guzik 
226c4a48867SMateusz Guzik static void
227c4a48867SMateusz Guzik lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
228c4a48867SMateusz Guzik {
229c4a48867SMateusz Guzik 
230c4a48867SMateusz Guzik 	lock_profile_release_lock(&lk->lock_object);
231c4a48867SMateusz Guzik 	LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
232c4a48867SMateusz Guzik 	    line);
233c4a48867SMateusz Guzik 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
234c4a48867SMateusz Guzik 	TD_LOCKS_DEC(curthread);
235c4a48867SMateusz Guzik }
236c4a48867SMateusz Guzik 
237c4a48867SMateusz Guzik static void
238c4a48867SMateusz Guzik lockmgr_note_exclusive_upgrade(struct lock *lk, const char *file, int line,
239c4a48867SMateusz Guzik     int flags)
240c4a48867SMateusz Guzik {
241c4a48867SMateusz Guzik 
242c4a48867SMateusz Guzik 	LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
243c4a48867SMateusz Guzik 	    line);
244c4a48867SMateusz Guzik 	WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
245c4a48867SMateusz Guzik 	    LK_TRYWIT(flags), file, line);
246c4a48867SMateusz Guzik 	TD_SLOCKS_DEC(curthread);
247c4a48867SMateusz Guzik }
248c4a48867SMateusz Guzik 
249047dd67eSAttilio Rao static __inline struct thread *
250d576deedSPawel Jakub Dawidek lockmgr_xholder(const struct lock *lk)
251047dd67eSAttilio Rao {
252047dd67eSAttilio Rao 	uintptr_t x;
253047dd67eSAttilio Rao 
254047dd67eSAttilio Rao 	x = lk->lk_lock;
255047dd67eSAttilio Rao 	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
256047dd67eSAttilio Rao }
25784887fa3SAttilio Rao 
25853bf4bb2SPeter Wemm /*
259047dd67eSAttilio Rao  * This function assumes the sleepq_lock is held and returns with it unheld.
260047dd67eSAttilio Rao  * It also assumes the generic interlock is sane and was previously checked.
261047dd67eSAttilio Rao  * If LK_INTERLOCK is specified, the interlock is not reacquired after the
262047dd67eSAttilio Rao  * sleep.
26353bf4bb2SPeter Wemm  */
264047dd67eSAttilio Rao static __inline int
265047dd67eSAttilio Rao sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
266047dd67eSAttilio Rao     const char *wmesg, int pri, int timo, int queue)
267047dd67eSAttilio Rao {
268e5f94314SAttilio Rao 	GIANT_DECLARE;
269047dd67eSAttilio Rao 	struct lock_class *class;
270047dd67eSAttilio Rao 	int catch, error;
27153bf4bb2SPeter Wemm 
272047dd67eSAttilio Rao 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
2735047a8fdSAttilio Rao 	catch = pri & PCATCH;
274047dd67eSAttilio Rao 	pri &= PRIMASK;
275047dd67eSAttilio Rao 	error = 0;
276047dd67eSAttilio Rao 
277047dd67eSAttilio Rao 	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
278047dd67eSAttilio Rao 	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
279047dd67eSAttilio Rao 
280047dd67eSAttilio Rao 	if (flags & LK_INTERLOCK)
281047dd67eSAttilio Rao 		class->lc_unlock(ilk);
2822028867dSAttilio Rao 	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
2832028867dSAttilio Rao 		lk->lk_exslpfail++;
284e5f94314SAttilio Rao 	GIANT_SAVE();
285047dd67eSAttilio Rao 	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
286047dd67eSAttilio Rao 	    SLEEPQ_INTERRUPTIBLE : 0), queue);
287a115fb62SHans Petter Selasky 	if ((flags & LK_TIMELOCK) && timo)
288047dd67eSAttilio Rao 		sleepq_set_timeout(&lk->lock_object, timo);
289a115fb62SHans Petter Selasky 
290047dd67eSAttilio Rao 	/*
291047dd67eSAttilio Rao 	 * Pick the sleep primitive based on the timeout and PCATCH settings.
292047dd67eSAttilio Rao 	 */
293047dd67eSAttilio Rao 	if ((flags & LK_TIMELOCK) && timo && catch)
294047dd67eSAttilio Rao 		error = sleepq_timedwait_sig(&lk->lock_object, pri);
295047dd67eSAttilio Rao 	else if ((flags & LK_TIMELOCK) && timo)
296047dd67eSAttilio Rao 		error = sleepq_timedwait(&lk->lock_object, pri);
297047dd67eSAttilio Rao 	else if (catch)
298047dd67eSAttilio Rao 		error = sleepq_wait_sig(&lk->lock_object, pri);
299047dd67eSAttilio Rao 	else
300047dd67eSAttilio Rao 		sleepq_wait(&lk->lock_object, pri);
301e5f94314SAttilio Rao 	GIANT_RESTORE();
302047dd67eSAttilio Rao 	if ((flags & LK_SLEEPFAIL) && error == 0)
303047dd67eSAttilio Rao 		error = ENOLCK;
304047dd67eSAttilio Rao 
305047dd67eSAttilio Rao 	return (error);
306047dd67eSAttilio Rao }
307047dd67eSAttilio Rao 
308da7bbd2cSJohn Baldwin static __inline int
309047dd67eSAttilio Rao wakeupshlk(struct lock *lk, const char *file, int line)
310047dd67eSAttilio Rao {
311047dd67eSAttilio Rao 	uintptr_t v, x;
3122028867dSAttilio Rao 	u_int realexslp;
313da7bbd2cSJohn Baldwin 	int queue, wakeup_swapper;
314047dd67eSAttilio Rao 
315da7bbd2cSJohn Baldwin 	wakeup_swapper = 0;
316047dd67eSAttilio Rao 	for (;;) {
317047dd67eSAttilio Rao 		x = lk->lk_lock;
318*1c6987ebSMateusz Guzik 		if (lockmgr_sunlock_try(lk, &x))
319047dd67eSAttilio Rao 			break;
320047dd67eSAttilio Rao 
321047dd67eSAttilio Rao 		/*
322047dd67eSAttilio Rao 		 * We should have a sharer with waiters, so enter the hard
323047dd67eSAttilio Rao 		 * path in order to handle wakeups correctly.
324047dd67eSAttilio Rao 		 */
325047dd67eSAttilio Rao 		sleepq_lock(&lk->lock_object);
326651175c9SAttilio Rao 		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
327047dd67eSAttilio Rao 		v = LK_UNLOCKED;
328047dd67eSAttilio Rao 
329047dd67eSAttilio Rao 		/*
330047dd67eSAttilio Rao 		 * If the lock has exclusive waiters, give them preference in
331047dd67eSAttilio Rao 		 * order to avoid a deadlock with the shared runners-up.
3322028867dSAttilio Rao 		 * If interruptible sleeps left the exclusive queue empty,
3332028867dSAttilio Rao 		 * avoid starving the threads sleeping on the shared queue
3342028867dSAttilio Rao 		 * by giving them precedence and clearing the exclusive
3352028867dSAttilio Rao 		 * waiters bit anyway.
336c636ba83SAttilio Rao 		 * Please note that the lk_exslpfail count may not reflect
337c636ba83SAttilio Rao 		 * the real number of waiters with the LK_SLEEPFAIL flag on,
338e3043798SPedro F. Giffuni 		 * because LK_SLEEPFAIL may be combined with interruptible
339aab9c8c2SAttilio Rao 		 * sleeps, so lk_exslpfail should be considered an upper
340aab9c8c2SAttilio Rao 		 * bound, including the edge cases.
341047dd67eSAttilio Rao 		 */
3422028867dSAttilio Rao 		realexslp = sleepq_sleepcnt(&lk->lock_object,
3432028867dSAttilio Rao 		    SQ_EXCLUSIVE_QUEUE);
3442028867dSAttilio Rao 		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
3452028867dSAttilio Rao 			if (lk->lk_exslpfail < realexslp) {
3462028867dSAttilio Rao 				lk->lk_exslpfail = 0;
347047dd67eSAttilio Rao 				queue = SQ_EXCLUSIVE_QUEUE;
348047dd67eSAttilio Rao 				v |= (x & LK_SHARED_WAITERS);
349047dd67eSAttilio Rao 			} else {
3502028867dSAttilio Rao 				lk->lk_exslpfail = 0;
3512028867dSAttilio Rao 				LOCK_LOG2(lk,
3522028867dSAttilio Rao 				    "%s: %p has only LK_SLEEPFAIL sleepers",
3532028867dSAttilio Rao 				    __func__, lk);
3542028867dSAttilio Rao 				LOCK_LOG2(lk,
3552028867dSAttilio Rao 			    "%s: %p waking up threads on the exclusive queue",
3562028867dSAttilio Rao 				    __func__, lk);
3572028867dSAttilio Rao 				wakeup_swapper =
3582028867dSAttilio Rao 				    sleepq_broadcast(&lk->lock_object,
3592028867dSAttilio Rao 				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
3602028867dSAttilio Rao 				queue = SQ_SHARED_QUEUE;
3612028867dSAttilio Rao 			}
3622028867dSAttilio Rao 
3632028867dSAttilio Rao 		} else {
3649dbf7a62SAttilio Rao 
3659dbf7a62SAttilio Rao 			/*
3669dbf7a62SAttilio Rao 			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
3679dbf7a62SAttilio Rao 			 * and using interruptible sleeps/timeout may have
3689dbf7a62SAttilio Rao 			 * left spurious lk_exslpfail counts on, so clean
3699dbf7a62SAttilio Rao 			 * them up anyway.
3709dbf7a62SAttilio Rao 			 */
3719dbf7a62SAttilio Rao 			lk->lk_exslpfail = 0;
372047dd67eSAttilio Rao 			queue = SQ_SHARED_QUEUE;
373047dd67eSAttilio Rao 		}
374047dd67eSAttilio Rao 
3757f9f80ceSAttilio Rao 		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
376047dd67eSAttilio Rao 		    v)) {
377047dd67eSAttilio Rao 			sleepq_release(&lk->lock_object);
378047dd67eSAttilio Rao 			continue;
379047dd67eSAttilio Rao 		}
380047dd67eSAttilio Rao 		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
381047dd67eSAttilio Rao 		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
382047dd67eSAttilio Rao 		    "exclusive");
3832028867dSAttilio Rao 		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
384da7bbd2cSJohn Baldwin 		    0, queue);
385047dd67eSAttilio Rao 		sleepq_release(&lk->lock_object);
386047dd67eSAttilio Rao 		break;
387047dd67eSAttilio Rao 	}
388047dd67eSAttilio Rao 
389c4a48867SMateusz Guzik 	lockmgr_note_shared_release(lk, file, line);
390da7bbd2cSJohn Baldwin 	return (wakeup_swapper);
391047dd67eSAttilio Rao }
392047dd67eSAttilio Rao 
393047dd67eSAttilio Rao static void
394d576deedSPawel Jakub Dawidek assert_lockmgr(const struct lock_object *lock, int what)
395f9721b43SAttilio Rao {
396f9721b43SAttilio Rao 
397f9721b43SAttilio Rao 	panic("lockmgr locks do not support assertions");
398f9721b43SAttilio Rao }
399f9721b43SAttilio Rao 
400047dd67eSAttilio Rao static void
4017faf4d90SDavide Italiano lock_lockmgr(struct lock_object *lock, uintptr_t how)
4026e21afd4SJohn Baldwin {
4036e21afd4SJohn Baldwin 
4046e21afd4SJohn Baldwin 	panic("lockmgr locks do not support sleep interlocking");
4056e21afd4SJohn Baldwin }
4066e21afd4SJohn Baldwin 
4077faf4d90SDavide Italiano static uintptr_t
4086e21afd4SJohn Baldwin unlock_lockmgr(struct lock_object *lock)
4096e21afd4SJohn Baldwin {
4106e21afd4SJohn Baldwin 
4116e21afd4SJohn Baldwin 	panic("lockmgr locks do not support sleep interlocking");
4126e21afd4SJohn Baldwin }
4136e21afd4SJohn Baldwin 
414a5aedd68SStacey Son #ifdef KDTRACE_HOOKS
415a5aedd68SStacey Son static int
416d576deedSPawel Jakub Dawidek owner_lockmgr(const struct lock_object *lock, struct thread **owner)
417a5aedd68SStacey Son {
418a5aedd68SStacey Son 
419a5aedd68SStacey Son 	panic("lockmgr locks do not support owner inquiring");
420a5aedd68SStacey Son }
421a5aedd68SStacey Son #endif
422a5aedd68SStacey Son 
42399448ed1SJohn Dyson void
424047dd67eSAttilio Rao lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
42599448ed1SJohn Dyson {
4266efc8a16SAttilio Rao 	int iflags;
4276efc8a16SAttilio Rao 
428047dd67eSAttilio Rao 	MPASS((flags & ~LK_INIT_MASK) == 0);
429353998acSAttilio Rao 	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
430353998acSAttilio Rao             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
431353998acSAttilio Rao             &lk->lk_lock));
43299448ed1SJohn Dyson 
433f0830182SAttilio Rao 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
434f0830182SAttilio Rao 	if (flags & LK_CANRECURSE)
435f0830182SAttilio Rao 		iflags |= LO_RECURSABLE;
436047dd67eSAttilio Rao 	if ((flags & LK_NODUP) == 0)
4376efc8a16SAttilio Rao 		iflags |= LO_DUPOK;
4387fbfba7bSAttilio Rao 	if (flags & LK_NOPROFILE)
4397fbfba7bSAttilio Rao 		iflags |= LO_NOPROFILE;
440047dd67eSAttilio Rao 	if ((flags & LK_NOWITNESS) == 0)
4416efc8a16SAttilio Rao 		iflags |= LO_WITNESS;
4427fbfba7bSAttilio Rao 	if (flags & LK_QUIET)
4437fbfba7bSAttilio Rao 		iflags |= LO_QUIET;
444e63091eaSMarcel Moolenaar 	if (flags & LK_IS_VNODE)
445e63091eaSMarcel Moolenaar 		iflags |= LO_IS_VNODE;
446651175c9SAttilio Rao 	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
447047dd67eSAttilio Rao 
448b5fb43e5SJohn Baldwin 	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
449047dd67eSAttilio Rao 	lk->lk_lock = LK_UNLOCKED;
450047dd67eSAttilio Rao 	lk->lk_recurse = 0;
4512028867dSAttilio Rao 	lk->lk_exslpfail = 0;
452047dd67eSAttilio Rao 	lk->lk_timo = timo;
453047dd67eSAttilio Rao 	lk->lk_pri = pri;
454047dd67eSAttilio Rao 	STACK_ZERO(lk);
45599448ed1SJohn Dyson }
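
/*
 * A hypothetical init/use/teardown sequence (the PVFS priority and all
 * names here are made up for illustration):
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, LK_CANRECURSE);
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&lk, LK_RELEASE, NULL);
 *	lockdestroy(&lk);
 */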
45699448ed1SJohn Dyson 
4573634d5b2SJohn Baldwin /*
4583634d5b2SJohn Baldwin  * XXX: Gross hacks to manipulate external lock flags after
4593634d5b2SJohn Baldwin  * initialization.  Used for certain vnode and buf locks.
4603634d5b2SJohn Baldwin  */
4613634d5b2SJohn Baldwin void
4623634d5b2SJohn Baldwin lockallowshare(struct lock *lk)
4633634d5b2SJohn Baldwin {
4643634d5b2SJohn Baldwin 
4653634d5b2SJohn Baldwin 	lockmgr_assert(lk, KA_XLOCKED);
4663634d5b2SJohn Baldwin 	lk->lock_object.lo_flags &= ~LK_NOSHARE;
4673634d5b2SJohn Baldwin }
4683634d5b2SJohn Baldwin 
4693634d5b2SJohn Baldwin void
470575e02d9SKonstantin Belousov lockdisableshare(struct lock *lk)
471575e02d9SKonstantin Belousov {
472575e02d9SKonstantin Belousov 
473575e02d9SKonstantin Belousov 	lockmgr_assert(lk, KA_XLOCKED);
474575e02d9SKonstantin Belousov 	lk->lock_object.lo_flags |= LK_NOSHARE;
475575e02d9SKonstantin Belousov }
476575e02d9SKonstantin Belousov 
477575e02d9SKonstantin Belousov void
4783634d5b2SJohn Baldwin lockallowrecurse(struct lock *lk)
4793634d5b2SJohn Baldwin {
4803634d5b2SJohn Baldwin 
4813634d5b2SJohn Baldwin 	lockmgr_assert(lk, KA_XLOCKED);
4823634d5b2SJohn Baldwin 	lk->lock_object.lo_flags |= LO_RECURSABLE;
4833634d5b2SJohn Baldwin }
4843634d5b2SJohn Baldwin 
4853634d5b2SJohn Baldwin void
4863634d5b2SJohn Baldwin lockdisablerecurse(struct lock *lk)
4873634d5b2SJohn Baldwin {
4883634d5b2SJohn Baldwin 
4893634d5b2SJohn Baldwin 	lockmgr_assert(lk, KA_XLOCKED);
4903634d5b2SJohn Baldwin 	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
4913634d5b2SJohn Baldwin }
4923634d5b2SJohn Baldwin 
493a18b1f1dSJason Evans void
494047dd67eSAttilio Rao lockdestroy(struct lock *lk)
495a18b1f1dSJason Evans {
496c91fcee7SJohn Baldwin 
497047dd67eSAttilio Rao 	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
498047dd67eSAttilio Rao 	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
4992028867dSAttilio Rao 	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
500047dd67eSAttilio Rao 	lock_destroy(&lk->lock_object);
501047dd67eSAttilio Rao }
502047dd67eSAttilio Rao 
503c4a48867SMateusz Guzik static bool __always_inline
504c4a48867SMateusz Guzik lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags)
505c4a48867SMateusz Guzik {
506c4a48867SMateusz Guzik 
507c4a48867SMateusz Guzik 	/*
508c4a48867SMateusz Guzik 	 * If no other thread has an exclusive lock and
509c4a48867SMateusz Guzik 	 * no exclusive waiter is present, bump the count of
510c4a48867SMateusz Guzik 	 * sharers.  Since we have to preserve the state of
511c4a48867SMateusz Guzik 	 * waiters, if we fail to acquire the shared lock
512c4a48867SMateusz Guzik 	 * loop back and retry.
513c4a48867SMateusz Guzik 	 */
514c4a48867SMateusz Guzik 	*xp = lk->lk_lock;
515c4a48867SMateusz Guzik 	while (LK_CAN_SHARE(*xp, flags)) {
516c4a48867SMateusz Guzik 		if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
517c4a48867SMateusz Guzik 		    *xp + LK_ONE_SHARER)) {
518c4a48867SMateusz Guzik 			return (true);
519c4a48867SMateusz Guzik 		}
520c4a48867SMateusz Guzik 	}
521c4a48867SMateusz Guzik 	return (false);
522c4a48867SMateusz Guzik }
523c4a48867SMateusz Guzik 
524c4a48867SMateusz Guzik static bool __always_inline
525*1c6987ebSMateusz Guzik lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
526c4a48867SMateusz Guzik {
527c4a48867SMateusz Guzik 
528c4a48867SMateusz Guzik 	for (;;) {
529c4a48867SMateusz Guzik 		/*
530c4a48867SMateusz Guzik 		 * If there is more than one shared lock held, just drop one
531c4a48867SMateusz Guzik 		 * and return.
532c4a48867SMateusz Guzik 		 */
533*1c6987ebSMateusz Guzik 		if (LK_SHARERS(*xp) > 1) {
534*1c6987ebSMateusz Guzik 			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
535*1c6987ebSMateusz Guzik 			    *xp - LK_ONE_SHARER))
536c4a48867SMateusz Guzik 				return (true);
537c4a48867SMateusz Guzik 			continue;
538c4a48867SMateusz Guzik 		}
539c4a48867SMateusz Guzik 
540c4a48867SMateusz Guzik 		/*
541c4a48867SMateusz Guzik 		 * If there are no waiters on the exclusive queue, drop the
542c4a48867SMateusz Guzik 		 * lock quickly.
543c4a48867SMateusz Guzik 		 */
544*1c6987ebSMateusz Guzik 		if ((*xp & LK_ALL_WAITERS) == 0) {
545*1c6987ebSMateusz Guzik 			MPASS((*xp & ~LK_EXCLUSIVE_SPINNERS) ==
546c4a48867SMateusz Guzik 			    LK_SHARERS_LOCK(1));
547*1c6987ebSMateusz Guzik 			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
548c4a48867SMateusz Guzik 			    LK_UNLOCKED))
549c4a48867SMateusz Guzik 				return (true);
550c4a48867SMateusz Guzik 			continue;
551c4a48867SMateusz Guzik 		}
552c4a48867SMateusz Guzik 		break;
553c4a48867SMateusz Guzik 	}
554c4a48867SMateusz Guzik 	return (false);
555c4a48867SMateusz Guzik }
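
/*
 * Both try helpers above rely on the fcmpset family of atomics: on
 * failure the observed value of the lock word is written back into *xp,
 * so the loops retry without re-reading lk->lk_lock themselves.  A
 * simplified sharer bump, ignoring the LK_CAN_SHARE() check the real
 * helper performs, would be:
 *
 *	x = lk->lk_lock;
 *	while (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, x + LK_ONE_SHARER))
 *		continue;
 */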
556c4a48867SMateusz Guzik 
557*1c6987ebSMateusz Guzik static __noinline int
558*1c6987ebSMateusz Guzik lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
559*1c6987ebSMateusz Guzik     const char *file, int line, struct lockmgr_wait *lwa)
560c4a48867SMateusz Guzik {
561*1c6987ebSMateusz Guzik 	uintptr_t tid, x;
562*1c6987ebSMateusz Guzik 	int error = 0;
563047dd67eSAttilio Rao 	const char *iwmesg;
564*1c6987ebSMateusz Guzik 	int ipri, itimo;
565*1c6987ebSMateusz Guzik 
5661723a064SJeff Roberson #ifdef LOCK_PROFILING
5671723a064SJeff Roberson 	uint64_t waittime = 0;
5681723a064SJeff Roberson 	int contested = 0;
5691723a064SJeff Roberson #endif
570047dd67eSAttilio Rao 
571*1c6987ebSMateusz Guzik 	if (__predict_false(panicstr != NULL))
572*1c6987ebSMateusz Guzik 		goto out;
573*1c6987ebSMateusz Guzik 
574047dd67eSAttilio Rao 	tid = (uintptr_t)curthread;
575047dd67eSAttilio Rao 
576e5f94314SAttilio Rao 	if (LK_CAN_WITNESS(flags))
577e5f94314SAttilio Rao 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
57824150d37SJohn Baldwin 		    file, line, flags & LK_INTERLOCK ? ilk : NULL);
579047dd67eSAttilio Rao 	for (;;) {
580c4a48867SMateusz Guzik 		if (lockmgr_slock_try(lk, &x, flags))
581047dd67eSAttilio Rao 			break;
582f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
583f5f9340bSFabien Thomas 		PMC_SOFT_CALL( , , lock, failed);
584f5f9340bSFabien Thomas #endif
585047dd67eSAttilio Rao 		lock_profile_obtain_lock_failed(&lk->lock_object,
586047dd67eSAttilio Rao 		    &contested, &waittime);
587047dd67eSAttilio Rao 
588047dd67eSAttilio Rao 		/*
58996f1567fSKonstantin Belousov 		 * If the lock is already held by curthread in
590047dd67eSAttilio Rao 		 * exclusive mode, avoid a deadlock.
591047dd67eSAttilio Rao 		 */
592047dd67eSAttilio Rao 		if (LK_HOLDER(x) == tid) {
593047dd67eSAttilio Rao 			LOCK_LOG2(lk,
59496f1567fSKonstantin Belousov 			    "%s: %p already held in exclusive mode",
595047dd67eSAttilio Rao 			    __func__, lk);
596047dd67eSAttilio Rao 			error = EDEADLK;
597047dd67eSAttilio Rao 			break;
598a18b1f1dSJason Evans 		}
599a18b1f1dSJason Evans 
600a18b1f1dSJason Evans 		/*
601047dd67eSAttilio Rao 		 * If the operation is not allowed to sleep, just
602047dd67eSAttilio Rao 		 * give up and return.
603d7a7e179SAttilio Rao 		 */
604047dd67eSAttilio Rao 		if (LK_TRYOP(flags)) {
605047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p fails the try operation",
606047dd67eSAttilio Rao 			    __func__, lk);
607047dd67eSAttilio Rao 			error = EBUSY;
608047dd67eSAttilio Rao 			break;
609047dd67eSAttilio Rao 		}
610047dd67eSAttilio Rao 
611047dd67eSAttilio Rao 		/*
612047dd67eSAttilio Rao 		 * Acquire the sleepqueue chain lock because we
613047dd67eSAttilio Rao 		 * probably will need to manipulate the waiters flags.
614047dd67eSAttilio Rao 		 */
615047dd67eSAttilio Rao 		sleepq_lock(&lk->lock_object);
616047dd67eSAttilio Rao 		x = lk->lk_lock;
617047dd67eSAttilio Rao 
618047dd67eSAttilio Rao 		/*
619047dd67eSAttilio Rao 		 * If the lock can be acquired in shared mode, try
620047dd67eSAttilio Rao 		 * again.
621047dd67eSAttilio Rao 		 */
62272ba3c08SKonstantin Belousov 		if (LK_CAN_SHARE(x, flags)) {
623047dd67eSAttilio Rao 			sleepq_release(&lk->lock_object);
624047dd67eSAttilio Rao 			continue;
625047dd67eSAttilio Rao 		}
626047dd67eSAttilio Rao 
627047dd67eSAttilio Rao 		/*
628047dd67eSAttilio Rao 		 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
629047dd67eSAttilio Rao 		 * loop back and retry.
630047dd67eSAttilio Rao 		 */
631047dd67eSAttilio Rao 		if ((x & LK_SHARED_WAITERS) == 0) {
632047dd67eSAttilio Rao 			if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
633047dd67eSAttilio Rao 			    x | LK_SHARED_WAITERS)) {
634047dd67eSAttilio Rao 				sleepq_release(&lk->lock_object);
635047dd67eSAttilio Rao 				continue;
636047dd67eSAttilio Rao 			}
637047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p set shared waiters flag",
638047dd67eSAttilio Rao 			    __func__, lk);
639047dd67eSAttilio Rao 		}
640047dd67eSAttilio Rao 
641*1c6987ebSMateusz Guzik 		if (lwa == NULL) {
642*1c6987ebSMateusz Guzik 			iwmesg = lk->lock_object.lo_name;
643*1c6987ebSMateusz Guzik 			ipri = lk->lk_pri;
644*1c6987ebSMateusz Guzik 			itimo = lk->lk_timo;
645*1c6987ebSMateusz Guzik 		} else {
646*1c6987ebSMateusz Guzik 			iwmesg = lwa->iwmesg;
647*1c6987ebSMateusz Guzik 			ipri = lwa->ipri;
648*1c6987ebSMateusz Guzik 			itimo = lwa->itimo;
649*1c6987ebSMateusz Guzik 		}
650*1c6987ebSMateusz Guzik 
651047dd67eSAttilio Rao 		/*
652047dd67eSAttilio Rao 		 * Since we have been unable to acquire the
653047dd67eSAttilio Rao 		 * shared lock and the shared waiters flag is set,
654047dd67eSAttilio Rao 		 * we will sleep.
655047dd67eSAttilio Rao 		 */
656047dd67eSAttilio Rao 		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
657047dd67eSAttilio Rao 		    SQ_SHARED_QUEUE);
658047dd67eSAttilio Rao 		flags &= ~LK_INTERLOCK;
659047dd67eSAttilio Rao 		if (error) {
660047dd67eSAttilio Rao 			LOCK_LOG3(lk,
661047dd67eSAttilio Rao 			    "%s: interrupted sleep for %p with %d",
662047dd67eSAttilio Rao 			    __func__, lk, error);
663047dd67eSAttilio Rao 			break;
664047dd67eSAttilio Rao 		}
665047dd67eSAttilio Rao 		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
666047dd67eSAttilio Rao 		    __func__, lk);
667047dd67eSAttilio Rao 	}
668047dd67eSAttilio Rao 	if (error == 0) {
669c4a48867SMateusz Guzik #ifdef LOCK_PROFILING
670c4a48867SMateusz Guzik 		lockmgr_note_shared_acquire(lk, contested, waittime,
671c4a48867SMateusz Guzik 		    file, line, flags);
672c4a48867SMateusz Guzik #else
673c4a48867SMateusz Guzik 		lockmgr_note_shared_acquire(lk, 0, 0, file, line,
674c4a48867SMateusz Guzik 		    flags);
675c4a48867SMateusz Guzik #endif
676047dd67eSAttilio Rao 	}
677047dd67eSAttilio Rao 
678*1c6987ebSMateusz Guzik out:
679*1c6987ebSMateusz Guzik 	lockmgr_exit(flags, ilk, 0);
680*1c6987ebSMateusz Guzik 	return (error);
681047dd67eSAttilio Rao }
682047dd67eSAttilio Rao 
683*1c6987ebSMateusz Guzik static __noinline int
684*1c6987ebSMateusz Guzik lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
685*1c6987ebSMateusz Guzik     const char *file, int line, struct lockmgr_wait *lwa)
686*1c6987ebSMateusz Guzik {
687*1c6987ebSMateusz Guzik 	struct lock_class *class;
688*1c6987ebSMateusz Guzik 	uintptr_t tid, x, v;
689*1c6987ebSMateusz Guzik 	int error = 0;
690*1c6987ebSMateusz Guzik 	const char *iwmesg;
691*1c6987ebSMateusz Guzik 	int ipri, itimo;
6927c6fe803SKonstantin Belousov 
693*1c6987ebSMateusz Guzik #ifdef LOCK_PROFILING
694*1c6987ebSMateusz Guzik 	uint64_t waittime = 0;
695*1c6987ebSMateusz Guzik 	int contested = 0;
696*1c6987ebSMateusz Guzik #endif
697047dd67eSAttilio Rao 
698*1c6987ebSMateusz Guzik 	if (__predict_false(panicstr != NULL))
699*1c6987ebSMateusz Guzik 		goto out;
700*1c6987ebSMateusz Guzik 
701*1c6987ebSMateusz Guzik 	tid = (uintptr_t)curthread;
702*1c6987ebSMateusz Guzik 
703e5f94314SAttilio Rao 	if (LK_CAN_WITNESS(flags))
704e5f94314SAttilio Rao 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
70524150d37SJohn Baldwin 		    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
70624150d37SJohn Baldwin 		    ilk : NULL);
707047dd67eSAttilio Rao 
708047dd67eSAttilio Rao 	/*
70996f1567fSKonstantin Belousov 	 * If curthread already holds the lock and this one is
710047dd67eSAttilio Rao 	 * allowed to recurse, simply recurse on it.
711047dd67eSAttilio Rao 	 */
712047dd67eSAttilio Rao 	if (lockmgr_xlocked(lk)) {
713047dd67eSAttilio Rao 		if ((flags & LK_CANRECURSE) == 0 &&
714f0830182SAttilio Rao 		    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
715047dd67eSAttilio Rao 			/*
716047dd67eSAttilio Rao 			 * If this is a try operation, just give up and
717047dd67eSAttilio Rao 			 * return instead of panicking.
718047dd67eSAttilio Rao 			 */
719047dd67eSAttilio Rao 			if (LK_TRYOP(flags)) {
720047dd67eSAttilio Rao 				LOCK_LOG2(lk,
721047dd67eSAttilio Rao 				    "%s: %p fails the try operation",
722047dd67eSAttilio Rao 				    __func__, lk);
723047dd67eSAttilio Rao 				error = EBUSY;
724*1c6987ebSMateusz Guzik 				goto out;
725047dd67eSAttilio Rao 			}
726*1c6987ebSMateusz Guzik 			if (flags & LK_INTERLOCK) {
727*1c6987ebSMateusz Guzik 				class = LOCK_CLASS(ilk);
728047dd67eSAttilio Rao 				class->lc_unlock(ilk);
729*1c6987ebSMateusz Guzik 			}
730047dd67eSAttilio Rao 	panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
731047dd67eSAttilio Rao 			    __func__, lk->lock_object.lo_name, file, line);
732047dd67eSAttilio Rao 		}
733047dd67eSAttilio Rao 		lk->lk_recurse++;
734047dd67eSAttilio Rao 		LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
735047dd67eSAttilio Rao 		LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
736047dd67eSAttilio Rao 		    lk->lk_recurse, file, line);
737e5f94314SAttilio Rao 		WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
738e5f94314SAttilio Rao 		    LK_TRYWIT(flags), file, line);
739047dd67eSAttilio Rao 		TD_LOCKS_INC(curthread);
740*1c6987ebSMateusz Guzik 		goto out;
741047dd67eSAttilio Rao 	}
742047dd67eSAttilio Rao 
743fc4f686dSMateusz Guzik 	for (;;) {
744fc4f686dSMateusz Guzik 		if (lk->lk_lock == LK_UNLOCKED &&
745fc4f686dSMateusz Guzik 		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
746fc4f686dSMateusz Guzik 			break;
747f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
748f5f9340bSFabien Thomas 		PMC_SOFT_CALL( , , lock, failed);
749f5f9340bSFabien Thomas #endif
750047dd67eSAttilio Rao 		lock_profile_obtain_lock_failed(&lk->lock_object,
751047dd67eSAttilio Rao 		    &contested, &waittime);
752047dd67eSAttilio Rao 
753047dd67eSAttilio Rao 		/*
754047dd67eSAttilio Rao 		 * If the operation is not allowed to sleep, just
755047dd67eSAttilio Rao 		 * give up and return.
756047dd67eSAttilio Rao 		 */
757047dd67eSAttilio Rao 		if (LK_TRYOP(flags)) {
758047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p fails the try operation",
759047dd67eSAttilio Rao 			    __func__, lk);
760047dd67eSAttilio Rao 			error = EBUSY;
761047dd67eSAttilio Rao 			break;
762047dd67eSAttilio Rao 		}
763047dd67eSAttilio Rao 
764047dd67eSAttilio Rao 		/*
765047dd67eSAttilio Rao 		 * Acquire the sleepqueue chain lock because we
766047dd67eSAttilio Rao 		 * probably will need to manipulate the waiters flags.
767047dd67eSAttilio Rao 		 */
768047dd67eSAttilio Rao 		sleepq_lock(&lk->lock_object);
769047dd67eSAttilio Rao 		x = lk->lk_lock;
770047dd67eSAttilio Rao 
771047dd67eSAttilio Rao 		/*
772047dd67eSAttilio Rao 		 * If the lock has been released while we spun on
773047dd67eSAttilio Rao 		 * the sleepqueue chain lock, just try again.
774047dd67eSAttilio Rao 		 */
775047dd67eSAttilio Rao 		if (x == LK_UNLOCKED) {
776047dd67eSAttilio Rao 			sleepq_release(&lk->lock_object);
777047dd67eSAttilio Rao 			continue;
778047dd67eSAttilio Rao 		}
779047dd67eSAttilio Rao 
780047dd67eSAttilio Rao 		/*
781047dd67eSAttilio Rao 		 * The lock can be in the state where there is a
782047dd67eSAttilio Rao 		 * pending queue of waiters, but still no owner.
783047dd67eSAttilio Rao 		 * This happens when the lock is contested and an
784047dd67eSAttilio Rao 		 * owner is going to claim the lock.
785047dd67eSAttilio Rao 		 * If curthread is the one successfully acquiring it,
786047dd67eSAttilio Rao 		 * claim lock ownership and return, preserving waiters
787047dd67eSAttilio Rao 		 * flags.
788047dd67eSAttilio Rao 		 */
789651175c9SAttilio Rao 		v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
790651175c9SAttilio Rao 		if ((x & ~v) == LK_UNLOCKED) {
791651175c9SAttilio Rao 			v &= ~LK_EXCLUSIVE_SPINNERS;
792047dd67eSAttilio Rao 			if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
793047dd67eSAttilio Rao 			    tid | v)) {
794047dd67eSAttilio Rao 				sleepq_release(&lk->lock_object);
795047dd67eSAttilio Rao 				LOCK_LOG2(lk,
796047dd67eSAttilio Rao 				    "%s: %p claimed by a new writer",
797047dd67eSAttilio Rao 				    __func__, lk);
798047dd67eSAttilio Rao 				break;
799047dd67eSAttilio Rao 			}
800047dd67eSAttilio Rao 			sleepq_release(&lk->lock_object);
801047dd67eSAttilio Rao 			continue;
802047dd67eSAttilio Rao 		}
803047dd67eSAttilio Rao 
804047dd67eSAttilio Rao 		/*
805047dd67eSAttilio Rao 		 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
806047dd67eSAttilio Rao 		 * fail, loop back and retry.
807047dd67eSAttilio Rao 		 */
808047dd67eSAttilio Rao 		if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
809047dd67eSAttilio Rao 			if (!atomic_cmpset_ptr(&lk->lk_lock, x,
810047dd67eSAttilio Rao 			    x | LK_EXCLUSIVE_WAITERS)) {
811047dd67eSAttilio Rao 				sleepq_release(&lk->lock_object);
812047dd67eSAttilio Rao 				continue;
813047dd67eSAttilio Rao 			}
814047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p set excl waiters flag",
815047dd67eSAttilio Rao 			    __func__, lk);
816047dd67eSAttilio Rao 		}
817047dd67eSAttilio Rao 
818*1c6987ebSMateusz Guzik 		if (lwa == NULL) {
819*1c6987ebSMateusz Guzik 			iwmesg = lk->lock_object.lo_name;
820*1c6987ebSMateusz Guzik 			ipri = lk->lk_pri;
821*1c6987ebSMateusz Guzik 			itimo = lk->lk_timo;
822*1c6987ebSMateusz Guzik 		} else {
823*1c6987ebSMateusz Guzik 			iwmesg = lwa->iwmesg;
824*1c6987ebSMateusz Guzik 			ipri = lwa->ipri;
825*1c6987ebSMateusz Guzik 			itimo = lwa->itimo;
826*1c6987ebSMateusz Guzik 		}
827*1c6987ebSMateusz Guzik 
828047dd67eSAttilio Rao 		/*
829047dd67eSAttilio Rao 		 * Since we have been unable to acquire the
830047dd67eSAttilio Rao 		 * exclusive lock and the exclusive waiters flag
831047dd67eSAttilio Rao 		 * is set, we will sleep.
832047dd67eSAttilio Rao 		 */
833047dd67eSAttilio Rao 		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
834047dd67eSAttilio Rao 		    SQ_EXCLUSIVE_QUEUE);
835047dd67eSAttilio Rao 		flags &= ~LK_INTERLOCK;
836047dd67eSAttilio Rao 		if (error) {
837047dd67eSAttilio Rao 			LOCK_LOG3(lk,
838047dd67eSAttilio Rao 			    "%s: interrupted sleep for %p with %d",
839047dd67eSAttilio Rao 			    __func__, lk, error);
840047dd67eSAttilio Rao 			break;
841047dd67eSAttilio Rao 		}
842047dd67eSAttilio Rao 		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
843047dd67eSAttilio Rao 		    __func__, lk);
844047dd67eSAttilio Rao 	}
845047dd67eSAttilio Rao 	if (error == 0) {
846c4a48867SMateusz Guzik #ifdef LOCK_PROFILING
847c4a48867SMateusz Guzik 		lockmgr_note_exclusive_acquire(lk, contested, waittime,
848c4a48867SMateusz Guzik 		    file, line, flags);
849c4a48867SMateusz Guzik #else
850c4a48867SMateusz Guzik 		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
851c4a48867SMateusz Guzik 		    flags);
852c4a48867SMateusz Guzik #endif
853047dd67eSAttilio Rao 	}
854*1c6987ebSMateusz Guzik 
855*1c6987ebSMateusz Guzik out:
856*1c6987ebSMateusz Guzik 	lockmgr_exit(flags, ilk, 0);
857*1c6987ebSMateusz Guzik 	return (error);
858*1c6987ebSMateusz Guzik }
859*1c6987ebSMateusz Guzik 
860*1c6987ebSMateusz Guzik static __noinline int
861*1c6987ebSMateusz Guzik lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
862*1c6987ebSMateusz Guzik     const char *file, int line, struct lockmgr_wait *lwa)
863*1c6987ebSMateusz Guzik {
864*1c6987ebSMateusz Guzik 	uintptr_t tid, x, v;
865*1c6987ebSMateusz Guzik 	int error = 0;
866*1c6987ebSMateusz Guzik 	int wakeup_swapper = 0;
867*1c6987ebSMateusz Guzik 	int op;
868*1c6987ebSMateusz Guzik 
869*1c6987ebSMateusz Guzik 	if (__predict_false(panicstr != NULL))
870*1c6987ebSMateusz Guzik 		goto out;
871*1c6987ebSMateusz Guzik 
872*1c6987ebSMateusz Guzik 	tid = (uintptr_t)curthread;
873*1c6987ebSMateusz Guzik 
874*1c6987ebSMateusz Guzik 	_lockmgr_assert(lk, KA_SLOCKED, file, line);
875*1c6987ebSMateusz Guzik 	v = lk->lk_lock;
876*1c6987ebSMateusz Guzik 	x = v & LK_ALL_WAITERS;
877*1c6987ebSMateusz Guzik 	v &= LK_EXCLUSIVE_SPINNERS;
878*1c6987ebSMateusz Guzik 
879*1c6987ebSMateusz Guzik 	/*
880*1c6987ebSMateusz Guzik 	 * Try to switch from one shared lock to an exclusive one.
881*1c6987ebSMateusz Guzik 	 * We need to preserve waiters flags during the operation.
882*1c6987ebSMateusz Guzik 	 */
883*1c6987ebSMateusz Guzik 	if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
884*1c6987ebSMateusz Guzik 	    tid | x)) {
885*1c6987ebSMateusz Guzik 		LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
886*1c6987ebSMateusz Guzik 		    line);
887*1c6987ebSMateusz Guzik 		WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
888*1c6987ebSMateusz Guzik 		    LK_TRYWIT(flags), file, line);
889*1c6987ebSMateusz Guzik 		TD_SLOCKS_DEC(curthread);
890*1c6987ebSMateusz Guzik 		goto out;
891*1c6987ebSMateusz Guzik 	}
892*1c6987ebSMateusz Guzik 
893*1c6987ebSMateusz Guzik 	op = flags & LK_TYPE_MASK;
894*1c6987ebSMateusz Guzik 
895*1c6987ebSMateusz Guzik 	/*
896*1c6987ebSMateusz Guzik 	 * In LK_TRYUPGRADE mode, do not drop the lock,
897*1c6987ebSMateusz Guzik 	 * returning EBUSY instead.
898*1c6987ebSMateusz Guzik 	 */
899*1c6987ebSMateusz Guzik 	if (op == LK_TRYUPGRADE) {
900*1c6987ebSMateusz Guzik 		LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
901*1c6987ebSMateusz Guzik 		    __func__, lk);
902*1c6987ebSMateusz Guzik 		error = EBUSY;
903*1c6987ebSMateusz Guzik 		goto out;
904*1c6987ebSMateusz Guzik 	}
905*1c6987ebSMateusz Guzik 
906*1c6987ebSMateusz Guzik 	/*
907*1c6987ebSMateusz Guzik 	 * We have been unable to upgrade, so just give up
908*1c6987ebSMateusz Guzik 	 * the shared lock.
909*1c6987ebSMateusz Guzik 	 */
910*1c6987ebSMateusz Guzik 	wakeup_swapper |= wakeupshlk(lk, file, line);
911*1c6987ebSMateusz Guzik 	error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
912*1c6987ebSMateusz Guzik 	flags &= ~LK_INTERLOCK;
913*1c6987ebSMateusz Guzik out:
914*1c6987ebSMateusz Guzik 	lockmgr_exit(flags, ilk, wakeup_swapper);
915*1c6987ebSMateusz Guzik 	return (error);
916*1c6987ebSMateusz Guzik }
917*1c6987ebSMateusz Guzik 
918*1c6987ebSMateusz Guzik int
919*1c6987ebSMateusz Guzik lockmgr_lock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk,
920*1c6987ebSMateusz Guzik     const char *file, int line)
921*1c6987ebSMateusz Guzik {
922*1c6987ebSMateusz Guzik 	struct lock_class *class;
923*1c6987ebSMateusz Guzik 	uintptr_t x, tid;
924*1c6987ebSMateusz Guzik 	u_int op;
925*1c6987ebSMateusz Guzik 	bool locked;
926*1c6987ebSMateusz Guzik 
927*1c6987ebSMateusz Guzik 	op = flags & LK_TYPE_MASK;
928*1c6987ebSMateusz Guzik 	locked = false;
929*1c6987ebSMateusz Guzik 	switch (op) {
930*1c6987ebSMateusz Guzik 	case LK_SHARED:
931*1c6987ebSMateusz Guzik 		if (LK_CAN_WITNESS(flags))
932*1c6987ebSMateusz Guzik 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
933*1c6987ebSMateusz Guzik 			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
934*1c6987ebSMateusz Guzik 		if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
935*1c6987ebSMateusz Guzik 			break;
936*1c6987ebSMateusz Guzik 		if (lockmgr_slock_try(lk, &x, flags)) {
937*1c6987ebSMateusz Guzik 			lockmgr_note_shared_acquire(lk, 0, 0,
938*1c6987ebSMateusz Guzik 			    file, line, flags);
939*1c6987ebSMateusz Guzik 			locked = true;
940*1c6987ebSMateusz Guzik 		} else {
941*1c6987ebSMateusz Guzik 			return (lockmgr_slock_hard(lk, flags, ilk, file, line,
942*1c6987ebSMateusz Guzik 			    NULL));
943*1c6987ebSMateusz Guzik 		}
944*1c6987ebSMateusz Guzik 		break;
945*1c6987ebSMateusz Guzik 	case LK_EXCLUSIVE:
946*1c6987ebSMateusz Guzik 		if (LK_CAN_WITNESS(flags))
947*1c6987ebSMateusz Guzik 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
948*1c6987ebSMateusz Guzik 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
949*1c6987ebSMateusz Guzik 			    ilk : NULL);
950*1c6987ebSMateusz Guzik 		tid = (uintptr_t)curthread;
951*1c6987ebSMateusz Guzik 		if (lk->lk_lock == LK_UNLOCKED &&
952*1c6987ebSMateusz Guzik 		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
953*1c6987ebSMateusz Guzik 			lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
954*1c6987ebSMateusz Guzik 			    flags);
955*1c6987ebSMateusz Guzik 			locked = true;
956*1c6987ebSMateusz Guzik 		} else {
957*1c6987ebSMateusz Guzik 			return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
958*1c6987ebSMateusz Guzik 			    NULL));
959*1c6987ebSMateusz Guzik 		}
960*1c6987ebSMateusz Guzik 		break;
961*1c6987ebSMateusz Guzik 	case LK_UPGRADE:
962*1c6987ebSMateusz Guzik 	case LK_TRYUPGRADE:
963*1c6987ebSMateusz Guzik 		return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
964*1c6987ebSMateusz Guzik 	default:
965*1c6987ebSMateusz Guzik 		break;
966*1c6987ebSMateusz Guzik 	}
967*1c6987ebSMateusz Guzik 	if (__predict_true(locked)) {
968*1c6987ebSMateusz Guzik 		if (__predict_false(flags & LK_INTERLOCK)) {
969*1c6987ebSMateusz Guzik 			class = LOCK_CLASS(ilk);
970*1c6987ebSMateusz Guzik 			class->lc_unlock(ilk);
971*1c6987ebSMateusz Guzik 		}
972*1c6987ebSMateusz Guzik 		return (0);
973*1c6987ebSMateusz Guzik 	} else {
974*1c6987ebSMateusz Guzik 		return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
975*1c6987ebSMateusz Guzik 		    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
976*1c6987ebSMateusz Guzik 	}
977*1c6987ebSMateusz Guzik }
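
/*
 * Note on the dispatch above: the WITNESS order check runs before the
 * inline attempt so that lock-order violations are reported even when
 * the acquisition succeeds without contention; LK_UPGRADE and
 * LK_TRYUPGRADE always take the out-of-line route, and any other
 * operation falls back to __lockmgr_args().
 */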
978*1c6987ebSMateusz Guzik 
979*1c6987ebSMateusz Guzik static __noinline int
980*1c6987ebSMateusz Guzik lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
981*1c6987ebSMateusz Guzik     const char *file, int line)
982*1c6987ebSMateusz Guzik 
983*1c6987ebSMateusz Guzik {
984*1c6987ebSMateusz Guzik 	int wakeup_swapper = 0;
985*1c6987ebSMateusz Guzik 
986*1c6987ebSMateusz Guzik 	if (__predict_false(panicstr != NULL))
987*1c6987ebSMateusz Guzik 		goto out;
988*1c6987ebSMateusz Guzik 
989*1c6987ebSMateusz Guzik 	wakeup_swapper = wakeupshlk(lk, file, line);
990*1c6987ebSMateusz Guzik 
991*1c6987ebSMateusz Guzik out:
992*1c6987ebSMateusz Guzik 	lockmgr_exit(flags, ilk, wakeup_swapper);
993*1c6987ebSMateusz Guzik 	return (0);
994*1c6987ebSMateusz Guzik }
995*1c6987ebSMateusz Guzik 
996*1c6987ebSMateusz Guzik static __noinline int
997*1c6987ebSMateusz Guzik lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
998*1c6987ebSMateusz Guzik     const char *file, int line)
999*1c6987ebSMateusz Guzik {
1000*1c6987ebSMateusz Guzik 	uintptr_t tid, v;
1001*1c6987ebSMateusz Guzik 	int wakeup_swapper = 0;
1002*1c6987ebSMateusz Guzik 	u_int realexslp;
1003*1c6987ebSMateusz Guzik 	int queue;
1004*1c6987ebSMateusz Guzik 
1005*1c6987ebSMateusz Guzik 	if (__predict_false(panicstr != NULL))
1006*1c6987ebSMateusz Guzik 		goto out;
1007*1c6987ebSMateusz Guzik 
1008*1c6987ebSMateusz Guzik 	tid = (uintptr_t)curthread;
1009*1c6987ebSMateusz Guzik 
1010*1c6987ebSMateusz Guzik 	/*
1011*1c6987ebSMateusz Guzik 	 * As a first pass, treat the lock as if it has no
1012*1c6987ebSMateusz Guzik 	 * waiters.
1013*1c6987ebSMateusz Guzik 	 * Fix up the tid var if the lock has been disowned.
1014*1c6987ebSMateusz Guzik 	 */
1015*1c6987ebSMateusz Guzik 	if (LK_HOLDER(x) == LK_KERNPROC)
1016*1c6987ebSMateusz Guzik 		tid = LK_KERNPROC;
1017*1c6987ebSMateusz Guzik 	else {
1018*1c6987ebSMateusz Guzik 		WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1019*1c6987ebSMateusz Guzik 		TD_LOCKS_DEC(curthread);
1020*1c6987ebSMateusz Guzik 	}
1021*1c6987ebSMateusz Guzik 	LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
1022*1c6987ebSMateusz Guzik 
1023*1c6987ebSMateusz Guzik 	/*
1024*1c6987ebSMateusz Guzik 	 * The lock is held in exclusive mode.
1025*1c6987ebSMateusz Guzik 	 * If the lock is recursed also, then unrecurse it.
1026*1c6987ebSMateusz Guzik 	 */
1027*1c6987ebSMateusz Guzik 	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1028*1c6987ebSMateusz Guzik 		LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
1029*1c6987ebSMateusz Guzik 		lk->lk_recurse--;
1030*1c6987ebSMateusz Guzik 		goto out;
1031*1c6987ebSMateusz Guzik 	}
1032*1c6987ebSMateusz Guzik 	if (tid != LK_KERNPROC)
1033*1c6987ebSMateusz Guzik 		lock_profile_release_lock(&lk->lock_object);
1034*1c6987ebSMateusz Guzik 
1035*1c6987ebSMateusz Guzik 	if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
1036*1c6987ebSMateusz Guzik 		goto out;
1037*1c6987ebSMateusz Guzik 
1038*1c6987ebSMateusz Guzik 	sleepq_lock(&lk->lock_object);
1039*1c6987ebSMateusz Guzik 	x = lk->lk_lock;
1040*1c6987ebSMateusz Guzik 	v = LK_UNLOCKED;
1041*1c6987ebSMateusz Guzik 
1042*1c6987ebSMateusz Guzik 	/*
1043*1c6987ebSMateusz Guzik 	 * If the lock has exclusive waiters, give them
1044*1c6987ebSMateusz Guzik 	 * preference in order to avoid a deadlock with
1045*1c6987ebSMateusz Guzik 	 * the shared runners-up.
1046*1c6987ebSMateusz Guzik 	 * If interruptible sleeps left the exclusive queue
1047*1c6987ebSMateusz Guzik 	 * empty, avoid starving the threads sleeping
1048*1c6987ebSMateusz Guzik 	 * on the shared queue by giving them precedence
1049*1c6987ebSMateusz Guzik 	 * and clearing the exclusive waiters bit anyway.
1050*1c6987ebSMateusz Guzik 	 * Please note that the lk_exslpfail count may not
1051*1c6987ebSMateusz Guzik 	 * reflect the real number of waiters with the
1052*1c6987ebSMateusz Guzik 	 * LK_SLEEPFAIL flag on because LK_SLEEPFAIL may be
1053*1c6987ebSMateusz Guzik 	 * combined with interruptible sleeps, so
1054*1c6987ebSMateusz Guzik 	 * lk_exslpfail should be considered an upper
1055*1c6987ebSMateusz Guzik 	 * bound, including the edge cases.
1056*1c6987ebSMateusz Guzik 	 */
1057*1c6987ebSMateusz Guzik 	MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1058*1c6987ebSMateusz Guzik 	realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
1059*1c6987ebSMateusz Guzik 	if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1060*1c6987ebSMateusz Guzik 		if (lk->lk_exslpfail < realexslp) {
1061*1c6987ebSMateusz Guzik 			lk->lk_exslpfail = 0;
1062*1c6987ebSMateusz Guzik 			queue = SQ_EXCLUSIVE_QUEUE;
1063*1c6987ebSMateusz Guzik 			v |= (x & LK_SHARED_WAITERS);
1064*1c6987ebSMateusz Guzik 		} else {
1065*1c6987ebSMateusz Guzik 			lk->lk_exslpfail = 0;
1066*1c6987ebSMateusz Guzik 			LOCK_LOG2(lk,
1067*1c6987ebSMateusz Guzik 			    "%s: %p has only LK_SLEEPFAIL sleepers",
1068*1c6987ebSMateusz Guzik 			    __func__, lk);
1069*1c6987ebSMateusz Guzik 			LOCK_LOG2(lk,
1070*1c6987ebSMateusz Guzik 			    "%s: %p waking up threads on the exclusive queue",
1071*1c6987ebSMateusz Guzik 			    __func__, lk);
1072*1c6987ebSMateusz Guzik 			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
1073*1c6987ebSMateusz Guzik 			    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1074*1c6987ebSMateusz Guzik 			queue = SQ_SHARED_QUEUE;
1075*1c6987ebSMateusz Guzik 		}
1076*1c6987ebSMateusz Guzik 	} else {
1077*1c6987ebSMateusz Guzik 
1078*1c6987ebSMateusz Guzik 		/*
1079*1c6987ebSMateusz Guzik 		 * Exclusive waiters sleeping with LK_SLEEPFAIL
1080*1c6987ebSMateusz Guzik 		 * on and using interruptible sleeps/timeout
1081*1c6987ebSMateusz Guzik 		 * may have left spurious lk_exslpfail counts
1082*1c6987ebSMateusz Guzik 		 * on, so clean them up anyway.
1083*1c6987ebSMateusz Guzik 		 */
1084*1c6987ebSMateusz Guzik 		lk->lk_exslpfail = 0;
1085*1c6987ebSMateusz Guzik 		queue = SQ_SHARED_QUEUE;
1086*1c6987ebSMateusz Guzik 	}
1087*1c6987ebSMateusz Guzik 
1088*1c6987ebSMateusz Guzik 	LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
1089*1c6987ebSMateusz Guzik 	    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1090*1c6987ebSMateusz Guzik 	    "exclusive");
1091*1c6987ebSMateusz Guzik 	atomic_store_rel_ptr(&lk->lk_lock, v);
1092*1c6987ebSMateusz Guzik 	wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
1093*1c6987ebSMateusz Guzik 	sleepq_release(&lk->lock_object);
1094*1c6987ebSMateusz Guzik 
1095*1c6987ebSMateusz Guzik out:
1096*1c6987ebSMateusz Guzik 	lockmgr_exit(flags, ilk, wakeup_swapper);
1097*1c6987ebSMateusz Guzik 	return (0);
1098*1c6987ebSMateusz Guzik }
1099*1c6987ebSMateusz Guzik 
1100*1c6987ebSMateusz Guzik int
1101*1c6987ebSMateusz Guzik lockmgr_unlock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk)
1102*1c6987ebSMateusz Guzik {
1103*1c6987ebSMateusz Guzik 	struct lock_class *class;
1104*1c6987ebSMateusz Guzik 	uintptr_t x, tid;
1105*1c6987ebSMateusz Guzik 	bool unlocked;
1106*1c6987ebSMateusz Guzik 	const char *file;
1107*1c6987ebSMateusz Guzik 	int line;
1108*1c6987ebSMateusz Guzik 
1109*1c6987ebSMateusz Guzik 	file = __FILE__;
1110*1c6987ebSMateusz Guzik 	line = __LINE__;
1111*1c6987ebSMateusz Guzik 
1112*1c6987ebSMateusz Guzik 	_lockmgr_assert(lk, KA_LOCKED, file, line);
1113*1c6987ebSMateusz Guzik 	unlocked = false;
1114*1c6987ebSMateusz Guzik 	x = lk->lk_lock;
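	/*
	 * LK_SHARE in the lock word flags a shared hold; otherwise the
	 * word carries the owning thread pointer.
	 */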
1115*1c6987ebSMateusz Guzik 	if (__predict_true((x & LK_SHARE) != 0)) {
1116*1c6987ebSMateusz Guzik 		if (lockmgr_sunlock_try(lk, &x)) {
1117*1c6987ebSMateusz Guzik 			lockmgr_note_shared_release(lk, file, line);
1118*1c6987ebSMateusz Guzik 			unlocked = true;
1119*1c6987ebSMateusz Guzik 		} else {
1120*1c6987ebSMateusz Guzik 			return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1121*1c6987ebSMateusz Guzik 		}
1122*1c6987ebSMateusz Guzik 	} else {
1123*1c6987ebSMateusz Guzik 		tid = (uintptr_t)curthread;
1124*1c6987ebSMateusz Guzik 		if (!lockmgr_recursed(lk) &&
1125*1c6987ebSMateusz Guzik 		    atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1126*1c6987ebSMateusz Guzik 			lockmgr_note_exclusive_release(lk, file, line);
1127*1c6987ebSMateusz Guzik 			unlocked = true;
1128*1c6987ebSMateusz Guzik 		} else {
1129*1c6987ebSMateusz Guzik 			return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
1130*1c6987ebSMateusz Guzik 		}
1131*1c6987ebSMateusz Guzik 	}
1132*1c6987ebSMateusz Guzik 	if (__predict_false(flags & LK_INTERLOCK)) {
1133*1c6987ebSMateusz Guzik 		class = LOCK_CLASS(ilk);
1134*1c6987ebSMateusz Guzik 		class->lc_unlock(ilk);
1135*1c6987ebSMateusz Guzik 	}
1136*1c6987ebSMateusz Guzik 	return (0);
1137*1c6987ebSMateusz Guzik }
1138*1c6987ebSMateusz Guzik 
1139*1c6987ebSMateusz Guzik int
1140*1c6987ebSMateusz Guzik __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
1141*1c6987ebSMateusz Guzik     const char *wmesg, int pri, int timo, const char *file, int line)
1142*1c6987ebSMateusz Guzik {
1143*1c6987ebSMateusz Guzik 	GIANT_DECLARE;
1144*1c6987ebSMateusz Guzik 	struct lockmgr_wait lwa;
1145*1c6987ebSMateusz Guzik 	struct lock_class *class;
1146*1c6987ebSMateusz Guzik 	const char *iwmesg;
1147*1c6987ebSMateusz Guzik 	uintptr_t tid, v, x;
1148*1c6987ebSMateusz Guzik 	u_int op, realexslp;
1149*1c6987ebSMateusz Guzik 	int error, ipri, itimo, queue, wakeup_swapper;
1150*1c6987ebSMateusz Guzik #ifdef LOCK_PROFILING
1151*1c6987ebSMateusz Guzik 	uint64_t waittime = 0;
1152*1c6987ebSMateusz Guzik 	int contested = 0;
1153*1c6987ebSMateusz Guzik #endif
1154*1c6987ebSMateusz Guzik 
1155*1c6987ebSMateusz Guzik 	error = 0;
1156*1c6987ebSMateusz Guzik 	tid = (uintptr_t)curthread;
1157*1c6987ebSMateusz Guzik 	op = (flags & LK_TYPE_MASK);
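	/*
	 * Fall back to the per-lock defaults for any parameter the caller
	 * passed as LK_*_DEFAULT.
	 */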
1158*1c6987ebSMateusz Guzik 	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
1159*1c6987ebSMateusz Guzik 	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
1160*1c6987ebSMateusz Guzik 	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
1161*1c6987ebSMateusz Guzik 
1162*1c6987ebSMateusz Guzik 	lwa.iwmesg = iwmesg;
1163*1c6987ebSMateusz Guzik 	lwa.ipri = ipri;
1164*1c6987ebSMateusz Guzik 	lwa.itimo = itimo;
1165*1c6987ebSMateusz Guzik 
1166*1c6987ebSMateusz Guzik 	MPASS((flags & ~LK_TOTAL_MASK) == 0);
1167*1c6987ebSMateusz Guzik 	KASSERT((op & (op - 1)) == 0,
1168*1c6987ebSMateusz Guzik 	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
1169*1c6987ebSMateusz Guzik 	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
1170*1c6987ebSMateusz Guzik 	    (op != LK_DOWNGRADE && op != LK_RELEASE),
1171*1c6987ebSMateusz Guzik 	    ("%s: Invalid flags for the requested operation @ %s:%d",
1172*1c6987ebSMateusz Guzik 	    __func__, file, line));
1173*1c6987ebSMateusz Guzik 	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
1174*1c6987ebSMateusz Guzik 	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
1175*1c6987ebSMateusz Guzik 	    __func__, file, line));
1176*1c6987ebSMateusz Guzik 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
1177*1c6987ebSMateusz Guzik 	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
1178*1c6987ebSMateusz Guzik 	    lk->lock_object.lo_name, file, line));
1179*1c6987ebSMateusz Guzik 
1180*1c6987ebSMateusz Guzik 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
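	/*
	 * After a panic, lock operations degrade to no-ops: drop the
	 * interlock if requested and report success so that the dump and
	 * debugger paths can make progress.
	 */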
1181*1c6987ebSMateusz Guzik 	if (panicstr != NULL) {
1182*1c6987ebSMateusz Guzik 		if (flags & LK_INTERLOCK)
1183*1c6987ebSMateusz Guzik 			class->lc_unlock(ilk);
1184*1c6987ebSMateusz Guzik 		return (0);
1185*1c6987ebSMateusz Guzik 	}
1186*1c6987ebSMateusz Guzik 
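	/*
	 * Locks initialized with LK_NOSHARE never grant shared access:
	 * shared requests are promoted to exclusive ones, while upgrade
	 * and downgrade requests on the (necessarily exclusive) hold
	 * degenerate to no-ops.
	 */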
1187*1c6987ebSMateusz Guzik 	if (lk->lock_object.lo_flags & LK_NOSHARE) {
1188*1c6987ebSMateusz Guzik 		switch (op) {
1189*1c6987ebSMateusz Guzik 		case LK_SHARED:
1190*1c6987ebSMateusz Guzik 			op = LK_EXCLUSIVE;
1191*1c6987ebSMateusz Guzik 			break;
1192*1c6987ebSMateusz Guzik 		case LK_UPGRADE:
1193*1c6987ebSMateusz Guzik 		case LK_TRYUPGRADE:
1194*1c6987ebSMateusz Guzik 		case LK_DOWNGRADE:
1195*1c6987ebSMateusz Guzik 			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
1196*1c6987ebSMateusz Guzik 			    file, line);
1197*1c6987ebSMateusz Guzik 			if (flags & LK_INTERLOCK)
1198*1c6987ebSMateusz Guzik 				class->lc_unlock(ilk);
1199*1c6987ebSMateusz Guzik 			return (0);
1200*1c6987ebSMateusz Guzik 		}
1201*1c6987ebSMateusz Guzik 	}
1202*1c6987ebSMateusz Guzik 
1203*1c6987ebSMateusz Guzik 	wakeup_swapper = 0;
1204*1c6987ebSMateusz Guzik 	switch (op) {
1205*1c6987ebSMateusz Guzik 	case LK_SHARED:
1206*1c6987ebSMateusz Guzik 		return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
1207*1c6987ebSMateusz Guzik 		break;
1208*1c6987ebSMateusz Guzik 	case LK_UPGRADE:
1209*1c6987ebSMateusz Guzik 	case LK_TRYUPGRADE:
1210*1c6987ebSMateusz Guzik 		return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
1211*1c6987ebSMateusz Guzik 		break;
1212*1c6987ebSMateusz Guzik 	case LK_EXCLUSIVE:
1213*1c6987ebSMateusz Guzik 		return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1214047dd67eSAttilio Rao 		break;
1215047dd67eSAttilio Rao 	case LK_DOWNGRADE:
12161c7d98d0SAttilio Rao 		_lockmgr_assert(lk, KA_XLOCKED, file, line);
1217e5f94314SAttilio Rao 		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
1218e5f94314SAttilio Rao 		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
12191c7d98d0SAttilio Rao 
12201c7d98d0SAttilio Rao 		/*
12211c7d98d0SAttilio Rao 		 * Panic if the lock is recursed.
12221c7d98d0SAttilio Rao 		 */
12231c7d98d0SAttilio Rao 		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
12241c7d98d0SAttilio Rao 			if (flags & LK_INTERLOCK)
12251c7d98d0SAttilio Rao 				class->lc_unlock(ilk);
12261c7d98d0SAttilio Rao 			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
12271c7d98d0SAttilio Rao 			    __func__, iwmesg, file, line);
12281c7d98d0SAttilio Rao 		}
1229e5f94314SAttilio Rao 		TD_SLOCKS_INC(curthread);
1230047dd67eSAttilio Rao 
1231047dd67eSAttilio Rao 		/*
1232047dd67eSAttilio Rao 		 * In order to preserve waiters flags, just spin.
1233047dd67eSAttilio Rao 		 */
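		/*
		 * The cmpset below swaps the exclusive owner word (tid) for
		 * a single-sharer count, carrying the waiter bits across
		 * unchanged.
		 */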
1234047dd67eSAttilio Rao 		for (;;) {
1235651175c9SAttilio Rao 			x = lk->lk_lock;
1236651175c9SAttilio Rao 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1237651175c9SAttilio Rao 			x &= LK_ALL_WAITERS;
1238047dd67eSAttilio Rao 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1239e5f94314SAttilio Rao 			    LK_SHARERS_LOCK(1) | x))
1240047dd67eSAttilio Rao 				break;
1241047dd67eSAttilio Rao 			cpu_spinwait();
1242047dd67eSAttilio Rao 		}
1243047dd67eSAttilio Rao 		break;
1244047dd67eSAttilio Rao 	case LK_RELEASE:
1245047dd67eSAttilio Rao 		_lockmgr_assert(lk, KA_LOCKED, file, line);
1246047dd67eSAttilio Rao 		x = lk->lk_lock;
1247047dd67eSAttilio Rao 
1248*1c6987ebSMateusz Guzik 		if (__predict_true((x & LK_SHARE) != 0)) {
1249*1c6987ebSMateusz Guzik 			return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1250047dd67eSAttilio Rao 		} else {
1251*1c6987ebSMateusz Guzik 			return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
12522028867dSAttilio Rao 		}
1253047dd67eSAttilio Rao 		break;
1254047dd67eSAttilio Rao 	case LK_DRAIN:
1255e5f94314SAttilio Rao 		if (LK_CAN_WITNESS(flags))
1256e5f94314SAttilio Rao 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
125724150d37SJohn Baldwin 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
125824150d37SJohn Baldwin 			    ilk : NULL);
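
		/*
		 * LK_DRAIN waits for all activity on the lock to end and
		 * then acquires it exclusively; it is typically the last
		 * operation before a lock embedded in memory about to be
		 * freed is destroyed.
		 */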
1259047dd67eSAttilio Rao 
1260047dd67eSAttilio Rao 		/*
126196f1567fSKonstantin Belousov 		 * Trying to drain a lock we already own will result in a
1262047dd67eSAttilio Rao 		 * deadlock.
1263047dd67eSAttilio Rao 		 */
1264047dd67eSAttilio Rao 		if (lockmgr_xlocked(lk)) {
1265047dd67eSAttilio Rao 			if (flags & LK_INTERLOCK)
1266047dd67eSAttilio Rao 				class->lc_unlock(ilk);
1267047dd67eSAttilio Rao 			panic("%s: draining %s with the lock held @ %s:%d\n",
1268047dd67eSAttilio Rao 			    __func__, iwmesg, file, line);
1269047dd67eSAttilio Rao 		}
1270047dd67eSAttilio Rao 
1271fc4f686dSMateusz Guzik 		for (;;) {
1272fc4f686dSMateusz Guzik 			if (lk->lk_lock == LK_UNLOCKED &&
1273fc4f686dSMateusz Guzik 			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1274fc4f686dSMateusz Guzik 				break;
1275fc4f686dSMateusz Guzik 
1276f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
1277f5f9340bSFabien Thomas 			PMC_SOFT_CALL( , , lock, failed);
1278f5f9340bSFabien Thomas #endif
1279047dd67eSAttilio Rao 			lock_profile_obtain_lock_failed(&lk->lock_object,
1280047dd67eSAttilio Rao 			    &contested, &waittime);
1281047dd67eSAttilio Rao 
1282047dd67eSAttilio Rao 			/*
1283047dd67eSAttilio Rao 			 * If the operation is not allowed to sleep, just
1284047dd67eSAttilio Rao 			 * give up and return.
1285047dd67eSAttilio Rao 			 */
1286047dd67eSAttilio Rao 			if (LK_TRYOP(flags)) {
1287047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p fails the try operation",
1288047dd67eSAttilio Rao 				    __func__, lk);
1289047dd67eSAttilio Rao 				error = EBUSY;
1290047dd67eSAttilio Rao 				break;
1291047dd67eSAttilio Rao 			}
1292047dd67eSAttilio Rao 
1293047dd67eSAttilio Rao 			/*
1294047dd67eSAttilio Rao 			 * Acquire the sleepqueue chain lock because we
1295047dd67eSAttilio Rao 			 * will probably need to manipulate the waiters flags.
1296047dd67eSAttilio Rao 			 */
1297047dd67eSAttilio Rao 			sleepq_lock(&lk->lock_object);
1298047dd67eSAttilio Rao 			x = lk->lk_lock;
1299047dd67eSAttilio Rao 
1300047dd67eSAttilio Rao 			/*
1301047dd67eSAttilio Rao 			 * If the lock has been released while we spun on
1302047dd67eSAttilio Rao 			 * the sleepqueue chain lock, just try again.
1303047dd67eSAttilio Rao 			 */
1304047dd67eSAttilio Rao 			if (x == LK_UNLOCKED) {
1305047dd67eSAttilio Rao 				sleepq_release(&lk->lock_object);
1306047dd67eSAttilio Rao 				continue;
1307047dd67eSAttilio Rao 			}
1308047dd67eSAttilio Rao 
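			/*
			 * If stripping the waiter and spinner bits leaves
			 * LK_UNLOCKED, nobody actually holds the lock and we
			 * can hand it off to the sleeping threads directly.
			 */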
1309651175c9SAttilio Rao 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1310651175c9SAttilio Rao 			if ((x & ~v) == LK_UNLOCKED) {
1311651175c9SAttilio Rao 				v = (x & ~LK_EXCLUSIVE_SPINNERS);
13122028867dSAttilio Rao 
13132028867dSAttilio Rao 				/*
13142028867dSAttilio Rao 				 * If interruptible sleeps left the exclusive
13152028867dSAttilio Rao 				 * queue empty, avoid starvation of the
13162028867dSAttilio Rao 				 * threads sleeping on the shared queue by
13172028867dSAttilio Rao 				 * giving them precedence and cleaning up the
13182028867dSAttilio Rao 				 * exclusive waiters bit anyway.
1319c636ba83SAttilio Rao 				 * Please note that the lk_exslpfail count
1320c636ba83SAttilio Rao 				 * may be overstating the real number of
1321c636ba83SAttilio Rao 				 * waiters with the LK_SLEEPFAIL flag set,
1322e3043798SPedro F. Giffuni 				 * because such waiters may also be using
1323aab9c8c2SAttilio Rao 				 * interruptible sleeps; treat lk_exslpfail
1324aab9c8c2SAttilio Rao 				 * as an upper bound, including the edge
1325c636ba83SAttilio Rao 				 * cases.
13262028867dSAttilio Rao 				 */
1327047dd67eSAttilio Rao 				if (v & LK_EXCLUSIVE_WAITERS) {
1328047dd67eSAttilio Rao 					queue = SQ_EXCLUSIVE_QUEUE;
1329047dd67eSAttilio Rao 					v &= ~LK_EXCLUSIVE_WAITERS;
1330047dd67eSAttilio Rao 				} else {
13319dbf7a62SAttilio Rao 
13329dbf7a62SAttilio Rao 					/*
13339dbf7a62SAttilio Rao 					 * Exclusive waiters sleeping with
13349dbf7a62SAttilio Rao 					 * LK_SLEEPFAIL on and using
13359dbf7a62SAttilio Rao 					 * interruptible sleeps/timeout may
13369dbf7a62SAttilio Rao 					 * have left spurious lk_exslpfail
13379dbf7a62SAttilio Rao 					 * counts on, so clean it up anyway.
13389dbf7a62SAttilio Rao 					 */
1339047dd67eSAttilio Rao 					MPASS(v & LK_SHARED_WAITERS);
13409dbf7a62SAttilio Rao 					lk->lk_exslpfail = 0;
1341047dd67eSAttilio Rao 					queue = SQ_SHARED_QUEUE;
1342047dd67eSAttilio Rao 					v &= ~LK_SHARED_WAITERS;
1343047dd67eSAttilio Rao 				}
13442028867dSAttilio Rao 				if (queue == SQ_EXCLUSIVE_QUEUE) {
13452028867dSAttilio Rao 					realexslp =
13462028867dSAttilio Rao 					    sleepq_sleepcnt(&lk->lock_object,
13472028867dSAttilio Rao 					    SQ_EXCLUSIVE_QUEUE);
13482028867dSAttilio Rao 					if (lk->lk_exslpfail >= realexslp) {
13492028867dSAttilio Rao 						lk->lk_exslpfail = 0;
13502028867dSAttilio Rao 						queue = SQ_SHARED_QUEUE;
13512028867dSAttilio Rao 						v &= ~LK_SHARED_WAITERS;
13522028867dSAttilio Rao 						if (realexslp != 0) {
13532028867dSAttilio Rao 							LOCK_LOG2(lk,
13542028867dSAttilio Rao 					"%s: %p has only LK_SLEEPFAIL sleepers",
13552028867dSAttilio Rao 							    __func__, lk);
13562028867dSAttilio Rao 							LOCK_LOG2(lk,
13572028867dSAttilio Rao 			"%s: %p waking up threads on the exclusive queue",
13582028867dSAttilio Rao 							    __func__, lk);
13592028867dSAttilio Rao 							wakeup_swapper =
13602028867dSAttilio Rao 							    sleepq_broadcast(
13612028867dSAttilio Rao 							    &lk->lock_object,
13622028867dSAttilio Rao 							    SLEEPQ_LK, 0,
13632028867dSAttilio Rao 							    SQ_EXCLUSIVE_QUEUE);
13642028867dSAttilio Rao 						}
13652028867dSAttilio Rao 					} else
13662028867dSAttilio Rao 						lk->lk_exslpfail = 0;
13672028867dSAttilio Rao 				}
1368047dd67eSAttilio Rao 				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1369047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
1370047dd67eSAttilio Rao 					continue;
1371047dd67eSAttilio Rao 				}
1372047dd67eSAttilio Rao 				LOCK_LOG3(lk,
1373047dd67eSAttilio Rao 				"%s: %p waking up all threads on the %s queue",
1374047dd67eSAttilio Rao 				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1375047dd67eSAttilio Rao 				    "shared" : "exclusive");
1376814f26daSJohn Baldwin 				wakeup_swapper |= sleepq_broadcast(
1377da7bbd2cSJohn Baldwin 				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1378047dd67eSAttilio Rao 
1379047dd67eSAttilio Rao 				/*
1380047dd67eSAttilio Rao 				 * If shared waiters have been woken up, we
1381047dd67eSAttilio Rao 				 * need to wait for one of them to acquire
1382047dd67eSAttilio Rao 				 * the lock before setting the exclusive
1383047dd67eSAttilio Rao 				 * waiters flag in order to avoid a deadlock.
1384047dd67eSAttilio Rao 				 */
1385047dd67eSAttilio Rao 				if (queue == SQ_SHARED_QUEUE) {
1386047dd67eSAttilio Rao 					for (v = lk->lk_lock;
1387047dd67eSAttilio Rao 					    (v & LK_SHARE) && !LK_SHARERS(v);
1388047dd67eSAttilio Rao 					    v = lk->lk_lock)
1389047dd67eSAttilio Rao 						cpu_spinwait();
1390047dd67eSAttilio Rao 				}
1391047dd67eSAttilio Rao 			}
1392047dd67eSAttilio Rao 
1393047dd67eSAttilio Rao 			/*
1394047dd67eSAttilio Rao 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1395047dd67eSAttilio Rao 			 * fail, loop back and retry.
1396047dd67eSAttilio Rao 			 */
1397047dd67eSAttilio Rao 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1398047dd67eSAttilio Rao 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1399047dd67eSAttilio Rao 				    x | LK_EXCLUSIVE_WAITERS)) {
1400047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
1401047dd67eSAttilio Rao 					continue;
1402047dd67eSAttilio Rao 				}
1403047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1404047dd67eSAttilio Rao 				    __func__, lk);
1405047dd67eSAttilio Rao 			}
1406047dd67eSAttilio Rao 
1407047dd67eSAttilio Rao 			/*
1408047dd67eSAttilio Rao 			 * Since we have been unable to acquire the
1409047dd67eSAttilio Rao 			 * exclusive lock and the exclusive waiters flag
1410047dd67eSAttilio Rao 			 * is set, we will sleep.
1411047dd67eSAttilio Rao 			 */
1412047dd67eSAttilio Rao 			if (flags & LK_INTERLOCK) {
1413047dd67eSAttilio Rao 				class->lc_unlock(ilk);
1414047dd67eSAttilio Rao 				flags &= ~LK_INTERLOCK;
1415047dd67eSAttilio Rao 			}
1416e5f94314SAttilio Rao 			GIANT_SAVE();
1417047dd67eSAttilio Rao 			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1418047dd67eSAttilio Rao 			    SQ_EXCLUSIVE_QUEUE);
1419047dd67eSAttilio Rao 			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1420e5f94314SAttilio Rao 			GIANT_RESTORE();
1421047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1422047dd67eSAttilio Rao 			    __func__, lk);
1423047dd67eSAttilio Rao 		}
1424047dd67eSAttilio Rao 
1425047dd67eSAttilio Rao 		if (error == 0) {
1426047dd67eSAttilio Rao 			lock_profile_obtain_lock_success(&lk->lock_object,
1427047dd67eSAttilio Rao 			    contested, waittime, file, line);
1428047dd67eSAttilio Rao 			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1429047dd67eSAttilio Rao 			    lk->lk_recurse, file, line);
1430e5f94314SAttilio Rao 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1431e5f94314SAttilio Rao 			    LK_TRYWIT(flags), file, line);
1432047dd67eSAttilio Rao 			TD_LOCKS_INC(curthread);
1433047dd67eSAttilio Rao 			STACK_SAVE(lk);
1434047dd67eSAttilio Rao 		}
1435047dd67eSAttilio Rao 		break;
1436047dd67eSAttilio Rao 	default:
1437047dd67eSAttilio Rao 		if (flags & LK_INTERLOCK)
1438047dd67eSAttilio Rao 			class->lc_unlock(ilk);
1439047dd67eSAttilio Rao 		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1440047dd67eSAttilio Rao 	}
1441047dd67eSAttilio Rao 
1442047dd67eSAttilio Rao 	if (flags & LK_INTERLOCK)
1443047dd67eSAttilio Rao 		class->lc_unlock(ilk);
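	/*
	 * If any of the broadcasts above made a swapped-out thread
	 * runnable, poke the swapper (proc0) so it can bring the thread
	 * back in.
	 */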
1444da7bbd2cSJohn Baldwin 	if (wakeup_swapper)
1445da7bbd2cSJohn Baldwin 		kick_proc0();
1446047dd67eSAttilio Rao 
1447047dd67eSAttilio Rao 	return (error);
1448047dd67eSAttilio Rao }
1449047dd67eSAttilio Rao 
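/*
 * An illustrative sketch (not taken from this file) of how a consumer
 * drives the interface above; the "examplelk" name and the PVFS priority
 * are arbitrary choices:
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, 0);
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	... critical section ...
 *	lockmgr(&lk, LK_RELEASE, NULL);
 *	lockdestroy(&lk);
 */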
1450d7a7e179SAttilio Rao void
1451047dd67eSAttilio Rao _lockmgr_disown(struct lock *lk, const char *file, int line)
1452047dd67eSAttilio Rao {
1453047dd67eSAttilio Rao 	uintptr_t tid, x;
1454047dd67eSAttilio Rao 
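	/*
	 * Late in a panic the scheduler is stopped, so lock state no
	 * longer matters; do nothing.
	 */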
145535370593SAndriy Gapon 	if (SCHEDULER_STOPPED())
145635370593SAndriy Gapon 		return;
145735370593SAndriy Gapon 
1458047dd67eSAttilio Rao 	tid = (uintptr_t)curthread;
14591c7d98d0SAttilio Rao 	_lockmgr_assert(lk, KA_XLOCKED, file, line);
14601c7d98d0SAttilio Rao 
14611c7d98d0SAttilio Rao 	/*
14621c7d98d0SAttilio Rao 	 * Panic if the lock is recursed.
14631c7d98d0SAttilio Rao 	 */
14641c7d98d0SAttilio Rao 	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
14651c7d98d0SAttilio Rao 		panic("%s: disown a recursed lockmgr @ %s:%d\n",
14661c7d98d0SAttilio Rao 		    __func__, file, line);
1467047dd67eSAttilio Rao 
1468047dd67eSAttilio Rao 	/*
146996f1567fSKonstantin Belousov 	 * If the owner is already LK_KERNPROC, just skip the whole operation.
1470047dd67eSAttilio Rao 	 */
1471047dd67eSAttilio Rao 	if (LK_HOLDER(lk->lk_lock) != tid)
1472047dd67eSAttilio Rao 		return;
147304a28689SJeff Roberson 	lock_profile_release_lock(&lk->lock_object);
1474e5f94314SAttilio Rao 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1475e5f94314SAttilio Rao 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1476e5f94314SAttilio Rao 	TD_LOCKS_DEC(curthread);
1477337c5ff4SAttilio Rao 	STACK_SAVE(lk);
1478047dd67eSAttilio Rao 
1479047dd67eSAttilio Rao 	/*
1480047dd67eSAttilio Rao 	 * In order to preserve waiters flags, just spin.
1481047dd67eSAttilio Rao 	 */
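	/*
	 * The cmpset below swaps the owner for LK_KERNPROC while carrying
	 * the waiter bits across, so the lock stops being attributed to
	 * curthread.
	 */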
1482047dd67eSAttilio Rao 	for (;;) {
1483651175c9SAttilio Rao 		x = lk->lk_lock;
1484651175c9SAttilio Rao 		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1485651175c9SAttilio Rao 		x &= LK_ALL_WAITERS;
148622dd228dSAttilio Rao 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1487e5f94314SAttilio Rao 		    LK_KERNPROC | x))
1488047dd67eSAttilio Rao 			return;
1489047dd67eSAttilio Rao 		cpu_spinwait();
1490047dd67eSAttilio Rao 	}
1491047dd67eSAttilio Rao }
1492047dd67eSAttilio Rao 
1493047dd67eSAttilio Rao void
1494d576deedSPawel Jakub Dawidek lockmgr_printinfo(const struct lock *lk)
1495d7a7e179SAttilio Rao {
1496d7a7e179SAttilio Rao 	struct thread *td;
1497047dd67eSAttilio Rao 	uintptr_t x;
1498d7a7e179SAttilio Rao 
1499047dd67eSAttilio Rao 	if (lk->lk_lock == LK_UNLOCKED)
1500047dd67eSAttilio Rao 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1501047dd67eSAttilio Rao 	else if (lk->lk_lock & LK_SHARE)
1502047dd67eSAttilio Rao 		printf("lock type %s: SHARED (count %ju)\n",
1503047dd67eSAttilio Rao 		    lk->lock_object.lo_name,
1504047dd67eSAttilio Rao 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1505047dd67eSAttilio Rao 	else {
1506047dd67eSAttilio Rao 		td = lockmgr_xholder(lk);
1507e64b4fa8SKonstantin Belousov 		if (td == (struct thread *)LK_KERNPROC)
1508e64b4fa8SKonstantin Belousov 			printf("lock type %s: EXCL by KERNPROC\n",
1509e64b4fa8SKonstantin Belousov 			    lk->lock_object.lo_name);
1510e64b4fa8SKonstantin Belousov 		else
15112573ea5fSIvan Voras 			printf("lock type %s: EXCL by thread %p "
1512e64b4fa8SKonstantin Belousov 			    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1513e64b4fa8SKonstantin Belousov 			    td, td->td_proc->p_pid, td->td_proc->p_comm,
1514e64b4fa8SKonstantin Belousov 			    td->td_tid);
1515d7a7e179SAttilio Rao 	}
1516d7a7e179SAttilio Rao 
1517047dd67eSAttilio Rao 	x = lk->lk_lock;
1518047dd67eSAttilio Rao 	if (x & LK_EXCLUSIVE_WAITERS)
1519047dd67eSAttilio Rao 		printf(" with exclusive waiters pending\n");
1520047dd67eSAttilio Rao 	if (x & LK_SHARED_WAITERS)
1521047dd67eSAttilio Rao 		printf(" with shared waiters pending\n");
1522651175c9SAttilio Rao 	if (x & LK_EXCLUSIVE_SPINNERS)
1523651175c9SAttilio Rao 		printf(" with exclusive spinners pending\n");
1524047dd67eSAttilio Rao 
1525047dd67eSAttilio Rao 	STACK_PRINT(lk);
1526047dd67eSAttilio Rao }
1527047dd67eSAttilio Rao 
152899448ed1SJohn Dyson int
1529d576deedSPawel Jakub Dawidek lockstatus(const struct lock *lk)
153099448ed1SJohn Dyson {
1531047dd67eSAttilio Rao 	uintptr_t v, x;
1532047dd67eSAttilio Rao 	int ret;
153399448ed1SJohn Dyson 
1534047dd67eSAttilio Rao 	ret = LK_SHARED;
1535047dd67eSAttilio Rao 	x = lk->lk_lock;
1536047dd67eSAttilio Rao 	v = LK_HOLDER(x);
15370e9eb108SAttilio Rao 
1538047dd67eSAttilio Rao 	if ((x & LK_SHARE) == 0) {
1539047dd67eSAttilio Rao 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1540047dd67eSAttilio Rao 			ret = LK_EXCLUSIVE;
15416bdfe06aSEivind Eklund 		else
1542047dd67eSAttilio Rao 			ret = LK_EXCLOTHER;
1543047dd67eSAttilio Rao 	} else if (x == LK_UNLOCKED)
1544047dd67eSAttilio Rao 		ret = 0;
154599448ed1SJohn Dyson 
1546047dd67eSAttilio Rao 	return (ret);
154753bf4bb2SPeter Wemm }
1548be6847d7SJohn Baldwin 
154984887fa3SAttilio Rao #ifdef INVARIANT_SUPPORT
1550de5b1952SAlexander Leidinger 
1551de5b1952SAlexander Leidinger FEATURE(invariant_support,
1552de5b1952SAlexander Leidinger     "Support for modules compiled with INVARIANTS option");
1553de5b1952SAlexander Leidinger 
155484887fa3SAttilio Rao #ifndef INVARIANTS
155584887fa3SAttilio Rao #undef	_lockmgr_assert
155684887fa3SAttilio Rao #endif
155784887fa3SAttilio Rao 
155884887fa3SAttilio Rao void
1559d576deedSPawel Jakub Dawidek _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
156084887fa3SAttilio Rao {
156184887fa3SAttilio Rao 	int slocked = 0;
156284887fa3SAttilio Rao 
156384887fa3SAttilio Rao 	if (panicstr != NULL)
156484887fa3SAttilio Rao 		return;
156584887fa3SAttilio Rao 	switch (what) {
156684887fa3SAttilio Rao 	case KA_SLOCKED:
156784887fa3SAttilio Rao 	case KA_SLOCKED | KA_NOTRECURSED:
156884887fa3SAttilio Rao 	case KA_SLOCKED | KA_RECURSED:
156984887fa3SAttilio Rao 		slocked = 1;
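		/* FALLTHROUGH */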
157084887fa3SAttilio Rao 	case KA_LOCKED:
157184887fa3SAttilio Rao 	case KA_LOCKED | KA_NOTRECURSED:
157284887fa3SAttilio Rao 	case KA_LOCKED | KA_RECURSED:
1573e5f94314SAttilio Rao #ifdef WITNESS
1574e5f94314SAttilio Rao 
1575e5f94314SAttilio Rao 		/*
1576e5f94314SAttilio Rao 		 * We cannot trust WITNESS if the lock is held in exclusive
1577e5f94314SAttilio Rao 		 * mode and a call to lockmgr_disown() happened.
1578e5f94314SAttilio Rao 		 * Work around this by skipping the check if the lock is
1579e5f94314SAttilio Rao 		 * held in exclusive mode, even for the KA_LOCKED case.
1580e5f94314SAttilio Rao 		 */
1581e5f94314SAttilio Rao 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1582e5f94314SAttilio Rao 			witness_assert(&lk->lock_object, what, file, line);
1583e5f94314SAttilio Rao 			break;
1584e5f94314SAttilio Rao 		}
1585e5f94314SAttilio Rao #endif
1586047dd67eSAttilio Rao 		if (lk->lk_lock == LK_UNLOCKED ||
1587047dd67eSAttilio Rao 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1588047dd67eSAttilio Rao 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
158984887fa3SAttilio Rao 			panic("Lock %s not %slocked @ %s:%d\n",
1590047dd67eSAttilio Rao 			    lk->lock_object.lo_name, slocked ? "share" : "",
159184887fa3SAttilio Rao 			    file, line);
1592047dd67eSAttilio Rao 
1593047dd67eSAttilio Rao 		if ((lk->lk_lock & LK_SHARE) == 0) {
1594047dd67eSAttilio Rao 			if (lockmgr_recursed(lk)) {
159584887fa3SAttilio Rao 				if (what & KA_NOTRECURSED)
159684887fa3SAttilio Rao 					panic("Lock %s recursed @ %s:%d\n",
1597047dd67eSAttilio Rao 					    lk->lock_object.lo_name, file,
1598047dd67eSAttilio Rao 					    line);
159984887fa3SAttilio Rao 			} else if (what & KA_RECURSED)
160084887fa3SAttilio Rao 				panic("Lock %s not recursed @ %s:%d\n",
1601047dd67eSAttilio Rao 				    lk->lock_object.lo_name, file, line);
160284887fa3SAttilio Rao 		}
160384887fa3SAttilio Rao 		break;
160484887fa3SAttilio Rao 	case KA_XLOCKED:
160584887fa3SAttilio Rao 	case KA_XLOCKED | KA_NOTRECURSED:
160684887fa3SAttilio Rao 	case KA_XLOCKED | KA_RECURSED:
1607047dd67eSAttilio Rao 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
160884887fa3SAttilio Rao 			panic("Lock %s not exclusively locked @ %s:%d\n",
1609047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
1610047dd67eSAttilio Rao 		if (lockmgr_recursed(lk)) {
161184887fa3SAttilio Rao 			if (what & KA_NOTRECURSED)
161284887fa3SAttilio Rao 				panic("Lock %s recursed @ %s:%d\n",
1613047dd67eSAttilio Rao 				    lk->lock_object.lo_name, file, line);
161484887fa3SAttilio Rao 		} else if (what & KA_RECURSED)
161584887fa3SAttilio Rao 			panic("Lock %s not recursed @ %s:%d\n",
1616047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
161784887fa3SAttilio Rao 		break;
161884887fa3SAttilio Rao 	case KA_UNLOCKED:
1619047dd67eSAttilio Rao 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
162084887fa3SAttilio Rao 			panic("Lock %s exclusively locked @ %s:%d\n",
1621047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
162284887fa3SAttilio Rao 		break;
162384887fa3SAttilio Rao 	default:
1624047dd67eSAttilio Rao 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1625047dd67eSAttilio Rao 		    line);
162684887fa3SAttilio Rao 	}
162784887fa3SAttilio Rao }
1628047dd67eSAttilio Rao #endif
162984887fa3SAttilio Rao 
1630be6847d7SJohn Baldwin #ifdef DDB
1631462a7addSJohn Baldwin int
1632462a7addSJohn Baldwin lockmgr_chain(struct thread *td, struct thread **ownerp)
1633462a7addSJohn Baldwin {
1634047dd67eSAttilio Rao 	struct lock *lk;
1635462a7addSJohn Baldwin 
1636047dd67eSAttilio Rao 	lk = td->td_wchan;
1637462a7addSJohn Baldwin 
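	/* Bail out if the wait channel is not a lockmgr lock. */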
1638047dd67eSAttilio Rao 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1639462a7addSJohn Baldwin 		return (0);
1640047dd67eSAttilio Rao 	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1641047dd67eSAttilio Rao 	if (lk->lk_lock & LK_SHARE)
1642047dd67eSAttilio Rao 		db_printf("SHARED (count %ju)\n",
1643047dd67eSAttilio Rao 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1644047dd67eSAttilio Rao 	else
1645047dd67eSAttilio Rao 		db_printf("EXCL\n");
1646047dd67eSAttilio Rao 	*ownerp = lockmgr_xholder(lk);
1647462a7addSJohn Baldwin 
1648462a7addSJohn Baldwin 	return (1);
1649462a7addSJohn Baldwin }
1650462a7addSJohn Baldwin 
1651047dd67eSAttilio Rao static void
1652d576deedSPawel Jakub Dawidek db_show_lockmgr(const struct lock_object *lock)
1653be6847d7SJohn Baldwin {
1654be6847d7SJohn Baldwin 	struct thread *td;
1655d576deedSPawel Jakub Dawidek 	const struct lock *lk;
1656be6847d7SJohn Baldwin 
1657d576deedSPawel Jakub Dawidek 	lk = (const struct lock *)lock;
1658be6847d7SJohn Baldwin 
1659be6847d7SJohn Baldwin 	db_printf(" state: ");
1660047dd67eSAttilio Rao 	if (lk->lk_lock == LK_UNLOCKED)
1661be6847d7SJohn Baldwin 		db_printf("UNLOCKED\n");
1662047dd67eSAttilio Rao 	else if (lk->lk_lock & LK_SHARE)
1663047dd67eSAttilio Rao 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1664047dd67eSAttilio Rao 	else {
1665047dd67eSAttilio Rao 		td = lockmgr_xholder(lk);
1666047dd67eSAttilio Rao 		if (td == (struct thread *)LK_KERNPROC)
1667047dd67eSAttilio Rao 			db_printf("XLOCK: LK_KERNPROC\n");
1668047dd67eSAttilio Rao 		else
1669047dd67eSAttilio Rao 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1670047dd67eSAttilio Rao 			    td->td_tid, td->td_proc->p_pid,
1671047dd67eSAttilio Rao 			    td->td_proc->p_comm);
1672047dd67eSAttilio Rao 		if (lockmgr_recursed(lk))
1673047dd67eSAttilio Rao 			db_printf(" recursed: %d\n", lk->lk_recurse);
1674047dd67eSAttilio Rao 	}
1675047dd67eSAttilio Rao 	db_printf(" waiters: ");
1676047dd67eSAttilio Rao 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1677047dd67eSAttilio Rao 	case LK_SHARED_WAITERS:
1678047dd67eSAttilio Rao 		db_printf("shared\n");
1679e5023dd9SEdward Tomasz Napierala 		break;
1680047dd67eSAttilio Rao 	case LK_EXCLUSIVE_WAITERS:
1681047dd67eSAttilio Rao 		db_printf("exclusive\n");
1682047dd67eSAttilio Rao 		break;
1683047dd67eSAttilio Rao 	case LK_ALL_WAITERS:
1684047dd67eSAttilio Rao 		db_printf("shared and exclusive\n");
1685047dd67eSAttilio Rao 		break;
1686047dd67eSAttilio Rao 	default:
1687047dd67eSAttilio Rao 		db_printf("none\n");
1688047dd67eSAttilio Rao 	}
1689651175c9SAttilio Rao 	db_printf(" spinners: ");
1690651175c9SAttilio Rao 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1691651175c9SAttilio Rao 		db_printf("exclusive\n");
1692651175c9SAttilio Rao 	else
1693651175c9SAttilio Rao 		db_printf("none\n");
1694be6847d7SJohn Baldwin }
1695be6847d7SJohn Baldwin #endif
1696