xref: /freebsd/sys/kern/kern_lock.c (revision c8b29d12120da985471d1c8f320b836d50a7bcca)
19454b2d8SWarner Losh /*-
28a36da99SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
38a36da99SPedro F. Giffuni  *
4047dd67eSAttilio Rao  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
5047dd67eSAttilio Rao  * All rights reserved.
653bf4bb2SPeter Wemm  *
753bf4bb2SPeter Wemm  * Redistribution and use in source and binary forms, with or without
853bf4bb2SPeter Wemm  * modification, are permitted provided that the following conditions
953bf4bb2SPeter Wemm  * are met:
1053bf4bb2SPeter Wemm  * 1. Redistributions of source code must retain the above copyright
11047dd67eSAttilio Rao  *    notice(s), this list of conditions and the following disclaimer as
12047dd67eSAttilio Rao  *    the first lines of this file unmodified other than the possible
13047dd67eSAttilio Rao  *    addition of one or more copyright notices.
1453bf4bb2SPeter Wemm  * 2. Redistributions in binary form must reproduce the above copyright
15047dd67eSAttilio Rao  *    notice(s), this list of conditions and the following disclaimer in the
1653bf4bb2SPeter Wemm  *    documentation and/or other materials provided with the distribution.
1753bf4bb2SPeter Wemm  *
18047dd67eSAttilio Rao  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19047dd67eSAttilio Rao  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20047dd67eSAttilio Rao  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21047dd67eSAttilio Rao  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22047dd67eSAttilio Rao  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23047dd67eSAttilio Rao  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24047dd67eSAttilio Rao  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25047dd67eSAttilio Rao  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2653bf4bb2SPeter Wemm  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27047dd67eSAttilio Rao  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28047dd67eSAttilio Rao  * DAMAGE.
2953bf4bb2SPeter Wemm  */
3053bf4bb2SPeter Wemm 
31047dd67eSAttilio Rao #include "opt_ddb.h"
32f5f9340bSFabien Thomas #include "opt_hwpmc_hooks.h"
33047dd67eSAttilio Rao 
34677b542eSDavid E. O'Brien #include <sys/cdefs.h>
35677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
36677b542eSDavid E. O'Brien 
3753bf4bb2SPeter Wemm #include <sys/param.h>
38cd2fe4e6SAttilio Rao #include <sys/kdb.h>
3961d80e90SJohn Baldwin #include <sys/ktr.h>
4053bf4bb2SPeter Wemm #include <sys/lock.h>
41047dd67eSAttilio Rao #include <sys/lock_profile.h>
428302d183SBruce Evans #include <sys/lockmgr.h>
435b699f16SMark Johnston #include <sys/lockstat.h>
44d8881ca3SJohn Baldwin #include <sys/mutex.h>
458302d183SBruce Evans #include <sys/proc.h>
46047dd67eSAttilio Rao #include <sys/sleepqueue.h>
47e8ddb61dSJeff Roberson #ifdef DEBUG_LOCKS
48e8ddb61dSJeff Roberson #include <sys/stack.h>
49e8ddb61dSJeff Roberson #endif
50651175c9SAttilio Rao #include <sys/sysctl.h>
51047dd67eSAttilio Rao #include <sys/systm.h>
5253bf4bb2SPeter Wemm 
53047dd67eSAttilio Rao #include <machine/cpu.h>
546efc8a16SAttilio Rao 
55be6847d7SJohn Baldwin #ifdef DDB
56be6847d7SJohn Baldwin #include <ddb/ddb.h>
57047dd67eSAttilio Rao #endif
58047dd67eSAttilio Rao 
59f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
60f5f9340bSFabien Thomas #include <sys/pmckern.h>
61f5f9340bSFabien Thomas PMC_SOFT_DECLARE( , , lock, failed);
62f5f9340bSFabien Thomas #endif
63f5f9340bSFabien Thomas 
64651175c9SAttilio Rao CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
65651175c9SAttilio Rao     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
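
/*
 * For orientation, a sketch of the lk_lock word manipulated throughout
 * this file (the authoritative encoding lives in sys/lockmgr.h; this is
 * a paraphrase, not a normative layout):
 *
 *	shared:    sharer count shifted up, plus LK_SHARE and any of the
 *	           LK_SHARED_WAITERS / LK_EXCLUSIVE_WAITERS /
 *	           LK_EXCLUSIVE_SPINNERS flag bits
 *	exclusive: owning thread pointer (LK_SHARE clear), plus the same
 *	           waiter/spinner flag bits
 *
 * LK_HOLDER() strips the flag bits, and LK_UNLOCKED is the shared form
 * with a sharer count of zero.
 */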
66047dd67eSAttilio Rao 
67047dd67eSAttilio Rao #define	SQ_EXCLUSIVE_QUEUE	0
68047dd67eSAttilio Rao #define	SQ_SHARED_QUEUE		1
69047dd67eSAttilio Rao 
70047dd67eSAttilio Rao #ifndef INVARIANTS
71047dd67eSAttilio Rao #define	_lockmgr_assert(lk, what, file, line)
72047dd67eSAttilio Rao #endif
73ce1c953eSMark Johnston 
74047dd67eSAttilio Rao #define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
75047dd67eSAttilio Rao #define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
76047dd67eSAttilio Rao 
77047dd67eSAttilio Rao #ifndef DEBUG_LOCKS
78047dd67eSAttilio Rao #define	STACK_PRINT(lk)
79047dd67eSAttilio Rao #define	STACK_SAVE(lk)
80047dd67eSAttilio Rao #define	STACK_ZERO(lk)
81047dd67eSAttilio Rao #else
82047dd67eSAttilio Rao #define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
83047dd67eSAttilio Rao #define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
84047dd67eSAttilio Rao #define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
85047dd67eSAttilio Rao #endif
86047dd67eSAttilio Rao 
87047dd67eSAttilio Rao #define	LOCK_LOG2(lk, string, arg1, arg2)				\
88047dd67eSAttilio Rao 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
89047dd67eSAttilio Rao 		CTR2(KTR_LOCK, (string), (arg1), (arg2))
90047dd67eSAttilio Rao #define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
91047dd67eSAttilio Rao 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
92047dd67eSAttilio Rao 		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
93047dd67eSAttilio Rao 
94e5f94314SAttilio Rao #define	GIANT_DECLARE							\
95e5f94314SAttilio Rao 	int _i = 0;							\
96e5f94314SAttilio Rao 	WITNESS_SAVE_DECL(Giant)
97e5f94314SAttilio Rao #define	GIANT_RESTORE() do {						\
986e8c1ccbSMateusz Guzik 	if (__predict_false(_i > 0)) {					\
99e5f94314SAttilio Rao 		while (_i--)						\
100e5f94314SAttilio Rao 			mtx_lock(&Giant);				\
101e5f94314SAttilio Rao 		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
102e5f94314SAttilio Rao 	}								\
103e5f94314SAttilio Rao } while (0)
104e5f94314SAttilio Rao #define	GIANT_SAVE() do {						\
1056e8c1ccbSMateusz Guzik 	if (__predict_false(mtx_owned(&Giant))) {			\
106e5f94314SAttilio Rao 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
107e5f94314SAttilio Rao 		while (mtx_owned(&Giant)) {				\
108e5f94314SAttilio Rao 			_i++;						\
109e5f94314SAttilio Rao 			mtx_unlock(&Giant);				\
110e5f94314SAttilio Rao 		}							\
111e5f94314SAttilio Rao 	}								\
112e5f94314SAttilio Rao } while (0)
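
/*
 * Illustrative sketch (not compiled as part of this file): the pair of
 * macros above brackets a sleep point so that Giant, possibly held
 * recursively, is fully dropped across the sleep and reacquired to the
 * same depth afterwards:
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();		(records the hold depth in _i, drops Giant)
 *	sleepq_wait(...);	(sleeps without holding Giant)
 *	GIANT_RESTORE();	(locks Giant again _i times)
 */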
113e5f94314SAttilio Rao 
11495ab076dSMateusz Guzik static bool __always_inline
11595ab076dSMateusz Guzik LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
11695ab076dSMateusz Guzik {
11795ab076dSMateusz Guzik 
11895ab076dSMateusz Guzik 	if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
11995ab076dSMateusz Guzik 	    LK_SHARE)
12095ab076dSMateusz Guzik 		return (true);
12195ab076dSMateusz Guzik 	if (fp || (!(x & LK_SHARE)))
12295ab076dSMateusz Guzik 		return (false);
12395ab076dSMateusz Guzik 	if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
12495ab076dSMateusz Guzik 	    (curthread->td_pflags & TDP_DEADLKTREAT))
12595ab076dSMateusz Guzik 		return (true);
12695ab076dSMateusz Guzik 	return (false);
12795ab076dSMateusz Guzik }
12895ab076dSMateusz Guzik 
129e5f94314SAttilio Rao #define	LK_TRYOP(x)							\
130e5f94314SAttilio Rao 	((x) & LK_NOWAIT)
131e5f94314SAttilio Rao 
132e5f94314SAttilio Rao #define	LK_CAN_WITNESS(x)						\
133e5f94314SAttilio Rao 	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
134e5f94314SAttilio Rao #define	LK_TRYWIT(x)							\
135e5f94314SAttilio Rao 	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
136047dd67eSAttilio Rao 
137047dd67eSAttilio Rao #define	lockmgr_disowned(lk)						\
138047dd67eSAttilio Rao 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
139047dd67eSAttilio Rao 
14010391db5SMateusz Guzik #define	lockmgr_xlocked_v(v)						\
14110391db5SMateusz Guzik 	(((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
14210391db5SMateusz Guzik 
14310391db5SMateusz Guzik #define	lockmgr_xlocked(lk) lockmgr_xlocked_v((lk)->lk_lock)
144047dd67eSAttilio Rao 
145d576deedSPawel Jakub Dawidek static void	assert_lockmgr(const struct lock_object *lock, int how);
146047dd67eSAttilio Rao #ifdef DDB
147d576deedSPawel Jakub Dawidek static void	db_show_lockmgr(const struct lock_object *lock);
148be6847d7SJohn Baldwin #endif
1497faf4d90SDavide Italiano static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
150a5aedd68SStacey Son #ifdef KDTRACE_HOOKS
151d576deedSPawel Jakub Dawidek static int	owner_lockmgr(const struct lock_object *lock,
152d576deedSPawel Jakub Dawidek 		    struct thread **owner);
153a5aedd68SStacey Son #endif
1547faf4d90SDavide Italiano static uintptr_t unlock_lockmgr(struct lock_object *lock);
15561bd5e21SKip Macy 
15661bd5e21SKip Macy struct lock_class lock_class_lockmgr = {
1573ff6d229SJohn Baldwin 	.lc_name = "lockmgr",
158047dd67eSAttilio Rao 	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
159f9721b43SAttilio Rao 	.lc_assert = assert_lockmgr,
16061bd5e21SKip Macy #ifdef DDB
1616e21afd4SJohn Baldwin 	.lc_ddb_show = db_show_lockmgr,
16261bd5e21SKip Macy #endif
1636e21afd4SJohn Baldwin 	.lc_lock = lock_lockmgr,
164a5aedd68SStacey Son 	.lc_unlock = unlock_lockmgr,
165a5aedd68SStacey Son #ifdef KDTRACE_HOOKS
166a5aedd68SStacey Son 	.lc_owner = owner_lockmgr,
167a5aedd68SStacey Son #endif
16861bd5e21SKip Macy };
16961bd5e21SKip Macy 
1701c6987ebSMateusz Guzik struct lockmgr_wait {
1711c6987ebSMateusz Guzik 	const char *iwmesg;
1721c6987ebSMateusz Guzik 	int ipri;
1731c6987ebSMateusz Guzik 	int itimo;
1741c6987ebSMateusz Guzik };
1751c6987ebSMateusz Guzik 
176c4a48867SMateusz Guzik static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
17795ab076dSMateusz Guzik     int flags, bool fp);
1781c6987ebSMateusz Guzik static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
1791c6987ebSMateusz Guzik 
1801c6987ebSMateusz Guzik static void
1811c6987ebSMateusz Guzik lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
1821c6987ebSMateusz Guzik {
1831c6987ebSMateusz Guzik 	struct lock_class *class;
1841c6987ebSMateusz Guzik 
1851c6987ebSMateusz Guzik 	if (flags & LK_INTERLOCK) {
1861c6987ebSMateusz Guzik 		class = LOCK_CLASS(ilk);
1871c6987ebSMateusz Guzik 		class->lc_unlock(ilk);
1881c6987ebSMateusz Guzik 	}
1891c6987ebSMateusz Guzik 
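	/*
	 * A non-zero wakeup_swapper indicates that a sleepq_broadcast()
	 * made a swapped-out thread runnable; kick_proc0() wakes proc0
	 * (the swapper) so it can swap that thread back in.
	 */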
1901c6987ebSMateusz Guzik 	if (__predict_false(wakeup_swapper))
1911c6987ebSMateusz Guzik 		kick_proc0();
1921c6987ebSMateusz Guzik }
193c4a48867SMateusz Guzik 
194c4a48867SMateusz Guzik static void
195c4a48867SMateusz Guzik lockmgr_note_shared_acquire(struct lock *lk, int contested,
196c4a48867SMateusz Guzik     uint64_t waittime, const char *file, int line, int flags)
197c4a48867SMateusz Guzik {
198c4a48867SMateusz Guzik 
1995b699f16SMark Johnston 	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
2005b699f16SMark Johnston 	    waittime, file, line, LOCKSTAT_READER);
201c4a48867SMateusz Guzik 	LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
202c4a48867SMateusz Guzik 	WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
203c4a48867SMateusz Guzik 	TD_LOCKS_INC(curthread);
204c4a48867SMateusz Guzik 	TD_SLOCKS_INC(curthread);
205c4a48867SMateusz Guzik 	STACK_SAVE(lk);
206c4a48867SMateusz Guzik }
207c4a48867SMateusz Guzik 
208c4a48867SMateusz Guzik static void
209c4a48867SMateusz Guzik lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
210c4a48867SMateusz Guzik {
211c4a48867SMateusz Guzik 
2125b699f16SMark Johnston 	LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
213c4a48867SMateusz Guzik 	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
214c4a48867SMateusz Guzik 	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
215c4a48867SMateusz Guzik 	TD_LOCKS_DEC(curthread);
216c4a48867SMateusz Guzik 	TD_SLOCKS_DEC(curthread);
217c4a48867SMateusz Guzik }
218c4a48867SMateusz Guzik 
219c4a48867SMateusz Guzik static void
220c4a48867SMateusz Guzik lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
221c4a48867SMateusz Guzik     uint64_t waittime, const char *file, int line, int flags)
222c4a48867SMateusz Guzik {
223c4a48867SMateusz Guzik 
2245b699f16SMark Johnston 	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
2255b699f16SMark Johnston 	    waittime, file, line, LOCKSTAT_WRITER);
226c4a48867SMateusz Guzik 	LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
227c4a48867SMateusz Guzik 	WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
228c4a48867SMateusz Guzik 	    line);
229c4a48867SMateusz Guzik 	TD_LOCKS_INC(curthread);
230c4a48867SMateusz Guzik 	STACK_SAVE(lk);
231c4a48867SMateusz Guzik }
232c4a48867SMateusz Guzik 
233c4a48867SMateusz Guzik static void
234c4a48867SMateusz Guzik lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
235c4a48867SMateusz Guzik {
236c4a48867SMateusz Guzik 
2375b699f16SMark Johnston 	LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
238c4a48867SMateusz Guzik 	LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
239c4a48867SMateusz Guzik 	    line);
240c4a48867SMateusz Guzik 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
241c4a48867SMateusz Guzik 	TD_LOCKS_DEC(curthread);
242c4a48867SMateusz Guzik }
243c4a48867SMateusz Guzik 
244047dd67eSAttilio Rao static __inline struct thread *
245d576deedSPawel Jakub Dawidek lockmgr_xholder(const struct lock *lk)
246047dd67eSAttilio Rao {
247047dd67eSAttilio Rao 	uintptr_t x;
248047dd67eSAttilio Rao 
249047dd67eSAttilio Rao 	x = lk->lk_lock;
250047dd67eSAttilio Rao 	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
251047dd67eSAttilio Rao }
25284887fa3SAttilio Rao 
25353bf4bb2SPeter Wemm /*
254047dd67eSAttilio Rao  * This function assumes the sleepq_lock is held on entry and returns with it
255047dd67eSAttilio Rao  * released.  It also assumes the generic interlock is sane and has already
256047dd67eSAttilio Rao  * been checked.  If LK_INTERLOCK is specified, the interlock is not
257047dd67eSAttilio Rao  * reacquired after the sleep.
25853bf4bb2SPeter Wemm  */
259047dd67eSAttilio Rao static __inline int
260047dd67eSAttilio Rao sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
261047dd67eSAttilio Rao     const char *wmesg, int pri, int timo, int queue)
262047dd67eSAttilio Rao {
263e5f94314SAttilio Rao 	GIANT_DECLARE;
264047dd67eSAttilio Rao 	struct lock_class *class;
265047dd67eSAttilio Rao 	int catch, error;
26653bf4bb2SPeter Wemm 
267047dd67eSAttilio Rao 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
2685047a8fdSAttilio Rao 	catch = pri & PCATCH;
269047dd67eSAttilio Rao 	pri &= PRIMASK;
270047dd67eSAttilio Rao 	error = 0;
271047dd67eSAttilio Rao 
272047dd67eSAttilio Rao 	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
273047dd67eSAttilio Rao 	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
274047dd67eSAttilio Rao 
275047dd67eSAttilio Rao 	if (flags & LK_INTERLOCK)
276047dd67eSAttilio Rao 		class->lc_unlock(ilk);
2772028867dSAttilio Rao 	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
2782028867dSAttilio Rao 		lk->lk_exslpfail++;
279e5f94314SAttilio Rao 	GIANT_SAVE();
280047dd67eSAttilio Rao 	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
281047dd67eSAttilio Rao 	    SLEEPQ_INTERRUPTIBLE : 0), queue);
282a115fb62SHans Petter Selasky 	if ((flags & LK_TIMELOCK) && timo)
283047dd67eSAttilio Rao 		sleepq_set_timeout(&lk->lock_object, timo);
284a115fb62SHans Petter Selasky 
285047dd67eSAttilio Rao 	/*
286047dd67eSAttilio Rao 	 * Pick the sleep call matching the timeout and catch semantics.
287047dd67eSAttilio Rao 	 */
288047dd67eSAttilio Rao 	if ((flags & LK_TIMELOCK) && timo && catch)
289047dd67eSAttilio Rao 		error = sleepq_timedwait_sig(&lk->lock_object, pri);
290047dd67eSAttilio Rao 	else if ((flags & LK_TIMELOCK) && timo)
291047dd67eSAttilio Rao 		error = sleepq_timedwait(&lk->lock_object, pri);
292047dd67eSAttilio Rao 	else if (catch)
293047dd67eSAttilio Rao 		error = sleepq_wait_sig(&lk->lock_object, pri);
294047dd67eSAttilio Rao 	else
295047dd67eSAttilio Rao 		sleepq_wait(&lk->lock_object, pri);
296e5f94314SAttilio Rao 	GIANT_RESTORE();
297047dd67eSAttilio Rao 	if ((flags & LK_SLEEPFAIL) && error == 0)
298047dd67eSAttilio Rao 		error = ENOLCK;
299047dd67eSAttilio Rao 
300047dd67eSAttilio Rao 	return (error);
301047dd67eSAttilio Rao }
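
/*
 * For reference, the sleep primitive picked by sleeplk() above, as a
 * function of its inputs (a summary of the code, not added behavior):
 *
 *	LK_TIMELOCK && timo	catch	call
 *	yes			yes	sleepq_timedwait_sig()
 *	yes			no	sleepq_timedwait()
 *	no			yes	sleepq_wait_sig()
 *	no			no	sleepq_wait()
 */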
302047dd67eSAttilio Rao 
303da7bbd2cSJohn Baldwin static __inline int
304047dd67eSAttilio Rao wakeupshlk(struct lock *lk, const char *file, int line)
305047dd67eSAttilio Rao {
3060ad122a9SMateusz Guzik 	uintptr_t v, x, orig_x;
3072028867dSAttilio Rao 	u_int realexslp;
308da7bbd2cSJohn Baldwin 	int queue, wakeup_swapper;
309047dd67eSAttilio Rao 
310da7bbd2cSJohn Baldwin 	wakeup_swapper = 0;
311047dd67eSAttilio Rao 	for (;;) {
312047dd67eSAttilio Rao 		x = lk->lk_lock;
3131c6987ebSMateusz Guzik 		if (lockmgr_sunlock_try(lk, &x))
314047dd67eSAttilio Rao 			break;
315047dd67eSAttilio Rao 
316047dd67eSAttilio Rao 		/*
317047dd67eSAttilio Rao 		 * We should have a sharer with waiters, so enter the hard
318047dd67eSAttilio Rao 		 * path in order to handle wakeups correctly.
319047dd67eSAttilio Rao 		 */
320047dd67eSAttilio Rao 		sleepq_lock(&lk->lock_object);
3210ad122a9SMateusz Guzik 		orig_x = lk->lk_lock;
3220ad122a9SMateusz Guzik retry_sleepq:
3230ad122a9SMateusz Guzik 		x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
324047dd67eSAttilio Rao 		v = LK_UNLOCKED;
325047dd67eSAttilio Rao 
326047dd67eSAttilio Rao 		/*
327047dd67eSAttilio Rao 		 * If the lock has exclusive waiters, give them preference in
328047dd67eSAttilio Rao 		 * order to avoid deadlock with shared runners up.
3292028867dSAttilio Rao 		 * If interruptible sleeps left the exclusive queue empty,
3302028867dSAttilio Rao 		 * avoid starvation of the threads sleeping on the shared
3312028867dSAttilio Rao 		 * queue by giving them precedence and clearing the
3322028867dSAttilio Rao 		 * exclusive waiters bit anyway.
333c636ba83SAttilio Rao 		 * Please note that the lk_exslpfail count may overstate
334c636ba83SAttilio Rao 		 * the real number of waiters with the LK_SLEEPFAIL flag
335e3043798SPedro F. Giffuni 		 * set, because such waiters may also be using interruptible
336aab9c8c2SAttilio Rao 		 * sleeps; treat lk_exslpfail only as an upper bound,
337aab9c8c2SAttilio Rao 		 * including the edge cases.
338047dd67eSAttilio Rao 		 */
3392028867dSAttilio Rao 		realexslp = sleepq_sleepcnt(&lk->lock_object,
3402028867dSAttilio Rao 		    SQ_EXCLUSIVE_QUEUE);
3412028867dSAttilio Rao 		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
3422028867dSAttilio Rao 			if (lk->lk_exslpfail < realexslp) {
3432028867dSAttilio Rao 				lk->lk_exslpfail = 0;
344047dd67eSAttilio Rao 				queue = SQ_EXCLUSIVE_QUEUE;
345047dd67eSAttilio Rao 				v |= (x & LK_SHARED_WAITERS);
346047dd67eSAttilio Rao 			} else {
3472028867dSAttilio Rao 				lk->lk_exslpfail = 0;
3482028867dSAttilio Rao 				LOCK_LOG2(lk,
3492028867dSAttilio Rao 				    "%s: %p has only LK_SLEEPFAIL sleepers",
3502028867dSAttilio Rao 				    __func__, lk);
3512028867dSAttilio Rao 				LOCK_LOG2(lk,
3522028867dSAttilio Rao 			    "%s: %p waking up threads on the exclusive queue",
3532028867dSAttilio Rao 				    __func__, lk);
3542028867dSAttilio Rao 				wakeup_swapper =
3552028867dSAttilio Rao 				    sleepq_broadcast(&lk->lock_object,
3562028867dSAttilio Rao 				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
3572028867dSAttilio Rao 				queue = SQ_SHARED_QUEUE;
3582028867dSAttilio Rao 			}
3592028867dSAttilio Rao 
3602028867dSAttilio Rao 		} else {
3619dbf7a62SAttilio Rao 
3629dbf7a62SAttilio Rao 			/*
3639dbf7a62SAttilio Rao 			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
3649dbf7a62SAttilio Rao 			 * and using interruptible sleeps/timeout may have
3659dbf7a62SAttilio Rao 			 * left spurious lk_exslpfail counts behind, so clean
3669dbf7a62SAttilio Rao 			 * them up anyway.
3679dbf7a62SAttilio Rao 			 */
3689dbf7a62SAttilio Rao 			lk->lk_exslpfail = 0;
369047dd67eSAttilio Rao 			queue = SQ_SHARED_QUEUE;
370047dd67eSAttilio Rao 		}
371047dd67eSAttilio Rao 
3720ad122a9SMateusz Guzik 		if (lockmgr_sunlock_try(lk, &orig_x)) {
373047dd67eSAttilio Rao 			sleepq_release(&lk->lock_object);
3740ad122a9SMateusz Guzik 			break;
3750ad122a9SMateusz Guzik 		}
3760ad122a9SMateusz Guzik 
3770ad122a9SMateusz Guzik 		x |= LK_SHARERS_LOCK(1);
3780ad122a9SMateusz Guzik 		if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
3790ad122a9SMateusz Guzik 			orig_x = x;
3800ad122a9SMateusz Guzik 			goto retry_sleepq;
381047dd67eSAttilio Rao 		}
382047dd67eSAttilio Rao 		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
383047dd67eSAttilio Rao 		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
384047dd67eSAttilio Rao 		    "exclusive");
3852028867dSAttilio Rao 		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
386da7bbd2cSJohn Baldwin 		    0, queue);
387047dd67eSAttilio Rao 		sleepq_release(&lk->lock_object);
388047dd67eSAttilio Rao 		break;
389047dd67eSAttilio Rao 	}
390047dd67eSAttilio Rao 
391c4a48867SMateusz Guzik 	lockmgr_note_shared_release(lk, file, line);
392da7bbd2cSJohn Baldwin 	return (wakeup_swapper);
393047dd67eSAttilio Rao }
394047dd67eSAttilio Rao 
395047dd67eSAttilio Rao static void
396d576deedSPawel Jakub Dawidek assert_lockmgr(const struct lock_object *lock, int what)
397f9721b43SAttilio Rao {
398f9721b43SAttilio Rao 
399f9721b43SAttilio Rao 	panic("lockmgr locks do not support assertions");
400f9721b43SAttilio Rao }
401f9721b43SAttilio Rao 
402047dd67eSAttilio Rao static void
4037faf4d90SDavide Italiano lock_lockmgr(struct lock_object *lock, uintptr_t how)
4046e21afd4SJohn Baldwin {
4056e21afd4SJohn Baldwin 
4066e21afd4SJohn Baldwin 	panic("lockmgr locks do not support sleep interlocking");
4076e21afd4SJohn Baldwin }
4086e21afd4SJohn Baldwin 
4097faf4d90SDavide Italiano static uintptr_t
4106e21afd4SJohn Baldwin unlock_lockmgr(struct lock_object *lock)
4116e21afd4SJohn Baldwin {
4126e21afd4SJohn Baldwin 
4136e21afd4SJohn Baldwin 	panic("lockmgr locks do not support sleep interlocking");
4146e21afd4SJohn Baldwin }
4156e21afd4SJohn Baldwin 
416a5aedd68SStacey Son #ifdef KDTRACE_HOOKS
417a5aedd68SStacey Son static int
418d576deedSPawel Jakub Dawidek owner_lockmgr(const struct lock_object *lock, struct thread **owner)
419a5aedd68SStacey Son {
420a5aedd68SStacey Son 
421a5aedd68SStacey Son 	panic("lockmgr locks do not support owner inquiring");
422a5aedd68SStacey Son }
423a5aedd68SStacey Son #endif
424a5aedd68SStacey Son 
42599448ed1SJohn Dyson void
426047dd67eSAttilio Rao lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
42799448ed1SJohn Dyson {
4286efc8a16SAttilio Rao 	int iflags;
4296efc8a16SAttilio Rao 
430047dd67eSAttilio Rao 	MPASS((flags & ~LK_INIT_MASK) == 0);
431353998acSAttilio Rao 	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
432353998acSAttilio Rao             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
433353998acSAttilio Rao             &lk->lk_lock));
43499448ed1SJohn Dyson 
435f0830182SAttilio Rao 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
436f0830182SAttilio Rao 	if (flags & LK_CANRECURSE)
437f0830182SAttilio Rao 		iflags |= LO_RECURSABLE;
438047dd67eSAttilio Rao 	if ((flags & LK_NODUP) == 0)
4396efc8a16SAttilio Rao 		iflags |= LO_DUPOK;
4407fbfba7bSAttilio Rao 	if (flags & LK_NOPROFILE)
4417fbfba7bSAttilio Rao 		iflags |= LO_NOPROFILE;
442047dd67eSAttilio Rao 	if ((flags & LK_NOWITNESS) == 0)
4436efc8a16SAttilio Rao 		iflags |= LO_WITNESS;
4447fbfba7bSAttilio Rao 	if (flags & LK_QUIET)
4457fbfba7bSAttilio Rao 		iflags |= LO_QUIET;
446e63091eaSMarcel Moolenaar 	if (flags & LK_IS_VNODE)
447e63091eaSMarcel Moolenaar 		iflags |= LO_IS_VNODE;
44846713135SGleb Smirnoff 	if (flags & LK_NEW)
44946713135SGleb Smirnoff 		iflags |= LO_NEW;
4505fe188b1SMateusz Guzik 	iflags |= flags & LK_NOSHARE;
451047dd67eSAttilio Rao 
452b5fb43e5SJohn Baldwin 	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
453047dd67eSAttilio Rao 	lk->lk_lock = LK_UNLOCKED;
454047dd67eSAttilio Rao 	lk->lk_recurse = 0;
4552028867dSAttilio Rao 	lk->lk_exslpfail = 0;
456047dd67eSAttilio Rao 	lk->lk_timo = timo;
457047dd67eSAttilio Rao 	lk->lk_pri = pri;
458047dd67eSAttilio Rao 	STACK_ZERO(lk);
45999448ed1SJohn Dyson }
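
/*
 * Typical life cycle of a lockmgr lock, as an illustrative sketch only
 * (the "examplelk" consumer and its flags are made up):
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, LK_CANRECURSE);
 *	...
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&lk, LK_RELEASE, NULL);
 *	...
 *	lockdestroy(&lk);
 */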
46099448ed1SJohn Dyson 
4613634d5b2SJohn Baldwin /*
4623634d5b2SJohn Baldwin  * XXX: Gross hacks to manipulate external lock flags after
4633634d5b2SJohn Baldwin  * initialization.  Used for certain vnode and buf locks.
4643634d5b2SJohn Baldwin  */
4653634d5b2SJohn Baldwin void
4663634d5b2SJohn Baldwin lockallowshare(struct lock *lk)
4673634d5b2SJohn Baldwin {
4683634d5b2SJohn Baldwin 
4693634d5b2SJohn Baldwin 	lockmgr_assert(lk, KA_XLOCKED);
4703634d5b2SJohn Baldwin 	lk->lock_object.lo_flags &= ~LK_NOSHARE;
4713634d5b2SJohn Baldwin }
4723634d5b2SJohn Baldwin 
4733634d5b2SJohn Baldwin void
474575e02d9SKonstantin Belousov lockdisableshare(struct lock *lk)
475575e02d9SKonstantin Belousov {
476575e02d9SKonstantin Belousov 
477575e02d9SKonstantin Belousov 	lockmgr_assert(lk, KA_XLOCKED);
478575e02d9SKonstantin Belousov 	lk->lock_object.lo_flags |= LK_NOSHARE;
479575e02d9SKonstantin Belousov }
480575e02d9SKonstantin Belousov 
481575e02d9SKonstantin Belousov void
4823634d5b2SJohn Baldwin lockallowrecurse(struct lock *lk)
4833634d5b2SJohn Baldwin {
4843634d5b2SJohn Baldwin 
4853634d5b2SJohn Baldwin 	lockmgr_assert(lk, KA_XLOCKED);
4863634d5b2SJohn Baldwin 	lk->lock_object.lo_flags |= LO_RECURSABLE;
4873634d5b2SJohn Baldwin }
4883634d5b2SJohn Baldwin 
4893634d5b2SJohn Baldwin void
4903634d5b2SJohn Baldwin lockdisablerecurse(struct lock *lk)
4913634d5b2SJohn Baldwin {
4923634d5b2SJohn Baldwin 
4933634d5b2SJohn Baldwin 	lockmgr_assert(lk, KA_XLOCKED);
4943634d5b2SJohn Baldwin 	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
4953634d5b2SJohn Baldwin }
4963634d5b2SJohn Baldwin 
497a18b1f1dSJason Evans void
498047dd67eSAttilio Rao lockdestroy(struct lock *lk)
499a18b1f1dSJason Evans {
500c91fcee7SJohn Baldwin 
501047dd67eSAttilio Rao 	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
502047dd67eSAttilio Rao 	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
5032028867dSAttilio Rao 	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
504047dd67eSAttilio Rao 	lock_destroy(&lk->lock_object);
505047dd67eSAttilio Rao }
506047dd67eSAttilio Rao 
507c4a48867SMateusz Guzik static bool __always_inline
50895ab076dSMateusz Guzik lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
509c4a48867SMateusz Guzik {
510c4a48867SMateusz Guzik 
511c4a48867SMateusz Guzik 	/*
512c4a48867SMateusz Guzik 	 * If no other thread holds an exclusive lock and
513c4a48867SMateusz Guzik 	 * no exclusive waiter is present, bump the count of
514c4a48867SMateusz Guzik 	 * sharers.  Since we have to preserve the state of
515c4a48867SMateusz Guzik 	 * waiters, if we fail to acquire the shared lock
516c4a48867SMateusz Guzik 	 * loop back and retry.
517c4a48867SMateusz Guzik 	 */
518c4a48867SMateusz Guzik 	*xp = lk->lk_lock;
51995ab076dSMateusz Guzik 	while (LK_CAN_SHARE(*xp, flags, fp)) {
520c4a48867SMateusz Guzik 		if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
521c4a48867SMateusz Guzik 		    *xp + LK_ONE_SHARER)) {
522c4a48867SMateusz Guzik 			return (true);
523c4a48867SMateusz Guzik 		}
524c4a48867SMateusz Guzik 	}
525c4a48867SMateusz Guzik 	return (false);
526c4a48867SMateusz Guzik }
527c4a48867SMateusz Guzik 
528c4a48867SMateusz Guzik static bool __always_inline
5291c6987ebSMateusz Guzik lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
530c4a48867SMateusz Guzik {
531c4a48867SMateusz Guzik 
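	/*
	 * Drop one sharer reference.  atomic_fcmpset_rel_ptr() refreshes
	 * *xp on failure, so every retry evaluates the loop condition on
	 * the current lock word without an extra explicit read.  Bail to
	 * the slow path once we are the last sharer and waiters exist.
	 */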
532c4a48867SMateusz Guzik 	for (;;) {
53395ab076dSMateusz Guzik 		if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
5341c6987ebSMateusz Guzik 			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
5351c6987ebSMateusz Guzik 			    *xp - LK_ONE_SHARER))
536c4a48867SMateusz Guzik 				return (true);
537c4a48867SMateusz Guzik 			continue;
538c4a48867SMateusz Guzik 		}
539c4a48867SMateusz Guzik 		break;
540c4a48867SMateusz Guzik 	}
541c4a48867SMateusz Guzik 	return (false);
542c4a48867SMateusz Guzik }
543c4a48867SMateusz Guzik 
5441c6987ebSMateusz Guzik static __noinline int
5451c6987ebSMateusz Guzik lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
5461c6987ebSMateusz Guzik     const char *file, int line, struct lockmgr_wait *lwa)
547c4a48867SMateusz Guzik {
5481c6987ebSMateusz Guzik 	uintptr_t tid, x;
5491c6987ebSMateusz Guzik 	int error = 0;
550047dd67eSAttilio Rao 	const char *iwmesg;
5511c6987ebSMateusz Guzik 	int ipri, itimo;
5521c6987ebSMateusz Guzik 
5535b699f16SMark Johnston #ifdef KDTRACE_HOOKS
5545b699f16SMark Johnston 	uint64_t sleep_time = 0;
5555b699f16SMark Johnston #endif
5561723a064SJeff Roberson #ifdef LOCK_PROFILING
5571723a064SJeff Roberson 	uint64_t waittime = 0;
5581723a064SJeff Roberson 	int contested = 0;
5591723a064SJeff Roberson #endif
560047dd67eSAttilio Rao 
5611c6987ebSMateusz Guzik 	if (__predict_false(panicstr != NULL))
5621c6987ebSMateusz Guzik 		goto out;
5631c6987ebSMateusz Guzik 
564047dd67eSAttilio Rao 	tid = (uintptr_t)curthread;
565047dd67eSAttilio Rao 
566e5f94314SAttilio Rao 	if (LK_CAN_WITNESS(flags))
567e5f94314SAttilio Rao 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
56824150d37SJohn Baldwin 		    file, line, flags & LK_INTERLOCK ? ilk : NULL);
569047dd67eSAttilio Rao 	for (;;) {
57095ab076dSMateusz Guzik 		if (lockmgr_slock_try(lk, &x, flags, false))
571047dd67eSAttilio Rao 			break;
572f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
573f5f9340bSFabien Thomas 		PMC_SOFT_CALL( , , lock, failed);
574f5f9340bSFabien Thomas #endif
575047dd67eSAttilio Rao 		lock_profile_obtain_lock_failed(&lk->lock_object,
576047dd67eSAttilio Rao 		    &contested, &waittime);
577047dd67eSAttilio Rao 
578047dd67eSAttilio Rao 		/*
57996f1567fSKonstantin Belousov 		 * If the lock is already held by curthread in
580047dd67eSAttilio Rao 		 * exclusive way avoid a deadlock.
581047dd67eSAttilio Rao 		 */
582047dd67eSAttilio Rao 		if (LK_HOLDER(x) == tid) {
583047dd67eSAttilio Rao 			LOCK_LOG2(lk,
58496f1567fSKonstantin Belousov 			    "%s: %p already held in exclusive mode",
585047dd67eSAttilio Rao 			    __func__, lk);
586047dd67eSAttilio Rao 			error = EDEADLK;
587047dd67eSAttilio Rao 			break;
588a18b1f1dSJason Evans 		}
589a18b1f1dSJason Evans 
590a18b1f1dSJason Evans 		/*
591047dd67eSAttilio Rao 		 * If the operation is not allowed to sleep, just give up
592047dd67eSAttilio Rao 		 * and return.
593d7a7e179SAttilio Rao 		 */
594047dd67eSAttilio Rao 		if (LK_TRYOP(flags)) {
595047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p fails the try operation",
596047dd67eSAttilio Rao 			    __func__, lk);
597047dd67eSAttilio Rao 			error = EBUSY;
598047dd67eSAttilio Rao 			break;
599047dd67eSAttilio Rao 		}
600047dd67eSAttilio Rao 
601047dd67eSAttilio Rao 		/*
602047dd67eSAttilio Rao 		 * Acquire the sleepqueue chain lock because we
603047dd67eSAttilio Rao 		 * probably will need to manipulate the waiters flags.
604047dd67eSAttilio Rao 		 */
605047dd67eSAttilio Rao 		sleepq_lock(&lk->lock_object);
606047dd67eSAttilio Rao 		x = lk->lk_lock;
6070ad122a9SMateusz Guzik retry_sleepq:
608047dd67eSAttilio Rao 
609047dd67eSAttilio Rao 		/*
610047dd67eSAttilio Rao 		 * if the lock can be acquired in shared mode, try
611047dd67eSAttilio Rao 		 * again.
612047dd67eSAttilio Rao 		 */
61395ab076dSMateusz Guzik 		if (LK_CAN_SHARE(x, flags, false)) {
614047dd67eSAttilio Rao 			sleepq_release(&lk->lock_object);
615047dd67eSAttilio Rao 			continue;
616047dd67eSAttilio Rao 		}
617047dd67eSAttilio Rao 
618047dd67eSAttilio Rao 		/*
619047dd67eSAttilio Rao 		 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
620047dd67eSAttilio Rao 		 * loop back and retry.
621047dd67eSAttilio Rao 		 */
622047dd67eSAttilio Rao 		if ((x & LK_SHARED_WAITERS) == 0) {
6230ad122a9SMateusz Guzik 			if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
624047dd67eSAttilio Rao 			    x | LK_SHARED_WAITERS)) {
6250ad122a9SMateusz Guzik 				goto retry_sleepq;
626047dd67eSAttilio Rao 			}
627047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p set shared waiters flag",
628047dd67eSAttilio Rao 			    __func__, lk);
629047dd67eSAttilio Rao 		}
630047dd67eSAttilio Rao 
6311c6987ebSMateusz Guzik 		if (lwa == NULL) {
6321c6987ebSMateusz Guzik 			iwmesg = lk->lock_object.lo_name;
6331c6987ebSMateusz Guzik 			ipri = lk->lk_pri;
6341c6987ebSMateusz Guzik 			itimo = lk->lk_timo;
6351c6987ebSMateusz Guzik 		} else {
6361c6987ebSMateusz Guzik 			iwmesg = lwa->iwmesg;
6371c6987ebSMateusz Guzik 			ipri = lwa->ipri;
6381c6987ebSMateusz Guzik 			itimo = lwa->itimo;
6391c6987ebSMateusz Guzik 		}
6401c6987ebSMateusz Guzik 
641047dd67eSAttilio Rao 		/*
642047dd67eSAttilio Rao 		 * Since we have been unable to acquire the
643047dd67eSAttilio Rao 		 * shared lock and the shared waiters flag is set,
644047dd67eSAttilio Rao 		 * we will sleep.
645047dd67eSAttilio Rao 		 */
6465b699f16SMark Johnston #ifdef KDTRACE_HOOKS
6475b699f16SMark Johnston 		sleep_time -= lockstat_nsecs(&lk->lock_object);
6485b699f16SMark Johnston #endif
649047dd67eSAttilio Rao 		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
650047dd67eSAttilio Rao 		    SQ_SHARED_QUEUE);
6515b699f16SMark Johnston #ifdef KDTRACE_HOOKS
6525b699f16SMark Johnston 		sleep_time += lockstat_nsecs(&lk->lock_object);
6535b699f16SMark Johnston #endif
654047dd67eSAttilio Rao 		flags &= ~LK_INTERLOCK;
655047dd67eSAttilio Rao 		if (error) {
656047dd67eSAttilio Rao 			LOCK_LOG3(lk,
657047dd67eSAttilio Rao 			    "%s: interrupted sleep for %p with %d",
658047dd67eSAttilio Rao 			    __func__, lk, error);
659047dd67eSAttilio Rao 			break;
660047dd67eSAttilio Rao 		}
661047dd67eSAttilio Rao 		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
662047dd67eSAttilio Rao 		    __func__, lk);
663047dd67eSAttilio Rao 	}
664047dd67eSAttilio Rao 	if (error == 0) {
6655b699f16SMark Johnston #ifdef KDTRACE_HOOKS
6665b699f16SMark Johnston 		if (sleep_time != 0)
6675b699f16SMark Johnston 			LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
6685b699f16SMark Johnston 			    LOCKSTAT_READER, (x & LK_SHARE) == 0,
6695b699f16SMark Johnston 			    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
6705b699f16SMark Johnston #endif
671c4a48867SMateusz Guzik #ifdef LOCK_PROFILING
672c4a48867SMateusz Guzik 		lockmgr_note_shared_acquire(lk, contested, waittime,
673c4a48867SMateusz Guzik 		    file, line, flags);
674c4a48867SMateusz Guzik #else
675c4a48867SMateusz Guzik 		lockmgr_note_shared_acquire(lk, 0, 0, file, line,
676c4a48867SMateusz Guzik 		    flags);
677c4a48867SMateusz Guzik #endif
678047dd67eSAttilio Rao 	}
679047dd67eSAttilio Rao 
6801c6987ebSMateusz Guzik out:
6811c6987ebSMateusz Guzik 	lockmgr_exit(flags, ilk, 0);
6821c6987ebSMateusz Guzik 	return (error);
683047dd67eSAttilio Rao }
684047dd67eSAttilio Rao 
6851c6987ebSMateusz Guzik static __noinline int
6861c6987ebSMateusz Guzik lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
6871c6987ebSMateusz Guzik     const char *file, int line, struct lockmgr_wait *lwa)
6881c6987ebSMateusz Guzik {
6891c6987ebSMateusz Guzik 	struct lock_class *class;
6901c6987ebSMateusz Guzik 	uintptr_t tid, x, v;
6911c6987ebSMateusz Guzik 	int error = 0;
6921c6987ebSMateusz Guzik 	const char *iwmesg;
6931c6987ebSMateusz Guzik 	int ipri, itimo;
6947c6fe803SKonstantin Belousov 
6955b699f16SMark Johnston #ifdef KDTRACE_HOOKS
6965b699f16SMark Johnston 	uint64_t sleep_time = 0;
6975b699f16SMark Johnston #endif
6981c6987ebSMateusz Guzik #ifdef LOCK_PROFILING
6991c6987ebSMateusz Guzik 	uint64_t waittime = 0;
7001c6987ebSMateusz Guzik 	int contested = 0;
7011c6987ebSMateusz Guzik #endif
702047dd67eSAttilio Rao 
7031c6987ebSMateusz Guzik 	if (__predict_false(panicstr != NULL))
7041c6987ebSMateusz Guzik 		goto out;
7051c6987ebSMateusz Guzik 
7061c6987ebSMateusz Guzik 	tid = (uintptr_t)curthread;
7071c6987ebSMateusz Guzik 
708e5f94314SAttilio Rao 	if (LK_CAN_WITNESS(flags))
709e5f94314SAttilio Rao 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
71024150d37SJohn Baldwin 		    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
71124150d37SJohn Baldwin 		    ilk : NULL);
712047dd67eSAttilio Rao 
713047dd67eSAttilio Rao 	/*
71496f1567fSKonstantin Belousov 	 * If curthread already holds the lock and recursion is
715047dd67eSAttilio Rao 	 * allowed, simply recurse on it.
716047dd67eSAttilio Rao 	 */
717047dd67eSAttilio Rao 	if (lockmgr_xlocked(lk)) {
718047dd67eSAttilio Rao 		if ((flags & LK_CANRECURSE) == 0 &&
719f0830182SAttilio Rao 		    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
720047dd67eSAttilio Rao 			/*
721047dd67eSAttilio Rao 			 * If the operation is not expected to panic,
722047dd67eSAttilio Rao 			 * just give up and return.
723047dd67eSAttilio Rao 			 */
724047dd67eSAttilio Rao 			if (LK_TRYOP(flags)) {
725047dd67eSAttilio Rao 				LOCK_LOG2(lk,
726047dd67eSAttilio Rao 				    "%s: %p fails the try operation",
727047dd67eSAttilio Rao 				    __func__, lk);
728047dd67eSAttilio Rao 				error = EBUSY;
7291c6987ebSMateusz Guzik 				goto out;
730047dd67eSAttilio Rao 			}
7311c6987ebSMateusz Guzik 			if (flags & LK_INTERLOCK) {
7321c6987ebSMateusz Guzik 				class = LOCK_CLASS(ilk);
733047dd67eSAttilio Rao 				class->lc_unlock(ilk);
7341c6987ebSMateusz Guzik 			}
73583fc34eaSGleb Smirnoff 			panic("%s: recursing on non recursive lockmgr %p "
73683fc34eaSGleb Smirnoff 			    "@ %s:%d\n", __func__, lk, file, line);
737047dd67eSAttilio Rao 		}
738047dd67eSAttilio Rao 		lk->lk_recurse++;
739047dd67eSAttilio Rao 		LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
740047dd67eSAttilio Rao 		LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
741047dd67eSAttilio Rao 		    lk->lk_recurse, file, line);
742e5f94314SAttilio Rao 		WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
743e5f94314SAttilio Rao 		    LK_TRYWIT(flags), file, line);
744047dd67eSAttilio Rao 		TD_LOCKS_INC(curthread);
7451c6987ebSMateusz Guzik 		goto out;
746047dd67eSAttilio Rao 	}
747047dd67eSAttilio Rao 
748fc4f686dSMateusz Guzik 	for (;;) {
749fc4f686dSMateusz Guzik 		if (lk->lk_lock == LK_UNLOCKED &&
750fc4f686dSMateusz Guzik 		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
751fc4f686dSMateusz Guzik 			break;
752f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
753f5f9340bSFabien Thomas 		PMC_SOFT_CALL( , , lock, failed);
754f5f9340bSFabien Thomas #endif
755047dd67eSAttilio Rao 		lock_profile_obtain_lock_failed(&lk->lock_object,
756047dd67eSAttilio Rao 		    &contested, &waittime);
757047dd67eSAttilio Rao 
758047dd67eSAttilio Rao 		/*
759047dd67eSAttilio Rao 		 * If the operation is not allowed to sleep, just give up
760047dd67eSAttilio Rao 		 * and return.
761047dd67eSAttilio Rao 		 */
762047dd67eSAttilio Rao 		if (LK_TRYOP(flags)) {
763047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p fails the try operation",
764047dd67eSAttilio Rao 			    __func__, lk);
765047dd67eSAttilio Rao 			error = EBUSY;
766047dd67eSAttilio Rao 			break;
767047dd67eSAttilio Rao 		}
768047dd67eSAttilio Rao 
769047dd67eSAttilio Rao 		/*
770047dd67eSAttilio Rao 		 * Acquire the sleepqueue chain lock because we
771047dd67eSAttilio Rao 		 * probably will need to manipulate the waiters flags.
772047dd67eSAttilio Rao 		 */
773047dd67eSAttilio Rao 		sleepq_lock(&lk->lock_object);
774047dd67eSAttilio Rao 		x = lk->lk_lock;
7750ad122a9SMateusz Guzik retry_sleepq:
776047dd67eSAttilio Rao 
777047dd67eSAttilio Rao 		/*
778047dd67eSAttilio Rao 		 * if the lock has been released while we spun on
779047dd67eSAttilio Rao 		 * the sleepqueue chain lock just try again.
780047dd67eSAttilio Rao 		 */
781047dd67eSAttilio Rao 		if (x == LK_UNLOCKED) {
782047dd67eSAttilio Rao 			sleepq_release(&lk->lock_object);
783047dd67eSAttilio Rao 			continue;
784047dd67eSAttilio Rao 		}
785047dd67eSAttilio Rao 
786047dd67eSAttilio Rao 		/*
787047dd67eSAttilio Rao 		 * The lock can be in the state where there is a
788047dd67eSAttilio Rao 		 * pending queue of waiters, but still no owner.
789047dd67eSAttilio Rao 		 * This happens when the lock is contested and an
790047dd67eSAttilio Rao 		 * owner is going to claim the lock.
791047dd67eSAttilio Rao 		 * If curthread is the one that successfully acquires it,
792047dd67eSAttilio Rao 		 * claim lock ownership and return, preserving the waiters
793047dd67eSAttilio Rao 		 * flags.
794047dd67eSAttilio Rao 		 */
795651175c9SAttilio Rao 		v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
796651175c9SAttilio Rao 		if ((x & ~v) == LK_UNLOCKED) {
797651175c9SAttilio Rao 			v &= ~LK_EXCLUSIVE_SPINNERS;
7980ad122a9SMateusz Guzik 			if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
799047dd67eSAttilio Rao 			    tid | v)) {
800047dd67eSAttilio Rao 				sleepq_release(&lk->lock_object);
801047dd67eSAttilio Rao 				LOCK_LOG2(lk,
802047dd67eSAttilio Rao 				    "%s: %p claimed by a new writer",
803047dd67eSAttilio Rao 				    __func__, lk);
804047dd67eSAttilio Rao 				break;
805047dd67eSAttilio Rao 			}
8060ad122a9SMateusz Guzik 			goto retry_sleepq;
807047dd67eSAttilio Rao 		}
808047dd67eSAttilio Rao 
809047dd67eSAttilio Rao 		/*
810047dd67eSAttilio Rao 		 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
811047dd67eSAttilio Rao 		 * fail, loop back and retry.
812047dd67eSAttilio Rao 		 */
813047dd67eSAttilio Rao 		if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
8140ad122a9SMateusz Guzik 			if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
815047dd67eSAttilio Rao 			    x | LK_EXCLUSIVE_WAITERS)) {
8160ad122a9SMateusz Guzik 				goto retry_sleepq;
817047dd67eSAttilio Rao 			}
818047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p set excl waiters flag",
819047dd67eSAttilio Rao 			    __func__, lk);
820047dd67eSAttilio Rao 		}
821047dd67eSAttilio Rao 
8221c6987ebSMateusz Guzik 		if (lwa == NULL) {
8231c6987ebSMateusz Guzik 			iwmesg = lk->lock_object.lo_name;
8241c6987ebSMateusz Guzik 			ipri = lk->lk_pri;
8251c6987ebSMateusz Guzik 			itimo = lk->lk_timo;
8261c6987ebSMateusz Guzik 		} else {
8271c6987ebSMateusz Guzik 			iwmesg = lwa->iwmesg;
8281c6987ebSMateusz Guzik 			ipri = lwa->ipri;
8291c6987ebSMateusz Guzik 			itimo = lwa->itimo;
8301c6987ebSMateusz Guzik 		}
8311c6987ebSMateusz Guzik 
832047dd67eSAttilio Rao 		/*
833047dd67eSAttilio Rao 		 * Since we have been unable to acquire the
834047dd67eSAttilio Rao 		 * exclusive lock and the exclusive waiters flag
835047dd67eSAttilio Rao 		 * is set, we will sleep.
836047dd67eSAttilio Rao 		 */
8375b699f16SMark Johnston #ifdef KDTRACE_HOOKS
8385b699f16SMark Johnston 		sleep_time -= lockstat_nsecs(&lk->lock_object);
8395b699f16SMark Johnston #endif
840047dd67eSAttilio Rao 		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
841047dd67eSAttilio Rao 		    SQ_EXCLUSIVE_QUEUE);
8425b699f16SMark Johnston #ifdef KDTRACE_HOOKS
8435b699f16SMark Johnston 		sleep_time += lockstat_nsecs(&lk->lock_object);
8445b699f16SMark Johnston #endif
845047dd67eSAttilio Rao 		flags &= ~LK_INTERLOCK;
846047dd67eSAttilio Rao 		if (error) {
847047dd67eSAttilio Rao 			LOCK_LOG3(lk,
848047dd67eSAttilio Rao 			    "%s: interrupted sleep for %p with %d",
849047dd67eSAttilio Rao 			    __func__, lk, error);
850047dd67eSAttilio Rao 			break;
851047dd67eSAttilio Rao 		}
852047dd67eSAttilio Rao 		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
853047dd67eSAttilio Rao 		    __func__, lk);
854047dd67eSAttilio Rao 	}
855047dd67eSAttilio Rao 	if (error == 0) {
8565b699f16SMark Johnston #ifdef KDTRACE_HOOKS
8575b699f16SMark Johnston 		if (sleep_time != 0)
8585b699f16SMark Johnston 			LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
8595b699f16SMark Johnston 			    LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
8605b699f16SMark Johnston 			    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
8615b699f16SMark Johnston #endif
862c4a48867SMateusz Guzik #ifdef LOCK_PROFILING
863c4a48867SMateusz Guzik 		lockmgr_note_exclusive_acquire(lk, contested, waittime,
864c4a48867SMateusz Guzik 		    file, line, flags);
865c4a48867SMateusz Guzik #else
866c4a48867SMateusz Guzik 		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
867c4a48867SMateusz Guzik 		    flags);
868c4a48867SMateusz Guzik #endif
869047dd67eSAttilio Rao 	}
8701c6987ebSMateusz Guzik 
8711c6987ebSMateusz Guzik out:
8721c6987ebSMateusz Guzik 	lockmgr_exit(flags, ilk, 0);
8731c6987ebSMateusz Guzik 	return (error);
8741c6987ebSMateusz Guzik }
8751c6987ebSMateusz Guzik 
8761c6987ebSMateusz Guzik static __noinline int
8771c6987ebSMateusz Guzik lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
8781c6987ebSMateusz Guzik     const char *file, int line, struct lockmgr_wait *lwa)
8791c6987ebSMateusz Guzik {
8801c6987ebSMateusz Guzik 	uintptr_t tid, x, v;
8811c6987ebSMateusz Guzik 	int error = 0;
8821c6987ebSMateusz Guzik 	int wakeup_swapper = 0;
8831c6987ebSMateusz Guzik 	int op;
8841c6987ebSMateusz Guzik 
8851c6987ebSMateusz Guzik 	if (__predict_false(panicstr != NULL))
8861c6987ebSMateusz Guzik 		goto out;
8871c6987ebSMateusz Guzik 
8881c6987ebSMateusz Guzik 	tid = (uintptr_t)curthread;
8891c6987ebSMateusz Guzik 
8901c6987ebSMateusz Guzik 	_lockmgr_assert(lk, KA_SLOCKED, file, line);
8911c6987ebSMateusz Guzik 	v = lk->lk_lock;
8921c6987ebSMateusz Guzik 	x = v & LK_ALL_WAITERS;
8931c6987ebSMateusz Guzik 	v &= LK_EXCLUSIVE_SPINNERS;
8941c6987ebSMateusz Guzik 
8951c6987ebSMateusz Guzik 	/*
8961c6987ebSMateusz Guzik 	 * Try to switch from one shared lock to an exclusive one.
8971c6987ebSMateusz Guzik 	 * We need to preserve waiters flags during the operation.
8981c6987ebSMateusz Guzik 	 */
8991c6987ebSMateusz Guzik 	if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
9001c6987ebSMateusz Guzik 	    tid | x)) {
9011c6987ebSMateusz Guzik 		LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
9021c6987ebSMateusz Guzik 		    line);
9031c6987ebSMateusz Guzik 		WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
9041c6987ebSMateusz Guzik 		    LK_TRYWIT(flags), file, line);
9055b699f16SMark Johnston 		LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
9061c6987ebSMateusz Guzik 		TD_SLOCKS_DEC(curthread);
9071c6987ebSMateusz Guzik 		goto out;
9081c6987ebSMateusz Guzik 	}
9091c6987ebSMateusz Guzik 
9101c6987ebSMateusz Guzik 	op = flags & LK_TYPE_MASK;
9111c6987ebSMateusz Guzik 
9121c6987ebSMateusz Guzik 	/*
9131c6987ebSMateusz Guzik 	 * In LK_TRYUPGRADE mode, do not drop the lock,
9141c6987ebSMateusz Guzik 	 * returning EBUSY instead.
9151c6987ebSMateusz Guzik 	 */
9161c6987ebSMateusz Guzik 	if (op == LK_TRYUPGRADE) {
9171c6987ebSMateusz Guzik 		LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
9181c6987ebSMateusz Guzik 		    __func__, lk);
9191c6987ebSMateusz Guzik 		error = EBUSY;
9201c6987ebSMateusz Guzik 		goto out;
9211c6987ebSMateusz Guzik 	}
9221c6987ebSMateusz Guzik 
9231c6987ebSMateusz Guzik 	/*
9241c6987ebSMateusz Guzik 	 * We have failed to upgrade, so just
9251c6987ebSMateusz Guzik 	 * give up the shared lock.
9261c6987ebSMateusz Guzik 	 */
9271c6987ebSMateusz Guzik 	wakeup_swapper |= wakeupshlk(lk, file, line);
9281c6987ebSMateusz Guzik 	error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
9291c6987ebSMateusz Guzik 	flags &= ~LK_INTERLOCK;
9301c6987ebSMateusz Guzik out:
9311c6987ebSMateusz Guzik 	lockmgr_exit(flags, ilk, wakeup_swapper);
9321c6987ebSMateusz Guzik 	return (error);
9331c6987ebSMateusz Guzik }
9341c6987ebSMateusz Guzik 
9351c6987ebSMateusz Guzik int
9361c6987ebSMateusz Guzik lockmgr_lock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk,
9371c6987ebSMateusz Guzik     const char *file, int line)
9381c6987ebSMateusz Guzik {
9391c6987ebSMateusz Guzik 	struct lock_class *class;
9401c6987ebSMateusz Guzik 	uintptr_t x, tid;
9411c6987ebSMateusz Guzik 	u_int op;
9421c6987ebSMateusz Guzik 	bool locked;
9431c6987ebSMateusz Guzik 
944b543c98cSConrad Meyer 	if (__predict_false(panicstr != NULL))
945b543c98cSConrad Meyer 		return (0);
946b543c98cSConrad Meyer 
9471c6987ebSMateusz Guzik 	op = flags & LK_TYPE_MASK;
9481c6987ebSMateusz Guzik 	locked = false;
9491c6987ebSMateusz Guzik 	switch (op) {
9501c6987ebSMateusz Guzik 	case LK_SHARED:
9511c6987ebSMateusz Guzik 		if (LK_CAN_WITNESS(flags))
9521c6987ebSMateusz Guzik 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
9531c6987ebSMateusz Guzik 			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
9541c6987ebSMateusz Guzik 		if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
9551c6987ebSMateusz Guzik 			break;
95695ab076dSMateusz Guzik 		if (lockmgr_slock_try(lk, &x, flags, true)) {
9571c6987ebSMateusz Guzik 			lockmgr_note_shared_acquire(lk, 0, 0,
9581c6987ebSMateusz Guzik 			    file, line, flags);
9591c6987ebSMateusz Guzik 			locked = true;
9601c6987ebSMateusz Guzik 		} else {
9611c6987ebSMateusz Guzik 			return (lockmgr_slock_hard(lk, flags, ilk, file, line,
9621c6987ebSMateusz Guzik 			    NULL));
9631c6987ebSMateusz Guzik 		}
9641c6987ebSMateusz Guzik 		break;
9651c6987ebSMateusz Guzik 	case LK_EXCLUSIVE:
9661c6987ebSMateusz Guzik 		if (LK_CAN_WITNESS(flags))
9671c6987ebSMateusz Guzik 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
9681c6987ebSMateusz Guzik 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
9691c6987ebSMateusz Guzik 			    ilk : NULL);
9701c6987ebSMateusz Guzik 		tid = (uintptr_t)curthread;
9711c6987ebSMateusz Guzik 		if (lk->lk_lock == LK_UNLOCKED &&
9721c6987ebSMateusz Guzik 		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
9731c6987ebSMateusz Guzik 			lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
9741c6987ebSMateusz Guzik 			    flags);
9751c6987ebSMateusz Guzik 			locked = true;
9761c6987ebSMateusz Guzik 		} else {
9771c6987ebSMateusz Guzik 			return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
9781c6987ebSMateusz Guzik 			    NULL));
9791c6987ebSMateusz Guzik 		}
9801c6987ebSMateusz Guzik 		break;
9811c6987ebSMateusz Guzik 	case LK_UPGRADE:
9821c6987ebSMateusz Guzik 	case LK_TRYUPGRADE:
9831c6987ebSMateusz Guzik 		return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
9841c6987ebSMateusz Guzik 	default:
9851c6987ebSMateusz Guzik 		break;
9861c6987ebSMateusz Guzik 	}
9871c6987ebSMateusz Guzik 	if (__predict_true(locked)) {
9881c6987ebSMateusz Guzik 		if (__predict_false(flags & LK_INTERLOCK)) {
9891c6987ebSMateusz Guzik 			class = LOCK_CLASS(ilk);
9901c6987ebSMateusz Guzik 			class->lc_unlock(ilk);
9911c6987ebSMateusz Guzik 		}
9921c6987ebSMateusz Guzik 		return (0);
9931c6987ebSMateusz Guzik 	} else {
9941c6987ebSMateusz Guzik 		return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
9951c6987ebSMateusz Guzik 		    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
9961c6987ebSMateusz Guzik 	}
9971c6987ebSMateusz Guzik }
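
/*
 * Example invocation of the fast path (illustrative only; "lk" is a
 * caller's lock and no interlock is involved):
 *
 *	error = lockmgr_lock_fast_path(&lk, LK_SHARED | LK_NOWAIT, NULL,
 *	    __FILE__, __LINE__);
 *
 * With LK_NOWAIT set, a contended attempt returns EBUSY from the hard
 * path instead of sleeping.
 */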
9981c6987ebSMateusz Guzik 
9991c6987ebSMateusz Guzik static __noinline int
10001c6987ebSMateusz Guzik lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
10011c6987ebSMateusz Guzik     const char *file, int line)
10021c6987ebSMateusz Guzik 
10031c6987ebSMateusz Guzik {
10041c6987ebSMateusz Guzik 	int wakeup_swapper = 0;
10051c6987ebSMateusz Guzik 
10061c6987ebSMateusz Guzik 	if (__predict_false(panicstr != NULL))
10071c6987ebSMateusz Guzik 		goto out;
10081c6987ebSMateusz Guzik 
10091c6987ebSMateusz Guzik 	wakeup_swapper = wakeupshlk(lk, file, line);
10101c6987ebSMateusz Guzik 
10111c6987ebSMateusz Guzik out:
10121c6987ebSMateusz Guzik 	lockmgr_exit(flags, ilk, wakeup_swapper);
10131c6987ebSMateusz Guzik 	return (0);
10141c6987ebSMateusz Guzik }
10151c6987ebSMateusz Guzik 
10161c6987ebSMateusz Guzik static __noinline int
10171c6987ebSMateusz Guzik lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
10181c6987ebSMateusz Guzik     const char *file, int line)
10191c6987ebSMateusz Guzik {
10201c6987ebSMateusz Guzik 	uintptr_t tid, v;
10211c6987ebSMateusz Guzik 	int wakeup_swapper = 0;
10221c6987ebSMateusz Guzik 	u_int realexslp;
10231c6987ebSMateusz Guzik 	int queue;
10241c6987ebSMateusz Guzik 
10251c6987ebSMateusz Guzik 	if (__predict_false(panicstr != NULL))
10261c6987ebSMateusz Guzik 		goto out;
10271c6987ebSMateusz Guzik 
10281c6987ebSMateusz Guzik 	tid = (uintptr_t)curthread;
10291c6987ebSMateusz Guzik 
10301c6987ebSMateusz Guzik 	/*
10311c6987ebSMateusz Guzik 	 * As a first option, treat the lock as if it has no
10321c6987ebSMateusz Guzik 	 * waiters.
10331c6987ebSMateusz Guzik 	 * Fix up the tid variable if the lock has been disowned.
10341c6987ebSMateusz Guzik 	 */
10351c6987ebSMateusz Guzik 	if (LK_HOLDER(x) == LK_KERNPROC)
10361c6987ebSMateusz Guzik 		tid = LK_KERNPROC;
10371c6987ebSMateusz Guzik 	else {
10381c6987ebSMateusz Guzik 		WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
10391c6987ebSMateusz Guzik 		TD_LOCKS_DEC(curthread);
10401c6987ebSMateusz Guzik 	}
10411c6987ebSMateusz Guzik 	LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
10421c6987ebSMateusz Guzik 
10431c6987ebSMateusz Guzik 	/*
10441c6987ebSMateusz Guzik 	 * The lock is held in exclusive mode.
10451c6987ebSMateusz Guzik 	 * If the lock is recursed also, then unrecurse it.
10461c6987ebSMateusz Guzik 	 */
104710391db5SMateusz Guzik 	if (lockmgr_xlocked_v(x) && lockmgr_recursed(lk)) {
10481c6987ebSMateusz Guzik 		LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
10491c6987ebSMateusz Guzik 		lk->lk_recurse--;
10501c6987ebSMateusz Guzik 		goto out;
10511c6987ebSMateusz Guzik 	}
10521c6987ebSMateusz Guzik 	if (tid != LK_KERNPROC)
10535b699f16SMark Johnston 		LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
10545b699f16SMark Johnston 		    LOCKSTAT_WRITER);
10551c6987ebSMateusz Guzik 
105610391db5SMateusz Guzik 	if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
10571c6987ebSMateusz Guzik 		goto out;
10581c6987ebSMateusz Guzik 
10591c6987ebSMateusz Guzik 	sleepq_lock(&lk->lock_object);
10601c6987ebSMateusz Guzik 	x = lk->lk_lock;
10611c6987ebSMateusz Guzik 	v = LK_UNLOCKED;
10621c6987ebSMateusz Guzik 
10631c6987ebSMateusz Guzik 	/*
10641c6987ebSMateusz Guzik 	 * If the lock has exclusive waiters, give them
10651c6987ebSMateusz Guzik 	 * preference in order to avoid deadlock with
10661c6987ebSMateusz Guzik 	 * shared runners up.
10671c6987ebSMateusz Guzik 	 * If interruptible sleeps left the exclusive queue
10681c6987ebSMateusz Guzik 	 * empty, avoid starvation of the threads sleeping
10691c6987ebSMateusz Guzik 	 * on the shared queue by giving them precedence
10701c6987ebSMateusz Guzik 	 * and clearing the exclusive waiters bit anyway.
10711c6987ebSMateusz Guzik 	 * Please note that the lk_exslpfail count may
10721c6987ebSMateusz Guzik 	 * overstate the real number of waiters with the
10731c6987ebSMateusz Guzik 	 * LK_SLEEPFAIL flag set, because such waiters may
10741c6987ebSMateusz Guzik 	 * also be using interruptible sleeps; treat
10751c6987ebSMateusz Guzik 	 * lk_exslpfail only as an upper bound, including
10761c6987ebSMateusz Guzik 	 * the edge cases.
10771c6987ebSMateusz Guzik 	 */
10781c6987ebSMateusz Guzik 	MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
10791c6987ebSMateusz Guzik 	realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
10801c6987ebSMateusz Guzik 	if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
10811c6987ebSMateusz Guzik 		if (lk->lk_exslpfail < realexslp) {
10821c6987ebSMateusz Guzik 			lk->lk_exslpfail = 0;
10831c6987ebSMateusz Guzik 			queue = SQ_EXCLUSIVE_QUEUE;
10841c6987ebSMateusz Guzik 			v |= (x & LK_SHARED_WAITERS);
10851c6987ebSMateusz Guzik 		} else {
10861c6987ebSMateusz Guzik 			lk->lk_exslpfail = 0;
10871c6987ebSMateusz Guzik 			LOCK_LOG2(lk,
10881c6987ebSMateusz Guzik 			    "%s: %p has only LK_SLEEPFAIL sleepers",
10891c6987ebSMateusz Guzik 			    __func__, lk);
10901c6987ebSMateusz Guzik 			LOCK_LOG2(lk,
10911c6987ebSMateusz Guzik 			    "%s: %p waking up threads on the exclusive queue",
10921c6987ebSMateusz Guzik 			    __func__, lk);
10931c6987ebSMateusz Guzik 			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
10941c6987ebSMateusz Guzik 			    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
10951c6987ebSMateusz Guzik 			queue = SQ_SHARED_QUEUE;
10961c6987ebSMateusz Guzik 		}
10971c6987ebSMateusz Guzik 	} else {
10981c6987ebSMateusz Guzik 
10991c6987ebSMateusz Guzik 		/*
11001c6987ebSMateusz Guzik 		 * Exclusive waiters sleeping with LK_SLEEPFAIL
11011c6987ebSMateusz Guzik 		 * on and using interruptible sleeps/timeout
11021c6987ebSMateusz Guzik 		 * may have left spurious lk_exslpfail counts
11031c6987ebSMateusz Guzik 		 * behind, so clean them up anyway.
11041c6987ebSMateusz Guzik 		 */
11051c6987ebSMateusz Guzik 		lk->lk_exslpfail = 0;
11061c6987ebSMateusz Guzik 		queue = SQ_SHARED_QUEUE;
11071c6987ebSMateusz Guzik 	}
11081c6987ebSMateusz Guzik 
11091c6987ebSMateusz Guzik 	LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
11101c6987ebSMateusz Guzik 	    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
11111c6987ebSMateusz Guzik 	    "exclusive");
11121c6987ebSMateusz Guzik 	atomic_store_rel_ptr(&lk->lk_lock, v);
11131c6987ebSMateusz Guzik 	wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
11141c6987ebSMateusz Guzik 	sleepq_release(&lk->lock_object);
11151c6987ebSMateusz Guzik 
11161c6987ebSMateusz Guzik out:
11171c6987ebSMateusz Guzik 	lockmgr_exit(flags, ilk, wakeup_swapper);
11181c6987ebSMateusz Guzik 	return (0);
11191c6987ebSMateusz Guzik }
11201c6987ebSMateusz Guzik 
11211c6987ebSMateusz Guzik int
11221c6987ebSMateusz Guzik lockmgr_unlock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk)
11231c6987ebSMateusz Guzik {
11241c6987ebSMateusz Guzik 	struct lock_class *class;
11251c6987ebSMateusz Guzik 	uintptr_t x, tid;
11261c6987ebSMateusz Guzik 	const char *file;
11271c6987ebSMateusz Guzik 	int line;
11281c6987ebSMateusz Guzik 
1129b543c98cSConrad Meyer 	if (__predict_false(panicstr != NULL))
1130b543c98cSConrad Meyer 		return (0);
1131b543c98cSConrad Meyer 
11321c6987ebSMateusz Guzik 	file = __FILE__;
11331c6987ebSMateusz Guzik 	line = __LINE__;
11341c6987ebSMateusz Guzik 
11351c6987ebSMateusz Guzik 	_lockmgr_assert(lk, KA_LOCKED, file, line);
11361c6987ebSMateusz Guzik 	x = lk->lk_lock;
11371c6987ebSMateusz Guzik 	if (__predict_true((x & LK_SHARE) != 0)) {
11381c6987ebSMateusz Guzik 		if (lockmgr_sunlock_try(lk, &x)) {
11391c6987ebSMateusz Guzik 			lockmgr_note_shared_release(lk, file, line);
11401c6987ebSMateusz Guzik 		} else {
11411c6987ebSMateusz Guzik 			return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
11421c6987ebSMateusz Guzik 		}
11431c6987ebSMateusz Guzik 	} else {
11441c6987ebSMateusz Guzik 		tid = (uintptr_t)curthread;
11451c6987ebSMateusz Guzik 		if (!lockmgr_recursed(lk) &&
11461c6987ebSMateusz Guzik 		    atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
11471c6987ebSMateusz Guzik 			lockmgr_note_exclusive_release(lk, file, line);
11481c6987ebSMateusz Guzik 		} else {
11491c6987ebSMateusz Guzik 			return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
11501c6987ebSMateusz Guzik 		}
11511c6987ebSMateusz Guzik 	}
11521c6987ebSMateusz Guzik 	if (__predict_false(flags & LK_INTERLOCK)) {
11531c6987ebSMateusz Guzik 		class = LOCK_CLASS(ilk);
11541c6987ebSMateusz Guzik 		class->lc_unlock(ilk);
11551c6987ebSMateusz Guzik 	}
11561c6987ebSMateusz Guzik 	return (0);
11571c6987ebSMateusz Guzik }
11581c6987ebSMateusz Guzik 
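/*
 * Editorial usage sketch, not part of the original source; `foo_node',
 * `fn_interlock' and `fn_lock' are hypothetical.  The fast path releases
 * the lock without touching the sleepqueue when there are no waiters, and
 * LK_INTERLOCK additionally asks for `ilk' to be dropped on the way out.
 */
#if 0
struct foo_node {
	struct mtx	fn_interlock;
	struct lock	fn_lock;
};

static void
foo_node_unlock(struct foo_node *fn)
{

	mtx_lock(&fn->fn_interlock);
	/* ... inspect node state under the interlock ... */
	lockmgr_unlock_fast_path(&fn->fn_lock, LK_RELEASE | LK_INTERLOCK,
	    &fn->fn_interlock.lock_object);
	/* fn_interlock has been dropped by lockmgr. */
}
#endif
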
1159*c8b29d12SMateusz Guzik /*
1160*c8b29d12SMateusz Guzik  * Lightweight entry points for common operations.
1161*c8b29d12SMateusz Guzik  *
1162*c8b29d12SMateusz Guzik  * Functionality is similar to sx locks, in that none of the additional lockmgr
1163*c8b29d12SMateusz Guzik  * features are supported. To be clear, these are NOT supported:
1164*c8b29d12SMateusz Guzik  * 1. shared locking disablement
1165*c8b29d12SMateusz Guzik  * 2. returning with an error after sleep
1166*c8b29d12SMateusz Guzik  * 3. unlocking the interlock
1167*c8b29d12SMateusz Guzik  *
1168*c8b29d12SMateusz Guzik  * If in doubt, use lockmgr_*_fast_path.
1169*c8b29d12SMateusz Guzik  */
1170*c8b29d12SMateusz Guzik int
1171*c8b29d12SMateusz Guzik lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
1172*c8b29d12SMateusz Guzik {
1173*c8b29d12SMateusz Guzik 	uintptr_t x;
1174*c8b29d12SMateusz Guzik 
1175*c8b29d12SMateusz Guzik 	MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
1176*c8b29d12SMateusz Guzik 	MPASS((flags & LK_INTERLOCK) == 0);
1177*c8b29d12SMateusz Guzik 	MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
1178*c8b29d12SMateusz Guzik 
1179*c8b29d12SMateusz Guzik 	if (LK_CAN_WITNESS(flags))
1180*c8b29d12SMateusz Guzik 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1181*c8b29d12SMateusz Guzik 		    file, line, NULL);
1182*c8b29d12SMateusz Guzik 	if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
1183*c8b29d12SMateusz Guzik 		lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
1184*c8b29d12SMateusz Guzik 		return (0);
1185*c8b29d12SMateusz Guzik 	}
1186*c8b29d12SMateusz Guzik 
1187*c8b29d12SMateusz Guzik 	return (lockmgr_slock_hard(lk, flags, NULL, file, line, NULL));
1188*c8b29d12SMateusz Guzik }
1189*c8b29d12SMateusz Guzik 
1190*c8b29d12SMateusz Guzik int
1191*c8b29d12SMateusz Guzik lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
1192*c8b29d12SMateusz Guzik {
1193*c8b29d12SMateusz Guzik 	uintptr_t tid;
1194*c8b29d12SMateusz Guzik 
1195*c8b29d12SMateusz Guzik 	MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
1196*c8b29d12SMateusz Guzik 	MPASS((flags & LK_INTERLOCK) == 0);
1197*c8b29d12SMateusz Guzik 
1198*c8b29d12SMateusz Guzik 	if (LK_CAN_WITNESS(flags))
1199*c8b29d12SMateusz Guzik 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1200*c8b29d12SMateusz Guzik 		    LOP_EXCLUSIVE, file, line, NULL);
1201*c8b29d12SMateusz Guzik 	tid = (uintptr_t)curthread;
1202*c8b29d12SMateusz Guzik 	if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1203*c8b29d12SMateusz Guzik 		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1204*c8b29d12SMateusz Guzik 		    flags);
1205*c8b29d12SMateusz Guzik 		return (0);
1206*c8b29d12SMateusz Guzik 	}
1207*c8b29d12SMateusz Guzik 
1208*c8b29d12SMateusz Guzik 	return (lockmgr_xlock_hard(lk, flags, NULL, file, line, NULL));
1209*c8b29d12SMateusz Guzik }
1210*c8b29d12SMateusz Guzik 
1211*c8b29d12SMateusz Guzik int
1212*c8b29d12SMateusz Guzik lockmgr_unlock(struct lock *lk)
1213*c8b29d12SMateusz Guzik {
1214*c8b29d12SMateusz Guzik 	uintptr_t x, tid;
1215*c8b29d12SMateusz Guzik 	const char *file;
1216*c8b29d12SMateusz Guzik 	int line;
1217*c8b29d12SMateusz Guzik 
1218*c8b29d12SMateusz Guzik 	file = __FILE__;
1219*c8b29d12SMateusz Guzik 	line = __LINE__;
1220*c8b29d12SMateusz Guzik 
1221*c8b29d12SMateusz Guzik 	_lockmgr_assert(lk, KA_LOCKED, file, line);
1222*c8b29d12SMateusz Guzik 	x = lk->lk_lock;
1223*c8b29d12SMateusz Guzik 	if (__predict_true((x & LK_SHARE) != 0)) {
1224*c8b29d12SMateusz Guzik 		if (lockmgr_sunlock_try(lk, &x)) {
1225*c8b29d12SMateusz Guzik 			lockmgr_note_shared_release(lk, file, line);
1226*c8b29d12SMateusz Guzik 		} else {
1227*c8b29d12SMateusz Guzik 			return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1228*c8b29d12SMateusz Guzik 		}
1229*c8b29d12SMateusz Guzik 	} else {
1230*c8b29d12SMateusz Guzik 		tid = (uintptr_t)curthread;
1231*c8b29d12SMateusz Guzik 		if (!lockmgr_recursed(lk) &&
1232*c8b29d12SMateusz Guzik 		    atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1233*c8b29d12SMateusz Guzik 			lockmgr_note_exclusive_release(lk, file, line);
1234*c8b29d12SMateusz Guzik 		} else {
1235*c8b29d12SMateusz Guzik 			return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1236*c8b29d12SMateusz Guzik 		}
1237*c8b29d12SMateusz Guzik 	}
1238*c8b29d12SMateusz Guzik 	return (0);
1239*c8b29d12SMateusz Guzik }
1240*c8b29d12SMateusz Guzik 
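/*
 * Editorial usage sketch, not part of the original source; `foo_lk' is a
 * hypothetical lock initialized without LK_NOSHARE.  The lightweight entry
 * points pair naturally when neither an interlock nor custom sleep
 * parameters are needed.
 */
#if 0
static void
foo_read_section(struct lock *foo_lk)
{

	(void)lockmgr_slock(foo_lk, LK_SHARED, __FILE__, __LINE__);
	/* ... read-side work ... */
	(void)lockmgr_unlock(foo_lk);
}

static void
foo_write_section(struct lock *foo_lk)
{

	(void)lockmgr_xlock(foo_lk, LK_EXCLUSIVE, __FILE__, __LINE__);
	/* ... write-side work ... */
	(void)lockmgr_unlock(foo_lk);
}
#endif
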
12411c6987ebSMateusz Guzik int
12421c6987ebSMateusz Guzik __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
12431c6987ebSMateusz Guzik     const char *wmesg, int pri, int timo, const char *file, int line)
12441c6987ebSMateusz Guzik {
12451c6987ebSMateusz Guzik 	GIANT_DECLARE;
12461c6987ebSMateusz Guzik 	struct lockmgr_wait lwa;
12471c6987ebSMateusz Guzik 	struct lock_class *class;
12481c6987ebSMateusz Guzik 	const char *iwmesg;
12491c6987ebSMateusz Guzik 	uintptr_t tid, v, x;
12501c6987ebSMateusz Guzik 	u_int op, realexslp;
12511c6987ebSMateusz Guzik 	int error, ipri, itimo, queue, wakeup_swapper;
12521c6987ebSMateusz Guzik #ifdef LOCK_PROFILING
12531c6987ebSMateusz Guzik 	uint64_t waittime = 0;
12541c6987ebSMateusz Guzik 	int contested = 0;
12551c6987ebSMateusz Guzik #endif
12561c6987ebSMateusz Guzik 
1257b543c98cSConrad Meyer 	if (panicstr != NULL)
1258b543c98cSConrad Meyer 		return (0);
1259b543c98cSConrad Meyer 
12601c6987ebSMateusz Guzik 	error = 0;
12611c6987ebSMateusz Guzik 	tid = (uintptr_t)curthread;
12621c6987ebSMateusz Guzik 	op = (flags & LK_TYPE_MASK);
12631c6987ebSMateusz Guzik 	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
12641c6987ebSMateusz Guzik 	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
12651c6987ebSMateusz Guzik 	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
12661c6987ebSMateusz Guzik 
12671c6987ebSMateusz Guzik 	lwa.iwmesg = iwmesg;
12681c6987ebSMateusz Guzik 	lwa.ipri = ipri;
12691c6987ebSMateusz Guzik 	lwa.itimo = itimo;
12701c6987ebSMateusz Guzik 
12711c6987ebSMateusz Guzik 	MPASS((flags & ~LK_TOTAL_MASK) == 0);
12721c6987ebSMateusz Guzik 	KASSERT((op & (op - 1)) == 0,
12731c6987ebSMateusz Guzik 	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
12741c6987ebSMateusz Guzik 	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
12751c6987ebSMateusz Guzik 	    (op != LK_DOWNGRADE && op != LK_RELEASE),
12761c6987ebSMateusz Guzik 	    ("%s: Invalid flags for the requested operation @ %s:%d",
12771c6987ebSMateusz Guzik 	    __func__, file, line));
12781c6987ebSMateusz Guzik 	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
12791c6987ebSMateusz Guzik 	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
12801c6987ebSMateusz Guzik 	    __func__, file, line));
12811c6987ebSMateusz Guzik 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
12821c6987ebSMateusz Guzik 	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
12831c6987ebSMateusz Guzik 	    lk->lock_object.lo_name, file, line));
12841c6987ebSMateusz Guzik 
12851c6987ebSMateusz Guzik 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
12861c6987ebSMateusz Guzik 
12871c6987ebSMateusz Guzik 	if (lk->lock_object.lo_flags & LK_NOSHARE) {
12881c6987ebSMateusz Guzik 		switch (op) {
12891c6987ebSMateusz Guzik 		case LK_SHARED:
12901c6987ebSMateusz Guzik 			op = LK_EXCLUSIVE;
12911c6987ebSMateusz Guzik 			break;
12921c6987ebSMateusz Guzik 		case LK_UPGRADE:
12931c6987ebSMateusz Guzik 		case LK_TRYUPGRADE:
12941c6987ebSMateusz Guzik 		case LK_DOWNGRADE:
12951c6987ebSMateusz Guzik 			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
12961c6987ebSMateusz Guzik 			    file, line);
12971c6987ebSMateusz Guzik 			if (flags & LK_INTERLOCK)
12981c6987ebSMateusz Guzik 				class->lc_unlock(ilk);
12991c6987ebSMateusz Guzik 			return (0);
13001c6987ebSMateusz Guzik 		}
13011c6987ebSMateusz Guzik 	}
13021c6987ebSMateusz Guzik 
13031c6987ebSMateusz Guzik 	wakeup_swapper = 0;
13041c6987ebSMateusz Guzik 	switch (op) {
13051c6987ebSMateusz Guzik 	case LK_SHARED:
13061c6987ebSMateusz Guzik 		return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
13071c6987ebSMateusz Guzik 		break;
13081c6987ebSMateusz Guzik 	case LK_UPGRADE:
13091c6987ebSMateusz Guzik 	case LK_TRYUPGRADE:
13101c6987ebSMateusz Guzik 		return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
13111c6987ebSMateusz Guzik 		break;
13121c6987ebSMateusz Guzik 	case LK_EXCLUSIVE:
13131c6987ebSMateusz Guzik 		return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1314047dd67eSAttilio Rao 		break;
1315047dd67eSAttilio Rao 	case LK_DOWNGRADE:
13161c7d98d0SAttilio Rao 		_lockmgr_assert(lk, KA_XLOCKED, file, line);
1317e5f94314SAttilio Rao 		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
13181c7d98d0SAttilio Rao 
13191c7d98d0SAttilio Rao 		/*
13201c7d98d0SAttilio Rao 		 * Panic if the lock is recursed.
13211c7d98d0SAttilio Rao 		 */
13221c7d98d0SAttilio Rao 		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
13231c7d98d0SAttilio Rao 			if (flags & LK_INTERLOCK)
13241c7d98d0SAttilio Rao 				class->lc_unlock(ilk);
13251c7d98d0SAttilio Rao 			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
13261c7d98d0SAttilio Rao 			    __func__, iwmesg, file, line);
13271c7d98d0SAttilio Rao 		}
1328e5f94314SAttilio Rao 		TD_SLOCKS_INC(curthread);
1329047dd67eSAttilio Rao 
1330047dd67eSAttilio Rao 		/*
1331047dd67eSAttilio Rao 		 * In order to preserve waiters flags, just spin.
1332047dd67eSAttilio Rao 		 */
1333047dd67eSAttilio Rao 		for (;;) {
1334651175c9SAttilio Rao 			x = lk->lk_lock;
1335651175c9SAttilio Rao 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1336651175c9SAttilio Rao 			x &= LK_ALL_WAITERS;
1337047dd67eSAttilio Rao 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1338e5f94314SAttilio Rao 			    LK_SHARERS_LOCK(1) | x))
1339047dd67eSAttilio Rao 				break;
1340047dd67eSAttilio Rao 			cpu_spinwait();
1341047dd67eSAttilio Rao 		}
13425b699f16SMark Johnston 		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
13435b699f16SMark Johnston 		LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
1344047dd67eSAttilio Rao 		break;
1345047dd67eSAttilio Rao 	case LK_RELEASE:
1346047dd67eSAttilio Rao 		_lockmgr_assert(lk, KA_LOCKED, file, line);
1347047dd67eSAttilio Rao 		x = lk->lk_lock;
1348047dd67eSAttilio Rao 
13491c6987ebSMateusz Guzik 		if (__predict_true((x & LK_SHARE) != 0)) {
13501c6987ebSMateusz Guzik 			return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1351047dd67eSAttilio Rao 		} else {
13521c6987ebSMateusz Guzik 			return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
13532028867dSAttilio Rao 		}
1354047dd67eSAttilio Rao 		break;
1355047dd67eSAttilio Rao 	case LK_DRAIN:
1356e5f94314SAttilio Rao 		if (LK_CAN_WITNESS(flags))
1357e5f94314SAttilio Rao 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
135824150d37SJohn Baldwin 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
135924150d37SJohn Baldwin 			    ilk : NULL);
1360047dd67eSAttilio Rao 
1361047dd67eSAttilio Rao 		/*
136296f1567fSKonstantin Belousov 		 * Trying to drain a lock we already own will result in a
1363047dd67eSAttilio Rao 		 * deadlock.
1364047dd67eSAttilio Rao 		 */
1365047dd67eSAttilio Rao 		if (lockmgr_xlocked(lk)) {
1366047dd67eSAttilio Rao 			if (flags & LK_INTERLOCK)
1367047dd67eSAttilio Rao 				class->lc_unlock(ilk);
1368047dd67eSAttilio Rao 			panic("%s: draining %s with the lock held @ %s:%d\n",
1369047dd67eSAttilio Rao 			    __func__, iwmesg, file, line);
1370047dd67eSAttilio Rao 		}
1371047dd67eSAttilio Rao 
1372fc4f686dSMateusz Guzik 		for (;;) {
1373fc4f686dSMateusz Guzik 			if (lk->lk_lock == LK_UNLOCKED &&
1374fc4f686dSMateusz Guzik 			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1375fc4f686dSMateusz Guzik 				break;
1376fc4f686dSMateusz Guzik 
1377f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
1378f5f9340bSFabien Thomas 			PMC_SOFT_CALL( , , lock, failed);
1379f5f9340bSFabien Thomas #endif
1380047dd67eSAttilio Rao 			lock_profile_obtain_lock_failed(&lk->lock_object,
1381047dd67eSAttilio Rao 			    &contested, &waittime);
1382047dd67eSAttilio Rao 
1383047dd67eSAttilio Rao 			/*
1384047dd67eSAttilio Rao 			 * If the operation is expected not to sleep, just
1385047dd67eSAttilio Rao 			 * give up and return.
1386047dd67eSAttilio Rao 			 */
1387047dd67eSAttilio Rao 			if (LK_TRYOP(flags)) {
1388047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p fails the try operation",
1389047dd67eSAttilio Rao 				    __func__, lk);
1390047dd67eSAttilio Rao 				error = EBUSY;
1391047dd67eSAttilio Rao 				break;
1392047dd67eSAttilio Rao 			}
1393047dd67eSAttilio Rao 
1394047dd67eSAttilio Rao 			/*
1395047dd67eSAttilio Rao 			 * Acquire the sleepqueue chain lock because we will
1396047dd67eSAttilio Rao 			 * probably need to manipulate the waiters flags.
1397047dd67eSAttilio Rao 			 */
1398047dd67eSAttilio Rao 			sleepq_lock(&lk->lock_object);
1399047dd67eSAttilio Rao 			x = lk->lk_lock;
1400047dd67eSAttilio Rao 
1401047dd67eSAttilio Rao 			/*
1402047dd67eSAttilio Rao 			 * If the lock has been released while we spun on
1403047dd67eSAttilio Rao 			 * the sleepqueue chain lock, just try again.
1404047dd67eSAttilio Rao 			 */
1405047dd67eSAttilio Rao 			if (x == LK_UNLOCKED) {
1406047dd67eSAttilio Rao 				sleepq_release(&lk->lock_object);
1407047dd67eSAttilio Rao 				continue;
1408047dd67eSAttilio Rao 			}
1409047dd67eSAttilio Rao 
1410651175c9SAttilio Rao 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1411651175c9SAttilio Rao 			if ((x & ~v) == LK_UNLOCKED) {
1412651175c9SAttilio Rao 				v = (x & ~LK_EXCLUSIVE_SPINNERS);
14132028867dSAttilio Rao 
14142028867dSAttilio Rao 				/*
14152028867dSAttilio Rao 				 * If interruptible sleeps left the exclusive
14162028867dSAttilio Rao 				 * queue empty, avoid starving the threads
14172028867dSAttilio Rao 				 * sleeping on the shared queue by giving them
14182028867dSAttilio Rao 				 * precedence and clearing the exclusive
14192028867dSAttilio Rao 				 * waiters bit anyway.
1420c636ba83SAttilio Rao 				 * Note that the lk_exslpfail count may
1421c636ba83SAttilio Rao 				 * overstate the real number of waiters with
1422c636ba83SAttilio Rao 				 * the LK_SLEEPFAIL flag set, because such
1423e3043798SPedro F. Giffuni 				 * waiters may also be using interruptible
1424aab9c8c2SAttilio Rao 				 * sleeps; lk_exslpfail should therefore be
1425aab9c8c2SAttilio Rao 				 * treated only as an upper bound, edge
1426c636ba83SAttilio Rao 				 * cases included.
14272028867dSAttilio Rao 				 */
1428047dd67eSAttilio Rao 				if (v & LK_EXCLUSIVE_WAITERS) {
1429047dd67eSAttilio Rao 					queue = SQ_EXCLUSIVE_QUEUE;
1430047dd67eSAttilio Rao 					v &= ~LK_EXCLUSIVE_WAITERS;
1431047dd67eSAttilio Rao 				} else {
14329dbf7a62SAttilio Rao 
14339dbf7a62SAttilio Rao 					/*
14349dbf7a62SAttilio Rao 					 * Exclusive waiters sleeping with
14359dbf7a62SAttilio Rao 					 * LK_SLEEPFAIL set and using
14369dbf7a62SAttilio Rao 					 * interruptible sleeps/timeouts may
14379dbf7a62SAttilio Rao 					 * have left spurious lk_exslpfail
14389dbf7a62SAttilio Rao 					 * counts behind; clean them up anyway.
14399dbf7a62SAttilio Rao 					 */
1440047dd67eSAttilio Rao 					MPASS(v & LK_SHARED_WAITERS);
14419dbf7a62SAttilio Rao 					lk->lk_exslpfail = 0;
1442047dd67eSAttilio Rao 					queue = SQ_SHARED_QUEUE;
1443047dd67eSAttilio Rao 					v &= ~LK_SHARED_WAITERS;
1444047dd67eSAttilio Rao 				}
14452028867dSAttilio Rao 				if (queue == SQ_EXCLUSIVE_QUEUE) {
14462028867dSAttilio Rao 					realexslp =
14472028867dSAttilio Rao 					    sleepq_sleepcnt(&lk->lock_object,
14482028867dSAttilio Rao 					    SQ_EXCLUSIVE_QUEUE);
14492028867dSAttilio Rao 					if (lk->lk_exslpfail >= realexslp) {
14502028867dSAttilio Rao 						lk->lk_exslpfail = 0;
14512028867dSAttilio Rao 						queue = SQ_SHARED_QUEUE;
14522028867dSAttilio Rao 						v &= ~LK_SHARED_WAITERS;
14532028867dSAttilio Rao 						if (realexslp != 0) {
14542028867dSAttilio Rao 							LOCK_LOG2(lk,
14552028867dSAttilio Rao 					"%s: %p has only LK_SLEEPFAIL sleepers",
14562028867dSAttilio Rao 							    __func__, lk);
14572028867dSAttilio Rao 							LOCK_LOG2(lk,
14582028867dSAttilio Rao 			"%s: %p waking up threads on the exclusive queue",
14592028867dSAttilio Rao 							    __func__, lk);
14602028867dSAttilio Rao 							wakeup_swapper =
14612028867dSAttilio Rao 							    sleepq_broadcast(
14622028867dSAttilio Rao 							    &lk->lock_object,
14632028867dSAttilio Rao 							    SLEEPQ_LK, 0,
14642028867dSAttilio Rao 							    SQ_EXCLUSIVE_QUEUE);
14652028867dSAttilio Rao 						}
14662028867dSAttilio Rao 					} else
14672028867dSAttilio Rao 						lk->lk_exslpfail = 0;
14682028867dSAttilio Rao 				}
1469047dd67eSAttilio Rao 				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1470047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
1471047dd67eSAttilio Rao 					continue;
1472047dd67eSAttilio Rao 				}
1473047dd67eSAttilio Rao 				LOCK_LOG3(lk,
1474047dd67eSAttilio Rao 				"%s: %p waking up all threads on the %s queue",
1475047dd67eSAttilio Rao 				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1476047dd67eSAttilio Rao 				    "shared" : "exclusive");
1477814f26daSJohn Baldwin 				wakeup_swapper |= sleepq_broadcast(
1478da7bbd2cSJohn Baldwin 				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1479047dd67eSAttilio Rao 
1480047dd67eSAttilio Rao 				/*
1481047dd67eSAttilio Rao 				 * If shared waiters have been woken up, we
1482047dd67eSAttilio Rao 				 * need to wait for one of them to acquire the
1483047dd67eSAttilio Rao 				 * lock before setting the exclusive waiters
1484047dd67eSAttilio Rao 				 * bit, in order to avoid a deadlock.
1485047dd67eSAttilio Rao 				 */
1486047dd67eSAttilio Rao 				if (queue == SQ_SHARED_QUEUE) {
1487047dd67eSAttilio Rao 					for (v = lk->lk_lock;
1488047dd67eSAttilio Rao 					    (v & LK_SHARE) && !LK_SHARERS(v);
1489047dd67eSAttilio Rao 					    v = lk->lk_lock)
1490047dd67eSAttilio Rao 						cpu_spinwait();
1491047dd67eSAttilio Rao 				}
1492047dd67eSAttilio Rao 			}
1493047dd67eSAttilio Rao 
1494047dd67eSAttilio Rao 			/*
1495047dd67eSAttilio Rao 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1496047dd67eSAttilio Rao 			 * fail, loop back and retry.
1497047dd67eSAttilio Rao 			 */
1498047dd67eSAttilio Rao 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1499047dd67eSAttilio Rao 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1500047dd67eSAttilio Rao 				    x | LK_EXCLUSIVE_WAITERS)) {
1501047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
1502047dd67eSAttilio Rao 					continue;
1503047dd67eSAttilio Rao 				}
1504047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1505047dd67eSAttilio Rao 				    __func__, lk);
1506047dd67eSAttilio Rao 			}
1507047dd67eSAttilio Rao 
1508047dd67eSAttilio Rao 			/*
1509047dd67eSAttilio Rao 			 * As long as we have been unable to acquire the
1510047dd67eSAttilio Rao 			 * exclusive lock and the exclusive waiters flag
1511047dd67eSAttilio Rao 			 * is set, we will sleep.
1512047dd67eSAttilio Rao 			 */
1513047dd67eSAttilio Rao 			if (flags & LK_INTERLOCK) {
1514047dd67eSAttilio Rao 				class->lc_unlock(ilk);
1515047dd67eSAttilio Rao 				flags &= ~LK_INTERLOCK;
1516047dd67eSAttilio Rao 			}
1517e5f94314SAttilio Rao 			GIANT_SAVE();
1518047dd67eSAttilio Rao 			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1519047dd67eSAttilio Rao 			    SQ_EXCLUSIVE_QUEUE);
1520047dd67eSAttilio Rao 			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1521e5f94314SAttilio Rao 			GIANT_RESTORE();
1522047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1523047dd67eSAttilio Rao 			    __func__, lk);
1524047dd67eSAttilio Rao 		}
1525047dd67eSAttilio Rao 
1526047dd67eSAttilio Rao 		if (error == 0) {
1527047dd67eSAttilio Rao 			lock_profile_obtain_lock_success(&lk->lock_object,
1528047dd67eSAttilio Rao 			    contested, waittime, file, line);
1529047dd67eSAttilio Rao 			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1530047dd67eSAttilio Rao 			    lk->lk_recurse, file, line);
1531e5f94314SAttilio Rao 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1532e5f94314SAttilio Rao 			    LK_TRYWIT(flags), file, line);
1533047dd67eSAttilio Rao 			TD_LOCKS_INC(curthread);
1534047dd67eSAttilio Rao 			STACK_SAVE(lk);
1535047dd67eSAttilio Rao 		}
1536047dd67eSAttilio Rao 		break;
1537047dd67eSAttilio Rao 	default:
1538047dd67eSAttilio Rao 		if (flags & LK_INTERLOCK)
1539047dd67eSAttilio Rao 			class->lc_unlock(ilk);
1540047dd67eSAttilio Rao 		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1541047dd67eSAttilio Rao 	}
1542047dd67eSAttilio Rao 
1543047dd67eSAttilio Rao 	if (flags & LK_INTERLOCK)
1544047dd67eSAttilio Rao 		class->lc_unlock(ilk);
1545da7bbd2cSJohn Baldwin 	if (wakeup_swapper)
1546da7bbd2cSJohn Baldwin 		kick_proc0();
1547047dd67eSAttilio Rao 
1548047dd67eSAttilio Rao 	return (error);
1549047dd67eSAttilio Rao }
1550047dd67eSAttilio Rao 
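/*
 * Editorial usage sketch, not part of the original source; `foo_lk' is
 * hypothetical.  Callers normally reach __lockmgr_args() through the
 * lockmgr_args() wrapper in <sys/lockmgr.h>; here a shared request sleeps
 * interruptibly (PCATCH) at PPAUSE priority with a one second timeout
 * (LK_TIMELOCK makes the timo argument effective).
 */
#if 0
static int
foo_lock_timed(struct lock *foo_lk)
{
	int error;

	error = lockmgr_args(foo_lk, LK_SHARED | LK_TIMELOCK, NULL,
	    "foolk", PPAUSE | PCATCH, hz);
	if (error != 0)
		return (error);	/* EINTR, ERESTART or EWOULDBLOCK */
	/* ... */
	lockmgr(foo_lk, LK_RELEASE, NULL);
	return (0);
}
#endif
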
1551d7a7e179SAttilio Rao void
1552047dd67eSAttilio Rao _lockmgr_disown(struct lock *lk, const char *file, int line)
1553047dd67eSAttilio Rao {
1554047dd67eSAttilio Rao 	uintptr_t tid, x;
1555047dd67eSAttilio Rao 
155635370593SAndriy Gapon 	if (SCHEDULER_STOPPED())
155735370593SAndriy Gapon 		return;
155835370593SAndriy Gapon 
1559047dd67eSAttilio Rao 	tid = (uintptr_t)curthread;
15601c7d98d0SAttilio Rao 	_lockmgr_assert(lk, KA_XLOCKED, file, line);
15611c7d98d0SAttilio Rao 
15621c7d98d0SAttilio Rao 	/*
15631c7d98d0SAttilio Rao 	 * Panic if the lock is recursed.
15641c7d98d0SAttilio Rao 	 */
15651c7d98d0SAttilio Rao 	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
15661c7d98d0SAttilio Rao 		panic("%s: disown a recursed lockmgr @ %s:%d\n",
15671c7d98d0SAttilio Rao 		    __func__,  file, line);
1568047dd67eSAttilio Rao 
1569047dd67eSAttilio Rao 	/*
157096f1567fSKonstantin Belousov 	 * If the owner is already LK_KERNPROC, just skip the whole operation.
1571047dd67eSAttilio Rao 	 */
1572047dd67eSAttilio Rao 	if (LK_HOLDER(lk->lk_lock) != tid)
1573047dd67eSAttilio Rao 		return;
157404a28689SJeff Roberson 	lock_profile_release_lock(&lk->lock_object);
15755b699f16SMark Johnston 	LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1576e5f94314SAttilio Rao 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1577e5f94314SAttilio Rao 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1578e5f94314SAttilio Rao 	TD_LOCKS_DEC(curthread);
1579337c5ff4SAttilio Rao 	STACK_SAVE(lk);
1580047dd67eSAttilio Rao 
1581047dd67eSAttilio Rao 	/*
1582047dd67eSAttilio Rao 	 * In order to preserve waiters flags, just spin.
1583047dd67eSAttilio Rao 	 */
1584047dd67eSAttilio Rao 	for (;;) {
1585651175c9SAttilio Rao 		x = lk->lk_lock;
1586651175c9SAttilio Rao 		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1587651175c9SAttilio Rao 		x &= LK_ALL_WAITERS;
158822dd228dSAttilio Rao 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1589e5f94314SAttilio Rao 		    LK_KERNPROC | x))
1590047dd67eSAttilio Rao 			return;
1591047dd67eSAttilio Rao 		cpu_spinwait();
1592047dd67eSAttilio Rao 	}
1593047dd67eSAttilio Rao }
1594047dd67eSAttilio Rao 
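/*
 * Editorial usage sketch, not part of the original source; `foo_lk' is
 * hypothetical and the lockmgr_disown() wrapper from <sys/lockmgr.h> is
 * assumed.  Disowning hands an exclusively held lock over to no thread in
 * particular (LK_KERNPROC), e.g. so that an async completion running in
 * another context may issue the matching release.
 */
#if 0
	lockmgr(foo_lk, LK_EXCLUSIVE, NULL);
	lockmgr_disown(foo_lk);		/* owner becomes LK_KERNPROC */
	/* Any thread may now perform the LK_RELEASE. */
#endif
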
1595047dd67eSAttilio Rao void
1596d576deedSPawel Jakub Dawidek lockmgr_printinfo(const struct lock *lk)
1597d7a7e179SAttilio Rao {
1598d7a7e179SAttilio Rao 	struct thread *td;
1599047dd67eSAttilio Rao 	uintptr_t x;
1600d7a7e179SAttilio Rao 
1601047dd67eSAttilio Rao 	if (lk->lk_lock == LK_UNLOCKED)
1602047dd67eSAttilio Rao 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1603047dd67eSAttilio Rao 	else if (lk->lk_lock & LK_SHARE)
1604047dd67eSAttilio Rao 		printf("lock type %s: SHARED (count %ju)\n",
1605047dd67eSAttilio Rao 		    lk->lock_object.lo_name,
1606047dd67eSAttilio Rao 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1607047dd67eSAttilio Rao 	else {
1608047dd67eSAttilio Rao 		td = lockmgr_xholder(lk);
1609e64b4fa8SKonstantin Belousov 		if (td == (struct thread *)LK_KERNPROC)
1610e64b4fa8SKonstantin Belousov 			printf("lock type %s: EXCL by KERNPROC\n",
1611e64b4fa8SKonstantin Belousov 			    lk->lock_object.lo_name);
1612e64b4fa8SKonstantin Belousov 		else
16132573ea5fSIvan Voras 			printf("lock type %s: EXCL by thread %p "
1614e64b4fa8SKonstantin Belousov 			    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1615e64b4fa8SKonstantin Belousov 			    td, td->td_proc->p_pid, td->td_proc->p_comm,
1616e64b4fa8SKonstantin Belousov 			    td->td_tid);
1617d7a7e179SAttilio Rao 	}
1618d7a7e179SAttilio Rao 
1619047dd67eSAttilio Rao 	x = lk->lk_lock;
1620047dd67eSAttilio Rao 	if (x & LK_EXCLUSIVE_WAITERS)
1621047dd67eSAttilio Rao 		printf(" with exclusive waiters pending\n");
1622047dd67eSAttilio Rao 	if (x & LK_SHARED_WAITERS)
1623047dd67eSAttilio Rao 		printf(" with shared waiters pending\n");
1624651175c9SAttilio Rao 	if (x & LK_EXCLUSIVE_SPINNERS)
1625651175c9SAttilio Rao 		printf(" with exclusive spinners pending\n");
1626047dd67eSAttilio Rao 
1627047dd67eSAttilio Rao 	STACK_PRINT(lk);
1628047dd67eSAttilio Rao }
1629047dd67eSAttilio Rao 
163099448ed1SJohn Dyson int
1631d576deedSPawel Jakub Dawidek lockstatus(const struct lock *lk)
163299448ed1SJohn Dyson {
1633047dd67eSAttilio Rao 	uintptr_t v, x;
1634047dd67eSAttilio Rao 	int ret;
163599448ed1SJohn Dyson 
1636047dd67eSAttilio Rao 	ret = LK_SHARED;
1637047dd67eSAttilio Rao 	x = lk->lk_lock;
1638047dd67eSAttilio Rao 	v = LK_HOLDER(x);
16390e9eb108SAttilio Rao 
1640047dd67eSAttilio Rao 	if ((x & LK_SHARE) == 0) {
1641047dd67eSAttilio Rao 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1642047dd67eSAttilio Rao 			ret = LK_EXCLUSIVE;
16436bdfe06aSEivind Eklund 		else
1644047dd67eSAttilio Rao 			ret = LK_EXCLOTHER;
1645047dd67eSAttilio Rao 	} else if (x == LK_UNLOCKED)
1646047dd67eSAttilio Rao 		ret = 0;
164799448ed1SJohn Dyson 
1648047dd67eSAttilio Rao 	return (ret);
164953bf4bb2SPeter Wemm }
1650be6847d7SJohn Baldwin 
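/*
 * Editorial usage sketch, not part of the original source; `foo_lk' is
 * hypothetical.  lockstatus() classifies the lock state relative to
 * curthread, so it is commonly used to verify ownership before an
 * operation that requires it.
 */
#if 0
	switch (lockstatus(foo_lk)) {
	case LK_EXCLUSIVE:	/* held exclusively by curthread or KERNPROC */
		break;
	case LK_EXCLOTHER:	/* held exclusively by another thread */
	case LK_SHARED:		/* held shared; holders are not tracked */
	case 0:			/* not held at all */
		panic("foo: lock not exclusively held");
	}
#endif
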
165184887fa3SAttilio Rao #ifdef INVARIANT_SUPPORT
1652de5b1952SAlexander Leidinger 
1653de5b1952SAlexander Leidinger FEATURE(invariant_support,
1654de5b1952SAlexander Leidinger     "Support for modules compiled with INVARIANTS option");
1655de5b1952SAlexander Leidinger 
165684887fa3SAttilio Rao #ifndef INVARIANTS
165784887fa3SAttilio Rao #undef	_lockmgr_assert
165884887fa3SAttilio Rao #endif
165984887fa3SAttilio Rao 
166084887fa3SAttilio Rao void
1661d576deedSPawel Jakub Dawidek _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
166284887fa3SAttilio Rao {
166384887fa3SAttilio Rao 	int slocked = 0;
166484887fa3SAttilio Rao 
166584887fa3SAttilio Rao 	if (panicstr != NULL)
166684887fa3SAttilio Rao 		return;
166784887fa3SAttilio Rao 	switch (what) {
166884887fa3SAttilio Rao 	case KA_SLOCKED:
166984887fa3SAttilio Rao 	case KA_SLOCKED | KA_NOTRECURSED:
167084887fa3SAttilio Rao 	case KA_SLOCKED | KA_RECURSED:
167184887fa3SAttilio Rao 		slocked = 1;
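		/* FALLTHROUGH: shared assertions reuse the KA_LOCKED checks. */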
167284887fa3SAttilio Rao 	case KA_LOCKED:
167384887fa3SAttilio Rao 	case KA_LOCKED | KA_NOTRECURSED:
167484887fa3SAttilio Rao 	case KA_LOCKED | KA_RECURSED:
1675e5f94314SAttilio Rao #ifdef WITNESS
1676e5f94314SAttilio Rao 
1677e5f94314SAttilio Rao 		/*
1678e5f94314SAttilio Rao 		 * We cannot trust WITNESS if the lock is held in exclusive
1679e5f94314SAttilio Rao 		 * mode and a call to lockmgr_disown() happened.
1680e5f94314SAttilio Rao 		 * Work around this by skipping the check if the lock is held
1681e5f94314SAttilio Rao 		 * in exclusive mode, even for the KA_LOCKED case.
1682e5f94314SAttilio Rao 		 */
1683e5f94314SAttilio Rao 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1684e5f94314SAttilio Rao 			witness_assert(&lk->lock_object, what, file, line);
1685e5f94314SAttilio Rao 			break;
1686e5f94314SAttilio Rao 		}
1687e5f94314SAttilio Rao #endif
1688047dd67eSAttilio Rao 		if (lk->lk_lock == LK_UNLOCKED ||
1689047dd67eSAttilio Rao 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1690047dd67eSAttilio Rao 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
169184887fa3SAttilio Rao 			panic("Lock %s not %slocked @ %s:%d\n",
1692047dd67eSAttilio Rao 			    lk->lock_object.lo_name, slocked ? "share" : "",
169384887fa3SAttilio Rao 			    file, line);
1694047dd67eSAttilio Rao 
1695047dd67eSAttilio Rao 		if ((lk->lk_lock & LK_SHARE) == 0) {
1696047dd67eSAttilio Rao 			if (lockmgr_recursed(lk)) {
169784887fa3SAttilio Rao 				if (what & KA_NOTRECURSED)
169884887fa3SAttilio Rao 					panic("Lock %s recursed @ %s:%d\n",
1699047dd67eSAttilio Rao 					    lk->lock_object.lo_name, file,
1700047dd67eSAttilio Rao 					    line);
170184887fa3SAttilio Rao 			} else if (what & KA_RECURSED)
170284887fa3SAttilio Rao 				panic("Lock %s not recursed @ %s:%d\n",
1703047dd67eSAttilio Rao 				    lk->lock_object.lo_name, file, line);
170484887fa3SAttilio Rao 		}
170584887fa3SAttilio Rao 		break;
170684887fa3SAttilio Rao 	case KA_XLOCKED:
170784887fa3SAttilio Rao 	case KA_XLOCKED | KA_NOTRECURSED:
170884887fa3SAttilio Rao 	case KA_XLOCKED | KA_RECURSED:
1709047dd67eSAttilio Rao 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
171084887fa3SAttilio Rao 			panic("Lock %s not exclusively locked @ %s:%d\n",
1711047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
1712047dd67eSAttilio Rao 		if (lockmgr_recursed(lk)) {
171384887fa3SAttilio Rao 			if (what & KA_NOTRECURSED)
171484887fa3SAttilio Rao 				panic("Lock %s recursed @ %s:%d\n",
1715047dd67eSAttilio Rao 				    lk->lock_object.lo_name, file, line);
171684887fa3SAttilio Rao 		} else if (what & KA_RECURSED)
171784887fa3SAttilio Rao 			panic("Lock %s not recursed @ %s:%d\n",
1718047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
171984887fa3SAttilio Rao 		break;
172084887fa3SAttilio Rao 	case KA_UNLOCKED:
1721047dd67eSAttilio Rao 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
172284887fa3SAttilio Rao 			panic("Lock %s exclusively locked @ %s:%d\n",
1723047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
172484887fa3SAttilio Rao 		break;
172584887fa3SAttilio Rao 	default:
1726047dd67eSAttilio Rao 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1727047dd67eSAttilio Rao 		    line);
172884887fa3SAttilio Rao 	}
172984887fa3SAttilio Rao }
1730047dd67eSAttilio Rao #endif
173184887fa3SAttilio Rao 
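/*
 * Editorial usage sketch, not part of the original source, assuming the
 * lockmgr_assert() wrapper from <sys/lockmgr.h> expands to
 * _lockmgr_assert() under INVARIANTS.  `foo_lk' is hypothetical; the
 * check panics on a mismatch.
 */
#if 0
	lockmgr_assert(foo_lk, KA_XLOCKED | KA_NOTRECURSED);
#endif
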
1732be6847d7SJohn Baldwin #ifdef DDB
1733462a7addSJohn Baldwin int
1734462a7addSJohn Baldwin lockmgr_chain(struct thread *td, struct thread **ownerp)
1735462a7addSJohn Baldwin {
1736047dd67eSAttilio Rao 	struct lock *lk;
1737462a7addSJohn Baldwin 
1738047dd67eSAttilio Rao 	lk = td->td_wchan;
1739462a7addSJohn Baldwin 
1740047dd67eSAttilio Rao 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1741462a7addSJohn Baldwin 		return (0);
1742047dd67eSAttilio Rao 	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1743047dd67eSAttilio Rao 	if (lk->lk_lock & LK_SHARE)
1744047dd67eSAttilio Rao 		db_printf("SHARED (count %ju)\n",
1745047dd67eSAttilio Rao 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1746047dd67eSAttilio Rao 	else
1747047dd67eSAttilio Rao 		db_printf("EXCL\n");
1748047dd67eSAttilio Rao 	*ownerp = lockmgr_xholder(lk);
1749462a7addSJohn Baldwin 
1750462a7addSJohn Baldwin 	return (1);
1751462a7addSJohn Baldwin }
1752462a7addSJohn Baldwin 
1753047dd67eSAttilio Rao static void
1754d576deedSPawel Jakub Dawidek db_show_lockmgr(const struct lock_object *lock)
1755be6847d7SJohn Baldwin {
1756be6847d7SJohn Baldwin 	struct thread *td;
1757d576deedSPawel Jakub Dawidek 	const struct lock *lk;
1758be6847d7SJohn Baldwin 
1759d576deedSPawel Jakub Dawidek 	lk = (const struct lock *)lock;
1760be6847d7SJohn Baldwin 
1761be6847d7SJohn Baldwin 	db_printf(" state: ");
1762047dd67eSAttilio Rao 	if (lk->lk_lock == LK_UNLOCKED)
1763be6847d7SJohn Baldwin 		db_printf("UNLOCKED\n");
1764047dd67eSAttilio Rao 	else if (lk->lk_lock & LK_SHARE)
1765047dd67eSAttilio Rao 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1766047dd67eSAttilio Rao 	else {
1767047dd67eSAttilio Rao 		td = lockmgr_xholder(lk);
1768047dd67eSAttilio Rao 		if (td == (struct thread *)LK_KERNPROC)
1769047dd67eSAttilio Rao 			db_printf("XLOCK: LK_KERNPROC\n");
1770047dd67eSAttilio Rao 		else
1771047dd67eSAttilio Rao 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1772047dd67eSAttilio Rao 			    td->td_tid, td->td_proc->p_pid,
1773047dd67eSAttilio Rao 			    td->td_proc->p_comm);
1774047dd67eSAttilio Rao 		if (lockmgr_recursed(lk))
1775047dd67eSAttilio Rao 			db_printf(" recursed: %d\n", lk->lk_recurse);
1776047dd67eSAttilio Rao 	}
1777047dd67eSAttilio Rao 	db_printf(" waiters: ");
1778047dd67eSAttilio Rao 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1779047dd67eSAttilio Rao 	case LK_SHARED_WAITERS:
1780047dd67eSAttilio Rao 		db_printf("shared\n");
1781e5023dd9SEdward Tomasz Napierala 		break;
1782047dd67eSAttilio Rao 	case LK_EXCLUSIVE_WAITERS:
1783047dd67eSAttilio Rao 		db_printf("exclusive\n");
1784047dd67eSAttilio Rao 		break;
1785047dd67eSAttilio Rao 	case LK_ALL_WAITERS:
1786047dd67eSAttilio Rao 		db_printf("shared and exclusive\n");
1787047dd67eSAttilio Rao 		break;
1788047dd67eSAttilio Rao 	default:
1789047dd67eSAttilio Rao 		db_printf("none\n");
1790047dd67eSAttilio Rao 	}
1791651175c9SAttilio Rao 	db_printf(" spinners: ");
1792651175c9SAttilio Rao 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1793651175c9SAttilio Rao 		db_printf("exclusive\n");
1794651175c9SAttilio Rao 	else
1795651175c9SAttilio Rao 		db_printf("none\n");
1796be6847d7SJohn Baldwin }
1797be6847d7SJohn Baldwin #endif
1798