xref: /freebsd/sys/kern/kern_lock.c (revision f902e4bb04ad717935a97ce1ae59e2dd389d940d)
19454b2d8SWarner Losh /*-
28a36da99SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
38a36da99SPedro F. Giffuni  *
4047dd67eSAttilio Rao  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
5047dd67eSAttilio Rao  * All rights reserved.
653bf4bb2SPeter Wemm  *
753bf4bb2SPeter Wemm  * Redistribution and use in source and binary forms, with or without
853bf4bb2SPeter Wemm  * modification, are permitted provided that the following conditions
953bf4bb2SPeter Wemm  * are met:
1053bf4bb2SPeter Wemm  * 1. Redistributions of source code must retain the above copyright
11047dd67eSAttilio Rao  *    notice(s), this list of conditions and the following disclaimer as
12047dd67eSAttilio Rao  *    the first lines of this file unmodified other than the possible
13047dd67eSAttilio Rao  *    addition of one or more copyright notices.
1453bf4bb2SPeter Wemm  * 2. Redistributions in binary form must reproduce the above copyright
15047dd67eSAttilio Rao  *    notice(s), this list of conditions and the following disclaimer in the
1653bf4bb2SPeter Wemm  *    documentation and/or other materials provided with the distribution.
1753bf4bb2SPeter Wemm  *
18047dd67eSAttilio Rao  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19047dd67eSAttilio Rao  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20047dd67eSAttilio Rao  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21047dd67eSAttilio Rao  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22047dd67eSAttilio Rao  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23047dd67eSAttilio Rao  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24047dd67eSAttilio Rao  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25047dd67eSAttilio Rao  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2653bf4bb2SPeter Wemm  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27047dd67eSAttilio Rao  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28047dd67eSAttilio Rao  * DAMAGE.
2953bf4bb2SPeter Wemm  */
3053bf4bb2SPeter Wemm 
31047dd67eSAttilio Rao #include "opt_ddb.h"
32f5f9340bSFabien Thomas #include "opt_hwpmc_hooks.h"
33047dd67eSAttilio Rao 
34677b542eSDavid E. O'Brien #include <sys/cdefs.h>
35677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
36677b542eSDavid E. O'Brien 
3753bf4bb2SPeter Wemm #include <sys/param.h>
38cd2fe4e6SAttilio Rao #include <sys/kdb.h>
3961d80e90SJohn Baldwin #include <sys/ktr.h>
40eac22dd4SMateusz Guzik #include <sys/limits.h>
4153bf4bb2SPeter Wemm #include <sys/lock.h>
42047dd67eSAttilio Rao #include <sys/lock_profile.h>
438302d183SBruce Evans #include <sys/lockmgr.h>
445b699f16SMark Johnston #include <sys/lockstat.h>
45d8881ca3SJohn Baldwin #include <sys/mutex.h>
468302d183SBruce Evans #include <sys/proc.h>
47047dd67eSAttilio Rao #include <sys/sleepqueue.h>
48e8ddb61dSJeff Roberson #ifdef DEBUG_LOCKS
49e8ddb61dSJeff Roberson #include <sys/stack.h>
50e8ddb61dSJeff Roberson #endif
51651175c9SAttilio Rao #include <sys/sysctl.h>
52047dd67eSAttilio Rao #include <sys/systm.h>
5353bf4bb2SPeter Wemm 
54047dd67eSAttilio Rao #include <machine/cpu.h>
556efc8a16SAttilio Rao 
56be6847d7SJohn Baldwin #ifdef DDB
57be6847d7SJohn Baldwin #include <ddb/ddb.h>
58047dd67eSAttilio Rao #endif
59047dd67eSAttilio Rao 
60f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
61f5f9340bSFabien Thomas #include <sys/pmckern.h>
62f5f9340bSFabien Thomas PMC_SOFT_DECLARE( , , lock, failed);
63f5f9340bSFabien Thomas #endif
64f5f9340bSFabien Thomas 
65eac22dd4SMateusz Guzik /*
66eac22dd4SMateusz Guzik  * Hack. There should be prio_t or similar so that this is not necessary.
67eac22dd4SMateusz Guzik  */
68eac22dd4SMateusz Guzik _Static_assert((PRILASTFLAG * 2) - 1 <= USHRT_MAX,
69eac22dd4SMateusz Guzik     "prio flags won't fit in u_short pri in struct lock");
70eac22dd4SMateusz Guzik 
71651175c9SAttilio Rao CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
72651175c9SAttilio Rao     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
73047dd67eSAttilio Rao 
74047dd67eSAttilio Rao #define	SQ_EXCLUSIVE_QUEUE	0
75047dd67eSAttilio Rao #define	SQ_SHARED_QUEUE		1
76047dd67eSAttilio Rao 
77047dd67eSAttilio Rao #ifndef INVARIANTS
78047dd67eSAttilio Rao #define	_lockmgr_assert(lk, what, file, line)
79047dd67eSAttilio Rao #endif
80ce1c953eSMark Johnston 
81047dd67eSAttilio Rao #define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
82047dd67eSAttilio Rao #define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
83047dd67eSAttilio Rao 
84047dd67eSAttilio Rao #ifndef DEBUG_LOCKS
85047dd67eSAttilio Rao #define	STACK_PRINT(lk)
86047dd67eSAttilio Rao #define	STACK_SAVE(lk)
87047dd67eSAttilio Rao #define	STACK_ZERO(lk)
88047dd67eSAttilio Rao #else
89047dd67eSAttilio Rao #define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
90047dd67eSAttilio Rao #define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
91047dd67eSAttilio Rao #define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
92047dd67eSAttilio Rao #endif
93047dd67eSAttilio Rao 
94047dd67eSAttilio Rao #define	LOCK_LOG2(lk, string, arg1, arg2)				\
95047dd67eSAttilio Rao 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
96047dd67eSAttilio Rao 		CTR2(KTR_LOCK, (string), (arg1), (arg2))
97047dd67eSAttilio Rao #define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
98047dd67eSAttilio Rao 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
99047dd67eSAttilio Rao 		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
100047dd67eSAttilio Rao 
101e5f94314SAttilio Rao #define	GIANT_DECLARE							\
102e5f94314SAttilio Rao 	int _i = 0;							\
103e5f94314SAttilio Rao 	WITNESS_SAVE_DECL(Giant)
104e5f94314SAttilio Rao #define	GIANT_RESTORE() do {						\
1056e8c1ccbSMateusz Guzik 	if (__predict_false(_i > 0)) {					\
106e5f94314SAttilio Rao 		while (_i--)						\
107e5f94314SAttilio Rao 			mtx_lock(&Giant);				\
108e5f94314SAttilio Rao 		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
109e5f94314SAttilio Rao 	}								\
110e5f94314SAttilio Rao } while (0)
111e5f94314SAttilio Rao #define	GIANT_SAVE() do {						\
1126e8c1ccbSMateusz Guzik 	if (__predict_false(mtx_owned(&Giant))) {			\
113e5f94314SAttilio Rao 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
114e5f94314SAttilio Rao 		while (mtx_owned(&Giant)) {				\
115e5f94314SAttilio Rao 			_i++;						\
116e5f94314SAttilio Rao 			mtx_unlock(&Giant);				\
117e5f94314SAttilio Rao 		}							\
118e5f94314SAttilio Rao 	}								\
119e5f94314SAttilio Rao } while (0)
120e5f94314SAttilio Rao 
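/*
 * Illustrative sketch (informal, not an exact excerpt from this file):
 * the Giant macros above are intended to be paired around a sleep
 * point, as sleeplk() does below.  Giant is fully released (its
 * recursion count unwound into _i) before sleeping and reacquired the
 * same number of times afterwards:
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();
 *	sleepq_wait(&lk->lock_object, pri);
 *	GIANT_RESTORE();
 */
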
12195ab076dSMateusz Guzik static bool __always_inline
12295ab076dSMateusz Guzik LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
12395ab076dSMateusz Guzik {
12495ab076dSMateusz Guzik 
12595ab076dSMateusz Guzik 	if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
12695ab076dSMateusz Guzik 	    LK_SHARE)
12795ab076dSMateusz Guzik 		return (true);
12895ab076dSMateusz Guzik 	if (fp || (!(x & LK_SHARE)))
12995ab076dSMateusz Guzik 		return (false);
13095ab076dSMateusz Guzik 	if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
13195ab076dSMateusz Guzik 	    (curthread->td_pflags & TDP_DEADLKTREAT))
13295ab076dSMateusz Guzik 		return (true);
13395ab076dSMateusz Guzik 	return (false);
13495ab076dSMateusz Guzik }
13595ab076dSMateusz Guzik 
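/*
 * Informal example of the predicate above: with x == (LK_SHARE |
 * LK_SHARERS_LOCK(2)) and no exclusive waiters or spinners set,
 * LK_CAN_SHARE() returns true and the caller may bump the sharer
 * count.  Once LK_EXCLUSIVE_WAITERS is set, new readers are refused
 * unless the deadlock-avoidance conditions apply (td_lk_slocks != 0
 * without LK_NODDLKTREAT, or TDP_DEADLKTREAT).
 */
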
136e5f94314SAttilio Rao #define	LK_TRYOP(x)							\
137e5f94314SAttilio Rao 	((x) & LK_NOWAIT)
138e5f94314SAttilio Rao 
139e5f94314SAttilio Rao #define	LK_CAN_WITNESS(x)						\
140e5f94314SAttilio Rao 	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
141e5f94314SAttilio Rao #define	LK_TRYWIT(x)							\
142e5f94314SAttilio Rao 	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
143047dd67eSAttilio Rao 
144047dd67eSAttilio Rao #define	lockmgr_disowned(lk)						\
145047dd67eSAttilio Rao 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
146047dd67eSAttilio Rao 
14710391db5SMateusz Guzik #define	lockmgr_xlocked_v(v)						\
14810391db5SMateusz Guzik 	(((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
14910391db5SMateusz Guzik 
150bdb6d824SMateusz Guzik #define	lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))
151047dd67eSAttilio Rao 
152d576deedSPawel Jakub Dawidek static void	assert_lockmgr(const struct lock_object *lock, int how);
153047dd67eSAttilio Rao #ifdef DDB
154d576deedSPawel Jakub Dawidek static void	db_show_lockmgr(const struct lock_object *lock);
155be6847d7SJohn Baldwin #endif
1567faf4d90SDavide Italiano static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
157a5aedd68SStacey Son #ifdef KDTRACE_HOOKS
158d576deedSPawel Jakub Dawidek static int	owner_lockmgr(const struct lock_object *lock,
159d576deedSPawel Jakub Dawidek 		    struct thread **owner);
160a5aedd68SStacey Son #endif
1617faf4d90SDavide Italiano static uintptr_t unlock_lockmgr(struct lock_object *lock);
16261bd5e21SKip Macy 
16361bd5e21SKip Macy struct lock_class lock_class_lockmgr = {
1643ff6d229SJohn Baldwin 	.lc_name = "lockmgr",
165047dd67eSAttilio Rao 	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
166f9721b43SAttilio Rao 	.lc_assert = assert_lockmgr,
16761bd5e21SKip Macy #ifdef DDB
1686e21afd4SJohn Baldwin 	.lc_ddb_show = db_show_lockmgr,
16961bd5e21SKip Macy #endif
1706e21afd4SJohn Baldwin 	.lc_lock = lock_lockmgr,
171a5aedd68SStacey Son 	.lc_unlock = unlock_lockmgr,
172a5aedd68SStacey Son #ifdef KDTRACE_HOOKS
173a5aedd68SStacey Son 	.lc_owner = owner_lockmgr,
174a5aedd68SStacey Son #endif
17561bd5e21SKip Macy };
17661bd5e21SKip Macy 
17731ad4050SMateusz Guzik static __read_mostly bool lk_adaptive = true;
17831ad4050SMateusz Guzik static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
17931ad4050SMateusz Guzik SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
18031ad4050SMateusz Guzik     0, "");
18131ad4050SMateusz Guzik #define lockmgr_delay  locks_delay
18231ad4050SMateusz Guzik 
1831c6987ebSMateusz Guzik struct lockmgr_wait {
1841c6987ebSMateusz Guzik 	const char *iwmesg;
1851c6987ebSMateusz Guzik 	int ipri;
1861c6987ebSMateusz Guzik 	int itimo;
1871c6987ebSMateusz Guzik };
1881c6987ebSMateusz Guzik 
189c4a48867SMateusz Guzik static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
19095ab076dSMateusz Guzik     int flags, bool fp);
1911c6987ebSMateusz Guzik static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
1921c6987ebSMateusz Guzik 
1931c6987ebSMateusz Guzik static void
1941c6987ebSMateusz Guzik lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
1951c6987ebSMateusz Guzik {
1961c6987ebSMateusz Guzik 	struct lock_class *class;
1971c6987ebSMateusz Guzik 
1981c6987ebSMateusz Guzik 	if (flags & LK_INTERLOCK) {
1991c6987ebSMateusz Guzik 		class = LOCK_CLASS(ilk);
2001c6987ebSMateusz Guzik 		class->lc_unlock(ilk);
2011c6987ebSMateusz Guzik 	}
2021c6987ebSMateusz Guzik 
2031c6987ebSMateusz Guzik 	if (__predict_false(wakeup_swapper))
2041c6987ebSMateusz Guzik 		kick_proc0();
2051c6987ebSMateusz Guzik }
206c4a48867SMateusz Guzik 
207c4a48867SMateusz Guzik static void
208c4a48867SMateusz Guzik lockmgr_note_shared_acquire(struct lock *lk, int contested,
209c4a48867SMateusz Guzik     uint64_t waittime, const char *file, int line, int flags)
210c4a48867SMateusz Guzik {
211c4a48867SMateusz Guzik 
2125b699f16SMark Johnston 	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
2135b699f16SMark Johnston 	    waittime, file, line, LOCKSTAT_READER);
214c4a48867SMateusz Guzik 	LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
215c4a48867SMateusz Guzik 	WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
216c4a48867SMateusz Guzik 	TD_LOCKS_INC(curthread);
217c4a48867SMateusz Guzik 	TD_SLOCKS_INC(curthread);
218c4a48867SMateusz Guzik 	STACK_SAVE(lk);
219c4a48867SMateusz Guzik }
220c4a48867SMateusz Guzik 
221c4a48867SMateusz Guzik static void
222c4a48867SMateusz Guzik lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
223c4a48867SMateusz Guzik {
224c4a48867SMateusz Guzik 
225c4a48867SMateusz Guzik 	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
226c4a48867SMateusz Guzik 	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
227c4a48867SMateusz Guzik 	TD_LOCKS_DEC(curthread);
228c4a48867SMateusz Guzik 	TD_SLOCKS_DEC(curthread);
229c4a48867SMateusz Guzik }
230c4a48867SMateusz Guzik 
231c4a48867SMateusz Guzik static void
232c4a48867SMateusz Guzik lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
233c4a48867SMateusz Guzik     uint64_t waittime, const char *file, int line, int flags)
234c4a48867SMateusz Guzik {
235c4a48867SMateusz Guzik 
2365b699f16SMark Johnston 	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
2375b699f16SMark Johnston 	    waittime, file, line, LOCKSTAT_WRITER);
238c4a48867SMateusz Guzik 	LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
239c4a48867SMateusz Guzik 	WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
240c4a48867SMateusz Guzik 	    line);
241c4a48867SMateusz Guzik 	TD_LOCKS_INC(curthread);
242c4a48867SMateusz Guzik 	STACK_SAVE(lk);
243c4a48867SMateusz Guzik }
244c4a48867SMateusz Guzik 
245c4a48867SMateusz Guzik static void
246c4a48867SMateusz Guzik lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
247c4a48867SMateusz Guzik {
248c4a48867SMateusz Guzik 
249bdb6d824SMateusz Guzik 	if (LK_HOLDER(lockmgr_read_value(lk)) != LK_KERNPROC) {
250c4a48867SMateusz Guzik 		WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
251c4a48867SMateusz Guzik 		TD_LOCKS_DEC(curthread);
252c4a48867SMateusz Guzik 	}
253c00115f1SMateusz Guzik 	LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
254c00115f1SMateusz Guzik 	    line);
255c00115f1SMateusz Guzik }
256c4a48867SMateusz Guzik 
257047dd67eSAttilio Rao static __inline struct thread *
258d576deedSPawel Jakub Dawidek lockmgr_xholder(const struct lock *lk)
259047dd67eSAttilio Rao {
260047dd67eSAttilio Rao 	uintptr_t x;
261047dd67eSAttilio Rao 
262bdb6d824SMateusz Guzik 	x = lockmgr_read_value(lk);
263047dd67eSAttilio Rao 	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
264047dd67eSAttilio Rao }
26584887fa3SAttilio Rao 
26653bf4bb2SPeter Wemm /*
267047dd67eSAttilio Rao  * Assumes the sleepq chain lock is held on entry and returns with it
268047dd67eSAttilio Rao  * released.  Also assumes the generic interlock is sane and was
269047dd67eSAttilio Rao  * checked beforehand.  If LK_INTERLOCK is specified, the interlock is
270047dd67eSAttilio Rao  * not reacquired after the sleep.
27153bf4bb2SPeter Wemm  */
272047dd67eSAttilio Rao static __inline int
273047dd67eSAttilio Rao sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
274047dd67eSAttilio Rao     const char *wmesg, int pri, int timo, int queue)
275047dd67eSAttilio Rao {
276e5f94314SAttilio Rao 	GIANT_DECLARE;
277047dd67eSAttilio Rao 	struct lock_class *class;
278047dd67eSAttilio Rao 	int catch, error;
27953bf4bb2SPeter Wemm 
280047dd67eSAttilio Rao 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
2815047a8fdSAttilio Rao 	catch = pri & PCATCH;
282047dd67eSAttilio Rao 	pri &= PRIMASK;
283047dd67eSAttilio Rao 	error = 0;
284047dd67eSAttilio Rao 
285047dd67eSAttilio Rao 	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
286047dd67eSAttilio Rao 	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
287047dd67eSAttilio Rao 
288047dd67eSAttilio Rao 	if (flags & LK_INTERLOCK)
289047dd67eSAttilio Rao 		class->lc_unlock(ilk);
290eac22dd4SMateusz Guzik 	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0) {
291eac22dd4SMateusz Guzik 		if (lk->lk_exslpfail < USHRT_MAX)
2922028867dSAttilio Rao 			lk->lk_exslpfail++;
293eac22dd4SMateusz Guzik 	}
294e5f94314SAttilio Rao 	GIANT_SAVE();
295047dd67eSAttilio Rao 	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
296047dd67eSAttilio Rao 	    SLEEPQ_INTERRUPTIBLE : 0), queue);
297a115fb62SHans Petter Selasky 	if ((flags & LK_TIMELOCK) && timo)
298047dd67eSAttilio Rao 		sleepq_set_timeout(&lk->lock_object, timo);
299a115fb62SHans Petter Selasky 
300047dd67eSAttilio Rao 	/*
301047dd67eSAttilio Rao 	 * Choose the appropriate way to sleep.
302047dd67eSAttilio Rao 	 */
303047dd67eSAttilio Rao 	if ((flags & LK_TIMELOCK) && timo && catch)
304047dd67eSAttilio Rao 		error = sleepq_timedwait_sig(&lk->lock_object, pri);
305047dd67eSAttilio Rao 	else if ((flags & LK_TIMELOCK) && timo)
306047dd67eSAttilio Rao 		error = sleepq_timedwait(&lk->lock_object, pri);
307047dd67eSAttilio Rao 	else if (catch)
308047dd67eSAttilio Rao 		error = sleepq_wait_sig(&lk->lock_object, pri);
309047dd67eSAttilio Rao 	else
310047dd67eSAttilio Rao 		sleepq_wait(&lk->lock_object, pri);
311e5f94314SAttilio Rao 	GIANT_RESTORE();
312047dd67eSAttilio Rao 	if ((flags & LK_SLEEPFAIL) && error == 0)
313047dd67eSAttilio Rao 		error = ENOLCK;
314047dd67eSAttilio Rao 
315047dd67eSAttilio Rao 	return (error);
316047dd67eSAttilio Rao }
317047dd67eSAttilio Rao 
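/*
 * An informal note on the tail of sleeplk() above: with LK_SLEEPFAIL,
 * even a sleep that completes without error returns ENOLCK, so callers
 * are expected to retry the whole operation rather than assume the
 * lock state they slept for still holds.
 */
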
318da7bbd2cSJohn Baldwin static __inline int
319047dd67eSAttilio Rao wakeupshlk(struct lock *lk, const char *file, int line)
320047dd67eSAttilio Rao {
3210ad122a9SMateusz Guzik 	uintptr_t v, x, orig_x;
3222028867dSAttilio Rao 	u_int realexslp;
323da7bbd2cSJohn Baldwin 	int queue, wakeup_swapper;
324047dd67eSAttilio Rao 
325da7bbd2cSJohn Baldwin 	wakeup_swapper = 0;
326047dd67eSAttilio Rao 	for (;;) {
327bdb6d824SMateusz Guzik 		x = lockmgr_read_value(lk);
3281c6987ebSMateusz Guzik 		if (lockmgr_sunlock_try(lk, &x))
329047dd67eSAttilio Rao 			break;
330047dd67eSAttilio Rao 
331047dd67eSAttilio Rao 		/*
332047dd67eSAttilio Rao 		 * We should have a sharer with waiters, so enter the hard
333047dd67eSAttilio Rao 		 * path in order to handle wakeups correctly.
334047dd67eSAttilio Rao 		 */
335047dd67eSAttilio Rao 		sleepq_lock(&lk->lock_object);
336bdb6d824SMateusz Guzik 		orig_x = lockmgr_read_value(lk);
3370ad122a9SMateusz Guzik retry_sleepq:
3380ad122a9SMateusz Guzik 		x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
339047dd67eSAttilio Rao 		v = LK_UNLOCKED;
340047dd67eSAttilio Rao 
341047dd67eSAttilio Rao 		/*
342047dd67eSAttilio Rao 		 * If the lock has exclusive waiters, give them preference in
343047dd67eSAttilio Rao 		 * order to avoid deadlock with shared runners up.
3442028867dSAttilio Rao 		 * If interruptible sleeps left the exclusive queue empty,
3452028867dSAttilio Rao 		 * avoid starving the threads sleeping on the shared
3462028867dSAttilio Rao 		 * queue by giving them precedence and clearing the
3472028867dSAttilio Rao 		 * exclusive waiters bit anyway.
348c636ba83SAttilio Rao 		 * Note that the lk_exslpfail count may overstate the
349c636ba83SAttilio Rao 		 * real number of waiters with the LK_SLEEPFAIL flag set,
350e3043798SPedro F. Giffuni 		 * because such sleeps may also be interruptible;
351aab9c8c2SAttilio Rao 		 * lk_exslpfail is therefore only an upper bound,
352aab9c8c2SAttilio Rao 		 * including in the edge cases.
353047dd67eSAttilio Rao 		 */
3542028867dSAttilio Rao 		realexslp = sleepq_sleepcnt(&lk->lock_object,
3552028867dSAttilio Rao 		    SQ_EXCLUSIVE_QUEUE);
3562028867dSAttilio Rao 		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
357eac22dd4SMateusz Guzik 			if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
3582028867dSAttilio Rao 				lk->lk_exslpfail = 0;
359047dd67eSAttilio Rao 				queue = SQ_EXCLUSIVE_QUEUE;
360047dd67eSAttilio Rao 				v |= (x & LK_SHARED_WAITERS);
361047dd67eSAttilio Rao 			} else {
3622028867dSAttilio Rao 				lk->lk_exslpfail = 0;
3632028867dSAttilio Rao 				LOCK_LOG2(lk,
3642028867dSAttilio Rao 				    "%s: %p has only LK_SLEEPFAIL sleepers",
3652028867dSAttilio Rao 				    __func__, lk);
3662028867dSAttilio Rao 				LOCK_LOG2(lk,
3672028867dSAttilio Rao 			    "%s: %p waking up threads on the exclusive queue",
3682028867dSAttilio Rao 				    __func__, lk);
3692028867dSAttilio Rao 				wakeup_swapper =
3702028867dSAttilio Rao 				    sleepq_broadcast(&lk->lock_object,
3712028867dSAttilio Rao 				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
3722028867dSAttilio Rao 				queue = SQ_SHARED_QUEUE;
3732028867dSAttilio Rao 			}
3742028867dSAttilio Rao 		} else {
3759dbf7a62SAttilio Rao 			/*
3769dbf7a62SAttilio Rao 			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
3779dbf7a62SAttilio Rao 			 * and using interruptible sleeps/timeouts may have
3789dbf7a62SAttilio Rao 			 * left a spurious lk_exslpfail count behind, so
3799dbf7a62SAttilio Rao 			 * clean it up anyway.
3809dbf7a62SAttilio Rao 			 */
3819dbf7a62SAttilio Rao 			lk->lk_exslpfail = 0;
382047dd67eSAttilio Rao 			queue = SQ_SHARED_QUEUE;
383047dd67eSAttilio Rao 		}
384047dd67eSAttilio Rao 
3850ad122a9SMateusz Guzik 		if (lockmgr_sunlock_try(lk, &orig_x)) {
386047dd67eSAttilio Rao 			sleepq_release(&lk->lock_object);
3870ad122a9SMateusz Guzik 			break;
3880ad122a9SMateusz Guzik 		}
3890ad122a9SMateusz Guzik 
3900ad122a9SMateusz Guzik 		x |= LK_SHARERS_LOCK(1);
3910ad122a9SMateusz Guzik 		if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
3920ad122a9SMateusz Guzik 			orig_x = x;
3930ad122a9SMateusz Guzik 			goto retry_sleepq;
394047dd67eSAttilio Rao 		}
395047dd67eSAttilio Rao 		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
396047dd67eSAttilio Rao 		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
397047dd67eSAttilio Rao 		    "exclusive");
3982028867dSAttilio Rao 		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
399da7bbd2cSJohn Baldwin 		    0, queue);
400047dd67eSAttilio Rao 		sleepq_release(&lk->lock_object);
401047dd67eSAttilio Rao 		break;
402047dd67eSAttilio Rao 	}
403047dd67eSAttilio Rao 
404c00115f1SMateusz Guzik 	LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
405da7bbd2cSJohn Baldwin 	return (wakeup_swapper);
406047dd67eSAttilio Rao }
407047dd67eSAttilio Rao 
408047dd67eSAttilio Rao static void
409d576deedSPawel Jakub Dawidek assert_lockmgr(const struct lock_object *lock, int what)
410f9721b43SAttilio Rao {
411f9721b43SAttilio Rao 
412f9721b43SAttilio Rao 	panic("lockmgr locks do not support assertions");
413f9721b43SAttilio Rao }
414f9721b43SAttilio Rao 
415047dd67eSAttilio Rao static void
4167faf4d90SDavide Italiano lock_lockmgr(struct lock_object *lock, uintptr_t how)
4176e21afd4SJohn Baldwin {
4186e21afd4SJohn Baldwin 
4196e21afd4SJohn Baldwin 	panic("lockmgr locks do not support sleep interlocking");
4206e21afd4SJohn Baldwin }
4216e21afd4SJohn Baldwin 
4227faf4d90SDavide Italiano static uintptr_t
4236e21afd4SJohn Baldwin unlock_lockmgr(struct lock_object *lock)
4246e21afd4SJohn Baldwin {
4256e21afd4SJohn Baldwin 
4266e21afd4SJohn Baldwin 	panic("lockmgr locks do not support sleep interlocking");
4276e21afd4SJohn Baldwin }
4286e21afd4SJohn Baldwin 
429a5aedd68SStacey Son #ifdef KDTRACE_HOOKS
430a5aedd68SStacey Son static int
431d576deedSPawel Jakub Dawidek owner_lockmgr(const struct lock_object *lock, struct thread **owner)
432a5aedd68SStacey Son {
433a5aedd68SStacey Son 
434a5aedd68SStacey Son 	panic("lockmgr locks do not support owner inquiring");
435a5aedd68SStacey Son }
436a5aedd68SStacey Son #endif
437a5aedd68SStacey Son 
43899448ed1SJohn Dyson void
439047dd67eSAttilio Rao lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
44099448ed1SJohn Dyson {
4416efc8a16SAttilio Rao 	int iflags;
4426efc8a16SAttilio Rao 
443047dd67eSAttilio Rao 	MPASS((flags & ~LK_INIT_MASK) == 0);
444353998acSAttilio Rao 	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
445353998acSAttilio Rao             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
446353998acSAttilio Rao             &lk->lk_lock));
44799448ed1SJohn Dyson 
448f0830182SAttilio Rao 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
449f0830182SAttilio Rao 	if (flags & LK_CANRECURSE)
450f0830182SAttilio Rao 		iflags |= LO_RECURSABLE;
451047dd67eSAttilio Rao 	if ((flags & LK_NODUP) == 0)
4526efc8a16SAttilio Rao 		iflags |= LO_DUPOK;
4537fbfba7bSAttilio Rao 	if (flags & LK_NOPROFILE)
4547fbfba7bSAttilio Rao 		iflags |= LO_NOPROFILE;
455047dd67eSAttilio Rao 	if ((flags & LK_NOWITNESS) == 0)
4566efc8a16SAttilio Rao 		iflags |= LO_WITNESS;
4577fbfba7bSAttilio Rao 	if (flags & LK_QUIET)
4587fbfba7bSAttilio Rao 		iflags |= LO_QUIET;
459e63091eaSMarcel Moolenaar 	if (flags & LK_IS_VNODE)
460e63091eaSMarcel Moolenaar 		iflags |= LO_IS_VNODE;
46146713135SGleb Smirnoff 	if (flags & LK_NEW)
46246713135SGleb Smirnoff 		iflags |= LO_NEW;
4635fe188b1SMateusz Guzik 	iflags |= flags & LK_NOSHARE;
464047dd67eSAttilio Rao 
465b5fb43e5SJohn Baldwin 	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
466047dd67eSAttilio Rao 	lk->lk_lock = LK_UNLOCKED;
467047dd67eSAttilio Rao 	lk->lk_recurse = 0;
4682028867dSAttilio Rao 	lk->lk_exslpfail = 0;
469047dd67eSAttilio Rao 	lk->lk_timo = timo;
470047dd67eSAttilio Rao 	lk->lk_pri = pri;
471047dd67eSAttilio Rao 	STACK_ZERO(lk);
47299448ed1SJohn Dyson }
47399448ed1SJohn Dyson 
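/*
 * A hypothetical usage sketch (names are illustrative, not from this
 * file): a consumer initializes the lock once, acquires and releases
 * it through lockmgr(), and destroys it when done:
 *
 *	struct lock examplelk;
 *
 *	lockinit(&examplelk, PVFS, "examplelk", 0, LK_CANRECURSE);
 *	lockmgr(&examplelk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&examplelk, LK_RELEASE, NULL);
 *	lockdestroy(&examplelk);
 */
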
4743634d5b2SJohn Baldwin /*
4753634d5b2SJohn Baldwin  * XXX: Gross hacks to manipulate external lock flags after
4763634d5b2SJohn Baldwin  * initialization.  Used for certain vnode and buf locks.
4773634d5b2SJohn Baldwin  */
4783634d5b2SJohn Baldwin void
4793634d5b2SJohn Baldwin lockallowshare(struct lock *lk)
4803634d5b2SJohn Baldwin {
4813634d5b2SJohn Baldwin 
4823634d5b2SJohn Baldwin 	lockmgr_assert(lk, KA_XLOCKED);
4833634d5b2SJohn Baldwin 	lk->lock_object.lo_flags &= ~LK_NOSHARE;
4843634d5b2SJohn Baldwin }
4853634d5b2SJohn Baldwin 
4863634d5b2SJohn Baldwin void
487575e02d9SKonstantin Belousov lockdisableshare(struct lock *lk)
488575e02d9SKonstantin Belousov {
489575e02d9SKonstantin Belousov 
490575e02d9SKonstantin Belousov 	lockmgr_assert(lk, KA_XLOCKED);
491575e02d9SKonstantin Belousov 	lk->lock_object.lo_flags |= LK_NOSHARE;
492575e02d9SKonstantin Belousov }
493575e02d9SKonstantin Belousov 
494575e02d9SKonstantin Belousov void
4953634d5b2SJohn Baldwin lockallowrecurse(struct lock *lk)
4963634d5b2SJohn Baldwin {
4973634d5b2SJohn Baldwin 
4983634d5b2SJohn Baldwin 	lockmgr_assert(lk, KA_XLOCKED);
4993634d5b2SJohn Baldwin 	lk->lock_object.lo_flags |= LO_RECURSABLE;
5003634d5b2SJohn Baldwin }
5013634d5b2SJohn Baldwin 
5023634d5b2SJohn Baldwin void
5033634d5b2SJohn Baldwin lockdisablerecurse(struct lock *lk)
5043634d5b2SJohn Baldwin {
5053634d5b2SJohn Baldwin 
5063634d5b2SJohn Baldwin 	lockmgr_assert(lk, KA_XLOCKED);
5073634d5b2SJohn Baldwin 	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
5083634d5b2SJohn Baldwin }
5093634d5b2SJohn Baldwin 
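/*
 * For instance (hypothetical caller), a lock created with LK_NOSHARE
 * can later be opened up to shared lockers; the KA_XLOCKED assertions
 * above require the lock to be held exclusively while the flags are
 * changed:
 *
 *	lockmgr(&examplelk, LK_EXCLUSIVE, NULL);
 *	lockallowshare(&examplelk);
 *	lockmgr(&examplelk, LK_RELEASE, NULL);
 */
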
510a18b1f1dSJason Evans void
511047dd67eSAttilio Rao lockdestroy(struct lock *lk)
512a18b1f1dSJason Evans {
513c91fcee7SJohn Baldwin 
514047dd67eSAttilio Rao 	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
515047dd67eSAttilio Rao 	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
5162028867dSAttilio Rao 	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
517047dd67eSAttilio Rao 	lock_destroy(&lk->lock_object);
518047dd67eSAttilio Rao }
519047dd67eSAttilio Rao 
520c4a48867SMateusz Guzik static bool __always_inline
52195ab076dSMateusz Guzik lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
522c4a48867SMateusz Guzik {
523c4a48867SMateusz Guzik 
524c4a48867SMateusz Guzik 	/*
525c4a48867SMateusz Guzik 	 * If no other thread has an exclusive lock and
526c4a48867SMateusz Guzik 	 * no exclusive waiter is present, bump the count of
527c4a48867SMateusz Guzik 	 * sharers.  Since we have to preserve the state of
528c4a48867SMateusz Guzik 	 * waiters, if we fail to acquire the shared lock
529c4a48867SMateusz Guzik 	 * loop back and retry.
530c4a48867SMateusz Guzik 	 */
53195ab076dSMateusz Guzik 	while (LK_CAN_SHARE(*xp, flags, fp)) {
532c4a48867SMateusz Guzik 		if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
533c4a48867SMateusz Guzik 		    *xp + LK_ONE_SHARER)) {
534c4a48867SMateusz Guzik 			return (true);
535c4a48867SMateusz Guzik 		}
536c4a48867SMateusz Guzik 	}
537c4a48867SMateusz Guzik 	return (false);
538c4a48867SMateusz Guzik }
539c4a48867SMateusz Guzik 
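/*
 * A note on the loop above: atomic_fcmpset_acq_ptr() updates *xp with
 * the currently observed lock word when it fails, so each retry
 * re-evaluates LK_CAN_SHARE() against a fresh value without an
 * explicit re-read.
 */
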
540c4a48867SMateusz Guzik static bool __always_inline
5411c6987ebSMateusz Guzik lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
542c4a48867SMateusz Guzik {
543c4a48867SMateusz Guzik 
544c4a48867SMateusz Guzik 	for (;;) {
54595ab076dSMateusz Guzik 		if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
5461c6987ebSMateusz Guzik 			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
5471c6987ebSMateusz Guzik 			    *xp - LK_ONE_SHARER))
548c4a48867SMateusz Guzik 				return (true);
549c4a48867SMateusz Guzik 			continue;
550c4a48867SMateusz Guzik 		}
551c4a48867SMateusz Guzik 		break;
552c4a48867SMateusz Guzik 	}
553c4a48867SMateusz Guzik 	return (false);
554c4a48867SMateusz Guzik }
555c4a48867SMateusz Guzik 
55631ad4050SMateusz Guzik static bool
55731ad4050SMateusz Guzik lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
55831ad4050SMateusz Guzik     int flags)
55931ad4050SMateusz Guzik {
56031ad4050SMateusz Guzik 	struct thread *owner;
56131ad4050SMateusz Guzik 	uintptr_t x;
56231ad4050SMateusz Guzik 
56331ad4050SMateusz Guzik 	x = *xp;
56431ad4050SMateusz Guzik 	MPASS(x != LK_UNLOCKED);
56531ad4050SMateusz Guzik 	owner = (struct thread *)LK_HOLDER(x);
56631ad4050SMateusz Guzik 	for (;;) {
56731ad4050SMateusz Guzik 		MPASS(owner != curthread);
56831ad4050SMateusz Guzik 		if (owner == (struct thread *)LK_KERNPROC)
56931ad4050SMateusz Guzik 			return (false);
57031ad4050SMateusz Guzik 		if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
57131ad4050SMateusz Guzik 			return (false);
57231ad4050SMateusz Guzik 		if (owner == NULL)
57331ad4050SMateusz Guzik 			return (false);
57431ad4050SMateusz Guzik 		if (!TD_IS_RUNNING(owner))
57531ad4050SMateusz Guzik 			return (false);
57631ad4050SMateusz Guzik 		if ((x & LK_ALL_WAITERS) != 0)
57731ad4050SMateusz Guzik 			return (false);
57831ad4050SMateusz Guzik 		lock_delay(lda);
57931ad4050SMateusz Guzik 		x = lockmgr_read_value(lk);
58031ad4050SMateusz Guzik 		if (LK_CAN_SHARE(x, flags, false)) {
58131ad4050SMateusz Guzik 			*xp = x;
58231ad4050SMateusz Guzik 			return (true);
58331ad4050SMateusz Guzik 		}
58431ad4050SMateusz Guzik 		owner = (struct thread *)LK_HOLDER(x);
58531ad4050SMateusz Guzik 	}
58631ad4050SMateusz Guzik }
58731ad4050SMateusz Guzik 
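/*
 * Informal summary of the adaptive path above: we keep spinning via
 * lock_delay() only while the lock is held by a running thread (not
 * disowned to LK_KERNPROC) and no waiters are queued; every other
 * state bails out to the sleepqueue path in the callers.
 */
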
5881c6987ebSMateusz Guzik static __noinline int
5891c6987ebSMateusz Guzik lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
5901c6987ebSMateusz Guzik     const char *file, int line, struct lockmgr_wait *lwa)
591c4a48867SMateusz Guzik {
5921c6987ebSMateusz Guzik 	uintptr_t tid, x;
5931c6987ebSMateusz Guzik 	int error = 0;
594047dd67eSAttilio Rao 	const char *iwmesg;
5951c6987ebSMateusz Guzik 	int ipri, itimo;
5961c6987ebSMateusz Guzik 
5975b699f16SMark Johnston #ifdef KDTRACE_HOOKS
5985b699f16SMark Johnston 	uint64_t sleep_time = 0;
5995b699f16SMark Johnston #endif
6001723a064SJeff Roberson #ifdef LOCK_PROFILING
6011723a064SJeff Roberson 	uint64_t waittime = 0;
6021723a064SJeff Roberson 	int contested = 0;
6031723a064SJeff Roberson #endif
60431ad4050SMateusz Guzik 	struct lock_delay_arg lda;
605047dd67eSAttilio Rao 
606879e0604SMateusz Guzik 	if (KERNEL_PANICKED())
6071c6987ebSMateusz Guzik 		goto out;
6081c6987ebSMateusz Guzik 
609047dd67eSAttilio Rao 	tid = (uintptr_t)curthread;
610047dd67eSAttilio Rao 
611e5f94314SAttilio Rao 	if (LK_CAN_WITNESS(flags))
612e5f94314SAttilio Rao 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
61324150d37SJohn Baldwin 		    file, line, flags & LK_INTERLOCK ? ilk : NULL);
614f90d57b8SMateusz Guzik 	x = lockmgr_read_value(lk);
61531ad4050SMateusz Guzik 	lock_delay_arg_init(&lda, &lockmgr_delay);
61631ad4050SMateusz Guzik 	if (!lk_adaptive)
61731ad4050SMateusz Guzik 		flags &= ~LK_ADAPTIVE;
618047dd67eSAttilio Rao 	/*
61931ad4050SMateusz Guzik 	 * The lock may already be held exclusive by curthread;
62031ad4050SMateusz Guzik 	 * avoid deadlock.
621047dd67eSAttilio Rao 	 */
622047dd67eSAttilio Rao 	if (LK_HOLDER(x) == tid) {
623047dd67eSAttilio Rao 		LOCK_LOG2(lk,
62496f1567fSKonstantin Belousov 		    "%s: %p already held in exclusive mode",
625047dd67eSAttilio Rao 		    __func__, lk);
626047dd67eSAttilio Rao 		error = EDEADLK;
62731ad4050SMateusz Guzik 		goto out;
628a18b1f1dSJason Evans 	}
629a18b1f1dSJason Evans 
63031ad4050SMateusz Guzik 	for (;;) {
63131ad4050SMateusz Guzik 		if (lockmgr_slock_try(lk, &x, flags, false))
63231ad4050SMateusz Guzik 			break;
63331ad4050SMateusz Guzik 
634*f902e4bbSMateusz Guzik 		lock_profile_obtain_lock_failed(&lk->lock_object, false,
635*f902e4bbSMateusz Guzik 		    &contested, &waittime);
636*f902e4bbSMateusz Guzik 
63731ad4050SMateusz Guzik 		if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
63831ad4050SMateusz Guzik 			if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
63931ad4050SMateusz Guzik 				continue;
64031ad4050SMateusz Guzik 		}
64131ad4050SMateusz Guzik 
64231ad4050SMateusz Guzik #ifdef HWPMC_HOOKS
64331ad4050SMateusz Guzik 		PMC_SOFT_CALL( , , lock, failed);
64431ad4050SMateusz Guzik #endif
64531ad4050SMateusz Guzik 
646a18b1f1dSJason Evans 		/*
647047dd67eSAttilio Rao 		 * If the operation is expected not to sleep, just
648047dd67eSAttilio Rao 		 * give up and return.
649d7a7e179SAttilio Rao 		 */
650047dd67eSAttilio Rao 		if (LK_TRYOP(flags)) {
651047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p fails the try operation",
652047dd67eSAttilio Rao 			    __func__, lk);
653047dd67eSAttilio Rao 			error = EBUSY;
654047dd67eSAttilio Rao 			break;
655047dd67eSAttilio Rao 		}
656047dd67eSAttilio Rao 
657047dd67eSAttilio Rao 		/*
658047dd67eSAttilio Rao 		 * Acquire the sleepqueue chain lock because we
659047dd67eSAttilio Rao 		 * probably will need to manipulate the waiters flags.
660047dd67eSAttilio Rao 		 */
661047dd67eSAttilio Rao 		sleepq_lock(&lk->lock_object);
662bdb6d824SMateusz Guzik 		x = lockmgr_read_value(lk);
6630ad122a9SMateusz Guzik retry_sleepq:
664047dd67eSAttilio Rao 
665047dd67eSAttilio Rao 		/*
666047dd67eSAttilio Rao 		 * If the lock can be acquired in shared mode, try
667047dd67eSAttilio Rao 		 * again.
668047dd67eSAttilio Rao 		 */
66995ab076dSMateusz Guzik 		if (LK_CAN_SHARE(x, flags, false)) {
670047dd67eSAttilio Rao 			sleepq_release(&lk->lock_object);
671047dd67eSAttilio Rao 			continue;
672047dd67eSAttilio Rao 		}
673047dd67eSAttilio Rao 
674047dd67eSAttilio Rao 		/*
675047dd67eSAttilio Rao 		 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
676047dd67eSAttilio Rao 		 * loop back and retry.
677047dd67eSAttilio Rao 		 */
678047dd67eSAttilio Rao 		if ((x & LK_SHARED_WAITERS) == 0) {
6790ad122a9SMateusz Guzik 			if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
680047dd67eSAttilio Rao 			    x | LK_SHARED_WAITERS)) {
6810ad122a9SMateusz Guzik 				goto retry_sleepq;
682047dd67eSAttilio Rao 			}
683047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p set shared waiters flag",
684047dd67eSAttilio Rao 			    __func__, lk);
685047dd67eSAttilio Rao 		}
686047dd67eSAttilio Rao 
6871c6987ebSMateusz Guzik 		if (lwa == NULL) {
6881c6987ebSMateusz Guzik 			iwmesg = lk->lock_object.lo_name;
6891c6987ebSMateusz Guzik 			ipri = lk->lk_pri;
6901c6987ebSMateusz Guzik 			itimo = lk->lk_timo;
6911c6987ebSMateusz Guzik 		} else {
6921c6987ebSMateusz Guzik 			iwmesg = lwa->iwmesg;
6931c6987ebSMateusz Guzik 			ipri = lwa->ipri;
6941c6987ebSMateusz Guzik 			itimo = lwa->itimo;
6951c6987ebSMateusz Guzik 		}
6961c6987ebSMateusz Guzik 
697047dd67eSAttilio Rao 		/*
698047dd67eSAttilio Rao 		 * Since we have been unable to acquire the
699047dd67eSAttilio Rao 		 * shared lock and the shared waiters flag is set,
700047dd67eSAttilio Rao 		 * we will sleep.
701047dd67eSAttilio Rao 		 */
7025b699f16SMark Johnston #ifdef KDTRACE_HOOKS
7035b699f16SMark Johnston 		sleep_time -= lockstat_nsecs(&lk->lock_object);
7045b699f16SMark Johnston #endif
705047dd67eSAttilio Rao 		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
706047dd67eSAttilio Rao 		    SQ_SHARED_QUEUE);
7075b699f16SMark Johnston #ifdef KDTRACE_HOOKS
7085b699f16SMark Johnston 		sleep_time += lockstat_nsecs(&lk->lock_object);
7095b699f16SMark Johnston #endif
710047dd67eSAttilio Rao 		flags &= ~LK_INTERLOCK;
711047dd67eSAttilio Rao 		if (error) {
712047dd67eSAttilio Rao 			LOCK_LOG3(lk,
713047dd67eSAttilio Rao 			    "%s: interrupted sleep for %p with %d",
714047dd67eSAttilio Rao 			    __func__, lk, error);
715047dd67eSAttilio Rao 			break;
716047dd67eSAttilio Rao 		}
717047dd67eSAttilio Rao 		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
718047dd67eSAttilio Rao 		    __func__, lk);
71931ad4050SMateusz Guzik 		x = lockmgr_read_value(lk);
720047dd67eSAttilio Rao 	}
721047dd67eSAttilio Rao 	if (error == 0) {
7225b699f16SMark Johnston #ifdef KDTRACE_HOOKS
7235b699f16SMark Johnston 		if (sleep_time != 0)
7245b699f16SMark Johnston 			LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
7255b699f16SMark Johnston 			    LOCKSTAT_READER, (x & LK_SHARE) == 0,
7265b699f16SMark Johnston 			    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
7275b699f16SMark Johnston #endif
728c4a48867SMateusz Guzik #ifdef LOCK_PROFILING
729c4a48867SMateusz Guzik 		lockmgr_note_shared_acquire(lk, contested, waittime,
730c4a48867SMateusz Guzik 		    file, line, flags);
731c4a48867SMateusz Guzik #else
732c4a48867SMateusz Guzik 		lockmgr_note_shared_acquire(lk, 0, 0, file, line,
733c4a48867SMateusz Guzik 		    flags);
734c4a48867SMateusz Guzik #endif
735047dd67eSAttilio Rao 	}
736047dd67eSAttilio Rao 
7371c6987ebSMateusz Guzik out:
7381c6987ebSMateusz Guzik 	lockmgr_exit(flags, ilk, 0);
7391c6987ebSMateusz Guzik 	return (error);
740047dd67eSAttilio Rao }
741047dd67eSAttilio Rao 
74231ad4050SMateusz Guzik static bool
74331ad4050SMateusz Guzik lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
74431ad4050SMateusz Guzik {
74531ad4050SMateusz Guzik 	struct thread *owner;
74631ad4050SMateusz Guzik 	uintptr_t x;
74731ad4050SMateusz Guzik 
74831ad4050SMateusz Guzik 	x = *xp;
74931ad4050SMateusz Guzik 	MPASS(x != LK_UNLOCKED);
75031ad4050SMateusz Guzik 	owner = (struct thread *)LK_HOLDER(x);
75131ad4050SMateusz Guzik 	for (;;) {
75231ad4050SMateusz Guzik 		MPASS(owner != curthread);
75331ad4050SMateusz Guzik 		if (owner == NULL)
75431ad4050SMateusz Guzik 			return (false);
75531ad4050SMateusz Guzik 		if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
75631ad4050SMateusz Guzik 			return (false);
75731ad4050SMateusz Guzik 		if (owner == (struct thread *)LK_KERNPROC)
75831ad4050SMateusz Guzik 			return (false);
75931ad4050SMateusz Guzik 		if (!TD_IS_RUNNING(owner))
76031ad4050SMateusz Guzik 			return (false);
76131ad4050SMateusz Guzik 		if ((x & LK_ALL_WAITERS) != 0)
76231ad4050SMateusz Guzik 			return (false);
76331ad4050SMateusz Guzik 		lock_delay(lda);
76431ad4050SMateusz Guzik 		x = lockmgr_read_value(lk);
76531ad4050SMateusz Guzik 		if (x == LK_UNLOCKED) {
76631ad4050SMateusz Guzik 			*xp = x;
76731ad4050SMateusz Guzik 			return (true);
76831ad4050SMateusz Guzik 		}
76931ad4050SMateusz Guzik 		owner = (struct thread *)LK_HOLDER(x);
77031ad4050SMateusz Guzik 	}
77131ad4050SMateusz Guzik }
77231ad4050SMateusz Guzik 
7731c6987ebSMateusz Guzik static __noinline int
7741c6987ebSMateusz Guzik lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
7751c6987ebSMateusz Guzik     const char *file, int line, struct lockmgr_wait *lwa)
7761c6987ebSMateusz Guzik {
7771c6987ebSMateusz Guzik 	struct lock_class *class;
7781c6987ebSMateusz Guzik 	uintptr_t tid, x, v;
7791c6987ebSMateusz Guzik 	int error = 0;
7801c6987ebSMateusz Guzik 	const char *iwmesg;
7811c6987ebSMateusz Guzik 	int ipri, itimo;
7827c6fe803SKonstantin Belousov 
7835b699f16SMark Johnston #ifdef KDTRACE_HOOKS
7845b699f16SMark Johnston 	uint64_t sleep_time = 0;
7855b699f16SMark Johnston #endif
7861c6987ebSMateusz Guzik #ifdef LOCK_PROFILING
7871c6987ebSMateusz Guzik 	uint64_t waittime = 0;
7881c6987ebSMateusz Guzik 	int contested = 0;
7891c6987ebSMateusz Guzik #endif
79031ad4050SMateusz Guzik 	struct lock_delay_arg lda;
791047dd67eSAttilio Rao 
792879e0604SMateusz Guzik 	if (KERNEL_PANICKED())
7931c6987ebSMateusz Guzik 		goto out;
7941c6987ebSMateusz Guzik 
7951c6987ebSMateusz Guzik 	tid = (uintptr_t)curthread;
7961c6987ebSMateusz Guzik 
797e5f94314SAttilio Rao 	if (LK_CAN_WITNESS(flags))
798e5f94314SAttilio Rao 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
79924150d37SJohn Baldwin 		    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
80024150d37SJohn Baldwin 		    ilk : NULL);
801047dd67eSAttilio Rao 
802047dd67eSAttilio Rao 	/*
80396f1567fSKonstantin Belousov 	 * If curthread already holds the lock and this one is
804047dd67eSAttilio Rao 	 * allowed to recurse, simply recurse on it.
805047dd67eSAttilio Rao 	 */
806047dd67eSAttilio Rao 	if (lockmgr_xlocked(lk)) {
807047dd67eSAttilio Rao 		if ((flags & LK_CANRECURSE) == 0 &&
808f0830182SAttilio Rao 		    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
809047dd67eSAttilio Rao 			/*
810047dd67eSAttilio Rao 			 * If the operation is expected not to panic,
811047dd67eSAttilio Rao 			 * just give up and return.
812047dd67eSAttilio Rao 			 */
813047dd67eSAttilio Rao 			if (LK_TRYOP(flags)) {
814047dd67eSAttilio Rao 				LOCK_LOG2(lk,
815047dd67eSAttilio Rao 				    "%s: %p fails the try operation",
816047dd67eSAttilio Rao 				    __func__, lk);
817047dd67eSAttilio Rao 				error = EBUSY;
8181c6987ebSMateusz Guzik 				goto out;
819047dd67eSAttilio Rao 			}
8201c6987ebSMateusz Guzik 			if (flags & LK_INTERLOCK) {
8211c6987ebSMateusz Guzik 				class = LOCK_CLASS(ilk);
822047dd67eSAttilio Rao 				class->lc_unlock(ilk);
8231c6987ebSMateusz Guzik 			}
8249a79b990SKirk McKusick 			STACK_PRINT(lk);
82583fc34eaSGleb Smirnoff 			panic("%s: recursing on non recursive lockmgr %p "
82683fc34eaSGleb Smirnoff 			    "@ %s:%d\n", __func__, lk, file, line);
827047dd67eSAttilio Rao 		}
8284aff9f5dSMateusz Guzik 		atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
829047dd67eSAttilio Rao 		lk->lk_recurse++;
830047dd67eSAttilio Rao 		LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
831047dd67eSAttilio Rao 		LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
832047dd67eSAttilio Rao 		    lk->lk_recurse, file, line);
833e5f94314SAttilio Rao 		WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
834e5f94314SAttilio Rao 		    LK_TRYWIT(flags), file, line);
835047dd67eSAttilio Rao 		TD_LOCKS_INC(curthread);
8361c6987ebSMateusz Guzik 		goto out;
837047dd67eSAttilio Rao 	}
838047dd67eSAttilio Rao 
83931ad4050SMateusz Guzik 	x = LK_UNLOCKED;
84031ad4050SMateusz Guzik 	lock_delay_arg_init(&lda, &lockmgr_delay);
84131ad4050SMateusz Guzik 	if (!lk_adaptive)
84231ad4050SMateusz Guzik 		flags &= ~LK_ADAPTIVE;
843fc4f686dSMateusz Guzik 	for (;;) {
84431ad4050SMateusz Guzik 		if (x == LK_UNLOCKED) {
84531ad4050SMateusz Guzik 			if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
846fc4f686dSMateusz Guzik 				break;
84713869889SMateusz Guzik 			continue;
84831ad4050SMateusz Guzik 		}
849*f902e4bbSMateusz Guzik 
850*f902e4bbSMateusz Guzik 		lock_profile_obtain_lock_failed(&lk->lock_object, false,
851*f902e4bbSMateusz Guzik 		    &contested, &waittime);
852*f902e4bbSMateusz Guzik 
85331ad4050SMateusz Guzik 		if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
85431ad4050SMateusz Guzik 			if (lockmgr_xlock_adaptive(&lda, lk, &x))
85531ad4050SMateusz Guzik 				continue;
85631ad4050SMateusz Guzik 		}
857f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
858f5f9340bSFabien Thomas 		PMC_SOFT_CALL( , , lock, failed);
859f5f9340bSFabien Thomas #endif
860047dd67eSAttilio Rao 
861047dd67eSAttilio Rao 		/*
862047dd67eSAttilio Rao 		 * If the operation is expected not to sleep, just
863047dd67eSAttilio Rao 		 * give up and return.
864047dd67eSAttilio Rao 		 */
865047dd67eSAttilio Rao 		if (LK_TRYOP(flags)) {
866047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p fails the try operation",
867047dd67eSAttilio Rao 			    __func__, lk);
868047dd67eSAttilio Rao 			error = EBUSY;
869047dd67eSAttilio Rao 			break;
870047dd67eSAttilio Rao 		}
871047dd67eSAttilio Rao 
872047dd67eSAttilio Rao 		/*
873047dd67eSAttilio Rao 		 * Acquire the sleepqueue chain lock because we
874047dd67eSAttilio Rao 		 * probably will need to manipulate the waiters flags.
875047dd67eSAttilio Rao 		 */
876047dd67eSAttilio Rao 		sleepq_lock(&lk->lock_object);
877bdb6d824SMateusz Guzik 		x = lockmgr_read_value(lk);
8780ad122a9SMateusz Guzik retry_sleepq:
879047dd67eSAttilio Rao 
880047dd67eSAttilio Rao 		/*
881047dd67eSAttilio Rao 		 * If the lock has been released while we spun on
882047dd67eSAttilio Rao 		 * the sleepqueue chain lock, just try again.
883047dd67eSAttilio Rao 		 */
884047dd67eSAttilio Rao 		if (x == LK_UNLOCKED) {
885047dd67eSAttilio Rao 			sleepq_release(&lk->lock_object);
886047dd67eSAttilio Rao 			continue;
887047dd67eSAttilio Rao 		}
888047dd67eSAttilio Rao 
889047dd67eSAttilio Rao 		/*
890047dd67eSAttilio Rao 		 * The lock can be in the state where there is a
891047dd67eSAttilio Rao 		 * pending queue of waiters, but still no owner.
892047dd67eSAttilio Rao 		 * This happens when the lock is contested and an
893047dd67eSAttilio Rao 		 * owner is going to claim the lock.
894047dd67eSAttilio Rao 		 * If curthread is the one successfully acquiring it,
895047dd67eSAttilio Rao 		 * claim lock ownership and return, preserving the waiters
896047dd67eSAttilio Rao 		 * flags.
897047dd67eSAttilio Rao 		 */
898651175c9SAttilio Rao 		v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
899651175c9SAttilio Rao 		if ((x & ~v) == LK_UNLOCKED) {
900651175c9SAttilio Rao 			v &= ~LK_EXCLUSIVE_SPINNERS;
9010ad122a9SMateusz Guzik 			if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
902047dd67eSAttilio Rao 			    tid | v)) {
903047dd67eSAttilio Rao 				sleepq_release(&lk->lock_object);
904047dd67eSAttilio Rao 				LOCK_LOG2(lk,
905047dd67eSAttilio Rao 				    "%s: %p claimed by a new writer",
906047dd67eSAttilio Rao 				    __func__, lk);
907047dd67eSAttilio Rao 				break;
908047dd67eSAttilio Rao 			}
9090ad122a9SMateusz Guzik 			goto retry_sleepq;
910047dd67eSAttilio Rao 		}
911047dd67eSAttilio Rao 
912047dd67eSAttilio Rao 		/*
913047dd67eSAttilio Rao 		 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
914047dd67eSAttilio Rao 		 * fail, loop back and retry.
915047dd67eSAttilio Rao 		 */
916047dd67eSAttilio Rao 		if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
9170ad122a9SMateusz Guzik 			if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
918047dd67eSAttilio Rao 			    x | LK_EXCLUSIVE_WAITERS)) {
9190ad122a9SMateusz Guzik 				goto retry_sleepq;
920047dd67eSAttilio Rao 			}
921047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p set excl waiters flag",
922047dd67eSAttilio Rao 			    __func__, lk);
923047dd67eSAttilio Rao 		}
924047dd67eSAttilio Rao 
9251c6987ebSMateusz Guzik 		if (lwa == NULL) {
9261c6987ebSMateusz Guzik 			iwmesg = lk->lock_object.lo_name;
9271c6987ebSMateusz Guzik 			ipri = lk->lk_pri;
9281c6987ebSMateusz Guzik 			itimo = lk->lk_timo;
9291c6987ebSMateusz Guzik 		} else {
9301c6987ebSMateusz Guzik 			iwmesg = lwa->iwmesg;
9311c6987ebSMateusz Guzik 			ipri = lwa->ipri;
9321c6987ebSMateusz Guzik 			itimo = lwa->itimo;
9331c6987ebSMateusz Guzik 		}
9341c6987ebSMateusz Guzik 
935047dd67eSAttilio Rao 		/*
936047dd67eSAttilio Rao 		 * Since we have been unable to acquire the
937047dd67eSAttilio Rao 		 * exclusive lock and the exclusive waiters flag
938047dd67eSAttilio Rao 		 * is set, we will sleep.
939047dd67eSAttilio Rao 		 */
9405b699f16SMark Johnston #ifdef KDTRACE_HOOKS
9415b699f16SMark Johnston 		sleep_time -= lockstat_nsecs(&lk->lock_object);
9425b699f16SMark Johnston #endif
943047dd67eSAttilio Rao 		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
944047dd67eSAttilio Rao 		    SQ_EXCLUSIVE_QUEUE);
9455b699f16SMark Johnston #ifdef KDTRACE_HOOKS
9465b699f16SMark Johnston 		sleep_time += lockstat_nsecs(&lk->lock_object);
9475b699f16SMark Johnston #endif
948047dd67eSAttilio Rao 		flags &= ~LK_INTERLOCK;
949047dd67eSAttilio Rao 		if (error) {
950047dd67eSAttilio Rao 			LOCK_LOG3(lk,
951047dd67eSAttilio Rao 			    "%s: interrupted sleep for %p with %d",
952047dd67eSAttilio Rao 			    __func__, lk, error);
953047dd67eSAttilio Rao 			break;
954047dd67eSAttilio Rao 		}
955047dd67eSAttilio Rao 		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
956047dd67eSAttilio Rao 		    __func__, lk);
95731ad4050SMateusz Guzik 		x = lockmgr_read_value(lk);
958047dd67eSAttilio Rao 	}
959047dd67eSAttilio Rao 	if (error == 0) {
9605b699f16SMark Johnston #ifdef KDTRACE_HOOKS
9615b699f16SMark Johnston 		if (sleep_time != 0)
9625b699f16SMark Johnston 			LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
9635b699f16SMark Johnston 			    LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
9645b699f16SMark Johnston 			    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
9655b699f16SMark Johnston #endif
966c4a48867SMateusz Guzik #ifdef LOCK_PROFILING
967c4a48867SMateusz Guzik 		lockmgr_note_exclusive_acquire(lk, contested, waittime,
968c4a48867SMateusz Guzik 		    file, line, flags);
969c4a48867SMateusz Guzik #else
970c4a48867SMateusz Guzik 		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
971c4a48867SMateusz Guzik 		    flags);
972c4a48867SMateusz Guzik #endif
973047dd67eSAttilio Rao 	}
9741c6987ebSMateusz Guzik 
9751c6987ebSMateusz Guzik out:
9761c6987ebSMateusz Guzik 	lockmgr_exit(flags, ilk, 0);
9771c6987ebSMateusz Guzik 	return (error);
9781c6987ebSMateusz Guzik }
9791c6987ebSMateusz Guzik 
9801c6987ebSMateusz Guzik static __noinline int
9811c6987ebSMateusz Guzik lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
9821c6987ebSMateusz Guzik     const char *file, int line, struct lockmgr_wait *lwa)
9831c6987ebSMateusz Guzik {
984f6b091fbSMateusz Guzik 	uintptr_t tid, v, setv;
9851c6987ebSMateusz Guzik 	int error = 0;
9861c6987ebSMateusz Guzik 	int op;
9871c6987ebSMateusz Guzik 
988879e0604SMateusz Guzik 	if (KERNEL_PANICKED())
9891c6987ebSMateusz Guzik 		goto out;
9901c6987ebSMateusz Guzik 
9911c6987ebSMateusz Guzik 	tid = (uintptr_t)curthread;
9921c6987ebSMateusz Guzik 
9931c6987ebSMateusz Guzik 	_lockmgr_assert(lk, KA_SLOCKED, file, line);
994f6b091fbSMateusz Guzik 
995f6b091fbSMateusz Guzik 	op = flags & LK_TYPE_MASK;
996bdb6d824SMateusz Guzik 	v = lockmgr_read_value(lk);
997f6b091fbSMateusz Guzik 	for (;;) {
99838baca17SMateusz Guzik 		if (LK_SHARERS(v) > 1) {
999f6b091fbSMateusz Guzik 			if (op == LK_TRYUPGRADE) {
1000f6b091fbSMateusz Guzik 				LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
1001f6b091fbSMateusz Guzik 				    __func__, lk);
1002f6b091fbSMateusz Guzik 				error = EBUSY;
1003f6b091fbSMateusz Guzik 				goto out;
1004f6b091fbSMateusz Guzik 			}
100538baca17SMateusz Guzik 			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &v,
100638baca17SMateusz Guzik 			    v - LK_ONE_SHARER)) {
1007f6b091fbSMateusz Guzik 				lockmgr_note_shared_release(lk, file, line);
1008f6b091fbSMateusz Guzik 				goto out_xlock;
1009f6b091fbSMateusz Guzik 			}
101038baca17SMateusz Guzik 			continue;
1011f6b091fbSMateusz Guzik 		}
1012f6b091fbSMateusz Guzik 		MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));
1013f6b091fbSMateusz Guzik 
1014f6b091fbSMateusz Guzik 		setv = tid;
1015f6b091fbSMateusz Guzik 		setv |= (v & LK_ALL_WAITERS);
10161c6987ebSMateusz Guzik 
10171c6987ebSMateusz Guzik 		/*
10181c6987ebSMateusz Guzik 		 * Try to switch from one shared lock to an exclusive one.
10191c6987ebSMateusz Guzik 		 * We need to preserve the waiters flags during the operation.
10201c6987ebSMateusz Guzik 		 */
1021f6b091fbSMateusz Guzik 		if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
10221c6987ebSMateusz Guzik 			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
10231c6987ebSMateusz Guzik 			    line);
10241c6987ebSMateusz Guzik 			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
10251c6987ebSMateusz Guzik 			    LK_TRYWIT(flags), file, line);
10265b699f16SMark Johnston 			LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
10271c6987ebSMateusz Guzik 			TD_SLOCKS_DEC(curthread);
10281c6987ebSMateusz Guzik 			goto out;
10291c6987ebSMateusz Guzik 		}
10301c6987ebSMateusz Guzik 	}
10311c6987ebSMateusz Guzik 
1032f6b091fbSMateusz Guzik out_xlock:
10331c6987ebSMateusz Guzik 	error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
10341c6987ebSMateusz Guzik 	flags &= ~LK_INTERLOCK;
10351c6987ebSMateusz Guzik out:
1036f6b091fbSMateusz Guzik 	lockmgr_exit(flags, ilk, 0);
10371c6987ebSMateusz Guzik 	return (error);
10381c6987ebSMateusz Guzik }
10391c6987ebSMateusz Guzik 
10401c6987ebSMateusz Guzik int
1041c1b57fa7SMateusz Guzik lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
10421c6987ebSMateusz Guzik     const char *file, int line)
10431c6987ebSMateusz Guzik {
10441c6987ebSMateusz Guzik 	struct lock_class *class;
10451c6987ebSMateusz Guzik 	uintptr_t x, tid;
10461c6987ebSMateusz Guzik 	u_int op;
10471c6987ebSMateusz Guzik 	bool locked;
10481c6987ebSMateusz Guzik 
1049879e0604SMateusz Guzik 	if (KERNEL_PANICKED())
1050b543c98cSConrad Meyer 		return (0);
1051b543c98cSConrad Meyer 
10521c6987ebSMateusz Guzik 	op = flags & LK_TYPE_MASK;
10531c6987ebSMateusz Guzik 	locked = false;
10541c6987ebSMateusz Guzik 	switch (op) {
10551c6987ebSMateusz Guzik 	case LK_SHARED:
10561c6987ebSMateusz Guzik 		if (LK_CAN_WITNESS(flags))
10571c6987ebSMateusz Guzik 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
10581c6987ebSMateusz Guzik 			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
10591c6987ebSMateusz Guzik 		if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
10601c6987ebSMateusz Guzik 			break;
106131ad4050SMateusz Guzik 		x = lockmgr_read_value(lk);
106295ab076dSMateusz Guzik 		if (lockmgr_slock_try(lk, &x, flags, true)) {
10631c6987ebSMateusz Guzik 			lockmgr_note_shared_acquire(lk, 0, 0,
10641c6987ebSMateusz Guzik 			    file, line, flags);
10651c6987ebSMateusz Guzik 			locked = true;
10661c6987ebSMateusz Guzik 		} else {
10671c6987ebSMateusz Guzik 			return (lockmgr_slock_hard(lk, flags, ilk, file, line,
10681c6987ebSMateusz Guzik 			    NULL));
10691c6987ebSMateusz Guzik 		}
10701c6987ebSMateusz Guzik 		break;
10711c6987ebSMateusz Guzik 	case LK_EXCLUSIVE:
10721c6987ebSMateusz Guzik 		if (LK_CAN_WITNESS(flags))
10731c6987ebSMateusz Guzik 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
10741c6987ebSMateusz Guzik 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
10751c6987ebSMateusz Guzik 			    ilk : NULL);
10761c6987ebSMateusz Guzik 		tid = (uintptr_t)curthread;
1077bdb6d824SMateusz Guzik 		if (lockmgr_read_value(lk) == LK_UNLOCKED &&
10781c6987ebSMateusz Guzik 		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
10791c6987ebSMateusz Guzik 			lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
10801c6987ebSMateusz Guzik 			    flags);
10811c6987ebSMateusz Guzik 			locked = true;
10821c6987ebSMateusz Guzik 		} else {
10831c6987ebSMateusz Guzik 			return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
10841c6987ebSMateusz Guzik 			    NULL));
10851c6987ebSMateusz Guzik 		}
10861c6987ebSMateusz Guzik 		break;
10871c6987ebSMateusz Guzik 	case LK_UPGRADE:
10881c6987ebSMateusz Guzik 	case LK_TRYUPGRADE:
10891c6987ebSMateusz Guzik 		return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
10901c6987ebSMateusz Guzik 	default:
10911c6987ebSMateusz Guzik 		break;
10921c6987ebSMateusz Guzik 	}
10931c6987ebSMateusz Guzik 	if (__predict_true(locked)) {
10941c6987ebSMateusz Guzik 		if (__predict_false(flags & LK_INTERLOCK)) {
10951c6987ebSMateusz Guzik 			class = LOCK_CLASS(ilk);
10961c6987ebSMateusz Guzik 			class->lc_unlock(ilk);
10971c6987ebSMateusz Guzik 		}
10981c6987ebSMateusz Guzik 		return (0);
10991c6987ebSMateusz Guzik 	} else {
11001c6987ebSMateusz Guzik 		return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
11011c6987ebSMateusz Guzik 		    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
11021c6987ebSMateusz Guzik 	}
11031c6987ebSMateusz Guzik }
11041c6987ebSMateusz Guzik 
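/*
 * Informal note on lockmgr_lock_flags() above: uncontended LK_SHARED
 * and LK_EXCLUSIVE requests are satisfied here with a single atomic
 * operation; contended cases, upgrades, and the remaining ops fall
 * back to the _hard() helpers or to __lockmgr_args().
 */
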
11051c6987ebSMateusz Guzik static __noinline int
11061c6987ebSMateusz Guzik lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
11071c6987ebSMateusz Guzik     const char *file, int line)
11081c6987ebSMateusz Guzik 
11091c6987ebSMateusz Guzik {
11101c6987ebSMateusz Guzik 	int wakeup_swapper = 0;
11111c6987ebSMateusz Guzik 
1112879e0604SMateusz Guzik 	if (KERNEL_PANICKED())
11131c6987ebSMateusz Guzik 		goto out;
11141c6987ebSMateusz Guzik 
11151c6987ebSMateusz Guzik 	wakeup_swapper = wakeupshlk(lk, file, line);
11161c6987ebSMateusz Guzik 
11171c6987ebSMateusz Guzik out:
11181c6987ebSMateusz Guzik 	lockmgr_exit(flags, ilk, wakeup_swapper);
11191c6987ebSMateusz Guzik 	return (0);
11201c6987ebSMateusz Guzik }
11211c6987ebSMateusz Guzik 
11221c6987ebSMateusz Guzik static __noinline int
11231c6987ebSMateusz Guzik lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags,
11241c6987ebSMateusz Guzik     struct lock_object *ilk, const char *file, int line)
11251c6987ebSMateusz Guzik {
11261c6987ebSMateusz Guzik 	uintptr_t tid, v;
11271c6987ebSMateusz Guzik 	int wakeup_swapper = 0;
11281c6987ebSMateusz Guzik 	u_int realexslp;
11291c6987ebSMateusz Guzik 	int queue;
11301c6987ebSMateusz Guzik 
1131879e0604SMateusz Guzik 	if (KERNEL_PANICKED())
11321c6987ebSMateusz Guzik 		goto out;
11331c6987ebSMateusz Guzik 
11341c6987ebSMateusz Guzik 	tid = (uintptr_t)curthread;
11351c6987ebSMateusz Guzik 
11361c6987ebSMateusz Guzik 	/*
11371c6987ebSMateusz Guzik 	 * As a first option, treat the lock as if it has
11381c6987ebSMateusz Guzik 	 * no waiters.
11391c6987ebSMateusz Guzik 	 * Fix up the tid variable if the lock has been disowned.
11401c6987ebSMateusz Guzik 	 */
11411c6987ebSMateusz Guzik 	if (LK_HOLDER(x) == LK_KERNPROC)
11421c6987ebSMateusz Guzik 		tid = LK_KERNPROC;
11431c6987ebSMateusz Guzik 
11441c6987ebSMateusz Guzik 	/*
11451c6987ebSMateusz Guzik 	 * The lock is held in exclusive mode.
11461c6987ebSMateusz Guzik 	 * If the lock is also recursed, unrecurse it.
11471c6987ebSMateusz Guzik 	 */
11484aff9f5dSMateusz Guzik 	if (lockmgr_recursed_v(x)) {
11491c6987ebSMateusz Guzik 		LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
11501c6987ebSMateusz Guzik 		lk->lk_recurse--;
11514aff9f5dSMateusz Guzik 		if (lk->lk_recurse == 0)
11524aff9f5dSMateusz Guzik 			atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
11531c6987ebSMateusz Guzik 		goto out;
11541c6987ebSMateusz Guzik 	}
11551c6987ebSMateusz Guzik 	if (tid != LK_KERNPROC)
11565b699f16SMark Johnston 		LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
11575b699f16SMark Johnston 		    LOCKSTAT_WRITER);
11581c6987ebSMateusz Guzik 
115910391db5SMateusz Guzik 	if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
11601c6987ebSMateusz Guzik 		goto out;
11611c6987ebSMateusz Guzik 
11621c6987ebSMateusz Guzik 	sleepq_lock(&lk->lock_object);
1163bdb6d824SMateusz Guzik 	x = lockmgr_read_value(lk);
11641c6987ebSMateusz Guzik 	v = LK_UNLOCKED;
11651c6987ebSMateusz Guzik 
11661c6987ebSMateusz Guzik 	/*
11671c6987ebSMateusz Guzik 	 * If the lock has exclusive waiters, give them
11681c6987ebSMateusz Guzik 	 * preference in order to avoid a deadlock with the
11691c6987ebSMateusz Guzik 	 * shared waiters racing for the lock.
11701c6987ebSMateusz Guzik 	 * If interruptible sleeps left the exclusive queue
11711c6987ebSMateusz Guzik 	 * empty, avoid starvation of the threads sleeping
11721c6987ebSMateusz Guzik 	 * on the shared queue by giving them precedence
11731c6987ebSMateusz Guzik 	 * and clearing the exclusive waiters bit anyway.
11741c6987ebSMateusz Guzik 	 * Please note that the lk_exslpfail count may not
11751c6987ebSMateusz Guzik 	 * reflect the real number of waiters with the
11761c6987ebSMateusz Guzik 	 * LK_SLEEPFAIL flag on, because they may also be
11771c6987ebSMateusz Guzik 	 * using interruptible sleeps; lk_exslpfail is thus
11781c6987ebSMateusz Guzik 	 * only an upper bound, including the edge cases.
11801c6987ebSMateusz Guzik 	 */
11811c6987ebSMateusz Guzik 	MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
11821c6987ebSMateusz Guzik 	realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
11831c6987ebSMateusz Guzik 	if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1184eac22dd4SMateusz Guzik 		if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
11851c6987ebSMateusz Guzik 			lk->lk_exslpfail = 0;
11861c6987ebSMateusz Guzik 			queue = SQ_EXCLUSIVE_QUEUE;
11871c6987ebSMateusz Guzik 			v |= (x & LK_SHARED_WAITERS);
11881c6987ebSMateusz Guzik 		} else {
11891c6987ebSMateusz Guzik 			lk->lk_exslpfail = 0;
11901c6987ebSMateusz Guzik 			LOCK_LOG2(lk,
11911c6987ebSMateusz Guzik 			    "%s: %p has only LK_SLEEPFAIL sleepers",
11921c6987ebSMateusz Guzik 			    __func__, lk);
11931c6987ebSMateusz Guzik 			LOCK_LOG2(lk,
11941c6987ebSMateusz Guzik 			    "%s: %p waking up threads on the exclusive queue",
11951c6987ebSMateusz Guzik 			    __func__, lk);
11961c6987ebSMateusz Guzik 			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
11971c6987ebSMateusz Guzik 			    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
11981c6987ebSMateusz Guzik 			queue = SQ_SHARED_QUEUE;
11991c6987ebSMateusz Guzik 		}
12001c6987ebSMateusz Guzik 	} else {
12011c6987ebSMateusz Guzik 		/*
12021c6987ebSMateusz Guzik 		 * Exclusive waiters sleeping with LK_SLEEPFAIL
12031c6987ebSMateusz Guzik 		 * on and using interruptible sleeps/timeouts
12041c6987ebSMateusz Guzik 		 * may have left spurious lk_exslpfail counts
12051c6987ebSMateusz Guzik 		 * behind, so clean them up anyway.
12061c6987ebSMateusz Guzik 		 */
12071c6987ebSMateusz Guzik 		lk->lk_exslpfail = 0;
12081c6987ebSMateusz Guzik 		queue = SQ_SHARED_QUEUE;
12091c6987ebSMateusz Guzik 	}
12101c6987ebSMateusz Guzik 
12111c6987ebSMateusz Guzik 	LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
12121c6987ebSMateusz Guzik 	    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
12131c6987ebSMateusz Guzik 	    "exclusive");
12141c6987ebSMateusz Guzik 	atomic_store_rel_ptr(&lk->lk_lock, v);
12151c6987ebSMateusz Guzik 	wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
12161c6987ebSMateusz Guzik 	sleepq_release(&lk->lock_object);
12171c6987ebSMateusz Guzik 
12181c6987ebSMateusz Guzik out:
12191c6987ebSMateusz Guzik 	lockmgr_exit(flags, ilk, wakeup_swapper);
12201c6987ebSMateusz Guzik 	return (0);
12211c6987ebSMateusz Guzik }
12221c6987ebSMateusz Guzik 
1223c8b29d12SMateusz Guzik /*
1224c8b29d12SMateusz Guzik  * Lightweight entry points for common operations.
1225c8b29d12SMateusz Guzik  *
1226c8b29d12SMateusz Guzik  * Functionality is similar to sx locks, in that none of the additional lockmgr
1227c8b29d12SMateusz Guzik  * features are supported. To be clear, these are NOT supported:
1228c8b29d12SMateusz Guzik  * 1. shared locking disablement
1229c8b29d12SMateusz Guzik  * 2. returning with an error after sleep
1230c8b29d12SMateusz Guzik  * 3. unlocking the interlock
1231c8b29d12SMateusz Guzik  *
1232c1b57fa7SMateusz Guzik  * If in doubt, use lockmgr_lock_flags.
1233c8b29d12SMateusz Guzik  */
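/*
 * Illustrative sketch (assumptions: a hypothetical "struct foo" with an
 * embedded lockmgr lock initialized via lockinit(9)): how a caller might
 * use these lightweight entry points directly.  Most consumers would go
 * through the usual lockmgr() interface instead.
 *
 *	struct foo {
 *		struct lock	f_lock;
 *	};
 *
 *	static void
 *	foo_read(struct foo *fp)
 *	{
 *
 *		(void)lockmgr_slock(&fp->f_lock, LK_SHARED,
 *		    __FILE__, __LINE__);
 *		(read-only access to *fp goes here)
 *		(void)lockmgr_unlock(&fp->f_lock);
 *	}
 */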
1234c8b29d12SMateusz Guzik int
1235c8b29d12SMateusz Guzik lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
1236c8b29d12SMateusz Guzik {
1237c8b29d12SMateusz Guzik 	uintptr_t x;
1238c8b29d12SMateusz Guzik 
1239c8b29d12SMateusz Guzik 	MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
1240c8b29d12SMateusz Guzik 	MPASS((flags & LK_INTERLOCK) == 0);
1241c8b29d12SMateusz Guzik 	MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
1242c8b29d12SMateusz Guzik 
1243c8b29d12SMateusz Guzik 	if (LK_CAN_WITNESS(flags))
1244c8b29d12SMateusz Guzik 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1245c8b29d12SMateusz Guzik 		    file, line, NULL);
124631ad4050SMateusz Guzik 	x = lockmgr_read_value(lk);
1247c8b29d12SMateusz Guzik 	if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
1248c8b29d12SMateusz Guzik 		lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
1249c8b29d12SMateusz Guzik 		return (0);
1250c8b29d12SMateusz Guzik 	}
1251c8b29d12SMateusz Guzik 
125231ad4050SMateusz Guzik 	return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1253c8b29d12SMateusz Guzik }
1254c8b29d12SMateusz Guzik 
1255c8b29d12SMateusz Guzik int
1256c8b29d12SMateusz Guzik lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
1257c8b29d12SMateusz Guzik {
1258c8b29d12SMateusz Guzik 	uintptr_t tid;
1259c8b29d12SMateusz Guzik 
1260c8b29d12SMateusz Guzik 	MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
1261c8b29d12SMateusz Guzik 	MPASS((flags & LK_INTERLOCK) == 0);
1262c8b29d12SMateusz Guzik 
1263c8b29d12SMateusz Guzik 	if (LK_CAN_WITNESS(flags))
1264c8b29d12SMateusz Guzik 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1265c8b29d12SMateusz Guzik 		    LOP_EXCLUSIVE, file, line, NULL);
1266c8b29d12SMateusz Guzik 	tid = (uintptr_t)curthread;
1267c8b29d12SMateusz Guzik 	if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1268c8b29d12SMateusz Guzik 		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1269c8b29d12SMateusz Guzik 		    flags);
1270c8b29d12SMateusz Guzik 		return (0);
1271c8b29d12SMateusz Guzik 	}
1272c8b29d12SMateusz Guzik 
127331ad4050SMateusz Guzik 	return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1274c8b29d12SMateusz Guzik }
1275c8b29d12SMateusz Guzik 
1276c8b29d12SMateusz Guzik int
1277c8b29d12SMateusz Guzik lockmgr_unlock(struct lock *lk)
1278c8b29d12SMateusz Guzik {
1279c8b29d12SMateusz Guzik 	uintptr_t x, tid;
1280c8b29d12SMateusz Guzik 	const char *file;
1281c8b29d12SMateusz Guzik 	int line;
1282c8b29d12SMateusz Guzik 
1283c8b29d12SMateusz Guzik 	file = __FILE__;
1284c8b29d12SMateusz Guzik 	line = __LINE__;
1285c8b29d12SMateusz Guzik 
1286c8b29d12SMateusz Guzik 	_lockmgr_assert(lk, KA_LOCKED, file, line);
1287bdb6d824SMateusz Guzik 	x = lockmgr_read_value(lk);
1288c8b29d12SMateusz Guzik 	if (__predict_true((x & LK_SHARE) != 0)) {
1289c8b29d12SMateusz Guzik 		lockmgr_note_shared_release(lk, file, line);
1290c00115f1SMateusz Guzik 		if (lockmgr_sunlock_try(lk, &x)) {
1291c00115f1SMateusz Guzik 			LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1292c8b29d12SMateusz Guzik 		} else {
1293c8b29d12SMateusz Guzik 			return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1294c8b29d12SMateusz Guzik 		}
1295c8b29d12SMateusz Guzik 	} else {
1296c8b29d12SMateusz Guzik 		tid = (uintptr_t)curthread;
1297c00115f1SMateusz Guzik 		lockmgr_note_exclusive_release(lk, file, line);
12984aff9f5dSMateusz Guzik 		if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1299c00115f1SMateusz Guzik 			LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
1300c8b29d12SMateusz Guzik 		} else {
1301c8b29d12SMateusz Guzik 			return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1302c8b29d12SMateusz Guzik 		}
1303c8b29d12SMateusz Guzik 	}
1304c8b29d12SMateusz Guzik 	return (0);
1305c8b29d12SMateusz Guzik }
1306c8b29d12SMateusz Guzik 
13071c6987ebSMateusz Guzik int
13081c6987ebSMateusz Guzik __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
13091c6987ebSMateusz Guzik     const char *wmesg, int pri, int timo, const char *file, int line)
13101c6987ebSMateusz Guzik {
13111c6987ebSMateusz Guzik 	GIANT_DECLARE;
13121c6987ebSMateusz Guzik 	struct lockmgr_wait lwa;
13131c6987ebSMateusz Guzik 	struct lock_class *class;
13141c6987ebSMateusz Guzik 	const char *iwmesg;
13151c6987ebSMateusz Guzik 	uintptr_t tid, v, x;
13161c6987ebSMateusz Guzik 	u_int op, realexslp;
13171c6987ebSMateusz Guzik 	int error, ipri, itimo, queue, wakeup_swapper;
13181c6987ebSMateusz Guzik #ifdef LOCK_PROFILING
13191c6987ebSMateusz Guzik 	uint64_t waittime = 0;
13201c6987ebSMateusz Guzik 	int contested = 0;
13211c6987ebSMateusz Guzik #endif
13221c6987ebSMateusz Guzik 
1323879e0604SMateusz Guzik 	if (KERNEL_PANICKED())
1324b543c98cSConrad Meyer 		return (0);
1325b543c98cSConrad Meyer 
13261c6987ebSMateusz Guzik 	error = 0;
13271c6987ebSMateusz Guzik 	tid = (uintptr_t)curthread;
13281c6987ebSMateusz Guzik 	op = (flags & LK_TYPE_MASK);
13291c6987ebSMateusz Guzik 	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
13301c6987ebSMateusz Guzik 	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
13311c6987ebSMateusz Guzik 	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
13321c6987ebSMateusz Guzik 
13331c6987ebSMateusz Guzik 	lwa.iwmesg = iwmesg;
13341c6987ebSMateusz Guzik 	lwa.ipri = ipri;
13351c6987ebSMateusz Guzik 	lwa.itimo = itimo;
13361c6987ebSMateusz Guzik 
13371c6987ebSMateusz Guzik 	MPASS((flags & ~LK_TOTAL_MASK) == 0);
13381c6987ebSMateusz Guzik 	KASSERT((op & (op - 1)) == 0,
13391c6987ebSMateusz Guzik 	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
13401c6987ebSMateusz Guzik 	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
13411c6987ebSMateusz Guzik 	    (op != LK_DOWNGRADE && op != LK_RELEASE),
13421c6987ebSMateusz Guzik 	    ("%s: Invalid flags for the requested operation @ %s:%d",
13431c6987ebSMateusz Guzik 	    __func__, file, line));
13441c6987ebSMateusz Guzik 	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
13451c6987ebSMateusz Guzik 	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
13461c6987ebSMateusz Guzik 	    __func__, file, line));
13471c6987ebSMateusz Guzik 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
13481c6987ebSMateusz Guzik 	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
13491c6987ebSMateusz Guzik 	    lk->lock_object.lo_name, file, line));
13501c6987ebSMateusz Guzik 
13511c6987ebSMateusz Guzik 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
13521c6987ebSMateusz Guzik 
13531c6987ebSMateusz Guzik 	if (lk->lock_object.lo_flags & LK_NOSHARE) {
13541c6987ebSMateusz Guzik 		switch (op) {
13551c6987ebSMateusz Guzik 		case LK_SHARED:
13561c6987ebSMateusz Guzik 			op = LK_EXCLUSIVE;
13571c6987ebSMateusz Guzik 			break;
13581c6987ebSMateusz Guzik 		case LK_UPGRADE:
13591c6987ebSMateusz Guzik 		case LK_TRYUPGRADE:
13601c6987ebSMateusz Guzik 		case LK_DOWNGRADE:
13611c6987ebSMateusz Guzik 			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
13621c6987ebSMateusz Guzik 			    file, line);
13631c6987ebSMateusz Guzik 			if (flags & LK_INTERLOCK)
13641c6987ebSMateusz Guzik 				class->lc_unlock(ilk);
13651c6987ebSMateusz Guzik 			return (0);
13661c6987ebSMateusz Guzik 		}
13671c6987ebSMateusz Guzik 	}
13681c6987ebSMateusz Guzik 
13691c6987ebSMateusz Guzik 	wakeup_swapper = 0;
13701c6987ebSMateusz Guzik 	switch (op) {
13711c6987ebSMateusz Guzik 	case LK_SHARED:
13721c6987ebSMateusz Guzik 		return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
13731c6987ebSMateusz Guzik 		break;
13741c6987ebSMateusz Guzik 	case LK_UPGRADE:
13751c6987ebSMateusz Guzik 	case LK_TRYUPGRADE:
13761c6987ebSMateusz Guzik 		return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
13771c6987ebSMateusz Guzik 		break;
13781c6987ebSMateusz Guzik 	case LK_EXCLUSIVE:
13791c6987ebSMateusz Guzik 		return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1380047dd67eSAttilio Rao 		break;
1381047dd67eSAttilio Rao 	case LK_DOWNGRADE:
13821c7d98d0SAttilio Rao 		_lockmgr_assert(lk, KA_XLOCKED, file, line);
1383e5f94314SAttilio Rao 		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
13841c7d98d0SAttilio Rao 
13851c7d98d0SAttilio Rao 		/*
13861c7d98d0SAttilio Rao 		 * Panic if the lock is recursed.
13871c7d98d0SAttilio Rao 		 */
13881c7d98d0SAttilio Rao 		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
13891c7d98d0SAttilio Rao 			if (flags & LK_INTERLOCK)
13901c7d98d0SAttilio Rao 				class->lc_unlock(ilk);
13911c7d98d0SAttilio Rao 			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
13921c7d98d0SAttilio Rao 			    __func__, iwmesg, file, line);
13931c7d98d0SAttilio Rao 		}
1394e5f94314SAttilio Rao 		TD_SLOCKS_INC(curthread);
1395047dd67eSAttilio Rao 
1396047dd67eSAttilio Rao 		/*
1397047dd67eSAttilio Rao 		 * In order to preserve the waiters flags, just spin.
1398047dd67eSAttilio Rao 		 */
1399047dd67eSAttilio Rao 		for (;;) {
1400bdb6d824SMateusz Guzik 			x = lockmgr_read_value(lk);
1401651175c9SAttilio Rao 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1402651175c9SAttilio Rao 			x &= LK_ALL_WAITERS;
1403047dd67eSAttilio Rao 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1404e5f94314SAttilio Rao 			    LK_SHARERS_LOCK(1) | x))
1405047dd67eSAttilio Rao 				break;
1406047dd67eSAttilio Rao 			cpu_spinwait();
1407047dd67eSAttilio Rao 		}
14085b699f16SMark Johnston 		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
14095b699f16SMark Johnston 		LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
1410047dd67eSAttilio Rao 		break;
1411047dd67eSAttilio Rao 	case LK_RELEASE:
1412047dd67eSAttilio Rao 		_lockmgr_assert(lk, KA_LOCKED, file, line);
1413bdb6d824SMateusz Guzik 		x = lockmgr_read_value(lk);
1414047dd67eSAttilio Rao 
14151c6987ebSMateusz Guzik 		if (__predict_true((x & LK_SHARE) != 0)) {
1416c00115f1SMateusz Guzik 			lockmgr_note_shared_release(lk, file, line);
14171c6987ebSMateusz Guzik 			return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1418047dd67eSAttilio Rao 		} else {
1419c00115f1SMateusz Guzik 			lockmgr_note_exclusive_release(lk, file, line);
14201c6987ebSMateusz Guzik 			return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
14212028867dSAttilio Rao 		}
1422047dd67eSAttilio Rao 		break;
1423047dd67eSAttilio Rao 	case LK_DRAIN:
1424e5f94314SAttilio Rao 		if (LK_CAN_WITNESS(flags))
1425e5f94314SAttilio Rao 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
142624150d37SJohn Baldwin 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
142724150d37SJohn Baldwin 			    ilk : NULL);
1428047dd67eSAttilio Rao 
1429047dd67eSAttilio Rao 		/*
143096f1567fSKonstantin Belousov 		 * Trying to drain a lock we already own will result in a
1431047dd67eSAttilio Rao 		 * deadlock.
1432047dd67eSAttilio Rao 		 */
1433047dd67eSAttilio Rao 		if (lockmgr_xlocked(lk)) {
1434047dd67eSAttilio Rao 			if (flags & LK_INTERLOCK)
1435047dd67eSAttilio Rao 				class->lc_unlock(ilk);
1436047dd67eSAttilio Rao 			panic("%s: draining %s with the lock held @ %s:%d\n",
1437047dd67eSAttilio Rao 			    __func__, iwmesg, file, line);
1438047dd67eSAttilio Rao 		}
1439047dd67eSAttilio Rao 
1440fc4f686dSMateusz Guzik 		for (;;) {
1441fc4f686dSMateusz Guzik 			if (lk->lk_lock == LK_UNLOCKED &&
1442fc4f686dSMateusz Guzik 			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1443fc4f686dSMateusz Guzik 				break;
1444fc4f686dSMateusz Guzik 
1445f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
1446f5f9340bSFabien Thomas 			PMC_SOFT_CALL( , , lock, failed);
1447f5f9340bSFabien Thomas #endif
14486a467cc5SMateusz Guzik 			lock_profile_obtain_lock_failed(&lk->lock_object, false,
1449047dd67eSAttilio Rao 			    &contested, &waittime);
1450047dd67eSAttilio Rao 
1451047dd67eSAttilio Rao 			/*
1452047dd67eSAttilio Rao 			 * If the operation is not expected to sleep, just
1453047dd67eSAttilio Rao 			 * give up and return.
1454047dd67eSAttilio Rao 			 */
1455047dd67eSAttilio Rao 			if (LK_TRYOP(flags)) {
1456047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p fails the try operation",
1457047dd67eSAttilio Rao 				    __func__, lk);
1458047dd67eSAttilio Rao 				error = EBUSY;
1459047dd67eSAttilio Rao 				break;
1460047dd67eSAttilio Rao 			}
1461047dd67eSAttilio Rao 
1462047dd67eSAttilio Rao 			/*
1463047dd67eSAttilio Rao 			 * Acquire the sleepqueue chain lock because we
1464047dd67eSAttilio Rao 			 * will probably need to manipulate the waiters flags.
1465047dd67eSAttilio Rao 			 */
1466047dd67eSAttilio Rao 			sleepq_lock(&lk->lock_object);
1467bdb6d824SMateusz Guzik 			x = lockmgr_read_value(lk);
1468047dd67eSAttilio Rao 
1469047dd67eSAttilio Rao 			/*
1470047dd67eSAttilio Rao 			 * If the lock has been released while we spun on
1471047dd67eSAttilio Rao 			 * the sleepqueue chain lock, just try again.
1472047dd67eSAttilio Rao 			 */
1473047dd67eSAttilio Rao 			if (x == LK_UNLOCKED) {
1474047dd67eSAttilio Rao 				sleepq_release(&lk->lock_object);
1475047dd67eSAttilio Rao 				continue;
1476047dd67eSAttilio Rao 			}
1477047dd67eSAttilio Rao 
1478651175c9SAttilio Rao 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1479651175c9SAttilio Rao 			if ((x & ~v) == LK_UNLOCKED) {
1480651175c9SAttilio Rao 				v = (x & ~LK_EXCLUSIVE_SPINNERS);
14812028867dSAttilio Rao 
14822028867dSAttilio Rao 				/*
14832028867dSAttilio Rao 				 * If interruptible sleeps left the exclusive
14842028867dSAttilio Rao 				 * queue empty, avoid starvation of the
14852028867dSAttilio Rao 				 * threads sleeping on the shared queue by
14862028867dSAttilio Rao 				 * giving them precedence and clearing the
14872028867dSAttilio Rao 				 * exclusive waiters bit anyway.
1488c636ba83SAttilio Rao 				 * Please note that the lk_exslpfail count
1489c636ba83SAttilio Rao 				 * may not reflect the real number of waiters
1490c636ba83SAttilio Rao 				 * with the LK_SLEEPFAIL flag on, because they
1491e3043798SPedro F. Giffuni 				 * may also be using interruptible sleeps;
1492aab9c8c2SAttilio Rao 				 * lk_exslpfail is thus only an upper bound,
1493aab9c8c2SAttilio Rao 				 * including the edge cases.
14952028867dSAttilio Rao 				 */
1496047dd67eSAttilio Rao 				if (v & LK_EXCLUSIVE_WAITERS) {
1497047dd67eSAttilio Rao 					queue = SQ_EXCLUSIVE_QUEUE;
1498047dd67eSAttilio Rao 					v &= ~LK_EXCLUSIVE_WAITERS;
1499047dd67eSAttilio Rao 				} else {
15009dbf7a62SAttilio Rao 					/*
15019dbf7a62SAttilio Rao 					 * Exclusive waiters sleeping with
15029dbf7a62SAttilio Rao 					 * LK_SLEEPFAIL on and using
15039dbf7a62SAttilio Rao 					 * interruptible sleeps/timeouts may
15049dbf7a62SAttilio Rao 					 * have left spurious lk_exslpfail
15059dbf7a62SAttilio Rao 					 * counts behind, so clean them up.
15069dbf7a62SAttilio Rao 					 */
1507047dd67eSAttilio Rao 					MPASS(v & LK_SHARED_WAITERS);
15089dbf7a62SAttilio Rao 					lk->lk_exslpfail = 0;
1509047dd67eSAttilio Rao 					queue = SQ_SHARED_QUEUE;
1510047dd67eSAttilio Rao 					v &= ~LK_SHARED_WAITERS;
1511047dd67eSAttilio Rao 				}
15122028867dSAttilio Rao 				if (queue == SQ_EXCLUSIVE_QUEUE) {
15132028867dSAttilio Rao 					realexslp =
15142028867dSAttilio Rao 					    sleepq_sleepcnt(&lk->lock_object,
15152028867dSAttilio Rao 					    SQ_EXCLUSIVE_QUEUE);
15162028867dSAttilio Rao 					if (lk->lk_exslpfail >= realexslp) {
15172028867dSAttilio Rao 						lk->lk_exslpfail = 0;
15182028867dSAttilio Rao 						queue = SQ_SHARED_QUEUE;
15192028867dSAttilio Rao 						v &= ~LK_SHARED_WAITERS;
15202028867dSAttilio Rao 						if (realexslp != 0) {
15212028867dSAttilio Rao 							LOCK_LOG2(lk,
15222028867dSAttilio Rao 					"%s: %p has only LK_SLEEPFAIL sleepers",
15232028867dSAttilio Rao 							    __func__, lk);
15242028867dSAttilio Rao 							LOCK_LOG2(lk,
15252028867dSAttilio Rao 			"%s: %p waking up threads on the exclusive queue",
15262028867dSAttilio Rao 							    __func__, lk);
15272028867dSAttilio Rao 							wakeup_swapper =
15282028867dSAttilio Rao 							    sleepq_broadcast(
15292028867dSAttilio Rao 							    &lk->lock_object,
15302028867dSAttilio Rao 							    SLEEPQ_LK, 0,
15312028867dSAttilio Rao 							    SQ_EXCLUSIVE_QUEUE);
15322028867dSAttilio Rao 						}
15332028867dSAttilio Rao 					} else
15342028867dSAttilio Rao 						lk->lk_exslpfail = 0;
15352028867dSAttilio Rao 				}
1536047dd67eSAttilio Rao 				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1537047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
1538047dd67eSAttilio Rao 					continue;
1539047dd67eSAttilio Rao 				}
1540047dd67eSAttilio Rao 				LOCK_LOG3(lk,
1541047dd67eSAttilio Rao 				"%s: %p waking up all threads on the %s queue",
1542047dd67eSAttilio Rao 				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1543047dd67eSAttilio Rao 				    "shared" : "exclusive");
1544814f26daSJohn Baldwin 				wakeup_swapper |= sleepq_broadcast(
1545da7bbd2cSJohn Baldwin 				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1546047dd67eSAttilio Rao 
1547047dd67eSAttilio Rao 				/*
1548047dd67eSAttilio Rao 				 * If shared waiters have been woken up, we
1549047dd67eSAttilio Rao 				 * need to wait for one of them to acquire
1550047dd67eSAttilio Rao 				 * the lock before setting the exclusive
1551047dd67eSAttilio Rao 				 * waiters flag in order to avoid a deadlock.
1552047dd67eSAttilio Rao 				 */
1553047dd67eSAttilio Rao 				if (queue == SQ_SHARED_QUEUE) {
1554047dd67eSAttilio Rao 					for (v = lk->lk_lock;
1555047dd67eSAttilio Rao 					    (v & LK_SHARE) && !LK_SHARERS(v);
1556047dd67eSAttilio Rao 					    v = lk->lk_lock)
1557047dd67eSAttilio Rao 						cpu_spinwait();
1558047dd67eSAttilio Rao 				}
1559047dd67eSAttilio Rao 			}
1560047dd67eSAttilio Rao 
1561047dd67eSAttilio Rao 			/*
1562047dd67eSAttilio Rao 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1563047dd67eSAttilio Rao 			 * fail, loop back and retry.
1564047dd67eSAttilio Rao 			 */
1565047dd67eSAttilio Rao 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1566047dd67eSAttilio Rao 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1567047dd67eSAttilio Rao 				    x | LK_EXCLUSIVE_WAITERS)) {
1568047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
1569047dd67eSAttilio Rao 					continue;
1570047dd67eSAttilio Rao 				}
1571047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1572047dd67eSAttilio Rao 				    __func__, lk);
1573047dd67eSAttilio Rao 			}
1574047dd67eSAttilio Rao 
1575047dd67eSAttilio Rao 			/*
1576047dd67eSAttilio Rao 			 * As long as we have been unable to acquire the
1577047dd67eSAttilio Rao 			 * exclusive lock and the exclusive waiters flag
1578047dd67eSAttilio Rao 			 * is set, we will sleep.
1579047dd67eSAttilio Rao 			 */
1580047dd67eSAttilio Rao 			if (flags & LK_INTERLOCK) {
1581047dd67eSAttilio Rao 				class->lc_unlock(ilk);
1582047dd67eSAttilio Rao 				flags &= ~LK_INTERLOCK;
1583047dd67eSAttilio Rao 			}
1584e5f94314SAttilio Rao 			GIANT_SAVE();
1585047dd67eSAttilio Rao 			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1586047dd67eSAttilio Rao 			    SQ_EXCLUSIVE_QUEUE);
1587047dd67eSAttilio Rao 			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1588e5f94314SAttilio Rao 			GIANT_RESTORE();
1589047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1590047dd67eSAttilio Rao 			    __func__, lk);
1591047dd67eSAttilio Rao 		}
1592047dd67eSAttilio Rao 
1593047dd67eSAttilio Rao 		if (error == 0) {
1594047dd67eSAttilio Rao 			lock_profile_obtain_lock_success(&lk->lock_object,
15956a467cc5SMateusz Guzik 			    false, contested, waittime, file, line);
1596047dd67eSAttilio Rao 			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1597047dd67eSAttilio Rao 			    lk->lk_recurse, file, line);
1598e5f94314SAttilio Rao 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1599e5f94314SAttilio Rao 			    LK_TRYWIT(flags), file, line);
1600047dd67eSAttilio Rao 			TD_LOCKS_INC(curthread);
1601047dd67eSAttilio Rao 			STACK_SAVE(lk);
1602047dd67eSAttilio Rao 		}
1603047dd67eSAttilio Rao 		break;
1604047dd67eSAttilio Rao 	default:
1605047dd67eSAttilio Rao 		if (flags & LK_INTERLOCK)
1606047dd67eSAttilio Rao 			class->lc_unlock(ilk);
1607047dd67eSAttilio Rao 		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1608047dd67eSAttilio Rao 	}
1609047dd67eSAttilio Rao 
1610047dd67eSAttilio Rao 	if (flags & LK_INTERLOCK)
1611047dd67eSAttilio Rao 		class->lc_unlock(ilk);
1612da7bbd2cSJohn Baldwin 	if (wakeup_swapper)
1613da7bbd2cSJohn Baldwin 		kick_proc0();
1614047dd67eSAttilio Rao 
1615047dd67eSAttilio Rao 	return (error);
1616047dd67eSAttilio Rao }
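
/*
 * Illustrative sketch, referring to the LK_DRAIN path above: draining is
 * typically the last operation performed on a lock before tearing it
 * down.  A hypothetical teardown might read:
 *
 *	lockmgr(&lk, LK_DRAIN, NULL);	(wait out all current holders)
 *	lockdestroy(&lk);		(now safe to decommission the lock)
 */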
1617047dd67eSAttilio Rao 
1618d7a7e179SAttilio Rao void
1619047dd67eSAttilio Rao _lockmgr_disown(struct lock *lk, const char *file, int line)
1620047dd67eSAttilio Rao {
1621047dd67eSAttilio Rao 	uintptr_t tid, x;
1622047dd67eSAttilio Rao 
162335370593SAndriy Gapon 	if (SCHEDULER_STOPPED())
162435370593SAndriy Gapon 		return;
162535370593SAndriy Gapon 
1626047dd67eSAttilio Rao 	tid = (uintptr_t)curthread;
16271c7d98d0SAttilio Rao 	_lockmgr_assert(lk, KA_XLOCKED, file, line);
16281c7d98d0SAttilio Rao 
16291c7d98d0SAttilio Rao 	/*
16301c7d98d0SAttilio Rao 	 * Panic if the lock is recursed.
16311c7d98d0SAttilio Rao 	 */
16321c7d98d0SAttilio Rao 	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
16331c7d98d0SAttilio Rao 		panic("%s: disown a recursed lockmgr @ %s:%d\n",
16341c7d98d0SAttilio Rao 		    __func__,  file, line);
1635047dd67eSAttilio Rao 
1636047dd67eSAttilio Rao 	/*
163796f1567fSKonstantin Belousov 	 * If the owner is already LK_KERNPROC, just skip the whole operation.
1638047dd67eSAttilio Rao 	 */
1639047dd67eSAttilio Rao 	if (LK_HOLDER(lk->lk_lock) != tid)
1640047dd67eSAttilio Rao 		return;
16416a467cc5SMateusz Guzik 	lock_profile_release_lock(&lk->lock_object, false);
16425b699f16SMark Johnston 	LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1643e5f94314SAttilio Rao 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1644e5f94314SAttilio Rao 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1645e5f94314SAttilio Rao 	TD_LOCKS_DEC(curthread);
1646337c5ff4SAttilio Rao 	STACK_SAVE(lk);
1647047dd67eSAttilio Rao 
1648047dd67eSAttilio Rao 	/*
1649047dd67eSAttilio Rao 	 * In order to preserve the waiters flags, just spin.
1650047dd67eSAttilio Rao 	 */
1651047dd67eSAttilio Rao 	for (;;) {
1652bdb6d824SMateusz Guzik 		x = lockmgr_read_value(lk);
1653651175c9SAttilio Rao 		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1654651175c9SAttilio Rao 		x &= LK_ALL_WAITERS;
165522dd228dSAttilio Rao 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1656e5f94314SAttilio Rao 		    LK_KERNPROC | x))
1657047dd67eSAttilio Rao 			return;
1658047dd67eSAttilio Rao 		cpu_spinwait();
1659047dd67eSAttilio Rao 	}
1660047dd67eSAttilio Rao }
1661047dd67eSAttilio Rao 
1662047dd67eSAttilio Rao void
1663d576deedSPawel Jakub Dawidek lockmgr_printinfo(const struct lock *lk)
1664d7a7e179SAttilio Rao {
1665d7a7e179SAttilio Rao 	struct thread *td;
1666047dd67eSAttilio Rao 	uintptr_t x;
1667d7a7e179SAttilio Rao 
1668047dd67eSAttilio Rao 	if (lk->lk_lock == LK_UNLOCKED)
1669047dd67eSAttilio Rao 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1670047dd67eSAttilio Rao 	else if (lk->lk_lock & LK_SHARE)
1671047dd67eSAttilio Rao 		printf("lock type %s: SHARED (count %ju)\n",
1672047dd67eSAttilio Rao 		    lk->lock_object.lo_name,
1673047dd67eSAttilio Rao 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1674047dd67eSAttilio Rao 	else {
1675047dd67eSAttilio Rao 		td = lockmgr_xholder(lk);
1676e64b4fa8SKonstantin Belousov 		if (td == (struct thread *)LK_KERNPROC)
1677e64b4fa8SKonstantin Belousov 			printf("lock type %s: EXCL by KERNPROC\n",
1678e64b4fa8SKonstantin Belousov 			    lk->lock_object.lo_name);
1679e64b4fa8SKonstantin Belousov 		else
16802573ea5fSIvan Voras 			printf("lock type %s: EXCL by thread %p "
1681e64b4fa8SKonstantin Belousov 			    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1682e64b4fa8SKonstantin Belousov 			    td, td->td_proc->p_pid, td->td_proc->p_comm,
1683e64b4fa8SKonstantin Belousov 			    td->td_tid);
1684d7a7e179SAttilio Rao 	}
1685d7a7e179SAttilio Rao 
1686047dd67eSAttilio Rao 	x = lk->lk_lock;
1687047dd67eSAttilio Rao 	if (x & LK_EXCLUSIVE_WAITERS)
1688047dd67eSAttilio Rao 		printf(" with exclusive waiters pending\n");
1689047dd67eSAttilio Rao 	if (x & LK_SHARED_WAITERS)
1690047dd67eSAttilio Rao 		printf(" with shared waiters pending\n");
1691651175c9SAttilio Rao 	if (x & LK_EXCLUSIVE_SPINNERS)
1692651175c9SAttilio Rao 		printf(" with exclusive spinners pending\n");
1693047dd67eSAttilio Rao 
1694047dd67eSAttilio Rao 	STACK_PRINT(lk);
1695047dd67eSAttilio Rao }
1696047dd67eSAttilio Rao 
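/*
 * Illustrative note: lockstatus() classifies the lock state from the
 * perspective of the calling thread.  A hypothetical caller might
 * interpret the return value as follows:
 *
 *	switch (lockstatus(&lk)) {
 *	case LK_EXCLUSIVE:	(held exclusively by curthread, or disowned
 *				 to LK_KERNPROC)
 *	case LK_EXCLOTHER:	(held exclusively by another thread)
 *	case LK_SHARED:		(held in shared mode)
 *	case 0:			(unlocked)
 *	}
 */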
169799448ed1SJohn Dyson int
1698d576deedSPawel Jakub Dawidek lockstatus(const struct lock *lk)
169999448ed1SJohn Dyson {
1700047dd67eSAttilio Rao 	uintptr_t v, x;
1701047dd67eSAttilio Rao 	int ret;
170299448ed1SJohn Dyson 
1703047dd67eSAttilio Rao 	ret = LK_SHARED;
1704bdb6d824SMateusz Guzik 	x = lockmgr_read_value(lk);
1705047dd67eSAttilio Rao 	v = LK_HOLDER(x);
17060e9eb108SAttilio Rao 
1707047dd67eSAttilio Rao 	if ((x & LK_SHARE) == 0) {
1708047dd67eSAttilio Rao 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1709047dd67eSAttilio Rao 			ret = LK_EXCLUSIVE;
17106bdfe06aSEivind Eklund 		else
1711047dd67eSAttilio Rao 			ret = LK_EXCLOTHER;
1712047dd67eSAttilio Rao 	} else if (x == LK_UNLOCKED)
1713047dd67eSAttilio Rao 		ret = 0;
171499448ed1SJohn Dyson 
1715047dd67eSAttilio Rao 	return (ret);
171653bf4bb2SPeter Wemm }
1717be6847d7SJohn Baldwin 
171884887fa3SAttilio Rao #ifdef INVARIANT_SUPPORT
1719de5b1952SAlexander Leidinger 
1720de5b1952SAlexander Leidinger FEATURE(invariant_support,
1721de5b1952SAlexander Leidinger     "Support for modules compiled with INVARIANTS option");
1722de5b1952SAlexander Leidinger 
172384887fa3SAttilio Rao #ifndef INVARIANTS
172484887fa3SAttilio Rao #undef	_lockmgr_assert
172584887fa3SAttilio Rao #endif
172684887fa3SAttilio Rao 
172784887fa3SAttilio Rao void
1728d576deedSPawel Jakub Dawidek _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
172984887fa3SAttilio Rao {
173084887fa3SAttilio Rao 	int slocked = 0;
173184887fa3SAttilio Rao 
1732879e0604SMateusz Guzik 	if (KERNEL_PANICKED())
173384887fa3SAttilio Rao 		return;
173484887fa3SAttilio Rao 	switch (what) {
173584887fa3SAttilio Rao 	case KA_SLOCKED:
173684887fa3SAttilio Rao 	case KA_SLOCKED | KA_NOTRECURSED:
173784887fa3SAttilio Rao 	case KA_SLOCKED | KA_RECURSED:
173884887fa3SAttilio Rao 		slocked = 1;
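		/* FALLTHROUGH */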
173984887fa3SAttilio Rao 	case KA_LOCKED:
174084887fa3SAttilio Rao 	case KA_LOCKED | KA_NOTRECURSED:
174184887fa3SAttilio Rao 	case KA_LOCKED | KA_RECURSED:
1742e5f94314SAttilio Rao #ifdef WITNESS
1743e5f94314SAttilio Rao 
1744e5f94314SAttilio Rao 		/*
1745e5f94314SAttilio Rao 		 * We cannot trust WITNESS if the lock is held in exclusive
1746e5f94314SAttilio Rao 		 * mode and a call to lockmgr_disown() happened.
1747e5f94314SAttilio Rao 		 * Work around this by skipping the check if the lock is held
1748e5f94314SAttilio Rao 		 * exclusive mode even for the KA_LOCKED case.
1749e5f94314SAttilio Rao 		 */
1750e5f94314SAttilio Rao 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1751e5f94314SAttilio Rao 			witness_assert(&lk->lock_object, what, file, line);
1752e5f94314SAttilio Rao 			break;
1753e5f94314SAttilio Rao 		}
1754e5f94314SAttilio Rao #endif
1755047dd67eSAttilio Rao 		if (lk->lk_lock == LK_UNLOCKED ||
1756047dd67eSAttilio Rao 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1757047dd67eSAttilio Rao 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
175884887fa3SAttilio Rao 			panic("Lock %s not %slocked @ %s:%d\n",
1759047dd67eSAttilio Rao 			    lk->lock_object.lo_name, slocked ? "share" : "",
176084887fa3SAttilio Rao 			    file, line);
1761047dd67eSAttilio Rao 
1762047dd67eSAttilio Rao 		if ((lk->lk_lock & LK_SHARE) == 0) {
1763047dd67eSAttilio Rao 			if (lockmgr_recursed(lk)) {
176484887fa3SAttilio Rao 				if (what & KA_NOTRECURSED)
176584887fa3SAttilio Rao 					panic("Lock %s recursed @ %s:%d\n",
1766047dd67eSAttilio Rao 					    lk->lock_object.lo_name, file,
1767047dd67eSAttilio Rao 					    line);
176884887fa3SAttilio Rao 			} else if (what & KA_RECURSED)
176984887fa3SAttilio Rao 				panic("Lock %s not recursed @ %s:%d\n",
1770047dd67eSAttilio Rao 				    lk->lock_object.lo_name, file, line);
177184887fa3SAttilio Rao 		}
177284887fa3SAttilio Rao 		break;
177384887fa3SAttilio Rao 	case KA_XLOCKED:
177484887fa3SAttilio Rao 	case KA_XLOCKED | KA_NOTRECURSED:
177584887fa3SAttilio Rao 	case KA_XLOCKED | KA_RECURSED:
1776047dd67eSAttilio Rao 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
177784887fa3SAttilio Rao 			panic("Lock %s not exclusively locked @ %s:%d\n",
1778047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
1779047dd67eSAttilio Rao 		if (lockmgr_recursed(lk)) {
178084887fa3SAttilio Rao 			if (what & KA_NOTRECURSED)
178184887fa3SAttilio Rao 				panic("Lock %s recursed @ %s:%d\n",
1782047dd67eSAttilio Rao 				    lk->lock_object.lo_name, file, line);
178384887fa3SAttilio Rao 		} else if (what & KA_RECURSED)
178484887fa3SAttilio Rao 			panic("Lock %s not recursed @ %s:%d\n",
1785047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
178684887fa3SAttilio Rao 		break;
178784887fa3SAttilio Rao 	case KA_UNLOCKED:
1788047dd67eSAttilio Rao 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
178984887fa3SAttilio Rao 			panic("Lock %s exclusively locked @ %s:%d\n",
1790047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
179184887fa3SAttilio Rao 		break;
179284887fa3SAttilio Rao 	default:
1793047dd67eSAttilio Rao 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1794047dd67eSAttilio Rao 		    line);
179584887fa3SAttilio Rao 	}
179684887fa3SAttilio Rao }
1797047dd67eSAttilio Rao #endif
179884887fa3SAttilio Rao 
1799be6847d7SJohn Baldwin #ifdef DDB
1800462a7addSJohn Baldwin int
1801462a7addSJohn Baldwin lockmgr_chain(struct thread *td, struct thread **ownerp)
1802462a7addSJohn Baldwin {
1803fea73412SConrad Meyer 	const struct lock *lk;
1804462a7addSJohn Baldwin 
1805047dd67eSAttilio Rao 	lk = td->td_wchan;
1806462a7addSJohn Baldwin 
1807047dd67eSAttilio Rao 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1808462a7addSJohn Baldwin 		return (0);
1809047dd67eSAttilio Rao 	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1810047dd67eSAttilio Rao 	if (lk->lk_lock & LK_SHARE)
1811047dd67eSAttilio Rao 		db_printf("SHARED (count %ju)\n",
1812047dd67eSAttilio Rao 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1813047dd67eSAttilio Rao 	else
1814047dd67eSAttilio Rao 		db_printf("EXCL\n");
1815047dd67eSAttilio Rao 	*ownerp = lockmgr_xholder(lk);
1816462a7addSJohn Baldwin 
1817462a7addSJohn Baldwin 	return (1);
1818462a7addSJohn Baldwin }
1819462a7addSJohn Baldwin 
1820047dd67eSAttilio Rao static void
1821d576deedSPawel Jakub Dawidek db_show_lockmgr(const struct lock_object *lock)
1822be6847d7SJohn Baldwin {
1823be6847d7SJohn Baldwin 	struct thread *td;
1824d576deedSPawel Jakub Dawidek 	const struct lock *lk;
1825be6847d7SJohn Baldwin 
1826d576deedSPawel Jakub Dawidek 	lk = (const struct lock *)lock;
1827be6847d7SJohn Baldwin 
1828be6847d7SJohn Baldwin 	db_printf(" state: ");
1829047dd67eSAttilio Rao 	if (lk->lk_lock == LK_UNLOCKED)
1830be6847d7SJohn Baldwin 		db_printf("UNLOCKED\n");
1831047dd67eSAttilio Rao 	else if (lk->lk_lock & LK_SHARE)
1832047dd67eSAttilio Rao 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1833047dd67eSAttilio Rao 	else {
1834047dd67eSAttilio Rao 		td = lockmgr_xholder(lk);
1835047dd67eSAttilio Rao 		if (td == (struct thread *)LK_KERNPROC)
1836047dd67eSAttilio Rao 			db_printf("XLOCK: LK_KERNPROC\n");
1837047dd67eSAttilio Rao 		else
1838047dd67eSAttilio Rao 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1839047dd67eSAttilio Rao 			    td->td_tid, td->td_proc->p_pid,
1840047dd67eSAttilio Rao 			    td->td_proc->p_comm);
1841047dd67eSAttilio Rao 		if (lockmgr_recursed(lk))
1842047dd67eSAttilio Rao 			db_printf(" recursed: %d\n", lk->lk_recurse);
1843047dd67eSAttilio Rao 	}
1844047dd67eSAttilio Rao 	db_printf(" waiters: ");
1845047dd67eSAttilio Rao 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1846047dd67eSAttilio Rao 	case LK_SHARED_WAITERS:
1847047dd67eSAttilio Rao 		db_printf("shared\n");
1848e5023dd9SEdward Tomasz Napierala 		break;
1849047dd67eSAttilio Rao 	case LK_EXCLUSIVE_WAITERS:
1850047dd67eSAttilio Rao 		db_printf("exclusive\n");
1851047dd67eSAttilio Rao 		break;
1852047dd67eSAttilio Rao 	case LK_ALL_WAITERS:
1853047dd67eSAttilio Rao 		db_printf("shared and exclusive\n");
1854047dd67eSAttilio Rao 		break;
1855047dd67eSAttilio Rao 	default:
1856047dd67eSAttilio Rao 		db_printf("none\n");
1857047dd67eSAttilio Rao 	}
1858651175c9SAttilio Rao 	db_printf(" spinners: ");
1859651175c9SAttilio Rao 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1860651175c9SAttilio Rao 		db_printf("exclusive\n");
1861651175c9SAttilio Rao 	else
1862651175c9SAttilio Rao 		db_printf("none\n");
1863be6847d7SJohn Baldwin }
1864be6847d7SJohn Baldwin #endif
1865