xref: /freebsd/sys/kern/kern_lock.c (revision 651175c9db6786d20f95b07dc8716ab6ff552f92)
/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/linker_set.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

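/*
 * Threads sleeping on a lockmgr lock are parked on one of two sleepqueue
 * queues, one for would-be exclusive owners and one for would-be sharers,
 * so that each class of waiter can be woken up independently.
 */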
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

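/*
 * Giant handling: a thread may hold Giant (possibly recursively) when it
 * has to sleep on a lockmgr lock.  GIANT_SAVE() fully drops Giant while
 * remembering the recursion depth in _i, and GIANT_RESTORE() reacquires
 * it the same number of times once the sleep is over.
 */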
#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

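/*
 * A shared request can be granted while the lock is held in shared mode,
 * unless both exclusive waiters and exclusive spinners are present; a
 * thread that already holds a shared lock (td_lk_slocks) or runs with
 * TDP_DEADLKTREAT is allowed to share regardless, to avoid deadlocks.
 */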
#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

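/*
 * Adaptive spinning is only attempted when the lock was created with
 * LK_ADAPTIVE and the request does not carry LK_SLEEPFAIL, presumably
 * because LK_SLEEPFAIL callers expect a real sleep (and the ENOLCK
 * return that follows it) rather than a spin.
 */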
#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

static void	 assert_lockmgr(struct lock_object *lock, int how);
#ifdef DDB
static void	 db_show_lockmgr(struct lock_object *lock);
#endif
static void	 lock_lockmgr(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	 owner_lockmgr(struct lock_object *lock, struct thread **owner);
#endif
static int	 unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif

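/*
 * The lock word encodes either a sharer count (when LK_SHARE is set) or
 * the owning thread pointer (or LK_KERNPROC for a disowned lock);
 * lockmgr_xholder() recovers the exclusive owner, returning NULL for a
 * shared or unlocked lock.
 */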
static __inline struct thread *
lockmgr_xholder(struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * Prepare to sleep on a lockmgr lock.  The sleepqueue chain lock must be
 * held on entry and is released on return.  The generic interlock, if any,
 * is assumed to be sane and already checked.  If LK_INTERLOCK is specified,
 * the interlock is not reacquired after the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Pick the sleep primitive matching the timeout and signal-catching
	 * requirements.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}

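/*
 * Release a shared lock, waking up waiters as needed.  Returns nonzero
 * when the caller must kick proc0 (the swapper) because a swapped-out
 * thread was made runnable by the wakeup.
 */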
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	int queue, wakeup_swapper;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters at all, drop the lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid a deadlock with the shared runners-up.
		 */
		if (x & LK_EXCLUSIVE_WAITERS) {
			queue = SQ_EXCLUSIVE_QUEUE;
			v |= (x & LK_SHARED_WAITERS);
		} else {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARED_WAITERS);
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper = sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	return (wakeup_swapper);
}

static void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif

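/*
 * Initialize a lockmgr lock.  The pri and timo arguments become the
 * defaults used when a later request passes LK_PRIO_DEFAULT or
 * LK_TIMO_DEFAULT, and the LK_* creation flags are translated into the
 * matching LO_* lock_object flags.
 */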
void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	STACK_ZERO(lk);
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	lock_destroy(&lk->lock_object);
}

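/*
 * Central entry point for all lockmgr requests; the operation is extracted
 * from the flags and dispatched below.  Callers normally go through the
 * lockmgr() and lockmgr_args() wrappers rather than calling this directly.
 * A typical usage pattern (a sketch, not taken from this file) is:
 *
 *	lockinit(&lk, PVFS, "lktest", 0, 0);
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&lk, LK_RELEASE, NULL);
 *	lockdestroy(&lk);
 */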
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
		op = LK_EXCLUSIVE;

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, ilk);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread holds an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * the waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the operation must not sleep (LK_NOWAIT), just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x) != 0)
						break;
					cpu_spinwait();
				}
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and try
			 * again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
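		/*
		 * Note: x carries the waiter bits to preserve and v the
		 * current spinner bits, so the cmpset below can only succeed
		 * while we are the sole sharer.  The new value drops
		 * LK_EXCLUSIVE_SPINNERS, which makes any adaptive spinners
		 * fall out of their spin loop and retry.
		 */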
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * We have been unable to upgrade, so just give up the
		 * shared lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * If this is a try operation, just give up
				 * and return instead of panicking.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation must not sleep (LK_NOWAIT), just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and try
			 * again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it,
			 * claim lock ownership and return, preserving waiters
			 * flags.
			 */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it had no
			 * waiters.
			 * Fix up the tid variable if the lock has been
			 * disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is also recursed, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid a deadlock with the
			 * shared runners-up.
			 */
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			if (x & LK_EXCLUSIVE_WAITERS) {
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				MPASS((x & LK_ALL_WAITERS) ==
				    LK_SHARED_WAITERS);
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation must not sleep (LK_NOWAIT), just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {
					MPASS(v & LK_SHARED_WAITERS);
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up, we
				 * need to wait for one of them to acquire the
				 * lock before setting the exclusive waiters
				 * flag, in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}

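/*
 * Disown an exclusively held lock: ownership is transferred to the
 * fictitious LK_KERNPROC owner, so any thread may later release the lock,
 * and WITNESS/td_locks accounting stops tracking it for curthread.
 */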
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock;
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}

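/*
 * Dump the state of a lockmgr lock to the console; meant for debugging
 * paths such as vnode printing and DDB.
 */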
1092047dd67eSAttilio Rao void
1093047dd67eSAttilio Rao lockmgr_printinfo(struct lock *lk)
1094d7a7e179SAttilio Rao {
1095d7a7e179SAttilio Rao 	struct thread *td;
1096047dd67eSAttilio Rao 	uintptr_t x;
1097d7a7e179SAttilio Rao 
1098047dd67eSAttilio Rao 	if (lk->lk_lock == LK_UNLOCKED)
1099047dd67eSAttilio Rao 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1100047dd67eSAttilio Rao 	else if (lk->lk_lock & LK_SHARE)
1101047dd67eSAttilio Rao 		printf("lock type %s: SHARED (count %ju)\n",
1102047dd67eSAttilio Rao 		    lk->lock_object.lo_name,
1103047dd67eSAttilio Rao 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1104047dd67eSAttilio Rao 	else {
1105047dd67eSAttilio Rao 		td = lockmgr_xholder(lk);
1106047dd67eSAttilio Rao 		printf("lock type %s: EXCL by thread %p (pid %d)\n",
1107047dd67eSAttilio Rao 		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
1108d7a7e179SAttilio Rao 	}
1109d7a7e179SAttilio Rao 
1110047dd67eSAttilio Rao 	x = lk->lk_lock;
1111047dd67eSAttilio Rao 	if (x & LK_EXCLUSIVE_WAITERS)
1112047dd67eSAttilio Rao 		printf(" with exclusive waiters pending\n");
1113047dd67eSAttilio Rao 	if (x & LK_SHARED_WAITERS)
1114047dd67eSAttilio Rao 		printf(" with shared waiters pending\n");
1115651175c9SAttilio Rao 	if (x & LK_EXCLUSIVE_SPINNERS)
1116651175c9SAttilio Rao 		printf(" with exclusive spinners pending\n");
1117047dd67eSAttilio Rao 
1118047dd67eSAttilio Rao 	STACK_PRINT(lk);
1119047dd67eSAttilio Rao }
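
/*
 * Editorial sketch: lockmgr_printinfo() is a diagnostic dump helper; for
 * instance, the VFS layer can use it when printing a vnode to show the
 * state of the vnode lock.  A hypothetical caller:
 *
 *	lockmgr_printinfo(vp->v_vnlock);
 */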
1120047dd67eSAttilio Rao 
112199448ed1SJohn Dyson int
1122047dd67eSAttilio Rao lockstatus(struct lock *lk)
112399448ed1SJohn Dyson {
1124047dd67eSAttilio Rao 	uintptr_t v, x;
1125047dd67eSAttilio Rao 	int ret;
112699448ed1SJohn Dyson 
1127047dd67eSAttilio Rao 	ret = LK_SHARED;
1128047dd67eSAttilio Rao 	x = lk->lk_lock;
1129047dd67eSAttilio Rao 	v = LK_HOLDER(x);
11300e9eb108SAttilio Rao 
1131047dd67eSAttilio Rao 	if ((x & LK_SHARE) == 0) {
1132047dd67eSAttilio Rao 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1133047dd67eSAttilio Rao 			ret = LK_EXCLUSIVE;
11346bdfe06aSEivind Eklund 		else
1135047dd67eSAttilio Rao 			ret = LK_EXCLOTHER;
1136047dd67eSAttilio Rao 	} else if (x == LK_UNLOCKED)
1137047dd67eSAttilio Rao 		ret = 0;
113899448ed1SJohn Dyson 
1139047dd67eSAttilio Rao 	return (ret);
114053bf4bb2SPeter Wemm }
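
/*
 * Editorial sketch: lockstatus() classifies the lock state relative to
 * curthread, which is what VOP_ISLOCKED()-style checks build on.  A
 * hypothetical caller distinguishing the four return values:
 *
 *	switch (lockstatus(&lk)) {
 *	case LK_EXCLUSIVE:	held by curthread (or disowned to KERNPROC)
 *	case LK_EXCLOTHER:	held exclusively by another thread
 *	case LK_SHARED:		held in shared mode
 *	case 0:			unlocked
 *	}
 */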
1141be6847d7SJohn Baldwin 
114284887fa3SAttilio Rao #ifdef INVARIANT_SUPPORT
114384887fa3SAttilio Rao #ifndef INVARIANTS
114484887fa3SAttilio Rao #undef	_lockmgr_assert
114584887fa3SAttilio Rao #endif
114684887fa3SAttilio Rao 
114784887fa3SAttilio Rao void
1148047dd67eSAttilio Rao _lockmgr_assert(struct lock *lk, int what, const char *file, int line)
114984887fa3SAttilio Rao {
115084887fa3SAttilio Rao 	int slocked = 0;
115184887fa3SAttilio Rao 
115284887fa3SAttilio Rao 	if (panicstr != NULL)
115384887fa3SAttilio Rao 		return;
115484887fa3SAttilio Rao 	switch (what) {
115584887fa3SAttilio Rao 	case KA_SLOCKED:
115684887fa3SAttilio Rao 	case KA_SLOCKED | KA_NOTRECURSED:
115784887fa3SAttilio Rao 	case KA_SLOCKED | KA_RECURSED:
115884887fa3SAttilio Rao 		slocked = 1;
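		/* FALLTHROUGH */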
115984887fa3SAttilio Rao 	case KA_LOCKED:
116084887fa3SAttilio Rao 	case KA_LOCKED | KA_NOTRECURSED:
116184887fa3SAttilio Rao 	case KA_LOCKED | KA_RECURSED:
1162e5f94314SAttilio Rao #ifdef WITNESS
1163e5f94314SAttilio Rao 
1164e5f94314SAttilio Rao 		/*
1165e5f94314SAttilio Rao 		 * We cannot trust WITNESS if the lock is held in exclusive
1166e5f94314SAttilio Rao 		 * mode and a call to lockmgr_disown() has happened.  Work
1167e5f94314SAttilio Rao 		 * around this by skipping the check if the lock is held in
1168e5f94314SAttilio Rao 		 * exclusive mode, even for the KA_LOCKED case.
1169e5f94314SAttilio Rao 		 */
1170e5f94314SAttilio Rao 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1171e5f94314SAttilio Rao 			witness_assert(&lk->lock_object, what, file, line);
1172e5f94314SAttilio Rao 			break;
1173e5f94314SAttilio Rao 		}
1174e5f94314SAttilio Rao #endif
1175047dd67eSAttilio Rao 		if (lk->lk_lock == LK_UNLOCKED ||
1176047dd67eSAttilio Rao 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1177047dd67eSAttilio Rao 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
117884887fa3SAttilio Rao 			panic("Lock %s not %slocked @ %s:%d\n",
1179047dd67eSAttilio Rao 			    lk->lock_object.lo_name, slocked ? "share" : "",
118084887fa3SAttilio Rao 			    file, line);
1181047dd67eSAttilio Rao 
1182047dd67eSAttilio Rao 		if ((lk->lk_lock & LK_SHARE) == 0) {
1183047dd67eSAttilio Rao 			if (lockmgr_recursed(lk)) {
118484887fa3SAttilio Rao 				if (what & KA_NOTRECURSED)
118584887fa3SAttilio Rao 					panic("Lock %s recursed @ %s:%d\n",
1186047dd67eSAttilio Rao 					    lk->lock_object.lo_name, file,
1187047dd67eSAttilio Rao 					    line);
118884887fa3SAttilio Rao 			} else if (what & KA_RECURSED)
118984887fa3SAttilio Rao 				panic("Lock %s not recursed @ %s:%d\n",
1190047dd67eSAttilio Rao 				    lk->lock_object.lo_name, file, line);
119184887fa3SAttilio Rao 		}
119284887fa3SAttilio Rao 		break;
119384887fa3SAttilio Rao 	case KA_XLOCKED:
119484887fa3SAttilio Rao 	case KA_XLOCKED | KA_NOTRECURSED:
119584887fa3SAttilio Rao 	case KA_XLOCKED | KA_RECURSED:
1196047dd67eSAttilio Rao 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
119784887fa3SAttilio Rao 			panic("Lock %s not exclusively locked @ %s:%d\n",
1198047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
1199047dd67eSAttilio Rao 		if (lockmgr_recursed(lk)) {
120084887fa3SAttilio Rao 			if (what & KA_NOTRECURSED)
120184887fa3SAttilio Rao 				panic("Lock %s recursed @ %s:%d\n",
1202047dd67eSAttilio Rao 				    lk->lock_object.lo_name, file, line);
120384887fa3SAttilio Rao 		} else if (what & KA_RECURSED)
120484887fa3SAttilio Rao 			panic("Lock %s not recursed @ %s:%d\n",
1205047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
120684887fa3SAttilio Rao 		break;
120784887fa3SAttilio Rao 	case KA_UNLOCKED:
1208047dd67eSAttilio Rao 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
120984887fa3SAttilio Rao 			panic("Lock %s exclusively locked @ %s:%d\n",
1210047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
121184887fa3SAttilio Rao 		break;
121284887fa3SAttilio Rao 	default:
1213047dd67eSAttilio Rao 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1214047dd67eSAttilio Rao 		    line);
121584887fa3SAttilio Rao 	}
121684887fa3SAttilio Rao }
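
/*
 * Editorial sketch: with INVARIANTS enabled, callers reach the checks
 * above through the lockmgr_assert() macro, e.g. to require an
 * unrecursed exclusive lock at a given point:
 *
 *	lockmgr_assert(&lk, KA_XLOCKED | KA_NOTRECURSED);
 */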
1217047dd67eSAttilio Rao #endif
121884887fa3SAttilio Rao 
1219be6847d7SJohn Baldwin #ifdef DDB
1220462a7addSJohn Baldwin int
1221462a7addSJohn Baldwin lockmgr_chain(struct thread *td, struct thread **ownerp)
1222462a7addSJohn Baldwin {
1223047dd67eSAttilio Rao 	struct lock *lk;
1224462a7addSJohn Baldwin 
1225047dd67eSAttilio Rao 	lk = td->td_wchan;
1226462a7addSJohn Baldwin 
1227047dd67eSAttilio Rao 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1228462a7addSJohn Baldwin 		return (0);
1229047dd67eSAttilio Rao 	db_printf("blocked on lockmgr %s ", lk->lock_object.lo_name);
1230047dd67eSAttilio Rao 	if (lk->lk_lock & LK_SHARE)
1231047dd67eSAttilio Rao 		db_printf("SHARED (count %ju)\n",
1232047dd67eSAttilio Rao 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1233047dd67eSAttilio Rao 	else
1234047dd67eSAttilio Rao 		db_printf("EXCL\n");
1235047dd67eSAttilio Rao 	*ownerp = lockmgr_xholder(lk);
1236462a7addSJohn Baldwin 
1237462a7addSJohn Baldwin 	return (1);
1238462a7addSJohn Baldwin }
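
/*
 * Editorial sketch: lockmgr_chain() serves DDB's lock-chain commands
 * (e.g. "show sleepchain"), which walk from a blocked thread to the
 * owner of the lock it sleeps on; expected output is along the lines of:
 *
 *	db> show sleepchain 100042
 *	... blocked on lockmgr bufwait SHARED (count 1)
 */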
1239462a7addSJohn Baldwin 
1240047dd67eSAttilio Rao static void
124161bd5e21SKip Macy db_show_lockmgr(struct lock_object *lock)
1242be6847d7SJohn Baldwin {
1243be6847d7SJohn Baldwin 	struct thread *td;
1244047dd67eSAttilio Rao 	struct lock *lk;
1245be6847d7SJohn Baldwin 
1246047dd67eSAttilio Rao 	lk = (struct lock *)lock;
1247be6847d7SJohn Baldwin 
1248be6847d7SJohn Baldwin 	db_printf(" state: ");
1249047dd67eSAttilio Rao 	if (lk->lk_lock == LK_UNLOCKED)
1250be6847d7SJohn Baldwin 		db_printf("UNLOCKED\n");
1251047dd67eSAttilio Rao 	else if (lk->lk_lock & LK_SHARE)
1252047dd67eSAttilio Rao 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1253047dd67eSAttilio Rao 	else {
1254047dd67eSAttilio Rao 		td = lockmgr_xholder(lk);
1255047dd67eSAttilio Rao 		if (td == (struct thread *)LK_KERNPROC)
1256047dd67eSAttilio Rao 			db_printf("XLOCK: LK_KERNPROC\n");
1257047dd67eSAttilio Rao 		else
1258047dd67eSAttilio Rao 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1259047dd67eSAttilio Rao 			    td->td_tid, td->td_proc->p_pid,
1260047dd67eSAttilio Rao 			    td->td_proc->p_comm);
1261047dd67eSAttilio Rao 		if (lockmgr_recursed(lk))
1262047dd67eSAttilio Rao 			db_printf(" recursed: %d\n", lk->lk_recurse);
1263047dd67eSAttilio Rao 	}
1264047dd67eSAttilio Rao 	db_printf(" waiters: ");
1265047dd67eSAttilio Rao 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1266047dd67eSAttilio Rao 	case LK_SHARED_WAITERS:
1267047dd67eSAttilio Rao 		db_printf("shared\n");
1268e5023dd9SEdward Tomasz Napierala 		break;
1269047dd67eSAttilio Rao 	case LK_EXCLUSIVE_WAITERS:
1270047dd67eSAttilio Rao 		db_printf("exclusive\n");
1271047dd67eSAttilio Rao 		break;
1272047dd67eSAttilio Rao 	case LK_ALL_WAITERS:
1273047dd67eSAttilio Rao 		db_printf("shared and exclusive\n");
1274047dd67eSAttilio Rao 		break;
1275047dd67eSAttilio Rao 	default:
1276047dd67eSAttilio Rao 		db_printf("none\n");
1277047dd67eSAttilio Rao 	}
1278651175c9SAttilio Rao 	db_printf(" spinners: ");
1279651175c9SAttilio Rao 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1280651175c9SAttilio Rao 		db_printf("exclusive\n");
1281651175c9SAttilio Rao 	else
1282651175c9SAttilio Rao 		db_printf("none\n");
1283be6847d7SJohn Baldwin }
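
/*
 * Editorial sketch: db_show_lockmgr() is installed as the lc_ddb_show
 * method of lock_class_lockmgr, so it runs when a lockmgr lock is
 * examined from the debugger:
 *
 *	db> show lock <struct lock address>
 */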
1284be6847d7SJohn Baldwin #endif
1285