xref: /freebsd/sys/kern/kern_lock.c (revision 8d20be1e22095c27faf8fe8b2f0d089739cc742e)
1 /*-
2  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice(s), this list of conditions and the following disclaimer as
10  *    the first lines of this file unmodified other than the possible
11  *    addition of one or more copyright notices.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice(s), this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26  * DAMAGE.
27  */
28 
29 #include "opt_adaptive_lockmgrs.h"
30 #include "opt_ddb.h"
31 #include "opt_hwpmc_hooks.h"
32 #include "opt_kdtrace.h"
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include <sys/param.h>
38 #include <sys/kdb.h>
39 #include <sys/ktr.h>
40 #include <sys/lock.h>
41 #include <sys/lock_profile.h>
42 #include <sys/lockmgr.h>
43 #include <sys/mutex.h>
44 #include <sys/proc.h>
45 #include <sys/sleepqueue.h>
46 #ifdef DEBUG_LOCKS
47 #include <sys/stack.h>
48 #endif
49 #include <sys/sysctl.h>
50 #include <sys/systm.h>
51 
52 #include <machine/cpu.h>
53 
54 #ifdef DDB
55 #include <ddb/ddb.h>
56 #endif
57 
58 #ifdef HWPMC_HOOKS
59 #include <sys/pmckern.h>
60 PMC_SOFT_DECLARE( , , lock, failed);
61 #endif
62 
63 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
64     (LK_ADAPTIVE | LK_NOSHARE));
65 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
66     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
67 
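/*
 * Each lockmgr lock keeps its sleeping waiters on two sleepqueue queues
 * hanging off its lock_object: one for threads waiting for an exclusive
 * grant and one for threads waiting for a shared grant.
 */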
68 #define	SQ_EXCLUSIVE_QUEUE	0
69 #define	SQ_SHARED_QUEUE		1
70 
71 #ifndef INVARIANTS
72 #define	_lockmgr_assert(lk, what, file, line)
73 #define	TD_LOCKS_INC(td)
74 #define	TD_LOCKS_DEC(td)
75 #else
76 #define	TD_LOCKS_INC(td)	((td)->td_locks++)
77 #define	TD_LOCKS_DEC(td)	((td)->td_locks--)
78 #endif
79 #define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
80 #define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
81 
82 #ifndef DEBUG_LOCKS
83 #define	STACK_PRINT(lk)
84 #define	STACK_SAVE(lk)
85 #define	STACK_ZERO(lk)
86 #else
87 #define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
88 #define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
89 #define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
90 #endif
91 
92 #define	LOCK_LOG2(lk, string, arg1, arg2)				\
93 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
94 		CTR2(KTR_LOCK, (string), (arg1), (arg2))
95 #define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
96 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
97 		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
98 
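/*
 * Helpers used around sleeps and adaptive spins: GIANT_SAVE() drops Giant
 * as many times as it is recursively held, remembering the count in _i,
 * and GIANT_RESTORE() reacquires it that many times afterwards.
 */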
99 #define	GIANT_DECLARE							\
100 	int _i = 0;							\
101 	WITNESS_SAVE_DECL(Giant)
102 #define	GIANT_RESTORE() do {						\
103 	if (_i > 0) {							\
104 		while (_i--)						\
105 			mtx_lock(&Giant);				\
106 		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
107 	}								\
108 } while (0)
109 #define	GIANT_SAVE() do {						\
110 	if (mtx_owned(&Giant)) {					\
111 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
112 		while (mtx_owned(&Giant)) {				\
113 			_i++;						\
114 			mtx_unlock(&Giant);				\
115 		}							\
116 	}								\
117 } while (0)
118 
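/*
 * A shared request may be granted only while the LK_SHARE bit is set and
 * either the exclusive waiters bit or the exclusive spinners bit is clear,
 * unless curthread already holds shared lockmgr locks or runs with
 * TDP_DEADLKTREAT, in which case it is allowed to jump ahead of queued
 * exclusive waiters.
 */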
119 #define	LK_CAN_SHARE(x)							\
120 	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
121 	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
122 	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
123 #define	LK_TRYOP(x)							\
124 	((x) & LK_NOWAIT)
125 
126 #define	LK_CAN_WITNESS(x)						\
127 	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
128 #define	LK_TRYWIT(x)							\
129 	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
130 
131 #define	LK_CAN_ADAPT(lk, f)						\
132 	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
133 	((f) & LK_SLEEPFAIL) == 0)
134 
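/*
 * The lk_lock word encodes the whole lock state: with LK_SHARE set it
 * carries the count of shared holders, otherwise it carries the exclusive
 * owner's thread pointer (or LK_KERNPROC for a disowned lock), while the
 * remaining flag bits record pending shared/exclusive waiters and
 * exclusive spinners.
 */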
135 #define	lockmgr_disowned(lk)						\
136 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
137 
138 #define	lockmgr_xlocked(lk)						\
139 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
140 
141 static void	assert_lockmgr(const struct lock_object *lock, int how);
142 #ifdef DDB
143 static void	db_show_lockmgr(const struct lock_object *lock);
144 #endif
145 static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
146 #ifdef KDTRACE_HOOKS
147 static int	owner_lockmgr(const struct lock_object *lock,
148 		    struct thread **owner);
149 #endif
150 static uintptr_t unlock_lockmgr(struct lock_object *lock);
151 
152 struct lock_class lock_class_lockmgr = {
153 	.lc_name = "lockmgr",
154 	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
155 	.lc_assert = assert_lockmgr,
156 #ifdef DDB
157 	.lc_ddb_show = db_show_lockmgr,
158 #endif
159 	.lc_lock = lock_lockmgr,
160 	.lc_unlock = unlock_lockmgr,
161 #ifdef KDTRACE_HOOKS
162 	.lc_owner = owner_lockmgr,
163 #endif
164 };
165 
166 #ifdef ADAPTIVE_LOCKMGRS
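/*
 * Adaptive spinning tunables, exported under debug.lockmgr: while the lock
 * is held in shared mode a contending thread may spin for up to alk_retries
 * rounds of alk_loops iterations each before going to sleep.
 */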
167 static u_int alk_retries = 10;
168 static u_int alk_loops = 10000;
169 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
170     "lockmgr debugging");
171 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
172 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
173 #endif
174 
175 static __inline struct thread *
176 lockmgr_xholder(const struct lock *lk)
177 {
178 	uintptr_t x;
179 
180 	x = lk->lk_lock;
181 	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
182 }
183 
184 /*
185  * This assumes the sleepqueue chain lock is held on entry and returns with
186  * it released.  It also assumes the generic interlock is valid and has been
187  * checked by the caller.  If LK_INTERLOCK is specified the interlock is not
188  * reacquired after the sleep.
189  */
190 static __inline int
191 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
192     const char *wmesg, int pri, int timo, int queue)
193 {
194 	GIANT_DECLARE;
195 	struct lock_class *class;
196 	int catch, error;
197 
198 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
199 	catch = pri & PCATCH;
200 	pri &= PRIMASK;
201 	error = 0;
202 
203 	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
204 	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
205 
206 	if (flags & LK_INTERLOCK)
207 		class->lc_unlock(ilk);
208 	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
209 		lk->lk_exslpfail++;
210 	GIANT_SAVE();
211 	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
212 	    SLEEPQ_INTERRUPTIBLE : 0), queue);
213 	if ((flags & LK_TIMELOCK) && timo)
214 		sleepq_set_timeout(&lk->lock_object, timo);
215 
216 	/*
217 	 * Choose the sleep primitive based on the timeout and catch flags.
218 	 */
219 	if ((flags & LK_TIMELOCK) && timo && catch)
220 		error = sleepq_timedwait_sig(&lk->lock_object, pri);
221 	else if ((flags & LK_TIMELOCK) && timo)
222 		error = sleepq_timedwait(&lk->lock_object, pri);
223 	else if (catch)
224 		error = sleepq_wait_sig(&lk->lock_object, pri);
225 	else
226 		sleepq_wait(&lk->lock_object, pri);
227 	GIANT_RESTORE();
228 	if ((flags & LK_SLEEPFAIL) && error == 0)
229 		error = ENOLCK;
230 
231 	return (error);
232 }
233 
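/*
 * Drop one shared reference held by curthread.  When the last shared
 * reference is dropped, wake up the appropriate sleepqueue (preferring
 * exclusive waiters) and report, through the return value, whether the
 * caller needs to kick the swapper.
 */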
234 static __inline int
235 wakeupshlk(struct lock *lk, const char *file, int line)
236 {
237 	uintptr_t v, x;
238 	u_int realexslp;
239 	int queue, wakeup_swapper;
240 
241 	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
242 	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
243 
244 	wakeup_swapper = 0;
245 	for (;;) {
246 		x = lk->lk_lock;
247 
248 		/*
249 		 * If there is more than one shared lock held, just drop one
250 		 * and return.
251 		 */
252 		if (LK_SHARERS(x) > 1) {
253 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
254 			    x - LK_ONE_SHARER))
255 				break;
256 			continue;
257 		}
258 
259 		/*
260 		 * If there are no waiters on the exclusive queue, drop the
261 		 * lock quickly.
262 		 */
263 		if ((x & LK_ALL_WAITERS) == 0) {
264 			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
265 			    LK_SHARERS_LOCK(1));
266 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
267 				break;
268 			continue;
269 		}
270 
271 		/*
272 		 * We should have a sharer with waiters, so enter the hard
273 		 * path in order to handle wakeups correctly.
274 		 */
275 		sleepq_lock(&lk->lock_object);
276 		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
277 		v = LK_UNLOCKED;
278 
279 		/*
280 		 * If the lock has exclusive waiters, give them preference in
281 		 * order to avoid deadlock with shared runners-up.
282 		 * If interruptible sleeps left the exclusive queue empty,
283 		 * avoid starving the threads sleeping on the shared queue by
284 		 * giving them precedence and clearing the exclusive waiters
285 		 * bit anyway.
286 		 * Note that the lk_exslpfail count may overstate the real
287 		 * number of waiters with the LK_SLEEPFAIL flag set, because
288 		 * such sleeps may also be interruptible; lk_exslpfail should
289 		 * therefore be treated as an upper bound, including the edge
290 		 * cases.
291 		 */
292 		realexslp = sleepq_sleepcnt(&lk->lock_object,
293 		    SQ_EXCLUSIVE_QUEUE);
294 		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
295 			if (lk->lk_exslpfail < realexslp) {
296 				lk->lk_exslpfail = 0;
297 				queue = SQ_EXCLUSIVE_QUEUE;
298 				v |= (x & LK_SHARED_WAITERS);
299 			} else {
300 				lk->lk_exslpfail = 0;
301 				LOCK_LOG2(lk,
302 				    "%s: %p has only LK_SLEEPFAIL sleepers",
303 				    __func__, lk);
304 				LOCK_LOG2(lk,
305 			    "%s: %p waking up threads on the exclusive queue",
306 				    __func__, lk);
307 				wakeup_swapper =
308 				    sleepq_broadcast(&lk->lock_object,
309 				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
310 				queue = SQ_SHARED_QUEUE;
311 			}
312 
313 		} else {
314 
315 			/*
316 			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
317 			 * and using interruptible sleeps/timeout may have
318 			 * left spurious lk_exslpfail counts on, so clean
319 			 * it up anyway.
320 			 */
321 			lk->lk_exslpfail = 0;
322 			queue = SQ_SHARED_QUEUE;
323 		}
324 
325 		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
326 		    v)) {
327 			sleepq_release(&lk->lock_object);
328 			continue;
329 		}
330 		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
331 		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
332 		    "exclusive");
333 		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
334 		    0, queue);
335 		sleepq_release(&lk->lock_object);
336 		break;
337 	}
338 
339 	lock_profile_release_lock(&lk->lock_object);
340 	TD_LOCKS_DEC(curthread);
341 	TD_SLOCKS_DEC(curthread);
342 	return (wakeup_swapper);
343 }
344 
345 static void
346 assert_lockmgr(const struct lock_object *lock, int what)
347 {
348 
349 	panic("lockmgr locks do not support assertions");
350 }
351 
352 static void
353 lock_lockmgr(struct lock_object *lock, uintptr_t how)
354 {
355 
356 	panic("lockmgr locks do not support sleep interlocking");
357 }
358 
359 static uintptr_t
360 unlock_lockmgr(struct lock_object *lock)
361 {
362 
363 	panic("lockmgr locks do not support sleep interlocking");
364 }
365 
366 #ifdef KDTRACE_HOOKS
367 static int
368 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
369 {
370 
371 	panic("lockmgr locks do not support owner inquiring");
372 }
373 #endif
374 
375 void
376 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
377 {
378 	int iflags;
379 
380 	MPASS((flags & ~LK_INIT_MASK) == 0);
381 	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
382             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
383             &lk->lk_lock));
384 
385 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
386 	if (flags & LK_CANRECURSE)
387 		iflags |= LO_RECURSABLE;
388 	if ((flags & LK_NODUP) == 0)
389 		iflags |= LO_DUPOK;
390 	if (flags & LK_NOPROFILE)
391 		iflags |= LO_NOPROFILE;
392 	if ((flags & LK_NOWITNESS) == 0)
393 		iflags |= LO_WITNESS;
394 	if (flags & LK_QUIET)
395 		iflags |= LO_QUIET;
396 	if (flags & LK_IS_VNODE)
397 		iflags |= LO_IS_VNODE;
398 	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
399 
400 	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
401 	lk->lk_lock = LK_UNLOCKED;
402 	lk->lk_recurse = 0;
403 	lk->lk_exslpfail = 0;
404 	lk->lk_timo = timo;
405 	lk->lk_pri = pri;
406 	STACK_ZERO(lk);
407 }
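
/*
 * A minimal usage sketch (hypothetical consumer code, not part of this
 * file): a lockmgr lock is initialized once, acquired and released through
 * the lockmgr(9) interface, and finally destroyed.  The names below are
 * placeholders.
 *
 *	struct lock foo_lock;
 *
 *	lockinit(&foo_lock, PVFS, "foolck", 0, 0);
 *	lockmgr(&foo_lock, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&foo_lock, LK_RELEASE, NULL);
 *	lockdestroy(&foo_lock);
 */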
408 
409 /*
410  * XXX: Gross hacks to manipulate external lock flags after
411  * initialization.  Used for certain vnode and buf locks.
412  */
413 void
414 lockallowshare(struct lock *lk)
415 {
416 
417 	lockmgr_assert(lk, KA_XLOCKED);
418 	lk->lock_object.lo_flags &= ~LK_NOSHARE;
419 }
420 
421 void
422 lockallowrecurse(struct lock *lk)
423 {
424 
425 	lockmgr_assert(lk, KA_XLOCKED);
426 	lk->lock_object.lo_flags |= LO_RECURSABLE;
427 }
428 
429 void
430 lockdisablerecurse(struct lock *lk)
431 {
432 
433 	lockmgr_assert(lk, KA_XLOCKED);
434 	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
435 }
436 
437 void
438 lockdestroy(struct lock *lk)
439 {
440 
441 	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
442 	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
443 	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
444 	lock_destroy(&lk->lock_object);
445 }
446 
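/*
 * Generic lockmgr operation handler, backing the lockmgr(9) interface.
 * The requested operation lives in the LK_TYPE_MASK bits of flags
 * (LK_SHARED, LK_EXCLUSIVE, LK_UPGRADE, LK_TRYUPGRADE, LK_DOWNGRADE,
 * LK_RELEASE or LK_DRAIN); the remaining flag bits modify its behaviour.
 * Returns 0 on success or an errno value (EBUSY, EDEADLK, ENOLCK, ...)
 * on failure.
 */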
447 int
448 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
449     const char *wmesg, int pri, int timo, const char *file, int line)
450 {
451 	GIANT_DECLARE;
452 	struct lock_class *class;
453 	const char *iwmesg;
454 	uintptr_t tid, v, x;
455 	u_int op, realexslp;
456 	int error, ipri, itimo, queue, wakeup_swapper;
457 #ifdef LOCK_PROFILING
458 	uint64_t waittime = 0;
459 	int contested = 0;
460 #endif
461 #ifdef ADAPTIVE_LOCKMGRS
462 	volatile struct thread *owner;
463 	u_int i, spintries = 0;
464 #endif
465 
466 	error = 0;
467 	tid = (uintptr_t)curthread;
468 	op = (flags & LK_TYPE_MASK);
469 	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
470 	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
471 	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
472 
473 	MPASS((flags & ~LK_TOTAL_MASK) == 0);
474 	KASSERT((op & (op - 1)) == 0,
475 	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
476 	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
477 	    (op != LK_DOWNGRADE && op != LK_RELEASE),
478 	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
479 	    __func__, file, line));
480 	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
481 	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
482 	    __func__, file, line));
483 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
484 	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
485 	    lk->lock_object.lo_name, file, line));
486 
487 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
488 	if (panicstr != NULL) {
489 		if (flags & LK_INTERLOCK)
490 			class->lc_unlock(ilk);
491 		return (0);
492 	}
493 
494 	if (lk->lock_object.lo_flags & LK_NOSHARE) {
495 		switch (op) {
496 		case LK_SHARED:
497 			op = LK_EXCLUSIVE;
498 			break;
499 		case LK_UPGRADE:
500 		case LK_TRYUPGRADE:
501 		case LK_DOWNGRADE:
502 			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
503 			    file, line);
504 			if (flags & LK_INTERLOCK)
505 				class->lc_unlock(ilk);
506 			return (0);
507 		}
508 	}
509 
510 	wakeup_swapper = 0;
511 	switch (op) {
512 	case LK_SHARED:
513 		if (LK_CAN_WITNESS(flags))
514 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
515 			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
516 		for (;;) {
517 			x = lk->lk_lock;
518 
519 			/*
520 			 * If no other thread has an exclusive lock, or
521 			 * no exclusive waiter is present, bump the count of
522 			 * sharers.  Since we have to preserve the state of
523 			 * waiters, if we fail to acquire the shared lock
524 			 * loop back and retry.
525 			 */
526 			if (LK_CAN_SHARE(x)) {
527 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
528 				    x + LK_ONE_SHARER))
529 					break;
530 				continue;
531 			}
532 #ifdef HWPMC_HOOKS
533 			PMC_SOFT_CALL( , , lock, failed);
534 #endif
535 			lock_profile_obtain_lock_failed(&lk->lock_object,
536 			    &contested, &waittime);
537 
538 			/*
539 			 * If the lock is already held by curthread in
540 			 * exclusive mode, avoid a deadlock.
541 			 */
542 			if (LK_HOLDER(x) == tid) {
543 				LOCK_LOG2(lk,
544 				    "%s: %p already held in exclusive mode",
545 				    __func__, lk);
546 				error = EDEADLK;
547 				break;
548 			}
549 
550 			/*
551 			 * If sleeping is not allowed, just give up
552 			 * and return.
553 			 */
554 			if (LK_TRYOP(flags)) {
555 				LOCK_LOG2(lk, "%s: %p fails the try operation",
556 				    __func__, lk);
557 				error = EBUSY;
558 				break;
559 			}
560 
561 #ifdef ADAPTIVE_LOCKMGRS
562 			/*
563 			 * If the owner is running on another CPU, spin until
564 			 * the owner stops running or the state of the lock
565 			 * changes.  We need a double-state handle here
566 			 * because for a failed acquisition the lock can be
567 			 * either held in exclusive mode or shared mode
568 			 * (for the writer starvation avoidance technique).
569 			 */
570 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
571 			    LK_HOLDER(x) != LK_KERNPROC) {
572 				owner = (struct thread *)LK_HOLDER(x);
573 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
574 					CTR3(KTR_LOCK,
575 					    "%s: spinning on %p held by %p",
576 					    __func__, lk, owner);
577 
578 				/*
579 				 * If we are also holding an interlock, drop it
580 				 * in order to avoid a deadlock if the lockmgr
581 				 * owner is adaptively spinning on the
582 				 * interlock itself.
583 				 */
584 				if (flags & LK_INTERLOCK) {
585 					class->lc_unlock(ilk);
586 					flags &= ~LK_INTERLOCK;
587 				}
588 				GIANT_SAVE();
589 				while (LK_HOLDER(lk->lk_lock) ==
590 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
591 					cpu_spinwait();
592 				GIANT_RESTORE();
593 				continue;
594 			} else if (LK_CAN_ADAPT(lk, flags) &&
595 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
596 			    spintries < alk_retries) {
597 				if (flags & LK_INTERLOCK) {
598 					class->lc_unlock(ilk);
599 					flags &= ~LK_INTERLOCK;
600 				}
601 				GIANT_SAVE();
602 				spintries++;
603 				for (i = 0; i < alk_loops; i++) {
604 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
605 						CTR4(KTR_LOCK,
606 				    "%s: shared spinning on %p with %u and %u",
607 						    __func__, lk, spintries, i);
608 					x = lk->lk_lock;
609 					if ((x & LK_SHARE) == 0 ||
610 					    LK_CAN_SHARE(x) != 0)
611 						break;
612 					cpu_spinwait();
613 				}
614 				GIANT_RESTORE();
615 				if (i != alk_loops)
616 					continue;
617 			}
618 #endif
619 
620 			/*
621 			 * Acquire the sleepqueue chain lock because we
622 			 * probably will need to manipulate waiters flags.
623 			 */
624 			sleepq_lock(&lk->lock_object);
625 			x = lk->lk_lock;
626 
627 			/*
628 			 * if the lock can be acquired in shared mode, try
629 			 * again.
630 			 */
631 			if (LK_CAN_SHARE(x)) {
632 				sleepq_release(&lk->lock_object);
633 				continue;
634 			}
635 
636 #ifdef ADAPTIVE_LOCKMGRS
637 			/*
638 			 * The current lock owner might have started executing
639 			 * on another CPU (or the lock could have changed
640 			 * owner) while we were waiting on the sleepqueue
641 			 * chain lock.  If so, drop the sleepqueue lock and try
642 			 * again.
643 			 */
644 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
645 			    LK_HOLDER(x) != LK_KERNPROC) {
646 				owner = (struct thread *)LK_HOLDER(x);
647 				if (TD_IS_RUNNING(owner)) {
648 					sleepq_release(&lk->lock_object);
649 					continue;
650 				}
651 			}
652 #endif
653 
654 			/*
655 			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
656 			 * loop back and retry.
657 			 */
658 			if ((x & LK_SHARED_WAITERS) == 0) {
659 				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
660 				    x | LK_SHARED_WAITERS)) {
661 					sleepq_release(&lk->lock_object);
662 					continue;
663 				}
664 				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
665 				    __func__, lk);
666 			}
667 
668 			/*
669 			 * Since we have been unable to acquire the
670 			 * shared lock and the shared waiters flag is set,
671 			 * we will sleep.
672 			 */
673 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
674 			    SQ_SHARED_QUEUE);
675 			flags &= ~LK_INTERLOCK;
676 			if (error) {
677 				LOCK_LOG3(lk,
678 				    "%s: interrupted sleep for %p with %d",
679 				    __func__, lk, error);
680 				break;
681 			}
682 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
683 			    __func__, lk);
684 		}
685 		if (error == 0) {
686 			lock_profile_obtain_lock_success(&lk->lock_object,
687 			    contested, waittime, file, line);
688 			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
689 			    line);
690 			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
691 			    line);
692 			TD_LOCKS_INC(curthread);
693 			TD_SLOCKS_INC(curthread);
694 			STACK_SAVE(lk);
695 		}
696 		break;
697 	case LK_UPGRADE:
698 	case LK_TRYUPGRADE:
699 		_lockmgr_assert(lk, KA_SLOCKED, file, line);
700 		v = lk->lk_lock;
701 		x = v & LK_ALL_WAITERS;
702 		v &= LK_EXCLUSIVE_SPINNERS;
703 
704 		/*
705 		 * Try to switch from one shared lock to an exclusive one.
706 		 * We need to preserve waiters flags during the operation.
707 		 */
708 		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
709 		    tid | x)) {
710 			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
711 			    line);
712 			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
713 			    LK_TRYWIT(flags), file, line);
714 			TD_SLOCKS_DEC(curthread);
715 			break;
716 		}
717 
718 		/*
719 		 * In LK_TRYUPGRADE mode, do not drop the lock,
720 		 * returning EBUSY instead.
721 		 */
722 		if (op == LK_TRYUPGRADE) {
723 			LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
724 			    __func__, lk);
725 			error = EBUSY;
726 			break;
727 		}
728 
729 		/*
730 		 * We have been unable to succeed in upgrading, so just
731 		 * give up the shared lock.
732 		 */
733 		wakeup_swapper |= wakeupshlk(lk, file, line);
734 
735 		/* FALLTHROUGH */
736 	case LK_EXCLUSIVE:
737 		if (LK_CAN_WITNESS(flags))
738 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
739 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
740 			    ilk : NULL);
741 
742 		/*
743 		 * If curthread already holds the lock and this one is
744 		 * allowed to recurse, simply recurse on it.
745 		 */
746 		if (lockmgr_xlocked(lk)) {
747 			if ((flags & LK_CANRECURSE) == 0 &&
748 			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
749 
750 				/*
751 				 * If this is a try operation, just give up
752 				 * and return instead of panicking.
753 				 */
754 				if (LK_TRYOP(flags)) {
755 					LOCK_LOG2(lk,
756 					    "%s: %p fails the try operation",
757 					    __func__, lk);
758 					error = EBUSY;
759 					break;
760 				}
761 				if (flags & LK_INTERLOCK)
762 					class->lc_unlock(ilk);
763 		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
764 				    __func__, iwmesg, file, line);
765 			}
766 			lk->lk_recurse++;
767 			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
768 			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
769 			    lk->lk_recurse, file, line);
770 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
771 			    LK_TRYWIT(flags), file, line);
772 			TD_LOCKS_INC(curthread);
773 			break;
774 		}
775 
776 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
777 		    tid)) {
778 #ifdef HWPMC_HOOKS
779 			PMC_SOFT_CALL( , , lock, failed);
780 #endif
781 			lock_profile_obtain_lock_failed(&lk->lock_object,
782 			    &contested, &waittime);
783 
784 			/*
785 			 * If sleeping is not allowed, just give up
786 			 * and return.
787 			 */
788 			if (LK_TRYOP(flags)) {
789 				LOCK_LOG2(lk, "%s: %p fails the try operation",
790 				    __func__, lk);
791 				error = EBUSY;
792 				break;
793 			}
794 
795 #ifdef ADAPTIVE_LOCKMGRS
796 			/*
797 			 * If the owner is running on another CPU, spin until
798 			 * the owner stops running or the state of the lock
799 			 * changes.
800 			 */
801 			x = lk->lk_lock;
802 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
803 			    LK_HOLDER(x) != LK_KERNPROC) {
804 				owner = (struct thread *)LK_HOLDER(x);
805 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
806 					CTR3(KTR_LOCK,
807 					    "%s: spinning on %p held by %p",
808 					    __func__, lk, owner);
809 
810 				/*
811 				 * If we are also holding an interlock, drop it
812 				 * in order to avoid a deadlock if the lockmgr
813 				 * owner is adaptively spinning on the
814 				 * interlock itself.
815 				 */
816 				if (flags & LK_INTERLOCK) {
817 					class->lc_unlock(ilk);
818 					flags &= ~LK_INTERLOCK;
819 				}
820 				GIANT_SAVE();
821 				while (LK_HOLDER(lk->lk_lock) ==
822 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
823 					cpu_spinwait();
824 				GIANT_RESTORE();
825 				continue;
826 			} else if (LK_CAN_ADAPT(lk, flags) &&
827 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
828 			    spintries < alk_retries) {
829 				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
830 				    !atomic_cmpset_ptr(&lk->lk_lock, x,
831 				    x | LK_EXCLUSIVE_SPINNERS))
832 					continue;
833 				if (flags & LK_INTERLOCK) {
834 					class->lc_unlock(ilk);
835 					flags &= ~LK_INTERLOCK;
836 				}
837 				GIANT_SAVE();
838 				spintries++;
839 				for (i = 0; i < alk_loops; i++) {
840 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
841 						CTR4(KTR_LOCK,
842 				    "%s: shared spinning on %p with %u and %u",
843 						    __func__, lk, spintries, i);
844 					if ((lk->lk_lock &
845 					    LK_EXCLUSIVE_SPINNERS) == 0)
846 						break;
847 					cpu_spinwait();
848 				}
849 				GIANT_RESTORE();
850 				if (i != alk_loops)
851 					continue;
852 			}
853 #endif
854 
855 			/*
856 			 * Acquire the sleepqueue chain lock because we
857 			 * probably will need to manipulate waiters flags.
858 			 */
859 			sleepq_lock(&lk->lock_object);
860 			x = lk->lk_lock;
861 
862 			/*
863 			 * if the lock has been released while we spun on
864 			 * the sleepqueue chain lock just try again.
865 			 */
866 			if (x == LK_UNLOCKED) {
867 				sleepq_release(&lk->lock_object);
868 				continue;
869 			}
870 
871 #ifdef ADAPTIVE_LOCKMGRS
872 			/*
873 			 * The current lock owner might have started executing
874 			 * on another CPU (or the lock could have changed
875 			 * owner) while we were waiting on the sleepqueue
876 			 * chain lock.  If so, drop the sleepqueue lock and try
877 			 * again.
878 			 */
879 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
880 			    LK_HOLDER(x) != LK_KERNPROC) {
881 				owner = (struct thread *)LK_HOLDER(x);
882 				if (TD_IS_RUNNING(owner)) {
883 					sleepq_release(&lk->lock_object);
884 					continue;
885 				}
886 			}
887 #endif
888 
889 			/*
890 			 * The lock can be in the state where there is a
891 			 * pending queue of waiters, but still no owner.
892 			 * This happens when the lock is contested and an
893 			 * owner is going to claim the lock.
894 			 * If curthread is the one that successfully acquires
895 			 * it, claim lock ownership and return, preserving waiters
896 			 * flags.
897 			 */
898 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
899 			if ((x & ~v) == LK_UNLOCKED) {
900 				v &= ~LK_EXCLUSIVE_SPINNERS;
901 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
902 				    tid | v)) {
903 					sleepq_release(&lk->lock_object);
904 					LOCK_LOG2(lk,
905 					    "%s: %p claimed by a new writer",
906 					    __func__, lk);
907 					break;
908 				}
909 				sleepq_release(&lk->lock_object);
910 				continue;
911 			}
912 
913 			/*
914 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
915 			 * fail, loop back and retry.
916 			 */
917 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
918 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
919 				    x | LK_EXCLUSIVE_WAITERS)) {
920 					sleepq_release(&lk->lock_object);
921 					continue;
922 				}
923 				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
924 				    __func__, lk);
925 			}
926 
927 			/*
928 			 * Since we have been unable to acquire the
929 			 * exclusive lock and the exclusive waiters flag
930 			 * is set, we will sleep.
931 			 */
932 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
933 			    SQ_EXCLUSIVE_QUEUE);
934 			flags &= ~LK_INTERLOCK;
935 			if (error) {
936 				LOCK_LOG3(lk,
937 				    "%s: interrupted sleep for %p with %d",
938 				    __func__, lk, error);
939 				break;
940 			}
941 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
942 			    __func__, lk);
943 		}
944 		if (error == 0) {
945 			lock_profile_obtain_lock_success(&lk->lock_object,
946 			    contested, waittime, file, line);
947 			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
948 			    lk->lk_recurse, file, line);
949 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
950 			    LK_TRYWIT(flags), file, line);
951 			TD_LOCKS_INC(curthread);
952 			STACK_SAVE(lk);
953 		}
954 		break;
955 	case LK_DOWNGRADE:
956 		_lockmgr_assert(lk, KA_XLOCKED, file, line);
957 		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
958 		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
959 
960 		/*
961 		 * Panic if the lock is recursed.
962 		 */
963 		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
964 			if (flags & LK_INTERLOCK)
965 				class->lc_unlock(ilk);
966 			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
967 			    __func__, iwmesg, file, line);
968 		}
969 		TD_SLOCKS_INC(curthread);
970 
971 		/*
972 		 * In order to preserve waiters flags, just spin.
973 		 */
974 		for (;;) {
975 			x = lk->lk_lock;
976 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
977 			x &= LK_ALL_WAITERS;
978 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
979 			    LK_SHARERS_LOCK(1) | x))
980 				break;
981 			cpu_spinwait();
982 		}
983 		break;
984 	case LK_RELEASE:
985 		_lockmgr_assert(lk, KA_LOCKED, file, line);
986 		x = lk->lk_lock;
987 
988 		if ((x & LK_SHARE) == 0) {
989 
990 			/*
991 			 * As a first option, treat the lock as if it has no
992 			 * waiters.
993 			 * Fix-up the tid var if the lock has been disowned.
994 			 */
995 			if (LK_HOLDER(x) == LK_KERNPROC)
996 				tid = LK_KERNPROC;
997 			else {
998 				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
999 				    file, line);
1000 				TD_LOCKS_DEC(curthread);
1001 			}
1002 			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
1003 			    lk->lk_recurse, file, line);
1004 
1005 			/*
1006 			 * The lock is held in exclusive mode.
1007 			 * If the lock is recursed also, then unrecurse it.
1008 			 */
1009 			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1010 				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
1011 				    lk);
1012 				lk->lk_recurse--;
1013 				break;
1014 			}
1015 			if (tid != LK_KERNPROC)
1016 				lock_profile_release_lock(&lk->lock_object);
1017 
1018 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
1019 			    LK_UNLOCKED))
1020 				break;
1021 
1022 			sleepq_lock(&lk->lock_object);
1023 			x = lk->lk_lock;
1024 			v = LK_UNLOCKED;
1025 
1026 			/*
1027 			 * If the lock has exclusive waiters, give them
1028 			 * preference in order to avoid deadlock with
1029 			 * shared runners-up.
1030 			 * If interruptible sleeps left the exclusive queue
1031 			 * empty, avoid starving the threads sleeping on the
1032 			 * shared queue by giving them precedence and
1033 			 * clearing the exclusive waiters bit anyway.
1034 			 * Note that the lk_exslpfail count may overstate
1035 			 * the real number of waiters with the LK_SLEEPFAIL
1036 			 * flag set, because such sleeps may also be
1037 			 * interruptible; lk_exslpfail should therefore be
1038 			 * treated as an upper bound, including the edge
1039 			 * cases.
1040 			 */
1041 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1042 			realexslp = sleepq_sleepcnt(&lk->lock_object,
1043 			    SQ_EXCLUSIVE_QUEUE);
1044 			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1045 				if (lk->lk_exslpfail < realexslp) {
1046 					lk->lk_exslpfail = 0;
1047 					queue = SQ_EXCLUSIVE_QUEUE;
1048 					v |= (x & LK_SHARED_WAITERS);
1049 				} else {
1050 					lk->lk_exslpfail = 0;
1051 					LOCK_LOG2(lk,
1052 					"%s: %p has only LK_SLEEPFAIL sleepers",
1053 					    __func__, lk);
1054 					LOCK_LOG2(lk,
1055 			"%s: %p waking up threads on the exclusive queue",
1056 					    __func__, lk);
1057 					wakeup_swapper =
1058 					    sleepq_broadcast(&lk->lock_object,
1059 					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1060 					queue = SQ_SHARED_QUEUE;
1061 				}
1062 			} else {
1063 
1064 				/*
1065 				 * Exclusive waiters sleeping with LK_SLEEPFAIL
1066 				 * on and using interruptible sleeps/timeout
1067 				 * may have left spurious lk_exslpfail counts
1068 				 * on, so clean it up anyway.
1069 				 */
1070 				lk->lk_exslpfail = 0;
1071 				queue = SQ_SHARED_QUEUE;
1072 			}
1073 
1074 			LOCK_LOG3(lk,
1075 			    "%s: %p waking up threads on the %s queue",
1076 			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1077 			    "exclusive");
1078 			atomic_store_rel_ptr(&lk->lk_lock, v);
1079 			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
1080 			    SLEEPQ_LK, 0, queue);
1081 			sleepq_release(&lk->lock_object);
1082 			break;
1083 		} else
1084 			wakeup_swapper = wakeupshlk(lk, file, line);
1085 		break;
1086 	case LK_DRAIN:
1087 		if (LK_CAN_WITNESS(flags))
1088 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1089 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1090 			    ilk : NULL);
1091 
1092 		/*
1093 		 * Trying to drain a lock we already own will result in a
1094 		 * deadlock.
1095 		 */
1096 		if (lockmgr_xlocked(lk)) {
1097 			if (flags & LK_INTERLOCK)
1098 				class->lc_unlock(ilk);
1099 			panic("%s: draining %s with the lock held @ %s:%d\n",
1100 			    __func__, iwmesg, file, line);
1101 		}
1102 
1103 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1104 #ifdef HWPMC_HOOKS
1105 			PMC_SOFT_CALL( , , lock, failed);
1106 #endif
1107 			lock_profile_obtain_lock_failed(&lk->lock_object,
1108 			    &contested, &waittime);
1109 
1110 			/*
1111 			 * If sleeping is not allowed, just give up
1112 			 * and return.
1113 			 */
1114 			if (LK_TRYOP(flags)) {
1115 				LOCK_LOG2(lk, "%s: %p fails the try operation",
1116 				    __func__, lk);
1117 				error = EBUSY;
1118 				break;
1119 			}
1120 
1121 			/*
1122 			 * Acquire the sleepqueue chain lock because we
1123 			 * probably will need to manipulate waiters flags.
1124 			 */
1125 			sleepq_lock(&lk->lock_object);
1126 			x = lk->lk_lock;
1127 
1128 			/*
1129 			 * if the lock has been released while we spun on
1130 			 * the sleepqueue chain lock just try again.
1131 			 */
1132 			if (x == LK_UNLOCKED) {
1133 				sleepq_release(&lk->lock_object);
1134 				continue;
1135 			}
1136 
1137 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1138 			if ((x & ~v) == LK_UNLOCKED) {
1139 				v = (x & ~LK_EXCLUSIVE_SPINNERS);
1140 
1141 				/*
1142 				 * If interruptible sleeps left the exclusive
1143 				 * queue empty, avoid starving the threads
1144 				 * sleeping on the shared queue by giving them
1145 				 * precedence and clearing the exclusive
1146 				 * waiters bit anyway.
1147 				 * Note that the lk_exslpfail count may
1148 				 * overstate the real number of waiters with
1149 				 * the LK_SLEEPFAIL flag set, because such
1150 				 * sleeps may also be interruptible;
1151 				 * lk_exslpfail should therefore be treated as
1152 				 * an upper bound, including the edge
1153 				 * cases.
1154 				 */
1155 				if (v & LK_EXCLUSIVE_WAITERS) {
1156 					queue = SQ_EXCLUSIVE_QUEUE;
1157 					v &= ~LK_EXCLUSIVE_WAITERS;
1158 				} else {
1159 
1160 					/*
1161 					 * Exclusive waiters sleeping with
1162 					 * LK_SLEEPFAIL on and using
1163 					 * interruptible sleeps/timeout may
1164 					 * have left spurious lk_exslpfail
1165 					 * counts on, so clean it up anyway.
1166 					 */
1167 					MPASS(v & LK_SHARED_WAITERS);
1168 					lk->lk_exslpfail = 0;
1169 					queue = SQ_SHARED_QUEUE;
1170 					v &= ~LK_SHARED_WAITERS;
1171 				}
1172 				if (queue == SQ_EXCLUSIVE_QUEUE) {
1173 					realexslp =
1174 					    sleepq_sleepcnt(&lk->lock_object,
1175 					    SQ_EXCLUSIVE_QUEUE);
1176 					if (lk->lk_exslpfail >= realexslp) {
1177 						lk->lk_exslpfail = 0;
1178 						queue = SQ_SHARED_QUEUE;
1179 						v &= ~LK_SHARED_WAITERS;
1180 						if (realexslp != 0) {
1181 							LOCK_LOG2(lk,
1182 					"%s: %p has only LK_SLEEPFAIL sleepers",
1183 							    __func__, lk);
1184 							LOCK_LOG2(lk,
1185 			"%s: %p waking up threads on the exclusive queue",
1186 							    __func__, lk);
1187 							wakeup_swapper =
1188 							    sleepq_broadcast(
1189 							    &lk->lock_object,
1190 							    SLEEPQ_LK, 0,
1191 							    SQ_EXCLUSIVE_QUEUE);
1192 						}
1193 					} else
1194 						lk->lk_exslpfail = 0;
1195 				}
1196 				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1197 					sleepq_release(&lk->lock_object);
1198 					continue;
1199 				}
1200 				LOCK_LOG3(lk,
1201 				"%s: %p waking up all threads on the %s queue",
1202 				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1203 				    "shared" : "exclusive");
1204 				wakeup_swapper |= sleepq_broadcast(
1205 				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1206 
1207 				/*
1208 				 * If shared waiters have been woken up we need
1209 				 * to wait for one of them to acquire the lock
1210 				 * before setting the exclusive waiters flag in
1211 				 * order to avoid a deadlock.
1212 				 */
1213 				if (queue == SQ_SHARED_QUEUE) {
1214 					for (v = lk->lk_lock;
1215 					    (v & LK_SHARE) && !LK_SHARERS(v);
1216 					    v = lk->lk_lock)
1217 						cpu_spinwait();
1218 				}
1219 			}
1220 
1221 			/*
1222 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1223 			 * fail, loop back and retry.
1224 			 */
1225 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1226 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1227 				    x | LK_EXCLUSIVE_WAITERS)) {
1228 					sleepq_release(&lk->lock_object);
1229 					continue;
1230 				}
1231 				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1232 				    __func__, lk);
1233 			}
1234 
1235 			/*
1236 			 * Since we have been unable to acquire the
1237 			 * exclusive lock and the exclusive waiters flag
1238 			 * is set, we will sleep.
1239 			 */
1240 			if (flags & LK_INTERLOCK) {
1241 				class->lc_unlock(ilk);
1242 				flags &= ~LK_INTERLOCK;
1243 			}
1244 			GIANT_SAVE();
1245 			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1246 			    SQ_EXCLUSIVE_QUEUE);
1247 			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1248 			GIANT_RESTORE();
1249 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1250 			    __func__, lk);
1251 		}
1252 
1253 		if (error == 0) {
1254 			lock_profile_obtain_lock_success(&lk->lock_object,
1255 			    contested, waittime, file, line);
1256 			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1257 			    lk->lk_recurse, file, line);
1258 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1259 			    LK_TRYWIT(flags), file, line);
1260 			TD_LOCKS_INC(curthread);
1261 			STACK_SAVE(lk);
1262 		}
1263 		break;
1264 	default:
1265 		if (flags & LK_INTERLOCK)
1266 			class->lc_unlock(ilk);
1267 		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1268 	}
1269 
1270 	if (flags & LK_INTERLOCK)
1271 		class->lc_unlock(ilk);
1272 	if (wakeup_swapper)
1273 		kick_proc0();
1274 
1275 	return (error);
1276 }
1277 
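/*
 * Hand an exclusively held lock over to LK_KERNPROC, so that no specific
 * thread is recorded as the owner and the lock may later be released on
 * the kernel's behalf.
 */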
1278 void
1279 _lockmgr_disown(struct lock *lk, const char *file, int line)
1280 {
1281 	uintptr_t tid, x;
1282 
1283 	if (SCHEDULER_STOPPED())
1284 		return;
1285 
1286 	tid = (uintptr_t)curthread;
1287 	_lockmgr_assert(lk, KA_XLOCKED, file, line);
1288 
1289 	/*
1290 	 * Panic if the lock is recursed.
1291 	 */
1292 	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1293 		panic("%s: disown a recursed lockmgr @ %s:%d\n",
1294 		    __func__,  file, line);
1295 
1296 	/*
1297 	 * If the owner is already LK_KERNPROC just skip the whole operation.
1298 	 */
1299 	if (LK_HOLDER(lk->lk_lock) != tid)
1300 		return;
1301 	lock_profile_release_lock(&lk->lock_object);
1302 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1303 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1304 	TD_LOCKS_DEC(curthread);
1305 	STACK_SAVE(lk);
1306 
1307 	/*
1308 	 * In order to preserve waiters flags, just spin.
1309 	 */
1310 	for (;;) {
1311 		x = lk->lk_lock;
1312 		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1313 		x &= LK_ALL_WAITERS;
1314 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1315 		    LK_KERNPROC | x))
1316 			return;
1317 		cpu_spinwait();
1318 	}
1319 }
1320 
1321 void
1322 lockmgr_printinfo(const struct lock *lk)
1323 {
1324 	struct thread *td;
1325 	uintptr_t x;
1326 
1327 	if (lk->lk_lock == LK_UNLOCKED)
1328 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1329 	else if (lk->lk_lock & LK_SHARE)
1330 		printf("lock type %s: SHARED (count %ju)\n",
1331 		    lk->lock_object.lo_name,
1332 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1333 	else {
1334 		td = lockmgr_xholder(lk);
1335 		printf("lock type %s: EXCL by thread %p "
1336 		    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, td,
1337 		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_tid);
1338 	}
1339 
1340 	x = lk->lk_lock;
1341 	if (x & LK_EXCLUSIVE_WAITERS)
1342 		printf(" with exclusive waiters pending\n");
1343 	if (x & LK_SHARED_WAITERS)
1344 		printf(" with shared waiters pending\n");
1345 	if (x & LK_EXCLUSIVE_SPINNERS)
1346 		printf(" with exclusive spinners pending\n");
1347 
1348 	STACK_PRINT(lk);
1349 }
1350 
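/*
 * Report how the lock is currently held: LK_EXCLUSIVE if owned by curthread
 * or disowned to LK_KERNPROC, LK_EXCLOTHER if exclusively owned by another
 * thread, LK_SHARED if held in shared mode, or 0 if unlocked.
 */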
1351 int
1352 lockstatus(const struct lock *lk)
1353 {
1354 	uintptr_t v, x;
1355 	int ret;
1356 
1357 	ret = LK_SHARED;
1358 	x = lk->lk_lock;
1359 	v = LK_HOLDER(x);
1360 
1361 	if ((x & LK_SHARE) == 0) {
1362 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1363 			ret = LK_EXCLUSIVE;
1364 		else
1365 			ret = LK_EXCLOTHER;
1366 	} else if (x == LK_UNLOCKED)
1367 		ret = 0;
1368 
1369 	return (ret);
1370 }
1371 
1372 #ifdef INVARIANT_SUPPORT
1373 
1374 FEATURE(invariant_support,
1375     "Support for modules compiled with INVARIANTS option");
1376 
1377 #ifndef INVARIANTS
1378 #undef	_lockmgr_assert
1379 #endif
1380 
1381 void
1382 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1383 {
1384 	int slocked = 0;
1385 
1386 	if (panicstr != NULL)
1387 		return;
1388 	switch (what) {
1389 	case KA_SLOCKED:
1390 	case KA_SLOCKED | KA_NOTRECURSED:
1391 	case KA_SLOCKED | KA_RECURSED:
1392 		slocked = 1;
1393 	case KA_LOCKED:
1394 	case KA_LOCKED | KA_NOTRECURSED:
1395 	case KA_LOCKED | KA_RECURSED:
1396 #ifdef WITNESS
1397 
1398 		/*
1399 		 * We cannot trust WITNESS if the lock is held in exclusive
1400 		 * mode and a call to lockmgr_disown() happened.
1401 		 * Work around this by skipping the check if the lock is held
1402 		 * in exclusive mode, even for the KA_LOCKED case.
1403 		 */
1404 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1405 			witness_assert(&lk->lock_object, what, file, line);
1406 			break;
1407 		}
1408 #endif
1409 		if (lk->lk_lock == LK_UNLOCKED ||
1410 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1411 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1412 			panic("Lock %s not %slocked @ %s:%d\n",
1413 			    lk->lock_object.lo_name, slocked ? "share" : "",
1414 			    file, line);
1415 
1416 		if ((lk->lk_lock & LK_SHARE) == 0) {
1417 			if (lockmgr_recursed(lk)) {
1418 				if (what & KA_NOTRECURSED)
1419 					panic("Lock %s recursed @ %s:%d\n",
1420 					    lk->lock_object.lo_name, file,
1421 					    line);
1422 			} else if (what & KA_RECURSED)
1423 				panic("Lock %s not recursed @ %s:%d\n",
1424 				    lk->lock_object.lo_name, file, line);
1425 		}
1426 		break;
1427 	case KA_XLOCKED:
1428 	case KA_XLOCKED | KA_NOTRECURSED:
1429 	case KA_XLOCKED | KA_RECURSED:
1430 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1431 			panic("Lock %s not exclusively locked @ %s:%d\n",
1432 			    lk->lock_object.lo_name, file, line);
1433 		if (lockmgr_recursed(lk)) {
1434 			if (what & KA_NOTRECURSED)
1435 				panic("Lock %s recursed @ %s:%d\n",
1436 				    lk->lock_object.lo_name, file, line);
1437 		} else if (what & KA_RECURSED)
1438 			panic("Lock %s not recursed @ %s:%d\n",
1439 			    lk->lock_object.lo_name, file, line);
1440 		break;
1441 	case KA_UNLOCKED:
1442 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1443 			panic("Lock %s exclusively locked @ %s:%d\n",
1444 			    lk->lock_object.lo_name, file, line);
1445 		break;
1446 	default:
1447 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1448 		    line);
1449 	}
1450 }
1451 #endif
1452 
1453 #ifdef DDB
1454 int
1455 lockmgr_chain(struct thread *td, struct thread **ownerp)
1456 {
1457 	struct lock *lk;
1458 
1459 	lk = td->td_wchan;
1460 
1461 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1462 		return (0);
1463 	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1464 	if (lk->lk_lock & LK_SHARE)
1465 		db_printf("SHARED (count %ju)\n",
1466 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1467 	else
1468 		db_printf("EXCL\n");
1469 	*ownerp = lockmgr_xholder(lk);
1470 
1471 	return (1);
1472 }
1473 
1474 static void
1475 db_show_lockmgr(const struct lock_object *lock)
1476 {
1477 	struct thread *td;
1478 	const struct lock *lk;
1479 
1480 	lk = (const struct lock *)lock;
1481 
1482 	db_printf(" state: ");
1483 	if (lk->lk_lock == LK_UNLOCKED)
1484 		db_printf("UNLOCKED\n");
1485 	else if (lk->lk_lock & LK_SHARE)
1486 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1487 	else {
1488 		td = lockmgr_xholder(lk);
1489 		if (td == (struct thread *)LK_KERNPROC)
1490 			db_printf("XLOCK: LK_KERNPROC\n");
1491 		else
1492 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1493 			    td->td_tid, td->td_proc->p_pid,
1494 			    td->td_proc->p_comm);
1495 		if (lockmgr_recursed(lk))
1496 			db_printf(" recursed: %d\n", lk->lk_recurse);
1497 	}
1498 	db_printf(" waiters: ");
1499 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1500 	case LK_SHARED_WAITERS:
1501 		db_printf("shared\n");
1502 		break;
1503 	case LK_EXCLUSIVE_WAITERS:
1504 		db_printf("exclusive\n");
1505 		break;
1506 	case LK_ALL_WAITERS:
1507 		db_printf("shared and exclusive\n");
1508 		break;
1509 	default:
1510 		db_printf("none\n");
1511 	}
1512 	db_printf(" spinners: ");
1513 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1514 		db_printf("exclusive\n");
1515 	else
1516 		db_printf("none\n");
1517 }
1518 #endif
1519