xref: /freebsd/sys/kern/kern_lock.c (revision 830940567b49bb0c08dfaed40418999e76616909)
1 /*-
2  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice(s), this list of conditions and the following disclaimer as
10  *    the first lines of this file unmodified other than the possible
11  *    addition of one or more copyright notices.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice(s), this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26  * DAMAGE.
27  */
28 
29 #include "opt_adaptive_lockmgrs.h"
30 #include "opt_ddb.h"
31 #include "opt_kdtrace.h"
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/ktr.h>
38 #include <sys/linker_set.h>
39 #include <sys/lock.h>
40 #include <sys/lock_profile.h>
41 #include <sys/lockmgr.h>
42 #include <sys/mutex.h>
43 #include <sys/proc.h>
44 #include <sys/sleepqueue.h>
45 #ifdef DEBUG_LOCKS
46 #include <sys/stack.h>
47 #endif
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50 
51 #include <machine/cpu.h>
52 
53 #ifdef DDB
54 #include <ddb/ddb.h>
55 #endif
56 
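/*
 * The lockmgr-specific class flags must fit within LO_CLASSFLAGS, and
 * LK_UNLOCKED must not overlap the waiters and spinners bits.
 */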
57 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
58     (LK_ADAPTIVE | LK_NOSHARE));
59 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
60     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
61 
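/*
 * Indices of the two sleepqueues used by a lockmgr lock: one for threads
 * waiting for an exclusive lock and one for threads waiting for a shared
 * lock.
 */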
62 #define	SQ_EXCLUSIVE_QUEUE	0
63 #define	SQ_SHARED_QUEUE		1
64 
65 #ifndef INVARIANTS
66 #define	_lockmgr_assert(lk, what, file, line)
67 #define	TD_LOCKS_INC(td)
68 #define	TD_LOCKS_DEC(td)
69 #else
70 #define	TD_LOCKS_INC(td)	((td)->td_locks++)
71 #define	TD_LOCKS_DEC(td)	((td)->td_locks--)
72 #endif
73 #define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
74 #define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
75 
76 #ifndef DEBUG_LOCKS
77 #define	STACK_PRINT(lk)
78 #define	STACK_SAVE(lk)
79 #define	STACK_ZERO(lk)
80 #else
81 #define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
82 #define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
83 #define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
84 #endif
85 
86 #define	LOCK_LOG2(lk, string, arg1, arg2)				\
87 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
88 		CTR2(KTR_LOCK, (string), (arg1), (arg2))
89 #define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
90 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
91 		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
92 
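/*
 * Giant handling: before sleeping or spinning, GIANT_SAVE() drops Giant
 * completely, remembering how many times curthread had acquired it;
 * GIANT_RESTORE() reacquires it the same number of times afterwards.
 */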
93 #define	GIANT_DECLARE							\
94 	int _i = 0;							\
95 	WITNESS_SAVE_DECL(Giant)
96 #define	GIANT_RESTORE() do {						\
97 	if (_i > 0) {							\
98 		while (_i--)						\
99 			mtx_lock(&Giant);				\
100 		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
101 	}								\
102 } while (0)
103 #define	GIANT_SAVE() do {						\
104 	if (mtx_owned(&Giant)) {					\
105 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
106 		while (mtx_owned(&Giant)) {				\
107 			_i++;						\
108 			mtx_unlock(&Giant);				\
109 		}							\
110 	}								\
111 } while (0)
112 
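/*
 * LK_CAN_SHARE() tells whether the lock state x allows a new shared
 * acquisition: the lock must not be held exclusively (LK_SHARE set) and
 * either the exclusive waiters or the exclusive spinners bit must be
 * clear, unless the requesting thread already holds shared locks or has
 * TDP_DEADLKTREAT set.
 */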
113 #define	LK_CAN_SHARE(x)							\
114 	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
115 	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
116 	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
117 #define	LK_TRYOP(x)							\
118 	((x) & LK_NOWAIT)
119 
120 #define	LK_CAN_WITNESS(x)						\
121 	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
122 #define	LK_TRYWIT(x)							\
123 	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
124 
125 #define	LK_CAN_ADAPT(lk, f)						\
126 	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
127 	((f) & LK_SLEEPFAIL) == 0)
128 
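/*
 * lockmgr_disowned() and lockmgr_xlocked() check whether the lock is
 * exclusively owned by LK_KERNPROC or by curthread respectively,
 * ignoring the waiters and spinners bits.
 */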
129 #define	lockmgr_disowned(lk)						\
130 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
131 
132 #define	lockmgr_xlocked(lk)						\
133 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
134 
135 static void	 assert_lockmgr(struct lock_object *lock, int what);
136 #ifdef DDB
137 static void	 db_show_lockmgr(struct lock_object *lock);
138 #endif
139 static void	 lock_lockmgr(struct lock_object *lock, int how);
140 #ifdef KDTRACE_HOOKS
141 static int	 owner_lockmgr(struct lock_object *lock, struct thread **owner);
142 #endif
143 static int	 unlock_lockmgr(struct lock_object *lock);
144 
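/*
 * Lock class descriptor used by the generic locking framework (WITNESS,
 * DDB and the lc_lock/lc_unlock callbacks) for all lockmgr locks.
 */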
145 struct lock_class lock_class_lockmgr = {
146 	.lc_name = "lockmgr",
147 	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
148 	.lc_assert = assert_lockmgr,
149 #ifdef DDB
150 	.lc_ddb_show = db_show_lockmgr,
151 #endif
152 	.lc_lock = lock_lockmgr,
153 	.lc_unlock = unlock_lockmgr,
154 #ifdef KDTRACE_HOOKS
155 	.lc_owner = owner_lockmgr,
156 #endif
157 };
158 
159 #ifdef ADAPTIVE_LOCKMGRS
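/*
 * Tunables for adaptive spinning: alk_retries bounds how many times a
 * thread restarts the spin loop and alk_loops bounds the iterations of
 * each loop.  Both are exported under the debug.lockmgr sysctl tree.
 */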
160 static u_int alk_retries = 10;
161 static u_int alk_loops = 10000;
162 SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
163 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
164 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
165 #endif
166 
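/*
 * Return the thread owning the lock exclusively, or NULL if the lock is
 * unlocked or held in shared mode.
 */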
167 static __inline struct thread *
168 lockmgr_xholder(struct lock *lk)
169 {
170 	uintptr_t x;
171 
172 	x = lk->lk_lock;
173 	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
174 }
175 
176 /*
177  * This function assumes that the sleepqueue chain lock is held on entry
178  * and returns with it released.  It also assumes that the generic
179  * interlock is valid and has already been checked.  If LK_INTERLOCK is
180  * specified, the interlock is not reacquired after the sleep.
181  */
182 static __inline int
183 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
184     const char *wmesg, int pri, int timo, int queue)
185 {
186 	GIANT_DECLARE;
187 	struct lock_class *class;
188 	int catch, error;
189 
190 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
191 	catch = pri & PCATCH;
192 	pri &= PRIMASK;
193 	error = 0;
194 
195 	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
196 	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
197 
198 	if (flags & LK_INTERLOCK)
199 		class->lc_unlock(ilk);
200 	GIANT_SAVE();
201 	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
202 	    SLEEPQ_INTERRUPTIBLE : 0), queue);
203 	if ((flags & LK_TIMELOCK) && timo)
204 		sleepq_set_timeout(&lk->lock_object, timo);
205 
206 	/*
207 	 * Choose the sleep primitive based on the timeout and catch settings.
208 	 */
209 	if ((flags & LK_TIMELOCK) && timo && catch)
210 		error = sleepq_timedwait_sig(&lk->lock_object, pri);
211 	else if ((flags & LK_TIMELOCK) && timo)
212 		error = sleepq_timedwait(&lk->lock_object, pri);
213 	else if (catch)
214 		error = sleepq_wait_sig(&lk->lock_object, pri);
215 	else
216 		sleepq_wait(&lk->lock_object, pri);
217 	GIANT_RESTORE();
218 	if ((flags & LK_SLEEPFAIL) && error == 0)
219 		error = ENOLCK;
220 
221 	return (error);
222 }
223 
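/*
 * Release one shared reference.  When the last sharer goes away, wake up
 * the queued waiters, preferring the exclusive queue over the shared one.
 * Returns non-zero if the swapper needs to be kicked by the caller.
 */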
224 static __inline int
225 wakeupshlk(struct lock *lk, const char *file, int line)
226 {
227 	uintptr_t v, x;
228 	int queue, wakeup_swapper;
229 
230 	TD_LOCKS_DEC(curthread);
231 	TD_SLOCKS_DEC(curthread);
232 	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
233 	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
234 
235 	wakeup_swapper = 0;
236 	for (;;) {
237 		x = lk->lk_lock;
238 
239 		/*
240 		 * If there is more than one shared lock held, just drop one
241 		 * and return.
242 		 */
243 		if (LK_SHARERS(x) > 1) {
244 			if (atomic_cmpset_ptr(&lk->lk_lock, x,
245 			    x - LK_ONE_SHARER))
246 				break;
247 			continue;
248 		}
249 
250 		/*
251 		 * If there are no waiters queued, drop the lock
252 		 * quickly.
253 		 */
254 		if ((x & LK_ALL_WAITERS) == 0) {
255 			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
256 			    LK_SHARERS_LOCK(1));
257 			if (atomic_cmpset_ptr(&lk->lk_lock, x, LK_UNLOCKED))
258 				break;
259 			continue;
260 		}
261 
262 		/*
263 		 * We should have a sharer with waiters, so enter the hard
264 		 * path in order to handle wakeups correctly.
265 		 */
266 		sleepq_lock(&lk->lock_object);
267 		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
268 		v = LK_UNLOCKED;
269 
270 		/*
271 		 * If the lock has exclusive waiters, give them preference in
272 		 * order to avoid deadlock with the shared runners-up.
273 		 */
274 		if (x & LK_EXCLUSIVE_WAITERS) {
275 			queue = SQ_EXCLUSIVE_QUEUE;
276 			v |= (x & LK_SHARED_WAITERS);
277 		} else {
278 			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
279 			    LK_SHARED_WAITERS);
280 			queue = SQ_SHARED_QUEUE;
281 		}
282 
283 		if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
284 		    v)) {
285 			sleepq_release(&lk->lock_object);
286 			continue;
287 		}
288 		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
289 		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
290 		    "exclusive");
291 		wakeup_swapper = sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
292 		    0, queue);
293 		sleepq_release(&lk->lock_object);
294 		break;
295 	}
296 
297 	lock_profile_release_lock(&lk->lock_object);
298 	return (wakeup_swapper);
299 }
300 
301 static void
302 assert_lockmgr(struct lock_object *lock, int what)
303 {
304 
305 	panic("lockmgr locks do not support assertions");
306 }
307 
308 static void
309 lock_lockmgr(struct lock_object *lock, int how)
310 {
311 
312 	panic("lockmgr locks do not support sleep interlocking");
313 }
314 
315 static int
316 unlock_lockmgr(struct lock_object *lock)
317 {
318 
319 	panic("lockmgr locks do not support sleep interlocking");
320 }
321 
322 #ifdef KDTRACE_HOOKS
323 static int
324 owner_lockmgr(struct lock_object *lock, struct thread **owner)
325 {
326 
327 	panic("lockmgr locks do not support owner inquiring");
328 }
329 #endif
330 
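/*
 * Initialize a lockmgr lock: translate the LK_* initialization flags into
 * the generic LO_* lock_object flags, record the default priority and
 * timeout and set the lock state to unlocked.
 */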
331 void
332 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
333 {
334 	int iflags;
335 
336 	MPASS((flags & ~LK_INIT_MASK) == 0);
337 
338 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
339 	if (flags & LK_CANRECURSE)
340 		iflags |= LO_RECURSABLE;
341 	if ((flags & LK_NODUP) == 0)
342 		iflags |= LO_DUPOK;
343 	if (flags & LK_NOPROFILE)
344 		iflags |= LO_NOPROFILE;
345 	if ((flags & LK_NOWITNESS) == 0)
346 		iflags |= LO_WITNESS;
347 	if (flags & LK_QUIET)
348 		iflags |= LO_QUIET;
349 	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
350 
351 	lk->lk_lock = LK_UNLOCKED;
352 	lk->lk_recurse = 0;
353 	lk->lk_timo = timo;
354 	lk->lk_pri = pri;
355 	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
356 	STACK_ZERO(lk);
357 }
358 
359 void
360 lockdestroy(struct lock *lk)
361 {
362 
363 	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
364 	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
365 	lock_destroy(&lk->lock_object);
366 }
367 
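/*
 * The core of the lockmgr implementation: perform the operation encoded
 * in the LK_TYPE_MASK bits of flags (shared, exclusive, upgrade,
 * downgrade, release or drain) on behalf of curthread, optionally
 * managing the interlock ilk and honoring the per-call wmesg, pri and
 * timo overrides of the defaults recorded at lockinit() time.
 */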
368 int
369 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
370     const char *wmesg, int pri, int timo, const char *file, int line)
371 {
372 	GIANT_DECLARE;
373 	struct lock_class *class;
374 	const char *iwmesg;
375 	uintptr_t tid, v, x;
376 	u_int op;
377 	int error, ipri, itimo, queue, wakeup_swapper;
378 #ifdef LOCK_PROFILING
379 	uint64_t waittime = 0;
380 	int contested = 0;
381 #endif
382 #ifdef ADAPTIVE_LOCKMGRS
383 	volatile struct thread *owner;
384 	u_int i, spintries = 0;
385 #endif
386 
387 	error = 0;
388 	tid = (uintptr_t)curthread;
389 	op = (flags & LK_TYPE_MASK);
390 	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
391 	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
392 	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
393 
394 	MPASS((flags & ~LK_TOTAL_MASK) == 0);
395 	KASSERT((op & (op - 1)) == 0,
396 	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
397 	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
398 	    (op != LK_DOWNGRADE && op != LK_RELEASE),
399 	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
400 	    __func__, file, line));
401 	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
402 	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
403 	    __func__, file, line));
404 
405 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
406 	if (panicstr != NULL) {
407 		if (flags & LK_INTERLOCK)
408 			class->lc_unlock(ilk);
409 		return (0);
410 	}
411 
412 	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
413 		op = LK_EXCLUSIVE;
414 
415 	wakeup_swapper = 0;
416 	switch (op) {
417 	case LK_SHARED:
418 		if (LK_CAN_WITNESS(flags))
419 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
420 			    file, line, ilk);
421 		for (;;) {
422 			x = lk->lk_lock;
423 
424 			/*
425 			 * If no other thread holds an exclusive lock and no
426 			 * exclusive waiter is present, bump the count of
427 			 * sharers.  Since we have to preserve the state of
428 			 * the waiters, loop back and retry if we fail to
429 			 * acquire the shared lock.
430 			 */
431 			if (LK_CAN_SHARE(x)) {
432 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
433 				    x + LK_ONE_SHARER))
434 					break;
435 				continue;
436 			}
437 			lock_profile_obtain_lock_failed(&lk->lock_object,
438 			    &contested, &waittime);
439 
440 			/*
441 			 * If the lock is already held by curthread in
442 			 * exclusive mode, fail in order to avoid a deadlock.
443 			 */
444 			if (LK_HOLDER(x) == tid) {
445 				LOCK_LOG2(lk,
446 				    "%s: %p already held in exclusive mode",
447 				    __func__, lk);
448 				error = EDEADLK;
449 				break;
450 			}
451 
452 			/*
453 			 * If the caller is not allowed to sleep, just give up
454 			 * and return.
455 			 */
456 			if (LK_TRYOP(flags)) {
457 				LOCK_LOG2(lk, "%s: %p fails the try operation",
458 				    __func__, lk);
459 				error = EBUSY;
460 				break;
461 			}
462 
463 #ifdef ADAPTIVE_LOCKMGRS
464 			/*
465 			 * If the owner is running on another CPU, spin until
466 			 * the owner stops running or the state of the lock
467 			 * changes.
468 			 */
469 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
470 			    LK_HOLDER(x) != LK_KERNPROC) {
471 				owner = (struct thread *)LK_HOLDER(x);
472 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
473 					CTR3(KTR_LOCK,
474 					    "%s: spinning on %p held by %p",
475 					    __func__, lk, owner);
476 
477 				/*
478 				 * If we are also holding an interlock, drop it
479 				 * in order to avoid a deadlock if the lockmgr
480 				 * owner is adaptively spinning on the
481 				 * interlock itself.
482 				 */
483 				if (flags & LK_INTERLOCK) {
484 					class->lc_unlock(ilk);
485 					flags &= ~LK_INTERLOCK;
486 				}
487 				GIANT_SAVE();
488 				while (LK_HOLDER(lk->lk_lock) ==
489 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
490 					cpu_spinwait();
491 			} else if (LK_CAN_ADAPT(lk, flags) &&
492 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
493 			    spintries < alk_retries) {
494 				if (flags & LK_INTERLOCK) {
495 					class->lc_unlock(ilk);
496 					flags &= ~LK_INTERLOCK;
497 				}
498 				GIANT_SAVE();
499 				spintries++;
500 				for (i = 0; i < alk_loops; i++) {
501 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
502 						CTR4(KTR_LOCK,
503 				    "%s: shared spinning on %p with %u and %u",
504 						    __func__, lk, spintries, i);
505 					x = lk->lk_lock;
506 					if ((x & LK_SHARE) == 0 ||
507 					    LK_CAN_SHARE(x) != 0)
508 						break;
509 					cpu_spinwait();
510 				}
511 				if (i != alk_loops)
512 					continue;
513 			}
514 #endif
515 
516 			/*
517 			 * Acquire the sleepqueue chain lock because we will
518 			 * probably need to manipulate the waiters flags.
519 			 */
520 			sleepq_lock(&lk->lock_object);
521 			x = lk->lk_lock;
522 
523 			/*
524 			 * If the lock can be acquired in shared mode, try
525 			 * again.
526 			 */
527 			if (LK_CAN_SHARE(x)) {
528 				sleepq_release(&lk->lock_object);
529 				continue;
530 			}
531 
532 #ifdef ADAPTIVE_LOCKMGRS
533 			/*
534 			 * The current lock owner might have started executing
535 			 * on another CPU (or the lock could have changed
536 			 * owner) while we were waiting on the sleepqueue
537 			 * chain lock.  If so, drop the sleepqueue lock and try
538 			 * again.
539 			 */
540 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
541 			    LK_HOLDER(x) != LK_KERNPROC) {
542 				owner = (struct thread *)LK_HOLDER(x);
543 				if (TD_IS_RUNNING(owner)) {
544 					sleepq_release(&lk->lock_object);
545 					continue;
546 				}
547 			}
548 #endif
549 
550 			/*
551 			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
552 			 * loop back and retry.
553 			 */
554 			if ((x & LK_SHARED_WAITERS) == 0) {
555 				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
556 				    x | LK_SHARED_WAITERS)) {
557 					sleepq_release(&lk->lock_object);
558 					continue;
559 				}
560 				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
561 				    __func__, lk);
562 			}
563 
564 			/*
565 			 * Since we have been unable to acquire the
566 			 * shared lock and the shared waiters flag is set,
567 			 * we will sleep.
568 			 */
569 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
570 			    SQ_SHARED_QUEUE);
571 			flags &= ~LK_INTERLOCK;
572 			if (error) {
573 				LOCK_LOG3(lk,
574 				    "%s: interrupted sleep for %p with %d",
575 				    __func__, lk, error);
576 				break;
577 			}
578 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
579 			    __func__, lk);
580 		}
581 		if (error == 0) {
582 			lock_profile_obtain_lock_success(&lk->lock_object,
583 			    contested, waittime, file, line);
584 			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
585 			    line);
586 			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
587 			    line);
588 			TD_LOCKS_INC(curthread);
589 			TD_SLOCKS_INC(curthread);
590 			STACK_SAVE(lk);
591 		}
592 		break;
593 	case LK_UPGRADE:
594 		_lockmgr_assert(lk, KA_SLOCKED, file, line);
595 		v = lk->lk_lock;
596 		x = v & LK_ALL_WAITERS;
597 		v &= LK_EXCLUSIVE_SPINNERS;
598 
599 		/*
600 		 * Try to switch from one shared lock to an exclusive one.
601 		 * We need to preserve waiters flags during the operation.
602 		 */
603 		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
604 		    tid | x)) {
605 			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
606 			    line);
607 			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
608 			    LK_TRYWIT(flags), file, line);
609 			TD_SLOCKS_DEC(curthread);
610 			break;
611 		}
612 
613 		/*
614 		 * The upgrade attempt failed, so just give up the shared
615 		 * lock and fall through to acquire it exclusively.
616 		 */
617 		wakeup_swapper |= wakeupshlk(lk, file, line);
618 
619 		/* FALLTHROUGH */
620 	case LK_EXCLUSIVE:
621 		if (LK_CAN_WITNESS(flags))
622 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
623 			    LOP_EXCLUSIVE, file, line, ilk);
624 
625 		/*
626 		 * If curthread already holds the lock and this one is
627 		 * allowed to recurse, simply recurse on it.
628 		 */
629 		if (lockmgr_xlocked(lk)) {
630 			if ((flags & LK_CANRECURSE) == 0 &&
631 			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
632 
633 				/*
634 				 * If this is a try operation, fail with EBUSY
635 				 * instead of panicking.
636 				 */
637 				if (LK_TRYOP(flags)) {
638 					LOCK_LOG2(lk,
639 					    "%s: %p fails the try operation",
640 					    __func__, lk);
641 					error = EBUSY;
642 					break;
643 				}
644 				if (flags & LK_INTERLOCK)
645 					class->lc_unlock(ilk);
646 		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
647 				    __func__, iwmesg, file, line);
648 			}
649 			lk->lk_recurse++;
650 			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
651 			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
652 			    lk->lk_recurse, file, line);
653 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
654 			    LK_TRYWIT(flags), file, line);
655 			TD_LOCKS_INC(curthread);
656 			break;
657 		}
658 
659 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
660 		    tid)) {
661 			lock_profile_obtain_lock_failed(&lk->lock_object,
662 			    &contested, &waittime);
663 
664 			/*
665 			 * If the caller is not allowed to sleep, just give up
666 			 * and return.
667 			 */
668 			if (LK_TRYOP(flags)) {
669 				LOCK_LOG2(lk, "%s: %p fails the try operation",
670 				    __func__, lk);
671 				error = EBUSY;
672 				break;
673 			}
674 
675 #ifdef ADAPTIVE_LOCKMGRS
676 			/*
677 			 * If the owner is running on another CPU, spin until
678 			 * the owner stops running or the state of the lock
679 			 * changes.
680 			 */
681 			x = lk->lk_lock;
682 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
683 			    LK_HOLDER(x) != LK_KERNPROC) {
684 				owner = (struct thread *)LK_HOLDER(x);
685 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
686 					CTR3(KTR_LOCK,
687 					    "%s: spinning on %p held by %p",
688 					    __func__, lk, owner);
689 
690 				/*
691 				 * If we are also holding an interlock, drop it
692 				 * in order to avoid a deadlock if the lockmgr
693 				 * owner is adaptively spinning on the
694 				 * interlock itself.
695 				 */
696 				if (flags & LK_INTERLOCK) {
697 					class->lc_unlock(ilk);
698 					flags &= ~LK_INTERLOCK;
699 				}
700 				GIANT_SAVE();
701 				while (LK_HOLDER(lk->lk_lock) ==
702 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
703 					cpu_spinwait();
704 			} else if (LK_CAN_ADAPT(lk, flags) &&
705 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
706 			    spintries < alk_retries) {
707 				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
708 				    !atomic_cmpset_ptr(&lk->lk_lock, x,
709 				    x | LK_EXCLUSIVE_SPINNERS))
710 					continue;
711 				if (flags & LK_INTERLOCK) {
712 					class->lc_unlock(ilk);
713 					flags &= ~LK_INTERLOCK;
714 				}
715 				GIANT_SAVE();
716 				spintries++;
717 				for (i = 0; i < alk_loops; i++) {
718 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
719 						CTR4(KTR_LOCK,
720 				    "%s: shared spinning on %p with %u and %u",
721 						    __func__, lk, spintries, i);
722 					if ((lk->lk_lock &
723 					    LK_EXCLUSIVE_SPINNERS) == 0)
724 						break;
725 					cpu_spinwait();
726 				}
727 				if (i != alk_loops)
728 					continue;
729 			}
730 #endif
731 
732 			/*
733 			 * Acquire the sleepqueue chain lock because we will
734 			 * probably need to manipulate the waiters flags.
735 			 */
736 			sleepq_lock(&lk->lock_object);
737 			x = lk->lk_lock;
738 
739 			/*
740 			 * If the lock has been released while we spun on
741 			 * the sleepqueue chain lock just try again.
742 			 */
743 			if (x == LK_UNLOCKED) {
744 				sleepq_release(&lk->lock_object);
745 				continue;
746 			}
747 
748 #ifdef ADAPTIVE_LOCKMGRS
749 			/*
750 			 * The current lock owner might have started executing
751 			 * on another CPU (or the lock could have changed
752 			 * owner) while we were waiting on the sleepqueue
753 			 * chain lock.  If so, drop the sleepqueue lock and try
754 			 * again.
755 			 */
756 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
757 			    LK_HOLDER(x) != LK_KERNPROC) {
758 				owner = (struct thread *)LK_HOLDER(x);
759 				if (TD_IS_RUNNING(owner)) {
760 					sleepq_release(&lk->lock_object);
761 					continue;
762 				}
763 			}
764 #endif
765 
766 			/*
767 			 * The lock can be in a state where there is a
768 			 * pending queue of waiters but no owner.  This
769 			 * happens when the lock is contested and a woken-up
770 			 * waiter has not yet claimed ownership.
771 			 * If curthread is the one that successfully acquires
772 			 * it here, claim lock ownership and return, preserving
773 			 * the waiters flags.
774 			 */
775 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
776 			if ((x & ~v) == LK_UNLOCKED) {
777 				v &= ~LK_EXCLUSIVE_SPINNERS;
778 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
779 				    tid | v)) {
780 					sleepq_release(&lk->lock_object);
781 					LOCK_LOG2(lk,
782 					    "%s: %p claimed by a new writer",
783 					    __func__, lk);
784 					break;
785 				}
786 				sleepq_release(&lk->lock_object);
787 				continue;
788 			}
789 
790 			/*
791 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
792 			 * fail, loop back and retry.
793 			 */
794 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
795 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
796 				    x | LK_EXCLUSIVE_WAITERS)) {
797 					sleepq_release(&lk->lock_object);
798 					continue;
799 				}
800 				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
801 				    __func__, lk);
802 			}
803 
804 			/*
805 			 * Since we have been unable to acquire the
806 			 * exclusive lock and the exclusive waiters flag
807 			 * is set, we will sleep.
808 			 */
809 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
810 			    SQ_EXCLUSIVE_QUEUE);
811 			flags &= ~LK_INTERLOCK;
812 			if (error) {
813 				LOCK_LOG3(lk,
814 				    "%s: interrupted sleep for %p with %d",
815 				    __func__, lk, error);
816 				break;
817 			}
818 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
819 			    __func__, lk);
820 		}
821 		if (error == 0) {
822 			lock_profile_obtain_lock_success(&lk->lock_object,
823 			    contested, waittime, file, line);
824 			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
825 			    lk->lk_recurse, file, line);
826 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
827 			    LK_TRYWIT(flags), file, line);
828 			TD_LOCKS_INC(curthread);
829 			STACK_SAVE(lk);
830 		}
831 		break;
832 	case LK_DOWNGRADE:
833 		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
834 		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
835 		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
836 		TD_SLOCKS_INC(curthread);
837 
838 		/*
839 		 * In order to preserve waiters flags, just spin.
840 		 */
841 		for (;;) {
842 			x = lk->lk_lock;
843 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
844 			x &= LK_ALL_WAITERS;
845 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
846 			    LK_SHARERS_LOCK(1) | x))
847 				break;
848 			cpu_spinwait();
849 		}
850 		break;
851 	case LK_RELEASE:
852 		_lockmgr_assert(lk, KA_LOCKED, file, line);
853 		x = lk->lk_lock;
854 
855 		if ((x & LK_SHARE) == 0) {
856 
857 			/*
858 			 * As a first attempt, treat the lock as if it has no
859 			 * waiters.
860 			 * Fix up the tid var if the lock has been disowned.
861 			 */
862 			if (LK_HOLDER(x) == LK_KERNPROC)
863 				tid = LK_KERNPROC;
864 			else {
865 				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
866 				    file, line);
867 				TD_LOCKS_DEC(curthread);
868 			}
869 			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
870 			    lk->lk_recurse, file, line);
871 
872 			/*
873 			 * The lock is held in exclusive mode.
874 			 * If the lock is recursed also, then unrecurse it.
875 			 */
876 			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
877 				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
878 				    lk);
879 				lk->lk_recurse--;
880 				break;
881 			}
882 			if (tid != LK_KERNPROC)
883 				lock_profile_release_lock(&lk->lock_object);
884 
885 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
886 			    LK_UNLOCKED))
887 				break;
888 
889 			sleepq_lock(&lk->lock_object);
890 			x = lk->lk_lock;
891 			v = LK_UNLOCKED;
892 
893 			/*
894 			 * If the lock has exclusive waiters, give them
895 			 * preference in order to avoid deadlock with the
896 			 * shared runners-up.
897 			 */
898 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
899 			if (x & LK_EXCLUSIVE_WAITERS) {
900 				queue = SQ_EXCLUSIVE_QUEUE;
901 				v |= (x & LK_SHARED_WAITERS);
902 			} else {
903 				MPASS((x & LK_ALL_WAITERS) ==
904 				    LK_SHARED_WAITERS);
905 				queue = SQ_SHARED_QUEUE;
906 			}
907 
908 			LOCK_LOG3(lk,
909 			    "%s: %p waking up threads on the %s queue",
910 			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
911 			    "exclusive");
912 			atomic_store_rel_ptr(&lk->lk_lock, v);
913 			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
914 			    SLEEPQ_LK, 0, queue);
915 			sleepq_release(&lk->lock_object);
916 			break;
917 		} else
918 			wakeup_swapper = wakeupshlk(lk, file, line);
919 		break;
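	/*
	 * LK_DRAIN acquires the lock exclusively and, on the way, wakes up
	 * every queued waiter, so that the caller ends up holding a lock
	 * with no pending waiters (typically just before lockdestroy()).
	 */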
920 	case LK_DRAIN:
921 		if (LK_CAN_WITNESS(flags))
922 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
923 			    LOP_EXCLUSIVE, file, line, ilk);
924 
925 		/*
926 		 * Trying to drain a lock we already own will result in a
927 		 * deadlock.
928 		 */
929 		if (lockmgr_xlocked(lk)) {
930 			if (flags & LK_INTERLOCK)
931 				class->lc_unlock(ilk);
932 			panic("%s: draining %s with the lock held @ %s:%d\n",
933 			    __func__, iwmesg, file, line);
934 		}
935 
936 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
937 			lock_profile_obtain_lock_failed(&lk->lock_object,
938 			    &contested, &waittime);
939 
940 			/*
941 			 * If the caller is not allowed to sleep, just give up
942 			 * and return.
943 			 */
944 			if (LK_TRYOP(flags)) {
945 				LOCK_LOG2(lk, "%s: %p fails the try operation",
946 				    __func__, lk);
947 				error = EBUSY;
948 				break;
949 			}
950 
951 			/*
952 			 * Acquire the sleepqueue chain lock because we will
953 			 * probably need to manipulate the waiters flags.
954 			 */
955 			sleepq_lock(&lk->lock_object);
956 			x = lk->lk_lock;
957 
958 			/*
959 			 * If the lock has been released while we spun on
960 			 * the sleepqueue chain lock just try again.
961 			 */
962 			if (x == LK_UNLOCKED) {
963 				sleepq_release(&lk->lock_object);
964 				continue;
965 			}
966 
967 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
968 			if ((x & ~v) == LK_UNLOCKED) {
969 				v = (x & ~LK_EXCLUSIVE_SPINNERS);
970 				if (v & LK_EXCLUSIVE_WAITERS) {
971 					queue = SQ_EXCLUSIVE_QUEUE;
972 					v &= ~LK_EXCLUSIVE_WAITERS;
973 				} else {
974 					MPASS(v & LK_SHARED_WAITERS);
975 					queue = SQ_SHARED_QUEUE;
976 					v &= ~LK_SHARED_WAITERS;
977 				}
978 				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
979 					sleepq_release(&lk->lock_object);
980 					continue;
981 				}
982 				LOCK_LOG3(lk,
983 				"%s: %p waking up all threads on the %s queue",
984 				    __func__, lk, queue == SQ_SHARED_QUEUE ?
985 				    "shared" : "exclusive");
986 				wakeup_swapper |= sleepq_broadcast(
987 				    &lk->lock_object, SLEEPQ_LK, 0, queue);
988 
989 				/*
990 				 * If shared waiters have been woken up we need
991 				 * to wait for one of them to acquire the lock
992 				 * before setting the exclusive waiters flag in
993 				 * order to avoid a deadlock.
994 				 */
995 				if (queue == SQ_SHARED_QUEUE) {
996 					for (v = lk->lk_lock;
997 					    (v & LK_SHARE) && !LK_SHARERS(v);
998 					    v = lk->lk_lock)
999 						cpu_spinwait();
1000 				}
1001 			}
1002 
1003 			/*
1004 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1005 			 * fail, loop back and retry.
1006 			 */
1007 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1008 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1009 				    x | LK_EXCLUSIVE_WAITERS)) {
1010 					sleepq_release(&lk->lock_object);
1011 					continue;
1012 				}
1013 				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1014 				    __func__, lk);
1015 			}
1016 
1017 			/*
1018 			 * Since we have been unable to acquire the
1019 			 * exclusive lock and the exclusive waiters flag
1020 			 * is set, we will sleep.
1021 			 */
1022 			if (flags & LK_INTERLOCK) {
1023 				class->lc_unlock(ilk);
1024 				flags &= ~LK_INTERLOCK;
1025 			}
1026 			GIANT_SAVE();
1027 			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1028 			    SQ_EXCLUSIVE_QUEUE);
1029 			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1030 			GIANT_RESTORE();
1031 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1032 			    __func__, lk);
1033 		}
1034 
1035 		if (error == 0) {
1036 			lock_profile_obtain_lock_success(&lk->lock_object,
1037 			    contested, waittime, file, line);
1038 			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1039 			    lk->lk_recurse, file, line);
1040 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1041 			    LK_TRYWIT(flags), file, line);
1042 			TD_LOCKS_INC(curthread);
1043 			STACK_SAVE(lk);
1044 		}
1045 		break;
1046 	default:
1047 		if (flags & LK_INTERLOCK)
1048 			class->lc_unlock(ilk);
1049 		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1050 	}
1051 
1052 	if (flags & LK_INTERLOCK)
1053 		class->lc_unlock(ilk);
1054 	if (wakeup_swapper)
1055 		kick_proc0();
1056 
1057 	return (error);
1058 }
1059 
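/*
 * Disown an exclusively held lock: transfer ownership from curthread to
 * LK_KERNPROC, preserving the waiters flags, so that the lock can be
 * released later by a different thread.
 */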
1060 void
1061 _lockmgr_disown(struct lock *lk, const char *file, int line)
1062 {
1063 	uintptr_t tid, x;
1064 
1065 	tid = (uintptr_t)curthread;
1066 	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
1067 
1068 	/*
1069 	 * If the owner is already LK_KERNPROC just skip the whole operation.
1070 	 * If the owner is already LK_KERNPROC, just skip the whole operation.
1071 	if (LK_HOLDER(lk->lk_lock) != tid)
1072 		return;
1073 	lock_profile_release_lock(&lk->lock_object);
1074 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1075 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1076 	TD_LOCKS_DEC(curthread);
1077 
1078 	/*
1079 	 * In order to preserve waiters flags, just spin.
1080 	 */
1081 	for (;;) {
1082 		x = lk->lk_lock;
1083 		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1084 		x &= LK_ALL_WAITERS;
1085 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1086 		    LK_KERNPROC | x))
1087 			return;
1088 		cpu_spinwait();
1089 	}
1090 }
1091 
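/*
 * Dump the state of the lock (owner or number of sharers, plus any
 * pending waiters and spinners) to the console.
 */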
1092 void
1093 lockmgr_printinfo(struct lock *lk)
1094 {
1095 	struct thread *td;
1096 	uintptr_t x;
1097 
1098 	if (lk->lk_lock == LK_UNLOCKED)
1099 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1100 	else if (lk->lk_lock & LK_SHARE)
1101 		printf("lock type %s: SHARED (count %ju)\n",
1102 		    lk->lock_object.lo_name,
1103 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1104 	else {
1105 		td = lockmgr_xholder(lk);
1106 		printf("lock type %s: EXCL by thread %p (pid %d)\n",
1107 		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
1108 	}
1109 
1110 	x = lk->lk_lock;
1111 	if (x & LK_EXCLUSIVE_WAITERS)
1112 		printf(" with exclusive waiters pending\n");
1113 	if (x & LK_SHARED_WAITERS)
1114 		printf(" with shared waiters pending\n");
1115 	if (x & LK_EXCLUSIVE_SPINNERS)
1116 		printf(" with exclusive spinners pending\n");
1117 
1118 	STACK_PRINT(lk);
1119 }
1120 
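/*
 * Report how the lock is currently held: LK_EXCLUSIVE if curthread (or
 * LK_KERNPROC) owns it, LK_EXCLOTHER if another thread owns it,
 * LK_SHARED if it is held in shared mode and 0 if it is unlocked.
 */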
1121 int
1122 lockstatus(struct lock *lk)
1123 {
1124 	uintptr_t v, x;
1125 	int ret;
1126 
1127 	ret = LK_SHARED;
1128 	x = lk->lk_lock;
1129 	v = LK_HOLDER(x);
1130 
1131 	if ((x & LK_SHARE) == 0) {
1132 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1133 			ret = LK_EXCLUSIVE;
1134 		else
1135 			ret = LK_EXCLOTHER;
1136 	} else if (x == LK_UNLOCKED)
1137 		ret = 0;
1138 
1139 	return (ret);
1140 }
1141 
1142 #ifdef INVARIANT_SUPPORT
1143 #ifndef INVARIANTS
1144 #undef	_lockmgr_assert
1145 #endif
1146 
1147 void
1148 _lockmgr_assert(struct lock *lk, int what, const char *file, int line)
1149 {
1150 	int slocked = 0;
1151 
1152 	if (panicstr != NULL)
1153 		return;
1154 	switch (what) {
1155 	case KA_SLOCKED:
1156 	case KA_SLOCKED | KA_NOTRECURSED:
1157 	case KA_SLOCKED | KA_RECURSED:
1158 		slocked = 1;
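		/* FALLTHROUGH */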
1159 	case KA_LOCKED:
1160 	case KA_LOCKED | KA_NOTRECURSED:
1161 	case KA_LOCKED | KA_RECURSED:
1162 #ifdef WITNESS
1163 
1164 		/*
1165 		 * We cannot trust WITNESS if the lock is held in exclusive
1166 		 * mode and a call to lockmgr_disown() happened.
1167 		 * Work around this by skipping the check if the lock is
1168 		 * held in exclusive mode, even for the KA_LOCKED case.
1169 		 */
1170 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1171 			witness_assert(&lk->lock_object, what, file, line);
1172 			break;
1173 		}
1174 #endif
1175 		if (lk->lk_lock == LK_UNLOCKED ||
1176 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1177 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1178 			panic("Lock %s not %slocked @ %s:%d\n",
1179 			    lk->lock_object.lo_name, slocked ? "share" : "",
1180 			    file, line);
1181 
1182 		if ((lk->lk_lock & LK_SHARE) == 0) {
1183 			if (lockmgr_recursed(lk)) {
1184 				if (what & KA_NOTRECURSED)
1185 					panic("Lock %s recursed @ %s:%d\n",
1186 					    lk->lock_object.lo_name, file,
1187 					    line);
1188 			} else if (what & KA_RECURSED)
1189 				panic("Lock %s not recursed @ %s:%d\n",
1190 				    lk->lock_object.lo_name, file, line);
1191 		}
1192 		break;
1193 	case KA_XLOCKED:
1194 	case KA_XLOCKED | KA_NOTRECURSED:
1195 	case KA_XLOCKED | KA_RECURSED:
1196 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1197 			panic("Lock %s not exclusively locked @ %s:%d\n",
1198 			    lk->lock_object.lo_name, file, line);
1199 		if (lockmgr_recursed(lk)) {
1200 			if (what & KA_NOTRECURSED)
1201 				panic("Lock %s recursed @ %s:%d\n",
1202 				    lk->lock_object.lo_name, file, line);
1203 		} else if (what & KA_RECURSED)
1204 			panic("Lock %s not recursed @ %s:%d\n",
1205 			    lk->lock_object.lo_name, file, line);
1206 		break;
1207 	case KA_UNLOCKED:
1208 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1209 			panic("Lock %s exclusively locked @ %s:%d\n",
1210 			    lk->lock_object.lo_name, file, line);
1211 		break;
1212 	default:
1213 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1214 		    line);
1215 	}
1216 }
1217 #endif
1218 
1219 #ifdef DDB
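/*
 * Called from DDB: if the channel td is blocked on is a lockmgr lock,
 * describe it and report its exclusive owner (if any) through ownerp;
 * return 0 otherwise.
 */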
1220 int
1221 lockmgr_chain(struct thread *td, struct thread **ownerp)
1222 {
1223 	struct lock *lk;
1224 
1225 	lk = td->td_wchan;
1226 
1227 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1228 		return (0);
1229 	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1230 	if (lk->lk_lock & LK_SHARE)
1231 		db_printf("SHARED (count %ju)\n",
1232 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1233 	else
1234 		db_printf("EXCL\n");
1235 	*ownerp = lockmgr_xholder(lk);
1236 
1237 	return (1);
1238 }
1239 
1240 static void
1241 db_show_lockmgr(struct lock_object *lock)
1242 {
1243 	struct thread *td;
1244 	struct lock *lk;
1245 
1246 	lk = (struct lock *)lock;
1247 
1248 	db_printf(" state: ");
1249 	if (lk->lk_lock == LK_UNLOCKED)
1250 		db_printf("UNLOCKED\n");
1251 	else if (lk->lk_lock & LK_SHARE)
1252 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1253 	else {
1254 		td = lockmgr_xholder(lk);
1255 		if (td == (struct thread *)LK_KERNPROC)
1256 			db_printf("XLOCK: LK_KERNPROC\n");
1257 		else
1258 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1259 			    td->td_tid, td->td_proc->p_pid,
1260 			    td->td_proc->p_comm);
1261 		if (lockmgr_recursed(lk))
1262 			db_printf(" recursed: %d\n", lk->lk_recurse);
1263 	}
1264 	db_printf(" waiters: ");
1265 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1266 	case LK_SHARED_WAITERS:
1267 		db_printf("shared\n");
1268 		break;
1269 	case LK_EXCLUSIVE_WAITERS:
1270 		db_printf("exclusive\n");
1271 		break;
1272 	case LK_ALL_WAITERS:
1273 		db_printf("shared and exclusive\n");
1274 		break;
1275 	default:
1276 		db_printf("none\n");
1277 	}
1278 	db_printf(" spinners: ");
1279 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1280 		db_printf("exclusive\n");
1281 	else
1282 		db_printf("none\n");
1283 }
1284 #endif
1285