xref: /freebsd/sys/kern/kern_lock.c (revision 35ae9291c2621d66ac66ed4a4996761946ac3e2d)
1 /*-
2  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice(s), this list of conditions and the following disclaimer as
10  *    the first lines of this file unmodified other than the possible
11  *    addition of one or more copyright notices.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice(s), this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26  * DAMAGE.
27  */
28 
29 #include "opt_adaptive_lockmgrs.h"
30 #include "opt_ddb.h"
31 #include "opt_kdtrace.h"
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/ktr.h>
38 #include <sys/linker_set.h>
39 #include <sys/lock.h>
40 #include <sys/lock_profile.h>
41 #include <sys/lockmgr.h>
42 #include <sys/mutex.h>
43 #include <sys/proc.h>
44 #include <sys/sleepqueue.h>
45 #ifdef DEBUG_LOCKS
46 #include <sys/stack.h>
47 #endif
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50 
51 #include <machine/cpu.h>
52 
53 #ifdef DDB
54 #include <ddb/ddb.h>
55 #endif
56 
57 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
58     (LK_ADAPTIVE | LK_NOSHARE));
59 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
60     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
61 
62 #define	SQ_EXCLUSIVE_QUEUE	0
63 #define	SQ_SHARED_QUEUE		1
64 
65 #ifndef INVARIANTS
66 #define	_lockmgr_assert(lk, what, file, line)
67 #define	TD_LOCKS_INC(td)
68 #define	TD_LOCKS_DEC(td)
69 #else
70 #define	TD_LOCKS_INC(td)	((td)->td_locks++)
71 #define	TD_LOCKS_DEC(td)	((td)->td_locks--)
72 #endif
73 #define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
74 #define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
75 
76 #ifndef DEBUG_LOCKS
77 #define	STACK_PRINT(lk)
78 #define	STACK_SAVE(lk)
79 #define	STACK_ZERO(lk)
80 #else
81 #define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
82 #define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
83 #define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
84 #endif
85 
86 #define	LOCK_LOG2(lk, string, arg1, arg2)				\
87 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
88 		CTR2(KTR_LOCK, (string), (arg1), (arg2))
89 #define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
90 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
91 		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
92 
93 #define	GIANT_DECLARE							\
94 	int _i = 0;							\
95 	WITNESS_SAVE_DECL(Giant)
96 #define	GIANT_RESTORE() do {						\
97 	if (_i > 0) {							\
98 		while (_i--)						\
99 			mtx_lock(&Giant);				\
100 		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
101 	}								\
102 } while (0)
103 #define	GIANT_SAVE() do {						\
104 	if (mtx_owned(&Giant)) {					\
105 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
106 		while (mtx_owned(&Giant)) {				\
107 			_i++;						\
108 			mtx_unlock(&Giant);				\
109 		}							\
110 	}								\
111 } while (0)
112 
113 #define	LK_CAN_SHARE(x)							\
114 	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
115 	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
116 	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
117 #define	LK_TRYOP(x)							\
118 	((x) & LK_NOWAIT)
119 
120 #define	LK_CAN_WITNESS(x)						\
121 	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
122 #define	LK_TRYWIT(x)							\
123 	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
124 
125 #define	LK_CAN_ADAPT(lk, f)						\
126 	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
127 	((f) & LK_SLEEPFAIL) == 0)
128 
129 #define	lockmgr_disowned(lk)						\
130 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
131 
132 #define	lockmgr_xlocked(lk)						\
133 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
134 
135 static void	 assert_lockmgr(struct lock_object *lock, int how);
136 #ifdef DDB
137 static void	 db_show_lockmgr(struct lock_object *lock);
138 #endif
139 static void	 lock_lockmgr(struct lock_object *lock, int how);
140 #ifdef KDTRACE_HOOKS
141 static int	 owner_lockmgr(struct lock_object *lock, struct thread **owner);
142 #endif
143 static int	 unlock_lockmgr(struct lock_object *lock);
144 
145 struct lock_class lock_class_lockmgr = {
146 	.lc_name = "lockmgr",
147 	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
148 	.lc_assert = assert_lockmgr,
149 #ifdef DDB
150 	.lc_ddb_show = db_show_lockmgr,
151 #endif
152 	.lc_lock = lock_lockmgr,
153 	.lc_unlock = unlock_lockmgr,
154 #ifdef KDTRACE_HOOKS
155 	.lc_owner = owner_lockmgr,
156 #endif
157 };
158 
159 #ifdef ADAPTIVE_LOCKMGRS
160 static u_int alk_retries = 10;
161 static u_int alk_loops = 10000;
162 SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
163 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
164 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
165 #endif
166 
167 static __inline struct thread *
168 lockmgr_xholder(struct lock *lk)
169 {
170 	uintptr_t x;
171 
172 	x = lk->lk_lock;
173 	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
174 }
175 
176 /*
177  * It assumes sleepq_lock held and returns with this one unheld.
178  * It also assumes the generic interlock is sane and previously checked.
179  * If LK_INTERLOCK is specified the interlock is not reacquired after the
180  * sleep.
181  */
182 static __inline int
183 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
184     const char *wmesg, int pri, int timo, int queue)
185 {
186 	GIANT_DECLARE;
187 	struct lock_class *class;
188 	int catch, error;
189 
190 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
191 	catch = pri & PCATCH;
192 	pri &= PRIMASK;
193 	error = 0;
194 
195 	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
196 	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
197 
198 	if (flags & LK_INTERLOCK)
199 		class->lc_unlock(ilk);
200 	GIANT_SAVE();
201 	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
202 	    SLEEPQ_INTERRUPTIBLE : 0), queue);
203 	if ((flags & LK_TIMELOCK) && timo)
204 		sleepq_set_timeout(&lk->lock_object, timo);
205 
206 	/*
207 	 * Decisional switch for real sleeping.
208 	 */
209 	if ((flags & LK_TIMELOCK) && timo && catch)
210 		error = sleepq_timedwait_sig(&lk->lock_object, pri);
211 	else if ((flags & LK_TIMELOCK) && timo)
212 		error = sleepq_timedwait(&lk->lock_object, pri);
213 	else if (catch)
214 		error = sleepq_wait_sig(&lk->lock_object, pri);
215 	else
216 		sleepq_wait(&lk->lock_object, pri);
217 	GIANT_RESTORE();
218 	if ((flags & LK_SLEEPFAIL) && error == 0)
219 		error = ENOLCK;
220 
221 	return (error);
222 }
223 
224 static __inline int
225 wakeupshlk(struct lock *lk, const char *file, int line)
226 {
227 	uintptr_t v, x;
228 	int queue, wakeup_swapper;
229 
230 	TD_LOCKS_DEC(curthread);
231 	TD_SLOCKS_DEC(curthread);
232 	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
233 	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
234 
235 	wakeup_swapper = 0;
236 	for (;;) {
237 		x = lk->lk_lock;
238 
239 		/*
240 		 * If there is more than one shared lock held, just drop one
241 		 * and return.
242 		 */
243 		if (LK_SHARERS(x) > 1) {
244 			if (atomic_cmpset_ptr(&lk->lk_lock, x,
245 			    x - LK_ONE_SHARER))
246 				break;
247 			continue;
248 		}
249 
250 		/*
251 		 * If there are not waiters on the exclusive queue, drop the
252 		 * lock quickly.
253 		 */
254 		if ((x & LK_ALL_WAITERS) == 0) {
255 			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
256 			    LK_SHARERS_LOCK(1));
257 			if (atomic_cmpset_ptr(&lk->lk_lock, x, LK_UNLOCKED))
258 				break;
259 			continue;
260 		}
261 
262 		/*
263 		 * We should have a sharer with waiters, so enter the hard
264 		 * path in order to handle wakeups correctly.
265 		 */
266 		sleepq_lock(&lk->lock_object);
267 		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
268 		v = LK_UNLOCKED;
269 
270 		/*
271 		 * If the lock has exclusive waiters, give them preference in
272 		 * order to avoid deadlock with shared runners up.
273 		 */
274 		if (x & LK_EXCLUSIVE_WAITERS) {
275 			queue = SQ_EXCLUSIVE_QUEUE;
276 			v |= (x & LK_SHARED_WAITERS);
277 		} else {
278 			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
279 			    LK_SHARED_WAITERS);
280 			queue = SQ_SHARED_QUEUE;
281 		}
282 
283 		if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
284 		    v)) {
285 			sleepq_release(&lk->lock_object);
286 			continue;
287 		}
288 		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
289 		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
290 		    "exclusive");
291 		wakeup_swapper = sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
292 		    0, queue);
293 		sleepq_release(&lk->lock_object);
294 		break;
295 	}
296 
297 	lock_profile_release_lock(&lk->lock_object);
298 	return (wakeup_swapper);
299 }
300 
301 static void
302 assert_lockmgr(struct lock_object *lock, int what)
303 {
304 
305 	panic("lockmgr locks do not support assertions");
306 }
307 
308 static void
309 lock_lockmgr(struct lock_object *lock, int how)
310 {
311 
312 	panic("lockmgr locks do not support sleep interlocking");
313 }
314 
315 static int
316 unlock_lockmgr(struct lock_object *lock)
317 {
318 
319 	panic("lockmgr locks do not support sleep interlocking");
320 }
321 
322 #ifdef KDTRACE_HOOKS
323 static int
324 owner_lockmgr(struct lock_object *lock, struct thread **owner)
325 {
326 
327 	panic("lockmgr locks do not support owner inquiring");
328 }
329 #endif
330 
331 void
332 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
333 {
334 	int iflags;
335 
336 	MPASS((flags & ~LK_INIT_MASK) == 0);
337 	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
338             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
339             &lk->lk_lock));
340 
341 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
342 	if (flags & LK_CANRECURSE)
343 		iflags |= LO_RECURSABLE;
344 	if ((flags & LK_NODUP) == 0)
345 		iflags |= LO_DUPOK;
346 	if (flags & LK_NOPROFILE)
347 		iflags |= LO_NOPROFILE;
348 	if ((flags & LK_NOWITNESS) == 0)
349 		iflags |= LO_WITNESS;
350 	if (flags & LK_QUIET)
351 		iflags |= LO_QUIET;
352 	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
353 
354 	lk->lk_lock = LK_UNLOCKED;
355 	lk->lk_recurse = 0;
356 	lk->lk_timo = timo;
357 	lk->lk_pri = pri;
358 	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
359 	STACK_ZERO(lk);
360 }
361 
362 void
363 lockdestroy(struct lock *lk)
364 {
365 
366 	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
367 	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
368 	lock_destroy(&lk->lock_object);
369 }
370 
371 int
372 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
373     const char *wmesg, int pri, int timo, const char *file, int line)
374 {
375 	GIANT_DECLARE;
376 	struct lock_class *class;
377 	const char *iwmesg;
378 	uintptr_t tid, v, x;
379 	u_int op;
380 	int error, ipri, itimo, queue, wakeup_swapper;
381 #ifdef LOCK_PROFILING
382 	uint64_t waittime = 0;
383 	int contested = 0;
384 #endif
385 #ifdef ADAPTIVE_LOCKMGRS
386 	volatile struct thread *owner;
387 	u_int i, spintries = 0;
388 #endif
389 
390 	error = 0;
391 	tid = (uintptr_t)curthread;
392 	op = (flags & LK_TYPE_MASK);
393 	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
394 	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
395 	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
396 
397 	MPASS((flags & ~LK_TOTAL_MASK) == 0);
398 	KASSERT((op & (op - 1)) == 0,
399 	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
400 	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
401 	    (op != LK_DOWNGRADE && op != LK_RELEASE),
402 	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
403 	    __func__, file, line));
404 	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
405 	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
406 	    __func__, file, line));
407 
408 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
409 	if (panicstr != NULL) {
410 		if (flags & LK_INTERLOCK)
411 			class->lc_unlock(ilk);
412 		return (0);
413 	}
414 
415 	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
416 		op = LK_EXCLUSIVE;
417 
418 	wakeup_swapper = 0;
419 	switch (op) {
420 	case LK_SHARED:
421 		if (LK_CAN_WITNESS(flags))
422 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
423 			    file, line, ilk);
424 		for (;;) {
425 			x = lk->lk_lock;
426 
427 			/*
428 			 * If no other thread has an exclusive lock, or
429 			 * no exclusive waiter is present, bump the count of
430 			 * sharers.  Since we have to preserve the state of
431 			 * waiters, if we fail to acquire the shared lock
432 			 * loop back and retry.
433 			 */
434 			if (LK_CAN_SHARE(x)) {
435 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
436 				    x + LK_ONE_SHARER))
437 					break;
438 				continue;
439 			}
440 			lock_profile_obtain_lock_failed(&lk->lock_object,
441 			    &contested, &waittime);
442 
443 			/*
444 			 * If the lock is already held by curthread in
445 			 * exclusive way avoid a deadlock.
446 			 */
447 			if (LK_HOLDER(x) == tid) {
448 				LOCK_LOG2(lk,
449 				    "%s: %p already held in exclusive mode",
450 				    __func__, lk);
451 				error = EDEADLK;
452 				break;
453 			}
454 
455 			/*
456 			 * If the lock is expected to not sleep just give up
457 			 * and return.
458 			 */
459 			if (LK_TRYOP(flags)) {
460 				LOCK_LOG2(lk, "%s: %p fails the try operation",
461 				    __func__, lk);
462 				error = EBUSY;
463 				break;
464 			}
465 
466 #ifdef ADAPTIVE_LOCKMGRS
467 			/*
468 			 * If the owner is running on another CPU, spin until
469 			 * the owner stops running or the state of the lock
470 			 * changes.
471 			 */
472 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
473 			    LK_HOLDER(x) != LK_KERNPROC) {
474 				owner = (struct thread *)LK_HOLDER(x);
475 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
476 					CTR3(KTR_LOCK,
477 					    "%s: spinning on %p held by %p",
478 					    __func__, lk, owner);
479 
480 				/*
481 				 * If we are holding also an interlock drop it
482 				 * in order to avoid a deadlock if the lockmgr
483 				 * owner is adaptively spinning on the
484 				 * interlock itself.
485 				 */
486 				if (flags & LK_INTERLOCK) {
487 					class->lc_unlock(ilk);
488 					flags &= ~LK_INTERLOCK;
489 				}
490 				GIANT_SAVE();
491 				while (LK_HOLDER(lk->lk_lock) ==
492 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
493 					cpu_spinwait();
494 			} else if (LK_CAN_ADAPT(lk, flags) &&
495 			    (x & LK_SHARE) !=0 && LK_SHARERS(x) &&
496 			    spintries < alk_retries) {
497 				if (flags & LK_INTERLOCK) {
498 					class->lc_unlock(ilk);
499 					flags &= ~LK_INTERLOCK;
500 				}
501 				GIANT_SAVE();
502 				spintries++;
503 				for (i = 0; i < alk_loops; i++) {
504 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
505 						CTR4(KTR_LOCK,
506 				    "%s: shared spinning on %p with %u and %u",
507 						    __func__, lk, spintries, i);
508 					x = lk->lk_lock;
509 					if ((x & LK_SHARE) == 0 ||
510 					    LK_CAN_SHARE(x) != 0)
511 						break;
512 					cpu_spinwait();
513 				}
514 				if (i != alk_loops)
515 					continue;
516 			}
517 #endif
518 
519 			/*
520 			 * Acquire the sleepqueue chain lock because we
521 			 * probabilly will need to manipulate waiters flags.
522 			 */
523 			sleepq_lock(&lk->lock_object);
524 			x = lk->lk_lock;
525 
526 			/*
527 			 * if the lock can be acquired in shared mode, try
528 			 * again.
529 			 */
530 			if (LK_CAN_SHARE(x)) {
531 				sleepq_release(&lk->lock_object);
532 				continue;
533 			}
534 
535 #ifdef ADAPTIVE_LOCKMGRS
536 			/*
537 			 * The current lock owner might have started executing
538 			 * on another CPU (or the lock could have changed
539 			 * owner) while we were waiting on the turnstile
540 			 * chain lock.  If so, drop the turnstile lock and try
541 			 * again.
542 			 */
543 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
544 			    LK_HOLDER(x) != LK_KERNPROC) {
545 				owner = (struct thread *)LK_HOLDER(x);
546 				if (TD_IS_RUNNING(owner)) {
547 					sleepq_release(&lk->lock_object);
548 					continue;
549 				}
550 			}
551 #endif
552 
553 			/*
554 			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
555 			 * loop back and retry.
556 			 */
557 			if ((x & LK_SHARED_WAITERS) == 0) {
558 				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
559 				    x | LK_SHARED_WAITERS)) {
560 					sleepq_release(&lk->lock_object);
561 					continue;
562 				}
563 				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
564 				    __func__, lk);
565 			}
566 
567 			/*
568 			 * As far as we have been unable to acquire the
569 			 * shared lock and the shared waiters flag is set,
570 			 * we will sleep.
571 			 */
572 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
573 			    SQ_SHARED_QUEUE);
574 			flags &= ~LK_INTERLOCK;
575 			if (error) {
576 				LOCK_LOG3(lk,
577 				    "%s: interrupted sleep for %p with %d",
578 				    __func__, lk, error);
579 				break;
580 			}
581 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
582 			    __func__, lk);
583 		}
584 		if (error == 0) {
585 			lock_profile_obtain_lock_success(&lk->lock_object,
586 			    contested, waittime, file, line);
587 			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
588 			    line);
589 			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
590 			    line);
591 			TD_LOCKS_INC(curthread);
592 			TD_SLOCKS_INC(curthread);
593 			STACK_SAVE(lk);
594 		}
595 		break;
596 	case LK_UPGRADE:
597 		_lockmgr_assert(lk, KA_SLOCKED, file, line);
598 		v = lk->lk_lock;
599 		x = v & LK_ALL_WAITERS;
600 		v &= LK_EXCLUSIVE_SPINNERS;
601 
602 		/*
603 		 * Try to switch from one shared lock to an exclusive one.
604 		 * We need to preserve waiters flags during the operation.
605 		 */
606 		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
607 		    tid | x)) {
608 			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
609 			    line);
610 			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
611 			    LK_TRYWIT(flags), file, line);
612 			TD_SLOCKS_DEC(curthread);
613 			break;
614 		}
615 
616 		/*
617 		 * We have been unable to succeed in upgrading, so just
618 		 * give up the shared lock.
619 		 */
620 		wakeup_swapper |= wakeupshlk(lk, file, line);
621 
622 		/* FALLTHROUGH */
623 	case LK_EXCLUSIVE:
624 		if (LK_CAN_WITNESS(flags))
625 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
626 			    LOP_EXCLUSIVE, file, line, ilk);
627 
628 		/*
629 		 * If curthread already holds the lock and this one is
630 		 * allowed to recurse, simply recurse on it.
631 		 */
632 		if (lockmgr_xlocked(lk)) {
633 			if ((flags & LK_CANRECURSE) == 0 &&
634 			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
635 
636 				/*
637 				 * If the lock is expected to not panic just
638 				 * give up and return.
639 				 */
640 				if (LK_TRYOP(flags)) {
641 					LOCK_LOG2(lk,
642 					    "%s: %p fails the try operation",
643 					    __func__, lk);
644 					error = EBUSY;
645 					break;
646 				}
647 				if (flags & LK_INTERLOCK)
648 					class->lc_unlock(ilk);
649 		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
650 				    __func__, iwmesg, file, line);
651 			}
652 			lk->lk_recurse++;
653 			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
654 			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
655 			    lk->lk_recurse, file, line);
656 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
657 			    LK_TRYWIT(flags), file, line);
658 			TD_LOCKS_INC(curthread);
659 			break;
660 		}
661 
662 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
663 		    tid)) {
664 			lock_profile_obtain_lock_failed(&lk->lock_object,
665 			    &contested, &waittime);
666 
667 			/*
668 			 * If the lock is expected to not sleep just give up
669 			 * and return.
670 			 */
671 			if (LK_TRYOP(flags)) {
672 				LOCK_LOG2(lk, "%s: %p fails the try operation",
673 				    __func__, lk);
674 				error = EBUSY;
675 				break;
676 			}
677 
678 #ifdef ADAPTIVE_LOCKMGRS
679 			/*
680 			 * If the owner is running on another CPU, spin until
681 			 * the owner stops running or the state of the lock
682 			 * changes.
683 			 */
684 			x = lk->lk_lock;
685 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
686 			    LK_HOLDER(x) != LK_KERNPROC) {
687 				owner = (struct thread *)LK_HOLDER(x);
688 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
689 					CTR3(KTR_LOCK,
690 					    "%s: spinning on %p held by %p",
691 					    __func__, lk, owner);
692 
693 				/*
694 				 * If we are holding also an interlock drop it
695 				 * in order to avoid a deadlock if the lockmgr
696 				 * owner is adaptively spinning on the
697 				 * interlock itself.
698 				 */
699 				if (flags & LK_INTERLOCK) {
700 					class->lc_unlock(ilk);
701 					flags &= ~LK_INTERLOCK;
702 				}
703 				GIANT_SAVE();
704 				while (LK_HOLDER(lk->lk_lock) ==
705 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
706 					cpu_spinwait();
707 			} else if (LK_CAN_ADAPT(lk, flags) &&
708 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
709 			    spintries < alk_retries) {
710 				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
711 				    !atomic_cmpset_ptr(&lk->lk_lock, x,
712 				    x | LK_EXCLUSIVE_SPINNERS))
713 					continue;
714 				if (flags & LK_INTERLOCK) {
715 					class->lc_unlock(ilk);
716 					flags &= ~LK_INTERLOCK;
717 				}
718 				GIANT_SAVE();
719 				spintries++;
720 				for (i = 0; i < alk_loops; i++) {
721 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
722 						CTR4(KTR_LOCK,
723 				    "%s: shared spinning on %p with %u and %u",
724 						    __func__, lk, spintries, i);
725 					if ((lk->lk_lock &
726 					    LK_EXCLUSIVE_SPINNERS) == 0)
727 						break;
728 					cpu_spinwait();
729 				}
730 				if (i != alk_loops)
731 					continue;
732 			}
733 #endif
734 
735 			/*
736 			 * Acquire the sleepqueue chain lock because we
737 			 * probabilly will need to manipulate waiters flags.
738 			 */
739 			sleepq_lock(&lk->lock_object);
740 			x = lk->lk_lock;
741 
742 			/*
743 			 * if the lock has been released while we spun on
744 			 * the sleepqueue chain lock just try again.
745 			 */
746 			if (x == LK_UNLOCKED) {
747 				sleepq_release(&lk->lock_object);
748 				continue;
749 			}
750 
751 #ifdef ADAPTIVE_LOCKMGRS
752 			/*
753 			 * The current lock owner might have started executing
754 			 * on another CPU (or the lock could have changed
755 			 * owner) while we were waiting on the turnstile
756 			 * chain lock.  If so, drop the turnstile lock and try
757 			 * again.
758 			 */
759 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
760 			    LK_HOLDER(x) != LK_KERNPROC) {
761 				owner = (struct thread *)LK_HOLDER(x);
762 				if (TD_IS_RUNNING(owner)) {
763 					sleepq_release(&lk->lock_object);
764 					continue;
765 				}
766 			}
767 #endif
768 
769 			/*
770 			 * The lock can be in the state where there is a
771 			 * pending queue of waiters, but still no owner.
772 			 * This happens when the lock is contested and an
773 			 * owner is going to claim the lock.
774 			 * If curthread is the one successfully acquiring it
775 			 * claim lock ownership and return, preserving waiters
776 			 * flags.
777 			 */
778 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
779 			if ((x & ~v) == LK_UNLOCKED) {
780 				v &= ~LK_EXCLUSIVE_SPINNERS;
781 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
782 				    tid | v)) {
783 					sleepq_release(&lk->lock_object);
784 					LOCK_LOG2(lk,
785 					    "%s: %p claimed by a new writer",
786 					    __func__, lk);
787 					break;
788 				}
789 				sleepq_release(&lk->lock_object);
790 				continue;
791 			}
792 
793 			/*
794 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
795 			 * fail, loop back and retry.
796 			 */
797 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
798 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
799 				    x | LK_EXCLUSIVE_WAITERS)) {
800 					sleepq_release(&lk->lock_object);
801 					continue;
802 				}
803 				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
804 				    __func__, lk);
805 			}
806 
807 			/*
808 			 * As far as we have been unable to acquire the
809 			 * exclusive lock and the exclusive waiters flag
810 			 * is set, we will sleep.
811 			 */
812 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
813 			    SQ_EXCLUSIVE_QUEUE);
814 			flags &= ~LK_INTERLOCK;
815 			if (error) {
816 				LOCK_LOG3(lk,
817 				    "%s: interrupted sleep for %p with %d",
818 				    __func__, lk, error);
819 				break;
820 			}
821 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
822 			    __func__, lk);
823 		}
824 		if (error == 0) {
825 			lock_profile_obtain_lock_success(&lk->lock_object,
826 			    contested, waittime, file, line);
827 			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
828 			    lk->lk_recurse, file, line);
829 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
830 			    LK_TRYWIT(flags), file, line);
831 			TD_LOCKS_INC(curthread);
832 			STACK_SAVE(lk);
833 		}
834 		break;
835 	case LK_DOWNGRADE:
836 		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
837 		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
838 		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
839 		TD_SLOCKS_INC(curthread);
840 
841 		/*
842 		 * In order to preserve waiters flags, just spin.
843 		 */
844 		for (;;) {
845 			x = lk->lk_lock;
846 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
847 			x &= LK_ALL_WAITERS;
848 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
849 			    LK_SHARERS_LOCK(1) | x))
850 				break;
851 			cpu_spinwait();
852 		}
853 		break;
854 	case LK_RELEASE:
855 		_lockmgr_assert(lk, KA_LOCKED, file, line);
856 		x = lk->lk_lock;
857 
858 		if ((x & LK_SHARE) == 0) {
859 
860 			/*
861 			 * As first option, treact the lock as if it has not
862 			 * any waiter.
863 			 * Fix-up the tid var if the lock has been disowned.
864 			 */
865 			if (LK_HOLDER(x) == LK_KERNPROC)
866 				tid = LK_KERNPROC;
867 			else {
868 				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
869 				    file, line);
870 				TD_LOCKS_DEC(curthread);
871 			}
872 			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
873 			    lk->lk_recurse, file, line);
874 
875 			/*
876 			 * The lock is held in exclusive mode.
877 			 * If the lock is recursed also, then unrecurse it.
878 			 */
879 			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
880 				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
881 				    lk);
882 				lk->lk_recurse--;
883 				break;
884 			}
885 			if (tid != LK_KERNPROC)
886 				lock_profile_release_lock(&lk->lock_object);
887 
888 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
889 			    LK_UNLOCKED))
890 				break;
891 
892 			sleepq_lock(&lk->lock_object);
893 			x = lk->lk_lock;
894 			v = LK_UNLOCKED;
895 
896 			/*
897 		 	 * If the lock has exclusive waiters, give them
898 			 * preference in order to avoid deadlock with
899 			 * shared runners up.
900 			 */
901 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
902 			if (x & LK_EXCLUSIVE_WAITERS) {
903 				queue = SQ_EXCLUSIVE_QUEUE;
904 				v |= (x & LK_SHARED_WAITERS);
905 			} else {
906 				MPASS((x & LK_ALL_WAITERS) ==
907 				    LK_SHARED_WAITERS);
908 				queue = SQ_SHARED_QUEUE;
909 			}
910 
911 			LOCK_LOG3(lk,
912 			    "%s: %p waking up threads on the %s queue",
913 			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
914 			    "exclusive");
915 			atomic_store_rel_ptr(&lk->lk_lock, v);
916 			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
917 			    SLEEPQ_LK, 0, queue);
918 			sleepq_release(&lk->lock_object);
919 			break;
920 		} else
921 			wakeup_swapper = wakeupshlk(lk, file, line);
922 		break;
923 	case LK_DRAIN:
924 		if (LK_CAN_WITNESS(flags))
925 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
926 			    LOP_EXCLUSIVE, file, line, ilk);
927 
928 		/*
929 		 * Trying to drain a lock we already own will result in a
930 		 * deadlock.
931 		 */
932 		if (lockmgr_xlocked(lk)) {
933 			if (flags & LK_INTERLOCK)
934 				class->lc_unlock(ilk);
935 			panic("%s: draining %s with the lock held @ %s:%d\n",
936 			    __func__, iwmesg, file, line);
937 		}
938 
939 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
940 			lock_profile_obtain_lock_failed(&lk->lock_object,
941 			    &contested, &waittime);
942 
943 			/*
944 			 * If the lock is expected to not sleep just give up
945 			 * and return.
946 			 */
947 			if (LK_TRYOP(flags)) {
948 				LOCK_LOG2(lk, "%s: %p fails the try operation",
949 				    __func__, lk);
950 				error = EBUSY;
951 				break;
952 			}
953 
954 			/*
955 			 * Acquire the sleepqueue chain lock because we
956 			 * probabilly will need to manipulate waiters flags.
957 			 */
958 			sleepq_lock(&lk->lock_object);
959 			x = lk->lk_lock;
960 
961 			/*
962 			 * if the lock has been released while we spun on
963 			 * the sleepqueue chain lock just try again.
964 			 */
965 			if (x == LK_UNLOCKED) {
966 				sleepq_release(&lk->lock_object);
967 				continue;
968 			}
969 
970 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
971 			if ((x & ~v) == LK_UNLOCKED) {
972 				v = (x & ~LK_EXCLUSIVE_SPINNERS);
973 				if (v & LK_EXCLUSIVE_WAITERS) {
974 					queue = SQ_EXCLUSIVE_QUEUE;
975 					v &= ~LK_EXCLUSIVE_WAITERS;
976 				} else {
977 					MPASS(v & LK_SHARED_WAITERS);
978 					queue = SQ_SHARED_QUEUE;
979 					v &= ~LK_SHARED_WAITERS;
980 				}
981 				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
982 					sleepq_release(&lk->lock_object);
983 					continue;
984 				}
985 				LOCK_LOG3(lk,
986 				"%s: %p waking up all threads on the %s queue",
987 				    __func__, lk, queue == SQ_SHARED_QUEUE ?
988 				    "shared" : "exclusive");
989 				wakeup_swapper |= sleepq_broadcast(
990 				    &lk->lock_object, SLEEPQ_LK, 0, queue);
991 
992 				/*
993 				 * If shared waiters have been woken up we need
994 				 * to wait for one of them to acquire the lock
995 				 * before to set the exclusive waiters in
996 				 * order to avoid a deadlock.
997 				 */
998 				if (queue == SQ_SHARED_QUEUE) {
999 					for (v = lk->lk_lock;
1000 					    (v & LK_SHARE) && !LK_SHARERS(v);
1001 					    v = lk->lk_lock)
1002 						cpu_spinwait();
1003 				}
1004 			}
1005 
1006 			/*
1007 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1008 			 * fail, loop back and retry.
1009 			 */
1010 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1011 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1012 				    x | LK_EXCLUSIVE_WAITERS)) {
1013 					sleepq_release(&lk->lock_object);
1014 					continue;
1015 				}
1016 				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1017 				    __func__, lk);
1018 			}
1019 
1020 			/*
1021 			 * As far as we have been unable to acquire the
1022 			 * exclusive lock and the exclusive waiters flag
1023 			 * is set, we will sleep.
1024 			 */
1025 			if (flags & LK_INTERLOCK) {
1026 				class->lc_unlock(ilk);
1027 				flags &= ~LK_INTERLOCK;
1028 			}
1029 			GIANT_SAVE();
1030 			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1031 			    SQ_EXCLUSIVE_QUEUE);
1032 			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1033 			GIANT_RESTORE();
1034 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1035 			    __func__, lk);
1036 		}
1037 
1038 		if (error == 0) {
1039 			lock_profile_obtain_lock_success(&lk->lock_object,
1040 			    contested, waittime, file, line);
1041 			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1042 			    lk->lk_recurse, file, line);
1043 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1044 			    LK_TRYWIT(flags), file, line);
1045 			TD_LOCKS_INC(curthread);
1046 			STACK_SAVE(lk);
1047 		}
1048 		break;
1049 	default:
1050 		if (flags & LK_INTERLOCK)
1051 			class->lc_unlock(ilk);
1052 		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1053 	}
1054 
1055 	if (flags & LK_INTERLOCK)
1056 		class->lc_unlock(ilk);
1057 	if (wakeup_swapper)
1058 		kick_proc0();
1059 
1060 	return (error);
1061 }
1062 
1063 void
1064 _lockmgr_disown(struct lock *lk, const char *file, int line)
1065 {
1066 	uintptr_t tid, x;
1067 
1068 	tid = (uintptr_t)curthread;
1069 	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
1070 
1071 	/*
1072 	 * If the owner is already LK_KERNPROC just skip the whole operation.
1073 	 */
1074 	if (LK_HOLDER(lk->lk_lock) != tid)
1075 		return;
1076 	lock_profile_release_lock(&lk->lock_object);
1077 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1078 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1079 	TD_LOCKS_DEC(curthread);
1080 
1081 	/*
1082 	 * In order to preserve waiters flags, just spin.
1083 	 */
1084 	for (;;) {
1085 		x = lk->lk_lock;
1086 		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1087 		x &= LK_ALL_WAITERS;
1088 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1089 		    LK_KERNPROC | x))
1090 			return;
1091 		cpu_spinwait();
1092 	}
1093 }
1094 
1095 void
1096 lockmgr_printinfo(struct lock *lk)
1097 {
1098 	struct thread *td;
1099 	uintptr_t x;
1100 
1101 	if (lk->lk_lock == LK_UNLOCKED)
1102 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1103 	else if (lk->lk_lock & LK_SHARE)
1104 		printf("lock type %s: SHARED (count %ju)\n",
1105 		    lk->lock_object.lo_name,
1106 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1107 	else {
1108 		td = lockmgr_xholder(lk);
1109 		printf("lock type %s: EXCL by thread %p (pid %d)\n",
1110 		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
1111 	}
1112 
1113 	x = lk->lk_lock;
1114 	if (x & LK_EXCLUSIVE_WAITERS)
1115 		printf(" with exclusive waiters pending\n");
1116 	if (x & LK_SHARED_WAITERS)
1117 		printf(" with shared waiters pending\n");
1118 	if (x & LK_EXCLUSIVE_SPINNERS)
1119 		printf(" with exclusive spinners pending\n");
1120 
1121 	STACK_PRINT(lk);
1122 }
1123 
1124 int
1125 lockstatus(struct lock *lk)
1126 {
1127 	uintptr_t v, x;
1128 	int ret;
1129 
1130 	ret = LK_SHARED;
1131 	x = lk->lk_lock;
1132 	v = LK_HOLDER(x);
1133 
1134 	if ((x & LK_SHARE) == 0) {
1135 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1136 			ret = LK_EXCLUSIVE;
1137 		else
1138 			ret = LK_EXCLOTHER;
1139 	} else if (x == LK_UNLOCKED)
1140 		ret = 0;
1141 
1142 	return (ret);
1143 }
1144 
1145 #ifdef INVARIANT_SUPPORT
1146 #ifndef INVARIANTS
1147 #undef	_lockmgr_assert
1148 #endif
1149 
1150 void
1151 _lockmgr_assert(struct lock *lk, int what, const char *file, int line)
1152 {
1153 	int slocked = 0;
1154 
1155 	if (panicstr != NULL)
1156 		return;
1157 	switch (what) {
1158 	case KA_SLOCKED:
1159 	case KA_SLOCKED | KA_NOTRECURSED:
1160 	case KA_SLOCKED | KA_RECURSED:
1161 		slocked = 1;
1162 	case KA_LOCKED:
1163 	case KA_LOCKED | KA_NOTRECURSED:
1164 	case KA_LOCKED | KA_RECURSED:
1165 #ifdef WITNESS
1166 
1167 		/*
1168 		 * We cannot trust WITNESS if the lock is held in exclusive
1169 		 * mode and a call to lockmgr_disown() happened.
1170 		 * Workaround this skipping the check if the lock is held in
1171 		 * exclusive mode even for the KA_LOCKED case.
1172 		 */
1173 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1174 			witness_assert(&lk->lock_object, what, file, line);
1175 			break;
1176 		}
1177 #endif
1178 		if (lk->lk_lock == LK_UNLOCKED ||
1179 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1180 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1181 			panic("Lock %s not %slocked @ %s:%d\n",
1182 			    lk->lock_object.lo_name, slocked ? "share" : "",
1183 			    file, line);
1184 
1185 		if ((lk->lk_lock & LK_SHARE) == 0) {
1186 			if (lockmgr_recursed(lk)) {
1187 				if (what & KA_NOTRECURSED)
1188 					panic("Lock %s recursed @ %s:%d\n",
1189 					    lk->lock_object.lo_name, file,
1190 					    line);
1191 			} else if (what & KA_RECURSED)
1192 				panic("Lock %s not recursed @ %s:%d\n",
1193 				    lk->lock_object.lo_name, file, line);
1194 		}
1195 		break;
1196 	case KA_XLOCKED:
1197 	case KA_XLOCKED | KA_NOTRECURSED:
1198 	case KA_XLOCKED | KA_RECURSED:
1199 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1200 			panic("Lock %s not exclusively locked @ %s:%d\n",
1201 			    lk->lock_object.lo_name, file, line);
1202 		if (lockmgr_recursed(lk)) {
1203 			if (what & KA_NOTRECURSED)
1204 				panic("Lock %s recursed @ %s:%d\n",
1205 				    lk->lock_object.lo_name, file, line);
1206 		} else if (what & KA_RECURSED)
1207 			panic("Lock %s not recursed @ %s:%d\n",
1208 			    lk->lock_object.lo_name, file, line);
1209 		break;
1210 	case KA_UNLOCKED:
1211 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1212 			panic("Lock %s exclusively locked @ %s:%d\n",
1213 			    lk->lock_object.lo_name, file, line);
1214 		break;
1215 	default:
1216 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1217 		    line);
1218 	}
1219 }
1220 #endif
1221 
1222 #ifdef DDB
1223 int
1224 lockmgr_chain(struct thread *td, struct thread **ownerp)
1225 {
1226 	struct lock *lk;
1227 
1228 	lk = td->td_wchan;
1229 
1230 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1231 		return (0);
1232 	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1233 	if (lk->lk_lock & LK_SHARE)
1234 		db_printf("SHARED (count %ju)\n",
1235 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1236 	else
1237 		db_printf("EXCL\n");
1238 	*ownerp = lockmgr_xholder(lk);
1239 
1240 	return (1);
1241 }
1242 
1243 static void
1244 db_show_lockmgr(struct lock_object *lock)
1245 {
1246 	struct thread *td;
1247 	struct lock *lk;
1248 
1249 	lk = (struct lock *)lock;
1250 
1251 	db_printf(" state: ");
1252 	if (lk->lk_lock == LK_UNLOCKED)
1253 		db_printf("UNLOCKED\n");
1254 	else if (lk->lk_lock & LK_SHARE)
1255 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1256 	else {
1257 		td = lockmgr_xholder(lk);
1258 		if (td == (struct thread *)LK_KERNPROC)
1259 			db_printf("XLOCK: LK_KERNPROC\n");
1260 		else
1261 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1262 			    td->td_tid, td->td_proc->p_pid,
1263 			    td->td_proc->p_comm);
1264 		if (lockmgr_recursed(lk))
1265 			db_printf(" recursed: %d\n", lk->lk_recurse);
1266 	}
1267 	db_printf(" waiters: ");
1268 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1269 	case LK_SHARED_WAITERS:
1270 		db_printf("shared\n");
1271 		break;
1272 	case LK_EXCLUSIVE_WAITERS:
1273 		db_printf("exclusive\n");
1274 		break;
1275 	case LK_ALL_WAITERS:
1276 		db_printf("shared and exclusive\n");
1277 		break;
1278 	default:
1279 		db_printf("none\n");
1280 	}
1281 	db_printf(" spinners: ");
1282 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1283 		db_printf("exclusive\n");
1284 	else
1285 		db_printf("none\n");
1286 }
1287 #endif
1288