xref: /freebsd/sys/kern/kern_lock.c (revision 9cbf1de7e34a6fced041388fad5d9180cb7705fe)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice(s), this list of conditions and the following disclaimer as
12  *    the first lines of this file unmodified other than the possible
13  *    addition of one or more copyright notices.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice(s), this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28  * DAMAGE.
29  */
30 
31 #include "opt_ddb.h"
32 #include "opt_hwpmc_hooks.h"
33 
34 #include <sys/param.h>
35 #include <sys/kdb.h>
36 #include <sys/ktr.h>
37 #include <sys/limits.h>
38 #include <sys/lock.h>
39 #include <sys/lock_profile.h>
40 #include <sys/lockmgr.h>
41 #include <sys/lockstat.h>
42 #include <sys/mutex.h>
43 #include <sys/proc.h>
44 #include <sys/sleepqueue.h>
45 #ifdef DEBUG_LOCKS
46 #include <sys/stack.h>
47 #endif
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50 
51 #include <machine/cpu.h>
52 
53 #ifdef DDB
54 #include <ddb/ddb.h>
55 #endif
56 
57 #ifdef HWPMC_HOOKS
58 #include <sys/pmckern.h>
59 PMC_SOFT_DECLARE( , , lock, failed);
60 #endif
61 
62 /*
63  * Hack. There should be prio_t or similar so that this is not necessary.
64  */
65 _Static_assert((PRILASTFLAG * 2) - 1 <= USHRT_MAX,
66     "prio flags wont fit in u_short pri in struct lock");
67 
68 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
69     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
70 
71 #define	SQ_EXCLUSIVE_QUEUE	0
72 #define	SQ_SHARED_QUEUE		1
73 
74 #ifndef INVARIANTS
75 #define	_lockmgr_assert(lk, what, file, line)
76 #endif
77 
78 #define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
79 #define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
80 
81 #ifndef DEBUG_LOCKS
82 #define	STACK_PRINT(lk)
83 #define	STACK_SAVE(lk)
84 #define	STACK_ZERO(lk)
85 #else
86 #define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
87 #define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
88 #define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
89 #endif
90 
91 #define	LOCK_LOG2(lk, string, arg1, arg2)				\
92 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
93 		CTR2(KTR_LOCK, (string), (arg1), (arg2))
94 #define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
95 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
96 		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
97 
98 #define	GIANT_DECLARE							\
99 	int _i = 0;							\
100 	WITNESS_SAVE_DECL(Giant)
101 #define	GIANT_RESTORE() do {						\
102 	if (__predict_false(_i > 0)) {					\
103 		while (_i--)						\
104 			mtx_lock(&Giant);				\
105 		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
106 	}								\
107 } while (0)
108 #define	GIANT_SAVE() do {						\
109 	if (__predict_false(mtx_owned(&Giant))) {			\
110 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
111 		while (mtx_owned(&Giant)) {				\
112 			_i++;						\
113 			mtx_unlock(&Giant);				\
114 		}							\
115 	}								\
116 } while (0)
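/*
 * Editorial note: GIANT_SAVE() fully drops Giant (recording the recursion
 * depth in _i) before a thread blocks on the sleepqueue, and
 * GIANT_RESTORE() reacquires it the same number of times afterwards, so a
 * sleeping lockmgr waiter never holds Giant.
 */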
117 
118 static __always_inline bool
119 LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
120 {
121 
122 	if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
123 	    LK_SHARE)
124 		return (true);
125 	if (fp || (!(x & LK_SHARE)))
126 		return (false);
127 	if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
128 	    (curthread->td_pflags & TDP_DEADLKTREAT))
129 		return (true);
130 	return (false);
131 }
132 
133 #define	LK_TRYOP(x)							\
134 	((x) & LK_NOWAIT)
135 
136 #define	LK_CAN_WITNESS(x)						\
137 	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
138 #define	LK_TRYWIT(x)							\
139 	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
140 
141 #define	lockmgr_xlocked_v(v)						\
142 	(((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
143 
144 #define	lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))
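/*
 * Illustrative sketch (editorial, not in the original source): assuming the
 * lk_lock encoding from <sys/lockmgr.h>, the lock word holds either the
 * owning thread pointer (exclusive) or a sharer count (shared), plus flag
 * bits, e.g.:
 *
 *	exclusive, uncontested:	lk_lock == (uintptr_t)owner_td
 *	shared by two threads:	lk_lock == LK_SHARERS_LOCK(2)
 *	unlocked:		lk_lock == LK_UNLOCKED
 *
 * (owner_td is a hypothetical name.)  lockmgr_xlocked_v() therefore masks
 * off all flag bits except LK_SHARE before comparing against curthread.
 */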
145 
146 static void	assert_lockmgr(const struct lock_object *lock, int how);
147 #ifdef DDB
148 static void	db_show_lockmgr(const struct lock_object *lock);
149 #endif
150 static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
151 #ifdef KDTRACE_HOOKS
152 static int	owner_lockmgr(const struct lock_object *lock,
153 		    struct thread **owner);
154 #endif
155 static uintptr_t unlock_lockmgr(struct lock_object *lock);
156 
157 struct lock_class lock_class_lockmgr = {
158 	.lc_name = "lockmgr",
159 	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
160 	.lc_assert = assert_lockmgr,
161 #ifdef DDB
162 	.lc_ddb_show = db_show_lockmgr,
163 #endif
164 	.lc_lock = lock_lockmgr,
165 	.lc_unlock = unlock_lockmgr,
166 #ifdef KDTRACE_HOOKS
167 	.lc_owner = owner_lockmgr,
168 #endif
169 };
170 
171 static __read_mostly bool lk_adaptive = true;
172 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
173 SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
174     0, "");
175 #define lockmgr_delay  locks_delay
176 
177 struct lockmgr_wait {
178 	const char *iwmesg;
179 	int ipri;
180 	int itimo;
181 };
182 
183 static __always_inline bool lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
184     int flags, bool fp);
185 static __always_inline bool lockmgr_sunlock_try(struct lock *lk,
186     uintptr_t *xp);
187 
188 static void
189 lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
190 {
191 	struct lock_class *class;
192 
193 	if (flags & LK_INTERLOCK) {
194 		class = LOCK_CLASS(ilk);
195 		class->lc_unlock(ilk);
196 	}
197 
198 	if (__predict_false(wakeup_swapper))
199 		kick_proc0();
200 }
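/*
 * Editorial note: the wakeup_swapper flag propagates sleepq_broadcast()'s
 * report that a swapped-out thread was awakened; kick_proc0() then wakes
 * proc0 (the swapper) so it can page such threads back in.
 */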
201 
202 static void
203 lockmgr_note_shared_acquire(struct lock *lk, int contested,
204     uint64_t waittime, const char *file, int line, int flags)
205 {
206 
207 	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
208 	    waittime, file, line, LOCKSTAT_READER);
209 	LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
210 	WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
211 	TD_LOCKS_INC(curthread);
212 	TD_SLOCKS_INC(curthread);
213 	STACK_SAVE(lk);
214 }
215 
216 static void
217 lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
218 {
219 
220 	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
221 	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
222 	TD_LOCKS_DEC(curthread);
223 	TD_SLOCKS_DEC(curthread);
224 }
225 
226 static void
227 lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
228     uint64_t waittime, const char *file, int line, int flags)
229 {
230 
231 	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
232 	    waittime, file, line, LOCKSTAT_WRITER);
233 	LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
234 	WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
235 	    line);
236 	TD_LOCKS_INC(curthread);
237 	STACK_SAVE(lk);
238 }
239 
240 static void
241 lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
242 {
243 
244 	if (!lockmgr_disowned(lk)) {
245 		WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
246 		TD_LOCKS_DEC(curthread);
247 	}
248 	LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
249 	    line);
250 }
251 
252 static __inline struct thread *
253 lockmgr_xholder(const struct lock *lk)
254 {
255 	uintptr_t x;
256 
257 	x = lockmgr_read_value(lk);
258 	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
259 }
260 
261 /*
262  * Assumes the sleepq_lock is held on entry and returns with it released.
263  * Also assumes the generic interlock is sane and has been checked before.
264  * If LK_INTERLOCK is specified, the interlock is not reacquired after the
265  * sleep.
266  */
267 static __inline int
268 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
269     const char *wmesg, int pri, int timo, int queue)
270 {
271 	GIANT_DECLARE;
272 	struct lock_class *class;
273 	int catch, error;
274 
275 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
276 	catch = pri & PCATCH;
277 	pri &= PRIMASK;
278 	error = 0;
279 
280 	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
281 	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
282 
283 	if (flags & LK_INTERLOCK)
284 		class->lc_unlock(ilk);
285 	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0) {
286 		if (lk->lk_exslpfail < USHRT_MAX)
287 			lk->lk_exslpfail++;
288 	}
289 	GIANT_SAVE();
290 	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
291 	    SLEEPQ_INTERRUPTIBLE : 0), queue);
292 	if ((flags & LK_TIMELOCK) && timo)
293 		sleepq_set_timeout(&lk->lock_object, timo);
294 
295 	/*
296 	 * Decide which sleepqueue wait primitive to use.
297 	 */
298 	if ((flags & LK_TIMELOCK) && timo && catch)
299 		error = sleepq_timedwait_sig(&lk->lock_object, pri);
300 	else if ((flags & LK_TIMELOCK) && timo)
301 		error = sleepq_timedwait(&lk->lock_object, pri);
302 	else if (catch)
303 		error = sleepq_wait_sig(&lk->lock_object, pri);
304 	else
305 		sleepq_wait(&lk->lock_object, pri);
306 	GIANT_RESTORE();
307 	if ((flags & LK_SLEEPFAIL) && error == 0)
308 		error = ENOLCK;
309 
310 	return (error);
311 }
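/*
 * Editorial note: with LK_SLEEPFAIL set, even a sleep that ends normally is
 * converted into ENOLCK above, so the caller observes failure and is
 * expected to restart its operation from scratch rather than assume it now
 * holds the lock.
 */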
312 
313 static __inline int
314 wakeupshlk(struct lock *lk, const char *file, int line)
315 {
316 	uintptr_t v, x, orig_x;
317 	u_int realexslp;
318 	int queue, wakeup_swapper;
319 
320 	wakeup_swapper = 0;
321 	for (;;) {
322 		x = lockmgr_read_value(lk);
323 		if (lockmgr_sunlock_try(lk, &x))
324 			break;
325 
326 		/*
327 		 * We should have a sharer with waiters, so enter the hard
328 		 * path in order to handle wakeups correctly.
329 		 */
330 		sleepq_lock(&lk->lock_object);
331 		orig_x = lockmgr_read_value(lk);
332 retry_sleepq:
333 		x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
334 		v = LK_UNLOCKED;
335 
336 		/*
337 		 * If the lock has exclusive waiters, give them preference in
338 		 * order to avoid deadlock with shared runners-up.
339 		 * If interruptible sleeps left the exclusive queue empty,
340 		 * avoid starvation of the threads sleeping on the shared
341 		 * queue by giving them precedence and clearing the
342 		 * exclusive waiters bit anyway.
343 		 * Note that the lk_exslpfail count may overstate the real
344 		 * number of waiters with the LK_SLEEPFAIL flag on, because
345 		 * such waiters may also be using interruptible sleeps;
346 		 * lk_exslpfail is therefore only an upper bound, edge
347 		 * cases included.
348 		 */
349 		realexslp = sleepq_sleepcnt(&lk->lock_object,
350 		    SQ_EXCLUSIVE_QUEUE);
351 		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
352 			if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
353 				lk->lk_exslpfail = 0;
354 				queue = SQ_EXCLUSIVE_QUEUE;
355 				v |= (x & LK_SHARED_WAITERS);
356 			} else {
357 				lk->lk_exslpfail = 0;
358 				LOCK_LOG2(lk,
359 				    "%s: %p has only LK_SLEEPFAIL sleepers",
360 				    __func__, lk);
361 				LOCK_LOG2(lk,
362 			    "%s: %p waking up threads on the exclusive queue",
363 				    __func__, lk);
364 				wakeup_swapper =
365 				    sleepq_broadcast(&lk->lock_object,
366 				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
367 				queue = SQ_SHARED_QUEUE;
368 			}
369 		} else {
370 			/*
371 			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
372 			 * and using interruptible sleeps/timeouts may have
373 			 * left spurious lk_exslpfail counts behind, so
374 			 * clean them up anyway.
375 			 */
376 			lk->lk_exslpfail = 0;
377 			queue = SQ_SHARED_QUEUE;
378 		}
379 
380 		if (lockmgr_sunlock_try(lk, &orig_x)) {
381 			sleepq_release(&lk->lock_object);
382 			break;
383 		}
384 
385 		x |= LK_SHARERS_LOCK(1);
386 		if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
387 			orig_x = x;
388 			goto retry_sleepq;
389 		}
390 		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
391 		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
392 		    "exclusive");
393 		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
394 		    0, queue);
395 		sleepq_release(&lk->lock_object);
396 		break;
397 	}
398 
399 	LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
400 	return (wakeup_swapper);
401 }
402 
403 static void
404 assert_lockmgr(const struct lock_object *lock, int what)
405 {
406 
407 	panic("lockmgr locks do not support assertions");
408 }
409 
410 static void
411 lock_lockmgr(struct lock_object *lock, uintptr_t how)
412 {
413 
414 	panic("lockmgr locks do not support sleep interlocking");
415 }
416 
417 static uintptr_t
418 unlock_lockmgr(struct lock_object *lock)
419 {
420 
421 	panic("lockmgr locks do not support sleep interlocking");
422 }
423 
424 #ifdef KDTRACE_HOOKS
425 static int
426 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
427 {
428 
429 	panic("lockmgr locks do not support owner inquiring");
430 }
431 #endif
432 
433 void
434 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
435 {
436 	int iflags;
437 
438 	MPASS((flags & ~LK_INIT_MASK) == 0);
439 	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
440             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
441             &lk->lk_lock));
442 
443 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
444 	if (flags & LK_CANRECURSE)
445 		iflags |= LO_RECURSABLE;
446 	if ((flags & LK_NODUP) == 0)
447 		iflags |= LO_DUPOK;
448 	if (flags & LK_NOPROFILE)
449 		iflags |= LO_NOPROFILE;
450 	if ((flags & LK_NOWITNESS) == 0)
451 		iflags |= LO_WITNESS;
452 	if (flags & LK_QUIET)
453 		iflags |= LO_QUIET;
454 	if (flags & LK_IS_VNODE)
455 		iflags |= LO_IS_VNODE;
456 	if (flags & LK_NEW)
457 		iflags |= LO_NEW;
458 	iflags |= flags & LK_NOSHARE;
459 
460 	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
461 	lk->lk_lock = LK_UNLOCKED;
462 	lk->lk_recurse = 0;
463 	lk->lk_exslpfail = 0;
464 	lk->lk_timo = timo;
465 	lk->lk_pri = pri;
466 	STACK_ZERO(lk);
467 }
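/*
 * Illustrative usage sketch (editorial; the names are hypothetical):
 *
 *	struct lock example_lk;
 *
 *	lockinit(&example_lk, PRIBIO, "exmplk", 0, LK_CANRECURSE);
 *	lockmgr(&example_lk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&example_lk, LK_RELEASE, NULL);
 *	lockdestroy(&example_lk);
 */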
468 
469 /*
470  * XXX: Gross hacks to manipulate external lock flags after
471  * initialization.  Used for certain vnode and buf locks.
472  */
473 void
474 lockallowshare(struct lock *lk)
475 {
476 
477 	lockmgr_assert(lk, KA_XLOCKED);
478 	lk->lock_object.lo_flags &= ~LK_NOSHARE;
479 }
480 
481 void
482 lockdisableshare(struct lock *lk)
483 {
484 
485 	lockmgr_assert(lk, KA_XLOCKED);
486 	lk->lock_object.lo_flags |= LK_NOSHARE;
487 }
488 
489 void
490 lockallowrecurse(struct lock *lk)
491 {
492 
493 	lockmgr_assert(lk, KA_XLOCKED);
494 	lk->lock_object.lo_flags |= LO_RECURSABLE;
495 }
496 
497 void
498 lockdisablerecurse(struct lock *lk)
499 {
500 
501 	lockmgr_assert(lk, KA_XLOCKED);
502 	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
503 }
504 
505 void
506 lockdestroy(struct lock *lk)
507 {
508 
509 	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
510 	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
511 	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
512 	lock_destroy(&lk->lock_object);
513 }
514 
515 static __always_inline bool
516 lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
517 {
518 
519 	/*
520 	 * If no other thread holds an exclusive lock and
521 	 * no exclusive waiter is present, bump the count of
522 	 * sharers.  Since we have to preserve the state of
523 	 * the waiters, if we fail to acquire the shared
524 	 * lock, loop back and retry.
525 	 */
526 	while (LK_CAN_SHARE(*xp, flags, fp)) {
527 		if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
528 		    *xp + LK_ONE_SHARER)) {
529 			return (true);
530 		}
531 	}
532 	return (false);
533 }
534 
535 static __always_inline bool
536 lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
537 {
538 
539 	for (;;) {
540 		if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
541 			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
542 			    *xp - LK_ONE_SHARER))
543 				return (true);
544 			continue;
545 		}
546 		break;
547 	}
548 	return (false);
549 }
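/*
 * Editorial note: the fast path above only succeeds while more than one
 * sharer remains or no waiters are queued; the last sharer leaving a
 * contended lock must take the hard path so that queued waiters get woken
 * up (see wakeupshlk()).
 */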
550 
551 static bool
552 lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
553     int flags)
554 {
555 	struct thread *owner;
556 	uintptr_t x;
557 
558 	x = *xp;
559 	MPASS(x != LK_UNLOCKED);
560 	owner = (struct thread *)LK_HOLDER(x);
561 	for (;;) {
562 		MPASS(owner != curthread);
563 		if (owner == (struct thread *)LK_KERNPROC)
564 			return (false);
565 		if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
566 			return (false);
567 		if (owner == NULL)
568 			return (false);
569 		if (!TD_IS_RUNNING(owner))
570 			return (false);
571 		if ((x & LK_ALL_WAITERS) != 0)
572 			return (false);
573 		lock_delay(lda);
574 		x = lockmgr_read_value(lk);
575 		if (LK_CAN_SHARE(x, flags, false)) {
576 			*xp = x;
577 			return (true);
578 		}
579 		owner = (struct thread *)LK_HOLDER(x);
580 	}
581 }
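/*
 * Editorial note: adaptive spinning is only worthwhile while the exclusive
 * owner is running on another CPU, on the theory that it will release the
 * lock shortly; once the owner goes off CPU, the lock is disowned to
 * LK_KERNPROC, or waiters queue up, the would-be sharer stops spinning and
 * falls back to the sleepqueue path.
 */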
582 
583 static __noinline int
584 lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
585     const char *file, int line, struct lockmgr_wait *lwa)
586 {
587 	uintptr_t tid, x;
588 	int error = 0;
589 	const char *iwmesg;
590 	int ipri, itimo;
591 
592 #ifdef KDTRACE_HOOKS
593 	uint64_t sleep_time = 0;
594 #endif
595 #ifdef LOCK_PROFILING
596 	uint64_t waittime = 0;
597 	int contested = 0;
598 #endif
599 	struct lock_delay_arg lda;
600 
601 	if (SCHEDULER_STOPPED())
602 		goto out;
603 
604 	tid = (uintptr_t)curthread;
605 
606 	if (LK_CAN_WITNESS(flags))
607 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
608 		    file, line, flags & LK_INTERLOCK ? ilk : NULL);
609 	x = lockmgr_read_value(lk);
610 	lock_delay_arg_init(&lda, &lockmgr_delay);
611 	if (!lk_adaptive)
612 		flags &= ~LK_ADAPTIVE;
613 	/*
614 	 * The lock may already be locked exclusive by curthread,
615 	 * avoid deadlock.
616 	 */
617 	if (LK_HOLDER(x) == tid) {
618 		LOCK_LOG2(lk,
619 		    "%s: %p already held in exclusive mode",
620 		    __func__, lk);
621 		error = EDEADLK;
622 		goto out;
623 	}
624 
625 	for (;;) {
626 		if (lockmgr_slock_try(lk, &x, flags, false))
627 			break;
628 
629 		lock_profile_obtain_lock_failed(&lk->lock_object, false,
630 		    &contested, &waittime);
631 
632 		if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
633 			if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
634 				continue;
635 		}
636 
637 #ifdef HWPMC_HOOKS
638 		PMC_SOFT_CALL( , , lock, failed);
639 #endif
640 
641 		/*
642 		 * If the operation is expected to not sleep, just give
643 		 * up and return.
644 		 */
645 		if (LK_TRYOP(flags)) {
646 			LOCK_LOG2(lk, "%s: %p fails the try operation",
647 			    __func__, lk);
648 			error = EBUSY;
649 			break;
650 		}
651 
652 		/*
653 		 * Acquire the sleepqueue chain lock because we
654 		 * probably will need to manipulate the waiters flags.
655 		 */
656 		sleepq_lock(&lk->lock_object);
657 		x = lockmgr_read_value(lk);
658 retry_sleepq:
659 
660 		/*
661 		 * if the lock can be acquired in shared mode, try
662 		 * again.
663 		 */
664 		if (LK_CAN_SHARE(x, flags, false)) {
665 			sleepq_release(&lk->lock_object);
666 			continue;
667 		}
668 
669 		/*
670 		 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
671 		 * loop back and retry.
672 		 */
673 		if ((x & LK_SHARED_WAITERS) == 0) {
674 			if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
675 			    x | LK_SHARED_WAITERS)) {
676 				goto retry_sleepq;
677 			}
678 			LOCK_LOG2(lk, "%s: %p set shared waiters flag",
679 			    __func__, lk);
680 		}
681 
682 		if (lwa == NULL) {
683 			iwmesg = lk->lock_object.lo_name;
684 			ipri = lk->lk_pri;
685 			itimo = lk->lk_timo;
686 		} else {
687 			iwmesg = lwa->iwmesg;
688 			ipri = lwa->ipri;
689 			itimo = lwa->itimo;
690 		}
691 
692 		/*
693 		 * Since we have been unable to acquire the
694 		 * shared lock and the shared waiters flag is set,
695 		 * we will sleep.
696 		 */
697 #ifdef KDTRACE_HOOKS
698 		sleep_time -= lockstat_nsecs(&lk->lock_object);
699 #endif
700 		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
701 		    SQ_SHARED_QUEUE);
702 #ifdef KDTRACE_HOOKS
703 		sleep_time += lockstat_nsecs(&lk->lock_object);
704 #endif
705 		flags &= ~LK_INTERLOCK;
706 		if (error) {
707 			LOCK_LOG3(lk,
708 			    "%s: interrupted sleep for %p with %d",
709 			    __func__, lk, error);
710 			break;
711 		}
712 		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
713 		    __func__, lk);
714 		x = lockmgr_read_value(lk);
715 	}
716 	if (error == 0) {
717 #ifdef KDTRACE_HOOKS
718 		if (sleep_time != 0)
719 			LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
720 			    LOCKSTAT_READER, (x & LK_SHARE) == 0,
721 			    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
722 #endif
723 #ifdef LOCK_PROFILING
724 		lockmgr_note_shared_acquire(lk, contested, waittime,
725 		    file, line, flags);
726 #else
727 		lockmgr_note_shared_acquire(lk, 0, 0, file, line,
728 		    flags);
729 #endif
730 	}
731 
732 out:
733 	lockmgr_exit(flags, ilk, 0);
734 	return (error);
735 }
736 
737 static bool
738 lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
739 {
740 	struct thread *owner;
741 	uintptr_t x;
742 
743 	x = *xp;
744 	MPASS(x != LK_UNLOCKED);
745 	owner = (struct thread *)LK_HOLDER(x);
746 	for (;;) {
747 		MPASS(owner != curthread);
748 		if (owner == NULL)
749 			return (false);
750 		if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
751 			return (false);
752 		if (owner == (struct thread *)LK_KERNPROC)
753 			return (false);
754 		if (!TD_IS_RUNNING(owner))
755 			return (false);
756 		if ((x & LK_ALL_WAITERS) != 0)
757 			return (false);
758 		lock_delay(lda);
759 		x = lockmgr_read_value(lk);
760 		if (x == LK_UNLOCKED) {
761 			*xp = x;
762 			return (true);
763 		}
764 		owner = (struct thread *)LK_HOLDER(x);
765 	}
766 }
767 
768 static __noinline int
769 lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
770     const char *file, int line, struct lockmgr_wait *lwa)
771 {
772 	struct lock_class *class;
773 	uintptr_t tid, x, v;
774 	int error = 0;
775 	const char *iwmesg;
776 	int ipri, itimo;
777 
778 #ifdef KDTRACE_HOOKS
779 	uint64_t sleep_time = 0;
780 #endif
781 #ifdef LOCK_PROFILING
782 	uint64_t waittime = 0;
783 	int contested = 0;
784 #endif
785 	struct lock_delay_arg lda;
786 
787 	if (SCHEDULER_STOPPED())
788 		goto out;
789 
790 	tid = (uintptr_t)curthread;
791 
792 	if (LK_CAN_WITNESS(flags))
793 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
794 		    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
795 		    ilk : NULL);
796 
797 	/*
798 	 * If curthread already holds the lock and this one is
799 	 * allowed to recurse, simply recurse on it.
800 	 */
801 	if (lockmgr_xlocked(lk)) {
802 		if ((flags & LK_CANRECURSE) == 0 &&
803 		    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
804 			/*
805 			 * If the operation is expected to not panic,
806 			 * just give up and return.
807 			 */
808 			if (LK_TRYOP(flags)) {
809 				LOCK_LOG2(lk,
810 				    "%s: %p fails the try operation",
811 				    __func__, lk);
812 				error = EBUSY;
813 				goto out;
814 			}
815 			if (flags & LK_INTERLOCK) {
816 				class = LOCK_CLASS(ilk);
817 				class->lc_unlock(ilk);
818 			}
819 			STACK_PRINT(lk);
820 			panic("%s: recursing on non recursive lockmgr %p "
821 			    "@ %s:%d\n", __func__, lk, file, line);
822 		}
823 		atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
824 		lk->lk_recurse++;
825 		LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
826 		LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
827 		    lk->lk_recurse, file, line);
828 		WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
829 		    LK_TRYWIT(flags), file, line);
830 		TD_LOCKS_INC(curthread);
831 		goto out;
832 	}
833 
834 	x = LK_UNLOCKED;
835 	lock_delay_arg_init(&lda, &lockmgr_delay);
836 	if (!lk_adaptive)
837 		flags &= ~LK_ADAPTIVE;
838 	for (;;) {
839 		if (x == LK_UNLOCKED) {
840 			if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
841 				break;
842 			continue;
843 		}
844 
845 		lock_profile_obtain_lock_failed(&lk->lock_object, false,
846 		    &contested, &waittime);
847 
848 		if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
849 			if (lockmgr_xlock_adaptive(&lda, lk, &x))
850 				continue;
851 		}
852 #ifdef HWPMC_HOOKS
853 		PMC_SOFT_CALL( , , lock, failed);
854 #endif
855 
856 		/*
857 		 * If the operation is expected to not sleep, just give
858 		 * up and return.
859 		 */
860 		if (LK_TRYOP(flags)) {
861 			LOCK_LOG2(lk, "%s: %p fails the try operation",
862 			    __func__, lk);
863 			error = EBUSY;
864 			break;
865 		}
866 
867 		/*
868 		 * Acquire the sleepqueue chain lock because we
869 		 * probably will need to manipulate the waiters flags.
870 		 */
871 		sleepq_lock(&lk->lock_object);
872 		x = lockmgr_read_value(lk);
873 retry_sleepq:
874 
875 		/*
876 		 * If the lock has been released while we spun on
877 		 * the sleepqueue chain lock, just try again.
878 		 */
879 		if (x == LK_UNLOCKED) {
880 			sleepq_release(&lk->lock_object);
881 			continue;
882 		}
883 
884 		/*
885 		 * The lock can be in the state where there is a
886 		 * pending queue of waiters, but still no owner.
887 		 * This happens when the lock is contested and an
888 		 * owner is going to claim the lock.
889 		 * If curthread is the one that successfully acquires
890 		 * it, claim lock ownership and return, preserving the
891 		 * waiters flags.
892 		 */
893 		v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
894 		if ((x & ~v) == LK_UNLOCKED) {
895 			v &= ~LK_EXCLUSIVE_SPINNERS;
896 			if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
897 			    tid | v)) {
898 				sleepq_release(&lk->lock_object);
899 				LOCK_LOG2(lk,
900 				    "%s: %p claimed by a new writer",
901 				    __func__, lk);
902 				break;
903 			}
904 			goto retry_sleepq;
905 		}
906 
907 		/*
908 		 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
909 		 * fail, loop back and retry.
910 		 */
911 		if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
912 			if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
913 			    x | LK_EXCLUSIVE_WAITERS)) {
914 				goto retry_sleepq;
915 			}
916 			LOCK_LOG2(lk, "%s: %p set excl waiters flag",
917 			    __func__, lk);
918 		}
919 
920 		if (lwa == NULL) {
921 			iwmesg = lk->lock_object.lo_name;
922 			ipri = lk->lk_pri;
923 			itimo = lk->lk_timo;
924 		} else {
925 			iwmesg = lwa->iwmesg;
926 			ipri = lwa->ipri;
927 			itimo = lwa->itimo;
928 		}
929 
930 		/*
931 		 * Since we have been unable to acquire the
932 		 * exclusive lock and the exclusive waiters flag
933 		 * is set, we will sleep.
934 		 */
935 #ifdef KDTRACE_HOOKS
936 		sleep_time -= lockstat_nsecs(&lk->lock_object);
937 #endif
938 		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
939 		    SQ_EXCLUSIVE_QUEUE);
940 #ifdef KDTRACE_HOOKS
941 		sleep_time += lockstat_nsecs(&lk->lock_object);
942 #endif
943 		flags &= ~LK_INTERLOCK;
944 		if (error) {
945 			LOCK_LOG3(lk,
946 			    "%s: interrupted sleep for %p with %d",
947 			    __func__, lk, error);
948 			break;
949 		}
950 		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
951 		    __func__, lk);
952 		x = lockmgr_read_value(lk);
953 	}
954 	if (error == 0) {
955 #ifdef KDTRACE_HOOKS
956 		if (sleep_time != 0)
957 			LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
958 			    LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
959 			    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
960 #endif
961 #ifdef LOCK_PROFILING
962 		lockmgr_note_exclusive_acquire(lk, contested, waittime,
963 		    file, line, flags);
964 #else
965 		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
966 		    flags);
967 #endif
968 	}
969 
970 out:
971 	lockmgr_exit(flags, ilk, 0);
972 	return (error);
973 }
974 
975 static __noinline int
976 lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
977     const char *file, int line, struct lockmgr_wait *lwa)
978 {
979 	uintptr_t tid, v, setv;
980 	int error = 0;
981 	int op;
982 
983 	if (SCHEDULER_STOPPED())
984 		goto out;
985 
986 	tid = (uintptr_t)curthread;
987 
988 	_lockmgr_assert(lk, KA_SLOCKED, file, line);
989 
990 	op = flags & LK_TYPE_MASK;
991 	v = lockmgr_read_value(lk);
992 	for (;;) {
993 		if (LK_SHARERS(v) > 1) {
994 			if (op == LK_TRYUPGRADE) {
995 				LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
996 				    __func__, lk);
997 				error = EBUSY;
998 				goto out;
999 			}
1000 			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &v,
1001 			    v - LK_ONE_SHARER)) {
1002 				lockmgr_note_shared_release(lk, file, line);
1003 				goto out_xlock;
1004 			}
1005 			continue;
1006 		}
1007 		MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));
1008 
1009 		setv = tid;
1010 		setv |= (v & LK_ALL_WAITERS);
1011 
1012 		/*
1013 		 * Try to switch from one shared lock to an exclusive one.
1014 		 * We need to preserve waiters flags during the operation.
1015 		 */
1016 		if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
1017 			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
1018 			    line);
1019 			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
1020 			    LK_TRYWIT(flags), file, line);
1021 			LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
1022 			TD_SLOCKS_DEC(curthread);
1023 			goto out;
1024 		}
1025 	}
1026 
1027 out_xlock:
1028 	error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
1029 	flags &= ~LK_INTERLOCK;
1030 out:
1031 	lockmgr_exit(flags, ilk, 0);
1032 	return (error);
1033 }
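/*
 * Editorial note: when other sharers are present, LK_UPGRADE drops this
 * thread's shared count and falls back to a full exclusive acquisition, so
 * the lock can be temporarily released under the caller; LK_TRYUPGRADE
 * instead fails with EBUSY in that situation.
 */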
1034 
1035 int
1036 lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
1037     const char *file, int line)
1038 {
1039 	struct lock_class *class;
1040 	uintptr_t x, tid;
1041 	u_int op;
1042 	bool locked;
1043 
1044 	if (SCHEDULER_STOPPED())
1045 		return (0);
1046 
1047 	op = flags & LK_TYPE_MASK;
1048 	locked = false;
1049 	switch (op) {
1050 	case LK_SHARED:
1051 		if (LK_CAN_WITNESS(flags))
1052 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1053 			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
1054 		if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
1055 			break;
1056 		x = lockmgr_read_value(lk);
1057 		if (lockmgr_slock_try(lk, &x, flags, true)) {
1058 			lockmgr_note_shared_acquire(lk, 0, 0,
1059 			    file, line, flags);
1060 			locked = true;
1061 		} else {
1062 			return (lockmgr_slock_hard(lk, flags, ilk, file, line,
1063 			    NULL));
1064 		}
1065 		break;
1066 	case LK_EXCLUSIVE:
1067 		if (LK_CAN_WITNESS(flags))
1068 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1069 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1070 			    ilk : NULL);
1071 		tid = (uintptr_t)curthread;
1072 		if (lockmgr_read_value(lk) == LK_UNLOCKED &&
1073 		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1074 			lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1075 			    flags);
1076 			locked = true;
1077 		} else {
1078 			return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
1079 			    NULL));
1080 		}
1081 		break;
1082 	case LK_UPGRADE:
1083 	case LK_TRYUPGRADE:
1084 		return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
1085 	default:
1086 		break;
1087 	}
1088 	if (__predict_true(locked)) {
1089 		if (__predict_false(flags & LK_INTERLOCK)) {
1090 			class = LOCK_CLASS(ilk);
1091 			class->lc_unlock(ilk);
1092 		}
1093 		return (0);
1094 	} else {
1095 		return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
1096 		    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
1097 	}
1098 }
1099 
1100 static __noinline int
1101 lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1102     const char *file, int line)
1103 
1104 {
1105 	int wakeup_swapper = 0;
1106 
1107 	if (SCHEDULER_STOPPED())
1108 		goto out;
1109 
1110 	wakeup_swapper = wakeupshlk(lk, file, line);
1111 
1112 out:
1113 	lockmgr_exit(flags, ilk, wakeup_swapper);
1114 	return (0);
1115 }
1116 
1117 static __noinline int
1118 lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1119     const char *file, int line)
1120 {
1121 	uintptr_t tid, v;
1122 	int wakeup_swapper = 0;
1123 	u_int realexslp;
1124 	int queue;
1125 
1126 	if (SCHEDULER_STOPPED())
1127 		goto out;
1128 
1129 	tid = (uintptr_t)curthread;
1130 
1131 	/*
1132 	 * As a first option, treat the lock as if it has no
1133 	 * waiters.
1134 	 * Fix up the tid variable if the lock has been disowned.
1135 	 */
1136 	if (lockmgr_disowned_v(x))
1137 		tid = LK_KERNPROC;
1138 
1139 	/*
1140 	 * The lock is held in exclusive mode.
1141 	 * If the lock is recursed also, then unrecurse it.
1142 	 */
1143 	if (lockmgr_recursed_v(x)) {
1144 		LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
1145 		lk->lk_recurse--;
1146 		if (lk->lk_recurse == 0)
1147 			atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
1148 		goto out;
1149 	}
1150 	if (tid != LK_KERNPROC)
1151 		LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
1152 		    LOCKSTAT_WRITER);
1153 
1154 	if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
1155 		goto out;
1156 
1157 	sleepq_lock(&lk->lock_object);
1158 	x = lockmgr_read_value(lk);
1159 	v = LK_UNLOCKED;
1160 
1161 	/*
1162 	 * If the lock has exclusive waiters, give them
1163 	 * preference in order to avoid deadlock with
1164 	 * shared runners-up.
1165 	 * If interruptible sleeps left the exclusive queue
1166 	 * empty, avoid starvation of the threads sleeping
1167 	 * on the shared queue by giving them precedence
1168 	 * and clearing the exclusive waiters bit anyway.
1169 	 * Note that the lk_exslpfail count may overstate
1170 	 * the real number of waiters with the
1171 	 * LK_SLEEPFAIL flag on, because such waiters may
1172 	 * also be using interruptible sleeps;
1173 	 * lk_exslpfail is therefore only an upper bound,
1174 	 * edge cases included.
1175 	 */
1176 	MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1177 	realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
1178 	if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1179 		if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
1180 			lk->lk_exslpfail = 0;
1181 			queue = SQ_EXCLUSIVE_QUEUE;
1182 			v |= (x & LK_SHARED_WAITERS);
1183 		} else {
1184 			lk->lk_exslpfail = 0;
1185 			LOCK_LOG2(lk,
1186 			    "%s: %p has only LK_SLEEPFAIL sleepers",
1187 			    __func__, lk);
1188 			LOCK_LOG2(lk,
1189 			    "%s: %p waking up threads on the exclusive queue",
1190 			    __func__, lk);
1191 			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
1192 			    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1193 			queue = SQ_SHARED_QUEUE;
1194 		}
1195 	} else {
1196 		/*
1197 		 * Exclusive waiters sleeping with LK_SLEEPFAIL
1198 		 * on and using interruptible sleeps/timeouts
1199 		 * may have left spurious lk_exslpfail counts
1200 		 * behind, so clean them up anyway.
1201 		 */
1202 		lk->lk_exslpfail = 0;
1203 		queue = SQ_SHARED_QUEUE;
1204 	}
1205 
1206 	LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
1207 	    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1208 	    "exclusive");
1209 	atomic_store_rel_ptr(&lk->lk_lock, v);
1210 	wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
1211 	sleepq_release(&lk->lock_object);
1212 
1213 out:
1214 	lockmgr_exit(flags, ilk, wakeup_swapper);
1215 	return (0);
1216 }
1217 
1218 /*
1219  * Lightweight entry points for common operations.
1220  *
1221  * Functionality is similar to sx locks, in that none of the additional lockmgr
1222  * features are supported. To be clear, these are NOT supported:
1223  * 1. shared locking disablement
1224  * 2. returning with an error after sleep
1225  * 3. unlocking the interlock
1226  *
1227  * If in doubt, use lockmgr_lock_flags.
1228  */
1229 int
1230 lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
1231 {
1232 	uintptr_t x;
1233 
1234 	MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
1235 	MPASS((flags & LK_INTERLOCK) == 0);
1236 	MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
1237 
1238 	if (LK_CAN_WITNESS(flags))
1239 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1240 		    file, line, NULL);
1241 	x = lockmgr_read_value(lk);
1242 	if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
1243 		lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
1244 		return (0);
1245 	}
1246 
1247 	return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1248 }
1249 
1250 int
1251 lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
1252 {
1253 	uintptr_t tid;
1254 
1255 	MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
1256 	MPASS((flags & LK_INTERLOCK) == 0);
1257 
1258 	if (LK_CAN_WITNESS(flags))
1259 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1260 		    LOP_EXCLUSIVE, file, line, NULL);
1261 	tid = (uintptr_t)curthread;
1262 	if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1263 		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1264 		    flags);
1265 		return (0);
1266 	}
1267 
1268 	return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1269 }
1270 
1271 int
1272 lockmgr_unlock(struct lock *lk)
1273 {
1274 	uintptr_t x, tid;
1275 	const char *file;
1276 	int line;
1277 
1278 	file = __FILE__;
1279 	line = __LINE__;
1280 
1281 	_lockmgr_assert(lk, KA_LOCKED, file, line);
1282 	x = lockmgr_read_value(lk);
1283 	if (__predict_true((x & LK_SHARE) != 0)) {
1284 		lockmgr_note_shared_release(lk, file, line);
1285 		if (lockmgr_sunlock_try(lk, &x)) {
1286 			LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1287 		} else {
1288 			return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1289 		}
1290 	} else {
1291 		tid = (uintptr_t)curthread;
1292 		lockmgr_note_exclusive_release(lk, file, line);
1293 		if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1294 			LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
1295 		} else {
1296 			return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1297 		}
1298 	}
1299 	return (0);
1300 }
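/*
 * Illustrative pairing of the lightweight entry points (editorial sketch;
 * example_lk is hypothetical, and the caller passes its own file/line for
 * diagnostics):
 *
 *	lockmgr_slock(&example_lk, LK_SHARED, __FILE__, __LINE__);
 *	...
 *	lockmgr_unlock(&example_lk);
 */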
1301 
1302 int
1303 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
1304     const char *wmesg, int pri, int timo, const char *file, int line)
1305 {
1306 	GIANT_DECLARE;
1307 	struct lockmgr_wait lwa;
1308 	struct lock_class *class;
1309 	const char *iwmesg;
1310 	uintptr_t tid, v, x;
1311 	u_int op, realexslp;
1312 	int error, ipri, itimo, queue, wakeup_swapper;
1313 #ifdef LOCK_PROFILING
1314 	uint64_t waittime = 0;
1315 	int contested = 0;
1316 #endif
1317 
1318 	if (SCHEDULER_STOPPED())
1319 		return (0);
1320 
1321 	error = 0;
1322 	tid = (uintptr_t)curthread;
1323 	op = (flags & LK_TYPE_MASK);
1324 	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
1325 	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
1326 	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
1327 
1328 	lwa.iwmesg = iwmesg;
1329 	lwa.ipri = ipri;
1330 	lwa.itimo = itimo;
1331 
1332 	MPASS((flags & ~LK_TOTAL_MASK) == 0);
1333 	KASSERT((op & (op - 1)) == 0,
1334 	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
1335 	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
1336 	    (op != LK_DOWNGRADE && op != LK_RELEASE),
1337 	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
1338 	    __func__, file, line));
1339 	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
1340 	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
1341 	    __func__, file, line));
1342 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
1343 	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
1344 	    lk->lock_object.lo_name, file, line));
1345 
1346 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
1347 
1348 	if (lk->lock_object.lo_flags & LK_NOSHARE) {
1349 		switch (op) {
1350 		case LK_SHARED:
1351 			op = LK_EXCLUSIVE;
1352 			break;
1353 		case LK_UPGRADE:
1354 		case LK_TRYUPGRADE:
1355 		case LK_DOWNGRADE:
1356 			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
1357 			    file, line);
1358 			if (flags & LK_INTERLOCK)
1359 				class->lc_unlock(ilk);
1360 			return (0);
1361 		}
1362 	}
1363 
1364 	wakeup_swapper = 0;
1365 	switch (op) {
1366 	case LK_SHARED:
1367 		return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
1368 		break;
1369 	case LK_UPGRADE:
1370 	case LK_TRYUPGRADE:
1371 		return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
1372 		break;
1373 	case LK_EXCLUSIVE:
1374 		return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1375 		break;
1376 	case LK_DOWNGRADE:
1377 		_lockmgr_assert(lk, KA_XLOCKED, file, line);
1378 		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
1379 
1380 		/*
1381 		 * Panic if the lock is recursed.
1382 		 */
1383 		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1384 			if (flags & LK_INTERLOCK)
1385 				class->lc_unlock(ilk);
1386 			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
1387 			    __func__, iwmesg, file, line);
1388 		}
1389 		TD_SLOCKS_INC(curthread);
1390 
1391 		/*
1392 		 * In order to preserve waiters flags, just spin.
1393 		 */
1394 		for (;;) {
1395 			x = lockmgr_read_value(lk);
1396 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1397 			x &= LK_ALL_WAITERS;
1398 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1399 			    LK_SHARERS_LOCK(1) | x))
1400 				break;
1401 			cpu_spinwait();
1402 		}
1403 		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
1404 		LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
1405 		break;
1406 	case LK_RELEASE:
1407 		_lockmgr_assert(lk, KA_LOCKED, file, line);
1408 		x = lockmgr_read_value(lk);
1409 
1410 		if (__predict_true((x & LK_SHARE) != 0)) {
1411 			lockmgr_note_shared_release(lk, file, line);
1412 			return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1413 		} else {
1414 			lockmgr_note_exclusive_release(lk, file, line);
1415 			return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
1416 		}
1417 		break;
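	/*
	 * Editorial note: LK_DRAIN sleeps until the lock can be taken from a
	 * fully idle state (lk_lock == LK_UNLOCKED, i.e. no holder and no
	 * waiter bits) and then acquires it exclusively; it is typically
	 * used before tearing down the object the lock protects.
	 */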
1418 	case LK_DRAIN:
1419 		if (LK_CAN_WITNESS(flags))
1420 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1421 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1422 			    ilk : NULL);
1423 
1424 		/*
1425 		 * Trying to drain a lock we already own will result in a
1426 		 * deadlock.
1427 		 */
1428 		if (lockmgr_xlocked(lk)) {
1429 			if (flags & LK_INTERLOCK)
1430 				class->lc_unlock(ilk);
1431 			panic("%s: draining %s with the lock held @ %s:%d\n",
1432 			    __func__, iwmesg, file, line);
1433 		}
1434 
1435 		for (;;) {
1436 			if (lk->lk_lock == LK_UNLOCKED &&
1437 			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1438 				break;
1439 
1440 #ifdef HWPMC_HOOKS
1441 			PMC_SOFT_CALL( , , lock, failed);
1442 #endif
1443 			lock_profile_obtain_lock_failed(&lk->lock_object, false,
1444 			    &contested, &waittime);
1445 
1446 			/*
1447 			 * If the operation is expected to not sleep, just
1448 			 * give up and return.
1449 			 */
1450 			if (LK_TRYOP(flags)) {
1451 				LOCK_LOG2(lk, "%s: %p fails the try operation",
1452 				    __func__, lk);
1453 				error = EBUSY;
1454 				break;
1455 			}
1456 
1457 			/*
1458 			 * Acquire the sleepqueue chain lock because we
1459 			 * probably will need to manipulate the waiters flags.
1460 			 */
1461 			sleepq_lock(&lk->lock_object);
1462 			x = lockmgr_read_value(lk);
1463 
1464 			/*
1465 			 * If the lock has been released while we spun on
1466 			 * the sleepqueue chain lock, just try again.
1467 			 */
1468 			if (x == LK_UNLOCKED) {
1469 				sleepq_release(&lk->lock_object);
1470 				continue;
1471 			}
1472 
1473 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1474 			if ((x & ~v) == LK_UNLOCKED) {
1475 				v = (x & ~LK_EXCLUSIVE_SPINNERS);
1476 
1477 				/*
1478 				 * If interruptible sleeps left the exclusive
1479 				 * queue empty, avoid starvation of the
1480 				 * threads sleeping on the shared queue by
1481 				 * giving them precedence and clearing the
1482 				 * exclusive waiters bit anyway.
1483 				 * Note that the lk_exslpfail count may
1484 				 * overstate the real number of waiters with
1485 				 * the LK_SLEEPFAIL flag on, because such
1486 				 * waiters may also be using interruptible
1487 				 * sleeps; lk_exslpfail is therefore
1488 				 * only an upper bound, edge cases
1489 				 * included.
1490 				 */
1491 				if (v & LK_EXCLUSIVE_WAITERS) {
1492 					queue = SQ_EXCLUSIVE_QUEUE;
1493 					v &= ~LK_EXCLUSIVE_WAITERS;
1494 				} else {
1495 					/*
1496 					 * Exclusive waiters sleeping with
1497 					 * LK_SLEEPFAIL on and using
1498 					 * interruptible sleeps/timeouts may
1499 					 * have left spurious lk_exslpfail
1500 					 * counts behind, so clean them up anyway.
1501 					 */
1502 					MPASS(v & LK_SHARED_WAITERS);
1503 					lk->lk_exslpfail = 0;
1504 					queue = SQ_SHARED_QUEUE;
1505 					v &= ~LK_SHARED_WAITERS;
1506 				}
1507 				if (queue == SQ_EXCLUSIVE_QUEUE) {
1508 					realexslp =
1509 					    sleepq_sleepcnt(&lk->lock_object,
1510 					    SQ_EXCLUSIVE_QUEUE);
1511 					if (lk->lk_exslpfail >= realexslp) {
1512 						lk->lk_exslpfail = 0;
1513 						queue = SQ_SHARED_QUEUE;
1514 						v &= ~LK_SHARED_WAITERS;
1515 						if (realexslp != 0) {
1516 							LOCK_LOG2(lk,
1517 					"%s: %p has only LK_SLEEPFAIL sleepers",
1518 							    __func__, lk);
1519 							LOCK_LOG2(lk,
1520 			"%s: %p waking up threads on the exclusive queue",
1521 							    __func__, lk);
1522 							wakeup_swapper =
1523 							    sleepq_broadcast(
1524 							    &lk->lock_object,
1525 							    SLEEPQ_LK, 0,
1526 							    SQ_EXCLUSIVE_QUEUE);
1527 						}
1528 					} else
1529 						lk->lk_exslpfail = 0;
1530 				}
1531 				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1532 					sleepq_release(&lk->lock_object);
1533 					continue;
1534 				}
1535 				LOCK_LOG3(lk,
1536 				"%s: %p waking up all threads on the %s queue",
1537 				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1538 				    "shared" : "exclusive");
1539 				wakeup_swapper |= sleepq_broadcast(
1540 				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1541 
1542 				/*
1543 				 * If shared waiters have been woken up we need
1544 				 * to wait for one of them to acquire the lock
1545 				 * before setting the exclusive waiters flag,
1546 				 * in order to avoid a deadlock.
1547 				 */
1548 				if (queue == SQ_SHARED_QUEUE) {
1549 					for (v = lk->lk_lock;
1550 					    (v & LK_SHARE) && !LK_SHARERS(v);
1551 					    v = lk->lk_lock)
1552 						cpu_spinwait();
1553 				}
1554 			}
1555 
1556 			/*
1557 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1558 			 * fail, loop back and retry.
1559 			 */
1560 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1561 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1562 				    x | LK_EXCLUSIVE_WAITERS)) {
1563 					sleepq_release(&lk->lock_object);
1564 					continue;
1565 				}
1566 				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1567 				    __func__, lk);
1568 			}
1569 
1570 			/*
1571 			 * Since we have been unable to acquire the
1572 			 * exclusive lock and the exclusive waiters flag
1573 			 * is set, we will sleep.
1574 			 */
1575 			if (flags & LK_INTERLOCK) {
1576 				class->lc_unlock(ilk);
1577 				flags &= ~LK_INTERLOCK;
1578 			}
1579 			GIANT_SAVE();
1580 			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1581 			    SQ_EXCLUSIVE_QUEUE);
1582 			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1583 			GIANT_RESTORE();
1584 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1585 			    __func__, lk);
1586 		}
1587 
1588 		if (error == 0) {
1589 			lock_profile_obtain_lock_success(&lk->lock_object,
1590 			    false, contested, waittime, file, line);
1591 			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1592 			    lk->lk_recurse, file, line);
1593 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1594 			    LK_TRYWIT(flags), file, line);
1595 			TD_LOCKS_INC(curthread);
1596 			STACK_SAVE(lk);
1597 		}
1598 		break;
1599 	default:
1600 		if (flags & LK_INTERLOCK)
1601 			class->lc_unlock(ilk);
1602 		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1603 	}
1604 
1605 	if (flags & LK_INTERLOCK)
1606 		class->lc_unlock(ilk);
1607 	if (wakeup_swapper)
1608 		kick_proc0();
1609 
1610 	return (error);
1611 }
1612 
1613 void
1614 _lockmgr_disown(struct lock *lk, const char *file, int line)
1615 {
1616 	uintptr_t tid, x;
1617 
1618 	if (SCHEDULER_STOPPED())
1619 		return;
1620 
1621 	tid = (uintptr_t)curthread;
1622 	_lockmgr_assert(lk, KA_XLOCKED, file, line);
1623 
1624 	/*
1625 	 * Panic if the lock is recursed.
1626 	 */
1627 	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1628 		panic("%s: disown a recursed lockmgr @ %s:%d\n",
1629 		    __func__, file, line);
1630 
1631 	/*
1632 	 * If the owner is already LK_KERNPROC just skip the whole operation.
1633 	 */
1634 	if (LK_HOLDER(lk->lk_lock) != tid)
1635 		return;
1636 	lock_profile_release_lock(&lk->lock_object, false);
1637 	LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1638 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1639 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1640 	TD_LOCKS_DEC(curthread);
1641 	STACK_SAVE(lk);
1642 
1643 	/*
1644 	 * In order to preserve waiters flags, just spin.
1645 	 */
1646 	for (;;) {
1647 		x = lockmgr_read_value(lk);
1648 		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1649 		x &= LK_ALL_WAITERS;
1650 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1651 		    LK_KERNPROC | x))
1652 			return;
1653 		cpu_spinwait();
1654 	}
1655 }
1656 
1657 void
1658 lockmgr_printinfo(const struct lock *lk)
1659 {
1660 	struct thread *td;
1661 	uintptr_t x;
1662 
1663 	if (lk->lk_lock == LK_UNLOCKED)
1664 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1665 	else if (lk->lk_lock & LK_SHARE)
1666 		printf("lock type %s: SHARED (count %ju)\n",
1667 		    lk->lock_object.lo_name,
1668 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1669 	else {
1670 		td = lockmgr_xholder(lk);
1671 		if (td == (struct thread *)LK_KERNPROC)
1672 			printf("lock type %s: EXCL by KERNPROC\n",
1673 			    lk->lock_object.lo_name);
1674 		else
1675 			printf("lock type %s: EXCL by thread %p "
1676 			    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1677 			    td, td->td_proc->p_pid, td->td_proc->p_comm,
1678 			    td->td_tid);
1679 	}
1680 
1681 	x = lk->lk_lock;
1682 	if (x & LK_EXCLUSIVE_WAITERS)
1683 		printf(" with exclusive waiters pending\n");
1684 	if (x & LK_SHARED_WAITERS)
1685 		printf(" with shared waiters pending\n");
1686 	if (x & LK_EXCLUSIVE_SPINNERS)
1687 		printf(" with exclusive spinners pending\n");
1688 
1689 	STACK_PRINT(lk);
1690 }
1691 
1692 int
1693 lockstatus(const struct lock *lk)
1694 {
1695 	uintptr_t v, x;
1696 	int ret;
1697 
1698 	ret = LK_SHARED;
1699 	x = lockmgr_read_value(lk);
1700 	v = LK_HOLDER(x);
1701 
1702 	if ((x & LK_SHARE) == 0) {
1703 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1704 			ret = LK_EXCLUSIVE;
1705 		else
1706 			ret = LK_EXCLOTHER;
1707 	} else if (x == LK_UNLOCKED)
1708 		ret = 0;
1709 
1710 	return (ret);
1711 }
1712 
1713 #ifdef INVARIANT_SUPPORT
1714 
1715 FEATURE(invariant_support,
1716     "Support for modules compiled with INVARIANTS option");
1717 
1718 #ifndef INVARIANTS
1719 #undef	_lockmgr_assert
1720 #endif
1721 
1722 void
1723 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1724 {
1725 	int slocked = 0;
1726 
1727 	if (SCHEDULER_STOPPED())
1728 		return;
1729 	switch (what) {
1730 	case KA_SLOCKED:
1731 	case KA_SLOCKED | KA_NOTRECURSED:
1732 	case KA_SLOCKED | KA_RECURSED:
1733 		slocked = 1;
1734 	case KA_LOCKED:
1735 	case KA_LOCKED | KA_NOTRECURSED:
1736 	case KA_LOCKED | KA_RECURSED:
1737 #ifdef WITNESS
1738 
1739 		/*
1740 		 * We cannot trust WITNESS if the lock is held in exclusive
1741 		 * mode and a call to lockmgr_disown() happened.
1742 		 * Work around this by skipping the check if the lock is held in
1743 		 * exclusive mode even for the KA_LOCKED case.
1744 		 */
1745 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1746 			witness_assert(&lk->lock_object, what, file, line);
1747 			break;
1748 		}
1749 #endif
1750 		if (lk->lk_lock == LK_UNLOCKED ||
1751 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1752 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1753 			panic("Lock %s not %slocked @ %s:%d\n",
1754 			    lk->lock_object.lo_name, slocked ? "share" : "",
1755 			    file, line);
1756 
1757 		if ((lk->lk_lock & LK_SHARE) == 0) {
1758 			if (lockmgr_recursed(lk)) {
1759 				if (what & KA_NOTRECURSED)
1760 					panic("Lock %s recursed @ %s:%d\n",
1761 					    lk->lock_object.lo_name, file,
1762 					    line);
1763 			} else if (what & KA_RECURSED)
1764 				panic("Lock %s not recursed @ %s:%d\n",
1765 				    lk->lock_object.lo_name, file, line);
1766 		}
1767 		break;
1768 	case KA_XLOCKED:
1769 	case KA_XLOCKED | KA_NOTRECURSED:
1770 	case KA_XLOCKED | KA_RECURSED:
1771 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1772 			panic("Lock %s not exclusively locked @ %s:%d\n",
1773 			    lk->lock_object.lo_name, file, line);
1774 		if (lockmgr_recursed(lk)) {
1775 			if (what & KA_NOTRECURSED)
1776 				panic("Lock %s recursed @ %s:%d\n",
1777 				    lk->lock_object.lo_name, file, line);
1778 		} else if (what & KA_RECURSED)
1779 			panic("Lock %s not recursed @ %s:%d\n",
1780 			    lk->lock_object.lo_name, file, line);
1781 		break;
1782 	case KA_UNLOCKED:
1783 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1784 			panic("Lock %s exclusively locked @ %s:%d\n",
1785 			    lk->lock_object.lo_name, file, line);
1786 		break;
1787 	default:
1788 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1789 		    line);
1790 	}
1791 }
1792 #endif
1793 
1794 #ifdef DDB
1795 int
1796 lockmgr_chain(struct thread *td, struct thread **ownerp)
1797 {
1798 	const struct lock *lk;
1799 
1800 	lk = td->td_wchan;
1801 
1802 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1803 		return (0);
1804 	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1805 	if (lk->lk_lock & LK_SHARE)
1806 		db_printf("SHARED (count %ju)\n",
1807 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1808 	else
1809 		db_printf("EXCL\n");
1810 	*ownerp = lockmgr_xholder(lk);
1811 
1812 	return (1);
1813 }
1814 
1815 static void
1816 db_show_lockmgr(const struct lock_object *lock)
1817 {
1818 	struct thread *td;
1819 	const struct lock *lk;
1820 
1821 	lk = (const struct lock *)lock;
1822 
1823 	db_printf(" state: ");
1824 	if (lk->lk_lock == LK_UNLOCKED)
1825 		db_printf("UNLOCKED\n");
1826 	else if (lk->lk_lock & LK_SHARE)
1827 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1828 	else {
1829 		td = lockmgr_xholder(lk);
1830 		if (td == (struct thread *)LK_KERNPROC)
1831 			db_printf("XLOCK: LK_KERNPROC\n");
1832 		else
1833 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1834 			    td->td_tid, td->td_proc->p_pid,
1835 			    td->td_proc->p_comm);
1836 		if (lockmgr_recursed(lk))
1837 			db_printf(" recursed: %d\n", lk->lk_recurse);
1838 	}
1839 	db_printf(" waiters: ");
1840 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1841 	case LK_SHARED_WAITERS:
1842 		db_printf("shared\n");
1843 		break;
1844 	case LK_EXCLUSIVE_WAITERS:
1845 		db_printf("exclusive\n");
1846 		break;
1847 	case LK_ALL_WAITERS:
1848 		db_printf("shared and exclusive\n");
1849 		break;
1850 	default:
1851 		db_printf("none\n");
1852 	}
1853 	db_printf(" spinners: ");
1854 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1855 		db_printf("exclusive\n");
1856 	else
1857 		db_printf("none\n");
1858 }
1859 #endif
1860