xref: /freebsd/sys/kern/kern_lock.c (revision 732a02b4e77866604a120a275c082bb6221bd2ff)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice(s), this list of conditions and the following disclaimer as
12  *    the first lines of this file unmodified other than the possible
13  *    addition of one or more copyright notices.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice(s), this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28  * DAMAGE.
29  */
30 
31 #include "opt_ddb.h"
32 #include "opt_hwpmc_hooks.h"
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include <sys/param.h>
38 #include <sys/kdb.h>
39 #include <sys/ktr.h>
40 #include <sys/lock.h>
41 #include <sys/lock_profile.h>
42 #include <sys/lockmgr.h>
43 #include <sys/lockstat.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/sleepqueue.h>
47 #ifdef DEBUG_LOCKS
48 #include <sys/stack.h>
49 #endif
50 #include <sys/sysctl.h>
51 #include <sys/systm.h>
52 
53 #include <machine/cpu.h>
54 
55 #ifdef DDB
56 #include <ddb/ddb.h>
57 #endif
58 
59 #ifdef HWPMC_HOOKS
60 #include <sys/pmckern.h>
61 PMC_SOFT_DECLARE( , , lock, failed);
62 #endif
63 
64 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
65     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
66 
67 #define	SQ_EXCLUSIVE_QUEUE	0
68 #define	SQ_SHARED_QUEUE		1
69 
70 #ifndef INVARIANTS
71 #define	_lockmgr_assert(lk, what, file, line)
72 #endif
73 
74 #define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
75 #define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
76 
77 #ifndef DEBUG_LOCKS
78 #define	STACK_PRINT(lk)
79 #define	STACK_SAVE(lk)
80 #define	STACK_ZERO(lk)
81 #else
82 #define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
83 #define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
84 #define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
85 #endif
86 
87 #define	LOCK_LOG2(lk, string, arg1, arg2)				\
88 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
89 		CTR2(KTR_LOCK, (string), (arg1), (arg2))
90 #define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
91 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
92 		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
93 
94 #define	GIANT_DECLARE							\
95 	int _i = 0;							\
96 	WITNESS_SAVE_DECL(Giant)
97 #define	GIANT_RESTORE() do {						\
98 	if (__predict_false(_i > 0)) {					\
99 		while (_i--)						\
100 			mtx_lock(&Giant);				\
101 		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
102 	}								\
103 } while (0)
104 #define	GIANT_SAVE() do {						\
105 	if (__predict_false(mtx_owned(&Giant))) {			\
106 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
107 		while (mtx_owned(&Giant)) {				\
108 			_i++;						\
109 			mtx_unlock(&Giant);				\
110 		}							\
111 	}								\
112 } while (0)
113 
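/*
 * Decide whether a new shared lock can be granted for lock word 'x'.
 * Normally this requires the lock to be unlocked or already shared, with
 * no exclusive waiters or spinners.  Outside of the fast path ('fp' is
 * false), a shared request may also jump ahead of exclusive waiters in
 * the deadlock-avoidance cases: curthread already holds shared locks and
 * LK_NODDLKTREAT is not set, or TDP_DEADLKTREAT is set.
 */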
114 static bool __always_inline
115 LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
116 {
117 
118 	if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
119 	    LK_SHARE)
120 		return (true);
121 	if (fp || (!(x & LK_SHARE)))
122 		return (false);
123 	if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
124 	    (curthread->td_pflags & TDP_DEADLKTREAT))
125 		return (true);
126 	return (false);
127 }
128 
129 #define	LK_TRYOP(x)							\
130 	((x) & LK_NOWAIT)
131 
132 #define	LK_CAN_WITNESS(x)						\
133 	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
134 #define	LK_TRYWIT(x)							\
135 	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
136 
137 #define	lockmgr_disowned(lk)						\
138 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
139 
140 #define	lockmgr_xlocked_v(v)						\
141 	(((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
142 
143 #define	lockmgr_xlocked(lk) lockmgr_xlocked_v((lk)->lk_lock)
144 
145 static void	assert_lockmgr(const struct lock_object *lock, int how);
146 #ifdef DDB
147 static void	db_show_lockmgr(const struct lock_object *lock);
148 #endif
149 static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
150 #ifdef KDTRACE_HOOKS
151 static int	owner_lockmgr(const struct lock_object *lock,
152 		    struct thread **owner);
153 #endif
154 static uintptr_t unlock_lockmgr(struct lock_object *lock);
155 
156 struct lock_class lock_class_lockmgr = {
157 	.lc_name = "lockmgr",
158 	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
159 	.lc_assert = assert_lockmgr,
160 #ifdef DDB
161 	.lc_ddb_show = db_show_lockmgr,
162 #endif
163 	.lc_lock = lock_lockmgr,
164 	.lc_unlock = unlock_lockmgr,
165 #ifdef KDTRACE_HOOKS
166 	.lc_owner = owner_lockmgr,
167 #endif
168 };
169 
170 struct lockmgr_wait {
171 	const char *iwmesg;
172 	int ipri;
173 	int itimo;
174 };
175 
176 static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
177     int flags, bool fp);
178 static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
179 
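/*
 * Common exit path for the lock routines: drop the interlock if
 * LK_INTERLOCK was passed and kick proc0 (the swapper) if a prior
 * wakeup requires it.
 */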
180 static void
181 lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
182 {
183 	struct lock_class *class;
184 
185 	if (flags & LK_INTERLOCK) {
186 		class = LOCK_CLASS(ilk);
187 		class->lc_unlock(ilk);
188 	}
189 
190 	if (__predict_false(wakeup_swapper))
191 		kick_proc0();
192 }
193 
194 static void
195 lockmgr_note_shared_acquire(struct lock *lk, int contested,
196     uint64_t waittime, const char *file, int line, int flags)
197 {
198 
199 	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
200 	    waittime, file, line, LOCKSTAT_READER);
201 	LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
202 	WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
203 	TD_LOCKS_INC(curthread);
204 	TD_SLOCKS_INC(curthread);
205 	STACK_SAVE(lk);
206 }
207 
208 static void
209 lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
210 {
211 
212 	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
213 	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
214 	TD_LOCKS_DEC(curthread);
215 	TD_SLOCKS_DEC(curthread);
216 }
217 
218 static void
219 lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
220     uint64_t waittime, const char *file, int line, int flags)
221 {
222 
223 	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
224 	    waittime, file, line, LOCKSTAT_WRITER);
225 	LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
226 	WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
227 	    line);
228 	TD_LOCKS_INC(curthread);
229 	STACK_SAVE(lk);
230 }
231 
232 static void
233 lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
234 {
235 
236 	if (LK_HOLDER(lk->lk_lock) != LK_KERNPROC) {
237 		WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
238 		TD_LOCKS_DEC(curthread);
239 	}
240 	LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
241 	    line);
242 }
243 
244 static __inline struct thread *
245 lockmgr_xholder(const struct lock *lk)
246 {
247 	uintptr_t x;
248 
249 	x = lk->lk_lock;
250 	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
251 }
252 
253 /*
254  * This function assumes that the sleepqueue chain lock is held on entry
255  * and returns with it released.  It also assumes that the generic
256  * interlock is sane and has been checked by the caller.  If LK_INTERLOCK
257  * is specified, the interlock is not reacquired after the sleep.
258  */
259 static __inline int
260 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
261     const char *wmesg, int pri, int timo, int queue)
262 {
263 	GIANT_DECLARE;
264 	struct lock_class *class;
265 	int catch, error;
266 
267 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
268 	catch = pri & PCATCH;
269 	pri &= PRIMASK;
270 	error = 0;
271 
272 	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
273 	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
274 
275 	if (flags & LK_INTERLOCK)
276 		class->lc_unlock(ilk);
277 	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
278 		lk->lk_exslpfail++;
279 	GIANT_SAVE();
280 	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
281 	    SLEEPQ_INTERRUPTIBLE : 0), queue);
282 	if ((flags & LK_TIMELOCK) && timo)
283 		sleepq_set_timeout(&lk->lock_object, timo);
284 
285 	/*
286 	 * Pick the appropriate flavor of sleep based on the flags.
287 	 */
288 	if ((flags & LK_TIMELOCK) && timo && catch)
289 		error = sleepq_timedwait_sig(&lk->lock_object, pri);
290 	else if ((flags & LK_TIMELOCK) && timo)
291 		error = sleepq_timedwait(&lk->lock_object, pri);
292 	else if (catch)
293 		error = sleepq_wait_sig(&lk->lock_object, pri);
294 	else
295 		sleepq_wait(&lk->lock_object, pri);
296 	GIANT_RESTORE();
297 	if ((flags & LK_SLEEPFAIL) && error == 0)
298 		error = ENOLCK;
299 
300 	return (error);
301 }
302 
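/*
 * Release a shared lock, waking up waiters as needed.  Returns nonzero
 * if the caller must kick proc0 (the swapper).
 */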
303 static __inline int
304 wakeupshlk(struct lock *lk, const char *file, int line)
305 {
306 	uintptr_t v, x, orig_x;
307 	u_int realexslp;
308 	int queue, wakeup_swapper;
309 
310 	wakeup_swapper = 0;
311 	for (;;) {
312 		x = lk->lk_lock;
313 		if (lockmgr_sunlock_try(lk, &x))
314 			break;
315 
316 		/*
317 		 * We should have a sharer with waiters, so enter the hard
318 		 * path in order to handle wakeups correctly.
319 		 */
320 		sleepq_lock(&lk->lock_object);
321 		orig_x = lk->lk_lock;
322 retry_sleepq:
323 		x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
324 		v = LK_UNLOCKED;
325 
326 		/*
327 		 * If the lock has exclusive waiters, give them preference in
328 		 * order to avoid deadlock with shared runners-up.
329 		 * If interruptible sleeps left the exclusive queue empty,
330 		 * avoid starvation of the threads sleeping on the shared
331 		 * queue by giving them precedence and clearing the
332 		 * exclusive waiters bit anyway.
333 		 * Please note that lk_exslpfail may overstate the real
334 		 * number of waiters sleeping with the LK_SLEEPFAIL flag on,
335 		 * because such waiters may also be using interruptible
336 		 * sleeps, so lk_exslpfail should only be treated as an
337 		 * upper bound, including in the edge cases.
338 		 */
339 		realexslp = sleepq_sleepcnt(&lk->lock_object,
340 		    SQ_EXCLUSIVE_QUEUE);
341 		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
342 			if (lk->lk_exslpfail < realexslp) {
343 				lk->lk_exslpfail = 0;
344 				queue = SQ_EXCLUSIVE_QUEUE;
345 				v |= (x & LK_SHARED_WAITERS);
346 			} else {
347 				lk->lk_exslpfail = 0;
348 				LOCK_LOG2(lk,
349 				    "%s: %p has only LK_SLEEPFAIL sleepers",
350 				    __func__, lk);
351 				LOCK_LOG2(lk,
352 			    "%s: %p waking up threads on the exclusive queue",
353 				    __func__, lk);
354 				wakeup_swapper =
355 				    sleepq_broadcast(&lk->lock_object,
356 				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
357 				queue = SQ_SHARED_QUEUE;
358 			}
359 
360 		} else {
361 
362 			/*
363 			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
364 			 * and using interruptible sleeps/timeout may have
365 			 * left spurious lk_exslpfail counts behind, so clean
366 			 * them up anyway.
367 			 */
368 			lk->lk_exslpfail = 0;
369 			queue = SQ_SHARED_QUEUE;
370 		}
371 
372 		if (lockmgr_sunlock_try(lk, &orig_x)) {
373 			sleepq_release(&lk->lock_object);
374 			break;
375 		}
376 
377 		x |= LK_SHARERS_LOCK(1);
378 		if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
379 			orig_x = x;
380 			goto retry_sleepq;
381 		}
382 		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
383 		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
384 		    "exclusive");
385 		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
386 		    0, queue);
387 		sleepq_release(&lk->lock_object);
388 		break;
389 	}
390 
391 	LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
392 	return (wakeup_swapper);
393 }
394 
395 static void
396 assert_lockmgr(const struct lock_object *lock, int what)
397 {
398 
399 	panic("lockmgr locks do not support assertions");
400 }
401 
402 static void
403 lock_lockmgr(struct lock_object *lock, uintptr_t how)
404 {
405 
406 	panic("lockmgr locks do not support sleep interlocking");
407 }
408 
409 static uintptr_t
410 unlock_lockmgr(struct lock_object *lock)
411 {
412 
413 	panic("lockmgr locks do not support sleep interlocking");
414 }
415 
416 #ifdef KDTRACE_HOOKS
417 static int
418 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
419 {
420 
421 	panic("lockmgr locks do not support owner inquiring");
422 }
423 #endif
424 
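/*
 * Initialize a lockmgr lock, translating the LK_* initialization flags
 * into the corresponding lock_object LO_* flags.
 */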
425 void
426 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
427 {
428 	int iflags;
429 
430 	MPASS((flags & ~LK_INIT_MASK) == 0);
431 	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
432             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
433             &lk->lk_lock));
434 
435 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
436 	if (flags & LK_CANRECURSE)
437 		iflags |= LO_RECURSABLE;
438 	if ((flags & LK_NODUP) == 0)
439 		iflags |= LO_DUPOK;
440 	if (flags & LK_NOPROFILE)
441 		iflags |= LO_NOPROFILE;
442 	if ((flags & LK_NOWITNESS) == 0)
443 		iflags |= LO_WITNESS;
444 	if (flags & LK_QUIET)
445 		iflags |= LO_QUIET;
446 	if (flags & LK_IS_VNODE)
447 		iflags |= LO_IS_VNODE;
448 	if (flags & LK_NEW)
449 		iflags |= LO_NEW;
450 	iflags |= flags & LK_NOSHARE;
451 
452 	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
453 	lk->lk_lock = LK_UNLOCKED;
454 	lk->lk_recurse = 0;
455 	lk->lk_exslpfail = 0;
456 	lk->lk_timo = timo;
457 	lk->lk_pri = pri;
458 	STACK_ZERO(lk);
459 }
460 
461 /*
462  * XXX: Gross hacks to manipulate external lock flags after
463  * initialization.  Used for certain vnode and buf locks.
464  */
465 void
466 lockallowshare(struct lock *lk)
467 {
468 
469 	lockmgr_assert(lk, KA_XLOCKED);
470 	lk->lock_object.lo_flags &= ~LK_NOSHARE;
471 }
472 
473 void
474 lockdisableshare(struct lock *lk)
475 {
476 
477 	lockmgr_assert(lk, KA_XLOCKED);
478 	lk->lock_object.lo_flags |= LK_NOSHARE;
479 }
480 
481 void
482 lockallowrecurse(struct lock *lk)
483 {
484 
485 	lockmgr_assert(lk, KA_XLOCKED);
486 	lk->lock_object.lo_flags |= LO_RECURSABLE;
487 }
488 
489 void
490 lockdisablerecurse(struct lock *lk)
491 {
492 
493 	lockmgr_assert(lk, KA_XLOCKED);
494 	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
495 }
496 
497 void
498 lockdestroy(struct lock *lk)
499 {
500 
501 	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
502 	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
503 	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
504 	lock_destroy(&lk->lock_object);
505 }
506 
507 static bool __always_inline
508 lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
509 {
510 
511 	/*
512 	 * If no other thread has an exclusive lock, or
513 	 * no exclusive waiter is present, bump the count of
514 	 * sharers.  Since we have to preserve the state of
515 	 * waiters, if we fail to acquire the shared lock
516 	 * loop back and retry.
517 	 */
518 	*xp = lk->lk_lock;
519 	while (LK_CAN_SHARE(*xp, flags, fp)) {
520 		if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
521 		    *xp + LK_ONE_SHARER)) {
522 			return (true);
523 		}
524 	}
525 	return (false);
526 }
527 
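/*
 * Fast path for dropping a shared lock: decrement the sharer count as
 * long as either other sharers remain or there are no waiters to wake
 * up.  Returns false if the caller must take the hard path instead.
 */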
528 static bool __always_inline
529 lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
530 {
531 
532 	for (;;) {
533 		if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
534 			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
535 			    *xp - LK_ONE_SHARER))
536 				return (true);
537 			continue;
538 		}
539 		break;
540 	}
541 	return (false);
542 }
543 
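/*
 * Slow path for shared lock acquisition: called when the fast path
 * failed, it handles deadlock detection, try operations and sleeping on
 * the shared queue until the lock can be shared.
 */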
544 static __noinline int
545 lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
546     const char *file, int line, struct lockmgr_wait *lwa)
547 {
548 	uintptr_t tid, x;
549 	int error = 0;
550 	const char *iwmesg;
551 	int ipri, itimo;
552 
553 #ifdef KDTRACE_HOOKS
554 	uint64_t sleep_time = 0;
555 #endif
556 #ifdef LOCK_PROFILING
557 	uint64_t waittime = 0;
558 	int contested = 0;
559 #endif
560 
561 	if (KERNEL_PANICKED())
562 		goto out;
563 
564 	tid = (uintptr_t)curthread;
565 
566 	if (LK_CAN_WITNESS(flags))
567 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
568 		    file, line, flags & LK_INTERLOCK ? ilk : NULL);
569 	for (;;) {
570 		if (lockmgr_slock_try(lk, &x, flags, false))
571 			break;
572 #ifdef HWPMC_HOOKS
573 		PMC_SOFT_CALL( , , lock, failed);
574 #endif
575 		lock_profile_obtain_lock_failed(&lk->lock_object,
576 		    &contested, &waittime);
577 
578 		/*
579 		 * If the lock is already held by curthread in
580 		 * exclusive mode, fail in order to avoid a deadlock.
581 		 */
582 		if (LK_HOLDER(x) == tid) {
583 			LOCK_LOG2(lk,
584 			    "%s: %p already held in exclusive mode",
585 			    __func__, lk);
586 			error = EDEADLK;
587 			break;
588 		}
589 
590 		/*
591 		 * If the caller does not want to sleep (LK_NOWAIT was
592 		 * passed), just give up and return.
593 		 */
594 		if (LK_TRYOP(flags)) {
595 			LOCK_LOG2(lk, "%s: %p fails the try operation",
596 			    __func__, lk);
597 			error = EBUSY;
598 			break;
599 		}
600 
601 		/*
602 		 * Acquire the sleepqueue chain lock because we
603 		 * will probably need to manipulate the waiters flags.
604 		 */
605 		sleepq_lock(&lk->lock_object);
606 		x = lk->lk_lock;
607 retry_sleepq:
608 
609 		/*
610 		 * If the lock can be acquired in shared mode, try
611 		 * again.
612 		 */
613 		if (LK_CAN_SHARE(x, flags, false)) {
614 			sleepq_release(&lk->lock_object);
615 			continue;
616 		}
617 
618 		/*
619 		 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
620 		 * loop back and retry.
621 		 */
622 		if ((x & LK_SHARED_WAITERS) == 0) {
623 			if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
624 			    x | LK_SHARED_WAITERS)) {
625 				goto retry_sleepq;
626 			}
627 			LOCK_LOG2(lk, "%s: %p set shared waiters flag",
628 			    __func__, lk);
629 		}
630 
631 		if (lwa == NULL) {
632 			iwmesg = lk->lock_object.lo_name;
633 			ipri = lk->lk_pri;
634 			itimo = lk->lk_timo;
635 		} else {
636 			iwmesg = lwa->iwmesg;
637 			ipri = lwa->ipri;
638 			itimo = lwa->itimo;
639 		}
640 
641 		/*
642 		 * Since we have been unable to acquire the
643 		 * shared lock and the shared waiters flag is set,
644 		 * we will sleep.
645 		 */
646 #ifdef KDTRACE_HOOKS
647 		sleep_time -= lockstat_nsecs(&lk->lock_object);
648 #endif
649 		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
650 		    SQ_SHARED_QUEUE);
651 #ifdef KDTRACE_HOOKS
652 		sleep_time += lockstat_nsecs(&lk->lock_object);
653 #endif
654 		flags &= ~LK_INTERLOCK;
655 		if (error) {
656 			LOCK_LOG3(lk,
657 			    "%s: interrupted sleep for %p with %d",
658 			    __func__, lk, error);
659 			break;
660 		}
661 		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
662 		    __func__, lk);
663 	}
664 	if (error == 0) {
665 #ifdef KDTRACE_HOOKS
666 		if (sleep_time != 0)
667 			LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
668 			    LOCKSTAT_READER, (x & LK_SHARE) == 0,
669 			    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
670 #endif
671 #ifdef LOCK_PROFILING
672 		lockmgr_note_shared_acquire(lk, contested, waittime,
673 		    file, line, flags);
674 #else
675 		lockmgr_note_shared_acquire(lk, 0, 0, file, line,
676 		    flags);
677 #endif
678 	}
679 
680 out:
681 	lockmgr_exit(flags, ilk, 0);
682 	return (error);
683 }
684 
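/*
 * Slow path for exclusive lock acquisition: handles recursion, try
 * operations and sleeping on the exclusive queue until the lock can be
 * claimed.
 */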
685 static __noinline int
686 lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
687     const char *file, int line, struct lockmgr_wait *lwa)
688 {
689 	struct lock_class *class;
690 	uintptr_t tid, x, v;
691 	int error = 0;
692 	const char *iwmesg;
693 	int ipri, itimo;
694 
695 #ifdef KDTRACE_HOOKS
696 	uint64_t sleep_time = 0;
697 #endif
698 #ifdef LOCK_PROFILING
699 	uint64_t waittime = 0;
700 	int contested = 0;
701 #endif
702 
703 	if (KERNEL_PANICKED())
704 		goto out;
705 
706 	tid = (uintptr_t)curthread;
707 
708 	if (LK_CAN_WITNESS(flags))
709 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
710 		    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
711 		    ilk : NULL);
712 
713 	/*
714 	 * If curthread already holds the lock and this one is
715 	 * allowed to recurse, simply recurse on it.
716 	 */
717 	if (lockmgr_xlocked(lk)) {
718 		if ((flags & LK_CANRECURSE) == 0 &&
719 		    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
720 			/*
721 			 * If this is a try operation, just give up and
722 			 * return instead of panicking.
723 			 */
724 			if (LK_TRYOP(flags)) {
725 				LOCK_LOG2(lk,
726 				    "%s: %p fails the try operation",
727 				    __func__, lk);
728 				error = EBUSY;
729 				goto out;
730 			}
731 			if (flags & LK_INTERLOCK) {
732 				class = LOCK_CLASS(ilk);
733 				class->lc_unlock(ilk);
734 			}
735 			STACK_PRINT(lk);
736 			panic("%s: recursing on non recursive lockmgr %p "
737 			    "@ %s:%d\n", __func__, lk, file, line);
738 		}
739 		lk->lk_recurse++;
740 		LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
741 		LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
742 		    lk->lk_recurse, file, line);
743 		WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
744 		    LK_TRYWIT(flags), file, line);
745 		TD_LOCKS_INC(curthread);
746 		goto out;
747 	}
748 
749 	for (;;) {
750 		if (lk->lk_lock == LK_UNLOCKED &&
751 		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
752 			break;
753 #ifdef HWPMC_HOOKS
754 		PMC_SOFT_CALL( , , lock, failed);
755 #endif
756 		lock_profile_obtain_lock_failed(&lk->lock_object,
757 		    &contested, &waittime);
758 
759 		/*
760 		 * If the caller does not want to sleep (LK_NOWAIT was
761 		 * passed), just give up and return.
762 		 */
763 		if (LK_TRYOP(flags)) {
764 			LOCK_LOG2(lk, "%s: %p fails the try operation",
765 			    __func__, lk);
766 			error = EBUSY;
767 			break;
768 		}
769 
770 		/*
771 		 * Acquire the sleepqueue chain lock because we
772 		 * will probably need to manipulate the waiters flags.
773 		 */
774 		sleepq_lock(&lk->lock_object);
775 		x = lk->lk_lock;
776 retry_sleepq:
777 
778 		/*
779 		 * If the lock has been released while we spun on
780 		 * the sleepqueue chain lock, just try again.
781 		 */
782 		if (x == LK_UNLOCKED) {
783 			sleepq_release(&lk->lock_object);
784 			continue;
785 		}
786 
787 		/*
788 		 * The lock can be in the state where there is a
789 		 * pending queue of waiters, but still no owner.
790 		 * This happens when the lock is contested and a
791 		 * prospective owner is about to claim it.
792 		 * If curthread is the one that successfully acquires it,
793 		 * claim lock ownership and return, preserving the
794 		 * waiters flags.
795 		 */
796 		v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
797 		if ((x & ~v) == LK_UNLOCKED) {
798 			v &= ~LK_EXCLUSIVE_SPINNERS;
799 			if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
800 			    tid | v)) {
801 				sleepq_release(&lk->lock_object);
802 				LOCK_LOG2(lk,
803 				    "%s: %p claimed by a new writer",
804 				    __func__, lk);
805 				break;
806 			}
807 			goto retry_sleepq;
808 		}
809 
810 		/*
811 		 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
812 		 * fail, loop back and retry.
813 		 */
814 		if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
815 			if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
816 			    x | LK_EXCLUSIVE_WAITERS)) {
817 				goto retry_sleepq;
818 			}
819 			LOCK_LOG2(lk, "%s: %p set excl waiters flag",
820 			    __func__, lk);
821 		}
822 
823 		if (lwa == NULL) {
824 			iwmesg = lk->lock_object.lo_name;
825 			ipri = lk->lk_pri;
826 			itimo = lk->lk_timo;
827 		} else {
828 			iwmesg = lwa->iwmesg;
829 			ipri = lwa->ipri;
830 			itimo = lwa->itimo;
831 		}
832 
833 		/*
834 		 * Since we have been unable to acquire the
835 		 * exclusive lock and the exclusive waiters flag
836 		 * is set, we will sleep.
837 		 */
838 #ifdef KDTRACE_HOOKS
839 		sleep_time -= lockstat_nsecs(&lk->lock_object);
840 #endif
841 		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
842 		    SQ_EXCLUSIVE_QUEUE);
843 #ifdef KDTRACE_HOOKS
844 		sleep_time += lockstat_nsecs(&lk->lock_object);
845 #endif
846 		flags &= ~LK_INTERLOCK;
847 		if (error) {
848 			LOCK_LOG3(lk,
849 			    "%s: interrupted sleep for %p with %d",
850 			    __func__, lk, error);
851 			break;
852 		}
853 		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
854 		    __func__, lk);
855 	}
856 	if (error == 0) {
857 #ifdef KDTRACE_HOOKS
858 		if (sleep_time != 0)
859 			LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
860 			    LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
861 			    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
862 #endif
863 #ifdef LOCK_PROFILING
864 		lockmgr_note_exclusive_acquire(lk, contested, waittime,
865 		    file, line, flags);
866 #else
867 		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
868 		    flags);
869 #endif
870 	}
871 
872 out:
873 	lockmgr_exit(flags, ilk, 0);
874 	return (error);
875 }
876 
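/*
 * Upgrade a shared lock to an exclusive one.  If the single-sharer swap
 * fails, LK_TRYUPGRADE returns EBUSY while LK_UPGRADE drops the shared
 * lock and acquires the exclusive lock from scratch.
 */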
877 static __noinline int
878 lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
879     const char *file, int line, struct lockmgr_wait *lwa)
880 {
881 	uintptr_t tid, x, v;
882 	int error = 0;
883 	int wakeup_swapper = 0;
884 	int op;
885 
886 	if (KERNEL_PANICKED())
887 		goto out;
888 
889 	tid = (uintptr_t)curthread;
890 
891 	_lockmgr_assert(lk, KA_SLOCKED, file, line);
892 	v = lk->lk_lock;
893 	x = v & LK_ALL_WAITERS;
894 	v &= LK_EXCLUSIVE_SPINNERS;
895 
896 	/*
897 	 * Try to switch from one shared lock to an exclusive one.
898 	 * We need to preserve waiters flags during the operation.
899 	 */
900 	if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
901 	    tid | x)) {
902 		LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
903 		    line);
904 		WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
905 		    LK_TRYWIT(flags), file, line);
906 		LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
907 		TD_SLOCKS_DEC(curthread);
908 		goto out;
909 	}
910 
911 	op = flags & LK_TYPE_MASK;
912 
913 	/*
914 	 * In LK_TRYUPGRADE mode, do not drop the lock,
915 	 * returning EBUSY instead.
916 	 */
917 	if (op == LK_TRYUPGRADE) {
918 		LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
919 		    __func__, lk);
920 		error = EBUSY;
921 		goto out;
922 	}
923 
924 	/*
925 	 * We have been unable to upgrade, so just
926 	 * give up the shared lock.
927 	 */
928 	lockmgr_note_shared_release(lk, file, line);
929 	wakeup_swapper |= wakeupshlk(lk, file, line);
930 	error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
931 	flags &= ~LK_INTERLOCK;
932 out:
933 	lockmgr_exit(flags, ilk, wakeup_swapper);
934 	return (error);
935 }
936 
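/*
 * General-purpose lock entry point: tries the shared and exclusive fast
 * paths and upgrades, falling back to the hard paths or __lockmgr_args()
 * for everything else.
 */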
937 int
938 lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
939     const char *file, int line)
940 {
941 	struct lock_class *class;
942 	uintptr_t x, tid;
943 	u_int op;
944 	bool locked;
945 
946 	if (KERNEL_PANICKED())
947 		return (0);
948 
949 	op = flags & LK_TYPE_MASK;
950 	locked = false;
951 	switch (op) {
952 	case LK_SHARED:
953 		if (LK_CAN_WITNESS(flags))
954 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
955 			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
956 		if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
957 			break;
958 		if (lockmgr_slock_try(lk, &x, flags, true)) {
959 			lockmgr_note_shared_acquire(lk, 0, 0,
960 			    file, line, flags);
961 			locked = true;
962 		} else {
963 			return (lockmgr_slock_hard(lk, flags, ilk, file, line,
964 			    NULL));
965 		}
966 		break;
967 	case LK_EXCLUSIVE:
968 		if (LK_CAN_WITNESS(flags))
969 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
970 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
971 			    ilk : NULL);
972 		tid = (uintptr_t)curthread;
973 		if (lk->lk_lock == LK_UNLOCKED &&
974 		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
975 			lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
976 			    flags);
977 			locked = true;
978 		} else {
979 			return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
980 			    NULL));
981 		}
982 		break;
983 	case LK_UPGRADE:
984 	case LK_TRYUPGRADE:
985 		return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
986 	default:
987 		break;
988 	}
989 	if (__predict_true(locked)) {
990 		if (__predict_false(flags & LK_INTERLOCK)) {
991 			class = LOCK_CLASS(ilk);
992 			class->lc_unlock(ilk);
993 		}
994 		return (0);
995 	} else {
996 		return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
997 		    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
998 	}
999 }
1000 
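/*
 * Slow path for releasing a shared lock; wakes up waiters as needed.
 */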
1001 static __noinline int
1002 lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1003     const char *file, int line)
1004 
1005 {
1006 	int wakeup_swapper = 0;
1007 
1008 	if (KERNEL_PANICKED())
1009 		goto out;
1010 
1011 	wakeup_swapper = wakeupshlk(lk, file, line);
1012 
1013 out:
1014 	lockmgr_exit(flags, ilk, wakeup_swapper);
1015 	return (0);
1016 }
1017 
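/*
 * Slow path for releasing an exclusive lock: handles recursion, disowned
 * locks and waking up the waiters queued on the lock.
 */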
1018 static __noinline int
1019 lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1020     const char *file, int line)
1021 {
1022 	uintptr_t tid, v;
1023 	int wakeup_swapper = 0;
1024 	u_int realexslp;
1025 	int queue;
1026 
1027 	if (KERNEL_PANICKED())
1028 		goto out;
1029 
1030 	tid = (uintptr_t)curthread;
1031 
1032 	/*
1033 	 * As a first option, treat the lock as if it has no
1034 	 * waiters.
1035 	 * Fix up the tid variable if the lock has been disowned.
1036 	 */
1037 	if (LK_HOLDER(x) == LK_KERNPROC)
1038 		tid = LK_KERNPROC;
1039 
1040 	/*
1041 	 * The lock is held in exclusive mode.
1042 	 * If the lock is recursed also, then unrecurse it.
1043 	 */
1044 	if (lockmgr_xlocked_v(x) && lockmgr_recursed(lk)) {
1045 		LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
1046 		lk->lk_recurse--;
1047 		goto out;
1048 	}
1049 	if (tid != LK_KERNPROC)
1050 		LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
1051 		    LOCKSTAT_WRITER);
1052 
1053 	if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
1054 		goto out;
1055 
1056 	sleepq_lock(&lk->lock_object);
1057 	x = lk->lk_lock;
1058 	v = LK_UNLOCKED;
1059 
1060 	/*
1061 	 * If the lock has exclusive waiters, give them
1062 	 * preference in order to avoid deadlock with
1063 	 * shared runners-up.
1064 	 * If interruptible sleeps left the exclusive queue
1065 	 * empty, avoid starvation of the threads sleeping
1066 	 * on the shared queue by giving them precedence
1067 	 * and clearing the exclusive waiters bit anyway.
1068 	 * Please note that lk_exslpfail may overstate the
1069 	 * real number of waiters sleeping with the
1070 	 * LK_SLEEPFAIL flag on, because such waiters may
1071 	 * also be using interruptible sleeps, so
1072 	 * lk_exslpfail should only be treated as an upper
1073 	 * bound, including in the edge cases.
1074 	 */
1075 	MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1076 	realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
1077 	if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1078 		if (lk->lk_exslpfail < realexslp) {
1079 			lk->lk_exslpfail = 0;
1080 			queue = SQ_EXCLUSIVE_QUEUE;
1081 			v |= (x & LK_SHARED_WAITERS);
1082 		} else {
1083 			lk->lk_exslpfail = 0;
1084 			LOCK_LOG2(lk,
1085 			    "%s: %p has only LK_SLEEPFAIL sleepers",
1086 			    __func__, lk);
1087 			LOCK_LOG2(lk,
1088 			    "%s: %p waking up threads on the exclusive queue",
1089 			    __func__, lk);
1090 			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
1091 			    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1092 			queue = SQ_SHARED_QUEUE;
1093 		}
1094 	} else {
1095 
1096 		/*
1097 		 * Exclusive waiters sleeping with LK_SLEEPFAIL
1098 		 * on and using interruptible sleeps/timeout
1099 		 * may have left spurious lk_exslpfail counts
1100 		 * behind, so clean them up anyway.
1101 		 */
1102 		lk->lk_exslpfail = 0;
1103 		queue = SQ_SHARED_QUEUE;
1104 	}
1105 
1106 	LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
1107 	    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1108 	    "exclusive");
1109 	atomic_store_rel_ptr(&lk->lk_lock, v);
1110 	wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
1111 	sleepq_release(&lk->lock_object);
1112 
1113 out:
1114 	lockmgr_exit(flags, ilk, wakeup_swapper);
1115 	return (0);
1116 }
1117 
1118 /*
1119  * Lightweight entry points for common operations.
1120  *
1121  * Functionality is similar to sx locks, in that none of the additional lockmgr
1122  * features are supported. To be clear, these are NOT supported:
1123  * 1. shared locking disablement
1124  * 2. returning with an error after sleep
1125  * 3. unlocking the interlock
1126  *
1127  * If in doubt, use lockmgr_lock_flags.
1128  */
1129 int
1130 lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
1131 {
1132 	uintptr_t x;
1133 
1134 	MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
1135 	MPASS((flags & LK_INTERLOCK) == 0);
1136 	MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
1137 
1138 	if (LK_CAN_WITNESS(flags))
1139 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1140 		    file, line, NULL);
1141 	if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
1142 		lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
1143 		return (0);
1144 	}
1145 
1146 	return (lockmgr_slock_hard(lk, flags, NULL, file, line, NULL));
1147 }
1148 
1149 int
1150 lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
1151 {
1152 	uintptr_t tid;
1153 
1154 	MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
1155 	MPASS((flags & LK_INTERLOCK) == 0);
1156 
1157 	if (LK_CAN_WITNESS(flags))
1158 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1159 		    LOP_EXCLUSIVE, file, line, NULL);
1160 	tid = (uintptr_t)curthread;
1161 	if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1162 		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1163 		    flags);
1164 		return (0);
1165 	}
1166 
1167 	return (lockmgr_xlock_hard(lk, flags, NULL, file, line, NULL));
1168 }
1169 
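/*
 * Release either a shared or an exclusive lock, taking the appropriate
 * fast path when possible.
 */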
1170 int
1171 lockmgr_unlock(struct lock *lk)
1172 {
1173 	uintptr_t x, tid;
1174 	const char *file;
1175 	int line;
1176 
1177 	file = __FILE__;
1178 	line = __LINE__;
1179 
1180 	_lockmgr_assert(lk, KA_LOCKED, file, line);
1181 	x = lk->lk_lock;
1182 	if (__predict_true(x & LK_SHARE) != 0) {
1183 		lockmgr_note_shared_release(lk, file, line);
1184 		if (lockmgr_sunlock_try(lk, &x)) {
1185 			LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1186 		} else {
1187 			return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1188 		}
1189 	} else {
1190 		tid = (uintptr_t)curthread;
1191 		lockmgr_note_exclusive_release(lk, file, line);
1192 		if (!lockmgr_recursed(lk) &&
1193 		    atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1194 			LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
1195 		} else {
1196 			return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1197 		}
1198 	}
1199 	return (0);
1200 }
1201 
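/*
 * Full-featured entry point handling every LK_* operation, including
 * LK_DOWNGRADE, LK_RELEASE and LK_DRAIN, with optional custom wait
 * message, priority and timeout.
 */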
1202 int
1203 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
1204     const char *wmesg, int pri, int timo, const char *file, int line)
1205 {
1206 	GIANT_DECLARE;
1207 	struct lockmgr_wait lwa;
1208 	struct lock_class *class;
1209 	const char *iwmesg;
1210 	uintptr_t tid, v, x;
1211 	u_int op, realexslp;
1212 	int error, ipri, itimo, queue, wakeup_swapper;
1213 #ifdef LOCK_PROFILING
1214 	uint64_t waittime = 0;
1215 	int contested = 0;
1216 #endif
1217 
1218 	if (KERNEL_PANICKED())
1219 		return (0);
1220 
1221 	error = 0;
1222 	tid = (uintptr_t)curthread;
1223 	op = (flags & LK_TYPE_MASK);
1224 	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
1225 	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
1226 	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
1227 
1228 	lwa.iwmesg = iwmesg;
1229 	lwa.ipri = ipri;
1230 	lwa.itimo = itimo;
1231 
1232 	MPASS((flags & ~LK_TOTAL_MASK) == 0);
1233 	KASSERT((op & (op - 1)) == 0,
1234 	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
1235 	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
1236 	    (op != LK_DOWNGRADE && op != LK_RELEASE),
1237 	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
1238 	    __func__, file, line));
1239 	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
1240 	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
1241 	    __func__, file, line));
1242 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
1243 	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
1244 	    lk->lock_object.lo_name, file, line));
1245 
1246 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
1247 
1248 	if (lk->lock_object.lo_flags & LK_NOSHARE) {
1249 		switch (op) {
1250 		case LK_SHARED:
1251 			op = LK_EXCLUSIVE;
1252 			break;
1253 		case LK_UPGRADE:
1254 		case LK_TRYUPGRADE:
1255 		case LK_DOWNGRADE:
1256 			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
1257 			    file, line);
1258 			if (flags & LK_INTERLOCK)
1259 				class->lc_unlock(ilk);
1260 			return (0);
1261 		}
1262 	}
1263 
1264 	wakeup_swapper = 0;
1265 	switch (op) {
1266 	case LK_SHARED:
1267 		return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
1268 		break;
1269 	case LK_UPGRADE:
1270 	case LK_TRYUPGRADE:
1271 		return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
1272 		break;
1273 	case LK_EXCLUSIVE:
1274 		return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1275 		break;
1276 	case LK_DOWNGRADE:
1277 		_lockmgr_assert(lk, KA_XLOCKED, file, line);
1278 		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
1279 
1280 		/*
1281 		 * Panic if the lock is recursed.
1282 		 */
1283 		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1284 			if (flags & LK_INTERLOCK)
1285 				class->lc_unlock(ilk);
1286 			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
1287 			    __func__, iwmesg, file, line);
1288 		}
1289 		TD_SLOCKS_INC(curthread);
1290 
1291 		/*
1292 		 * In order to preserve waiters flags, just spin.
1293 		 */
1294 		for (;;) {
1295 			x = lk->lk_lock;
1296 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1297 			x &= LK_ALL_WAITERS;
1298 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1299 			    LK_SHARERS_LOCK(1) | x))
1300 				break;
1301 			cpu_spinwait();
1302 		}
1303 		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
1304 		LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
1305 		break;
1306 	case LK_RELEASE:
1307 		_lockmgr_assert(lk, KA_LOCKED, file, line);
1308 		x = lk->lk_lock;
1309 
1310 		if (__predict_true(x & LK_SHARE) != 0) {
1311 			lockmgr_note_shared_release(lk, file, line);
1312 			return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1313 		} else {
1314 			lockmgr_note_exclusive_release(lk, file, line);
1315 			return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
1316 		}
1317 		break;
1318 	case LK_DRAIN:
1319 		if (LK_CAN_WITNESS(flags))
1320 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1321 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1322 			    ilk : NULL);
1323 
1324 		/*
1325 		 * Trying to drain a lock we already own will result in a
1326 		 * deadlock.
1327 		 */
1328 		if (lockmgr_xlocked(lk)) {
1329 			if (flags & LK_INTERLOCK)
1330 				class->lc_unlock(ilk);
1331 			panic("%s: draining %s with the lock held @ %s:%d\n",
1332 			    __func__, iwmesg, file, line);
1333 		}
1334 
1335 		for (;;) {
1336 			if (lk->lk_lock == LK_UNLOCKED &&
1337 			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1338 				break;
1339 
1340 #ifdef HWPMC_HOOKS
1341 			PMC_SOFT_CALL( , , lock, failed);
1342 #endif
1343 			lock_profile_obtain_lock_failed(&lk->lock_object,
1344 			    &contested, &waittime);
1345 
1346 			/*
1347 			 * If the caller does not want to sleep (LK_NOWAIT
1348 			 * was passed), just give up and return.
1349 			 */
1350 			if (LK_TRYOP(flags)) {
1351 				LOCK_LOG2(lk, "%s: %p fails the try operation",
1352 				    __func__, lk);
1353 				error = EBUSY;
1354 				break;
1355 			}
1356 
1357 			/*
1358 			 * Acquire the sleepqueue chain lock because we
1359 			 * will probably need to manipulate the waiters flags.
1360 			 */
1361 			sleepq_lock(&lk->lock_object);
1362 			x = lk->lk_lock;
1363 
1364 			/*
1365 			 * If the lock has been released while we spun on
1366 			 * the sleepqueue chain lock, just try again.
1367 			 */
1368 			if (x == LK_UNLOCKED) {
1369 				sleepq_release(&lk->lock_object);
1370 				continue;
1371 			}
1372 
1373 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1374 			if ((x & ~v) == LK_UNLOCKED) {
1375 				v = (x & ~LK_EXCLUSIVE_SPINNERS);
1376 
1377 				/*
1378 				 * If interruptible sleeps left the exclusive
1379 				 * queue empty, avoid starvation of the
1380 				 * threads sleeping on the shared queue by
1381 				 * giving them precedence and clearing the
1382 				 * exclusive waiters bit anyway.
1383 				 * Please note that lk_exslpfail may overstate
1384 				 * the real number of waiters sleeping with
1385 				 * the LK_SLEEPFAIL flag on, because such
1386 				 * waiters may also be using interruptible
1387 				 * sleeps, so lk_exslpfail should only be
1388 				 * treated as an upper bound, including in
1389 				 * the edge cases.
1390 				 */
1391 				if (v & LK_EXCLUSIVE_WAITERS) {
1392 					queue = SQ_EXCLUSIVE_QUEUE;
1393 					v &= ~LK_EXCLUSIVE_WAITERS;
1394 				} else {
1395 
1396 					/*
1397 					 * Exclusive waiters sleeping with
1398 					 * LK_SLEEPFAIL on and using
1399 					 * have left spurious lk_exslpfail
1400 					 * counts behind, so clean them up anyway.
1401 					 * counts on, so clean it up anyway.
1402 					 */
1403 					MPASS(v & LK_SHARED_WAITERS);
1404 					lk->lk_exslpfail = 0;
1405 					queue = SQ_SHARED_QUEUE;
1406 					v &= ~LK_SHARED_WAITERS;
1407 				}
1408 				if (queue == SQ_EXCLUSIVE_QUEUE) {
1409 					realexslp =
1410 					    sleepq_sleepcnt(&lk->lock_object,
1411 					    SQ_EXCLUSIVE_QUEUE);
1412 					if (lk->lk_exslpfail >= realexslp) {
1413 						lk->lk_exslpfail = 0;
1414 						queue = SQ_SHARED_QUEUE;
1415 						v &= ~LK_SHARED_WAITERS;
1416 						if (realexslp != 0) {
1417 							LOCK_LOG2(lk,
1418 					"%s: %p has only LK_SLEEPFAIL sleepers",
1419 							    __func__, lk);
1420 							LOCK_LOG2(lk,
1421 			"%s: %p waking up threads on the exclusive queue",
1422 							    __func__, lk);
1423 							wakeup_swapper =
1424 							    sleepq_broadcast(
1425 							    &lk->lock_object,
1426 							    SLEEPQ_LK, 0,
1427 							    SQ_EXCLUSIVE_QUEUE);
1428 						}
1429 					} else
1430 						lk->lk_exslpfail = 0;
1431 				}
1432 				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1433 					sleepq_release(&lk->lock_object);
1434 					continue;
1435 				}
1436 				LOCK_LOG3(lk,
1437 				"%s: %p waking up all threads on the %s queue",
1438 				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1439 				    "shared" : "exclusive");
1440 				wakeup_swapper |= sleepq_broadcast(
1441 				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1442 
1443 				/*
1444 				 * If shared waiters have been woken up, we need
1445 				 * to wait for one of them to acquire the lock
1446 				 * before setting the exclusive waiters flag, in
1447 				 * order to avoid a deadlock.
1448 				 */
1449 				if (queue == SQ_SHARED_QUEUE) {
1450 					for (v = lk->lk_lock;
1451 					    (v & LK_SHARE) && !LK_SHARERS(v);
1452 					    v = lk->lk_lock)
1453 						cpu_spinwait();
1454 				}
1455 			}
1456 
1457 			/*
1458 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1459 			 * fail, loop back and retry.
1460 			 */
1461 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1462 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1463 				    x | LK_EXCLUSIVE_WAITERS)) {
1464 					sleepq_release(&lk->lock_object);
1465 					continue;
1466 				}
1467 				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1468 				    __func__, lk);
1469 			}
1470 
1471 			/*
1472 			 * Since we have been unable to acquire the
1473 			 * exclusive lock and the exclusive waiters flag
1474 			 * is set, we will sleep.
1475 			 */
1476 			if (flags & LK_INTERLOCK) {
1477 				class->lc_unlock(ilk);
1478 				flags &= ~LK_INTERLOCK;
1479 			}
1480 			GIANT_SAVE();
1481 			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1482 			    SQ_EXCLUSIVE_QUEUE);
1483 			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1484 			GIANT_RESTORE();
1485 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1486 			    __func__, lk);
1487 		}
1488 
1489 		if (error == 0) {
1490 			lock_profile_obtain_lock_success(&lk->lock_object,
1491 			    contested, waittime, file, line);
1492 			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1493 			    lk->lk_recurse, file, line);
1494 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1495 			    LK_TRYWIT(flags), file, line);
1496 			TD_LOCKS_INC(curthread);
1497 			STACK_SAVE(lk);
1498 		}
1499 		break;
1500 	default:
1501 		if (flags & LK_INTERLOCK)
1502 			class->lc_unlock(ilk);
1503 		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1504 	}
1505 
1506 	if (flags & LK_INTERLOCK)
1507 		class->lc_unlock(ilk);
1508 	if (wakeup_swapper)
1509 		kick_proc0();
1510 
1511 	return (error);
1512 }
1513 
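/*
 * Hand an exclusively held lock over to LK_KERNPROC so that it can later
 * be released by a thread other than the current owner.
 */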
1514 void
1515 _lockmgr_disown(struct lock *lk, const char *file, int line)
1516 {
1517 	uintptr_t tid, x;
1518 
1519 	if (SCHEDULER_STOPPED())
1520 		return;
1521 
1522 	tid = (uintptr_t)curthread;
1523 	_lockmgr_assert(lk, KA_XLOCKED, file, line);
1524 
1525 	/*
1526 	 * Panic if the lock is recursed.
1527 	 */
1528 	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1529 		panic("%s: disown a recursed lockmgr @ %s:%d\n",
1530 		    __func__,  file, line);
1531 
1532 	/*
1533 	 * If the owner is already LK_KERNPROC, just skip the whole operation.
1534 	 */
1535 	if (LK_HOLDER(lk->lk_lock) != tid)
1536 		return;
1537 	lock_profile_release_lock(&lk->lock_object);
1538 	LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1539 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1540 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1541 	TD_LOCKS_DEC(curthread);
1542 	STACK_SAVE(lk);
1543 
1544 	/*
1545 	 * In order to preserve waiters flags, just spin.
1546 	 */
1547 	for (;;) {
1548 		x = lk->lk_lock;
1549 		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1550 		x &= LK_ALL_WAITERS;
1551 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1552 		    LK_KERNPROC | x))
1553 			return;
1554 		cpu_spinwait();
1555 	}
1556 }
1557 
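/*
 * Print a description of the lock's current state, its holder and any
 * pending waiters.
 */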
1558 void
1559 lockmgr_printinfo(const struct lock *lk)
1560 {
1561 	struct thread *td;
1562 	uintptr_t x;
1563 
1564 	if (lk->lk_lock == LK_UNLOCKED)
1565 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1566 	else if (lk->lk_lock & LK_SHARE)
1567 		printf("lock type %s: SHARED (count %ju)\n",
1568 		    lk->lock_object.lo_name,
1569 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1570 	else {
1571 		td = lockmgr_xholder(lk);
1572 		if (td == (struct thread *)LK_KERNPROC)
1573 			printf("lock type %s: EXCL by KERNPROC\n",
1574 			    lk->lock_object.lo_name);
1575 		else
1576 			printf("lock type %s: EXCL by thread %p "
1577 			    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1578 			    td, td->td_proc->p_pid, td->td_proc->p_comm,
1579 			    td->td_tid);
1580 	}
1581 
1582 	x = lk->lk_lock;
1583 	if (x & LK_EXCLUSIVE_WAITERS)
1584 		printf(" with exclusive waiters pending\n");
1585 	if (x & LK_SHARED_WAITERS)
1586 		printf(" with shared waiters pending\n");
1587 	if (x & LK_EXCLUSIVE_SPINNERS)
1588 		printf(" with exclusive spinners pending\n");
1589 
1590 	STACK_PRINT(lk);
1591 }
1592 
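/*
 * Report the lock state: LK_EXCLUSIVE if held exclusively by curthread
 * (or disowned to LK_KERNPROC), LK_EXCLOTHER if held exclusively by
 * another thread, LK_SHARED if held in shared mode and 0 if unlocked.
 */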
1593 int
1594 lockstatus(const struct lock *lk)
1595 {
1596 	uintptr_t v, x;
1597 	int ret;
1598 
1599 	ret = LK_SHARED;
1600 	x = lk->lk_lock;
1601 	v = LK_HOLDER(x);
1602 
1603 	if ((x & LK_SHARE) == 0) {
1604 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1605 			ret = LK_EXCLUSIVE;
1606 		else
1607 			ret = LK_EXCLOTHER;
1608 	} else if (x == LK_UNLOCKED)
1609 		ret = 0;
1610 
1611 	return (ret);
1612 }
1613 
1614 #ifdef INVARIANT_SUPPORT
1615 
1616 FEATURE(invariant_support,
1617     "Support for modules compiled with INVARIANTS option");
1618 
1619 #ifndef INVARIANTS
1620 #undef	_lockmgr_assert
1621 #endif
1622 
1623 void
1624 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1625 {
1626 	int slocked = 0;
1627 
1628 	if (KERNEL_PANICKED())
1629 		return;
1630 	switch (what) {
1631 	case KA_SLOCKED:
1632 	case KA_SLOCKED | KA_NOTRECURSED:
1633 	case KA_SLOCKED | KA_RECURSED:
1634 		slocked = 1;
1635 	case KA_LOCKED:
1636 	case KA_LOCKED | KA_NOTRECURSED:
1637 	case KA_LOCKED | KA_RECURSED:
1638 #ifdef WITNESS
1639 
1640 		/*
1641 		 * We cannot trust WITNESS if the lock is held in exclusive
1642 		 * mode and a call to lockmgr_disown() happened.
1643 		 * Work around this by skipping the check if the lock is held
1644 		 * in exclusive mode, even for the KA_LOCKED case.
1645 		 */
1646 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1647 			witness_assert(&lk->lock_object, what, file, line);
1648 			break;
1649 		}
1650 #endif
1651 		if (lk->lk_lock == LK_UNLOCKED ||
1652 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1653 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1654 			panic("Lock %s not %slocked @ %s:%d\n",
1655 			    lk->lock_object.lo_name, slocked ? "share" : "",
1656 			    file, line);
1657 
1658 		if ((lk->lk_lock & LK_SHARE) == 0) {
1659 			if (lockmgr_recursed(lk)) {
1660 				if (what & KA_NOTRECURSED)
1661 					panic("Lock %s recursed @ %s:%d\n",
1662 					    lk->lock_object.lo_name, file,
1663 					    line);
1664 			} else if (what & KA_RECURSED)
1665 				panic("Lock %s not recursed @ %s:%d\n",
1666 				    lk->lock_object.lo_name, file, line);
1667 		}
1668 		break;
1669 	case KA_XLOCKED:
1670 	case KA_XLOCKED | KA_NOTRECURSED:
1671 	case KA_XLOCKED | KA_RECURSED:
1672 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1673 			panic("Lock %s not exclusively locked @ %s:%d\n",
1674 			    lk->lock_object.lo_name, file, line);
1675 		if (lockmgr_recursed(lk)) {
1676 			if (what & KA_NOTRECURSED)
1677 				panic("Lock %s recursed @ %s:%d\n",
1678 				    lk->lock_object.lo_name, file, line);
1679 		} else if (what & KA_RECURSED)
1680 			panic("Lock %s not recursed @ %s:%d\n",
1681 			    lk->lock_object.lo_name, file, line);
1682 		break;
1683 	case KA_UNLOCKED:
1684 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1685 			panic("Lock %s exclusively locked @ %s:%d\n",
1686 			    lk->lock_object.lo_name, file, line);
1687 		break;
1688 	default:
1689 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1690 		    line);
1691 	}
1692 }
1693 #endif
1694 
1695 #ifdef DDB
1696 int
1697 lockmgr_chain(struct thread *td, struct thread **ownerp)
1698 {
1699 	const struct lock *lk;
1700 
1701 	lk = td->td_wchan;
1702 
1703 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1704 		return (0);
1705 	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1706 	if (lk->lk_lock & LK_SHARE)
1707 		db_printf("SHARED (count %ju)\n",
1708 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1709 	else
1710 		db_printf("EXCL\n");
1711 	*ownerp = lockmgr_xholder(lk);
1712 
1713 	return (1);
1714 }
1715 
1716 static void
1717 db_show_lockmgr(const struct lock_object *lock)
1718 {
1719 	struct thread *td;
1720 	const struct lock *lk;
1721 
1722 	lk = (const struct lock *)lock;
1723 
1724 	db_printf(" state: ");
1725 	if (lk->lk_lock == LK_UNLOCKED)
1726 		db_printf("UNLOCKED\n");
1727 	else if (lk->lk_lock & LK_SHARE)
1728 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1729 	else {
1730 		td = lockmgr_xholder(lk);
1731 		if (td == (struct thread *)LK_KERNPROC)
1732 			db_printf("XLOCK: LK_KERNPROC\n");
1733 		else
1734 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1735 			    td->td_tid, td->td_proc->p_pid,
1736 			    td->td_proc->p_comm);
1737 		if (lockmgr_recursed(lk))
1738 			db_printf(" recursed: %d\n", lk->lk_recurse);
1739 	}
1740 	db_printf(" waiters: ");
1741 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1742 	case LK_SHARED_WAITERS:
1743 		db_printf("shared\n");
1744 		break;
1745 	case LK_EXCLUSIVE_WAITERS:
1746 		db_printf("exclusive\n");
1747 		break;
1748 	case LK_ALL_WAITERS:
1749 		db_printf("shared and exclusive\n");
1750 		break;
1751 	default:
1752 		db_printf("none\n");
1753 	}
1754 	db_printf(" spinners: ");
1755 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1756 		db_printf("exclusive\n");
1757 	else
1758 		db_printf("none\n");
1759 }
1760 #endif
1761