xref: /freebsd/sys/kern/kern_lock.c (revision 430f7286a566b1407c7b32ce13585caf5aa59b92)
1 /*-
2  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice(s), this list of conditions and the following disclaimer as
10  *    the first lines of this file unmodified other than the possible
11  *    addition of one or more copyright notices.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice(s), this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26  * DAMAGE.
27  */
28 
29 #include "opt_adaptive_lockmgrs.h"
30 #include "opt_ddb.h"
31 #include "opt_hwpmc_hooks.h"
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/kdb.h>
38 #include <sys/ktr.h>
39 #include <sys/lock.h>
40 #include <sys/lock_profile.h>
41 #include <sys/lockmgr.h>
42 #include <sys/mutex.h>
43 #include <sys/proc.h>
44 #include <sys/sleepqueue.h>
45 #ifdef DEBUG_LOCKS
46 #include <sys/stack.h>
47 #endif
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50 
51 #include <machine/cpu.h>
52 
53 #ifdef DDB
54 #include <ddb/ddb.h>
55 #endif
56 
57 #ifdef HWPMC_HOOKS
58 #include <sys/pmckern.h>
59 PMC_SOFT_DECLARE( , , lock, failed);
60 #endif
61 
62 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
63     (LK_ADAPTIVE | LK_NOSHARE));
64 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
65     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
66 
67 #define	SQ_EXCLUSIVE_QUEUE	0
68 #define	SQ_SHARED_QUEUE		1
69 
70 #ifndef INVARIANTS
71 #define	_lockmgr_assert(lk, what, file, line)
72 #endif
73 
74 #define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
75 #define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
76 
77 #ifndef DEBUG_LOCKS
78 #define	STACK_PRINT(lk)
79 #define	STACK_SAVE(lk)
80 #define	STACK_ZERO(lk)
81 #else
82 #define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
83 #define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
84 #define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
85 #endif
86 
87 #define	LOCK_LOG2(lk, string, arg1, arg2)				\
88 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
89 		CTR2(KTR_LOCK, (string), (arg1), (arg2))
90 #define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
91 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
92 		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
93 
94 #define	GIANT_DECLARE							\
95 	int _i = 0;							\
96 	WITNESS_SAVE_DECL(Giant)
97 #define	GIANT_RESTORE() do {						\
98 	if (_i > 0) {							\
99 		while (_i--)						\
100 			mtx_lock(&Giant);				\
101 		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
102 	}								\
103 } while (0)
104 #define	GIANT_SAVE() do {						\
105 	if (mtx_owned(&Giant)) {					\
106 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
107 		while (mtx_owned(&Giant)) {				\
108 			_i++;						\
109 			mtx_unlock(&Giant);				\
110 		}							\
111 	}								\
112 } while (0)
113 
114 #define	LK_CAN_SHARE(x, flags)						\
115 	(((x) & LK_SHARE) &&						\
116 	(((x) & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 0 ||	\
117 	(curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||	\
118 	(curthread->td_pflags & TDP_DEADLKTREAT)))
119 #define	LK_TRYOP(x)							\
120 	((x) & LK_NOWAIT)
121 
122 #define	LK_CAN_WITNESS(x)						\
123 	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
124 #define	LK_TRYWIT(x)							\
125 	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
126 
127 #define	LK_CAN_ADAPT(lk, f)						\
128 	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
129 	((f) & LK_SLEEPFAIL) == 0)
130 
131 #define	lockmgr_disowned(lk)						\
132 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
133 
134 #define	lockmgr_xlocked(lk)						\
135 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
136 
137 static void	assert_lockmgr(const struct lock_object *lock, int how);
138 #ifdef DDB
139 static void	db_show_lockmgr(const struct lock_object *lock);
140 #endif
141 static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
142 #ifdef KDTRACE_HOOKS
143 static int	owner_lockmgr(const struct lock_object *lock,
144 		    struct thread **owner);
145 #endif
146 static uintptr_t unlock_lockmgr(struct lock_object *lock);
147 
148 struct lock_class lock_class_lockmgr = {
149 	.lc_name = "lockmgr",
150 	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
151 	.lc_assert = assert_lockmgr,
152 #ifdef DDB
153 	.lc_ddb_show = db_show_lockmgr,
154 #endif
155 	.lc_lock = lock_lockmgr,
156 	.lc_unlock = unlock_lockmgr,
157 #ifdef KDTRACE_HOOKS
158 	.lc_owner = owner_lockmgr,
159 #endif
160 };
161 
162 #ifdef ADAPTIVE_LOCKMGRS
163 static u_int alk_retries = 10;
164 static u_int alk_loops = 10000;
165 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
166     "lockmgr debugging");
167 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
168 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
169 #endif
170 
171 static __inline struct thread *
172 lockmgr_xholder(const struct lock *lk)
173 {
174 	uintptr_t x;
175 
176 	x = lk->lk_lock;
177 	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
178 }
179 
180 /*
181  * It assumes sleepq_lock held and returns with this one unheld.
182  * It also assumes the generic interlock is sane and previously checked.
183  * If LK_INTERLOCK is specified the interlock is not reacquired after the
184  * sleep.
185  */
186 static __inline int
187 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
188     const char *wmesg, int pri, int timo, int queue)
189 {
190 	GIANT_DECLARE;
191 	struct lock_class *class;
192 	int catch, error;
193 
194 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
195 	catch = pri & PCATCH;
196 	pri &= PRIMASK;
197 	error = 0;
198 
199 	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
200 	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
201 
202 	if (flags & LK_INTERLOCK)
203 		class->lc_unlock(ilk);
204 	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
205 		lk->lk_exslpfail++;
206 	GIANT_SAVE();
207 	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
208 	    SLEEPQ_INTERRUPTIBLE : 0), queue);
209 	if ((flags & LK_TIMELOCK) && timo)
210 		sleepq_set_timeout(&lk->lock_object, timo);
211 
212 	/*
213 	 * Decisional switch for real sleeping.
214 	 */
215 	if ((flags & LK_TIMELOCK) && timo && catch)
216 		error = sleepq_timedwait_sig(&lk->lock_object, pri);
217 	else if ((flags & LK_TIMELOCK) && timo)
218 		error = sleepq_timedwait(&lk->lock_object, pri);
219 	else if (catch)
220 		error = sleepq_wait_sig(&lk->lock_object, pri);
221 	else
222 		sleepq_wait(&lk->lock_object, pri);
223 	GIANT_RESTORE();
224 	if ((flags & LK_SLEEPFAIL) && error == 0)
225 		error = ENOLCK;
226 
227 	return (error);
228 }
229 
230 static __inline int
231 wakeupshlk(struct lock *lk, const char *file, int line)
232 {
233 	uintptr_t v, x;
234 	u_int realexslp;
235 	int queue, wakeup_swapper;
236 
237 	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
238 	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
239 
240 	wakeup_swapper = 0;
241 	for (;;) {
242 		x = lk->lk_lock;
243 
244 		/*
245 		 * If there is more than one shared lock held, just drop one
246 		 * and return.
247 		 */
248 		if (LK_SHARERS(x) > 1) {
249 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
250 			    x - LK_ONE_SHARER))
251 				break;
252 			continue;
253 		}
254 
255 		/*
256 		 * If there are not waiters on the exclusive queue, drop the
257 		 * lock quickly.
258 		 */
259 		if ((x & LK_ALL_WAITERS) == 0) {
260 			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
261 			    LK_SHARERS_LOCK(1));
262 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
263 				break;
264 			continue;
265 		}
266 
267 		/*
268 		 * We should have a sharer with waiters, so enter the hard
269 		 * path in order to handle wakeups correctly.
270 		 */
271 		sleepq_lock(&lk->lock_object);
272 		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
273 		v = LK_UNLOCKED;
274 
275 		/*
276 		 * If the lock has exclusive waiters, give them preference in
277 		 * order to avoid deadlock with shared runners up.
278 		 * If interruptible sleeps left the exclusive queue empty
279 		 * avoid a starvation for the threads sleeping on the shared
280 		 * queue by giving them precedence and cleaning up the
281 		 * exclusive waiters bit anyway.
282 		 * Please note that lk_exslpfail count may be lying about
283 		 * the real number of waiters with the LK_SLEEPFAIL flag on
284 		 * because they may be used in conjunction with interruptible
285 		 * sleeps so lk_exslpfail might be considered an 'upper limit'
286 		 * bound, including the edge cases.
287 		 */
288 		realexslp = sleepq_sleepcnt(&lk->lock_object,
289 		    SQ_EXCLUSIVE_QUEUE);
290 		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
291 			if (lk->lk_exslpfail < realexslp) {
292 				lk->lk_exslpfail = 0;
293 				queue = SQ_EXCLUSIVE_QUEUE;
294 				v |= (x & LK_SHARED_WAITERS);
295 			} else {
296 				lk->lk_exslpfail = 0;
297 				LOCK_LOG2(lk,
298 				    "%s: %p has only LK_SLEEPFAIL sleepers",
299 				    __func__, lk);
300 				LOCK_LOG2(lk,
301 			    "%s: %p waking up threads on the exclusive queue",
302 				    __func__, lk);
303 				wakeup_swapper =
304 				    sleepq_broadcast(&lk->lock_object,
305 				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
306 				queue = SQ_SHARED_QUEUE;
307 			}
308 
309 		} else {
310 
311 			/*
312 			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
313 			 * and using interruptible sleeps/timeout may have
314 			 * left spourious lk_exslpfail counts on, so clean
315 			 * it up anyway.
316 			 */
317 			lk->lk_exslpfail = 0;
318 			queue = SQ_SHARED_QUEUE;
319 		}
320 
321 		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
322 		    v)) {
323 			sleepq_release(&lk->lock_object);
324 			continue;
325 		}
326 		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
327 		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
328 		    "exclusive");
329 		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
330 		    0, queue);
331 		sleepq_release(&lk->lock_object);
332 		break;
333 	}
334 
335 	lock_profile_release_lock(&lk->lock_object);
336 	TD_LOCKS_DEC(curthread);
337 	TD_SLOCKS_DEC(curthread);
338 	return (wakeup_swapper);
339 }
340 
341 static void
342 assert_lockmgr(const struct lock_object *lock, int what)
343 {
344 
345 	panic("lockmgr locks do not support assertions");
346 }
347 
348 static void
349 lock_lockmgr(struct lock_object *lock, uintptr_t how)
350 {
351 
352 	panic("lockmgr locks do not support sleep interlocking");
353 }
354 
355 static uintptr_t
356 unlock_lockmgr(struct lock_object *lock)
357 {
358 
359 	panic("lockmgr locks do not support sleep interlocking");
360 }
361 
362 #ifdef KDTRACE_HOOKS
363 static int
364 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
365 {
366 
367 	panic("lockmgr locks do not support owner inquiring");
368 }
369 #endif
370 
371 void
372 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
373 {
374 	int iflags;
375 
376 	MPASS((flags & ~LK_INIT_MASK) == 0);
377 	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
378             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
379             &lk->lk_lock));
380 
381 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
382 	if (flags & LK_CANRECURSE)
383 		iflags |= LO_RECURSABLE;
384 	if ((flags & LK_NODUP) == 0)
385 		iflags |= LO_DUPOK;
386 	if (flags & LK_NOPROFILE)
387 		iflags |= LO_NOPROFILE;
388 	if ((flags & LK_NOWITNESS) == 0)
389 		iflags |= LO_WITNESS;
390 	if (flags & LK_QUIET)
391 		iflags |= LO_QUIET;
392 	if (flags & LK_IS_VNODE)
393 		iflags |= LO_IS_VNODE;
394 	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
395 
396 	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
397 	lk->lk_lock = LK_UNLOCKED;
398 	lk->lk_recurse = 0;
399 	lk->lk_exslpfail = 0;
400 	lk->lk_timo = timo;
401 	lk->lk_pri = pri;
402 	STACK_ZERO(lk);
403 }
404 
405 /*
406  * XXX: Gross hacks to manipulate external lock flags after
407  * initialization.  Used for certain vnode and buf locks.
408  */
409 void
410 lockallowshare(struct lock *lk)
411 {
412 
413 	lockmgr_assert(lk, KA_XLOCKED);
414 	lk->lock_object.lo_flags &= ~LK_NOSHARE;
415 }
416 
417 void
418 lockdisableshare(struct lock *lk)
419 {
420 
421 	lockmgr_assert(lk, KA_XLOCKED);
422 	lk->lock_object.lo_flags |= LK_NOSHARE;
423 }
424 
425 void
426 lockallowrecurse(struct lock *lk)
427 {
428 
429 	lockmgr_assert(lk, KA_XLOCKED);
430 	lk->lock_object.lo_flags |= LO_RECURSABLE;
431 }
432 
433 void
434 lockdisablerecurse(struct lock *lk)
435 {
436 
437 	lockmgr_assert(lk, KA_XLOCKED);
438 	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
439 }
440 
441 void
442 lockdestroy(struct lock *lk)
443 {
444 
445 	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
446 	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
447 	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
448 	lock_destroy(&lk->lock_object);
449 }
450 
451 int
452 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
453     const char *wmesg, int pri, int timo, const char *file, int line)
454 {
455 	GIANT_DECLARE;
456 	struct lock_class *class;
457 	const char *iwmesg;
458 	uintptr_t tid, v, x;
459 	u_int op, realexslp;
460 	int error, ipri, itimo, queue, wakeup_swapper;
461 #ifdef LOCK_PROFILING
462 	uint64_t waittime = 0;
463 	int contested = 0;
464 #endif
465 #ifdef ADAPTIVE_LOCKMGRS
466 	volatile struct thread *owner;
467 	u_int i, spintries = 0;
468 #endif
469 
470 	error = 0;
471 	tid = (uintptr_t)curthread;
472 	op = (flags & LK_TYPE_MASK);
473 	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
474 	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
475 	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
476 
477 	MPASS((flags & ~LK_TOTAL_MASK) == 0);
478 	KASSERT((op & (op - 1)) == 0,
479 	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
480 	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
481 	    (op != LK_DOWNGRADE && op != LK_RELEASE),
482 	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
483 	    __func__, file, line));
484 	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
485 	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
486 	    __func__, file, line));
487 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
488 	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
489 	    lk->lock_object.lo_name, file, line));
490 
491 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
492 	if (panicstr != NULL) {
493 		if (flags & LK_INTERLOCK)
494 			class->lc_unlock(ilk);
495 		return (0);
496 	}
497 
498 	if (lk->lock_object.lo_flags & LK_NOSHARE) {
499 		switch (op) {
500 		case LK_SHARED:
501 			op = LK_EXCLUSIVE;
502 			break;
503 		case LK_UPGRADE:
504 		case LK_TRYUPGRADE:
505 		case LK_DOWNGRADE:
506 			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
507 			    file, line);
508 			if (flags & LK_INTERLOCK)
509 				class->lc_unlock(ilk);
510 			return (0);
511 		}
512 	}
513 
514 	wakeup_swapper = 0;
515 	switch (op) {
516 	case LK_SHARED:
517 		if (LK_CAN_WITNESS(flags))
518 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
519 			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
520 		for (;;) {
521 			x = lk->lk_lock;
522 
523 			/*
524 			 * If no other thread has an exclusive lock, or
525 			 * no exclusive waiter is present, bump the count of
526 			 * sharers.  Since we have to preserve the state of
527 			 * waiters, if we fail to acquire the shared lock
528 			 * loop back and retry.
529 			 */
530 			if (LK_CAN_SHARE(x, flags)) {
531 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
532 				    x + LK_ONE_SHARER))
533 					break;
534 				continue;
535 			}
536 #ifdef HWPMC_HOOKS
537 			PMC_SOFT_CALL( , , lock, failed);
538 #endif
539 			lock_profile_obtain_lock_failed(&lk->lock_object,
540 			    &contested, &waittime);
541 
542 			/*
543 			 * If the lock is already held by curthread in
544 			 * exclusive way avoid a deadlock.
545 			 */
546 			if (LK_HOLDER(x) == tid) {
547 				LOCK_LOG2(lk,
548 				    "%s: %p already held in exclusive mode",
549 				    __func__, lk);
550 				error = EDEADLK;
551 				break;
552 			}
553 
554 			/*
555 			 * If the lock is expected to not sleep just give up
556 			 * and return.
557 			 */
558 			if (LK_TRYOP(flags)) {
559 				LOCK_LOG2(lk, "%s: %p fails the try operation",
560 				    __func__, lk);
561 				error = EBUSY;
562 				break;
563 			}
564 
565 #ifdef ADAPTIVE_LOCKMGRS
566 			/*
567 			 * If the owner is running on another CPU, spin until
568 			 * the owner stops running or the state of the lock
569 			 * changes.  We need a double-state handle here
570 			 * because for a failed acquisition the lock can be
571 			 * either held in exclusive mode or shared mode
572 			 * (for the writer starvation avoidance technique).
573 			 */
574 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
575 			    LK_HOLDER(x) != LK_KERNPROC) {
576 				owner = (struct thread *)LK_HOLDER(x);
577 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
578 					CTR3(KTR_LOCK,
579 					    "%s: spinning on %p held by %p",
580 					    __func__, lk, owner);
581 				KTR_STATE1(KTR_SCHED, "thread",
582 				    sched_tdname(td), "spinning",
583 				    "lockname:\"%s\"", lk->lock_object.lo_name);
584 
585 				/*
586 				 * If we are holding also an interlock drop it
587 				 * in order to avoid a deadlock if the lockmgr
588 				 * owner is adaptively spinning on the
589 				 * interlock itself.
590 				 */
591 				if (flags & LK_INTERLOCK) {
592 					class->lc_unlock(ilk);
593 					flags &= ~LK_INTERLOCK;
594 				}
595 				GIANT_SAVE();
596 				while (LK_HOLDER(lk->lk_lock) ==
597 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
598 					cpu_spinwait();
599 				KTR_STATE0(KTR_SCHED, "thread",
600 				    sched_tdname(td), "running");
601 				GIANT_RESTORE();
602 				continue;
603 			} else if (LK_CAN_ADAPT(lk, flags) &&
604 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
605 			    spintries < alk_retries) {
606 				KTR_STATE1(KTR_SCHED, "thread",
607 				    sched_tdname(td), "spinning",
608 				    "lockname:\"%s\"", lk->lock_object.lo_name);
609 				if (flags & LK_INTERLOCK) {
610 					class->lc_unlock(ilk);
611 					flags &= ~LK_INTERLOCK;
612 				}
613 				GIANT_SAVE();
614 				spintries++;
615 				for (i = 0; i < alk_loops; i++) {
616 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
617 						CTR4(KTR_LOCK,
618 				    "%s: shared spinning on %p with %u and %u",
619 						    __func__, lk, spintries, i);
620 					x = lk->lk_lock;
621 					if ((x & LK_SHARE) == 0 ||
622 					    LK_CAN_SHARE(x, flags) != 0)
623 						break;
624 					cpu_spinwait();
625 				}
626 				KTR_STATE0(KTR_SCHED, "thread",
627 				    sched_tdname(td), "running");
628 				GIANT_RESTORE();
629 				if (i != alk_loops)
630 					continue;
631 			}
632 #endif
633 
634 			/*
635 			 * Acquire the sleepqueue chain lock because we
636 			 * probabilly will need to manipulate waiters flags.
637 			 */
638 			sleepq_lock(&lk->lock_object);
639 			x = lk->lk_lock;
640 
641 			/*
642 			 * if the lock can be acquired in shared mode, try
643 			 * again.
644 			 */
645 			if (LK_CAN_SHARE(x, flags)) {
646 				sleepq_release(&lk->lock_object);
647 				continue;
648 			}
649 
650 #ifdef ADAPTIVE_LOCKMGRS
651 			/*
652 			 * The current lock owner might have started executing
653 			 * on another CPU (or the lock could have changed
654 			 * owner) while we were waiting on the turnstile
655 			 * chain lock.  If so, drop the turnstile lock and try
656 			 * again.
657 			 */
658 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
659 			    LK_HOLDER(x) != LK_KERNPROC) {
660 				owner = (struct thread *)LK_HOLDER(x);
661 				if (TD_IS_RUNNING(owner)) {
662 					sleepq_release(&lk->lock_object);
663 					continue;
664 				}
665 			}
666 #endif
667 
668 			/*
669 			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
670 			 * loop back and retry.
671 			 */
672 			if ((x & LK_SHARED_WAITERS) == 0) {
673 				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
674 				    x | LK_SHARED_WAITERS)) {
675 					sleepq_release(&lk->lock_object);
676 					continue;
677 				}
678 				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
679 				    __func__, lk);
680 			}
681 
682 			/*
683 			 * As far as we have been unable to acquire the
684 			 * shared lock and the shared waiters flag is set,
685 			 * we will sleep.
686 			 */
687 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
688 			    SQ_SHARED_QUEUE);
689 			flags &= ~LK_INTERLOCK;
690 			if (error) {
691 				LOCK_LOG3(lk,
692 				    "%s: interrupted sleep for %p with %d",
693 				    __func__, lk, error);
694 				break;
695 			}
696 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
697 			    __func__, lk);
698 		}
699 		if (error == 0) {
700 			lock_profile_obtain_lock_success(&lk->lock_object,
701 			    contested, waittime, file, line);
702 			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
703 			    line);
704 			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
705 			    line);
706 			TD_LOCKS_INC(curthread);
707 			TD_SLOCKS_INC(curthread);
708 			STACK_SAVE(lk);
709 		}
710 		break;
711 	case LK_UPGRADE:
712 	case LK_TRYUPGRADE:
713 		_lockmgr_assert(lk, KA_SLOCKED, file, line);
714 		v = lk->lk_lock;
715 		x = v & LK_ALL_WAITERS;
716 		v &= LK_EXCLUSIVE_SPINNERS;
717 
718 		/*
719 		 * Try to switch from one shared lock to an exclusive one.
720 		 * We need to preserve waiters flags during the operation.
721 		 */
722 		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
723 		    tid | x)) {
724 			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
725 			    line);
726 			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
727 			    LK_TRYWIT(flags), file, line);
728 			TD_SLOCKS_DEC(curthread);
729 			break;
730 		}
731 
732 		/*
733 		 * In LK_TRYUPGRADE mode, do not drop the lock,
734 		 * returning EBUSY instead.
735 		 */
736 		if (op == LK_TRYUPGRADE) {
737 			LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
738 			    __func__, lk);
739 			error = EBUSY;
740 			break;
741 		}
742 
743 		/*
744 		 * We have been unable to succeed in upgrading, so just
745 		 * give up the shared lock.
746 		 */
747 		wakeup_swapper |= wakeupshlk(lk, file, line);
748 
749 		/* FALLTHROUGH */
750 	case LK_EXCLUSIVE:
751 		if (LK_CAN_WITNESS(flags))
752 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
753 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
754 			    ilk : NULL);
755 
756 		/*
757 		 * If curthread already holds the lock and this one is
758 		 * allowed to recurse, simply recurse on it.
759 		 */
760 		if (lockmgr_xlocked(lk)) {
761 			if ((flags & LK_CANRECURSE) == 0 &&
762 			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
763 
764 				/*
765 				 * If the lock is expected to not panic just
766 				 * give up and return.
767 				 */
768 				if (LK_TRYOP(flags)) {
769 					LOCK_LOG2(lk,
770 					    "%s: %p fails the try operation",
771 					    __func__, lk);
772 					error = EBUSY;
773 					break;
774 				}
775 				if (flags & LK_INTERLOCK)
776 					class->lc_unlock(ilk);
777 		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
778 				    __func__, iwmesg, file, line);
779 			}
780 			lk->lk_recurse++;
781 			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
782 			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
783 			    lk->lk_recurse, file, line);
784 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
785 			    LK_TRYWIT(flags), file, line);
786 			TD_LOCKS_INC(curthread);
787 			break;
788 		}
789 
790 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
791 		    tid)) {
792 #ifdef HWPMC_HOOKS
793 			PMC_SOFT_CALL( , , lock, failed);
794 #endif
795 			lock_profile_obtain_lock_failed(&lk->lock_object,
796 			    &contested, &waittime);
797 
798 			/*
799 			 * If the lock is expected to not sleep just give up
800 			 * and return.
801 			 */
802 			if (LK_TRYOP(flags)) {
803 				LOCK_LOG2(lk, "%s: %p fails the try operation",
804 				    __func__, lk);
805 				error = EBUSY;
806 				break;
807 			}
808 
809 #ifdef ADAPTIVE_LOCKMGRS
810 			/*
811 			 * If the owner is running on another CPU, spin until
812 			 * the owner stops running or the state of the lock
813 			 * changes.
814 			 */
815 			x = lk->lk_lock;
816 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
817 			    LK_HOLDER(x) != LK_KERNPROC) {
818 				owner = (struct thread *)LK_HOLDER(x);
819 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
820 					CTR3(KTR_LOCK,
821 					    "%s: spinning on %p held by %p",
822 					    __func__, lk, owner);
823 				KTR_STATE1(KTR_SCHED, "thread",
824 				    sched_tdname(td), "spinning",
825 				    "lockname:\"%s\"", lk->lock_object.lo_name);
826 
827 				/*
828 				 * If we are holding also an interlock drop it
829 				 * in order to avoid a deadlock if the lockmgr
830 				 * owner is adaptively spinning on the
831 				 * interlock itself.
832 				 */
833 				if (flags & LK_INTERLOCK) {
834 					class->lc_unlock(ilk);
835 					flags &= ~LK_INTERLOCK;
836 				}
837 				GIANT_SAVE();
838 				while (LK_HOLDER(lk->lk_lock) ==
839 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
840 					cpu_spinwait();
841 				KTR_STATE0(KTR_SCHED, "thread",
842 				    sched_tdname(td), "running");
843 				GIANT_RESTORE();
844 				continue;
845 			} else if (LK_CAN_ADAPT(lk, flags) &&
846 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
847 			    spintries < alk_retries) {
848 				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
849 				    !atomic_cmpset_ptr(&lk->lk_lock, x,
850 				    x | LK_EXCLUSIVE_SPINNERS))
851 					continue;
852 				KTR_STATE1(KTR_SCHED, "thread",
853 				    sched_tdname(td), "spinning",
854 				    "lockname:\"%s\"", lk->lock_object.lo_name);
855 				if (flags & LK_INTERLOCK) {
856 					class->lc_unlock(ilk);
857 					flags &= ~LK_INTERLOCK;
858 				}
859 				GIANT_SAVE();
860 				spintries++;
861 				for (i = 0; i < alk_loops; i++) {
862 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
863 						CTR4(KTR_LOCK,
864 				    "%s: shared spinning on %p with %u and %u",
865 						    __func__, lk, spintries, i);
866 					if ((lk->lk_lock &
867 					    LK_EXCLUSIVE_SPINNERS) == 0)
868 						break;
869 					cpu_spinwait();
870 				}
871 				KTR_STATE0(KTR_SCHED, "thread",
872 				    sched_tdname(td), "running");
873 				GIANT_RESTORE();
874 				if (i != alk_loops)
875 					continue;
876 			}
877 #endif
878 
879 			/*
880 			 * Acquire the sleepqueue chain lock because we
881 			 * probabilly will need to manipulate waiters flags.
882 			 */
883 			sleepq_lock(&lk->lock_object);
884 			x = lk->lk_lock;
885 
886 			/*
887 			 * if the lock has been released while we spun on
888 			 * the sleepqueue chain lock just try again.
889 			 */
890 			if (x == LK_UNLOCKED) {
891 				sleepq_release(&lk->lock_object);
892 				continue;
893 			}
894 
895 #ifdef ADAPTIVE_LOCKMGRS
896 			/*
897 			 * The current lock owner might have started executing
898 			 * on another CPU (or the lock could have changed
899 			 * owner) while we were waiting on the turnstile
900 			 * chain lock.  If so, drop the turnstile lock and try
901 			 * again.
902 			 */
903 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
904 			    LK_HOLDER(x) != LK_KERNPROC) {
905 				owner = (struct thread *)LK_HOLDER(x);
906 				if (TD_IS_RUNNING(owner)) {
907 					sleepq_release(&lk->lock_object);
908 					continue;
909 				}
910 			}
911 #endif
912 
913 			/*
914 			 * The lock can be in the state where there is a
915 			 * pending queue of waiters, but still no owner.
916 			 * This happens when the lock is contested and an
917 			 * owner is going to claim the lock.
918 			 * If curthread is the one successfully acquiring it
919 			 * claim lock ownership and return, preserving waiters
920 			 * flags.
921 			 */
922 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
923 			if ((x & ~v) == LK_UNLOCKED) {
924 				v &= ~LK_EXCLUSIVE_SPINNERS;
925 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
926 				    tid | v)) {
927 					sleepq_release(&lk->lock_object);
928 					LOCK_LOG2(lk,
929 					    "%s: %p claimed by a new writer",
930 					    __func__, lk);
931 					break;
932 				}
933 				sleepq_release(&lk->lock_object);
934 				continue;
935 			}
936 
937 			/*
938 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
939 			 * fail, loop back and retry.
940 			 */
941 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
942 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
943 				    x | LK_EXCLUSIVE_WAITERS)) {
944 					sleepq_release(&lk->lock_object);
945 					continue;
946 				}
947 				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
948 				    __func__, lk);
949 			}
950 
951 			/*
952 			 * As far as we have been unable to acquire the
953 			 * exclusive lock and the exclusive waiters flag
954 			 * is set, we will sleep.
955 			 */
956 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
957 			    SQ_EXCLUSIVE_QUEUE);
958 			flags &= ~LK_INTERLOCK;
959 			if (error) {
960 				LOCK_LOG3(lk,
961 				    "%s: interrupted sleep for %p with %d",
962 				    __func__, lk, error);
963 				break;
964 			}
965 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
966 			    __func__, lk);
967 		}
968 		if (error == 0) {
969 			lock_profile_obtain_lock_success(&lk->lock_object,
970 			    contested, waittime, file, line);
971 			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
972 			    lk->lk_recurse, file, line);
973 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
974 			    LK_TRYWIT(flags), file, line);
975 			TD_LOCKS_INC(curthread);
976 			STACK_SAVE(lk);
977 		}
978 		break;
979 	case LK_DOWNGRADE:
980 		_lockmgr_assert(lk, KA_XLOCKED, file, line);
981 		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
982 		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
983 
984 		/*
985 		 * Panic if the lock is recursed.
986 		 */
987 		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
988 			if (flags & LK_INTERLOCK)
989 				class->lc_unlock(ilk);
990 			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
991 			    __func__, iwmesg, file, line);
992 		}
993 		TD_SLOCKS_INC(curthread);
994 
995 		/*
996 		 * In order to preserve waiters flags, just spin.
997 		 */
998 		for (;;) {
999 			x = lk->lk_lock;
1000 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1001 			x &= LK_ALL_WAITERS;
1002 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1003 			    LK_SHARERS_LOCK(1) | x))
1004 				break;
1005 			cpu_spinwait();
1006 		}
1007 		break;
1008 	case LK_RELEASE:
1009 		_lockmgr_assert(lk, KA_LOCKED, file, line);
1010 		x = lk->lk_lock;
1011 
1012 		if ((x & LK_SHARE) == 0) {
1013 
1014 			/*
1015 			 * As first option, treact the lock as if it has not
1016 			 * any waiter.
1017 			 * Fix-up the tid var if the lock has been disowned.
1018 			 */
1019 			if (LK_HOLDER(x) == LK_KERNPROC)
1020 				tid = LK_KERNPROC;
1021 			else {
1022 				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
1023 				    file, line);
1024 				TD_LOCKS_DEC(curthread);
1025 			}
1026 			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
1027 			    lk->lk_recurse, file, line);
1028 
1029 			/*
1030 			 * The lock is held in exclusive mode.
1031 			 * If the lock is recursed also, then unrecurse it.
1032 			 */
1033 			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1034 				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
1035 				    lk);
1036 				lk->lk_recurse--;
1037 				break;
1038 			}
1039 			if (tid != LK_KERNPROC)
1040 				lock_profile_release_lock(&lk->lock_object);
1041 
1042 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
1043 			    LK_UNLOCKED))
1044 				break;
1045 
1046 			sleepq_lock(&lk->lock_object);
1047 			x = lk->lk_lock;
1048 			v = LK_UNLOCKED;
1049 
1050 			/*
1051 		 	 * If the lock has exclusive waiters, give them
1052 			 * preference in order to avoid deadlock with
1053 			 * shared runners up.
1054 			 * If interruptible sleeps left the exclusive queue
1055 			 * empty avoid a starvation for the threads sleeping
1056 			 * on the shared queue by giving them precedence
1057 			 * and cleaning up the exclusive waiters bit anyway.
1058 			 * Please note that lk_exslpfail count may be lying
1059 			 * about the real number of waiters with the
1060 			 * LK_SLEEPFAIL flag on because they may be used in
1061 			 * conjunction with interruptible sleeps so
1062 			 * lk_exslpfail might be considered an 'upper limit'
1063 			 * bound, including the edge cases.
1064 			 */
1065 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1066 			realexslp = sleepq_sleepcnt(&lk->lock_object,
1067 			    SQ_EXCLUSIVE_QUEUE);
1068 			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1069 				if (lk->lk_exslpfail < realexslp) {
1070 					lk->lk_exslpfail = 0;
1071 					queue = SQ_EXCLUSIVE_QUEUE;
1072 					v |= (x & LK_SHARED_WAITERS);
1073 				} else {
1074 					lk->lk_exslpfail = 0;
1075 					LOCK_LOG2(lk,
1076 					"%s: %p has only LK_SLEEPFAIL sleepers",
1077 					    __func__, lk);
1078 					LOCK_LOG2(lk,
1079 			"%s: %p waking up threads on the exclusive queue",
1080 					    __func__, lk);
1081 					wakeup_swapper =
1082 					    sleepq_broadcast(&lk->lock_object,
1083 					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1084 					queue = SQ_SHARED_QUEUE;
1085 				}
1086 			} else {
1087 
1088 				/*
1089 				 * Exclusive waiters sleeping with LK_SLEEPFAIL
1090 				 * on and using interruptible sleeps/timeout
1091 				 * may have left spourious lk_exslpfail counts
1092 				 * on, so clean it up anyway.
1093 				 */
1094 				lk->lk_exslpfail = 0;
1095 				queue = SQ_SHARED_QUEUE;
1096 			}
1097 
1098 			LOCK_LOG3(lk,
1099 			    "%s: %p waking up threads on the %s queue",
1100 			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1101 			    "exclusive");
1102 			atomic_store_rel_ptr(&lk->lk_lock, v);
1103 			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
1104 			    SLEEPQ_LK, 0, queue);
1105 			sleepq_release(&lk->lock_object);
1106 			break;
1107 		} else
1108 			wakeup_swapper = wakeupshlk(lk, file, line);
1109 		break;
1110 	case LK_DRAIN:
1111 		if (LK_CAN_WITNESS(flags))
1112 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1113 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1114 			    ilk : NULL);
1115 
1116 		/*
1117 		 * Trying to drain a lock we already own will result in a
1118 		 * deadlock.
1119 		 */
1120 		if (lockmgr_xlocked(lk)) {
1121 			if (flags & LK_INTERLOCK)
1122 				class->lc_unlock(ilk);
1123 			panic("%s: draining %s with the lock held @ %s:%d\n",
1124 			    __func__, iwmesg, file, line);
1125 		}
1126 
1127 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1128 #ifdef HWPMC_HOOKS
1129 			PMC_SOFT_CALL( , , lock, failed);
1130 #endif
1131 			lock_profile_obtain_lock_failed(&lk->lock_object,
1132 			    &contested, &waittime);
1133 
1134 			/*
1135 			 * If the lock is expected to not sleep just give up
1136 			 * and return.
1137 			 */
1138 			if (LK_TRYOP(flags)) {
1139 				LOCK_LOG2(lk, "%s: %p fails the try operation",
1140 				    __func__, lk);
1141 				error = EBUSY;
1142 				break;
1143 			}
1144 
1145 			/*
1146 			 * Acquire the sleepqueue chain lock because we
1147 			 * probabilly will need to manipulate waiters flags.
1148 			 */
1149 			sleepq_lock(&lk->lock_object);
1150 			x = lk->lk_lock;
1151 
1152 			/*
1153 			 * if the lock has been released while we spun on
1154 			 * the sleepqueue chain lock just try again.
1155 			 */
1156 			if (x == LK_UNLOCKED) {
1157 				sleepq_release(&lk->lock_object);
1158 				continue;
1159 			}
1160 
1161 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1162 			if ((x & ~v) == LK_UNLOCKED) {
1163 				v = (x & ~LK_EXCLUSIVE_SPINNERS);
1164 
1165 				/*
1166 				 * If interruptible sleeps left the exclusive
1167 				 * queue empty avoid a starvation for the
1168 				 * threads sleeping on the shared queue by
1169 				 * giving them precedence and cleaning up the
1170 				 * exclusive waiters bit anyway.
1171 				 * Please note that lk_exslpfail count may be
1172 				 * lying about the real number of waiters with
1173 				 * the LK_SLEEPFAIL flag on because they may
1174 				 * be used in conjunction with interruptible
1175 				 * sleeps so lk_exslpfail might be considered
1176 				 * an 'upper limit' bound, including the edge
1177 				 * cases.
1178 				 */
1179 				if (v & LK_EXCLUSIVE_WAITERS) {
1180 					queue = SQ_EXCLUSIVE_QUEUE;
1181 					v &= ~LK_EXCLUSIVE_WAITERS;
1182 				} else {
1183 
1184 					/*
1185 					 * Exclusive waiters sleeping with
1186 					 * LK_SLEEPFAIL on and using
1187 					 * interruptible sleeps/timeout may
1188 					 * have left spourious lk_exslpfail
1189 					 * counts on, so clean it up anyway.
1190 					 */
1191 					MPASS(v & LK_SHARED_WAITERS);
1192 					lk->lk_exslpfail = 0;
1193 					queue = SQ_SHARED_QUEUE;
1194 					v &= ~LK_SHARED_WAITERS;
1195 				}
1196 				if (queue == SQ_EXCLUSIVE_QUEUE) {
1197 					realexslp =
1198 					    sleepq_sleepcnt(&lk->lock_object,
1199 					    SQ_EXCLUSIVE_QUEUE);
1200 					if (lk->lk_exslpfail >= realexslp) {
1201 						lk->lk_exslpfail = 0;
1202 						queue = SQ_SHARED_QUEUE;
1203 						v &= ~LK_SHARED_WAITERS;
1204 						if (realexslp != 0) {
1205 							LOCK_LOG2(lk,
1206 					"%s: %p has only LK_SLEEPFAIL sleepers",
1207 							    __func__, lk);
1208 							LOCK_LOG2(lk,
1209 			"%s: %p waking up threads on the exclusive queue",
1210 							    __func__, lk);
1211 							wakeup_swapper =
1212 							    sleepq_broadcast(
1213 							    &lk->lock_object,
1214 							    SLEEPQ_LK, 0,
1215 							    SQ_EXCLUSIVE_QUEUE);
1216 						}
1217 					} else
1218 						lk->lk_exslpfail = 0;
1219 				}
1220 				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1221 					sleepq_release(&lk->lock_object);
1222 					continue;
1223 				}
1224 				LOCK_LOG3(lk,
1225 				"%s: %p waking up all threads on the %s queue",
1226 				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1227 				    "shared" : "exclusive");
1228 				wakeup_swapper |= sleepq_broadcast(
1229 				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1230 
1231 				/*
1232 				 * If shared waiters have been woken up we need
1233 				 * to wait for one of them to acquire the lock
1234 				 * before to set the exclusive waiters in
1235 				 * order to avoid a deadlock.
1236 				 */
1237 				if (queue == SQ_SHARED_QUEUE) {
1238 					for (v = lk->lk_lock;
1239 					    (v & LK_SHARE) && !LK_SHARERS(v);
1240 					    v = lk->lk_lock)
1241 						cpu_spinwait();
1242 				}
1243 			}
1244 
1245 			/*
1246 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1247 			 * fail, loop back and retry.
1248 			 */
1249 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1250 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1251 				    x | LK_EXCLUSIVE_WAITERS)) {
1252 					sleepq_release(&lk->lock_object);
1253 					continue;
1254 				}
1255 				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1256 				    __func__, lk);
1257 			}
1258 
1259 			/*
1260 			 * As far as we have been unable to acquire the
1261 			 * exclusive lock and the exclusive waiters flag
1262 			 * is set, we will sleep.
1263 			 */
1264 			if (flags & LK_INTERLOCK) {
1265 				class->lc_unlock(ilk);
1266 				flags &= ~LK_INTERLOCK;
1267 			}
1268 			GIANT_SAVE();
1269 			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1270 			    SQ_EXCLUSIVE_QUEUE);
1271 			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1272 			GIANT_RESTORE();
1273 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1274 			    __func__, lk);
1275 		}
1276 
1277 		if (error == 0) {
1278 			lock_profile_obtain_lock_success(&lk->lock_object,
1279 			    contested, waittime, file, line);
1280 			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1281 			    lk->lk_recurse, file, line);
1282 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1283 			    LK_TRYWIT(flags), file, line);
1284 			TD_LOCKS_INC(curthread);
1285 			STACK_SAVE(lk);
1286 		}
1287 		break;
1288 	default:
1289 		if (flags & LK_INTERLOCK)
1290 			class->lc_unlock(ilk);
1291 		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1292 	}
1293 
1294 	if (flags & LK_INTERLOCK)
1295 		class->lc_unlock(ilk);
1296 	if (wakeup_swapper)
1297 		kick_proc0();
1298 
1299 	return (error);
1300 }
1301 
1302 void
1303 _lockmgr_disown(struct lock *lk, const char *file, int line)
1304 {
1305 	uintptr_t tid, x;
1306 
1307 	if (SCHEDULER_STOPPED())
1308 		return;
1309 
1310 	tid = (uintptr_t)curthread;
1311 	_lockmgr_assert(lk, KA_XLOCKED, file, line);
1312 
1313 	/*
1314 	 * Panic if the lock is recursed.
1315 	 */
1316 	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1317 		panic("%s: disown a recursed lockmgr @ %s:%d\n",
1318 		    __func__,  file, line);
1319 
1320 	/*
1321 	 * If the owner is already LK_KERNPROC just skip the whole operation.
1322 	 */
1323 	if (LK_HOLDER(lk->lk_lock) != tid)
1324 		return;
1325 	lock_profile_release_lock(&lk->lock_object);
1326 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1327 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1328 	TD_LOCKS_DEC(curthread);
1329 	STACK_SAVE(lk);
1330 
1331 	/*
1332 	 * In order to preserve waiters flags, just spin.
1333 	 */
1334 	for (;;) {
1335 		x = lk->lk_lock;
1336 		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1337 		x &= LK_ALL_WAITERS;
1338 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1339 		    LK_KERNPROC | x))
1340 			return;
1341 		cpu_spinwait();
1342 	}
1343 }
1344 
1345 void
1346 lockmgr_printinfo(const struct lock *lk)
1347 {
1348 	struct thread *td;
1349 	uintptr_t x;
1350 
1351 	if (lk->lk_lock == LK_UNLOCKED)
1352 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1353 	else if (lk->lk_lock & LK_SHARE)
1354 		printf("lock type %s: SHARED (count %ju)\n",
1355 		    lk->lock_object.lo_name,
1356 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1357 	else {
1358 		td = lockmgr_xholder(lk);
1359 		if (td == (struct thread *)LK_KERNPROC)
1360 			printf("lock type %s: EXCL by KERNPROC\n",
1361 			    lk->lock_object.lo_name);
1362 		else
1363 			printf("lock type %s: EXCL by thread %p "
1364 			    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1365 			    td, td->td_proc->p_pid, td->td_proc->p_comm,
1366 			    td->td_tid);
1367 	}
1368 
1369 	x = lk->lk_lock;
1370 	if (x & LK_EXCLUSIVE_WAITERS)
1371 		printf(" with exclusive waiters pending\n");
1372 	if (x & LK_SHARED_WAITERS)
1373 		printf(" with shared waiters pending\n");
1374 	if (x & LK_EXCLUSIVE_SPINNERS)
1375 		printf(" with exclusive spinners pending\n");
1376 
1377 	STACK_PRINT(lk);
1378 }
1379 
1380 int
1381 lockstatus(const struct lock *lk)
1382 {
1383 	uintptr_t v, x;
1384 	int ret;
1385 
1386 	ret = LK_SHARED;
1387 	x = lk->lk_lock;
1388 	v = LK_HOLDER(x);
1389 
1390 	if ((x & LK_SHARE) == 0) {
1391 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1392 			ret = LK_EXCLUSIVE;
1393 		else
1394 			ret = LK_EXCLOTHER;
1395 	} else if (x == LK_UNLOCKED)
1396 		ret = 0;
1397 
1398 	return (ret);
1399 }
1400 
1401 #ifdef INVARIANT_SUPPORT
1402 
1403 FEATURE(invariant_support,
1404     "Support for modules compiled with INVARIANTS option");
1405 
1406 #ifndef INVARIANTS
1407 #undef	_lockmgr_assert
1408 #endif
1409 
1410 void
1411 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1412 {
1413 	int slocked = 0;
1414 
1415 	if (panicstr != NULL)
1416 		return;
1417 	switch (what) {
1418 	case KA_SLOCKED:
1419 	case KA_SLOCKED | KA_NOTRECURSED:
1420 	case KA_SLOCKED | KA_RECURSED:
1421 		slocked = 1;
1422 	case KA_LOCKED:
1423 	case KA_LOCKED | KA_NOTRECURSED:
1424 	case KA_LOCKED | KA_RECURSED:
1425 #ifdef WITNESS
1426 
1427 		/*
1428 		 * We cannot trust WITNESS if the lock is held in exclusive
1429 		 * mode and a call to lockmgr_disown() happened.
1430 		 * Workaround this skipping the check if the lock is held in
1431 		 * exclusive mode even for the KA_LOCKED case.
1432 		 */
1433 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1434 			witness_assert(&lk->lock_object, what, file, line);
1435 			break;
1436 		}
1437 #endif
1438 		if (lk->lk_lock == LK_UNLOCKED ||
1439 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1440 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1441 			panic("Lock %s not %slocked @ %s:%d\n",
1442 			    lk->lock_object.lo_name, slocked ? "share" : "",
1443 			    file, line);
1444 
1445 		if ((lk->lk_lock & LK_SHARE) == 0) {
1446 			if (lockmgr_recursed(lk)) {
1447 				if (what & KA_NOTRECURSED)
1448 					panic("Lock %s recursed @ %s:%d\n",
1449 					    lk->lock_object.lo_name, file,
1450 					    line);
1451 			} else if (what & KA_RECURSED)
1452 				panic("Lock %s not recursed @ %s:%d\n",
1453 				    lk->lock_object.lo_name, file, line);
1454 		}
1455 		break;
1456 	case KA_XLOCKED:
1457 	case KA_XLOCKED | KA_NOTRECURSED:
1458 	case KA_XLOCKED | KA_RECURSED:
1459 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1460 			panic("Lock %s not exclusively locked @ %s:%d\n",
1461 			    lk->lock_object.lo_name, file, line);
1462 		if (lockmgr_recursed(lk)) {
1463 			if (what & KA_NOTRECURSED)
1464 				panic("Lock %s recursed @ %s:%d\n",
1465 				    lk->lock_object.lo_name, file, line);
1466 		} else if (what & KA_RECURSED)
1467 			panic("Lock %s not recursed @ %s:%d\n",
1468 			    lk->lock_object.lo_name, file, line);
1469 		break;
1470 	case KA_UNLOCKED:
1471 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1472 			panic("Lock %s exclusively locked @ %s:%d\n",
1473 			    lk->lock_object.lo_name, file, line);
1474 		break;
1475 	default:
1476 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1477 		    line);
1478 	}
1479 }
1480 #endif
1481 
1482 #ifdef DDB
1483 int
1484 lockmgr_chain(struct thread *td, struct thread **ownerp)
1485 {
1486 	struct lock *lk;
1487 
1488 	lk = td->td_wchan;
1489 
1490 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1491 		return (0);
1492 	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1493 	if (lk->lk_lock & LK_SHARE)
1494 		db_printf("SHARED (count %ju)\n",
1495 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1496 	else
1497 		db_printf("EXCL\n");
1498 	*ownerp = lockmgr_xholder(lk);
1499 
1500 	return (1);
1501 }
1502 
1503 static void
1504 db_show_lockmgr(const struct lock_object *lock)
1505 {
1506 	struct thread *td;
1507 	const struct lock *lk;
1508 
1509 	lk = (const struct lock *)lock;
1510 
1511 	db_printf(" state: ");
1512 	if (lk->lk_lock == LK_UNLOCKED)
1513 		db_printf("UNLOCKED\n");
1514 	else if (lk->lk_lock & LK_SHARE)
1515 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1516 	else {
1517 		td = lockmgr_xholder(lk);
1518 		if (td == (struct thread *)LK_KERNPROC)
1519 			db_printf("XLOCK: LK_KERNPROC\n");
1520 		else
1521 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1522 			    td->td_tid, td->td_proc->p_pid,
1523 			    td->td_proc->p_comm);
1524 		if (lockmgr_recursed(lk))
1525 			db_printf(" recursed: %d\n", lk->lk_recurse);
1526 	}
1527 	db_printf(" waiters: ");
1528 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1529 	case LK_SHARED_WAITERS:
1530 		db_printf("shared\n");
1531 		break;
1532 	case LK_EXCLUSIVE_WAITERS:
1533 		db_printf("exclusive\n");
1534 		break;
1535 	case LK_ALL_WAITERS:
1536 		db_printf("shared and exclusive\n");
1537 		break;
1538 	default:
1539 		db_printf("none\n");
1540 	}
1541 	db_printf(" spinners: ");
1542 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1543 		db_printf("exclusive\n");
1544 	else
1545 		db_printf("none\n");
1546 }
1547 #endif
1548