xref: /freebsd/sys/kern/kern_lock.c (revision 595e514d0df2bac5b813d35f83e32875dbf16a83)
1 /*-
2  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice(s), this list of conditions and the following disclaimer as
10  *    the first lines of this file unmodified other than the possible
11  *    addition of one or more copyright notices.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice(s), this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26  * DAMAGE.
27  */
28 
29 #include "opt_adaptive_lockmgrs.h"
30 #include "opt_ddb.h"
31 #include "opt_hwpmc_hooks.h"
32 #include "opt_kdtrace.h"
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include <sys/param.h>
38 #include <sys/kdb.h>
39 #include <sys/ktr.h>
40 #include <sys/lock.h>
41 #include <sys/lock_profile.h>
42 #include <sys/lockmgr.h>
43 #include <sys/mutex.h>
44 #include <sys/proc.h>
45 #include <sys/sleepqueue.h>
46 #ifdef DEBUG_LOCKS
47 #include <sys/stack.h>
48 #endif
49 #include <sys/sysctl.h>
50 #include <sys/systm.h>
51 
52 #include <machine/cpu.h>
53 
54 #ifdef DDB
55 #include <ddb/ddb.h>
56 #endif
57 
58 #ifdef HWPMC_HOOKS
59 #include <sys/pmckern.h>
60 PMC_SOFT_DECLARE( , , lock, failed);
61 #endif
62 
63 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
64     (LK_ADAPTIVE | LK_NOSHARE));
65 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
66     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
67 
68 #define	SQ_EXCLUSIVE_QUEUE	0
69 #define	SQ_SHARED_QUEUE		1
70 
71 #ifndef INVARIANTS
72 #define	_lockmgr_assert(lk, what, file, line)
73 #define	TD_LOCKS_INC(td)
74 #define	TD_LOCKS_DEC(td)
75 #else
76 #define	TD_LOCKS_INC(td)	((td)->td_locks++)
77 #define	TD_LOCKS_DEC(td)	((td)->td_locks--)
78 #endif
79 #define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
80 #define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
81 
82 #ifndef DEBUG_LOCKS
83 #define	STACK_PRINT(lk)
84 #define	STACK_SAVE(lk)
85 #define	STACK_ZERO(lk)
86 #else
87 #define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
88 #define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
89 #define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
90 #endif
91 
92 #define	LOCK_LOG2(lk, string, arg1, arg2)				\
93 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
94 		CTR2(KTR_LOCK, (string), (arg1), (arg2))
95 #define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
96 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
97 		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
98 
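/*
 * Giant is dropped entirely across a lockmgr sleep or adaptive spin.
 * GIANT_SAVE() records how many times curthread owns Giant, saves the
 * WITNESS state and releases it completely; GIANT_RESTORE() reacquires
 * it the same number of times once the wait is over.
 */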
99 #define	GIANT_DECLARE							\
100 	int _i = 0;							\
101 	WITNESS_SAVE_DECL(Giant)
102 #define	GIANT_RESTORE() do {						\
103 	if (_i > 0) {							\
104 		while (_i--)						\
105 			mtx_lock(&Giant);				\
106 		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
107 	}								\
108 } while (0)
109 #define	GIANT_SAVE() do {						\
110 	if (mtx_owned(&Giant)) {					\
111 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
112 		while (mtx_owned(&Giant)) {				\
113 			_i++;						\
114 			mtx_unlock(&Giant);				\
115 		}							\
116 	}								\
117 } while (0)
118 
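/*
 * A new shared request is granted only while LK_SHARE is set in the lock
 * word (the lock is unlocked or already share-locked) and, in addition,
 * at least one of the following holds: no exclusive waiters are recorded,
 * no exclusive spinners are recorded, the requesting thread already owns
 * shared lockmgr locks, or it is flagged with TDP_DEADLKTREAT.  The last
 * two exceptions serve deadlock avoidance; the waiter/spinner checks keep
 * new readers from starving pending writers.
 */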
119 #define	LK_CAN_SHARE(x)							\
120 	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
121 	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
122 	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
123 #define	LK_TRYOP(x)							\
124 	((x) & LK_NOWAIT)
125 
126 #define	LK_CAN_WITNESS(x)						\
127 	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
128 #define	LK_TRYWIT(x)							\
129 	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
130 
131 #define	LK_CAN_ADAPT(lk, f)						\
132 	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
133 	((f) & LK_SLEEPFAIL) == 0)
134 
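/*
 * lk_lock encodes the whole lock state in a single word: with LK_SHARE
 * clear it holds the owning thread pointer (or LK_KERNPROC for a disowned
 * lock) OR'ed with the flag bits, while with LK_SHARE set the upper bits
 * count the shared holders.  The two macros below compare the word against
 * LK_KERNPROC and curthread respectively, masking out the waiters/spinners
 * flags but keeping LK_SHARE so that a share-locked word never matches.
 */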
135 #define	lockmgr_disowned(lk)						\
136 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
137 
138 #define	lockmgr_xlocked(lk)						\
139 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
140 
141 static void	assert_lockmgr(const struct lock_object *lock, int how);
142 #ifdef DDB
143 static void	db_show_lockmgr(const struct lock_object *lock);
144 #endif
145 static void	lock_lockmgr(struct lock_object *lock, int how);
146 #ifdef KDTRACE_HOOKS
147 static int	owner_lockmgr(const struct lock_object *lock,
148 		    struct thread **owner);
149 #endif
150 static int	unlock_lockmgr(struct lock_object *lock);
151 
152 struct lock_class lock_class_lockmgr = {
153 	.lc_name = "lockmgr",
154 	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
155 	.lc_assert = assert_lockmgr,
156 #ifdef DDB
157 	.lc_ddb_show = db_show_lockmgr,
158 #endif
159 	.lc_lock = lock_lockmgr,
160 	.lc_unlock = unlock_lockmgr,
161 #ifdef KDTRACE_HOOKS
162 	.lc_owner = owner_lockmgr,
163 #endif
164 };
165 
166 #ifdef ADAPTIVE_LOCKMGRS
167 static u_int alk_retries = 10;
168 static u_int alk_loops = 10000;
169 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
170     "lockmgr debugging");
171 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
172 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
173 #endif
174 
175 static __inline struct thread *
176 lockmgr_xholder(const struct lock *lk)
177 {
178 	uintptr_t x;
179 
180 	x = lk->lk_lock;
181 	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
182 }
183 
184 /*
185  * Assumes the sleepqueue chain lock (sleepq_lock) is held on entry and
186  * returns with it released.  It also assumes the generic interlock is
187  * sane and has been checked previously by the caller.  If LK_INTERLOCK
188  * is specified, the interlock is not reacquired after the sleep.
189  */
190 static __inline int
191 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
192     const char *wmesg, int pri, int timo, int queue)
193 {
194 	GIANT_DECLARE;
195 	struct lock_class *class;
196 	int catch, error;
197 
198 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
199 	catch = pri & PCATCH;
200 	pri &= PRIMASK;
201 	error = 0;
202 
203 	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
204 	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
205 
206 	if (flags & LK_INTERLOCK)
207 		class->lc_unlock(ilk);
208 	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
209 		lk->lk_exslpfail++;
210 	GIANT_SAVE();
211 	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
212 	    SLEEPQ_INTERRUPTIBLE : 0), queue);
213 	if ((flags & LK_TIMELOCK) && timo)
214 		sleepq_set_timeout(&lk->lock_object, timo);
215 
216 	/*
217 	 * Decide which flavour of sleep (timed and/or interruptible) to perform.
218 	 */
219 	if ((flags & LK_TIMELOCK) && timo && catch)
220 		error = sleepq_timedwait_sig(&lk->lock_object, pri);
221 	else if ((flags & LK_TIMELOCK) && timo)
222 		error = sleepq_timedwait(&lk->lock_object, pri);
223 	else if (catch)
224 		error = sleepq_wait_sig(&lk->lock_object, pri);
225 	else
226 		sleepq_wait(&lk->lock_object, pri);
227 	GIANT_RESTORE();
228 	if ((flags & LK_SLEEPFAIL) && error == 0)
229 		error = ENOLCK;
230 
231 	return (error);
232 }
233 
234 static __inline int
235 wakeupshlk(struct lock *lk, const char *file, int line)
236 {
237 	uintptr_t v, x;
238 	u_int realexslp;
239 	int queue, wakeup_swapper;
240 
241 	TD_LOCKS_DEC(curthread);
242 	TD_SLOCKS_DEC(curthread);
243 	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
244 	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
245 
246 	wakeup_swapper = 0;
247 	for (;;) {
248 		x = lk->lk_lock;
249 
250 		/*
251 		 * If there is more than one shared lock held, just drop one
252 		 * and return.
253 		 */
254 		if (LK_SHARERS(x) > 1) {
255 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
256 			    x - LK_ONE_SHARER))
257 				break;
258 			continue;
259 		}
260 
261 		/*
262 		 * If there are no waiters on the exclusive queue, drop the
263 		 * lock quickly.
264 		 */
265 		if ((x & LK_ALL_WAITERS) == 0) {
266 			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
267 			    LK_SHARERS_LOCK(1));
268 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
269 				break;
270 			continue;
271 		}
272 
273 		/*
274 		 * We should have a sharer with waiters, so enter the hard
275 		 * path in order to handle wakeups correctly.
276 		 */
277 		sleepq_lock(&lk->lock_object);
278 		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
279 		v = LK_UNLOCKED;
280 
281 		/*
282 		 * If the lock has exclusive waiters, give them preference in
283 		 * order to avoid deadlock with the shared runners-up.
284 		 * If interruptible sleeps left the exclusive queue empty,
285 		 * avoid starvation of the threads sleeping on the shared
286 		 * queue by giving them precedence and clearing the
287 		 * exclusive waiters bit anyway.
288 		 * Please note that lk_exslpfail may not reflect the real
289 		 * number of waiters with the LK_SLEEPFAIL flag on, because
290 		 * those waiters may also be using interruptible sleeps;
291 		 * lk_exslpfail should thus be treated as an upper bound,
292 		 * including the edge cases.
293 		 */
294 		realexslp = sleepq_sleepcnt(&lk->lock_object,
295 		    SQ_EXCLUSIVE_QUEUE);
296 		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
297 			if (lk->lk_exslpfail < realexslp) {
298 				lk->lk_exslpfail = 0;
299 				queue = SQ_EXCLUSIVE_QUEUE;
300 				v |= (x & LK_SHARED_WAITERS);
301 			} else {
302 				lk->lk_exslpfail = 0;
303 				LOCK_LOG2(lk,
304 				    "%s: %p has only LK_SLEEPFAIL sleepers",
305 				    __func__, lk);
306 				LOCK_LOG2(lk,
307 			    "%s: %p waking up threads on the exclusive queue",
308 				    __func__, lk);
309 				wakeup_swapper =
310 				    sleepq_broadcast(&lk->lock_object,
311 				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
312 				queue = SQ_SHARED_QUEUE;
313 			}
314 
315 		} else {
316 
317 			/*
318 			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
319 			 * and using interruptible sleeps/timeout may have
320 			 * left spurious lk_exslpfail counts on, so clean
321 			 * it up anyway.
322 			 */
323 			lk->lk_exslpfail = 0;
324 			queue = SQ_SHARED_QUEUE;
325 		}
326 
327 		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
328 		    v)) {
329 			sleepq_release(&lk->lock_object);
330 			continue;
331 		}
332 		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
333 		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
334 		    "exclusive");
335 		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
336 		    0, queue);
337 		sleepq_release(&lk->lock_object);
338 		break;
339 	}
340 
341 	lock_profile_release_lock(&lk->lock_object);
342 	return (wakeup_swapper);
343 }
344 
345 static void
346 assert_lockmgr(const struct lock_object *lock, int what)
347 {
348 
349 	panic("lockmgr locks do not support assertions");
350 }
351 
352 static void
353 lock_lockmgr(struct lock_object *lock, int how)
354 {
355 
356 	panic("lockmgr locks do not support sleep interlocking");
357 }
358 
359 static int
360 unlock_lockmgr(struct lock_object *lock)
361 {
362 
363 	panic("lockmgr locks do not support sleep interlocking");
364 }
365 
366 #ifdef KDTRACE_HOOKS
367 static int
368 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
369 {
370 
371 	panic("lockmgr locks do not support owner inquiring");
372 }
373 #endif
374 
375 void
376 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
377 {
378 	int iflags;
379 
380 	MPASS((flags & ~LK_INIT_MASK) == 0);
381 	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
382             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
383             &lk->lk_lock));
384 
385 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
386 	if (flags & LK_CANRECURSE)
387 		iflags |= LO_RECURSABLE;
388 	if ((flags & LK_NODUP) == 0)
389 		iflags |= LO_DUPOK;
390 	if (flags & LK_NOPROFILE)
391 		iflags |= LO_NOPROFILE;
392 	if ((flags & LK_NOWITNESS) == 0)
393 		iflags |= LO_WITNESS;
394 	if (flags & LK_QUIET)
395 		iflags |= LO_QUIET;
396 	if (flags & LK_IS_VNODE)
397 		iflags |= LO_IS_VNODE;
398 	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
399 
400 	lk->lk_lock = LK_UNLOCKED;
401 	lk->lk_recurse = 0;
402 	lk->lk_exslpfail = 0;
403 	lk->lk_timo = timo;
404 	lk->lk_pri = pri;
405 	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
406 	STACK_ZERO(lk);
407 }
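
/*
 * A minimal usage sketch, not part of this file: "foo_softc", "foo_attach"
 * and "foo_detach" are hypothetical names.  It shows the lifecycle driven
 * by lockinit(), lockmgr() and lockdestroy(), relying on the priority and
 * timeout stored at initialization time for later acquisitions.
 */
#if 0
struct foo_softc {
	struct lock	sc_lock;
};

static void
foo_attach(struct foo_softc *sc)
{

	lockinit(&sc->sc_lock, PVFS, "foolck", 0, 0);
	lockmgr(&sc->sc_lock, LK_EXCLUSIVE, NULL);	/* may sleep */
	/* ... initialize fields protected by sc_lock ... */
	lockmgr(&sc->sc_lock, LK_RELEASE, NULL);
}

static void
foo_detach(struct foo_softc *sc)
{

	/* Must be unlocked and not recursed, or lockdestroy() asserts. */
	lockdestroy(&sc->sc_lock);
}
#endif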
408 
409 /*
410  * XXX: Gross hacks to manipulate external lock flags after
411  * initialization.  Used for certain vnode and buf locks.
412  */
413 void
414 lockallowshare(struct lock *lk)
415 {
416 
417 	lockmgr_assert(lk, KA_XLOCKED);
418 	lk->lock_object.lo_flags &= ~LK_NOSHARE;
419 }
420 
421 void
422 lockallowrecurse(struct lock *lk)
423 {
424 
425 	lockmgr_assert(lk, KA_XLOCKED);
426 	lk->lock_object.lo_flags |= LO_RECURSABLE;
427 }
428 
429 void
430 lockdisablerecurse(struct lock *lk)
431 {
432 
433 	lockmgr_assert(lk, KA_XLOCKED);
434 	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
435 }
436 
437 void
438 lockdestroy(struct lock *lk)
439 {
440 
441 	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
442 	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
443 	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
444 	lock_destroy(&lk->lock_object);
445 }
446 
447 int
448 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
449     const char *wmesg, int pri, int timo, const char *file, int line)
450 {
451 	GIANT_DECLARE;
452 	struct lock_class *class;
453 	const char *iwmesg;
454 	uintptr_t tid, v, x;
455 	u_int op, realexslp;
456 	int error, ipri, itimo, queue, wakeup_swapper;
457 #ifdef LOCK_PROFILING
458 	uint64_t waittime = 0;
459 	int contested = 0;
460 #endif
461 #ifdef ADAPTIVE_LOCKMGRS
462 	volatile struct thread *owner;
463 	u_int i, spintries = 0;
464 #endif
465 
466 	error = 0;
467 	tid = (uintptr_t)curthread;
468 	op = (flags & LK_TYPE_MASK);
469 	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
470 	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
471 	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
472 
473 	MPASS((flags & ~LK_TOTAL_MASK) == 0);
474 	KASSERT((op & (op - 1)) == 0,
475 	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
476 	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
477 	    (op != LK_DOWNGRADE && op != LK_RELEASE),
478 	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
479 	    __func__, file, line));
480 	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
481 	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
482 	    __func__, file, line));
483 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
484 	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
485 	    lk->lock_object.lo_name, file, line));
486 
487 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
488 	if (panicstr != NULL) {
489 		if (flags & LK_INTERLOCK)
490 			class->lc_unlock(ilk);
491 		return (0);
492 	}
493 
494 	if (lk->lock_object.lo_flags & LK_NOSHARE) {
495 		switch (op) {
496 		case LK_SHARED:
497 			op = LK_EXCLUSIVE;
498 			break;
499 		case LK_UPGRADE:
500 		case LK_DOWNGRADE:
501 			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
502 			    file, line);
503 			if (flags & LK_INTERLOCK)
504 				class->lc_unlock(ilk);
505 			return (0);
506 		}
507 	}
508 
509 	wakeup_swapper = 0;
510 	switch (op) {
511 	case LK_SHARED:
512 		if (LK_CAN_WITNESS(flags))
513 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
514 			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
515 		for (;;) {
516 			x = lk->lk_lock;
517 
518 			/*
519 			 * If no other thread has an exclusive lock, or
520 			 * no exclusive waiter is present, bump the count of
521 			 * sharers.  Since we have to preserve the state of
522 			 * waiters, if we fail to acquire the shared lock
523 			 * loop back and retry.
524 			 */
525 			if (LK_CAN_SHARE(x)) {
526 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
527 				    x + LK_ONE_SHARER))
528 					break;
529 				continue;
530 			}
531 #ifdef HWPMC_HOOKS
532 			PMC_SOFT_CALL( , , lock, failed);
533 #endif
534 			lock_profile_obtain_lock_failed(&lk->lock_object,
535 			    &contested, &waittime);
536 
537 			/*
538 			 * If the lock is already held by curthread in
539 			 * exclusive mode, fail with EDEADLK to avoid a deadlock.
540 			 */
541 			if (LK_HOLDER(x) == tid) {
542 				LOCK_LOG2(lk,
543 				    "%s: %p already held in exclusive mode",
544 				    __func__, lk);
545 				error = EDEADLK;
546 				break;
547 			}
548 
549 			/*
550 			 * If the operation is not allowed to sleep, just give
551 			 * up and return.
552 			 */
553 			if (LK_TRYOP(flags)) {
554 				LOCK_LOG2(lk, "%s: %p fails the try operation",
555 				    __func__, lk);
556 				error = EBUSY;
557 				break;
558 			}
559 
560 #ifdef ADAPTIVE_LOCKMGRS
561 			/*
562 			 * If the owner is running on another CPU, spin until
563 			 * the owner stops running or the state of the lock
564 			 * changes.  We need a double-state handle here
565 			 * changes.  We need to handle both states here
566 			 * because, for a failed acquisition, the lock can be
567 			 * held either in exclusive mode or in shared mode
568 			 * (due to the writer starvation avoidance technique).
569 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
570 			    LK_HOLDER(x) != LK_KERNPROC) {
571 				owner = (struct thread *)LK_HOLDER(x);
572 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
573 					CTR3(KTR_LOCK,
574 					    "%s: spinning on %p held by %p",
575 					    __func__, lk, owner);
576 
577 				/*
578 				 * If we are also holding an interlock, drop it
579 				 * in order to avoid a deadlock if the lockmgr
580 				 * owner is adaptively spinning on the
581 				 * interlock itself.
582 				 */
583 				if (flags & LK_INTERLOCK) {
584 					class->lc_unlock(ilk);
585 					flags &= ~LK_INTERLOCK;
586 				}
587 				GIANT_SAVE();
588 				while (LK_HOLDER(lk->lk_lock) ==
589 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
590 					cpu_spinwait();
591 				GIANT_RESTORE();
592 				continue;
593 			} else if (LK_CAN_ADAPT(lk, flags) &&
594 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
595 			    spintries < alk_retries) {
596 				if (flags & LK_INTERLOCK) {
597 					class->lc_unlock(ilk);
598 					flags &= ~LK_INTERLOCK;
599 				}
600 				GIANT_SAVE();
601 				spintries++;
602 				for (i = 0; i < alk_loops; i++) {
603 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
604 						CTR4(KTR_LOCK,
605 				    "%s: shared spinning on %p with %u and %u",
606 						    __func__, lk, spintries, i);
607 					x = lk->lk_lock;
608 					if ((x & LK_SHARE) == 0 ||
609 					    LK_CAN_SHARE(x) != 0)
610 						break;
611 					cpu_spinwait();
612 				}
613 				GIANT_RESTORE();
614 				if (i != alk_loops)
615 					continue;
616 			}
617 #endif
618 
619 			/*
620 			 * Acquire the sleepqueue chain lock because we
621 			 * will probably need to manipulate the waiters flags.
622 			 */
623 			sleepq_lock(&lk->lock_object);
624 			x = lk->lk_lock;
625 
626 			/*
627 			 * If the lock can be acquired in shared mode, try
628 			 * again.
629 			 */
630 			if (LK_CAN_SHARE(x)) {
631 				sleepq_release(&lk->lock_object);
632 				continue;
633 			}
634 
635 #ifdef ADAPTIVE_LOCKMGRS
636 			/*
637 			 * The current lock owner might have started executing
638 			 * on another CPU (or the lock could have changed
639 			 * owner) while we were waiting on the sleepqueue
640 			 * chain lock.  If so, drop the sleepqueue lock and try
641 			 * again.
642 			 */
643 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
644 			    LK_HOLDER(x) != LK_KERNPROC) {
645 				owner = (struct thread *)LK_HOLDER(x);
646 				if (TD_IS_RUNNING(owner)) {
647 					sleepq_release(&lk->lock_object);
648 					continue;
649 				}
650 			}
651 #endif
652 
653 			/*
654 			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
655 			 * loop back and retry.
656 			 */
657 			if ((x & LK_SHARED_WAITERS) == 0) {
658 				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
659 				    x | LK_SHARED_WAITERS)) {
660 					sleepq_release(&lk->lock_object);
661 					continue;
662 				}
663 				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
664 				    __func__, lk);
665 			}
666 
667 			/*
668 			 * Since we have been unable to acquire the
669 			 * shared lock and the shared waiters flag is set,
670 			 * we will sleep.
671 			 */
672 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
673 			    SQ_SHARED_QUEUE);
674 			flags &= ~LK_INTERLOCK;
675 			if (error) {
676 				LOCK_LOG3(lk,
677 				    "%s: interrupted sleep for %p with %d",
678 				    __func__, lk, error);
679 				break;
680 			}
681 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
682 			    __func__, lk);
683 		}
684 		if (error == 0) {
685 			lock_profile_obtain_lock_success(&lk->lock_object,
686 			    contested, waittime, file, line);
687 			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
688 			    line);
689 			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
690 			    line);
691 			TD_LOCKS_INC(curthread);
692 			TD_SLOCKS_INC(curthread);
693 			STACK_SAVE(lk);
694 		}
695 		break;
696 	case LK_UPGRADE:
697 		_lockmgr_assert(lk, KA_SLOCKED, file, line);
698 		v = lk->lk_lock;
699 		x = v & LK_ALL_WAITERS;
700 		v &= LK_EXCLUSIVE_SPINNERS;
701 
702 		/*
703 		 * Try to switch from one shared lock to an exclusive one.
704 		 * We need to preserve waiters flags during the operation.
705 		 */
706 		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
707 		    tid | x)) {
708 			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
709 			    line);
710 			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
711 			    LK_TRYWIT(flags), file, line);
712 			TD_SLOCKS_DEC(curthread);
713 			break;
714 		}
715 
716 		/*
717 		 * We have been unable to succeed in upgrading, so just
718 		 * give up the shared lock.
719 		 */
720 		wakeup_swapper |= wakeupshlk(lk, file, line);
721 
722 		/* FALLTHROUGH */
723 	case LK_EXCLUSIVE:
724 		if (LK_CAN_WITNESS(flags))
725 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
726 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
727 			    ilk : NULL);
728 
729 		/*
730 		 * If curthread already holds the lock and this one is
731 		 * allowed to recurse, simply recurse on it.
732 		 */
733 		if (lockmgr_xlocked(lk)) {
734 			if ((flags & LK_CANRECURSE) == 0 &&
735 			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
736 
737 				/*
738 				 * If the operation is not allowed to panic,
739 				 * just give up and return.
740 				 */
741 				if (LK_TRYOP(flags)) {
742 					LOCK_LOG2(lk,
743 					    "%s: %p fails the try operation",
744 					    __func__, lk);
745 					error = EBUSY;
746 					break;
747 				}
748 				if (flags & LK_INTERLOCK)
749 					class->lc_unlock(ilk);
750 		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
751 				    __func__, iwmesg, file, line);
752 			}
753 			lk->lk_recurse++;
754 			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
755 			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
756 			    lk->lk_recurse, file, line);
757 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
758 			    LK_TRYWIT(flags), file, line);
759 			TD_LOCKS_INC(curthread);
760 			break;
761 		}
762 
763 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
764 		    tid)) {
765 #ifdef HWPMC_HOOKS
766 			PMC_SOFT_CALL( , , lock, failed);
767 #endif
768 			lock_profile_obtain_lock_failed(&lk->lock_object,
769 			    &contested, &waittime);
770 
771 			/*
772 			 * If the operation is not allowed to sleep, just give
773 			 * up and return.
774 			 */
775 			if (LK_TRYOP(flags)) {
776 				LOCK_LOG2(lk, "%s: %p fails the try operation",
777 				    __func__, lk);
778 				error = EBUSY;
779 				break;
780 			}
781 
782 #ifdef ADAPTIVE_LOCKMGRS
783 			/*
784 			 * If the owner is running on another CPU, spin until
785 			 * the owner stops running or the state of the lock
786 			 * changes.
787 			 */
788 			x = lk->lk_lock;
789 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
790 			    LK_HOLDER(x) != LK_KERNPROC) {
791 				owner = (struct thread *)LK_HOLDER(x);
792 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
793 					CTR3(KTR_LOCK,
794 					    "%s: spinning on %p held by %p",
795 					    __func__, lk, owner);
796 
797 				/*
798 				 * If we are also holding an interlock, drop it
799 				 * in order to avoid a deadlock if the lockmgr
800 				 * owner is adaptively spinning on the
801 				 * interlock itself.
802 				 */
803 				if (flags & LK_INTERLOCK) {
804 					class->lc_unlock(ilk);
805 					flags &= ~LK_INTERLOCK;
806 				}
807 				GIANT_SAVE();
808 				while (LK_HOLDER(lk->lk_lock) ==
809 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
810 					cpu_spinwait();
811 				GIANT_RESTORE();
812 				continue;
813 			} else if (LK_CAN_ADAPT(lk, flags) &&
814 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
815 			    spintries < alk_retries) {
816 				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
817 				    !atomic_cmpset_ptr(&lk->lk_lock, x,
818 				    x | LK_EXCLUSIVE_SPINNERS))
819 					continue;
820 				if (flags & LK_INTERLOCK) {
821 					class->lc_unlock(ilk);
822 					flags &= ~LK_INTERLOCK;
823 				}
824 				GIANT_SAVE();
825 				spintries++;
826 				for (i = 0; i < alk_loops; i++) {
827 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
828 						CTR4(KTR_LOCK,
829 				    "%s: shared spinning on %p with %u and %u",
830 						    __func__, lk, spintries, i);
831 					if ((lk->lk_lock &
832 					    LK_EXCLUSIVE_SPINNERS) == 0)
833 						break;
834 					cpu_spinwait();
835 				}
836 				GIANT_RESTORE();
837 				if (i != alk_loops)
838 					continue;
839 			}
840 #endif
841 
842 			/*
843 			 * Acquire the sleepqueue chain lock because we
844 			 * will probably need to manipulate the waiters flags.
845 			 */
846 			sleepq_lock(&lk->lock_object);
847 			x = lk->lk_lock;
848 
849 			/*
850 			 * If the lock has been released while we spun on
851 			 * the sleepqueue chain lock just try again.
852 			 */
853 			if (x == LK_UNLOCKED) {
854 				sleepq_release(&lk->lock_object);
855 				continue;
856 			}
857 
858 #ifdef ADAPTIVE_LOCKMGRS
859 			/*
860 			 * The current lock owner might have started executing
861 			 * on another CPU (or the lock could have changed
862 			 * owner) while we were waiting on the sleepqueue
863 			 * chain lock.  If so, drop the sleepqueue lock and try
864 			 * again.
865 			 */
866 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
867 			    LK_HOLDER(x) != LK_KERNPROC) {
868 				owner = (struct thread *)LK_HOLDER(x);
869 				if (TD_IS_RUNNING(owner)) {
870 					sleepq_release(&lk->lock_object);
871 					continue;
872 				}
873 			}
874 #endif
875 
876 			/*
877 			 * The lock can be in the state where there is a
878 			 * pending queue of waiters, but still no owner.
879 			 * This happens when the lock is contested and a new
880 			 * owner has yet to claim it.
881 			 * If curthread is the one that successfully acquires
882 			 * the lock, claim ownership and return, preserving the
883 			 * waiters flags.
884 			 */
885 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
886 			if ((x & ~v) == LK_UNLOCKED) {
887 				v &= ~LK_EXCLUSIVE_SPINNERS;
888 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
889 				    tid | v)) {
890 					sleepq_release(&lk->lock_object);
891 					LOCK_LOG2(lk,
892 					    "%s: %p claimed by a new writer",
893 					    __func__, lk);
894 					break;
895 				}
896 				sleepq_release(&lk->lock_object);
897 				continue;
898 			}
899 
900 			/*
901 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
902 			 * fail, loop back and retry.
903 			 */
904 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
905 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
906 				    x | LK_EXCLUSIVE_WAITERS)) {
907 					sleepq_release(&lk->lock_object);
908 					continue;
909 				}
910 				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
911 				    __func__, lk);
912 			}
913 
914 			/*
915 			 * Since we have been unable to acquire the
916 			 * exclusive lock and the exclusive waiters flag
917 			 * is set, we will sleep.
918 			 */
919 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
920 			    SQ_EXCLUSIVE_QUEUE);
921 			flags &= ~LK_INTERLOCK;
922 			if (error) {
923 				LOCK_LOG3(lk,
924 				    "%s: interrupted sleep for %p with %d",
925 				    __func__, lk, error);
926 				break;
927 			}
928 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
929 			    __func__, lk);
930 		}
931 		if (error == 0) {
932 			lock_profile_obtain_lock_success(&lk->lock_object,
933 			    contested, waittime, file, line);
934 			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
935 			    lk->lk_recurse, file, line);
936 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
937 			    LK_TRYWIT(flags), file, line);
938 			TD_LOCKS_INC(curthread);
939 			STACK_SAVE(lk);
940 		}
941 		break;
942 	case LK_DOWNGRADE:
943 		_lockmgr_assert(lk, KA_XLOCKED, file, line);
944 		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
945 		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
946 
947 		/*
948 		 * Panic if the lock is recursed.
949 		 */
950 		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
951 			if (flags & LK_INTERLOCK)
952 				class->lc_unlock(ilk);
953 			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
954 			    __func__, iwmesg, file, line);
955 		}
956 		TD_SLOCKS_INC(curthread);
957 
958 		/*
959 		 * In order to preserve waiters flags, just spin.
960 		 */
961 		for (;;) {
962 			x = lk->lk_lock;
963 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
964 			x &= LK_ALL_WAITERS;
965 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
966 			    LK_SHARERS_LOCK(1) | x))
967 				break;
968 			cpu_spinwait();
969 		}
970 		break;
971 	case LK_RELEASE:
972 		_lockmgr_assert(lk, KA_LOCKED, file, line);
973 		x = lk->lk_lock;
974 
975 		if ((x & LK_SHARE) == 0) {
976 
977 			/*
978 			 * As a first option, treat the lock as if it had no
979 			 * waiters.
980 			 * Fix up the tid variable if the lock has been disowned.
981 			 */
982 			if (LK_HOLDER(x) == LK_KERNPROC)
983 				tid = LK_KERNPROC;
984 			else {
985 				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
986 				    file, line);
987 				TD_LOCKS_DEC(curthread);
988 			}
989 			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
990 			    lk->lk_recurse, file, line);
991 
992 			/*
993 			 * The lock is held in exclusive mode.
994 			 * If the lock is also recursed, then unrecurse it.
995 			 */
996 			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
997 				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
998 				    lk);
999 				lk->lk_recurse--;
1000 				break;
1001 			}
1002 			if (tid != LK_KERNPROC)
1003 				lock_profile_release_lock(&lk->lock_object);
1004 
1005 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
1006 			    LK_UNLOCKED))
1007 				break;
1008 
1009 			sleepq_lock(&lk->lock_object);
1010 			x = lk->lk_lock;
1011 			v = LK_UNLOCKED;
1012 
1013 			/*
1014 			 * If the lock has exclusive waiters, give them
1015 			 * preference in order to avoid deadlock with the
1016 			 * shared runners-up.
1017 			 * If interruptible sleeps left the exclusive queue
1018 			 * empty, avoid starvation of the threads sleeping
1019 			 * on the shared queue by giving them precedence
1020 			 * and clearing the exclusive waiters bit anyway.
1021 			 * Please note that lk_exslpfail may not reflect the
1022 			 * real number of waiters with the LK_SLEEPFAIL flag,
1023 			 * because those waiters may also be using
1024 			 * interruptible sleeps; lk_exslpfail should thus be
1025 			 * treated as an upper bound, including the edge
1026 			 * cases.
1027 			 */
1028 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1029 			realexslp = sleepq_sleepcnt(&lk->lock_object,
1030 			    SQ_EXCLUSIVE_QUEUE);
1031 			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1032 				if (lk->lk_exslpfail < realexslp) {
1033 					lk->lk_exslpfail = 0;
1034 					queue = SQ_EXCLUSIVE_QUEUE;
1035 					v |= (x & LK_SHARED_WAITERS);
1036 				} else {
1037 					lk->lk_exslpfail = 0;
1038 					LOCK_LOG2(lk,
1039 					"%s: %p has only LK_SLEEPFAIL sleepers",
1040 					    __func__, lk);
1041 					LOCK_LOG2(lk,
1042 			"%s: %p waking up threads on the exclusive queue",
1043 					    __func__, lk);
1044 					wakeup_swapper =
1045 					    sleepq_broadcast(&lk->lock_object,
1046 					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1047 					queue = SQ_SHARED_QUEUE;
1048 				}
1049 			} else {
1050 
1051 				/*
1052 				 * Exclusive waiters sleeping with LK_SLEEPFAIL
1053 				 * on and using interruptible sleeps/timeout
1054 				 * may have left spurious lk_exslpfail counts
1055 				 * on, so clean it up anyway.
1056 				 */
1057 				lk->lk_exslpfail = 0;
1058 				queue = SQ_SHARED_QUEUE;
1059 			}
1060 
1061 			LOCK_LOG3(lk,
1062 			    "%s: %p waking up threads on the %s queue",
1063 			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1064 			    "exclusive");
1065 			atomic_store_rel_ptr(&lk->lk_lock, v);
1066 			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
1067 			    SLEEPQ_LK, 0, queue);
1068 			sleepq_release(&lk->lock_object);
1069 			break;
1070 		} else
1071 			wakeup_swapper = wakeupshlk(lk, file, line);
1072 		break;
1073 	case LK_DRAIN:
1074 		if (LK_CAN_WITNESS(flags))
1075 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1076 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1077 			    ilk : NULL);
1078 
1079 		/*
1080 		 * Trying to drain a lock we already own will result in a
1081 		 * deadlock.
1082 		 */
1083 		if (lockmgr_xlocked(lk)) {
1084 			if (flags & LK_INTERLOCK)
1085 				class->lc_unlock(ilk);
1086 			panic("%s: draining %s with the lock held @ %s:%d\n",
1087 			    __func__, iwmesg, file, line);
1088 		}
1089 
1090 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1091 #ifdef HWPMC_HOOKS
1092 			PMC_SOFT_CALL( , , lock, failed);
1093 #endif
1094 			lock_profile_obtain_lock_failed(&lk->lock_object,
1095 			    &contested, &waittime);
1096 
1097 			/*
1098 			 * If the operation is not allowed to sleep, just give
1099 			 * up and return.
1100 			 */
1101 			if (LK_TRYOP(flags)) {
1102 				LOCK_LOG2(lk, "%s: %p fails the try operation",
1103 				    __func__, lk);
1104 				error = EBUSY;
1105 				break;
1106 			}
1107 
1108 			/*
1109 			 * Acquire the sleepqueue chain lock because we
1110 			 * will probably need to manipulate the waiters flags.
1111 			 */
1112 			sleepq_lock(&lk->lock_object);
1113 			x = lk->lk_lock;
1114 
1115 			/*
1116 			 * If the lock has been released while we spun on
1117 			 * the sleepqueue chain lock just try again.
1118 			 */
1119 			if (x == LK_UNLOCKED) {
1120 				sleepq_release(&lk->lock_object);
1121 				continue;
1122 			}
1123 
1124 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1125 			if ((x & ~v) == LK_UNLOCKED) {
1126 				v = (x & ~LK_EXCLUSIVE_SPINNERS);
1127 
1128 				/*
1129 				 * If interruptible sleeps left the exclusive
1130 				 * queue empty, avoid starvation of the
1131 				 * threads sleeping on the shared queue by
1132 				 * giving them precedence and clearing the
1133 				 * exclusive waiters bit anyway.
1134 				 * Please note that lk_exslpfail may not
1135 				 * reflect the real number of waiters with
1136 				 * the LK_SLEEPFAIL flag, because those
1137 				 * waiters may also be using interruptible
1138 				 * sleeps; lk_exslpfail should thus be
1139 				 * treated as an upper bound, including the
1140 				 * edge cases.
1141 				 */
1142 				if (v & LK_EXCLUSIVE_WAITERS) {
1143 					queue = SQ_EXCLUSIVE_QUEUE;
1144 					v &= ~LK_EXCLUSIVE_WAITERS;
1145 				} else {
1146 
1147 					/*
1148 					 * Exclusive waiters sleeping with
1149 					 * LK_SLEEPFAIL on and using
1150 					 * interruptible sleeps/timeout may
1151 					 * have left spurious lk_exslpfail
1152 					 * counts on, so clean it up anyway.
1153 					 */
1154 					MPASS(v & LK_SHARED_WAITERS);
1155 					lk->lk_exslpfail = 0;
1156 					queue = SQ_SHARED_QUEUE;
1157 					v &= ~LK_SHARED_WAITERS;
1158 				}
1159 				if (queue == SQ_EXCLUSIVE_QUEUE) {
1160 					realexslp =
1161 					    sleepq_sleepcnt(&lk->lock_object,
1162 					    SQ_EXCLUSIVE_QUEUE);
1163 					if (lk->lk_exslpfail >= realexslp) {
1164 						lk->lk_exslpfail = 0;
1165 						queue = SQ_SHARED_QUEUE;
1166 						v &= ~LK_SHARED_WAITERS;
1167 						if (realexslp != 0) {
1168 							LOCK_LOG2(lk,
1169 					"%s: %p has only LK_SLEEPFAIL sleepers",
1170 							    __func__, lk);
1171 							LOCK_LOG2(lk,
1172 			"%s: %p waking up threads on the exclusive queue",
1173 							    __func__, lk);
1174 							wakeup_swapper =
1175 							    sleepq_broadcast(
1176 							    &lk->lock_object,
1177 							    SLEEPQ_LK, 0,
1178 							    SQ_EXCLUSIVE_QUEUE);
1179 						}
1180 					} else
1181 						lk->lk_exslpfail = 0;
1182 				}
1183 				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1184 					sleepq_release(&lk->lock_object);
1185 					continue;
1186 				}
1187 				LOCK_LOG3(lk,
1188 				"%s: %p waking up all threads on the %s queue",
1189 				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1190 				    "shared" : "exclusive");
1191 				wakeup_swapper |= sleepq_broadcast(
1192 				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1193 
1194 				/*
1195 				 * If shared waiters have been woken up we need
1196 				 * to wait for one of them to acquire the lock
1197 				 * before setting the exclusive waiters flag in
1198 				 * order to avoid a deadlock.
1199 				 */
1200 				if (queue == SQ_SHARED_QUEUE) {
1201 					for (v = lk->lk_lock;
1202 					    (v & LK_SHARE) && !LK_SHARERS(v);
1203 					    v = lk->lk_lock)
1204 						cpu_spinwait();
1205 				}
1206 			}
1207 
1208 			/*
1209 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1210 			 * fail, loop back and retry.
1211 			 */
1212 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1213 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1214 				    x | LK_EXCLUSIVE_WAITERS)) {
1215 					sleepq_release(&lk->lock_object);
1216 					continue;
1217 				}
1218 				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1219 				    __func__, lk);
1220 			}
1221 
1222 			/*
1223 			 * Since we have been unable to acquire the
1224 			 * exclusive lock and the exclusive waiters flag
1225 			 * is set, we will sleep.
1226 			 */
1227 			if (flags & LK_INTERLOCK) {
1228 				class->lc_unlock(ilk);
1229 				flags &= ~LK_INTERLOCK;
1230 			}
1231 			GIANT_SAVE();
1232 			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1233 			    SQ_EXCLUSIVE_QUEUE);
1234 			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1235 			GIANT_RESTORE();
1236 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1237 			    __func__, lk);
1238 		}
1239 
1240 		if (error == 0) {
1241 			lock_profile_obtain_lock_success(&lk->lock_object,
1242 			    contested, waittime, file, line);
1243 			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1244 			    lk->lk_recurse, file, line);
1245 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1246 			    LK_TRYWIT(flags), file, line);
1247 			TD_LOCKS_INC(curthread);
1248 			STACK_SAVE(lk);
1249 		}
1250 		break;
1251 	default:
1252 		if (flags & LK_INTERLOCK)
1253 			class->lc_unlock(ilk);
1254 		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1255 	}
1256 
1257 	if (flags & LK_INTERLOCK)
1258 		class->lc_unlock(ilk);
1259 	if (wakeup_swapper)
1260 		kick_proc0();
1261 
1262 	return (error);
1263 }
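
/*
 * A minimal LK_INTERLOCK sketch (illustrative only; "foo_mtx", "foo_lock"
 * and "foo_enter" are hypothetical).  The interlock lets a caller close
 * the race between checking some state under a mutex and sleeping for the
 * lockmgr lock: __lockmgr_args() drops the interlock before sleeping and,
 * as the end of the function above shows, it is always released before
 * returning, even when the request fails or does not sleep.
 */
#if 0
static struct mtx foo_mtx;	/* assumed initialized with mtx_init() */
static struct lock foo_lock;	/* assumed initialized with lockinit() */

static int
foo_enter(void)
{
	int error;

	mtx_lock(&foo_mtx);
	/* ... examine state protected by foo_mtx ... */
	error = lockmgr(&foo_lock, LK_EXCLUSIVE | LK_INTERLOCK,
	    &foo_mtx.lock_object);
	/* foo_mtx has been dropped by lockmgr(), whatever the outcome. */
	return (error);
}
#endif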
1264 
1265 void
1266 _lockmgr_disown(struct lock *lk, const char *file, int line)
1267 {
1268 	uintptr_t tid, x;
1269 
1270 	if (SCHEDULER_STOPPED())
1271 		return;
1272 
1273 	tid = (uintptr_t)curthread;
1274 	_lockmgr_assert(lk, KA_XLOCKED, file, line);
1275 
1276 	/*
1277 	 * Panic if the lock is recursed.
1278 	 */
1279 	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1280 		panic("%s: disown a recursed lockmgr @ %s:%d\n",
1281 		    __func__,  file, line);
1282 
1283 	/*
1284 	 * If the owner is already LK_KERNPROC just skip the whole operation.
1285 	 */
1286 	if (LK_HOLDER(lk->lk_lock) != tid)
1287 		return;
1288 	lock_profile_release_lock(&lk->lock_object);
1289 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1290 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1291 	TD_LOCKS_DEC(curthread);
1292 	STACK_SAVE(lk);
1293 
1294 	/*
1295 	 * In order to preserve waiters flags, just spin.
1296 	 */
1297 	for (;;) {
1298 		x = lk->lk_lock;
1299 		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1300 		x &= LK_ALL_WAITERS;
1301 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1302 		    LK_KERNPROC | x))
1303 			return;
1304 		cpu_spinwait();
1305 	}
1306 }
1307 
1308 void
1309 lockmgr_printinfo(const struct lock *lk)
1310 {
1311 	struct thread *td;
1312 	uintptr_t x;
1313 
1314 	if (lk->lk_lock == LK_UNLOCKED)
1315 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1316 	else if (lk->lk_lock & LK_SHARE)
1317 		printf("lock type %s: SHARED (count %ju)\n",
1318 		    lk->lock_object.lo_name,
1319 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1320 	else {
1321 		td = lockmgr_xholder(lk);
1322 		printf("lock type %s: EXCL by thread %p "
1323 		    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, td,
1324 		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_tid);
1325 	}
1326 
1327 	x = lk->lk_lock;
1328 	if (x & LK_EXCLUSIVE_WAITERS)
1329 		printf(" with exclusive waiters pending\n");
1330 	if (x & LK_SHARED_WAITERS)
1331 		printf(" with shared waiters pending\n");
1332 	if (x & LK_EXCLUSIVE_SPINNERS)
1333 		printf(" with exclusive spinners pending\n");
1334 
1335 	STACK_PRINT(lk);
1336 }
1337 
1338 int
1339 lockstatus(const struct lock *lk)
1340 {
1341 	uintptr_t v, x;
1342 	int ret;
1343 
1344 	ret = LK_SHARED;
1345 	x = lk->lk_lock;
1346 	v = LK_HOLDER(x);
1347 
1348 	if ((x & LK_SHARE) == 0) {
1349 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1350 			ret = LK_EXCLUSIVE;
1351 		else
1352 			ret = LK_EXCLOTHER;
1353 	} else if (x == LK_UNLOCKED)
1354 		ret = 0;
1355 
1356 	return (ret);
1357 }
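
/*
 * An illustrative lockstatus() check ("foo_lock" and "foo_lock_is_mine_or_shared"
 * are hypothetical names).  LK_EXCLUSIVE is reported only when curthread
 * (or LK_KERNPROC, for a disowned lock) is the owner, LK_EXCLOTHER when
 * some other thread owns the lock exclusively, LK_SHARED when the lock is
 * share-locked by some set of threads, and 0 when it is unlocked.
 */
#if 0
static struct lock foo_lock;

static int
foo_lock_is_mine_or_shared(void)
{
	int status;

	status = lockstatus(&foo_lock);
	return (status == LK_EXCLUSIVE || status == LK_SHARED);
}
#endif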
1358 
1359 #ifdef INVARIANT_SUPPORT
1360 
1361 FEATURE(invariant_support,
1362     "Support for modules compiled with INVARIANTS option");
1363 
1364 #ifndef INVARIANTS
1365 #undef	_lockmgr_assert
1366 #endif
1367 
1368 void
1369 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1370 {
1371 	int slocked = 0;
1372 
1373 	if (panicstr != NULL)
1374 		return;
1375 	switch (what) {
1376 	case KA_SLOCKED:
1377 	case KA_SLOCKED | KA_NOTRECURSED:
1378 	case KA_SLOCKED | KA_RECURSED:
1379 		slocked = 1;
1380 	case KA_LOCKED:
1381 	case KA_LOCKED | KA_NOTRECURSED:
1382 	case KA_LOCKED | KA_RECURSED:
1383 #ifdef WITNESS
1384 
1385 		/*
1386 		 * We cannot trust WITNESS if the lock is held in exclusive
1387 		 * mode and a call to lockmgr_disown() happened.
1388 		 * Work around this by skipping the check if the lock is held in
1389 		 * exclusive mode even for the KA_LOCKED case.
1390 		 */
1391 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1392 			witness_assert(&lk->lock_object, what, file, line);
1393 			break;
1394 		}
1395 #endif
1396 		if (lk->lk_lock == LK_UNLOCKED ||
1397 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1398 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1399 			panic("Lock %s not %slocked @ %s:%d\n",
1400 			    lk->lock_object.lo_name, slocked ? "share" : "",
1401 			    file, line);
1402 
1403 		if ((lk->lk_lock & LK_SHARE) == 0) {
1404 			if (lockmgr_recursed(lk)) {
1405 				if (what & KA_NOTRECURSED)
1406 					panic("Lock %s recursed @ %s:%d\n",
1407 					    lk->lock_object.lo_name, file,
1408 					    line);
1409 			} else if (what & KA_RECURSED)
1410 				panic("Lock %s not recursed @ %s:%d\n",
1411 				    lk->lock_object.lo_name, file, line);
1412 		}
1413 		break;
1414 	case KA_XLOCKED:
1415 	case KA_XLOCKED | KA_NOTRECURSED:
1416 	case KA_XLOCKED | KA_RECURSED:
1417 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1418 			panic("Lock %s not exclusively locked @ %s:%d\n",
1419 			    lk->lock_object.lo_name, file, line);
1420 		if (lockmgr_recursed(lk)) {
1421 			if (what & KA_NOTRECURSED)
1422 				panic("Lock %s recursed @ %s:%d\n",
1423 				    lk->lock_object.lo_name, file, line);
1424 		} else if (what & KA_RECURSED)
1425 			panic("Lock %s not recursed @ %s:%d\n",
1426 			    lk->lock_object.lo_name, file, line);
1427 		break;
1428 	case KA_UNLOCKED:
1429 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1430 			panic("Lock %s exclusively locked @ %s:%d\n",
1431 			    lk->lock_object.lo_name, file, line);
1432 		break;
1433 	default:
1434 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1435 		    line);
1436 	}
1437 }
1438 #endif
1439 
1440 #ifdef DDB
1441 int
1442 lockmgr_chain(struct thread *td, struct thread **ownerp)
1443 {
1444 	struct lock *lk;
1445 
1446 	lk = td->td_wchan;
1447 
1448 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1449 		return (0);
1450 	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1451 	if (lk->lk_lock & LK_SHARE)
1452 		db_printf("SHARED (count %ju)\n",
1453 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1454 	else
1455 		db_printf("EXCL\n");
1456 	*ownerp = lockmgr_xholder(lk);
1457 
1458 	return (1);
1459 }
1460 
1461 static void
1462 db_show_lockmgr(const struct lock_object *lock)
1463 {
1464 	struct thread *td;
1465 	const struct lock *lk;
1466 
1467 	lk = (const struct lock *)lock;
1468 
1469 	db_printf(" state: ");
1470 	if (lk->lk_lock == LK_UNLOCKED)
1471 		db_printf("UNLOCKED\n");
1472 	else if (lk->lk_lock & LK_SHARE)
1473 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1474 	else {
1475 		td = lockmgr_xholder(lk);
1476 		if (td == (struct thread *)LK_KERNPROC)
1477 			db_printf("XLOCK: LK_KERNPROC\n");
1478 		else
1479 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1480 			    td->td_tid, td->td_proc->p_pid,
1481 			    td->td_proc->p_comm);
1482 		if (lockmgr_recursed(lk))
1483 			db_printf(" recursed: %d\n", lk->lk_recurse);
1484 	}
1485 	db_printf(" waiters: ");
1486 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1487 	case LK_SHARED_WAITERS:
1488 		db_printf("shared\n");
1489 		break;
1490 	case LK_EXCLUSIVE_WAITERS:
1491 		db_printf("exclusive\n");
1492 		break;
1493 	case LK_ALL_WAITERS:
1494 		db_printf("shared and exclusive\n");
1495 		break;
1496 	default:
1497 		db_printf("none\n");
1498 	}
1499 	db_printf(" spinners: ");
1500 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1501 		db_printf("exclusive\n");
1502 	else
1503 		db_printf("none\n");
1504 }
1505 #endif
1506