xref: /freebsd/sys/kern/kern_lock.c (revision 955c8cbb4960e6cf3602de144b1b9154a5092968)
/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

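/*
 * An illustrative sketch (not compiled here) of how the Giant handling
 * macros above are meant to be combined around a sleeping point, as done
 * in sleeplk() below: a thread holding Giant (possibly recursively) fully
 * drops it before sleeping and regains the same recursion depth afterwards.
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();
 *	sleepq_wait(&lk->lock_object, pri);
 *	GIANT_RESTORE();
 */
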
#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

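/*
 * Informal sketch of the lk_lock word consumed by the macros above (see
 * sys/lockmgr.h for the authoritative layout): the low bits carry the
 * LK_SHARE and waiters/spinners flags, while the remaining bits hold
 * either the sharers count or the owning thread pointer.
 *
 *	x = lk->lk_lock;
 *	if (x & LK_SHARE)
 *		sharers = LK_SHARERS(x);
 *	else
 *		owner = (struct thread *)LK_HOLDER(x);
 */
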
static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
    "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif

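/*
 * With ADAPTIVE_LOCKMGRS compiled in, the adaptive spinning bounds above
 * can be tuned at runtime through the sysctl knobs, for example:
 *
 *	# sysctl debug.lockmgr.retries=16
 *	# sysctl debug.lockmgr.loops=20000
 */
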
static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * This function assumes the sleepq chain lock is held on entry and
 * returns with it released.  It also assumes the generic interlock is
 * sane and has been previously checked.  If LK_INTERLOCK is specified,
 * the interlock is not reacquired after the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decide which sleepq_*() primitive to use for the real sleep.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}

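/*
 * A minimal sketch of the calling protocol expected by sleeplk(), as
 * followed by the waiters paths in __lockmgr_args() below (the recheck
 * step is abbreviated):
 *
 *	sleepq_lock(&lk->lock_object);
 *	x = lk->lk_lock;
 *	... recheck the state and set the proper waiters flag ...
 *	error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
 *	    SQ_SHARED_QUEUE);
 *
 * The sleepqueue chain lock is released by sleeplk() itself.
 */
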
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	u_int realexslp;
	int queue, wakeup_swapper;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starvation of the threads sleeping on the shared
		 * queue by giving them precedence and cleaning up the
		 * exclusive waiters bit anyway.
		 * Please note that the lk_exslpfail count may be inaccurate
		 * about the real number of waiters with the LK_SLEEPFAIL
		 * flag set, because such waiters may also be using
		 * interruptible sleeps; lk_exslpfail should therefore be
		 * treated as an upper bound, including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}

		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * it up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	return (wakeup_swapper);
}

static void
assert_lockmgr(const struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner queries");
}
#endif

void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
	    &lk->lk_lock));

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_exslpfail = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	STACK_ZERO(lk);
}

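/*
 * An illustrative consumer of the initialization API (the lock name and
 * flags are arbitrary):
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, LK_CANRECURSE);
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&lk, LK_RELEASE, NULL);
 *	lockdestroy(&lk);
 */
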
/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}

int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags for the requested operation @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
	    lk->lock_object.lo_name, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		switch (op) {
		case LK_SHARED:
			op = LK_EXCLUSIVE;
			break;
		case LK_UPGRADE:
		case LK_DOWNGRADE:
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			    file, line);
			return (0);
		}
	}

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, ilk);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the operation is not allowed to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  We need to handle two states here
			 * because, for a failed acquisition, the lock can be
			 * either held in exclusive mode or shared mode
			 * (for the writer starvation avoidance technique).
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x) != 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and try
			 * again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * We failed to upgrade, so just give up the shared lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * If this is a try operation, just give up
				 * and return instead of panicking.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation is not allowed to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
			    "%s: exclusive spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and try
			 * again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread succeeds in acquiring it, claim lock
			 * ownership and return, preserving the waiters
			 * flags.
			 */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

		/*
		 * Panic if the lock is recursed.
		 */
		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it has no
			 * waiters.
			 * Fix up the tid var if the lock has been disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is recursed also, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners up.
			 * If interruptible sleeps left the exclusive queue
			 * empty, avoid starvation of the threads sleeping
			 * on the shared queue by giving them precedence
			 * and cleaning up the exclusive waiters bit anyway.
			 * Please note that the lk_exslpfail count may be
			 * inaccurate about the real number of waiters with
			 * the LK_SLEEPFAIL flag set, because such waiters
			 * may also be using interruptible sleeps;
			 * lk_exslpfail should therefore be treated as an
			 * upper bound, including the edge cases.
			 */
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			realexslp = sleepq_sleepcnt(&lk->lock_object,
			    SQ_EXCLUSIVE_QUEUE);
			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
				if (lk->lk_exslpfail < realexslp) {
					lk->lk_exslpfail = 0;
					queue = SQ_EXCLUSIVE_QUEUE;
					v |= (x & LK_SHARED_WAITERS);
				} else {
					lk->lk_exslpfail = 0;
					LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
					    __func__, lk);
					LOCK_LOG2(lk,
			"%s: %p waking up threads on the exclusive queue",
					    __func__, lk);
					wakeup_swapper =
					    sleepq_broadcast(&lk->lock_object,
					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
					queue = SQ_SHARED_QUEUE;
				}
			} else {

				/*
				 * Exclusive waiters sleeping with LK_SLEEPFAIL
				 * on and using interruptible sleeps/timeout
				 * may have left spurious lk_exslpfail counts
				 * on, so clean it up anyway.
				 */
				lk->lk_exslpfail = 0;
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation is not allowed to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				/*
				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starvation of the
				 * threads sleeping on the shared queue by
				 * giving them precedence and cleaning up the
				 * exclusive waiters bit anyway.
				 * Please note that the lk_exslpfail count may
				 * be inaccurate about the real number of
				 * waiters with the LK_SLEEPFAIL flag set,
				 * because such waiters may also be using
				 * interruptible sleeps; lk_exslpfail should
				 * therefore be treated as an upper bound,
				 * including the edge cases.
				 */
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {

					/*
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts on, so clean it up anyway.
					 */
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					realexslp =
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
							    __func__, lk);
							LOCK_LOG2(lk,
			"%s: %p waking up threads on the exclusive queue",
							    __func__, lk);
							wakeup_swapper =
							    sleepq_broadcast(
							    &lk->lock_object,
							    SLEEPQ_LK, 0,
							    SQ_EXCLUSIVE_QUEUE);
						}
					} else
						lk->lk_exslpfail = 0;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up, we
				 * need to wait for one of them to acquire
				 * the lock before setting the exclusive
				 * waiters flag, in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}

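/*
 * An illustrative interlock hand-off in the style of the vnode code (the
 * exact combination shown is hypothetical):
 *
 *	mtx_lock(VI_MTX(vp));
 *	error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_INTERLOCK,
 *	    VI_MTX(vp));
 *
 * With LK_INTERLOCK set, __lockmgr_args() guarantees the interlock is
 * dropped whether or not the operation succeeds.
 */
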
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED, file, line);

	/*
	 * Panic if the lock is recursed.
	 */
	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
		panic("%s: disown a recursed lockmgr @ %s:%d\n",
		    __func__, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
	STACK_SAVE(lk);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock;
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}

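/*
 * Disowning supports hand-off patterns where one thread acquires the lock
 * and another releases it (buffers passed to the I/O subsystem, for
 * instance).  A minimal sketch:
 *
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	lockmgr_disown(&lk);
 *	...
 *	lockmgr(&lk, LK_RELEASE, NULL);		(possibly another thread)
 */
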
void
lockmgr_printinfo(const struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		printf("lock type %s: EXCL by thread %p "
		    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, td,
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_tid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

	STACK_PRINT(lk);
}

int
lockstatus(const struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}

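/*
 * lockstatus() is typically consumed by assertion-style checks; a sketch
 * in the style of the vnode code (the message is illustrative):
 *
 *	KASSERT(lockstatus(&vp->v_lock) == LK_EXCLUSIVE,
 *	    ("vnode lock not exclusively held"));
 */
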
#ifdef INVARIANT_SUPPORT

FEATURE(invariant_support,
    "Support for modules compiled with INVARIANTS option");

#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif

void
_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
		/* FALLTHROUGH */
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is
		 * held in exclusive mode, even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif

#ifdef DDB
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}

static void
db_show_lockmgr(const struct lock_object *lock)
{
	struct thread *td;
	const struct lock *lk;

	lk = (const struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
	else
		db_printf("none\n");
}
#endif