xref: /freebsd/sys/kern/kern_lock.c (revision 36daf0495aa68d669ac6abf004940ec1b1e83e42)
1 /*-
2  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice(s), this list of conditions and the following disclaimer as
10  *    the first lines of this file unmodified other than the possible
11  *    addition of one or more copyright notices.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice(s), this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26  * DAMAGE.
27  */
28 
29 #include "opt_adaptive_lockmgrs.h"
30 #include "opt_ddb.h"
31 #include "opt_kdtrace.h"
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/ktr.h>
38 #include <sys/lock.h>
39 #include <sys/lock_profile.h>
40 #include <sys/lockmgr.h>
41 #include <sys/mutex.h>
42 #include <sys/proc.h>
43 #include <sys/sleepqueue.h>
44 #ifdef DEBUG_LOCKS
45 #include <sys/stack.h>
46 #endif
47 #include <sys/sysctl.h>
48 #include <sys/systm.h>
49 
50 #include <machine/cpu.h>
51 
52 #ifdef DDB
53 #include <ddb/ddb.h>
54 #endif
55 
56 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
57     (LK_ADAPTIVE | LK_NOSHARE));
58 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
59     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
60 
61 #define	SQ_EXCLUSIVE_QUEUE	0
62 #define	SQ_SHARED_QUEUE		1
63 
64 #ifndef INVARIANTS
65 #define	_lockmgr_assert(lk, what, file, line)
66 #define	TD_LOCKS_INC(td)
67 #define	TD_LOCKS_DEC(td)
68 #else
69 #define	TD_LOCKS_INC(td)	((td)->td_locks++)
70 #define	TD_LOCKS_DEC(td)	((td)->td_locks--)
71 #endif
72 #define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
73 #define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
74 
75 #ifndef DEBUG_LOCKS
76 #define	STACK_PRINT(lk)
77 #define	STACK_SAVE(lk)
78 #define	STACK_ZERO(lk)
79 #else
80 #define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
81 #define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
82 #define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
83 #endif
84 
85 #define	LOCK_LOG2(lk, string, arg1, arg2)				\
86 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
87 		CTR2(KTR_LOCK, (string), (arg1), (arg2))
88 #define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
89 	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
90 		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
91 
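/*
 * Giant handling: GIANT_SAVE() fully drops Giant if curthread owns it,
 * recording the recursion depth in the local _i declared by GIANT_DECLARE,
 * and GIANT_RESTORE() reacquires it the same number of times.  This lets
 * the code below sleep or spin without holding Giant.
 */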
92 #define	GIANT_DECLARE							\
93 	int _i = 0;							\
94 	WITNESS_SAVE_DECL(Giant)
95 #define	GIANT_RESTORE() do {						\
96 	if (_i > 0) {							\
97 		while (_i--)						\
98 			mtx_lock(&Giant);				\
99 		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
100 	}								\
101 } while (0)
102 #define	GIANT_SAVE() do {						\
103 	if (mtx_owned(&Giant)) {					\
104 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
105 		while (mtx_owned(&Giant)) {				\
106 			_i++;						\
107 			mtx_unlock(&Giant);				\
108 		}							\
109 	}								\
110 } while (0)
111 
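/*
 * LK_CAN_SHARE() is true when a new shared request may be granted: the
 * LK_SHARE bit must be set (unlocked or share-locked) and either there are
 * no exclusive waiters, or no exclusive spinners, or the requesting thread
 * already holds shared lockmgr locks, or it has TDP_DEADLKTREAT set.
 */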
112 #define	LK_CAN_SHARE(x)							\
113 	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
114 	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
115 	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
116 #define	LK_TRYOP(x)							\
117 	((x) & LK_NOWAIT)
118 
119 #define	LK_CAN_WITNESS(x)						\
120 	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
121 #define	LK_TRYWIT(x)							\
122 	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
123 
124 #define	LK_CAN_ADAPT(lk, f)						\
125 	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
126 	((f) & LK_SLEEPFAIL) == 0)
127 
128 #define	lockmgr_disowned(lk)						\
129 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
130 
131 #define	lockmgr_xlocked(lk)						\
132 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
133 
134 static void	 assert_lockmgr(struct lock_object *lock, int how);
135 #ifdef DDB
136 static void	 db_show_lockmgr(struct lock_object *lock);
137 #endif
138 static void	 lock_lockmgr(struct lock_object *lock, int how);
139 #ifdef KDTRACE_HOOKS
140 static int	 owner_lockmgr(struct lock_object *lock, struct thread **owner);
141 #endif
142 static int	 unlock_lockmgr(struct lock_object *lock);
143 
144 struct lock_class lock_class_lockmgr = {
145 	.lc_name = "lockmgr",
146 	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
147 	.lc_assert = assert_lockmgr,
148 #ifdef DDB
149 	.lc_ddb_show = db_show_lockmgr,
150 #endif
151 	.lc_lock = lock_lockmgr,
152 	.lc_unlock = unlock_lockmgr,
153 #ifdef KDTRACE_HOOKS
154 	.lc_owner = owner_lockmgr,
155 #endif
156 };
157 
158 #ifdef ADAPTIVE_LOCKMGRS
159 static u_int alk_retries = 10;
160 static u_int alk_loops = 10000;
161 SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
162 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
163 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
164 #endif
165 
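/*
 * Return the exclusive holder of the lock (LK_KERNPROC if disowned), or
 * NULL if the lock is unlocked or held in shared mode.
 */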
166 static __inline struct thread *
167 lockmgr_xholder(struct lock *lk)
168 {
169 	uintptr_t x;
170 
171 	x = lk->lk_lock;
172 	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
173 }
174 
175 /*
176  * Assumes the sleepqueue chain lock is held on entry and returns with it
177  * released.  It also assumes the generic interlock is sane and has been
178  * checked by the caller.  If LK_INTERLOCK is specified the interlock is
179  * not reacquired after the sleep.
180  */
181 static __inline int
182 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
183     const char *wmesg, int pri, int timo, int queue)
184 {
185 	GIANT_DECLARE;
186 	struct lock_class *class;
187 	int catch, error;
188 
189 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
190 	catch = pri & PCATCH;
191 	pri &= PRIMASK;
192 	error = 0;
193 
194 	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
195 	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
196 
197 	if (flags & LK_INTERLOCK)
198 		class->lc_unlock(ilk);
199 	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
200 		lk->lk_exslpfail++;
201 	GIANT_SAVE();
202 	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
203 	    SLEEPQ_INTERRUPTIBLE : 0), queue);
204 	if ((flags & LK_TIMELOCK) && timo)
205 		sleepq_set_timeout(&lk->lock_object, timo);
206 
207 	/*
208 	 * Decide which sleep primitive to use, based on the timeout and catch.
209 	 */
210 	if ((flags & LK_TIMELOCK) && timo && catch)
211 		error = sleepq_timedwait_sig(&lk->lock_object, pri);
212 	else if ((flags & LK_TIMELOCK) && timo)
213 		error = sleepq_timedwait(&lk->lock_object, pri);
214 	else if (catch)
215 		error = sleepq_wait_sig(&lk->lock_object, pri);
216 	else
217 		sleepq_wait(&lk->lock_object, pri);
218 	GIANT_RESTORE();
219 	if ((flags & LK_SLEEPFAIL) && error == 0)
220 		error = ENOLCK;
221 
222 	return (error);
223 }
224 
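/*
 * Release one shared reference.  If this is the last sharer and waiters are
 * queued, wake up the appropriate sleepqueue, preferring exclusive waiters
 * unless only LK_SLEEPFAIL sleepers are left.  Returns nonzero if the
 * swapper has to be woken up (propagated from sleepq_broadcast()) so the
 * caller can invoke kick_proc0().
 */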
225 static __inline int
226 wakeupshlk(struct lock *lk, const char *file, int line)
227 {
228 	uintptr_t v, x;
229 	u_int realexslp;
230 	int queue, wakeup_swapper;
231 
232 	TD_LOCKS_DEC(curthread);
233 	TD_SLOCKS_DEC(curthread);
234 	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
235 	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
236 
237 	wakeup_swapper = 0;
238 	for (;;) {
239 		x = lk->lk_lock;
240 
241 		/*
242 		 * If there is more than one shared lock held, just drop one
243 		 * and return.
244 		 */
245 		if (LK_SHARERS(x) > 1) {
246 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
247 			    x - LK_ONE_SHARER))
248 				break;
249 			continue;
250 		}
251 
252 		/*
253 		 * If there are no waiters on the exclusive queue, drop the
254 		 * lock quickly.
255 		 */
256 		if ((x & LK_ALL_WAITERS) == 0) {
257 			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
258 			    LK_SHARERS_LOCK(1));
259 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
260 				break;
261 			continue;
262 		}
263 
264 		/*
265 		 * We should have a sharer with waiters, so enter the hard
266 		 * path in order to handle wakeups correctly.
267 		 */
268 		sleepq_lock(&lk->lock_object);
269 		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
270 		v = LK_UNLOCKED;
271 
272 		/*
273 		 * If the lock has exclusive waiters, give them preference in
274 		 * order to avoid deadlock with shared runners-up.
275 		 * If interruptible sleeps left the exclusive queue empty,
276 		 * avoid starvation of the threads sleeping on the shared
277 		 * queue by giving them precedence and cleaning up the
278 		 * exclusive waiters bit anyway.
279 		 * Please note that the lk_exslpfail count may be inaccurate
280 		 * with respect to the real number of waiters with the
281 		 * LK_SLEEPFAIL flag on because they may also be using
282 		 * interruptible sleeps, so lk_exslpfail should be treated as
283 		 * an upper bound, including the edge cases.
284 		 */
285 		realexslp = sleepq_sleepcnt(&lk->lock_object,
286 		    SQ_EXCLUSIVE_QUEUE);
287 		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
288 			if (lk->lk_exslpfail < realexslp) {
289 				lk->lk_exslpfail = 0;
290 				queue = SQ_EXCLUSIVE_QUEUE;
291 				v |= (x & LK_SHARED_WAITERS);
292 			} else {
293 				lk->lk_exslpfail = 0;
294 				LOCK_LOG2(lk,
295 				    "%s: %p has only LK_SLEEPFAIL sleepers",
296 				    __func__, lk);
297 				LOCK_LOG2(lk,
298 			    "%s: %p waking up threads on the exclusive queue",
299 				    __func__, lk);
300 				wakeup_swapper =
301 				    sleepq_broadcast(&lk->lock_object,
302 				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
303 				queue = SQ_SHARED_QUEUE;
304 			}
305 
306 		} else {
307 
308 			/*
309 			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
310 			 * and using interruptible sleeps/timeout may have
311 			 * left spurious lk_exslpfail counts on, so clean
312 			 * them up anyway.
313 			 */
314 			lk->lk_exslpfail = 0;
315 			queue = SQ_SHARED_QUEUE;
316 		}
317 
318 		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
319 		    v)) {
320 			sleepq_release(&lk->lock_object);
321 			continue;
322 		}
323 		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
324 		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
325 		    "exclusive");
326 		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
327 		    0, queue);
328 		sleepq_release(&lk->lock_object);
329 		break;
330 	}
331 
332 	lock_profile_release_lock(&lk->lock_object);
333 	return (wakeup_swapper);
334 }
335 
336 static void
337 assert_lockmgr(struct lock_object *lock, int what)
338 {
339 
340 	panic("lockmgr locks do not support assertions");
341 }
342 
343 static void
344 lock_lockmgr(struct lock_object *lock, int how)
345 {
346 
347 	panic("lockmgr locks do not support sleep interlocking");
348 }
349 
350 static int
351 unlock_lockmgr(struct lock_object *lock)
352 {
353 
354 	panic("lockmgr locks do not support sleep interlocking");
355 }
356 
357 #ifdef KDTRACE_HOOKS
358 static int
359 owner_lockmgr(struct lock_object *lock, struct thread **owner)
360 {
361 
362 	panic("lockmgr locks do not support owner inquiring");
363 }
364 #endif
365 
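/*
 * Initialize a lockmgr lock.  pri and timo become the defaults used by
 * later requests that pass LK_PRIO_DEFAULT / LK_TIMO_DEFAULT, and flags
 * (a subset of LK_INIT_MASK) select recursion, sharing, witness, profiling
 * and adaptive-spinning behaviour.
 */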
366 void
367 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
368 {
369 	int iflags;
370 
371 	MPASS((flags & ~LK_INIT_MASK) == 0);
372 	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
373 	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
374 	    &lk->lk_lock));
375 
376 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
377 	if (flags & LK_CANRECURSE)
378 		iflags |= LO_RECURSABLE;
379 	if ((flags & LK_NODUP) == 0)
380 		iflags |= LO_DUPOK;
381 	if (flags & LK_NOPROFILE)
382 		iflags |= LO_NOPROFILE;
383 	if ((flags & LK_NOWITNESS) == 0)
384 		iflags |= LO_WITNESS;
385 	if (flags & LK_QUIET)
386 		iflags |= LO_QUIET;
387 	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
388 
389 	lk->lk_lock = LK_UNLOCKED;
390 	lk->lk_recurse = 0;
391 	lk->lk_exslpfail = 0;
392 	lk->lk_timo = timo;
393 	lk->lk_pri = pri;
394 	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
395 	STACK_ZERO(lk);
396 }
397 
398 /*
399  * XXX: Gross hacks to manipulate external lock flags after
400  * initialization.  Used for certain vnode and buf locks.
401  */
402 void
403 lockallowshare(struct lock *lk)
404 {
405 
406 	lockmgr_assert(lk, KA_XLOCKED);
407 	lk->lock_object.lo_flags &= ~LK_NOSHARE;
408 }
409 
410 void
411 lockallowrecurse(struct lock *lk)
412 {
413 
414 	lockmgr_assert(lk, KA_XLOCKED);
415 	lk->lock_object.lo_flags |= LO_RECURSABLE;
416 }
417 
418 void
419 lockdisablerecurse(struct lock *lk)
420 {
421 
422 	lockmgr_assert(lk, KA_XLOCKED);
423 	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
424 }
425 
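/*
 * Destroy a lockmgr lock.  The lock must be unlocked, not recursed and
 * without LK_SLEEPFAIL waiters accounted in lk_exslpfail.
 */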
426 void
427 lockdestroy(struct lock *lk)
428 {
429 
430 	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
431 	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
432 	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
433 	lock_destroy(&lk->lock_object);
434 }
435 
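/*
 * __lockmgr_args() is the workhorse behind the lockmgr() family of wrapper
 * macros: it implements LK_SHARED, LK_UPGRADE, LK_EXCLUSIVE, LK_DOWNGRADE,
 * LK_RELEASE and LK_DRAIN requests, optionally dropping the interlock ilk
 * when LK_INTERLOCK is set, and returns 0 on success or an errno value
 * (EBUSY, EDEADLK, ENOLCK, ...) on failure.
 *
 * A minimal usage sketch (illustrative only; PVFS and the lock name are
 * just example values):
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, 0);
 *	if (lockmgr(&lk, LK_EXCLUSIVE, NULL) == 0)
 *		lockmgr(&lk, LK_RELEASE, NULL);
 *	lockdestroy(&lk);
 */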
436 int
437 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
438     const char *wmesg, int pri, int timo, const char *file, int line)
439 {
440 	GIANT_DECLARE;
441 	struct lock_class *class;
442 	const char *iwmesg;
443 	uintptr_t tid, v, x;
444 	u_int op, realexslp;
445 	int error, ipri, itimo, queue, wakeup_swapper;
446 #ifdef LOCK_PROFILING
447 	uint64_t waittime = 0;
448 	int contested = 0;
449 #endif
450 #ifdef ADAPTIVE_LOCKMGRS
451 	volatile struct thread *owner;
452 	u_int i, spintries = 0;
453 #endif
454 
455 	error = 0;
456 	tid = (uintptr_t)curthread;
457 	op = (flags & LK_TYPE_MASK);
458 	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
459 	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
460 	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
461 
462 	MPASS((flags & ~LK_TOTAL_MASK) == 0);
463 	KASSERT((op & (op - 1)) == 0,
464 	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
465 	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
466 	    (op != LK_DOWNGRADE && op != LK_RELEASE),
467 	    ("%s: Invalid flags for the requested operation @ %s:%d",
468 	    __func__, file, line));
469 	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
470 	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
471 	    __func__, file, line));
472 
473 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
474 	if (panicstr != NULL) {
475 		if (flags & LK_INTERLOCK)
476 			class->lc_unlock(ilk);
477 		return (0);
478 	}
479 
480 	if (lk->lock_object.lo_flags & LK_NOSHARE) {
481 		switch (op) {
482 		case LK_SHARED:
483 			op = LK_EXCLUSIVE;
484 			break;
485 		case LK_UPGRADE:
486 		case LK_DOWNGRADE:
487 			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
488 			    file, line);
489 			return (0);
490 		}
491 	}
492 
493 	wakeup_swapper = 0;
494 	switch (op) {
495 	case LK_SHARED:
496 		if (LK_CAN_WITNESS(flags))
497 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
498 			    file, line, ilk);
499 		for (;;) {
500 			x = lk->lk_lock;
501 
502 			/*
503 			 * If no other thread has an exclusive lock, or
504 			 * no exclusive waiter is present, bump the count of
505 			 * sharers.  Since we have to preserve the state of
506 			 * waiters, if we fail to acquire the shared lock
507 			 * loop back and retry.
508 			 */
509 			if (LK_CAN_SHARE(x)) {
510 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
511 				    x + LK_ONE_SHARER))
512 					break;
513 				continue;
514 			}
515 			lock_profile_obtain_lock_failed(&lk->lock_object,
516 			    &contested, &waittime);
517 
518 			/*
519 			 * If the lock is already held by curthread in
520 			 * exclusive mode, avoid a deadlock.
521 			 */
522 			if (LK_HOLDER(x) == tid) {
523 				LOCK_LOG2(lk,
524 				    "%s: %p already held in exclusive mode",
525 				    __func__, lk);
526 				error = EDEADLK;
527 				break;
528 			}
529 
530 			/*
531 			 * If the operation is not allowed to sleep, just give up
532 			 * and return.
533 			 */
534 			if (LK_TRYOP(flags)) {
535 				LOCK_LOG2(lk, "%s: %p fails the try operation",
536 				    __func__, lk);
537 				error = EBUSY;
538 				break;
539 			}
540 
541 #ifdef ADAPTIVE_LOCKMGRS
542 			/*
543 			 * If the owner is running on another CPU, spin until
544 			 * the owner stops running or the state of the lock
545 			 * changes.  We need to handle both states here
546 			 * because, for a failed acquisition, the lock can
547 			 * be held either in exclusive mode or in shared mode
548 			 * (due to the writer starvation avoidance technique).
549 			 */
550 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
551 			    LK_HOLDER(x) != LK_KERNPROC) {
552 				owner = (struct thread *)LK_HOLDER(x);
553 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
554 					CTR3(KTR_LOCK,
555 					    "%s: spinning on %p held by %p",
556 					    __func__, lk, owner);
557 
558 				/*
559 				 * If we are also holding an interlock, drop it
560 				 * in order to avoid a deadlock if the lockmgr
561 				 * owner is adaptively spinning on the
562 				 * interlock itself.
563 				 */
564 				if (flags & LK_INTERLOCK) {
565 					class->lc_unlock(ilk);
566 					flags &= ~LK_INTERLOCK;
567 				}
568 				GIANT_SAVE();
569 				while (LK_HOLDER(lk->lk_lock) ==
570 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
571 					cpu_spinwait();
572 				GIANT_RESTORE();
573 				continue;
574 			} else if (LK_CAN_ADAPT(lk, flags) &&
575 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
576 			    spintries < alk_retries) {
577 				if (flags & LK_INTERLOCK) {
578 					class->lc_unlock(ilk);
579 					flags &= ~LK_INTERLOCK;
580 				}
581 				GIANT_SAVE();
582 				spintries++;
583 				for (i = 0; i < alk_loops; i++) {
584 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
585 						CTR4(KTR_LOCK,
586 				    "%s: shared spinning on %p with %u and %u",
587 						    __func__, lk, spintries, i);
588 					x = lk->lk_lock;
589 					if ((x & LK_SHARE) == 0 ||
590 					    LK_CAN_SHARE(x) != 0)
591 						break;
592 					cpu_spinwait();
593 				}
594 				GIANT_RESTORE();
595 				if (i != alk_loops)
596 					continue;
597 			}
598 #endif
599 
600 			/*
601 			 * Acquire the sleepqueue chain lock because we
602 			 * probably will need to manipulate waiters flags.
603 			 */
604 			sleepq_lock(&lk->lock_object);
605 			x = lk->lk_lock;
606 
607 			/*
608 			 * If the lock can be acquired in shared mode, try
609 			 * again.
610 			 */
611 			if (LK_CAN_SHARE(x)) {
612 				sleepq_release(&lk->lock_object);
613 				continue;
614 			}
615 
616 #ifdef ADAPTIVE_LOCKMGRS
617 			/*
618 			 * The current lock owner might have started executing
619 			 * on another CPU (or the lock could have changed
620 			 * owner) while we were waiting on the sleepqueue
621 			 * chain lock.  If so, drop the sleepqueue lock and try
622 			 * again.
623 			 */
624 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
625 			    LK_HOLDER(x) != LK_KERNPROC) {
626 				owner = (struct thread *)LK_HOLDER(x);
627 				if (TD_IS_RUNNING(owner)) {
628 					sleepq_release(&lk->lock_object);
629 					continue;
630 				}
631 			}
632 #endif
633 
634 			/*
635 			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
636 			 * loop back and retry.
637 			 */
638 			if ((x & LK_SHARED_WAITERS) == 0) {
639 				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
640 				    x | LK_SHARED_WAITERS)) {
641 					sleepq_release(&lk->lock_object);
642 					continue;
643 				}
644 				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
645 				    __func__, lk);
646 			}
647 
648 			/*
649 			 * Since we have been unable to acquire the
650 			 * shared lock and the shared waiters flag is set,
651 			 * we will sleep.
652 			 */
653 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
654 			    SQ_SHARED_QUEUE);
655 			flags &= ~LK_INTERLOCK;
656 			if (error) {
657 				LOCK_LOG3(lk,
658 				    "%s: interrupted sleep for %p with %d",
659 				    __func__, lk, error);
660 				break;
661 			}
662 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
663 			    __func__, lk);
664 		}
665 		if (error == 0) {
666 			lock_profile_obtain_lock_success(&lk->lock_object,
667 			    contested, waittime, file, line);
668 			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
669 			    line);
670 			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
671 			    line);
672 			TD_LOCKS_INC(curthread);
673 			TD_SLOCKS_INC(curthread);
674 			STACK_SAVE(lk);
675 		}
676 		break;
677 	case LK_UPGRADE:
678 		_lockmgr_assert(lk, KA_SLOCKED, file, line);
679 		v = lk->lk_lock;
680 		x = v & LK_ALL_WAITERS;
681 		v &= LK_EXCLUSIVE_SPINNERS;
682 
683 		/*
684 		 * Try to switch from one shared lock to an exclusive one.
685 		 * We need to preserve waiters flags during the operation.
686 		 */
687 		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
688 		    tid | x)) {
689 			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
690 			    line);
691 			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
692 			    LK_TRYWIT(flags), file, line);
693 			TD_SLOCKS_DEC(curthread);
694 			break;
695 		}
696 
697 		/*
698 		 * We have been unable to upgrade, so just
699 		 * give up the shared lock.
700 		 */
701 		wakeup_swapper |= wakeupshlk(lk, file, line);
702 
703 		/* FALLTHROUGH */
704 	case LK_EXCLUSIVE:
705 		if (LK_CAN_WITNESS(flags))
706 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
707 			    LOP_EXCLUSIVE, file, line, ilk);
708 
709 		/*
710 		 * If curthread already holds the lock and this one is
711 		 * allowed to recurse, simply recurse on it.
712 		 */
713 		if (lockmgr_xlocked(lk)) {
714 			if ((flags & LK_CANRECURSE) == 0 &&
715 			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
716 
717 				/*
718 				 * If this is a try operation, just give up
719 				 * and fail with EBUSY instead of panicking.
720 				 */
721 				if (LK_TRYOP(flags)) {
722 					LOCK_LOG2(lk,
723 					    "%s: %p fails the try operation",
724 					    __func__, lk);
725 					error = EBUSY;
726 					break;
727 				}
728 				if (flags & LK_INTERLOCK)
729 					class->lc_unlock(ilk);
730 		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
731 				    __func__, iwmesg, file, line);
732 			}
733 			lk->lk_recurse++;
734 			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
735 			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
736 			    lk->lk_recurse, file, line);
737 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
738 			    LK_TRYWIT(flags), file, line);
739 			TD_LOCKS_INC(curthread);
740 			break;
741 		}
742 
743 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
744 		    tid)) {
745 			lock_profile_obtain_lock_failed(&lk->lock_object,
746 			    &contested, &waittime);
747 
748 			/*
749 			 * If the operation is not allowed to sleep, just give up
750 			 * and return.
751 			 */
752 			if (LK_TRYOP(flags)) {
753 				LOCK_LOG2(lk, "%s: %p fails the try operation",
754 				    __func__, lk);
755 				error = EBUSY;
756 				break;
757 			}
758 
759 #ifdef ADAPTIVE_LOCKMGRS
760 			/*
761 			 * If the owner is running on another CPU, spin until
762 			 * the owner stops running or the state of the lock
763 			 * changes.
764 			 */
765 			x = lk->lk_lock;
766 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
767 			    LK_HOLDER(x) != LK_KERNPROC) {
768 				owner = (struct thread *)LK_HOLDER(x);
769 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
770 					CTR3(KTR_LOCK,
771 					    "%s: spinning on %p held by %p",
772 					    __func__, lk, owner);
773 
774 				/*
775 				 * If we are also holding an interlock, drop it
776 				 * in order to avoid a deadlock if the lockmgr
777 				 * owner is adaptively spinning on the
778 				 * interlock itself.
779 				 */
780 				if (flags & LK_INTERLOCK) {
781 					class->lc_unlock(ilk);
782 					flags &= ~LK_INTERLOCK;
783 				}
784 				GIANT_SAVE();
785 				while (LK_HOLDER(lk->lk_lock) ==
786 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
787 					cpu_spinwait();
788 				GIANT_RESTORE();
789 				continue;
790 			} else if (LK_CAN_ADAPT(lk, flags) &&
791 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
792 			    spintries < alk_retries) {
793 				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
794 				    !atomic_cmpset_ptr(&lk->lk_lock, x,
795 				    x | LK_EXCLUSIVE_SPINNERS))
796 					continue;
797 				if (flags & LK_INTERLOCK) {
798 					class->lc_unlock(ilk);
799 					flags &= ~LK_INTERLOCK;
800 				}
801 				GIANT_SAVE();
802 				spintries++;
803 				for (i = 0; i < alk_loops; i++) {
804 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
805 						CTR4(KTR_LOCK,
806 				    "%s: shared spinning on %p with %u and %u",
807 						    __func__, lk, spintries, i);
808 					if ((lk->lk_lock &
809 					    LK_EXCLUSIVE_SPINNERS) == 0)
810 						break;
811 					cpu_spinwait();
812 				}
813 				GIANT_RESTORE();
814 				if (i != alk_loops)
815 					continue;
816 			}
817 #endif
818 
819 			/*
820 			 * Acquire the sleepqueue chain lock because we
821 			 * probably will need to manipulate waiters flags.
822 			 */
823 			sleepq_lock(&lk->lock_object);
824 			x = lk->lk_lock;
825 
826 			/*
827 			 * If the lock has been released while we spun on
828 			 * the sleepqueue chain lock, just try again.
829 			 */
830 			if (x == LK_UNLOCKED) {
831 				sleepq_release(&lk->lock_object);
832 				continue;
833 			}
834 
835 #ifdef ADAPTIVE_LOCKMGRS
836 			/*
837 			 * The current lock owner might have started executing
838 			 * on another CPU (or the lock could have changed
839 			 * owner) while we were waiting on the sleepqueue
840 			 * chain lock.  If so, drop the sleepqueue lock and try
841 			 * again.
842 			 */
843 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
844 			    LK_HOLDER(x) != LK_KERNPROC) {
845 				owner = (struct thread *)LK_HOLDER(x);
846 				if (TD_IS_RUNNING(owner)) {
847 					sleepq_release(&lk->lock_object);
848 					continue;
849 				}
850 			}
851 #endif
852 
853 			/*
854 			 * The lock can be in the state where there is a
855 			 * pending queue of waiters, but still no owner.
856 			 * This happens when the lock is contested and an
857 			 * owner is going to claim the lock.
858 			 * If curthread is the one successfully acquiring it,
859 			 * claim lock ownership and return, preserving waiters
860 			 * flags.
861 			 */
862 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
863 			if ((x & ~v) == LK_UNLOCKED) {
864 				v &= ~LK_EXCLUSIVE_SPINNERS;
865 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
866 				    tid | v)) {
867 					sleepq_release(&lk->lock_object);
868 					LOCK_LOG2(lk,
869 					    "%s: %p claimed by a new writer",
870 					    __func__, lk);
871 					break;
872 				}
873 				sleepq_release(&lk->lock_object);
874 				continue;
875 			}
876 
877 			/*
878 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
879 			 * fail, loop back and retry.
880 			 */
881 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
882 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
883 				    x | LK_EXCLUSIVE_WAITERS)) {
884 					sleepq_release(&lk->lock_object);
885 					continue;
886 				}
887 				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
888 				    __func__, lk);
889 			}
890 
891 			/*
892 			 * Since we have been unable to acquire the
893 			 * exclusive lock and the exclusive waiters flag
894 			 * is set, we will sleep.
895 			 */
896 			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
897 			    SQ_EXCLUSIVE_QUEUE);
898 			flags &= ~LK_INTERLOCK;
899 			if (error) {
900 				LOCK_LOG3(lk,
901 				    "%s: interrupted sleep for %p with %d",
902 				    __func__, lk, error);
903 				break;
904 			}
905 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
906 			    __func__, lk);
907 		}
908 		if (error == 0) {
909 			lock_profile_obtain_lock_success(&lk->lock_object,
910 			    contested, waittime, file, line);
911 			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
912 			    lk->lk_recurse, file, line);
913 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
914 			    LK_TRYWIT(flags), file, line);
915 			TD_LOCKS_INC(curthread);
916 			STACK_SAVE(lk);
917 		}
918 		break;
919 	case LK_DOWNGRADE:
920 		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
921 		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
922 		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
923 		TD_SLOCKS_INC(curthread);
924 
925 		/*
926 		 * In order to preserve waiters flags, just spin.
927 		 */
928 		for (;;) {
929 			x = lk->lk_lock;
930 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
931 			x &= LK_ALL_WAITERS;
932 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
933 			    LK_SHARERS_LOCK(1) | x))
934 				break;
935 			cpu_spinwait();
936 		}
937 		break;
938 	case LK_RELEASE:
939 		_lockmgr_assert(lk, KA_LOCKED, file, line);
940 		x = lk->lk_lock;
941 
942 		if ((x & LK_SHARE) == 0) {
943 
944 			/*
945 			 * As a first option, treat the lock as if it has no
946 			 * waiters.
947 			 * Fix-up the tid var if the lock has been disowned.
948 			 */
949 			if (LK_HOLDER(x) == LK_KERNPROC)
950 				tid = LK_KERNPROC;
951 			else {
952 				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
953 				    file, line);
954 				TD_LOCKS_DEC(curthread);
955 			}
956 			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
957 			    lk->lk_recurse, file, line);
958 
959 			/*
960 			 * The lock is held in exclusive mode.
961 			 * If the lock is also recursed, then unrecurse it.
962 			 */
963 			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
964 				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
965 				    lk);
966 				lk->lk_recurse--;
967 				break;
968 			}
969 			if (tid != LK_KERNPROC)
970 				lock_profile_release_lock(&lk->lock_object);
971 
972 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
973 			    LK_UNLOCKED))
974 				break;
975 
976 			sleepq_lock(&lk->lock_object);
977 			x = lk->lk_lock;
978 			v = LK_UNLOCKED;
979 
980 			/*
981 			 * If the lock has exclusive waiters, give them
982 			 * preference in order to avoid deadlock with
983 			 * shared runners-up.
984 			 * If interruptible sleeps left the exclusive queue
985 			 * empty, avoid starvation of the threads sleeping
986 			 * on the shared queue by giving them precedence
987 			 * and cleaning up the exclusive waiters bit anyway.
988 			 * Please note that the lk_exslpfail count may be
989 			 * inaccurate with respect to the real number of
990 			 * waiters with the LK_SLEEPFAIL flag on because they
991 			 * may also be using interruptible sleeps, so
992 			 * lk_exslpfail should be treated as an upper bound,
993 			 * including the edge cases.
994 			 */
995 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
996 			realexslp = sleepq_sleepcnt(&lk->lock_object,
997 			    SQ_EXCLUSIVE_QUEUE);
998 			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
999 				if (lk->lk_exslpfail < realexslp) {
1000 					lk->lk_exslpfail = 0;
1001 					queue = SQ_EXCLUSIVE_QUEUE;
1002 					v |= (x & LK_SHARED_WAITERS);
1003 				} else {
1004 					lk->lk_exslpfail = 0;
1005 					LOCK_LOG2(lk,
1006 					"%s: %p has only LK_SLEEPFAIL sleepers",
1007 					    __func__, lk);
1008 					LOCK_LOG2(lk,
1009 			"%s: %p waking up threads on the exclusive queue",
1010 					    __func__, lk);
1011 					wakeup_swapper =
1012 					    sleepq_broadcast(&lk->lock_object,
1013 					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1014 					queue = SQ_SHARED_QUEUE;
1015 				}
1016 			} else {
1017 
1018 				/*
1019 				 * Exclusive waiters sleeping with LK_SLEEPFAIL
1020 				 * on and using interruptible sleeps/timeout
1021 				 * may have left spurious lk_exslpfail counts
1022 				 * on, so clean them up anyway.
1023 				 */
1024 				lk->lk_exslpfail = 0;
1025 				queue = SQ_SHARED_QUEUE;
1026 			}
1027 
1028 			LOCK_LOG3(lk,
1029 			    "%s: %p waking up threads on the %s queue",
1030 			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1031 			    "exclusive");
1032 			atomic_store_rel_ptr(&lk->lk_lock, v);
1033 			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
1034 			    SLEEPQ_LK, 0, queue);
1035 			sleepq_release(&lk->lock_object);
1036 			break;
1037 		} else
1038 			wakeup_swapper = wakeupshlk(lk, file, line);
1039 		break;
1040 	case LK_DRAIN:
1041 		if (LK_CAN_WITNESS(flags))
1042 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1043 			    LOP_EXCLUSIVE, file, line, ilk);
1044 
1045 		/*
1046 		 * Trying to drain a lock we already own will result in a
1047 		 * deadlock.
1048 		 */
1049 		if (lockmgr_xlocked(lk)) {
1050 			if (flags & LK_INTERLOCK)
1051 				class->lc_unlock(ilk);
1052 			panic("%s: draining %s with the lock held @ %s:%d\n",
1053 			    __func__, iwmesg, file, line);
1054 		}
1055 
1056 		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1057 			lock_profile_obtain_lock_failed(&lk->lock_object,
1058 			    &contested, &waittime);
1059 
1060 			/*
1061 			 * If the operation is not allowed to sleep, just give up
1062 			 * and return.
1063 			 */
1064 			if (LK_TRYOP(flags)) {
1065 				LOCK_LOG2(lk, "%s: %p fails the try operation",
1066 				    __func__, lk);
1067 				error = EBUSY;
1068 				break;
1069 			}
1070 
1071 			/*
1072 			 * Acquire the sleepqueue chain lock because we
1073 			 * probably will need to manipulate waiters flags.
1074 			 */
1075 			sleepq_lock(&lk->lock_object);
1076 			x = lk->lk_lock;
1077 
1078 			/*
1079 			 * If the lock has been released while we spun on
1080 			 * the sleepqueue chain lock, just try again.
1081 			 */
1082 			if (x == LK_UNLOCKED) {
1083 				sleepq_release(&lk->lock_object);
1084 				continue;
1085 			}
1086 
1087 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1088 			if ((x & ~v) == LK_UNLOCKED) {
1089 				v = (x & ~LK_EXCLUSIVE_SPINNERS);
1090 
1091 				/*
1092 				 * If interruptible sleeps left the exclusive
1093 				 * queue empty, avoid starvation of the
1094 				 * threads sleeping on the shared queue by
1095 				 * giving them precedence and cleaning up the
1096 				 * exclusive waiters bit anyway.
1097 				 * Please note that the lk_exslpfail count may
1098 				 * be inaccurate with respect to the real
1099 				 * number of waiters with the LK_SLEEPFAIL
1100 				 * flag on because they may also be using
1101 				 * interruptible sleeps, so lk_exslpfail
1102 				 * should be treated as an upper bound,
1103 				 * including the edge cases.
1104 				 */
1105 				if (v & LK_EXCLUSIVE_WAITERS) {
1106 					queue = SQ_EXCLUSIVE_QUEUE;
1107 					v &= ~LK_EXCLUSIVE_WAITERS;
1108 				} else {
1109 
1110 					/*
1111 					 * Exclusive waiters sleeping with
1112 					 * LK_SLEEPFAIL on and using
1113 					 * interruptible sleeps/timeout may
1114 					 * have left spurious lk_exslpfail
1115 					 * counts on, so clean them up anyway.
1116 					 */
1117 					MPASS(v & LK_SHARED_WAITERS);
1118 					lk->lk_exslpfail = 0;
1119 					queue = SQ_SHARED_QUEUE;
1120 					v &= ~LK_SHARED_WAITERS;
1121 				}
1122 				if (queue == SQ_EXCLUSIVE_QUEUE) {
1123 					realexslp =
1124 					    sleepq_sleepcnt(&lk->lock_object,
1125 					    SQ_EXCLUSIVE_QUEUE);
1126 					if (lk->lk_exslpfail >= realexslp) {
1127 						lk->lk_exslpfail = 0;
1128 						queue = SQ_SHARED_QUEUE;
1129 						v &= ~LK_SHARED_WAITERS;
1130 						if (realexslp != 0) {
1131 							LOCK_LOG2(lk,
1132 					"%s: %p has only LK_SLEEPFAIL sleepers",
1133 							    __func__, lk);
1134 							LOCK_LOG2(lk,
1135 			"%s: %p waking up threads on the exclusive queue",
1136 							    __func__, lk);
1137 							wakeup_swapper =
1138 							    sleepq_broadcast(
1139 							    &lk->lock_object,
1140 							    SLEEPQ_LK, 0,
1141 							    SQ_EXCLUSIVE_QUEUE);
1142 						}
1143 					} else
1144 						lk->lk_exslpfail = 0;
1145 				}
1146 				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1147 					sleepq_release(&lk->lock_object);
1148 					continue;
1149 				}
1150 				LOCK_LOG3(lk,
1151 				"%s: %p waking up all threads on the %s queue",
1152 				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1153 				    "shared" : "exclusive");
1154 				wakeup_swapper |= sleepq_broadcast(
1155 				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1156 
1157 				/*
1158 				 * If shared waiters have been woken up, we need
1159 				 * to wait for one of them to acquire the lock
1160 				 * before setting the exclusive waiters flag in
1161 				 * order to avoid a deadlock.
1162 				 */
1163 				if (queue == SQ_SHARED_QUEUE) {
1164 					for (v = lk->lk_lock;
1165 					    (v & LK_SHARE) && !LK_SHARERS(v);
1166 					    v = lk->lk_lock)
1167 						cpu_spinwait();
1168 				}
1169 			}
1170 
1171 			/*
1172 			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1173 			 * fail, loop back and retry.
1174 			 */
1175 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1176 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1177 				    x | LK_EXCLUSIVE_WAITERS)) {
1178 					sleepq_release(&lk->lock_object);
1179 					continue;
1180 				}
1181 				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1182 				    __func__, lk);
1183 			}
1184 
1185 			/*
1186 			 * Since we have been unable to acquire the
1187 			 * exclusive lock and the exclusive waiters flag
1188 			 * is set, we will sleep.
1189 			 */
1190 			if (flags & LK_INTERLOCK) {
1191 				class->lc_unlock(ilk);
1192 				flags &= ~LK_INTERLOCK;
1193 			}
1194 			GIANT_SAVE();
1195 			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1196 			    SQ_EXCLUSIVE_QUEUE);
1197 			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1198 			GIANT_RESTORE();
1199 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1200 			    __func__, lk);
1201 		}
1202 
1203 		if (error == 0) {
1204 			lock_profile_obtain_lock_success(&lk->lock_object,
1205 			    contested, waittime, file, line);
1206 			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1207 			    lk->lk_recurse, file, line);
1208 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1209 			    LK_TRYWIT(flags), file, line);
1210 			TD_LOCKS_INC(curthread);
1211 			STACK_SAVE(lk);
1212 		}
1213 		break;
1214 	default:
1215 		if (flags & LK_INTERLOCK)
1216 			class->lc_unlock(ilk);
1217 		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1218 	}
1219 
1220 	if (flags & LK_INTERLOCK)
1221 		class->lc_unlock(ilk);
1222 	if (wakeup_swapper)
1223 		kick_proc0();
1224 
1225 	return (error);
1226 }
1227 
1228 void
1229 _lockmgr_disown(struct lock *lk, const char *file, int line)
1230 {
1231 	uintptr_t tid, x;
1232 
1233 	tid = (uintptr_t)curthread;
1234 	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
1235 
1236 	/*
1237 	 * If the owner is already LK_KERNPROC just skip the whole operation.
1238 	 */
1239 	if (LK_HOLDER(lk->lk_lock) != tid)
1240 		return;
1241 	lock_profile_release_lock(&lk->lock_object);
1242 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1243 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1244 	TD_LOCKS_DEC(curthread);
1245 	STACK_SAVE(lk);
1246 
1247 	/*
1248 	 * In order to preserve waiters flags, just spin.
1249 	 */
1250 	for (;;) {
1251 		x = lk->lk_lock;
1252 		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1253 		x &= LK_ALL_WAITERS;
1254 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1255 		    LK_KERNPROC | x))
1256 			return;
1257 		cpu_spinwait();
1258 	}
1259 }
1260 
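/*
 * Print a human-readable summary of the lock state: owner or sharer count,
 * any pending waiters and spinners, and the saved stack when DEBUG_LOCKS
 * is enabled.
 */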
1261 void
1262 lockmgr_printinfo(struct lock *lk)
1263 {
1264 	struct thread *td;
1265 	uintptr_t x;
1266 
1267 	if (lk->lk_lock == LK_UNLOCKED)
1268 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1269 	else if (lk->lk_lock & LK_SHARE)
1270 		printf("lock type %s: SHARED (count %ju)\n",
1271 		    lk->lock_object.lo_name,
1272 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1273 	else {
1274 		td = lockmgr_xholder(lk);
1275 		printf("lock type %s: EXCL by thread %p (pid %d)\n",
1276 		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
1277 	}
1278 
1279 	x = lk->lk_lock;
1280 	if (x & LK_EXCLUSIVE_WAITERS)
1281 		printf(" with exclusive waiters pending\n");
1282 	if (x & LK_SHARED_WAITERS)
1283 		printf(" with shared waiters pending\n");
1284 	if (x & LK_EXCLUSIVE_SPINNERS)
1285 		printf(" with exclusive spinners pending\n");
1286 
1287 	STACK_PRINT(lk);
1288 }
1289 
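/*
 * Report the lock state relative to curthread: LK_EXCLUSIVE if curthread
 * owns it (or it has been disowned to LK_KERNPROC), LK_EXCLOTHER if another
 * thread owns it, LK_SHARED if it is share-locked and 0 if it is unlocked.
 */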
1290 int
1291 lockstatus(struct lock *lk)
1292 {
1293 	uintptr_t v, x;
1294 	int ret;
1295 
1296 	ret = LK_SHARED;
1297 	x = lk->lk_lock;
1298 	v = LK_HOLDER(x);
1299 
1300 	if ((x & LK_SHARE) == 0) {
1301 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1302 			ret = LK_EXCLUSIVE;
1303 		else
1304 			ret = LK_EXCLOTHER;
1305 	} else if (x == LK_UNLOCKED)
1306 		ret = 0;
1307 
1308 	return (ret);
1309 }
1310 
1311 #ifdef INVARIANT_SUPPORT
1312 
1313 FEATURE(invariant_support,
1314     "Support for modules compiled with INVARIANTS option");
1315 
1316 #ifndef INVARIANTS
1317 #undef	_lockmgr_assert
1318 #endif
1319 
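/*
 * Assertion support for the KA_* "what" values; any mismatch with the
 * actual lock state results in a panic, except after panicstr has been set.
 */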
1320 void
1321 _lockmgr_assert(struct lock *lk, int what, const char *file, int line)
1322 {
1323 	int slocked = 0;
1324 
1325 	if (panicstr != NULL)
1326 		return;
1327 	switch (what) {
1328 	case KA_SLOCKED:
1329 	case KA_SLOCKED | KA_NOTRECURSED:
1330 	case KA_SLOCKED | KA_RECURSED:
1331 		slocked = 1;
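		/* FALLTHROUGH */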
1332 	case KA_LOCKED:
1333 	case KA_LOCKED | KA_NOTRECURSED:
1334 	case KA_LOCKED | KA_RECURSED:
1335 #ifdef WITNESS
1336 
1337 		/*
1338 		 * We cannot trust WITNESS if the lock is held in exclusive
1339 		 * mode and a call to lockmgr_disown() happened.
1340 		 * Work around this by skipping the check if the lock is held
1341 		 * in exclusive mode, even for the KA_LOCKED case.
1342 		 */
1343 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1344 			witness_assert(&lk->lock_object, what, file, line);
1345 			break;
1346 		}
1347 #endif
1348 		if (lk->lk_lock == LK_UNLOCKED ||
1349 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1350 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1351 			panic("Lock %s not %slocked @ %s:%d\n",
1352 			    lk->lock_object.lo_name, slocked ? "share" : "",
1353 			    file, line);
1354 
1355 		if ((lk->lk_lock & LK_SHARE) == 0) {
1356 			if (lockmgr_recursed(lk)) {
1357 				if (what & KA_NOTRECURSED)
1358 					panic("Lock %s recursed @ %s:%d\n",
1359 					    lk->lock_object.lo_name, file,
1360 					    line);
1361 			} else if (what & KA_RECURSED)
1362 				panic("Lock %s not recursed @ %s:%d\n",
1363 				    lk->lock_object.lo_name, file, line);
1364 		}
1365 		break;
1366 	case KA_XLOCKED:
1367 	case KA_XLOCKED | KA_NOTRECURSED:
1368 	case KA_XLOCKED | KA_RECURSED:
1369 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1370 			panic("Lock %s not exclusively locked @ %s:%d\n",
1371 			    lk->lock_object.lo_name, file, line);
1372 		if (lockmgr_recursed(lk)) {
1373 			if (what & KA_NOTRECURSED)
1374 				panic("Lock %s recursed @ %s:%d\n",
1375 				    lk->lock_object.lo_name, file, line);
1376 		} else if (what & KA_RECURSED)
1377 			panic("Lock %s not recursed @ %s:%d\n",
1378 			    lk->lock_object.lo_name, file, line);
1379 		break;
1380 	case KA_UNLOCKED:
1381 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1382 			panic("Lock %s exclusively locked @ %s:%d\n",
1383 			    lk->lock_object.lo_name, file, line);
1384 		break;
1385 	default:
1386 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1387 		    line);
1388 	}
1389 }
1390 #endif
1391 
1392 #ifdef DDB
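/*
 * DDB helper: if td is blocked on a lockmgr lock, print the lock it is
 * waiting for and return its exclusive owner (if any) through ownerp.
 */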
1393 int
1394 lockmgr_chain(struct thread *td, struct thread **ownerp)
1395 {
1396 	struct lock *lk;
1397 
1398 	lk = td->td_wchan;
1399 
1400 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1401 		return (0);
1402 	db_printf("blocked on lockmgr %s ", lk->lock_object.lo_name);
1403 	if (lk->lk_lock & LK_SHARE)
1404 		db_printf("SHARED (count %ju)\n",
1405 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1406 	else
1407 		db_printf("EXCL\n");
1408 	*ownerp = lockmgr_xholder(lk);
1409 
1410 	return (1);
1411 }
1412 
1413 static void
1414 db_show_lockmgr(struct lock_object *lock)
1415 {
1416 	struct thread *td;
1417 	struct lock *lk;
1418 
1419 	lk = (struct lock *)lock;
1420 
1421 	db_printf(" state: ");
1422 	if (lk->lk_lock == LK_UNLOCKED)
1423 		db_printf("UNLOCKED\n");
1424 	else if (lk->lk_lock & LK_SHARE)
1425 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1426 	else {
1427 		td = lockmgr_xholder(lk);
1428 		if (td == (struct thread *)LK_KERNPROC)
1429 			db_printf("XLOCK: LK_KERNPROC\n");
1430 		else
1431 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1432 			    td->td_tid, td->td_proc->p_pid,
1433 			    td->td_proc->p_comm);
1434 		if (lockmgr_recursed(lk))
1435 			db_printf(" recursed: %d\n", lk->lk_recurse);
1436 	}
1437 	db_printf(" waiters: ");
1438 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1439 	case LK_SHARED_WAITERS:
1440 		db_printf("shared\n");
1441 		break;
1442 	case LK_EXCLUSIVE_WAITERS:
1443 		db_printf("exclusive\n");
1444 		break;
1445 	case LK_ALL_WAITERS:
1446 		db_printf("shared and exclusive\n");
1447 		break;
1448 	default:
1449 		db_printf("none\n");
1450 	}
1451 	db_printf(" spinners: ");
1452 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1453 		db_printf("exclusive\n");
1454 	else
1455 		db_printf("none\n");
1456 }
1457 #endif
1458