xref: /freebsd/sys/kern/kern_sx.c (revision 7cd2dcf07629713e5a3d60472cfe4701b705a167)
1 /*-
2  * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
3  * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice(s), this list of conditions and the following disclaimer as
11  *    the first lines of this file unmodified other than the possible
12  *    addition of one or more copyright notices.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice(s), this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
18  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
21  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
24  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
27  * DAMAGE.
28  */
29 
30 /*
31  * Shared/exclusive locks.  This implementation attempts to ensure
32  * deterministic lock granting behavior, so that slocks and xlocks are
33  * interleaved.
34  *
35  * Priority propagation will not generally raise the priority of lock holders,
36  * so should not be relied upon in combination with sx locks.
37  */
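/*
 * A minimal usage sketch of the sx(9) interface implemented below; most of
 * the consumer-visible calls are thin macros from sys/sx.h around the
 * functions in this file.  The names "example_lock" and "example_data" are
 * placeholders for illustration only:
 *
 *	static struct sx example_lock;
 *
 *	sx_init(&example_lock, "example");
 *
 *	sx_slock(&example_lock);		(shared, reader access)
 *	... read example_data ...
 *	sx_sunlock(&example_lock);
 *
 *	sx_xlock(&example_lock);		(exclusive, writer access)
 *	... modify example_data ...
 *	sx_xunlock(&example_lock);
 *
 *	sx_destroy(&example_lock);
 */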
38 
39 #include "opt_ddb.h"
40 #include "opt_hwpmc_hooks.h"
41 #include "opt_kdtrace.h"
42 #include "opt_no_adaptive_sx.h"
43 
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/ktr.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/proc.h>
53 #include <sys/sleepqueue.h>
54 #include <sys/sx.h>
55 #include <sys/sysctl.h>
56 
57 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
58 #include <machine/cpu.h>
59 #endif
60 
61 #ifdef DDB
62 #include <ddb/ddb.h>
63 #endif
64 
65 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
66 #define	ADAPTIVE_SX
67 #endif
68 
69 CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);
70 
71 #ifdef HWPMC_HOOKS
72 #include <sys/pmckern.h>
73 PMC_SOFT_DECLARE( , , lock, failed);
74 #endif
75 
76 /* Handy macros for sleep queues. */
77 #define	SQ_EXCLUSIVE_QUEUE	0
78 #define	SQ_SHARED_QUEUE		1
79 
80 /*
81  * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
82  * drop Giant anytime we have to sleep or if we adaptively spin.
83  */
84 #define	GIANT_DECLARE							\
85 	int _giantcnt = 0;						\
86 	WITNESS_SAVE_DECL(Giant)					\
87 
88 #define	GIANT_SAVE() do {						\
89 	if (mtx_owned(&Giant)) {					\
90 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
91 		while (mtx_owned(&Giant)) {				\
92 			_giantcnt++;					\
93 			mtx_unlock(&Giant);				\
94 		}							\
95 	}								\
96 } while (0)
97 
98 #define GIANT_RESTORE() do {						\
99 	if (_giantcnt > 0) {						\
100 		mtx_assert(&Giant, MA_NOTOWNED);			\
101 		while (_giantcnt--)					\
102 			mtx_lock(&Giant);				\
103 		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
104 	}								\
105 } while (0)
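/*
 * The three Giant helpers above are intended to be used together in the
 * hard-case functions later in this file, roughly in this shape:
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();			(before spinning or sleeping)
 *	sleepq_wait(...);		(or a cpu_spinwait() loop)
 *	...
 *	GIANT_RESTORE();		(once the acquisition attempt is done)
 *
 * GIANT_SAVE() fully releases a recursed Giant, remembering the recursion
 * count in _giantcnt, and GIANT_RESTORE() reacquires it that many times.
 */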
106 
107 /*
108  * Returns true if an exclusive lock is recursed.  It assumes
109  * curthread currently has an exclusive lock.
110  */
111 #define	sx_recurse		lock_object.lo_data
112 #define	sx_recursed(sx)		((sx)->sx_recurse != 0)
113 
114 static void	assert_sx(const struct lock_object *lock, int what);
115 #ifdef DDB
116 static void	db_show_sx(const struct lock_object *lock);
117 #endif
118 static void	lock_sx(struct lock_object *lock, int how);
119 #ifdef KDTRACE_HOOKS
120 static int	owner_sx(const struct lock_object *lock, struct thread **owner);
121 #endif
122 static int	unlock_sx(struct lock_object *lock);
123 
124 struct lock_class lock_class_sx = {
125 	.lc_name = "sx",
126 	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
127 	.lc_assert = assert_sx,
128 #ifdef DDB
129 	.lc_ddb_show = db_show_sx,
130 #endif
131 	.lc_lock = lock_sx,
132 	.lc_unlock = unlock_sx,
133 #ifdef KDTRACE_HOOKS
134 	.lc_owner = owner_sx,
135 #endif
136 };
137 
138 #ifndef INVARIANTS
139 #define	_sx_assert(sx, what, file, line)
140 #endif
141 
142 #ifdef ADAPTIVE_SX
143 static u_int asx_retries = 10;
144 static u_int asx_loops = 10000;
145 static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
146 SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
147 SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
148 #endif
149 
150 void
151 assert_sx(const struct lock_object *lock, int what)
152 {
153 
154 	sx_assert((const struct sx *)lock, what);
155 }
156 
157 void
158 lock_sx(struct lock_object *lock, int how)
159 {
160 	struct sx *sx;
161 
162 	sx = (struct sx *)lock;
163 	if (how)
164 		sx_xlock(sx);
165 	else
166 		sx_slock(sx);
167 }
168 
169 int
170 unlock_sx(struct lock_object *lock)
171 {
172 	struct sx *sx;
173 
174 	sx = (struct sx *)lock;
175 	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
176 	if (sx_xlocked(sx)) {
177 		sx_xunlock(sx);
178 		return (1);
179 	} else {
180 		sx_sunlock(sx);
181 		return (0);
182 	}
183 }
184 
185 #ifdef KDTRACE_HOOKS
186 int
187 owner_sx(const struct lock_object *lock, struct thread **owner)
188 {
189 	const struct sx *sx = (const struct sx *)lock;
190 	uintptr_t x = sx->sx_lock;
191 
192 	*owner = (struct thread *)SX_OWNER(x);
193 	return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
194 	    (*owner != NULL));
195 }
196 #endif
197 
198 void
199 sx_sysinit(void *arg)
200 {
201 	struct sx_args *sargs = arg;
202 
203 	sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
204 }
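/*
 * sx_sysinit() is not normally called directly; it is the SYSINIT handler
 * behind the SX_SYSINIT() convenience macro in sys/sx.h, which arranges for
 * a statically declared lock to be initialized during boot.  A sketch, with
 * "example_lock" as a placeholder name:
 *
 *	static struct sx example_lock;
 *	SX_SYSINIT(example_lock_init, &example_lock, "example lock");
 */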
205 
206 void
207 sx_init_flags(struct sx *sx, const char *description, int opts)
208 {
209 	int flags;
210 
211 	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
212 	    SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
213 	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
214 	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
215 	    &sx->sx_lock));
216 
217 	flags = LO_SLEEPABLE | LO_UPGRADABLE;
218 	if (opts & SX_DUPOK)
219 		flags |= LO_DUPOK;
220 	if (opts & SX_NOPROFILE)
221 		flags |= LO_NOPROFILE;
222 	if (!(opts & SX_NOWITNESS))
223 		flags |= LO_WITNESS;
224 	if (opts & SX_RECURSE)
225 		flags |= LO_RECURSABLE;
226 	if (opts & SX_QUIET)
227 		flags |= LO_QUIET;
228 
229 	flags |= opts & SX_NOADAPTIVE;
230 	sx->sx_lock = SX_LOCK_UNLOCKED;
231 	sx->sx_recurse = 0;
232 	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
233 }
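/*
 * A sketch of initialization with options (placeholder name); any
 * combination of the SX_* flags accepted by the MPASS() above may be
 * passed, e.g. to allow recursive exclusive acquisition:
 *
 *	sx_init_flags(&example_lock, "example", SX_RECURSE);
 */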
234 
235 void
236 sx_destroy(struct sx *sx)
237 {
238 
239 	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
240 	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
241 	sx->sx_lock = SX_LOCK_DESTROYED;
242 	lock_destroy(&sx->lock_object);
243 }
244 
245 int
246 _sx_slock(struct sx *sx, int opts, const char *file, int line)
247 {
248 	int error = 0;
249 
250 	if (SCHEDULER_STOPPED())
251 		return (0);
252 	KASSERT(!TD_IS_IDLETHREAD(curthread),
253 	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
254 	    curthread, sx->lock_object.lo_name, file, line));
255 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
256 	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
257 	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
258 	error = __sx_slock(sx, opts, file, line);
259 	if (!error) {
260 		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
261 		WITNESS_LOCK(&sx->lock_object, 0, file, line);
262 		curthread->td_locks++;
263 	}
264 
265 	return (error);
266 }
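/*
 * The opts argument is what distinguishes the interruptible variants:
 * sx_slock_sig() in sys/sx.h passes SX_INTERRUPTIBLE, so the sleep in the
 * hard case can be aborted by a signal and an error returned.  A sketch
 * (placeholder name):
 *
 *	error = sx_slock_sig(&example_lock);
 *	if (error != 0)
 *		return (error);		(interrupted; the lock is not held)
 */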
267 
268 int
269 sx_try_slock_(struct sx *sx, const char *file, int line)
270 {
271 	uintptr_t x;
272 
273 	if (SCHEDULER_STOPPED())
274 		return (1);
275 
276 	KASSERT(!TD_IS_IDLETHREAD(curthread),
277 	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
278 	    curthread, sx->lock_object.lo_name, file, line));
279 
280 	for (;;) {
281 		x = sx->sx_lock;
282 		KASSERT(x != SX_LOCK_DESTROYED,
283 		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
284 		if (!(x & SX_LOCK_SHARED))
285 			break;
286 		if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
287 			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
288 			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
289 			curthread->td_locks++;
290 			return (1);
291 		}
292 	}
293 
294 	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
295 	return (0);
296 }
297 
298 int
299 _sx_xlock(struct sx *sx, int opts, const char *file, int line)
300 {
301 	int error = 0;
302 
303 	if (SCHEDULER_STOPPED())
304 		return (0);
305 	KASSERT(!TD_IS_IDLETHREAD(curthread),
306 	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
307 	    curthread, sx->lock_object.lo_name, file, line));
308 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
309 	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
310 	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
311 	    line, NULL);
312 	error = __sx_xlock(sx, curthread, opts, file, line);
313 	if (!error) {
314 		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
315 		    file, line);
316 		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
317 		curthread->td_locks++;
318 	}
319 
320 	return (error);
321 }
322 
323 int
324 sx_try_xlock_(struct sx *sx, const char *file, int line)
325 {
326 	int rval;
327 
328 	if (SCHEDULER_STOPPED())
329 		return (1);
330 
331 	KASSERT(!TD_IS_IDLETHREAD(curthread),
332 	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
333 	    curthread, sx->lock_object.lo_name, file, line));
334 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
335 	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
336 
337 	if (sx_xlocked(sx) &&
338 	    (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
339 		sx->sx_recurse++;
340 		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
341 		rval = 1;
342 	} else
343 		rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
344 		    (uintptr_t)curthread);
345 	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
346 	if (rval) {
347 		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
348 		    file, line);
349 		curthread->td_locks++;
350 	}
351 
352 	return (rval);
353 }
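/*
 * The try variants above never sleep, which makes them usable where
 * blocking on the lock is not acceptable.  A sketch of the usual pattern
 * (placeholder names):
 *
 *	if (sx_try_xlock(&example_lock)) {
 *		... update the protected state ...
 *		sx_xunlock(&example_lock);
 *	} else {
 *		... defer or retry the work later ...
 *	}
 */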
354 
355 void
356 _sx_sunlock(struct sx *sx, const char *file, int line)
357 {
358 
359 	if (SCHEDULER_STOPPED())
360 		return;
361 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
362 	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
363 	_sx_assert(sx, SA_SLOCKED, file, line);
364 	curthread->td_locks--;
365 	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
366 	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
367 	__sx_sunlock(sx, file, line);
368 	LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_SUNLOCK_RELEASE, sx);
369 }
370 
371 void
372 _sx_xunlock(struct sx *sx, const char *file, int line)
373 {
374 
375 	if (SCHEDULER_STOPPED())
376 		return;
377 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
378 	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
379 	_sx_assert(sx, SA_XLOCKED, file, line);
380 	curthread->td_locks--;
381 	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
382 	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
383 	    line);
384 	if (!sx_recursed(sx))
385 		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_XUNLOCK_RELEASE, sx);
386 	__sx_xunlock(sx, curthread, file, line);
387 }
388 
389 /*
390  * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
391  * This will only succeed if this thread holds a single shared lock.
392  * Return 1 if the upgrade succeeds, 0 otherwise.
393  */
394 int
395 sx_try_upgrade_(struct sx *sx, const char *file, int line)
396 {
397 	uintptr_t x;
398 	int success;
399 
400 	if (SCHEDULER_STOPPED())
401 		return (1);
402 
403 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
404 	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
405 	_sx_assert(sx, SA_SLOCKED, file, line);
406 
407 	/*
408 	 * Try to switch from one shared lock to an exclusive lock.  We need
409 	 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
410 	 * we will wake up the exclusive waiters when we drop the lock.
411 	 */
412 	x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
413 	success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
414 	    (uintptr_t)curthread | x);
415 	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
416 	if (success) {
417 		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
418 		    file, line);
419 		LOCKSTAT_RECORD0(LS_SX_TRYUPGRADE_UPGRADE, sx);
420 	}
421 	return (success);
422 }
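/*
 * Since the upgrade can fail (it requires the caller to be the only
 * sharer), callers typically fall back to dropping the shared lock, taking
 * the exclusive lock, and re-validating whatever was observed under the
 * shared lock.  A sketch (placeholder names):
 *
 *	sx_slock(&example_lock);
 *	if (needs_update && !sx_try_upgrade(&example_lock)) {
 *		sx_sunlock(&example_lock);
 *		sx_xlock(&example_lock);
 *		... re-check needs_update; the state may have changed ...
 *	}
 */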
423 
424 /*
425  * Downgrade an unrecursed exclusive lock into a single shared lock.
426  */
427 void
428 sx_downgrade_(struct sx *sx, const char *file, int line)
429 {
430 	uintptr_t x;
431 	int wakeup_swapper;
432 
433 	if (SCHEDULER_STOPPED())
434 		return;
435 
436 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
437 	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
438 	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
439 #ifndef INVARIANTS
440 	if (sx_recursed(sx))
441 		panic("downgrade of a recursed lock");
442 #endif
443 
444 	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);
445 
446 	/*
447 	 * Try to switch from an exclusive lock with no shared waiters
448 	 * to one sharer with no shared waiters.  If there are
449 	 * exclusive waiters, we don't need to lock the sleep queue so
450 	 * long as we preserve the flag.  We do one quick try and if
451 	 * that fails we grab the sleepq lock to keep the flags from
452 	 * changing and do it the slow way.
453 	 *
454 	 * We have to lock the sleep queue if there are shared waiters
455 	 * so we can wake them up.
456 	 */
457 	x = sx->sx_lock;
458 	if (!(x & SX_LOCK_SHARED_WAITERS) &&
459 	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
460 	    (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
461 		LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
462 		return;
463 	}
464 
465 	/*
466 	 * Lock the sleep queue so we can read the waiters bits
467 	 * without any races and wakeup any shared waiters.
468 	 */
469 	sleepq_lock(&sx->lock_object);
470 
471 	/*
472 	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
473 	 * shared lock.  If there are any shared waiters, wake them up.
474 	 */
475 	wakeup_swapper = 0;
476 	x = sx->sx_lock;
477 	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
478 	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
479 	if (x & SX_LOCK_SHARED_WAITERS)
480 		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
481 		    0, SQ_SHARED_QUEUE);
482 	sleepq_release(&sx->lock_object);
483 
484 	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
485 	LOCKSTAT_RECORD0(LS_SX_DOWNGRADE_DOWNGRADE, sx);
486 
487 	if (wakeup_swapper)
488 		kick_proc0();
489 }
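/*
 * sx_downgrade() lets a writer continue as a reader without a window in
 * which the lock is unowned.  A sketch (placeholder names):
 *
 *	sx_xlock(&example_lock);
 *	... perform the update ...
 *	sx_downgrade(&example_lock);	(now a single shared holder)
 *	... keep reading the updated state ...
 *	sx_sunlock(&example_lock);
 */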
490 
491 /*
492  * This function represents the so-called 'hard case' for sx_xlock
493  * operation.  All 'easy case' failures are redirected to this.  Note
494  * that ideally this would be a static function, but it needs to be
495  * accessible from at least sx.h.
496  */
497 int
498 _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
499     int line)
500 {
501 	GIANT_DECLARE;
502 #ifdef ADAPTIVE_SX
503 	volatile struct thread *owner;
504 	u_int i, spintries = 0;
505 #endif
506 	uintptr_t x;
507 #ifdef LOCK_PROFILING
508 	uint64_t waittime = 0;
509 	int contested = 0;
510 #endif
511 	int error = 0;
512 #ifdef	KDTRACE_HOOKS
513 	uint64_t spin_cnt = 0;
514 	uint64_t sleep_cnt = 0;
515 	int64_t sleep_time = 0;
516 #endif
517 
518 	if (SCHEDULER_STOPPED())
519 		return (0);
520 
521 	/* If we already hold an exclusive lock, then recurse. */
522 	if (sx_xlocked(sx)) {
523 		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
524 	    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
525 		    sx->lock_object.lo_name, file, line));
526 		sx->sx_recurse++;
527 		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
528 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
529 			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
530 		return (0);
531 	}
532 
533 	if (LOCK_LOG_TEST(&sx->lock_object, 0))
534 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
535 		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
536 
537 	while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
538 #ifdef KDTRACE_HOOKS
539 		spin_cnt++;
540 #endif
541 #ifdef HWPMC_HOOKS
542 		PMC_SOFT_CALL( , , lock, failed);
543 #endif
544 		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
545 		    &waittime);
546 #ifdef ADAPTIVE_SX
547 		/*
548 		 * If the lock is write locked and the owner is
549 		 * running on another CPU, spin until the owner stops
550 		 * running or the state of the lock changes.
551 		 */
552 		x = sx->sx_lock;
553 		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
554 			if ((x & SX_LOCK_SHARED) == 0) {
555 				x = SX_OWNER(x);
556 				owner = (struct thread *)x;
557 				if (TD_IS_RUNNING(owner)) {
558 					if (LOCK_LOG_TEST(&sx->lock_object, 0))
559 						CTR3(KTR_LOCK,
560 					    "%s: spinning on %p held by %p",
561 						    __func__, sx, owner);
562 					GIANT_SAVE();
563 					while (SX_OWNER(sx->sx_lock) == x &&
564 					    TD_IS_RUNNING(owner)) {
565 						cpu_spinwait();
566 #ifdef KDTRACE_HOOKS
567 						spin_cnt++;
568 #endif
569 					}
570 					continue;
571 				}
572 			} else if (SX_SHARERS(x) && spintries < asx_retries) {
573 				GIANT_SAVE();
574 				spintries++;
575 				for (i = 0; i < asx_loops; i++) {
576 					if (LOCK_LOG_TEST(&sx->lock_object, 0))
577 						CTR4(KTR_LOCK,
578 				    "%s: shared spinning on %p with %u and %u",
579 						    __func__, sx, spintries, i);
580 					x = sx->sx_lock;
581 					if ((x & SX_LOCK_SHARED) == 0 ||
582 					    SX_SHARERS(x) == 0)
583 						break;
584 					cpu_spinwait();
585 #ifdef KDTRACE_HOOKS
586 					spin_cnt++;
587 #endif
588 				}
589 				if (i != asx_loops)
590 					continue;
591 			}
592 		}
593 #endif
594 
595 		sleepq_lock(&sx->lock_object);
596 		x = sx->sx_lock;
597 
598 		/*
599 		 * If the lock was released while spinning on the
600 		 * sleep queue chain lock, try again.
601 		 */
602 		if (x == SX_LOCK_UNLOCKED) {
603 			sleepq_release(&sx->lock_object);
604 			continue;
605 		}
606 
607 #ifdef ADAPTIVE_SX
608 		/*
609 		 * The current lock owner might have started executing
610 		 * on another CPU (or the lock could have changed
611 		 * owners) while we were waiting on the sleep queue
612 		 * chain lock.  If so, drop the sleep queue lock and try
613 		 * again.
614 		 */
615 		if (!(x & SX_LOCK_SHARED) &&
616 		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
617 			owner = (struct thread *)SX_OWNER(x);
618 			if (TD_IS_RUNNING(owner)) {
619 				sleepq_release(&sx->lock_object);
620 				continue;
621 			}
622 		}
623 #endif
624 
625 		/*
626 		 * If an exclusive lock was released with both shared
627 		 * and exclusive waiters and a shared waiter hasn't
628 		 * woken up and acquired the lock yet, sx_lock will be
629 		 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
630 		 * If we see that value, try to acquire it once.  Note
631 		 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
632 		 * as there are other exclusive waiters still.  If we
633 		 * fail, restart the loop.
634 		 */
635 		if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
636 			if (atomic_cmpset_acq_ptr(&sx->sx_lock,
637 			    SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
638 			    tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
639 				sleepq_release(&sx->lock_object);
640 				CTR2(KTR_LOCK, "%s: %p claimed by new writer",
641 				    __func__, sx);
642 				break;
643 			}
644 			sleepq_release(&sx->lock_object);
645 			continue;
646 		}
647 
648 		/*
649 		 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we fail,
650 		 * then loop back and retry.
651 		 */
652 		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
653 			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
654 			    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
655 				sleepq_release(&sx->lock_object);
656 				continue;
657 			}
658 			if (LOCK_LOG_TEST(&sx->lock_object, 0))
659 				CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
660 				    __func__, sx);
661 		}
662 
663 		/*
664 		 * Since we have been unable to acquire the exclusive
665 		 * lock and the exclusive waiters flag is set, we have
666 		 * to sleep.
667 		 */
668 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
669 			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
670 			    __func__, sx);
671 
672 #ifdef KDTRACE_HOOKS
673 		sleep_time -= lockstat_nsecs();
674 #endif
675 		GIANT_SAVE();
676 		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
677 		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
678 		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
679 		if (!(opts & SX_INTERRUPTIBLE))
680 			sleepq_wait(&sx->lock_object, 0);
681 		else
682 			error = sleepq_wait_sig(&sx->lock_object, 0);
683 #ifdef KDTRACE_HOOKS
684 		sleep_time += lockstat_nsecs();
685 		sleep_cnt++;
686 #endif
687 		if (error) {
688 			if (LOCK_LOG_TEST(&sx->lock_object, 0))
689 				CTR2(KTR_LOCK,
690 			"%s: interruptible sleep by %p suspended by signal",
691 				    __func__, sx);
692 			break;
693 		}
694 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
695 			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
696 			    __func__, sx);
697 	}
698 
699 	GIANT_RESTORE();
700 	if (!error)
701 		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, sx,
702 		    contested, waittime, file, line);
703 #ifdef KDTRACE_HOOKS
704 	if (sleep_time)
705 		LOCKSTAT_RECORD1(LS_SX_XLOCK_BLOCK, sx, sleep_time);
706 	if (spin_cnt > sleep_cnt)
707 		LOCKSTAT_RECORD1(LS_SX_XLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
708 #endif
709 	return (error);
710 }
711 
712 /*
713  * This function represents the so-called 'hard case' for sx_xunlock
714  * operation.  All 'easy case' failures are redirected to this.  Note
715  * that ideally this would be a static function, but it needs to be
716  * accessible from at least sx.h.
717  */
718 void
719 _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
720 {
721 	uintptr_t x;
722 	int queue, wakeup_swapper;
723 
724 	if (SCHEDULER_STOPPED())
725 		return;
726 
727 	MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
728 
729 	/* If the lock is recursed, then unrecurse one level. */
730 	if (sx_xlocked(sx) && sx_recursed(sx)) {
731 		if ((--sx->sx_recurse) == 0)
732 			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
733 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
734 			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
735 		return;
736 	}
737 	MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
738 	    SX_LOCK_EXCLUSIVE_WAITERS));
739 	if (LOCK_LOG_TEST(&sx->lock_object, 0))
740 		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);
741 
742 	sleepq_lock(&sx->lock_object);
743 	x = SX_LOCK_UNLOCKED;
744 
745 	/*
746 	 * The wake up algorithm here is quite simple and probably not
747 	 * ideal.  It gives precedence to shared waiters if they are
748 	 * present.  For this condition, we have to preserve the
749 	 * state of the exclusive waiters flag.
750 	 * If interruptible sleeps left the shared queue empty, avoid
751 	 * starving the threads sleeping on the exclusive queue by giving
752 	 * them precedence and clearing the shared waiters bit anyway.
753 	 */
754 	if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
755 	    sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
756 		queue = SQ_SHARED_QUEUE;
757 		x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
758 	} else
759 		queue = SQ_EXCLUSIVE_QUEUE;
760 
761 	/* Wake up all the waiters for the specific queue. */
762 	if (LOCK_LOG_TEST(&sx->lock_object, 0))
763 		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
764 		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
765 		    "exclusive");
766 	atomic_store_rel_ptr(&sx->sx_lock, x);
767 	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
768 	    queue);
769 	sleepq_release(&sx->lock_object);
770 	if (wakeup_swapper)
771 		kick_proc0();
772 }
773 
774 /*
775  * This function represents the so-called 'hard case' for sx_slock
776  * operation.  All 'easy case' failures are redirected to this.  Note
777  * that ideally this would be a static function, but it needs to be
778  * accessible from at least sx.h.
779  */
780 int
781 _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
782 {
783 	GIANT_DECLARE;
784 #ifdef ADAPTIVE_SX
785 	volatile struct thread *owner;
786 #endif
787 #ifdef LOCK_PROFILING
788 	uint64_t waittime = 0;
789 	int contested = 0;
790 #endif
791 	uintptr_t x;
792 	int error = 0;
793 #ifdef KDTRACE_HOOKS
794 	uint64_t spin_cnt = 0;
795 	uint64_t sleep_cnt = 0;
796 	int64_t sleep_time = 0;
797 #endif
798 
799 	if (SCHEDULER_STOPPED())
800 		return (0);
801 
802 	/*
803 	 * As with rwlocks, we make no attempt to block new shared
804 	 * locks once there is an exclusive waiter.
805 	 */
806 	for (;;) {
807 #ifdef KDTRACE_HOOKS
808 		spin_cnt++;
809 #endif
810 		x = sx->sx_lock;
811 
812 		/*
813 		 * If no other thread has an exclusive lock then try to bump up
814 		 * the count of sharers.  Since we have to preserve the state
815 		 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
816 		 * shared lock, loop back and retry.
817 		 */
818 		if (x & SX_LOCK_SHARED) {
819 			MPASS(!(x & SX_LOCK_SHARED_WAITERS));
820 			if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
821 			    x + SX_ONE_SHARER)) {
822 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
823 					CTR4(KTR_LOCK,
824 					    "%s: %p succeed %p -> %p", __func__,
825 					    sx, (void *)x,
826 					    (void *)(x + SX_ONE_SHARER));
827 				break;
828 			}
829 			continue;
830 		}
831 #ifdef HWPMC_HOOKS
832 		PMC_SOFT_CALL( , , lock, failed);
833 #endif
834 		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
835 		    &waittime);
836 
837 #ifdef ADAPTIVE_SX
838 		/*
839 		 * If the owner is running on another CPU, spin until
840 		 * the owner stops running or the state of the lock
841 		 * changes.
842 		 */
843 		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
844 			x = SX_OWNER(x);
845 			owner = (struct thread *)x;
846 			if (TD_IS_RUNNING(owner)) {
847 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
848 					CTR3(KTR_LOCK,
849 					    "%s: spinning on %p held by %p",
850 					    __func__, sx, owner);
851 				GIANT_SAVE();
852 				while (SX_OWNER(sx->sx_lock) == x &&
853 				    TD_IS_RUNNING(owner)) {
854 #ifdef KDTRACE_HOOKS
855 					spin_cnt++;
856 #endif
857 					cpu_spinwait();
858 				}
859 				continue;
860 			}
861 		}
862 #endif
863 
864 		/*
865 		 * Some other thread already has an exclusive lock, so
866 		 * start the process of blocking.
867 		 */
868 		sleepq_lock(&sx->lock_object);
869 		x = sx->sx_lock;
870 
871 		/*
872 		 * The lock could have been released while we spun.
873 		 * In this case loop back and retry.
874 		 */
875 		if (x & SX_LOCK_SHARED) {
876 			sleepq_release(&sx->lock_object);
877 			continue;
878 		}
879 
880 #ifdef ADAPTIVE_SX
881 		/*
882 		 * If the owner is running on another CPU, spin until
883 		 * the owner stops running or the state of the lock
884 		 * changes.
885 		 */
886 		if (!(x & SX_LOCK_SHARED) &&
887 		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
888 			owner = (struct thread *)SX_OWNER(x);
889 			if (TD_IS_RUNNING(owner)) {
890 				sleepq_release(&sx->lock_object);
891 				continue;
892 			}
893 		}
894 #endif
895 
896 		/*
897 		 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
898 		 * fail to set it, drop the sleep queue lock and loop
899 		 * back.
900 		 */
901 		if (!(x & SX_LOCK_SHARED_WAITERS)) {
902 			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
903 			    x | SX_LOCK_SHARED_WAITERS)) {
904 				sleepq_release(&sx->lock_object);
905 				continue;
906 			}
907 			if (LOCK_LOG_TEST(&sx->lock_object, 0))
908 				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
909 				    __func__, sx);
910 		}
911 
912 		/*
913 		 * Since we have been unable to acquire the shared lock,
914 		 * we have to sleep.
915 		 */
916 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
917 			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
918 			    __func__, sx);
919 
920 #ifdef KDTRACE_HOOKS
921 		sleep_time -= lockstat_nsecs();
922 #endif
923 		GIANT_SAVE();
924 		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
925 		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
926 		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
927 		if (!(opts & SX_INTERRUPTIBLE))
928 			sleepq_wait(&sx->lock_object, 0);
929 		else
930 			error = sleepq_wait_sig(&sx->lock_object, 0);
931 #ifdef KDTRACE_HOOKS
932 		sleep_time += lockstat_nsecs();
933 		sleep_cnt++;
934 #endif
935 		if (error) {
936 			if (LOCK_LOG_TEST(&sx->lock_object, 0))
937 				CTR2(KTR_LOCK,
938 			"%s: interruptible sleep by %p suspended by signal",
939 				    __func__, sx);
940 			break;
941 		}
942 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
943 			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
944 			    __func__, sx);
945 	}
946 	if (error == 0)
947 		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx,
948 		    contested, waittime, file, line);
949 #ifdef KDTRACE_HOOKS
950 	if (sleep_time)
951 		LOCKSTAT_RECORD1(LS_SX_SLOCK_BLOCK, sx, sleep_time);
952 	if (spin_cnt > sleep_cnt)
953 		LOCKSTAT_RECORD1(LS_SX_SLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
954 #endif
955 	GIANT_RESTORE();
956 	return (error);
957 }
958 
959 /*
960  * This function represents the so-called 'hard case' for sx_sunlock
961  * operation.  All 'easy case' failures are redirected to this.  Note
962  * that ideally this would be a static function, but it needs to be
963  * accessible from at least sx.h.
964  */
965 void
966 _sx_sunlock_hard(struct sx *sx, const char *file, int line)
967 {
968 	uintptr_t x;
969 	int wakeup_swapper;
970 
971 	if (SCHEDULER_STOPPED())
972 		return;
973 
974 	for (;;) {
975 		x = sx->sx_lock;
976 
977 		/*
978 		 * We should never have shared waiters while at least one
979 		 * thread holds a shared lock.
980 		 */
981 		KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
982 		    ("%s: waiting sharers", __func__));
983 
984 		/*
985 		 * See if there is more than one shared lock held.  If
986 		 * so, just drop one and return.
987 		 */
988 		if (SX_SHARERS(x) > 1) {
989 			if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
990 			    x - SX_ONE_SHARER)) {
991 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
992 					CTR4(KTR_LOCK,
993 					    "%s: %p succeeded %p -> %p",
994 					    __func__, sx, (void *)x,
995 					    (void *)(x - SX_ONE_SHARER));
996 				break;
997 			}
998 			continue;
999 		}
1000 
1001 		/*
1002 		 * If there aren't any waiters for an exclusive lock,
1003 		 * then try to drop it quickly.
1004 		 */
1005 		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
1006 			MPASS(x == SX_SHARERS_LOCK(1));
1007 			if (atomic_cmpset_rel_ptr(&sx->sx_lock,
1008 			    SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
1009 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
1010 					CTR2(KTR_LOCK, "%s: %p last succeeded",
1011 					    __func__, sx);
1012 				break;
1013 			}
1014 			continue;
1015 		}
1016 
1017 		/*
1018 		 * At this point, there should just be one sharer with
1019 		 * exclusive waiters.
1020 		 */
1021 		MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));
1022 
1023 		sleepq_lock(&sx->lock_object);
1024 
1025 		/*
1026 		 * The wakeup semantic here is quite simple: just wake up all
1027 		 * the exclusive waiters.  Note that the state of the lock
1028 		 * could have changed, so if the cmpset fails, loop back and
1029 		 * retry.
1030 		 */
1031 		if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
1032 		    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
1033 		    SX_LOCK_UNLOCKED)) {
1034 			sleepq_release(&sx->lock_object);
1035 			continue;
1036 		}
1037 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
1038 			CTR2(KTR_LOCK, "%s: %p waking up all threads on"
1039 			    " exclusive queue", __func__, sx);
1040 		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
1041 		    0, SQ_EXCLUSIVE_QUEUE);
1042 		sleepq_release(&sx->lock_object);
1043 		if (wakeup_swapper)
1044 			kick_proc0();
1045 		break;
1046 	}
1047 }
1048 
1049 #ifdef INVARIANT_SUPPORT
1050 #ifndef INVARIANTS
1051 #undef	_sx_assert
1052 #endif
1053 
1054 /*
1055  * In the non-WITNESS case, sx_assert() can only detect that at least
1056  * *some* thread owns an slock, but it cannot guarantee that *this*
1057  * thread owns an slock.
1058  */
1059 void
1060 _sx_assert(const struct sx *sx, int what, const char *file, int line)
1061 {
1062 #ifndef WITNESS
1063 	int slocked = 0;
1064 #endif
1065 
1066 	if (panicstr != NULL)
1067 		return;
1068 	switch (what) {
1069 	case SA_SLOCKED:
1070 	case SA_SLOCKED | SA_NOTRECURSED:
1071 	case SA_SLOCKED | SA_RECURSED:
1072 #ifndef WITNESS
1073 		slocked = 1;
1074 		/* FALLTHROUGH */
1075 #endif
1076 	case SA_LOCKED:
1077 	case SA_LOCKED | SA_NOTRECURSED:
1078 	case SA_LOCKED | SA_RECURSED:
1079 #ifdef WITNESS
1080 		witness_assert(&sx->lock_object, what, file, line);
1081 #else
1082 		/*
1083 		 * If some other thread has an exclusive lock or we
1084 		 * have one and are asserting a shared lock, fail.
1085 		 * Also, if no one has a lock at all, fail.
1086 		 */
1087 		if (sx->sx_lock == SX_LOCK_UNLOCKED ||
1088 		    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
1089 		    sx_xholder(sx) != curthread)))
1090 			panic("Lock %s not %slocked @ %s:%d\n",
1091 			    sx->lock_object.lo_name, slocked ? "share " : "",
1092 			    file, line);
1093 
1094 		if (!(sx->sx_lock & SX_LOCK_SHARED)) {
1095 			if (sx_recursed(sx)) {
1096 				if (what & SA_NOTRECURSED)
1097 					panic("Lock %s recursed @ %s:%d\n",
1098 					    sx->lock_object.lo_name, file,
1099 					    line);
1100 			} else if (what & SA_RECURSED)
1101 				panic("Lock %s not recursed @ %s:%d\n",
1102 				    sx->lock_object.lo_name, file, line);
1103 		}
1104 #endif
1105 		break;
1106 	case SA_XLOCKED:
1107 	case SA_XLOCKED | SA_NOTRECURSED:
1108 	case SA_XLOCKED | SA_RECURSED:
1109 		if (sx_xholder(sx) != curthread)
1110 			panic("Lock %s not exclusively locked @ %s:%d\n",
1111 			    sx->lock_object.lo_name, file, line);
1112 		if (sx_recursed(sx)) {
1113 			if (what & SA_NOTRECURSED)
1114 				panic("Lock %s recursed @ %s:%d\n",
1115 				    sx->lock_object.lo_name, file, line);
1116 		} else if (what & SA_RECURSED)
1117 			panic("Lock %s not recursed @ %s:%d\n",
1118 			    sx->lock_object.lo_name, file, line);
1119 		break;
1120 	case SA_UNLOCKED:
1121 #ifdef WITNESS
1122 		witness_assert(&sx->lock_object, what, file, line);
1123 #else
1124 		/*
1125 		 * If we hold an exclusive lock, fail.  We can't
1126 		 * reliably check to see if we hold a shared lock or
1127 		 * not.
1128 		 */
1129 		if (sx_xholder(sx) == curthread)
1130 			panic("Lock %s exclusively locked @ %s:%d\n",
1131 			    sx->lock_object.lo_name, file, line);
1132 #endif
1133 		break;
1134 	default:
1135 		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
1136 		    line);
1137 	}
1138 }
1139 #endif	/* INVARIANT_SUPPORT */
1140 
1141 #ifdef DDB
1142 static void
1143 db_show_sx(const struct lock_object *lock)
1144 {
1145 	struct thread *td;
1146 	const struct sx *sx;
1147 
1148 	sx = (const struct sx *)lock;
1149 
1150 	db_printf(" state: ");
1151 	if (sx->sx_lock == SX_LOCK_UNLOCKED)
1152 		db_printf("UNLOCKED\n");
1153 	else if (sx->sx_lock == SX_LOCK_DESTROYED) {
1154 		db_printf("DESTROYED\n");
1155 		return;
1156 	} else if (sx->sx_lock & SX_LOCK_SHARED)
1157 		db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
1158 	else {
1159 		td = sx_xholder(sx);
1160 		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1161 		    td->td_tid, td->td_proc->p_pid, td->td_name);
1162 		if (sx_recursed(sx))
1163 			db_printf(" recursed: %d\n", sx->sx_recurse);
1164 	}
1165 
1166 	db_printf(" waiters: ");
1167 	switch (sx->sx_lock &
1168 	    (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
1169 	case SX_LOCK_SHARED_WAITERS:
1170 		db_printf("shared\n");
1171 		break;
1172 	case SX_LOCK_EXCLUSIVE_WAITERS:
1173 		db_printf("exclusive\n");
1174 		break;
1175 	case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
1176 		db_printf("exclusive and shared\n");
1177 		break;
1178 	default:
1179 		db_printf("none\n");
1180 	}
1181 }
1182 
1183 /*
1184  * Check to see if a thread that is blocked on a sleep queue is actually
1185  * blocked on an sx lock.  If so, output some details and return true.
1186  * If the lock has an exclusive owner, return that in *ownerp.
1187  */
1188 int
1189 sx_chain(struct thread *td, struct thread **ownerp)
1190 {
1191 	struct sx *sx;
1192 
1193 	/*
1194 	 * Check to see if this thread is blocked on an sx lock.
1195 	 * First, we check the lock class.  If that is ok, then we
1196 	 * compare the lock name against the wait message.
1197 	 */
1198 	sx = td->td_wchan;
1199 	if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
1200 	    sx->lock_object.lo_name != td->td_wmesg)
1201 		return (0);
1202 
1203 	/* We think we have an sx lock, so output some details. */
1204 	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
1205 	*ownerp = sx_xholder(sx);
1206 	if (sx->sx_lock & SX_LOCK_SHARED)
1207 		db_printf("SLOCK (count %ju)\n",
1208 		    (uintmax_t)SX_SHARERS(sx->sx_lock));
1209 	else
1210 		db_printf("XLOCK\n");
1211 	return (1);
1212 }
1213 #endif
1214