xref: /freebsd/sys/kern/kern_sx.c (revision 99429157e8615dc3b7f11afbe3ed92de7476a5db)
/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
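
/*
 * Illustrative usage sketch (not part of this file's logic; "foo_lock"
 * is a hypothetical name).  Readers take a shared lock, writers an
 * exclusive one:
 *
 *	struct sx foo_lock;
 *
 *	sx_init(&foo_lock, "foo lock");
 *
 *	sx_slock(&foo_lock);		shared (read) access
 *	...
 *	sx_sunlock(&foo_lock);
 *
 *	sx_xlock(&foo_lock);		exclusive (write) access
 *	...
 *	sx_xunlock(&foo_lock);
 */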

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_sx.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#define	ADAPTIVE_SX
#endif

CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/* Handy macros for sleep queues. */
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

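/*
 * Orientation note (descriptive summary only; see sys/sx.h for the
 * authoritative encoding): sx_lock is a single word.  With
 * SX_LOCK_SHARED set it encodes the count of shared holders (see
 * SX_SHARERS()); otherwise it holds the owning thread pointer, with
 * the waiters/recursed state kept in the low flag bits.
 */
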
/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define	GIANT_DECLARE							\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant)					\

#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_giantcnt++;					\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define GIANT_RESTORE() do {						\
	if (_giantcnt > 0) {						\
		mtx_assert(&Giant, MA_NOTOWNED);			\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)

/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define	sx_recursed(sx)		((sx)->sx_recurse != 0)

static void	assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_sx(const struct lock_object *lock);
#endif
static void	lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
	.lc_name = "sx",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_sx,
#ifdef DDB
	.lc_ddb_show = db_show_sx,
#endif
	.lc_lock = lock_sx,
	.lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_sx,
#endif
};

#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif

#ifdef ADAPTIVE_SX
static u_int asx_retries = 10;
static u_int asx_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");

static struct lock_delay_config __read_mostly sx_delay;

SYSCTL_INT(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base,
    0, "");
SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(sx_delay);
#endif

void
assert_sx(const struct lock_object *lock, int what)
{

	sx_assert((const struct sx *)lock, what);
}

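/*
 * lock_sx() and unlock_sx() implement the generic lc_lock/lc_unlock
 * interface used when a lock must be dropped around a sleep and later
 * reacquired.  The value unlock_sx() returns (1 for shared, 0 for
 * exclusive) is handed back to lock_sx() as "how".  (Descriptive note
 * only; the contract itself is defined by struct lock_class.)
 */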
void
lock_sx(struct lock_object *lock, uintptr_t how)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	if (how)
		sx_slock(sx);
	else
		sx_xlock(sx);
}

uintptr_t
unlock_sx(struct lock_object *lock)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
	if (sx_xlocked(sx)) {
		sx_xunlock(sx);
		return (0);
	} else {
		sx_sunlock(sx);
		return (1);
	}
}

#ifdef KDTRACE_HOOKS
int
owner_sx(const struct lock_object *lock, struct thread **owner)
{
	const struct sx *sx;
	uintptr_t x;

	sx = (const struct sx *)lock;
	x = sx->sx_lock;
	*owner = NULL;
	return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
	    ((*owner = (struct thread *)SX_OWNER(x)) != NULL));
}
#endif

void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
}

void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
	int flags;

	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
	    SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
	    &sx->sx_lock));

	flags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (opts & SX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & SX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & SX_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & SX_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & SX_QUIET)
		flags |= LO_QUIET;
	if (opts & SX_NEW)
		flags |= LO_NEW;

	flags |= opts & SX_NOADAPTIVE;
	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
	sx->sx_lock = SX_LOCK_UNLOCKED;
	sx->sx_recurse = 0;
}
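
/*
 * Illustrative initialization sketch ("foo_lock" is hypothetical):
 *
 *	static struct sx foo_lock;
 *
 *	sx_init(&foo_lock, "foo lock");			default options
 *	sx_init_flags(&foo_lock, "foo lock", SX_RECURSE);  allow recursion
 *
 * A lock can also be set up automatically at boot via
 * SX_SYSINIT(foo_lock_init, &foo_lock, "foo lock"), which routes
 * through sx_sysinit() above.
 */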

void
sx_destroy(struct sx *sx)
{

	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
	sx->sx_lock = SX_LOCK_DESTROYED;
	lock_destroy(&sx->lock_object);
}

int
sx_try_slock_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));

	x = sx->sx_lock;
	for (;;) {
		KASSERT(x != SX_LOCK_DESTROYED,
		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
		if (!(x & SX_LOCK_SHARED))
			break;
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			return (1);
		}
	}

	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
	return (0);
}
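
/*
 * Illustrative use of the try variant ("foo_lock" is hypothetical):
 *
 *	if (sx_try_slock(&foo_lock)) {
 *		... read shared state ...
 *		sx_sunlock(&foo_lock);
 *	} else {
 *		... could not get the lock without blocking ...
 *	}
 */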

int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
	uintptr_t tid, x;
	int error = 0;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	tid = (uintptr_t)curthread;
	x = SX_LOCK_UNLOCKED;
	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
		error = _sx_xlock_hard(sx, x, tid, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    0, 0, file, line, LOCKSTAT_WRITER);
	if (!error) {
		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
		    file, line);
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_INC(curthread);
	}

	return (error);
}

int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{
	struct thread *td;
	uintptr_t tid, x;
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

	rval = 1;
	recursed = false;
	x = SX_LOCK_UNLOCKED;
	for (;;) {
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
			break;
		if (x == SX_LOCK_UNLOCKED)
			continue;
		if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
			sx->sx_recurse++;
			atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
			break;
		}
		rval = 0;
		break;
	}

	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}

	return (rval);
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
	    line);
#if LOCK_DEBUG > 0
	_sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
#else
	__sx_xunlock(sx, curthread, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}

/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
393  */
394 int
395 sx_try_upgrade_(struct sx *sx, const char *file, int line)
396 {
397 	uintptr_t x;
398 	int success;
399 
400 	if (SCHEDULER_STOPPED())
401 		return (1);
402 
403 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
404 	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
405 	_sx_assert(sx, SA_SLOCKED, file, line);
406 
407 	/*
408 	 * Try to switch from one shared lock to an exclusive lock.  We need
409 	 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
410 	 * we will wake up the exclusive waiters when we drop the lock.
411 	 */
412 	x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
413 	success = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
414 	    (uintptr_t)curthread | x);
415 	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
416 	if (success) {
417 		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
418 		    file, line);
419 		LOCKSTAT_RECORD0(sx__upgrade, sx);
420 	}
421 	return (success);
422 }
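
/*
 * A common upgrade pattern (illustrative; "foo_lock" and the condition
 * are hypothetical): read under the shared lock and upgrade only when
 * a modification turns out to be necessary.
 *
 *	sx_slock(&foo_lock);
 *	if (update_needed && !sx_try_upgrade(&foo_lock)) {
 *		sx_sunlock(&foo_lock);
 *		sx_xlock(&foo_lock);
 *		... recheck: state may have changed while unlocked ...
 *	}
 */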

/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
sx_downgrade_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (sx_recursed(sx))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

	/*
	 * Try to switch from an exclusive lock with no shared waiters
	 * to one sharer with no shared waiters.  If there are
	 * exclusive waiters, we don't need to lock the sleep queue so
	 * long as we preserve the flag.  We do one quick try and if
	 * that fails we grab the sleepq lock to keep the flags from
	 * changing and do it the slow way.
	 *
	 * We have to lock the sleep queue if there are shared waiters
	 * so we can wake them up.
	 */
	x = sx->sx_lock;
	if (!(x & SX_LOCK_SHARED_WAITERS) &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
		LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
		return;
	}

	/*
	 * Lock the sleep queue so we can read the waiters bits
	 * without any races and wakeup any shared waiters.
	 */
	sleepq_lock(&sx->lock_object);

	/*
	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
	 * shared lock.  If there are any shared waiters, wake them up.
	 */
	wakeup_swapper = 0;
	x = sx->sx_lock;
	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
	if (x & SX_LOCK_SHARED_WAITERS)
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_SHARED_QUEUE);
	sleepq_release(&sx->lock_object);

	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(sx__downgrade, sx);

	if (wakeup_swapper)
		kick_proc0();
}
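
/*
 * Downgrading lets a writer publish an update and keep reading without
 * a window in which the lock is fully released (illustrative sketch;
 * "foo_lock" is hypothetical):
 *
 *	sx_xlock(&foo_lock);
 *	... modify state ...
 *	sx_downgrade(&foo_lock);
 *	... continue reading; other readers may now enter ...
 *	sx_sunlock(&foo_lock);
 */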

/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t tid, int opts,
    const char *file, int line)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef	KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

	if (__predict_false(x == SX_LOCK_UNLOCKED))
		x = SX_READ_VALUE(sx);

	/* If we already hold an exclusive lock, then recurse. */
	if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
		    sx->lock_object.lo_name, file, line));
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
		return (0);
	}

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&sx->lock_object);
	state = x;
#endif
	for (;;) {
		if (x == SX_LOCK_UNLOCKED) {
			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);
#ifdef ADAPTIVE_SX
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			if ((x & SX_LOCK_SHARED) == 0) {
				owner = lv_sx_owner(x);
				if (TD_IS_RUNNING(owner)) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
						    __func__, sx, owner);
					KTR_STATE1(KTR_SCHED, "thread",
					    sched_tdname(curthread), "spinning",
					    "lockname:\"%s\"",
					    sx->lock_object.lo_name);
					GIANT_SAVE();
					do {
						lock_delay(&lda);
						x = SX_READ_VALUE(sx);
						owner = lv_sx_owner(x);
					} while (owner != NULL &&
						    TD_IS_RUNNING(owner));
					KTR_STATE0(KTR_SCHED, "thread",
					    sched_tdname(curthread), "running");
					continue;
				}
			} else if (SX_SHARERS(x) && spintries < asx_retries) {
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < asx_loops; i++) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, sx, spintries, i);
					x = sx->sx_lock;
					if ((x & SX_LOCK_SHARED) == 0 ||
					    SX_SHARERS(x) == 0)
						break;
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					lda.spin_cnt++;
#endif
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				x = SX_READ_VALUE(sx);
				if (i != asx_loops)
					continue;
			}
		}
#endif

		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);

		/*
		 * If the lock was released while spinning on the
		 * sleep queue chain lock, try again.
		 */
		if (x == SX_LOCK_UNLOCKED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the sleep queue
		 * chain lock.  If so, drop the sleep queue lock and try
		 * again.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
		}
#endif

		/*
		 * If an exclusive lock was released with both shared
		 * and exclusive waiters and a shared waiter hasn't
		 * woken up and acquired the lock yet, sx_lock will be
		 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
		 * If we see that value, try to acquire it once.  Note
		 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
		 * as there are other exclusive waiters still.  If we
		 * fail, restart the loop.
		 */
		if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (atomic_cmpset_acq_ptr(&sx->sx_lock,
			    SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
			    tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
				sleepq_release(&sx->lock_object);
				CTR2(KTR_LOCK, "%s: %p claimed by new writer",
				    __func__, sx);
				break;
			}
			sleepq_release(&sx->lock_object);
			x = SX_READ_VALUE(sx);
			continue;
		}

		/*
		 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we
		 * fail, then loop back and retry.
		 */
		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
			    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
				sleepq_release(&sx->lock_object);
				x = SX_READ_VALUE(sx);
				continue;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we have
		 * to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE();
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
	if (!error)
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_WRITER);
	GIANT_RESTORE();
	return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
{
	uintptr_t x;
	int queue, wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	MPASS(!(sx->sx_lock & SX_LOCK_SHARED));

	x = SX_READ_VALUE(sx);
	if (x & SX_LOCK_RECURSED) {
		/* The lock is recursed, unrecurse one level. */
		if ((--sx->sx_recurse) == 0)
			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
	if (x == tid &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
		return;

	MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
	    SX_LOCK_EXCLUSIVE_WAITERS));
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

	sleepq_lock(&sx->lock_object);
	x = SX_LOCK_UNLOCKED;

	/*
	 * The wake up algorithm here is quite simple and probably not
	 * ideal.  It gives precedence to shared waiters if they are
	 * present.  For this condition, we have to preserve the
	 * state of the exclusive waiters flag.
	 * If interruptible sleeps left the shared queue empty, avoid
	 * starvation of the threads sleeping on the exclusive queue by
	 * giving them precedence and clearing the shared waiters bit anyway.
	 */
	if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
	    sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
		queue = SQ_SHARED_QUEUE;
		x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
	} else
		queue = SQ_EXCLUSIVE_QUEUE;

	/* Wake up all the waiters for the specific queue. */
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
	atomic_store_rel_ptr(&sx->sx_lock, x);
	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
	    queue);
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
}

static bool __always_inline
__sx_slock_try(struct sx *sx, uintptr_t *xp, const char *file, int line)
{

	/*
	 * If no other thread has an exclusive lock then try to bump up
	 * the count of sharers.  Since we have to preserve the state
	 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
	 * shared lock loop back and retry.
	 */
	while (*xp & SX_LOCK_SHARED) {
		MPASS(!(*xp & SX_LOCK_SHARED_WAITERS));
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
		    *xp + SX_ONE_SHARER)) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR4(KTR_LOCK, "%s: %p succeed %p -> %p",
				    __func__, sx, (void *)*xp,
				    (void *)(*xp + SX_ONE_SHARER));
			return (true);
		}
	}
	return (false);
}

static int __noinline
_sx_slock_hard(struct sx *sx, int opts, const char *file, int line, uintptr_t x)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&sx->lock_object);
	state = x;
#endif

	/*
	 * As with rwlocks, we don't make any attempt to try to block
	 * shared locks once there is an exclusive waiter.
	 */
	for (;;) {
		if (__sx_slock_try(sx, &x, file, line))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif

#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = lv_sx_owner(x);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE();
				do {
					lock_delay(&lda);
					x = SX_READ_VALUE(sx);
					owner = lv_sx_owner(x);
				} while (owner != NULL && TD_IS_RUNNING(owner));
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		}
#endif

		/*
		 * Some other thread already has an exclusive lock, so
		 * start the process of blocking.
		 */
		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);

		/*
		 * The lock could have been released while we spun.
		 * In this case loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				x = SX_READ_VALUE(sx);
				continue;
			}
		}
#endif

		/*
		 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
		 * fail to set it drop the sleep queue lock and loop
		 * back.
		 */
		if (!(x & SX_LOCK_SHARED_WAITERS)) {
			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
			    x | SX_LOCK_SHARED_WAITERS)) {
				sleepq_release(&sx->lock_object);
				x = SX_READ_VALUE(sx);
				continue;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the shared lock,
		 * we have to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE();
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
	if (error == 0) {
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_READER);
	}
	GIANT_RESTORE();
	return (error);
}

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{
	uintptr_t x;
	int error;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);

	error = 0;
	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__acquire) ||
	    !__sx_slock_try(sx, &x, file, line)))
		error = _sx_slock_hard(sx, opts, file, line, x);
	if (error == 0) {
		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
		WITNESS_LOCK(&sx->lock_object, 0, file, line);
		TD_LOCKS_INC(curthread);
	}
	return (error);
}

static bool __always_inline
_sx_sunlock_try(struct sx *sx, uintptr_t *xp)
{

	for (;;) {
		/*
		 * We should never have shared waiters while at least one
		 * thread holds a shared lock.
		 */
		KASSERT(!(*xp & SX_LOCK_SHARED_WAITERS),
		    ("%s: waiting sharers", __func__));

		/*
		 * See if there is more than one shared lock held.  If
		 * so, just drop one and return.
		 */
		if (SX_SHARERS(*xp) > 1) {
			if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
			    *xp - SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, sx, (void *)*xp,
					    (void *)(*xp - SX_ONE_SHARER));
				return (true);
			}
			continue;
		}

		/*
		 * If there aren't any waiters for an exclusive lock,
		 * then try to drop it quickly.
		 */
		if (!(*xp & SX_LOCK_EXCLUSIVE_WAITERS)) {
			MPASS(*xp == SX_SHARERS_LOCK(1));
			*xp = SX_SHARERS_LOCK(1);
			if (atomic_fcmpset_rel_ptr(&sx->sx_lock,
			    xp, SX_LOCK_UNLOCKED)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, sx);
				return (true);
			}
			continue;
		}
		break;
	}
	return (false);
}

static void __noinline
_sx_sunlock_hard(struct sx *sx, uintptr_t x, const char *file, int line)
{
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);

	for (;;) {
		if (_sx_sunlock_try(sx, &x))
			break;

		/*
		 * At this point, there should just be one sharer with
		 * exclusive waiters.
		 */
		MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

		sleepq_lock(&sx->lock_object);

		/*
		 * The wakeup semantic here is simple: just wake up all
		 * the exclusive waiters.  Note that the state of the
		 * lock could have changed, so if the cmpset fails, loop
		 * back and retry.
		 */
		if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
		    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
		    SX_LOCK_UNLOCKED)) {
			sleepq_release(&sx->lock_object);
			x = SX_READ_VALUE(sx);
			continue;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p waking up all threads on "
			    "exclusive queue", __func__, sx);
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_EXCLUSIVE_QUEUE);
		sleepq_release(&sx->lock_object);
		if (wakeup_swapper)
			kick_proc0();
		break;
	}
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{
	uintptr_t x;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);

	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__release) ||
	    !_sx_sunlock_try(sx, &x)))
		_sx_sunlock_hard(sx, x, file, line);

	TD_LOCKS_DEC(curthread);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(const struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
	int slocked = 0;
#endif

	if (panicstr != NULL)
		return;
	switch (what) {
	case SA_SLOCKED:
	case SA_SLOCKED | SA_NOTRECURSED:
	case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
		slocked = 1;
		/* FALLTHROUGH */
#endif
	case SA_LOCKED:
	case SA_LOCKED | SA_NOTRECURSED:
	case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If some other thread has an exclusive lock or we
		 * have one and are asserting a shared lock, fail.
		 * Also, if no one has a lock at all, fail.
		 */
		if (sx->sx_lock == SX_LOCK_UNLOCKED ||
		    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
		    sx_xholder(sx) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    sx->lock_object.lo_name, slocked ? "share " : "",
			    file, line);

		if (!(sx->sx_lock & SX_LOCK_SHARED)) {
			if (sx_recursed(sx)) {
				if (what & SA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    sx->lock_object.lo_name, file,
					    line);
			} else if (what & SA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		}
#endif
		break;
	case SA_XLOCKED:
	case SA_XLOCKED | SA_NOTRECURSED:
	case SA_XLOCKED | SA_RECURSED:
		if (sx_xholder(sx) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		if (sx_recursed(sx)) {
			if (what & SA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		} else if (what & SA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		break;
	case SA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If we hold an exclusive lock, fail.  We can't
		 * reliably check to see if we hold a shared lock or
		 * not.
		 */
		if (sx_xholder(sx) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif	/* INVARIANT_SUPPORT */

#ifdef DDB
static void
db_show_sx(const struct lock_object *lock)
{
	struct thread *td;
	const struct sx *sx;

	sx = (const struct sx *)lock;

	db_printf(" state: ");
	if (sx->sx_lock == SX_LOCK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (sx->sx_lock == SX_LOCK_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
	else {
		td = sx_xholder(sx);
		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (sx_recursed(sx))
			db_printf(" recursed: %d\n", sx->sx_recurse);
	}

	db_printf(" waiters: ");
	switch (sx->sx_lock &
	    (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
	case SX_LOCK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive and shared\n");
		break;
	default:
		db_printf("none\n");
	}
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
	struct sx *sx;

	/*
	 * Check to see if this thread is blocked on an sx lock.
	 * First, we check the lock class.  If that is ok, then we
	 * compare the lock name against the wait message.
	 */
	sx = td->td_wchan;
	if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
	    sx->lock_object.lo_name != td->td_wmesg)
		return (0);

	/* We think we have an sx lock, so output some details. */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	*ownerp = sx_xholder(sx);
	if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK (count %ju)\n",
		    (uintmax_t)SX_SHARERS(sx->sx_lock));
	else
		db_printf("XLOCK\n");
	return (1);
}
#endif