xref: /freebsd/sys/kern/kern_sx.c (revision d0ba1baed3f6e4936a0c1b89c25f6c59168ef6de)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
5  * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice(s), this list of conditions and the following disclaimer as
13  *    the first lines of this file unmodified other than the possible
14  *    addition of one or more copyright notices.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice(s), this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
20  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
23  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
29  * DAMAGE.
30  */
31 
32 /*
33  * Shared/exclusive locks.  This implementation attempts to ensure
34  * deterministic lock granting behavior, so that slocks and xlocks are
35  * interleaved.
36  *
37  * Priority propagation will not generally raise the priority of lock holders,
38  * so should not be relied upon in combination with sx locks.
39  */
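/*
 * A minimal usage sketch (illustrative only; `data_lock' and the reader
 * and writer bodies are hypothetical):
 *
 *	static struct sx data_lock;
 *	sx_init(&data_lock, "data lock");
 *
 *	sx_slock(&data_lock);		/- many readers may hold this at once
 *	... read shared data ...
 *	sx_sunlock(&data_lock);
 *
 *	sx_xlock(&data_lock);		/- a writer holds it exclusively
 *	... modify shared data ...
 *	sx_xunlock(&data_lock);
 */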
40 
41 #include "opt_ddb.h"
42 #include "opt_hwpmc_hooks.h"
43 #include "opt_no_adaptive_sx.h"
44 
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kdb.h>
51 #include <sys/kernel.h>
52 #include <sys/ktr.h>
53 #include <sys/lock.h>
54 #include <sys/mutex.h>
55 #include <sys/proc.h>
56 #include <sys/sched.h>
57 #include <sys/sleepqueue.h>
58 #include <sys/sx.h>
59 #include <sys/smp.h>
60 #include <sys/sysctl.h>
61 
62 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
63 #include <machine/cpu.h>
64 #endif
65 
66 #ifdef DDB
67 #include <ddb/ddb.h>
68 #endif
69 
70 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
71 #define	ADAPTIVE_SX
72 #endif
73 
74 CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);
75 
76 #ifdef HWPMC_HOOKS
77 #include <sys/pmckern.h>
78 PMC_SOFT_DECLARE( , , lock, failed);
79 #endif
80 
81 /* Handy macros for sleep queues. */
82 #define	SQ_EXCLUSIVE_QUEUE	0
83 #define	SQ_SHARED_QUEUE		1
84 
85 /*
86  * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
87  * drop Giant any time we have to sleep or adaptively spin.
88  */
89 #define	GIANT_DECLARE							\
90 	int _giantcnt = 0;						\
91 	WITNESS_SAVE_DECL(Giant)
92 
93 #define	GIANT_SAVE(work) do {						\
94 	if (__predict_false(mtx_owned(&Giant))) {			\
95 		work++;							\
96 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
97 		while (mtx_owned(&Giant)) {				\
98 			_giantcnt++;					\
99 			mtx_unlock(&Giant);				\
100 		}							\
101 	}								\
102 } while (0)
103 
104 #define GIANT_RESTORE() do {						\
105 	if (_giantcnt > 0) {						\
106 		mtx_assert(&Giant, MA_NOTOWNED);			\
107 		while (_giantcnt--)					\
108 			mtx_lock(&Giant);				\
109 		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
110 	}								\
111 } while (0)
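/*
 * Sketch of how the three macros above pair up in the hard-lock paths
 * later in this file: GIANT_SAVE() fully releases a (possibly recursed)
 * Giant before sleeping or spinning, and GIANT_RESTORE() reacquires it
 * to the recorded recursion depth.
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE(extra_work);
 *	... sleep or adaptively spin ...
 *	GIANT_RESTORE();
 */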
112 
113 /*
114  * Returns true if an exclusive lock is recursed.  It assumes
115  * curthread currently has an exclusive lock.
116  */
117 #define	sx_recursed(sx)		((sx)->sx_recurse != 0)
118 
119 static void	assert_sx(const struct lock_object *lock, int what);
120 #ifdef DDB
121 static void	db_show_sx(const struct lock_object *lock);
122 #endif
123 static void	lock_sx(struct lock_object *lock, uintptr_t how);
124 #ifdef KDTRACE_HOOKS
125 static int	owner_sx(const struct lock_object *lock, struct thread **owner);
126 #endif
127 static uintptr_t unlock_sx(struct lock_object *lock);
128 
129 struct lock_class lock_class_sx = {
130 	.lc_name = "sx",
131 	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
132 	.lc_assert = assert_sx,
133 #ifdef DDB
134 	.lc_ddb_show = db_show_sx,
135 #endif
136 	.lc_lock = lock_sx,
137 	.lc_unlock = unlock_sx,
138 #ifdef KDTRACE_HOOKS
139 	.lc_owner = owner_sx,
140 #endif
141 };
142 
143 #ifndef INVARIANTS
144 #define	_sx_assert(sx, what, file, line)
145 #endif
146 
147 #ifdef ADAPTIVE_SX
148 static __read_frequently u_int asx_retries;
149 static __read_frequently u_int asx_loops;
150 static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
151 SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
152 SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
153 
154 static struct lock_delay_config __read_frequently sx_delay;
155 
156 SYSCTL_INT(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base,
157     0, "");
158 SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
159     0, "");
160 
161 static void
162 sx_lock_delay_init(void *arg __unused)
163 {
164 
165 	lock_delay_default_init(&sx_delay);
166 	asx_retries = 10;
167 	asx_loops = max(10000, sx_delay.max);
168 }
169 LOCK_DELAY_SYSINIT(sx_lock_delay_init);
170 #endif
171 
172 void
173 assert_sx(const struct lock_object *lock, int what)
174 {
175 
176 	sx_assert((const struct sx *)lock, what);
177 }
178 
179 void
180 lock_sx(struct lock_object *lock, uintptr_t how)
181 {
182 	struct sx *sx;
183 
184 	sx = (struct sx *)lock;
185 	if (how)
186 		sx_slock(sx);
187 	else
188 		sx_xlock(sx);
189 }
190 
191 uintptr_t
192 unlock_sx(struct lock_object *lock)
193 {
194 	struct sx *sx;
195 
196 	sx = (struct sx *)lock;
197 	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
198 	if (sx_xlocked(sx)) {
199 		sx_xunlock(sx);
200 		return (0);
201 	} else {
202 		sx_sunlock(sx);
203 		return (1);
204 	}
205 }
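/*
 * Note the `how' encoding shared by lock_sx() and unlock_sx(): nonzero
 * means a shared hold, zero an exclusive one.  A sketch of the intended
 * round trip through the lock class, as done by sleep(9)-style consumers:
 *
 *	how = LOCK_CLASS(lock)->lc_unlock(lock);
 *	... sleep ...
 *	LOCK_CLASS(lock)->lc_lock(lock, how);
 */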
206 
207 #ifdef KDTRACE_HOOKS
208 int
209 owner_sx(const struct lock_object *lock, struct thread **owner)
210 {
211 	const struct sx *sx;
212 	uintptr_t x;
213 
214 	sx = (const struct sx *)lock;
215 	x = sx->sx_lock;
216 	*owner = NULL;
217 	return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
218 	    ((*owner = (struct thread *)SX_OWNER(x)) != NULL));
219 }
220 #endif
221 
222 void
223 sx_sysinit(void *arg)
224 {
225 	struct sx_args *sargs = arg;
226 
227 	sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
228 }
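/*
 * Subsystems normally reach sx_sysinit() through the SX_SYSINIT() macro
 * from sys/sx.h.  A sketch (the `foo_lock' name is hypothetical):
 *
 *	static struct sx foo_lock;
 *	SX_SYSINIT(foo_lock_init, &foo_lock, "foo lock");
 */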
229 
230 void
231 sx_init_flags(struct sx *sx, const char *description, int opts)
232 {
233 	int flags;
234 
235 	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
236 	    SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0);
237 	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
238 	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
239 	    &sx->sx_lock));
240 
241 	flags = LO_SLEEPABLE | LO_UPGRADABLE;
242 	if (opts & SX_DUPOK)
243 		flags |= LO_DUPOK;
244 	if (opts & SX_NOPROFILE)
245 		flags |= LO_NOPROFILE;
246 	if (!(opts & SX_NOWITNESS))
247 		flags |= LO_WITNESS;
248 	if (opts & SX_RECURSE)
249 		flags |= LO_RECURSABLE;
250 	if (opts & SX_QUIET)
251 		flags |= LO_QUIET;
252 	if (opts & SX_NEW)
253 		flags |= LO_NEW;
254 
255 	flags |= opts & SX_NOADAPTIVE;
256 	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
257 	sx->sx_lock = SX_LOCK_UNLOCKED;
258 	sx->sx_recurse = 0;
259 }
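/*
 * Illustrative initialization with options (the softc here is
 * hypothetical): a lock that may be recursed on and is exempt from
 * witness(4) checking.
 *
 *	sx_init_flags(&sc->sc_lock, "foo softc", SX_RECURSE | SX_NOWITNESS);
 */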
260 
261 void
262 sx_destroy(struct sx *sx)
263 {
264 
265 	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
266 	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
267 	sx->sx_lock = SX_LOCK_DESTROYED;
268 	lock_destroy(&sx->lock_object);
269 }
270 
271 int
272 sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
273 {
274 	uintptr_t x;
275 
276 	if (SCHEDULER_STOPPED())
277 		return (1);
278 
279 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
280 	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
281 	    curthread, sx->lock_object.lo_name, file, line));
282 
283 	x = sx->sx_lock;
284 	for (;;) {
285 		KASSERT(x != SX_LOCK_DESTROYED,
286 		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
287 		if (!(x & SX_LOCK_SHARED))
288 			break;
289 		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
290 			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
291 			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
292 			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
293 			    sx, 0, 0, file, line, LOCKSTAT_READER);
294 			TD_LOCKS_INC(curthread);
295 			return (1);
296 		}
297 	}
298 
299 	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
300 	return (0);
301 }
302 
303 int
304 sx_try_slock_(struct sx *sx, const char *file, int line)
305 {
306 
307 	return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG));
308 }
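/*
 * A try-lock sketch (hypothetical caller): do other work instead of
 * blocking when the lock is currently write-held.
 *
 *	if (sx_try_slock(&foo_lock)) {
 *		... read shared state ...
 *		sx_sunlock(&foo_lock);
 *	} else {
 *		... defer and retry later ...
 *	}
 */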
309 
310 int
311 _sx_xlock(struct sx *sx, int opts, const char *file, int line)
312 {
313 	uintptr_t tid, x;
314 	int error = 0;
315 
316 	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
317 	    !TD_IS_IDLETHREAD(curthread),
318 	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
319 	    curthread, sx->lock_object.lo_name, file, line));
320 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
321 	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
322 	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
323 	    line, NULL);
324 	tid = (uintptr_t)curthread;
325 	x = SX_LOCK_UNLOCKED;
326 	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
327 		error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
328 	else
329 		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
330 		    0, 0, file, line, LOCKSTAT_WRITER);
331 	if (!error) {
332 		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
333 		    file, line);
334 		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
335 		TD_LOCKS_INC(curthread);
336 	}
337 
338 	return (error);
339 }
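/*
 * Sketch of the SX_INTERRUPTIBLE path (hypothetical caller): through the
 * sx_xlock_sig() wrapper the sleep may be aborted by a signal, in which
 * case an error is returned and the lock is not held.
 *
 *	error = sx_xlock_sig(&foo_lock);
 *	if (error != 0)
 *		return (error);
 *	...
 *	sx_xunlock(&foo_lock);
 */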
340 
341 int
342 sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
343 {
344 	struct thread *td;
345 	uintptr_t tid, x;
346 	int rval;
347 	bool recursed;
348 
349 	td = curthread;
350 	tid = (uintptr_t)td;
351 	if (SCHEDULER_STOPPED_TD(td))
352 		return (1);
353 
354 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
355 	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
356 	    curthread, sx->lock_object.lo_name, file, line));
357 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
358 	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
359 
360 	rval = 1;
361 	recursed = false;
362 	x = SX_LOCK_UNLOCKED;
363 	for (;;) {
364 		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
365 			break;
366 		if (x == SX_LOCK_UNLOCKED)
367 			continue;
368 		if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
369 			sx->sx_recurse++;
370 			atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
371 			break;
372 		}
373 		rval = 0;
374 		break;
375 	}
376 
377 	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
378 	if (rval) {
379 		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
380 		    file, line);
381 		if (!recursed)
382 			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
383 			    sx, 0, 0, file, line, LOCKSTAT_WRITER);
384 		TD_LOCKS_INC(curthread);
385 	}
386 
387 	return (rval);
388 }
389 
390 int
391 sx_try_xlock_(struct sx *sx, const char *file, int line)
392 {
393 
394 	return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG));
395 }
396 
397 void
398 _sx_xunlock(struct sx *sx, const char *file, int line)
399 {
400 
401 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
402 	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
403 	_sx_assert(sx, SA_XLOCKED, file, line);
404 	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
405 	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
406 	    line);
407 #if LOCK_DEBUG > 0
408 	_sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
409 #else
410 	__sx_xunlock(sx, curthread, file, line);
411 #endif
412 	TD_LOCKS_DEC(curthread);
413 }
414 
415 /*
416  * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
417  * This will only succeed if this thread holds a single shared lock.
418  * Return 1 if the upgrade succeeded, 0 otherwise.
419  */
420 int
421 sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
422 {
423 	uintptr_t x;
424 	uintptr_t waiters;
425 	int success;
426 
427 	if (SCHEDULER_STOPPED())
428 		return (1);
429 
430 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
431 	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
432 	_sx_assert(sx, SA_SLOCKED, file, line);
433 
434 	/*
435 	 * Try to switch from one shared lock to an exclusive lock.  We need
436 	 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
437 	 * we will wake up the exclusive waiters when we drop the lock.
438 	 */
439 	success = 0;
440 	x = SX_READ_VALUE(sx);
441 	for (;;) {
442 		if (SX_SHARERS(x) > 1)
443 			break;
444 		waiters = (x & SX_LOCK_EXCLUSIVE_WAITERS);
445 		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
446 		    (uintptr_t)curthread | waiters)) {
447 			success = 1;
448 			break;
449 		}
450 	}
451 	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
452 	if (success) {
453 		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
454 		    file, line);
455 		LOCKSTAT_RECORD0(sx__upgrade, sx);
456 	}
457 	return (success);
458 }
459 
460 int
461 sx_try_upgrade_(struct sx *sx, const char *file, int line)
462 {
463 
464 	return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG));
465 }
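/*
 * The classic upgrade pattern (sketch; `foo_lock' is hypothetical).
 * Since the upgrade fails whenever another sharer is present, callers
 * must be prepared to drop the lock, relock exclusively and revalidate:
 *
 *	sx_slock(&foo_lock);
 *	if (!sx_try_upgrade(&foo_lock)) {
 *		sx_sunlock(&foo_lock);
 *		sx_xlock(&foo_lock);
 *		... re-check state: it may have changed in between ...
 *	}
 *	... modify ...
 *	sx_xunlock(&foo_lock);
 */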
466 
467 /*
468  * Downgrade an unrecursed exclusive lock into a single shared lock.
469  */
470 void
471 sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
472 {
473 	uintptr_t x;
474 	int wakeup_swapper;
475 
476 	if (SCHEDULER_STOPPED())
477 		return;
478 
479 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
480 	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
481 	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
482 #ifndef INVARIANTS
483 	if (sx_recursed(sx))
484 		panic("downgrade of a recursed lock");
485 #endif
486 
487 	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);
488 
489 	/*
490 	 * Try to switch from an exclusive lock with no shared waiters
491 	 * to one sharer with no shared waiters.  If there are
492 	 * exclusive waiters, we don't need to lock the sleep queue so
493 	 * long as we preserve the flag.  We do one quick try and if
494 	 * that fails we grab the sleepq lock to keep the flags from
495 	 * changing and do it the slow way.
496 	 *
497 	 * We have to lock the sleep queue if there are shared waiters
498 	 * so we can wake them up.
499 	 */
500 	x = sx->sx_lock;
501 	if (!(x & SX_LOCK_SHARED_WAITERS) &&
502 	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
503 	    (x & SX_LOCK_EXCLUSIVE_WAITERS)))
504 		goto out;
505 
506 	/*
507 	 * Lock the sleep queue so we can read the waiters bits
508 	 * without any races and wakeup any shared waiters.
509 	 */
510 	sleepq_lock(&sx->lock_object);
511 
512 	/*
513 	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
514 	 * shared lock.  If there are any shared waiters, wake them up.
515 	 */
516 	wakeup_swapper = 0;
517 	x = sx->sx_lock;
518 	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
519 	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
520 	if (x & SX_LOCK_SHARED_WAITERS)
521 		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
522 		    0, SQ_SHARED_QUEUE);
523 	sleepq_release(&sx->lock_object);
524 
525 	if (wakeup_swapper)
526 		kick_proc0();
527 
528 out:
529 	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
530 	LOCKSTAT_RECORD0(sx__downgrade, sx);
531 }
532 
533 void
534 sx_downgrade_(struct sx *sx, const char *file, int line)
535 {
536 
537 	sx_downgrade_int(sx LOCK_FILE_LINE_ARG);
538 }
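/*
 * Downgrade sketch (hypothetical caller): take the lock exclusively to
 * initialize, then let other readers in while retaining read access.
 *
 *	sx_xlock(&foo_lock);
 *	... initialize ...
 *	sx_downgrade(&foo_lock);
 *	... read alongside other sharers ...
 *	sx_sunlock(&foo_lock);
 */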
539 
540 /*
541  * This function represents the so-called 'hard case' for sx_xlock
542  * operation.  All 'easy case' failures are redirected to this.  Note
543  * that ideally this would be a static function, but it needs to be
544  * accessible from at least sx.h.
545  */
546 int
547 _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
548 {
549 	GIANT_DECLARE;
550 	uintptr_t tid;
551 #ifdef ADAPTIVE_SX
552 	volatile struct thread *owner;
553 	u_int i, n, spintries = 0;
554 	enum { READERS, WRITER } sleep_reason = READERS;
555 	bool adaptive;
556 #endif
557 #ifdef LOCK_PROFILING
558 	uint64_t waittime = 0;
559 	int contested = 0;
560 #endif
561 	int error = 0;
562 #if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
563 	struct lock_delay_arg lda;
564 #endif
565 #ifdef	KDTRACE_HOOKS
566 	u_int sleep_cnt = 0;
567 	int64_t sleep_time = 0;
568 	int64_t all_time = 0;
569 #endif
570 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
571 	uintptr_t state = 0;
572 #endif
573 	int extra_work = 0;
574 
575 	tid = (uintptr_t)curthread;
576 
577 #ifdef KDTRACE_HOOKS
578 	if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
579 		while (x == SX_LOCK_UNLOCKED) {
580 			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
581 				goto out_lockstat;
582 		}
583 		extra_work = 1;
584 		all_time -= lockstat_nsecs(&sx->lock_object);
585 		state = x;
586 	}
587 #endif
588 #ifdef LOCK_PROFILING
589 	extra_work = 1;
590 	state = x;
591 #endif
592 
593 	if (SCHEDULER_STOPPED())
594 		return (0);
595 
596 #if defined(ADAPTIVE_SX)
597 	lock_delay_arg_init(&lda, &sx_delay);
598 #elif defined(KDTRACE_HOOKS)
599 	lock_delay_arg_init(&lda, NULL);
600 #endif
601 
602 	if (__predict_false(x == SX_LOCK_UNLOCKED))
603 		x = SX_READ_VALUE(sx);
604 
605 	/* If we already hold an exclusive lock, then recurse. */
606 	if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
607 		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
608 	    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
609 		    sx->lock_object.lo_name, file, line));
610 		sx->sx_recurse++;
611 		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
612 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
613 			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
614 		return (0);
615 	}
616 
617 	if (LOCK_LOG_TEST(&sx->lock_object, 0))
618 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
619 		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
620 
621 #ifdef ADAPTIVE_SX
622 	adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0);
623 #endif
624 
625 #ifdef HWPMC_HOOKS
626 	PMC_SOFT_CALL( , , lock, failed);
627 #endif
628 	lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
629 	    &waittime);
630 
631 #ifndef INVARIANTS
632 	GIANT_SAVE(extra_work);
633 #endif
634 
635 	for (;;) {
636 		if (x == SX_LOCK_UNLOCKED) {
637 			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
638 				break;
639 			continue;
640 		}
641 #ifdef INVARIANTS
642 		GIANT_SAVE(extra_work);
643 #endif
644 #ifdef KDTRACE_HOOKS
645 		lda.spin_cnt++;
646 #endif
647 #ifdef ADAPTIVE_SX
648 		if (__predict_false(!adaptive))
649 			goto sleepq;
650 		/*
651 		 * If the lock is write locked and the owner is
652 		 * running on another CPU, spin until the owner stops
653 		 * running or the state of the lock changes.
654 		 */
655 		if ((x & SX_LOCK_SHARED) == 0) {
656 			sleep_reason = WRITER;
657 			owner = lv_sx_owner(x);
658 			if (!TD_IS_RUNNING(owner))
659 				goto sleepq;
660 			if (LOCK_LOG_TEST(&sx->lock_object, 0))
661 				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
662 				    __func__, sx, owner);
663 			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
664 			    "spinning", "lockname:\"%s\"",
665 			    sx->lock_object.lo_name);
666 			do {
667 				lock_delay(&lda);
668 				x = SX_READ_VALUE(sx);
669 				owner = lv_sx_owner(x);
670 			} while (owner != NULL && TD_IS_RUNNING(owner));
671 			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
672 			    "running");
673 			continue;
674 		} else if (SX_SHARERS(x) > 0) {
675 			sleep_reason = READERS;
676 			if (spintries == asx_retries)
677 				goto sleepq;
678 			spintries++;
679 			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
680 			    "spinning", "lockname:\"%s\"",
681 			    sx->lock_object.lo_name);
682 			for (i = 0; i < asx_loops; i += n) {
683 				n = SX_SHARERS(x);
684 				lock_delay_spin(n);
685 				x = SX_READ_VALUE(sx);
686 				if ((x & SX_LOCK_SHARED) == 0 ||
687 				    SX_SHARERS(x) == 0)
688 					break;
689 			}
690 #ifdef KDTRACE_HOOKS
691 			lda.spin_cnt += i;
692 #endif
693 			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
694 			    "running");
695 			if (i < asx_loops)
696 				continue;
697 		}
698 sleepq:
699 #endif
700 		sleepq_lock(&sx->lock_object);
701 		x = SX_READ_VALUE(sx);
702 retry_sleepq:
703 
704 		/*
705 		 * If the lock was released while spinning on the
706 		 * sleep queue chain lock, try again.
707 		 */
708 		if (x == SX_LOCK_UNLOCKED) {
709 			sleepq_release(&sx->lock_object);
710 			continue;
711 		}
712 
713 #ifdef ADAPTIVE_SX
714 		/*
715 		 * The current lock owner might have started executing
716 		 * on another CPU (or the lock could have changed
717 		 * owners) while we were waiting on the sleep queue
718 		 * chain lock.  If so, drop the sleep queue lock and try
719 		 * again.
720 		 */
721 		if (adaptive) {
722 			if (!(x & SX_LOCK_SHARED)) {
723 				owner = (struct thread *)SX_OWNER(x);
724 				if (TD_IS_RUNNING(owner)) {
725 					sleepq_release(&sx->lock_object);
726 					continue;
727 				}
728 			} else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
729 				sleepq_release(&sx->lock_object);
730 				continue;
731 			}
732 		}
733 #endif
734 
735 		/*
736 		 * If an exclusive lock was released with both shared
737 		 * and exclusive waiters and a shared waiter hasn't
738 		 * woken up and acquired the lock yet, sx_lock will be
739 		 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
740 		 * If we see that value, try to acquire it once.  Note
741 		 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
742 		 * as there are other exclusive waiters still.  If we
743 		 * fail, restart the loop.
744 		 */
745 		if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
746 			if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
747 			    tid | SX_LOCK_EXCLUSIVE_WAITERS))
748 				goto retry_sleepq;
749 			sleepq_release(&sx->lock_object);
750 			CTR2(KTR_LOCK, "%s: %p claimed by new writer",
751 			    __func__, sx);
752 			break;
753 		}
754 
755 		/*
756 		 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we fail,
757 		 * then loop back and retry.
758 		 */
759 		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
760 			if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
761 			    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
762 				goto retry_sleepq;
763 			}
764 			if (LOCK_LOG_TEST(&sx->lock_object, 0))
765 				CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
766 				    __func__, sx);
767 		}
768 
769 		/*
770 		 * Since we have been unable to acquire the exclusive
771 		 * lock and the exclusive waiters flag is set, we have
772 		 * to sleep.
773 		 */
774 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
775 			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
776 			    __func__, sx);
777 
778 #ifdef KDTRACE_HOOKS
779 		sleep_time -= lockstat_nsecs(&sx->lock_object);
780 #endif
781 		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
782 		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
783 		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
784 		if (!(opts & SX_INTERRUPTIBLE))
785 			sleepq_wait(&sx->lock_object, 0);
786 		else
787 			error = sleepq_wait_sig(&sx->lock_object, 0);
788 #ifdef KDTRACE_HOOKS
789 		sleep_time += lockstat_nsecs(&sx->lock_object);
790 		sleep_cnt++;
791 #endif
792 		if (error) {
793 			if (LOCK_LOG_TEST(&sx->lock_object, 0))
794 				CTR2(KTR_LOCK,
795 			"%s: interruptible sleep by %p suspended by signal",
796 				    __func__, sx);
797 			break;
798 		}
799 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
800 			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
801 			    __func__, sx);
802 		x = SX_READ_VALUE(sx);
803 	}
804 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
805 	if (__predict_true(!extra_work))
806 		return (error);
807 #endif
808 #ifdef KDTRACE_HOOKS
809 	all_time += lockstat_nsecs(&sx->lock_object);
810 	if (sleep_time)
811 		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
812 		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
813 		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
814 	if (lda.spin_cnt > sleep_cnt)
815 		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
816 		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
817 		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
818 out_lockstat:
819 #endif
820 	if (!error)
821 		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
822 		    contested, waittime, file, line, LOCKSTAT_WRITER);
823 	GIANT_RESTORE();
824 	return (error);
825 }
826 
827 /*
828  * This function represents the so-called 'hard case' for sx_xunlock
829  * operation.  All 'easy case' failures are redirected to this.  Note
830  * that ideally this would be a static function, but it needs to be
831  * accessible from at least sx.h.
832  */
833 void
834 _sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
835 {
836 	uintptr_t tid, setx;
837 	int queue, wakeup_swapper;
838 
839 	if (SCHEDULER_STOPPED())
840 		return;
841 
842 	tid = (uintptr_t)curthread;
843 
844 	if (__predict_false(x == tid))
845 		x = SX_READ_VALUE(sx);
846 
847 	MPASS(!(x & SX_LOCK_SHARED));
848 
849 	if (__predict_false(x & SX_LOCK_RECURSED)) {
850 		/* The lock is recursed, unrecurse one level. */
851 		if ((--sx->sx_recurse) == 0)
852 			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
853 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
854 			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
855 		return;
856 	}
857 
858 	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
859 	if (x == tid &&
860 	    atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
861 		return;
862 
863 	if (LOCK_LOG_TEST(&sx->lock_object, 0))
864 		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);
865 
866 	sleepq_lock(&sx->lock_object);
867 	x = SX_READ_VALUE(sx);
868 	MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS));
869 
870 	/*
871 	 * The wakeup algorithm here is quite simple and probably not
872 	 * ideal.  It gives precedence to shared waiters if they are
873 	 * present.  In that case, we have to preserve the state of the
874 	 * exclusive waiters flag.
875 	 * If interruptible sleeps left the shared queue empty, avoid
876 	 * starvation of the threads sleeping on the exclusive queue by
877 	 * giving them precedence and clearing the shared waiters bit anyway.
878 	 */
879 	setx = SX_LOCK_UNLOCKED;
880 	queue = SQ_EXCLUSIVE_QUEUE;
881 	if ((x & SX_LOCK_SHARED_WAITERS) != 0 &&
882 	    sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
883 		queue = SQ_SHARED_QUEUE;
884 		setx |= (x & SX_LOCK_EXCLUSIVE_WAITERS);
885 	}
886 	atomic_store_rel_ptr(&sx->sx_lock, setx);
887 
888 	/* Wake up all the waiters for the specific queue. */
889 	if (LOCK_LOG_TEST(&sx->lock_object, 0))
890 		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
891 		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
892 		    "exclusive");
893 
894 	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
895 	    queue);
896 	sleepq_release(&sx->lock_object);
897 	if (wakeup_swapper)
898 		kick_proc0();
899 }
900 
901 static bool __always_inline
902 __sx_slock_try(struct sx *sx, uintptr_t *xp LOCK_FILE_LINE_ARG_DEF)
903 {
904 
905 	/*
906 	 * If no other thread has an exclusive lock then try to bump up
907 	 * the count of sharers.  Since we have to preserve the state
908 	 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
909 	 * shared lock loop back and retry.
910 	 */
911 	while (*xp & SX_LOCK_SHARED) {
912 		MPASS(!(*xp & SX_LOCK_SHARED_WAITERS));
913 		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
914 		    *xp + SX_ONE_SHARER)) {
915 			if (LOCK_LOG_TEST(&sx->lock_object, 0))
916 				CTR4(KTR_LOCK, "%s: %p succeed %p -> %p",
917 				    __func__, sx, (void *)*xp,
918 				    (void *)(*xp + SX_ONE_SHARER));
919 			return (true);
920 		}
921 	}
922 	return (false);
923 }
924 
925 static int __noinline
926 _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
927 {
928 	GIANT_DECLARE;
929 #ifdef ADAPTIVE_SX
930 	volatile struct thread *owner;
931 	bool adaptive;
932 #endif
933 #ifdef LOCK_PROFILING
934 	uint64_t waittime = 0;
935 	int contested = 0;
936 #endif
937 	int error = 0;
938 #if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
939 	struct lock_delay_arg lda;
940 #endif
941 #ifdef KDTRACE_HOOKS
942 	u_int sleep_cnt = 0;
943 	int64_t sleep_time = 0;
944 	int64_t all_time = 0;
945 #endif
946 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
947 	uintptr_t state = 0;
948 #endif
949 	int extra_work = 0;
950 
951 #ifdef KDTRACE_HOOKS
952 	if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
953 		if (__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG))
954 			goto out_lockstat;
955 		extra_work = 1;
956 		all_time -= lockstat_nsecs(&sx->lock_object);
957 		state = x;
958 	}
959 #endif
960 #ifdef LOCK_PROFILING
961 	extra_work = 1;
962 	state = x;
963 #endif
964 
965 	if (SCHEDULER_STOPPED())
966 		return (0);
967 
968 #if defined(ADAPTIVE_SX)
969 	lock_delay_arg_init(&lda, &sx_delay);
970 #elif defined(KDTRACE_HOOKS)
971 	lock_delay_arg_init(&lda, NULL);
972 #endif
973 
974 #ifdef ADAPTIVE_SX
975 	adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0);
976 #endif
977 
978 #ifdef HWPMC_HOOKS
979 	PMC_SOFT_CALL( , , lock, failed);
980 #endif
981 	lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
982 	    &waittime);
983 
984 #ifndef INVARIANTS
985 	GIANT_SAVE(extra_work);
986 #endif
987 
988 	/*
989 	 * As with rwlocks, we don't make any attempt to block new
990 	 * shared locks once there is an exclusive waiter.
991 	 */
992 	for (;;) {
993 		if (__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG))
994 			break;
995 #ifdef INVARIANTS
996 		GIANT_SAVE(extra_work);
997 #endif
998 #ifdef KDTRACE_HOOKS
999 		lda.spin_cnt++;
1000 #endif
1001 
1002 #ifdef ADAPTIVE_SX
1003 		if (__predict_false(!adaptive))
1004 			goto sleepq;
1005 		/*
1006 		 * If the owner is running on another CPU, spin until
1007 		 * the owner stops running or the state of the lock
1008 		 * changes.
1009 		 */
1010 		owner = lv_sx_owner(x);
1011 		if (TD_IS_RUNNING(owner)) {
1012 			if (LOCK_LOG_TEST(&sx->lock_object, 0))
1013 				CTR3(KTR_LOCK,
1014 				    "%s: spinning on %p held by %p",
1015 				    __func__, sx, owner);
1016 			KTR_STATE1(KTR_SCHED, "thread",
1017 			    sched_tdname(curthread), "spinning",
1018 			    "lockname:\"%s\"", sx->lock_object.lo_name);
1019 			do {
1020 				lock_delay(&lda);
1021 				x = SX_READ_VALUE(sx);
1022 				owner = lv_sx_owner(x);
1023 			} while (owner != NULL && TD_IS_RUNNING(owner));
1024 			KTR_STATE0(KTR_SCHED, "thread",
1025 			    sched_tdname(curthread), "running");
1026 			continue;
1027 		}
1028 sleepq:
1029 #endif
1030 
1031 		/*
1032 		 * Some other thread already has an exclusive lock, so
1033 		 * start the process of blocking.
1034 		 */
1035 		sleepq_lock(&sx->lock_object);
1036 		x = SX_READ_VALUE(sx);
1037 retry_sleepq:
1038 		/*
1039 		 * The lock could have been released while we spun.
1040 		 * In this case loop back and retry.
1041 		 */
1042 		if (x & SX_LOCK_SHARED) {
1043 			sleepq_release(&sx->lock_object);
1044 			continue;
1045 		}
1046 
1047 #ifdef ADAPTIVE_SX
1048 		/*
1049 		 * If the owner is running on another CPU, spin until
1050 		 * the owner stops running or the state of the lock
1051 		 * changes.
1052 		 */
1053 		if (!(x & SX_LOCK_SHARED) && adaptive) {
1054 			owner = (struct thread *)SX_OWNER(x);
1055 			if (TD_IS_RUNNING(owner)) {
1056 				sleepq_release(&sx->lock_object);
1057 				x = SX_READ_VALUE(sx);
1058 				continue;
1059 			}
1060 		}
1061 #endif
1062 
1063 		/*
1064 		 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
1065 		 * fail to set it drop the sleep queue lock and loop
1066 		 * back.
1067 		 */
1068 		if (!(x & SX_LOCK_SHARED_WAITERS)) {
1069 			if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
1070 			    x | SX_LOCK_SHARED_WAITERS))
1071 				goto retry_sleepq;
1072 			if (LOCK_LOG_TEST(&sx->lock_object, 0))
1073 				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
1074 				    __func__, sx);
1075 		}
1076 
1077 		/*
1078 		 * Since we have been unable to acquire the shared lock,
1079 		 * we have to sleep.
1080 		 */
1081 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
1082 			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
1083 			    __func__, sx);
1084 
1085 #ifdef KDTRACE_HOOKS
1086 		sleep_time -= lockstat_nsecs(&sx->lock_object);
1087 #endif
1088 		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
1089 		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
1090 		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
1091 		if (!(opts & SX_INTERRUPTIBLE))
1092 			sleepq_wait(&sx->lock_object, 0);
1093 		else
1094 			error = sleepq_wait_sig(&sx->lock_object, 0);
1095 #ifdef KDTRACE_HOOKS
1096 		sleep_time += lockstat_nsecs(&sx->lock_object);
1097 		sleep_cnt++;
1098 #endif
1099 		if (error) {
1100 			if (LOCK_LOG_TEST(&sx->lock_object, 0))
1101 				CTR2(KTR_LOCK,
1102 			"%s: interruptible sleep by %p suspended by signal",
1103 				    __func__, sx);
1104 			break;
1105 		}
1106 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
1107 			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
1108 			    __func__, sx);
1109 		x = SX_READ_VALUE(sx);
1110 	}
1111 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
1112 	if (__predict_true(!extra_work))
1113 		return (error);
1114 #endif
1115 #ifdef KDTRACE_HOOKS
1116 	all_time += lockstat_nsecs(&sx->lock_object);
1117 	if (sleep_time)
1118 		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
1119 		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
1120 		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
1121 	if (lda.spin_cnt > sleep_cnt)
1122 		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
1123 		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
1124 		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
1125 out_lockstat:
1126 #endif
1127 	if (error == 0) {
1128 		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
1129 		    contested, waittime, file, line, LOCKSTAT_READER);
1130 	}
1131 	GIANT_RESTORE();
1132 	return (error);
1133 }
1134 
1135 int
1136 _sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
1137 {
1138 	uintptr_t x;
1139 	int error;
1140 
1141 	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
1142 	    !TD_IS_IDLETHREAD(curthread),
1143 	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
1144 	    curthread, sx->lock_object.lo_name, file, line));
1145 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
1146 	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
1147 	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
1148 
1149 	error = 0;
1150 	x = SX_READ_VALUE(sx);
1151 	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) ||
1152 	    !__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG)))
1153 		error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
1154 	else
1155 		lock_profile_obtain_lock_success(&sx->lock_object, 0, 0,
1156 		    file, line);
1157 	if (error == 0) {
1158 		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
1159 		WITNESS_LOCK(&sx->lock_object, 0, file, line);
1160 		TD_LOCKS_INC(curthread);
1161 	}
1162 	return (error);
1163 }
1164 
1165 int
1166 _sx_slock(struct sx *sx, int opts, const char *file, int line)
1167 {
1168 
1169 	return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
1170 }
1171 
1172 static bool __always_inline
1173 _sx_sunlock_try(struct sx *sx, uintptr_t *xp)
1174 {
1175 
1176 	for (;;) {
1177 		/*
1178 		 * We should never have shared waiters while at least one
1179 		 * thread holds a shared lock.
1180 		 */
1181 		KASSERT(!(*xp & SX_LOCK_SHARED_WAITERS),
1182 		    ("%s: waiting sharers", __func__));
1183 
1184 		/*
1185 		 * See if there is more than one shared lock held.  If
1186 		 * so, just drop one and return.
1187 		 */
1188 		if (SX_SHARERS(*xp) > 1) {
1189 			if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
1190 			    *xp - SX_ONE_SHARER)) {
1191 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
1192 					CTR4(KTR_LOCK,
1193 					    "%s: %p succeeded %p -> %p",
1194 					    __func__, sx, (void *)*xp,
1195 					    (void *)(*xp - SX_ONE_SHARER));
1196 				return (true);
1197 			}
1198 			continue;
1199 		}
1200 
1201 		/*
1202 		 * If there aren't any waiters for an exclusive lock,
1203 		 * then try to drop it quickly.
1204 		 */
1205 		if (!(*xp & SX_LOCK_EXCLUSIVE_WAITERS)) {
1206 			MPASS(*xp == SX_SHARERS_LOCK(1));
1207 			*xp = SX_SHARERS_LOCK(1);
1208 			if (atomic_fcmpset_rel_ptr(&sx->sx_lock,
1209 			    xp, SX_LOCK_UNLOCKED)) {
1210 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
1211 					CTR2(KTR_LOCK, "%s: %p last succeeded",
1212 					    __func__, sx);
1213 				return (true);
1214 			}
1215 			continue;
1216 		}
1217 		break;
1218 	}
1219 	return (false);
1220 }
1221 
1222 static void __noinline
1223 _sx_sunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
1224 {
1225 	int wakeup_swapper = 0;
1226 	uintptr_t setx;
1227 
1228 	if (SCHEDULER_STOPPED())
1229 		return;
1230 
1231 	if (_sx_sunlock_try(sx, &x))
1232 		goto out_lockstat;
1233 
1234 	/*
1235 	 * At this point, there should just be one sharer with
1236 	 * exclusive waiters.
1237 	 */
1238 	MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));
1239 
1240 	sleepq_lock(&sx->lock_object);
1241 	x = SX_READ_VALUE(sx);
1242 	for (;;) {
1243 		MPASS(x & SX_LOCK_EXCLUSIVE_WAITERS);
1244 		MPASS(!(x & SX_LOCK_SHARED_WAITERS));
1245 		if (_sx_sunlock_try(sx, &x))
1246 			break;
1247 
1248 		/*
1249 		 * The wakeup semantic here is quite simple:
1250 		 * just wake up all the exclusive waiters.
1251 		 * Note that the state of the lock could have changed,
1252 		 * so if the update fails, loop back and retry.
1253 		 */
1254 		setx = x - SX_ONE_SHARER;
1255 		setx &= ~SX_LOCK_EXCLUSIVE_WAITERS;
1256 		if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
1257 			continue;
1258 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
1259 			CTR2(KTR_LOCK, "%s: %p waking up all threads on "
1260 			    "exclusive queue", __func__, sx);
1261 		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
1262 		    0, SQ_EXCLUSIVE_QUEUE);
1263 		break;
1264 	}
1265 	sleepq_release(&sx->lock_object);
1266 	if (wakeup_swapper)
1267 		kick_proc0();
1268 out_lockstat:
1269 	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
1270 }
1271 
1272 void
1273 _sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
1274 {
1275 	uintptr_t x;
1276 
1277 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
1278 	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
1279 	_sx_assert(sx, SA_SLOCKED, file, line);
1280 	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
1281 	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
1282 
1283 	x = SX_READ_VALUE(sx);
1284 	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) ||
1285 	    !_sx_sunlock_try(sx, &x)))
1286 		_sx_sunlock_hard(sx, x LOCK_FILE_LINE_ARG);
1287 	else
1288 		lock_profile_release_lock(&sx->lock_object);
1289 
1290 	TD_LOCKS_DEC(curthread);
1291 }
1292 
1293 void
1294 _sx_sunlock(struct sx *sx, const char *file, int line)
1295 {
1296 
1297 	_sx_sunlock_int(sx LOCK_FILE_LINE_ARG);
1298 }
1299 
1300 #ifdef INVARIANT_SUPPORT
1301 #ifndef INVARIANTS
1302 #undef	_sx_assert
1303 #endif
1304 
1305 /*
1306  * In the non-WITNESS case, sx_assert() can only detect that at least
1307  * *some* thread owns an slock, but it cannot guarantee that *this*
1308  * thread owns an slock.
1309  */
1310 void
1311 _sx_assert(const struct sx *sx, int what, const char *file, int line)
1312 {
1313 #ifndef WITNESS
1314 	int slocked = 0;
1315 #endif
1316 
1317 	if (panicstr != NULL)
1318 		return;
1319 	switch (what) {
1320 	case SA_SLOCKED:
1321 	case SA_SLOCKED | SA_NOTRECURSED:
1322 	case SA_SLOCKED | SA_RECURSED:
1323 #ifndef WITNESS
1324 		slocked = 1;
1325 		/* FALLTHROUGH */
1326 #endif
1327 	case SA_LOCKED:
1328 	case SA_LOCKED | SA_NOTRECURSED:
1329 	case SA_LOCKED | SA_RECURSED:
1330 #ifdef WITNESS
1331 		witness_assert(&sx->lock_object, what, file, line);
1332 #else
1333 		/*
1334 		 * If some other thread has an exclusive lock or we
1335 		 * have one and are asserting a shared lock, fail.
1336 		 * Also, if no one has a lock at all, fail.
1337 		 */
1338 		if (sx->sx_lock == SX_LOCK_UNLOCKED ||
1339 		    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
1340 		    sx_xholder(sx) != curthread)))
1341 			panic("Lock %s not %slocked @ %s:%d\n",
1342 			    sx->lock_object.lo_name, slocked ? "share " : "",
1343 			    file, line);
1344 
1345 		if (!(sx->sx_lock & SX_LOCK_SHARED)) {
1346 			if (sx_recursed(sx)) {
1347 				if (what & SA_NOTRECURSED)
1348 					panic("Lock %s recursed @ %s:%d\n",
1349 					    sx->lock_object.lo_name, file,
1350 					    line);
1351 			} else if (what & SA_RECURSED)
1352 				panic("Lock %s not recursed @ %s:%d\n",
1353 				    sx->lock_object.lo_name, file, line);
1354 		}
1355 #endif
1356 		break;
1357 	case SA_XLOCKED:
1358 	case SA_XLOCKED | SA_NOTRECURSED:
1359 	case SA_XLOCKED | SA_RECURSED:
1360 		if (sx_xholder(sx) != curthread)
1361 			panic("Lock %s not exclusively locked @ %s:%d\n",
1362 			    sx->lock_object.lo_name, file, line);
1363 		if (sx_recursed(sx)) {
1364 			if (what & SA_NOTRECURSED)
1365 				panic("Lock %s recursed @ %s:%d\n",
1366 				    sx->lock_object.lo_name, file, line);
1367 		} else if (what & SA_RECURSED)
1368 			panic("Lock %s not recursed @ %s:%d\n",
1369 			    sx->lock_object.lo_name, file, line);
1370 		break;
1371 	case SA_UNLOCKED:
1372 #ifdef WITNESS
1373 		witness_assert(&sx->lock_object, what, file, line);
1374 #else
1375 		/*
1376 		 * If we hold an exclusive lock, fail.  We can't
1377 		 * reliably check to see if we hold a shared lock or
1378 		 * not.
1379 		 */
1380 		if (sx_xholder(sx) == curthread)
1381 			panic("Lock %s exclusively locked @ %s:%d\n",
1382 			    sx->lock_object.lo_name, file, line);
1383 #endif
1384 		break;
1385 	default:
1386 		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
1387 		    line);
1388 	}
1389 }
1390 #endif	/* INVARIANT_SUPPORT */
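/*
 * Illustrative assertion usage (hypothetical caller): functions that
 * require their caller to hold the lock typically open with
 *
 *	sx_assert(&foo_lock, SA_XLOCKED);
 *
 * which compiles away in kernels built without INVARIANTS.
 */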
1391 
1392 #ifdef DDB
1393 static void
1394 db_show_sx(const struct lock_object *lock)
1395 {
1396 	struct thread *td;
1397 	const struct sx *sx;
1398 
1399 	sx = (const struct sx *)lock;
1400 
1401 	db_printf(" state: ");
1402 	if (sx->sx_lock == SX_LOCK_UNLOCKED)
1403 		db_printf("UNLOCKED\n");
1404 	else if (sx->sx_lock == SX_LOCK_DESTROYED) {
1405 		db_printf("DESTROYED\n");
1406 		return;
1407 	} else if (sx->sx_lock & SX_LOCK_SHARED)
1408 		db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
1409 	else {
1410 		td = sx_xholder(sx);
1411 		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1412 		    td->td_tid, td->td_proc->p_pid, td->td_name);
1413 		if (sx_recursed(sx))
1414 			db_printf(" recursed: %d\n", sx->sx_recurse);
1415 	}
1416 
1417 	db_printf(" waiters: ");
1418 	switch (sx->sx_lock &
1419 	    (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
1420 	case SX_LOCK_SHARED_WAITERS:
1421 		db_printf("shared\n");
1422 		break;
1423 	case SX_LOCK_EXCLUSIVE_WAITERS:
1424 		db_printf("exclusive\n");
1425 		break;
1426 	case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
1427 		db_printf("exclusive and shared\n");
1428 		break;
1429 	default:
1430 		db_printf("none\n");
1431 	}
1432 }
1433 
1434 /*
1435  * Check to see if a thread that is blocked on a sleep queue is actually
1436  * blocked on an sx lock.  If so, output some details and return true.
1437  * If the lock has an exclusive owner, return that in *ownerp.
1438  */
1439 int
1440 sx_chain(struct thread *td, struct thread **ownerp)
1441 {
1442 	struct sx *sx;
1443 
1444 	/*
1445 	 * Check to see if this thread is blocked on an sx lock.
1446 	 * First, we check the lock class.  If that is ok, then we
1447 	 * compare the lock name against the wait message.
1448 	 */
1449 	sx = td->td_wchan;
1450 	if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
1451 	    sx->lock_object.lo_name != td->td_wmesg)
1452 		return (0);
1453 
1454 	/* We think we have an sx lock, so output some details. */
1455 	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
1456 	*ownerp = sx_xholder(sx);
1457 	if (sx->sx_lock & SX_LOCK_SHARED)
1458 		db_printf("SLOCK (count %ju)\n",
1459 		    (uintmax_t)SX_SHARERS(sx->sx_lock));
1460 	else
1461 		db_printf("XLOCK\n");
1462 	return (1);
1463 }
1464 #endif
1465