/* xref: /freebsd/sys/kern/kern_sx.c (revision b64c5a0ace59af62eff52bfe110a521dc73c937b) */
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so it should not be relied upon in combination with sx locks.
 */
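
/*
 * A minimal usage sketch (illustrative only; the sx(9) manual page is the
 * canonical reference).  The lock and the code around it are hypothetical:
 *
 *	static struct sx foo_sx;
 *
 *	sx_init(&foo_sx, "foo");	initialize once
 *	sx_slock(&foo_sx);		read-side critical section
 *	sx_sunlock(&foo_sx);
 *	sx_xlock(&foo_sx);		write-side critical section
 *	sx_xunlock(&foo_sx);
 *	sx_destroy(&foo_sx);		when the lock is no longer needed
 */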

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_sx.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#define	ADAPTIVE_SX
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/* Handy macros for sleep queues. */
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define	GIANT_DECLARE							\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant)					\

#define	GIANT_SAVE(work) do {						\
	if (__predict_false(mtx_owned(&Giant))) {			\
		work++;							\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_giantcnt++;					\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define GIANT_RESTORE() do {						\
	if (_giantcnt > 0) {						\
		mtx_assert(&Giant, MA_NOTOWNED);			\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)

/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define	sx_recursed(sx)		((sx)->sx_recurse != 0)

static void	assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_sx(const struct lock_object *lock);
#endif
static void	lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
	.lc_name = "sx",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_sx,
#ifdef DDB
	.lc_ddb_show = db_show_sx,
#endif
	.lc_lock = lock_sx,
	.lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_sx,
#endif
};

#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif

#ifdef ADAPTIVE_SX
#ifdef SX_CUSTOM_BACKOFF
static u_short __read_frequently asx_retries;
static u_short __read_frequently asx_loops;
static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "sxlock debugging");
SYSCTL_U16(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
SYSCTL_U16(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");

static struct lock_delay_config __read_frequently sx_delay;

SYSCTL_U16(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base,
    0, "");
SYSCTL_U16(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
    0, "");

static void
sx_lock_delay_init(void *arg __unused)
{

	lock_delay_default_init(&sx_delay);
	asx_retries = 10;
	asx_loops = max(10000, sx_delay.max);
}
LOCK_DELAY_SYSINIT(sx_lock_delay_init);
#else
#define sx_delay	locks_delay
#define asx_retries	locks_delay_retries
#define asx_loops	locks_delay_loops
#endif
#endif

void
assert_sx(const struct lock_object *lock, int what)
{

	sx_assert((const struct sx *)lock, what);
}

void
lock_sx(struct lock_object *lock, uintptr_t how)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	if (how)
		sx_slock(sx);
	else
		sx_xlock(sx);
}

uintptr_t
unlock_sx(struct lock_object *lock)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
	if (sx_xlocked(sx)) {
		sx_xunlock(sx);
		return (0);
	} else {
		sx_sunlock(sx);
		return (1);
	}
}

#ifdef KDTRACE_HOOKS
int
owner_sx(const struct lock_object *lock, struct thread **owner)
{
	const struct sx *sx;
	uintptr_t x;

	sx = (const struct sx *)lock;
	x = sx->sx_lock;
	*owner = NULL;
	return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
	    ((*owner = (struct thread *)SX_OWNER(x)) != NULL));
}
#endif

void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
}

void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
	int flags;

	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
	    SX_NOPROFILE | SX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
	    &sx->sx_lock));

	flags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (opts & SX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & SX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & SX_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & SX_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & SX_QUIET)
		flags |= LO_QUIET;
	if (opts & SX_NEW)
		flags |= LO_NEW;

	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
	sx->sx_lock = SX_LOCK_UNLOCKED;
	sx->sx_recurse = 0;
}

void
sx_destroy(struct sx *sx)
{

	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
	sx->sx_lock = SX_LOCK_DESTROYED;
	lock_destroy(&sx->lock_object);
}

int
sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));

	x = sx->sx_lock;
	for (;;) {
		KASSERT(x != SX_LOCK_DESTROYED,
		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
		if (!(x & SX_LOCK_SHARED))
			break;
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			curthread->td_sx_slocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
	return (0);
}

int
sx_try_slock_(struct sx *sx, const char *file, int line)
{

	return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG));
}

int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
	uintptr_t tid, x;
	int error = 0;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	tid = (uintptr_t)curthread;
	x = SX_LOCK_UNLOCKED;
	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
		error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    0, 0, file, line, LOCKSTAT_WRITER);
	if (!error) {
		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
		    file, line);
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_INC(curthread);
	}

	return (error);
}

int
sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, x;
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

	rval = 1;
	recursed = false;
	x = SX_LOCK_UNLOCKED;
	for (;;) {
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
			break;
		if (x == SX_LOCK_UNLOCKED)
			continue;
		if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
			sx->sx_recurse++;
			atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
			/*
			 * Mark the recursion so lockstat does not record
			 * this as a fresh acquisition below.
			 */
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}

	return (rval);
}

int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{

	return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG));
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
	    line);
#if LOCK_DEBUG > 0
	_sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
#else
	__sx_xunlock(sx, curthread, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}

/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeds, 0 otherwise.
 */
int
sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;
	uintptr_t waiters;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);

	/*
	 * Try to switch from one shared lock to an exclusive lock.  We need
	 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
	 * we will wake up the exclusive waiters when we drop the lock.
	 */
	success = 0;
	x = SX_READ_VALUE(sx);
	for (;;) {
		if (SX_SHARERS(x) > 1)
			break;
		waiters = (x & SX_LOCK_WAITERS);
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
		    (uintptr_t)curthread | waiters)) {
			success = 1;
			break;
		}
	}
	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_sx_slocks--;
		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(sx__upgrade, sx);
	}
	return (success);
}

int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{

	return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG));
}
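
/*
 * A sketch of the common caller-side pattern around sx_try_upgrade()
 * (illustrative only; foo_sx and the re-check are hypothetical).  Since
 * the upgrade fails whenever other sharers exist, callers typically fall
 * back to relocking exclusively and re-validating state:
 *
 *	sx_slock(&foo_sx);
 *	if (!sx_try_upgrade(&foo_sx)) {
 *		sx_sunlock(&foo_sx);
 *		sx_xlock(&foo_sx);
 *		// the lock was dropped; anything read under the
 *		// shared lock must be re-checked here
 *	}
 */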

/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (sx_recursed(sx))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

	/*
	 * Try to switch from an exclusive lock with no shared waiters
	 * to one sharer with no shared waiters.  If there are
	 * exclusive waiters, we don't need to lock the sleep queue so
	 * long as we preserve the flag.  We do one quick try and if
	 * that fails we grab the sleepq lock to keep the flags from
	 * changing and do it the slow way.
	 *
	 * We have to lock the sleep queue if there are shared waiters
	 * so we can wake them up.
	 */
	x = sx->sx_lock;
	if (!(x & SX_LOCK_SHARED_WAITERS) &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS)))
		goto out;

	/*
	 * Lock the sleep queue so we can read the waiters bits
	 * without any races and wakeup any shared waiters.
	 */
	sleepq_lock(&sx->lock_object);

	/*
	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
	 * shared lock.  If there are any shared waiters, wake them up.
	 */
	x = sx->sx_lock;
	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
	if (x & SX_LOCK_SHARED_WAITERS)
		sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
		    SQ_SHARED_QUEUE);
	sleepq_release(&sx->lock_object);

out:
	curthread->td_sx_slocks++;
	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(sx__downgrade, sx);
}

void
sx_downgrade_(struct sx *sx, const char *file, int line)
{

	sx_downgrade_int(sx LOCK_FILE_LINE_ARG);
}

#ifdef	ADAPTIVE_SX
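/*
 * Drop the critical section entered while spinning for the lock, unless
 * SX_LOCK_WRITE_SPINNER is still set; while that bit is set the spinning
 * writer must stay in a critical section so it cannot be preempted.
 */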
static inline void
sx_drop_critical(uintptr_t x, bool *in_critical, int *extra_work)
{

	if (x & SX_LOCK_WRITE_SPINNER)
		return;
	if (*in_critical) {
		critical_exit();
		*in_critical = false;
		(*extra_work)--;
	}
}
#else
#define sx_drop_critical(x, in_critical, extra_work) do { } while (0)
#endif

/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
{
	GIANT_DECLARE;
	uintptr_t tid, setx;
#ifdef ADAPTIVE_SX
	struct thread *owner;
	u_int i, n, spintries = 0;
	enum { READERS, WRITER } sleep_reason = READERS;
	bool in_critical = false;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef	KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
	uintptr_t state = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 0;
#endif
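	/*
	 * extra_work counts reasons the epilogue below cannot be skipped:
	 * Giant was dropped, a critical section may still be held, or lock
	 * profiling/lockstat is active.  While it stays zero, the common
	 * path returns without any of that cleanup.
	 */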
	int extra_work = 0;

	tid = (uintptr_t)curthread;

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
		while (x == SX_LOCK_UNLOCKED) {
			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
				goto out_lockstat;
		}
		extra_work = 1;
		doing_lockprof = 1;
		all_time -= lockstat_nsecs(&sx->lock_object);
	}
	state = x;
#endif
#ifdef LOCK_PROFILING
	extra_work = 1;
	doing_lockprof = 1;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

	if (__predict_false(x == SX_LOCK_UNLOCKED))
		x = SX_READ_VALUE(sx);

	/* If we already hold an exclusive lock, then recurse. */
	if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
		    sx->lock_object.lo_name, file, line));
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
		return (0);
	}

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init_noadapt(&lda);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&sx->lock_object, false, &contested,
	    &waittime);

#ifndef INVARIANTS
	GIANT_SAVE(extra_work);
#endif

	THREAD_CONTENDS_ON_LOCK(&sx->lock_object);

	for (;;) {
		if (x == SX_LOCK_UNLOCKED) {
			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
				break;
			continue;
		}
#ifdef INVARIANTS
		GIANT_SAVE(extra_work);
#endif
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_SX
		if (x == (SX_LOCK_SHARED | SX_LOCK_WRITE_SPINNER)) {
			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
				break;
			continue;
		}

		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		if ((x & SX_LOCK_SHARED) == 0) {
			sx_drop_critical(x, &in_critical, &extra_work);
			sleep_reason = WRITER;
			owner = lv_sx_owner(x);
			if (!TD_IS_RUNNING(owner))
				goto sleepq;
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, sx, owner);
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    sx->lock_object.lo_name);
			do {
				lock_delay(&lda);
				x = SX_READ_VALUE(sx);
				owner = lv_sx_owner(x);
			} while (owner != NULL && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			continue;
		} else if (SX_SHARERS(x) > 0) {
			sleep_reason = READERS;
			if (spintries == asx_retries)
				goto sleepq;
			if (!(x & SX_LOCK_WRITE_SPINNER)) {
				if (!in_critical) {
					critical_enter();
					in_critical = true;
					extra_work++;
				}
				if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
				    x | SX_LOCK_WRITE_SPINNER)) {
					critical_exit();
					in_critical = false;
					extra_work--;
					continue;
				}
			}
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    sx->lock_object.lo_name);
			n = SX_SHARERS(x);
			for (i = 0; i < asx_loops; i += n) {
				lock_delay_spin(n);
				x = SX_READ_VALUE(sx);
				if (!(x & SX_LOCK_WRITE_SPINNER))
					break;
				if (!(x & SX_LOCK_SHARED))
					break;
				n = SX_SHARERS(x);
				if (n == 0)
					break;
			}
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += i;
#endif
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			if (i < asx_loops)
				continue;
		}
sleepq:
#endif
		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);
retry_sleepq:

		/*
		 * If the lock was released while spinning on the
		 * sleep queue chain lock, try again.
		 */
		if (x == SX_LOCK_UNLOCKED) {
			sleepq_release(&sx->lock_object);
			sx_drop_critical(x, &in_critical, &extra_work);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the sleep queue
		 * chain lock.  If so, drop the sleep queue lock and try
		 * again.
		 */
		if (!(x & SX_LOCK_SHARED)) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				sx_drop_critical(x, &in_critical,
				    &extra_work);
				continue;
			}
		} else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
			sleepq_release(&sx->lock_object);
			sx_drop_critical(x, &in_critical, &extra_work);
			continue;
		}
#endif

		/*
		 * If an exclusive lock was released with both shared
		 * and exclusive waiters and a shared waiter hasn't
		 * woken up and acquired the lock yet, sx_lock will be
		 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
		 * If we see that value, try to acquire it once.  Note
		 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
		 * as there are other exclusive waiters still.  If we
		 * fail, restart the loop.
		 */
		setx = x & (SX_LOCK_WAITERS | SX_LOCK_WRITE_SPINNER);
		if ((x & ~setx) == SX_LOCK_SHARED) {
			setx &= ~SX_LOCK_WRITE_SPINNER;
			if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid | setx))
				goto retry_sleepq;
			sleepq_release(&sx->lock_object);
			CTR2(KTR_LOCK, "%s: %p claimed by new writer",
			    __func__, sx);
			break;
		}

#ifdef ADAPTIVE_SX
		/*
		 * It is possible we set the SX_LOCK_WRITE_SPINNER bit.
		 * It is an invariant that when the bit is set, there is
		 * a writer ready to grab the lock. Thus clear the bit since
		 * we are going to sleep.
		 */
		if (in_critical) {
			if ((x & SX_LOCK_WRITE_SPINNER) ||
			    !(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
				setx = x & ~SX_LOCK_WRITE_SPINNER;
				setx |= SX_LOCK_EXCLUSIVE_WAITERS;
				if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
				    setx)) {
					goto retry_sleepq;
				}
			}
			critical_exit();
			in_critical = false;
		} else {
#endif
			/*
			 * Try to set SX_LOCK_EXCLUSIVE_WAITERS.  If we
			 * fail, then loop back and retry.
			 */
			if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
				if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
				    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
					goto retry_sleepq;
				}
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
					    __func__, sx);
			}
#ifdef ADAPTIVE_SX
		}
#endif

		/*
		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we have
		 * to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
		/*
		 * Hack: this can land in thread_suspend_check which will
		 * conditionally take a mutex, tripping over an assert if a
		 * lock we are waiting for is set.
		 */
		THREAD_CONTENTION_DONE(&sx->lock_object);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
		THREAD_CONTENDS_ON_LOCK(&sx->lock_object);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
	THREAD_CONTENTION_DONE(&sx->lock_object);
	if (__predict_true(!extra_work))
		return (error);
#ifdef ADAPTIVE_SX
	if (in_critical)
		critical_exit();
#endif
	GIANT_RESTORE();
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return (error);
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
out_lockstat:
#endif
	if (!error)
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_WRITER);
	return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t tid, setx;
	int queue;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;

	if (__predict_false(x == tid))
		x = SX_READ_VALUE(sx);

	MPASS(!(x & SX_LOCK_SHARED));

	if (__predict_false(x & SX_LOCK_RECURSED)) {
		/* The lock is recursed, unrecurse one level. */
		if ((--sx->sx_recurse) == 0)
			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
	if (x == tid &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
		return;

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

	sleepq_lock(&sx->lock_object);
	x = SX_READ_VALUE(sx);
	MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS));
	/*
	 * The wakeup algorithm here is quite simple and probably not
	 * ideal.  It gives precedence to shared waiters if they are
	 * present.  For this condition, we have to preserve the
	 * state of the exclusive waiters flag.
	 * If interruptible sleeps left the shared queue empty, avoid
	 * starvation of the threads sleeping on the exclusive queue by
	 * giving them precedence and cleaning up the shared waiters bit
	 * anyway.
	 */
	setx = SX_LOCK_UNLOCKED;
	queue = SQ_SHARED_QUEUE;
	if ((x & SX_LOCK_EXCLUSIVE_WAITERS) != 0 &&
	    sleepq_sleepcnt(&sx->lock_object, SQ_EXCLUSIVE_QUEUE) != 0) {
		queue = SQ_EXCLUSIVE_QUEUE;
		setx |= (x & SX_LOCK_SHARED_WAITERS);
	}
	atomic_store_rel_ptr(&sx->sx_lock, setx);

	/* Wake up all the waiters for the specific queue. */
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");

	sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
	sleepq_release(&sx->lock_object);
}

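/*
 * Read admission policy: a new reader may take the lock only when it is
 * share-locked with no exclusive waiter and no spinning writer.  As an
 * exception (applied on the slow path only), a thread that already holds
 * shared locks (td_sx_slocks != 0) may acquire another shared lock past a
 * pending writer, since making it wait behind that writer could deadlock.
 */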
static __always_inline bool
__sx_can_read(struct thread *td, uintptr_t x, bool fp)
{

	if ((x & (SX_LOCK_SHARED | SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_WRITE_SPINNER))
	    == SX_LOCK_SHARED)
		return (true);
	if (!fp && td->td_sx_slocks && (x & SX_LOCK_SHARED))
		return (true);
	return (false);
}

static __always_inline bool
__sx_slock_try(struct sx *sx, struct thread *td, uintptr_t *xp, bool fp
    LOCK_FILE_LINE_ARG_DEF)
{

	/*
	 * If no other thread has an exclusive lock then try to bump up
	 * the count of sharers.  Since we have to preserve the state
	 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
	 * shared lock loop back and retry.
	 */
	while (__sx_can_read(td, *xp, fp)) {
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
		    *xp + SX_ONE_SHARER)) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR4(KTR_LOCK, "%s: %p succeed %p -> %p",
				    __func__, sx, (void *)*xp,
				    (void *)(*xp + SX_ONE_SHARER));
			td->td_sx_slocks++;
			return (true);
		}
	}
	return (false);
}

static int __noinline
_sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	GIANT_DECLARE;
	struct thread *td;
#ifdef ADAPTIVE_SX
	struct thread *owner;
	u_int i, n, spintries = 0;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
	uintptr_t state = 0;
#endif
	int extra_work __sdt_used = 0;

	td = curthread;

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
		if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
			goto out_lockstat;
		extra_work = 1;
		all_time -= lockstat_nsecs(&sx->lock_object);
	}
	state = x;
#endif
#ifdef LOCK_PROFILING
	extra_work = 1;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init_noadapt(&lda);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&sx->lock_object, false, &contested,
	    &waittime);

#ifndef INVARIANTS
	GIANT_SAVE(extra_work);
#endif

	THREAD_CONTENDS_ON_LOCK(&sx->lock_object);

	/*
	 * As with rwlocks, we don't make any attempt to try to block
	 * shared locks once there is an exclusive waiter.
	 */
	for (;;) {
		if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
			break;
#ifdef INVARIANTS
		GIANT_SAVE(extra_work);
#endif
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((x & SX_LOCK_SHARED) == 0) {
			owner = lv_sx_owner(x);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				do {
					lock_delay(&lda);
					x = SX_READ_VALUE(sx);
					owner = lv_sx_owner(x);
				} while (owner != NULL && TD_IS_RUNNING(owner));
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else {
			if ((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) {
				MPASS(!__sx_can_read(td, x, false));
				lock_delay_spin(2);
				x = SX_READ_VALUE(sx);
				continue;
			}
			if (spintries < asx_retries) {
				KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
				    "spinning", "lockname:\"%s\"",
				    sx->lock_object.lo_name);
				n = SX_SHARERS(x);
				for (i = 0; i < asx_loops; i += n) {
					lock_delay_spin(n);
					x = SX_READ_VALUE(sx);
					if (!(x & SX_LOCK_SHARED))
						break;
					n = SX_SHARERS(x);
					if (n == 0)
						break;
					if (__sx_can_read(td, x, false))
						break;
				}
#ifdef KDTRACE_HOOKS
				lda.spin_cnt += i;
#endif
				KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
				    "running");
				if (i < asx_loops)
					continue;
			}
		}
#endif

		/*
		 * Some other thread already has an exclusive lock, so
		 * start the process of blocking.
		 */
		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);
retry_sleepq:
		if (((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) ||
		    __sx_can_read(td, x, false)) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if (!(x & SX_LOCK_SHARED)) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				x = SX_READ_VALUE(sx);
				continue;
			}
		}
#endif

		/*
		 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
		 * fail to set it drop the sleep queue lock and loop
		 * back.
		 */
		if (!(x & SX_LOCK_SHARED_WAITERS)) {
			if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
			    x | SX_LOCK_SHARED_WAITERS))
				goto retry_sleepq;
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the shared lock,
		 * we have to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
		/*
		 * Hack: this can land in thread_suspend_check which will
		 * conditionally take a mutex, tripping over an assert if a
		 * lock we are waiting for is set.
		 */
		THREAD_CONTENTION_DONE(&sx->lock_object);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
		THREAD_CONTENDS_ON_LOCK(&sx->lock_object);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
	THREAD_CONTENTION_DONE(&sx->lock_object);
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!extra_work))
		return (error);
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
out_lockstat:
#endif
	if (error == 0) {
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_READER);
	}
	GIANT_RESTORE();
	return (error);
}

int
_sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t x;
	int error;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);

	error = 0;
	td = curthread;
	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) ||
	    !__sx_slock_try(sx, td, &x, true LOCK_FILE_LINE_ARG)))
		error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
	else
		lock_profile_obtain_lock_success(&sx->lock_object, false, 0, 0,
		    file, line);
	if (error == 0) {
		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
		WITNESS_LOCK(&sx->lock_object, 0, file, line);
		TD_LOCKS_INC(curthread);
	}
	return (error);
}

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{

	return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
}

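/*
 * Fast-path release of one shared count: the decrement may be done
 * locklessly as long as either other sharers remain or nobody is waiting.
 * The last sharer with waiters present must fail here and take
 * _sx_sunlock_hard() below, which wakes the waiters up.
 */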
static __always_inline bool
_sx_sunlock_try(struct sx *sx, struct thread *td, uintptr_t *xp)
{

	for (;;) {
		if (SX_SHARERS(*xp) > 1 || !(*xp & SX_LOCK_WAITERS)) {
			if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
			    *xp - SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, sx, (void *)*xp,
					    (void *)(*xp - SX_ONE_SHARER));
				td->td_sx_slocks--;
				return (true);
			}
			continue;
		}
		break;
	}
	return (false);
}

static void __noinline
_sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x
    LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t setx, queue;

	if (SCHEDULER_STOPPED())
		return;

	if (_sx_sunlock_try(sx, td, &x))
		goto out_lockstat;

	sleepq_lock(&sx->lock_object);
	x = SX_READ_VALUE(sx);
	for (;;) {
		if (_sx_sunlock_try(sx, td, &x))
			break;

		/*
		 * The wakeup semantic here is quite simple:
		 * just wake up all the exclusive waiters.
		 * Note that the state of the lock could have changed,
		 * so if the cmpset fails, loop back and retry.
		 */
		setx = SX_LOCK_UNLOCKED;
		queue = SQ_SHARED_QUEUE;
		if (x & SX_LOCK_EXCLUSIVE_WAITERS) {
			setx |= (x & SX_LOCK_SHARED_WAITERS);
			queue = SQ_EXCLUSIVE_QUEUE;
		}
		setx |= (x & SX_LOCK_WRITE_SPINNER);
		if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
			continue;
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p waking up all threads on "
			    "exclusive queue", __func__, sx);
		sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
		td->td_sx_slocks--;
		break;
	}
	sleepq_release(&sx->lock_object);
out_lockstat:
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
}

void
_sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t x;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);

	td = curthread;
	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) ||
	    !_sx_sunlock_try(sx, td, &x)))
		_sx_sunlock_hard(sx, td, x LOCK_FILE_LINE_ARG);
	else
		lock_profile_release_lock(&sx->lock_object, false);

	TD_LOCKS_DEC(curthread);
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	_sx_sunlock_int(sx LOCK_FILE_LINE_ARG);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(const struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
	int slocked = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;
	switch (what) {
	case SA_SLOCKED:
	case SA_SLOCKED | SA_NOTRECURSED:
	case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
		slocked = 1;
		/* FALLTHROUGH */
#endif
	case SA_LOCKED:
	case SA_LOCKED | SA_NOTRECURSED:
	case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If some other thread has an exclusive lock or we
		 * have one and are asserting a shared lock, fail.
		 * Also, if no one has a lock at all, fail.
		 */
		if (sx->sx_lock == SX_LOCK_UNLOCKED ||
		    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
		    sx_xholder(sx) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    sx->lock_object.lo_name, slocked ? "share " : "",
			    file, line);

		if (!(sx->sx_lock & SX_LOCK_SHARED)) {
			if (sx_recursed(sx)) {
				if (what & SA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    sx->lock_object.lo_name, file,
					    line);
			} else if (what & SA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		}
#endif
		break;
	case SA_XLOCKED:
	case SA_XLOCKED | SA_NOTRECURSED:
	case SA_XLOCKED | SA_RECURSED:
		if (sx_xholder(sx) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		if (sx_recursed(sx)) {
			if (what & SA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		} else if (what & SA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		break;
	case SA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If we hold an exclusive lock, fail.  We can't
		 * reliably check to see if we hold a shared lock or
		 * not.
		 */
		if (sx_xholder(sx) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif	/* INVARIANT_SUPPORT */

#ifdef DDB
static void
db_show_sx(const struct lock_object *lock)
{
	struct thread *td;
	const struct sx *sx;

	sx = (const struct sx *)lock;

	db_printf(" state: ");
	if (sx->sx_lock == SX_LOCK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (sx->sx_lock == SX_LOCK_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
	else {
		td = sx_xholder(sx);
		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (sx_recursed(sx))
			db_printf(" recursed: %d\n", sx->sx_recurse);
	}

	db_printf(" waiters: ");
	switch (sx->sx_lock &
	    (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
	case SX_LOCK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive and shared\n");
		break;
	default:
		db_printf("none\n");
	}
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
	const struct sx *sx;

	/*
	 * Check to see if this thread is blocked on an sx lock.
	 * First, we check the lock class.  If that is ok, then we
	 * compare the lock name against the wait message.
	 */
	sx = td->td_wchan;
	if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
	    sx->lock_object.lo_name != td->td_wmesg)
		return (0);

	/* We think we have an sx lock, so output some details. */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	*ownerp = sx_xholder(sx);
	if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK (count %ju)\n",
		    (uintmax_t)SX_SHARERS(sx->sx_lock));
	else
		db_printf("XLOCK\n");
	return (1);
}
#endif