xref: /freebsd/sys/kern/kern_rwlock.c (revision b37f6c9805edb4b89f0a8c2b78f78a3dcfc0647b)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/*
 * Return the rwlock address when the lock cookie address is provided.
 * This functionality assumes that struct rwlock has a member named rw_lock.
 */
#define	rwlock2rw(c)	(__containerof(c, struct rwlock, rw_lock))
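
/*
 * Illustrative sketch (not compiled here; "lk" and "c" are made-up names):
 * the rwlock(9) macros in sys/rwlock.h pass &rw->rw_lock as the cookie,
 * and rwlock2rw() recovers the enclosing lock:
 *
 *	struct rwlock lk;
 *	volatile uintptr_t *c = &lk.rw_lock;
 *	struct rwlock *rw = rwlock2rw(c);	(now rw == &lk)
 */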

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(const struct lock_object *lock);
#endif
static void	assert_rw(const struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rw(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rw,
#endif
};

#ifdef ADAPTIVE_RWLOCKS
static int __read_frequently rowner_retries = 10;
static int __read_frequently rowner_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
    "rwlock debugging");
SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");

static struct lock_delay_config __read_frequently rw_delay;

SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_base, CTLFLAG_RW, &rw_delay.base,
    0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(rw_delay);
#endif
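
/*
 * The knobs above are runtime-tunable, e.g. (illustrative invocations,
 * arbitrary values):
 *
 *	sysctl debug.rwlock.retry=20
 *	sysctl debug.rwlock.loops=20000
 */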

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */

#define	lv_rw_wowner(v)							\
	((v) & RW_LOCK_READ ? NULL :					\
	 (struct thread *)RW_OWNER((v)))

#define	rw_wowner(rw)	lv_rw_wowner(RW_READ_VALUE(rw))
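
/*
 * Worked example (a sketch): RW_UNLOCKED is encoded as a read lock with
 * zero readers, so its RW_LOCK_READ bit is set and lv_rw_wowner() yields
 * NULL.  A write-locked word stores the owning thread pointer with
 * RW_LOCK_READ clear, so for an owning thread td:
 *
 *	lv_rw_wowner(RW_UNLOCKED) == NULL
 *	lv_rw_wowner((uintptr_t)td) == td
 */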

/*
 * Returns true if the write owner is recursed.  Write ownership is not
 * assured here and should be checked beforehand.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the thread owning this lock, which should receive
 * any priority lent by threads that block on the lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	__rw_assert(c, what, file, line)
#endif

void
assert_rw(const struct lock_object *lock, int what)
{

	rw_assert((const struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, uintptr_t how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_rlock(rw);
	else
		rw_wlock(rw);
}

uintptr_t
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (1);
	} else {
		rw_wunlock(rw);
		return (0);
	}
}
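
/*
 * Sketch of how the lock class framework consumes the pair above: the
 * value returned by unlock_rw() is later passed back to lock_rw() as
 * "how" so that the lock is re-acquired in the same mode, roughly:
 *
 *	how = LOCK_CLASS(lock)->lc_unlock(lock);
 *	... sleep, then reacquire ...
 *	LOCK_CLASS(lock)->lc_lock(lock, how);
 */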

#ifdef KDTRACE_HOOKS
int
owner_rw(const struct lock_object *lock, struct thread **owner)
{
	const struct rwlock *rw = (const struct rwlock *)lock;
	uintptr_t x = rw->rw_lock;

	*owner = rw_wowner(rw);
	return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
	    (*owner != NULL));
}
#endif

void
_rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
{
	struct rwlock *rw;
	int flags;

	rw = rwlock2rw(c);

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE | RW_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
	    &rw->rw_lock));

	flags = LO_UPGRADABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	if (opts & RW_NEW)
		flags |= LO_NEW;

	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
}
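
/*
 * Usage sketch ("obj_lock" is a hypothetical name): consumers normally
 * reach this function through the rwlock(9) wrappers rather than calling
 * it directly:
 *
 *	struct rwlock obj_lock;
 *
 *	rw_init(&obj_lock, "obj lock");
 *	(or, with options:)
 *	rw_init_flags(&obj_lock, "obj lock", RW_RECURSE | RW_DUPOK);
 *	...
 *	rw_destroy(&obj_lock);
 */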

void
_rw_destroy(volatile uintptr_t *c)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args;

	args = arg;
	rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
	    args->ra_flags);
}
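
/*
 * Sketch (names are illustrative): locks with static storage are commonly
 * initialized at boot via the RW_SYSINIT() macro from sys/rwlock.h, which
 * arranges for rw_sysinit() to run with a struct rw_args describing the
 * lock:
 *
 *	static struct rwlock tbl_lock;
 *	RW_SYSINIT(tbl_lock_init, &tbl_lock, "tbl lock");
 */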

int
_rw_wowned(const volatile uintptr_t *c)
{

	return (rw_wowner(rwlock2rw(c)) == curthread);
}

void
_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t tid, v;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	tid = (uintptr_t)curthread;
	v = RW_UNLOCKED;
	if (!_rw_write_lock_fetch(rw, &v, tid))
		_rw_wlock_hard(rw, v, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw,
		    0, 0, file, line, LOCKSTAT_WRITER);

	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_INC(curthread);
}

int
__rw_try_wlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, v;
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	rval = 1;
	recursed = false;
	v = RW_UNLOCKED;
	for (;;) {
		if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid))
			break;
		if (v == RW_UNLOCKED)
			continue;
		if (v == tid && (rw->lock_object.lo_flags & LO_RECURSABLE)) {
			rw->rw_recurse++;
			atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
			/* Recursive acquisitions are not profiled below. */
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}
	return (rval);
}
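
/*
 * Illustrative caller pattern ("obj_lock" is hypothetical): rw_try_wlock()
 * returns nonzero on success and never sleeps, so it is usable where
 * blocking is not an option:
 *
 *	if (rw_try_wlock(&obj_lock)) {
 *		... modify shared state ...
 *		rw_wunlock(&obj_lock);
 *	} else {
 *		... back off and retry or defer the work ...
 *	}
 */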

int
__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	return (__rw_try_wlock_int(rw LOCK_FILE_LINE_ARG));
}

void
_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);

#ifdef LOCK_PROFILING
	_rw_wunlock_hard(rw, (uintptr_t)curthread, file, line);
#else
	__rw_wunlock(rw, curthread, file, line);
#endif

	TD_LOCKS_DEC(curthread);
}

/*
 * Determines whether a new reader can acquire a lock.  Succeeds if the
 * reader already owns a read lock and the lock is read-locked, to
 * prevent deadlock from reader recursion.  Also succeeds if the lock
 * is read-locked or unlocked and has no write waiters or spinners.
 * Failing otherwise prioritizes writers over readers.
 */
static bool __always_inline
__rw_can_read(struct thread *td, uintptr_t v, bool fp)
{

	if ((v & (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER))
	    == RW_LOCK_READ)
		return (true);
	if (!fp && td->td_rw_rlocks && (v & RW_LOCK_READ))
		return (true);
	return (false);
}

static bool __always_inline
__rw_rlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp, bool fp
    LOCK_FILE_LINE_ARG_DEF)
{

	/*
	 * Handle the easy case.  If no other thread has a write
	 * lock, then try to bump up the count of read locks.  Note
	 * that we have to preserve the current state of the
	 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
	 * read lock, then rw_lock must have changed, so restart
	 * the loop.  Note that this handles the case of a
	 * completely unlocked rwlock since such a lock is encoded
	 * as a read lock with no waiters.
	 */
	while (__rw_can_read(td, *vp, fp)) {
		if (atomic_fcmpset_acq_ptr(&rw->rw_lock, vp,
			*vp + RW_ONE_READER)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR4(KTR_LOCK,
				    "%s: %p succeed %p -> %p", __func__,
				    rw, (void *)*vp,
				    (void *)(*vp + RW_ONE_READER));
			td->td_rw_rlocks++;
			return (true);
		}
	}
	return (false);
}
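
/*
 * The loop above is the standard fcmpset idiom: on failure,
 * atomic_fcmpset_acq_ptr() refreshes *vp with the lock word it observed,
 * so the next iteration re-evaluates __rw_can_read() against current
 * state rather than re-reading the lock by hand.  A minimal sketch of
 * the idiom, detached from this file ("word", "can_proceed" and
 * "increment" are placeholders):
 *
 *	v = word;
 *	for (;;) {
 *		if (!can_proceed(v))
 *			return (false);
 *		if (atomic_fcmpset_acq_ptr(&word, &v, v + increment))
 *			return (true);
 *		(v was refreshed by the failed fcmpset; loop again)
 *	}
 */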

static void __noinline
__rw_rlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
    LOCK_FILE_LINE_ARG_DEF)
{
	struct turnstile *ts;
	struct thread *owner;
#ifdef ADAPTIVE_RWLOCKS
	int spintries = 0;
	int i, n;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state;
	int doing_lockprof;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&rw->lock_object,
	    &contested, &waittime);

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
	state = v;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof)) {
		all_time -= lockstat_nsecs(&rw->lock_object);
		state = v;
	}
#endif

	for (;;) {
		if (__rw_rlock_try(rw, td, &v, false LOCK_FILE_LINE_ARG))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", rw->lock_object.lo_name);
				do {
					lock_delay(&lda);
					v = RW_READ_VALUE(rw);
					owner = lv_rw_wowner(v);
				} while (owner != NULL && TD_IS_RUNNING(owner));
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else if (spintries < rowner_retries) {
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i += n) {
				n = RW_READERS(v);
				lock_delay_spin(n);
				v = RW_READ_VALUE(rw);
				if ((v & RW_LOCK_READ) == 0 || __rw_can_read(td, v, false))
					break;
			}
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += rowner_loops - i;
#endif
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			if (i < rowner_loops)
				continue;
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present,
		 * acquire the turnstile lock so we can begin the process
		 * of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = RW_READ_VALUE(rw);
retry_ts:
		if (__rw_can_read(td, v, false)) {
			turnstile_cancel(ts);
			continue;
		}

		owner = lv_rw_wowner(v);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if (owner != NULL) {
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!__rw_can_read(td, v, false));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set then try
		 * to set it.  If we fail to set it drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
			    v | RW_LOCK_READ_WAITERS))
				goto retry_ts;
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		MPASS(owner == rw_owner(rw));
		turnstile_wait(ts, owner, TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
		v = RW_READ_VALUE(rw);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_READER);
}

void
__rw_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t v;

	td = curthread;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED_TD(td) ||
	    !TD_IS_IDLETHREAD(td),
	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    td, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != td,
	    ("rw_rlock: wlock already held for %s @ %s:%d",
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

	v = RW_READ_VALUE(rw);
	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(rw__acquire) ||
	    !__rw_rlock_try(rw, td, &v, true LOCK_FILE_LINE_ARG)))
		__rw_rlock_hard(rw, td, v LOCK_FILE_LINE_ARG);

	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	TD_LOCKS_INC(curthread);
}
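
/*
 * Typical read-side usage (a sketch; "obj_lock" is hypothetical):
 *
 *	rw_rlock(&obj_lock);
 *	... read shared state ...
 *	rw_runlock(&obj_lock);
 */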

void
__rw_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	__rw_rlock_int(rw LOCK_FILE_LINE_ARG);
}

int
__rw_try_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));

	x = rw->rw_lock;
	for (;;) {
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}

int
__rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	return (__rw_try_rlock_int(rw LOCK_FILE_LINE_ARG));
}

static bool __always_inline
__rw_runlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp)
{

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		if (RW_READERS(*vp) > 1) {
			if (atomic_fcmpset_rel_ptr(&rw->rw_lock, vp,
			    *vp - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)*vp,
					    (void *)(*vp - RW_ONE_READER));
				td->td_rw_rlocks--;
				return (true);
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(*vp & RW_LOCK_WAITERS)) {
			MPASS((*vp & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_fcmpset_rel_ptr(&rw->rw_lock, vp,
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				td->td_rw_rlocks--;
				return (true);
			}
			continue;
		}
		break;
	}
	return (false);
}

static void __noinline
__rw_runlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
    LOCK_FILE_LINE_ARG_DEF)
{
	struct turnstile *ts;
	uintptr_t setv, queue;

	if (SCHEDULER_STOPPED())
		return;

	for (;;) {
		if (__rw_runlock_try(rw, td, &v))
			break;

		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = RW_READ_VALUE(rw);
retry_ts:
		if (__rw_runlock_try(rw, td, &v)) {
			turnstile_chain_unlock(&rw->lock_object);
			break;
		}

		v &= (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wake up actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		setv = RW_UNLOCKED;
		queue = TS_SHARED_QUEUE;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			setv |= (v & RW_LOCK_READ_WAITERS);
		}
		v |= RW_READERS_LOCK(1);
		if (!atomic_fcmpset_rel_ptr(&rw->rw_lock, &v, setv))
			goto retry_ts;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		td->td_rw_rlocks--;
		break;
	}
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
}

void
_rw_runlock_cookie_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t v;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(&rw->rw_lock, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	td = curthread;
	v = RW_READ_VALUE(rw);

	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(rw__release) ||
	    !__rw_runlock_try(rw, td, &v)))
		__rw_runlock_hard(rw, td, v LOCK_FILE_LINE_ARG);

	TD_LOCKS_DEC(curthread);
}

void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	_rw_runlock_cookie_int(rw LOCK_FILE_LINE_ARG);
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
__rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t tid;
	struct rwlock *rw;
	struct turnstile *ts;
	struct thread *owner;
#ifdef ADAPTIVE_RWLOCKS
	int spintries = 0;
	int i, n;
	int sleep_reason = 0;
#endif
	uintptr_t x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state;
	int doing_lockprof;
#endif

	tid = (uintptr_t)curthread;
	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	rw = rwlock2rw(c);
	if (__predict_false(v == RW_UNLOCKED))
		v = RW_READ_VALUE(rw);

	if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&rw->lock_object,
	    &contested, &waittime);

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
	state = v;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof)) {
		all_time -= lockstat_nsecs(&rw->lock_object);
		state = v;
	}
#endif

	for (;;) {
		if (v == RW_UNLOCKED) {
			if (_rw_write_lock_fetch(rw, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		sleep_reason = 1;
		owner = lv_rw_wowner(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = RW_READ_VALUE(rw);
				owner = lv_rw_wowner(v);
			} while (owner != NULL && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			continue;
		}
		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
		    spintries < rowner_retries) {
			if (!(v & RW_LOCK_WRITE_SPINNER)) {
				if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
				    v | RW_LOCK_WRITE_SPINNER)) {
					continue;
				}
			}
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i += n) {
				n = RW_READERS(v);
				lock_delay_spin(n);
				v = RW_READ_VALUE(rw);
				if ((v & RW_LOCK_WRITE_SPINNER) == 0)
					break;
			}
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += rowner_loops - i;
#endif
			if (i < rowner_loops)
				continue;
			sleep_reason = 2;
		}
#endif
		ts = turnstile_trywait(&rw->lock_object);
		v = RW_READ_VALUE(rw);
retry_ts:
		owner = lv_rw_wowner(v);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if (owner != NULL) {
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		} else if (RW_READERS(v) > 0 && sleep_reason == 1) {
			turnstile_cancel(ts);
			continue;
		}
#endif
		/*
		 * Check the waiters flags for this rwlock.  If the lock was
		 * released without leaving any pending waiters, simply try
		 * to acquire it.  If pending waiters are queued, claim lock
		 * ownership and preserve the waiters queue.
		 */
		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		if ((v & ~x) == RW_UNLOCKED) {
			x &= ~RW_LOCK_WRITE_SPINNER;
			if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid | x)) {
				if (x)
					turnstile_claim(ts);
				else
					turnstile_cancel(ts);
				break;
			}
			goto retry_ts;
		}
		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
			    v | RW_LOCK_WRITE_WAITERS))
				goto retry_ts;
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}
		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		MPASS(owner == rw_owner(rw));
		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
		spintries = 0;
#endif
		v = RW_READ_VALUE(rw);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_WRITER);
}

/*
 * This function is called if lockstat is active or the first try at releasing
 * a write lock failed.  The latter means that the lock is recursed or one of
 * the two waiter bits is set, indicating that at least one thread is waiting
 * on this lock.
 */
void
__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t tid, setv;
	int queue;

	tid = (uintptr_t)curthread;
	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);
	if (__predict_false(v == tid))
		v = RW_READ_VALUE(rw);

	if (v & RW_LOCK_WRITER_RECURSED) {
		if (--(rw->rw_recurse) == 0)
			atomic_clear_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_WRITER);
	if (v == tid && _rw_write_unlock(rw, tid))
		return;

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up shared
	 * waiters if we have any over writers.  This is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wake up the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	setv = RW_UNLOCKED;
	v = RW_READ_VALUE(rw);
	queue = TS_SHARED_QUEUE;
	if (v & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		setv |= (v & RW_LOCK_READ_WAITERS);
	}
	atomic_store_rel_ptr(&rw->rw_lock, setv);

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");

	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, queue);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
__rw_try_upgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(&rw->rw_lock, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(rw__upgrade, rw);
	}
	return (success);
}
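
/*
 * Caller pattern (a sketch; "obj_lock" is hypothetical): an upgrade can
 * fail, so callers must be prepared to drop the read lock, take the
 * write lock outright, and revalidate whatever was learned under the
 * read lock:
 *
 *	rw_rlock(&obj_lock);
 *	...
 *	if (!rw_try_upgrade(&obj_lock)) {
 *		rw_runlock(&obj_lock);
 *		rw_wlock(&obj_lock);
 *		(state may have changed here; recheck it)
 *	}
 *	...
 *	rw_wunlock(&obj_lock);
 */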

int
__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	return (__rw_try_upgrade_int(rw LOCK_FILE_LINE_ARG));
}

/*
 * Downgrade a write lock into a single read lock.
 */
void
__rw_downgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(&rw->rw_lock, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving waiters flag
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(rw__downgrade, rw);
}
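
/*
 * Sketch ("obj_lock" is hypothetical): a writer that has finished
 * updating but still needs a stable view can demote itself without a
 * window in which the lock is free:
 *
 *	rw_wlock(&obj_lock);
 *	... modify shared state ...
 *	rw_downgrade(&obj_lock);
 *	... keep reading; other readers may now enter ...
 *	rw_runlock(&obj_lock);
 */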

void
__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	__rw_downgrade_int(rw LOCK_FILE_LINE_ARG);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef __rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
__rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct rwlock *rw;

	if (panicstr != NULL)
		return;

	rw = rwlock2rw(c);

	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock, fail.  We can't reliably check
		 * whether we hold a read lock.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */
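
/*
 * Sketch of rw_assert(9) use at a function entry point (the function,
 * structure and lock names are hypothetical; the RA_* constants come
 * from sys/rwlock.h):
 *
 *	static void
 *	obj_update(struct obj *o)
 *	{
 *
 *		rw_assert(&obj_lock, RA_WLOCKED);
 *		...
 *	}
 */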

#ifdef DDB
void
db_show_rwlock(const struct lock_object *lock)
{
	const struct rwlock *rw;
	struct thread *td;

	rw = (const struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (rw_recursed(rw))
			db_printf(" recursed: %u\n", rw->rw_recurse);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif