/* xref: /freebsd/sys/kern/kern_rwlock.c (revision 22cf89c938886d14f5796fc49f9f020c23ea8eaf) */
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif
/*
 * Return the rwlock address when the lock cookie address is provided.
 * This functionality assumes that struct rwlock has a member named rw_lock.
 */
#define	rwlock2rw(c)	(__containerof(c, struct rwlock, rw_lock))

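/*
 * Illustrative sketch (not part of the original file): the "cookie" passed
 * to the _rw_*_cookie() functions below is the address of the lock word
 * itself, so recovering the containing lock is a plain __containerof():
 *
 *	struct rwlock *rw = ...;
 *	volatile uintptr_t *c = &rw->rw_lock;	(the cookie)
 *	MPASS(rwlock2rw(c) == rw);		(recovers the lock)
 */
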
#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(const struct lock_object *lock);
#endif
static void	assert_rw(const struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rw(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rw,
#endif
};

#ifdef ADAPTIVE_RWLOCKS
#ifdef RWLOCK_CUSTOM_BACKOFF
static u_short __read_frequently rowner_retries;
static u_short __read_frequently rowner_loops;
static SYSCTL_NODE(_debug, OID_AUTO, rwlock,
    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "rwlock debugging");
SYSCTL_U16(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
SYSCTL_U16(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");

static struct lock_delay_config __read_frequently rw_delay;

SYSCTL_U16(_debug_rwlock, OID_AUTO, delay_base, CTLFLAG_RW, &rw_delay.base,
    0, "");
SYSCTL_U16(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max,
    0, "");

static void
rw_lock_delay_init(void *arg __unused)
{

	lock_delay_default_init(&rw_delay);
	rowner_retries = 10;
	rowner_loops = max(10000, rw_delay.max);
}
LOCK_DELAY_SYSINIT(rw_lock_delay_init);
#else
#define rw_delay	locks_delay
#define rowner_retries	locks_delay_retries
#define rowner_loops	locks_delay_loops
#endif
#endif

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */

#define	lv_rw_wowner(v)							\
	((v) & RW_LOCK_READ ? NULL :					\
	 (struct thread *)RW_OWNER((v)))

#define	rw_wowner(rw)	lv_rw_wowner(RW_READ_VALUE(rw))

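/*
 * Illustrative note on the encoding assumed above (a sketch, not part of
 * the original file): an unlocked rwlock is represented as a read lock
 * with zero readers, so RW_LOCK_READ is set and lv_rw_wowner() returns
 * NULL.  When write-locked, the lock word holds the owning thread pointer
 * with the flag bits folded into its low-order bits, which RW_OWNER()
 * masks off:
 *
 *	uintptr_t v = RW_READ_VALUE(rw);
 *	if ((v & RW_LOCK_READ) == 0)
 *		printf("held by %p\n", (struct thread *)RW_OWNER(v));
 */
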
/*
 * Returns true if the write owner is recursed.  Write ownership is not
 * assured here and should be checked by the caller beforehand.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	__rw_assert(c, what, file, line)
#endif

void
assert_rw(const struct lock_object *lock, int what)
{

	rw_assert((const struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, uintptr_t how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_rlock(rw);
	else
		rw_wlock(rw);
}

uintptr_t
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (1);
	} else {
		rw_wunlock(rw);
		return (0);
	}
}

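/*
 * A note on the 'how' cookie used by the two functions above: unlock_rw()
 * returns 1 when it drops a read lock and 0 for a write lock, and
 * lock_rw() interprets that same value to reacquire the lock in the
 * original mode.  This is the generic lc_lock/lc_unlock contract, used
 * by code that must drop and retake an arbitrary lock; an illustrative
 * sketch (not part of the original file):
 *
 *	uintptr_t how = LOCK_CLASS(lock)->lc_unlock(lock);
 *	... run without the lock held ...
 *	LOCK_CLASS(lock)->lc_lock(lock, how);
 */
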
#ifdef KDTRACE_HOOKS
int
owner_rw(const struct lock_object *lock, struct thread **owner)
{
	const struct rwlock *rw = (const struct rwlock *)lock;
	uintptr_t x = rw->rw_lock;

	*owner = rw_wowner(rw);
	return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
	    (*owner != NULL));
}
#endif

void
_rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
{
	struct rwlock *rw;
	int flags;

	rw = rwlock2rw(c);

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE | RW_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
	    &rw->rw_lock));

	flags = LO_UPGRADABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	if (opts & RW_NEW)
		flags |= LO_NEW;

	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
}

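/*
 * Typical consumer usage of the initialization above (illustrative sketch
 * only; 'data_lock' is a made-up example name):
 *
 *	static struct rwlock data_lock;
 *	rw_init_flags(&data_lock, "data lock", RW_RECURSE);
 *	...
 *	rw_wlock(&data_lock);
 *	... modify shared state ...
 *	rw_wunlock(&data_lock);
 *	...
 *	rw_destroy(&data_lock);
 */
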
void
_rw_destroy(volatile uintptr_t *c)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args;

	args = arg;
	rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
	    args->ra_flags);
}

int
_rw_wowned(const volatile uintptr_t *c)
{

	return (rw_wowner(rwlock2rw(c)) == curthread);
}

void
_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t tid, v;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	tid = (uintptr_t)curthread;
	v = RW_UNLOCKED;
	if (!_rw_write_lock_fetch(rw, &v, tid))
		_rw_wlock_hard(rw, v, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw,
		    0, 0, file, line, LOCKSTAT_WRITER);

	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_INC(curthread);
}

int
__rw_try_wlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, v;
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	rval = 1;
	recursed = false;
	v = RW_UNLOCKED;
	for (;;) {
		if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid))
			break;
		if (v == RW_UNLOCKED)
			continue;
		if (v == tid && (rw->lock_object.lo_flags & LO_RECURSABLE)) {
			rw->rw_recurse++;
			atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
			/* Mark recursion so lockstat does not log a fresh acquire. */
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}
	return (rval);
}

int
__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	return (__rw_try_wlock_int(rw LOCK_FILE_LINE_ARG));
}

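/*
 * Illustrative non-blocking acquisition pattern built on the try-lock
 * above (a sketch only; 'data_lock' is a made-up example name):
 *
 *	if (rw_try_wlock(&data_lock)) {
 *		... update shared state ...
 *		rw_wunlock(&data_lock);
 *	} else {
 *		... defer the work or take a slow path ...
 *	}
 */
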
void
_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);

#ifdef LOCK_PROFILING
	_rw_wunlock_hard(rw, (uintptr_t)curthread, file, line);
#else
	__rw_wunlock(rw, curthread, file, line);
#endif

	TD_LOCKS_DEC(curthread);
}

/*
 * Determines whether a new reader can acquire the lock.  Succeeds if the
 * lock is read-locked (or unlocked) and has no writer waiters or spinners.
 * Also succeeds if this thread already holds a read lock and the lock is
 * read-locked, to prevent deadlock from reader recursion.  Failing
 * otherwise gives writers priority over readers.
 */
static bool __always_inline
__rw_can_read(struct thread *td, uintptr_t v, bool fp)
{

	if ((v & (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER))
	    == RW_LOCK_READ)
		return (true);
	if (!fp && td->td_rw_rlocks && (v & RW_LOCK_READ))
		return (true);
	return (false);
}

static bool __always_inline
__rw_rlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp, bool fp
    LOCK_FILE_LINE_ARG_DEF)
{

	/*
	 * Handle the easy case.  If no other thread has a write
	 * lock, then try to bump up the count of read locks.  Note
	 * that we have to preserve the current state of the
	 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
	 * read lock, then rw_lock must have changed, so restart
	 * the loop.  Note that this handles the case of a
	 * completely unlocked rwlock since such a lock is encoded
	 * as a read lock with no waiters.
	 */
	while (__rw_can_read(td, *vp, fp)) {
		if (atomic_fcmpset_acq_ptr(&rw->rw_lock, vp,
			*vp + RW_ONE_READER)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR4(KTR_LOCK,
				    "%s: %p succeed %p -> %p", __func__,
				    rw, (void *)*vp,
				    (void *)(*vp + RW_ONE_READER));
			td->td_rw_rlocks++;
			return (true);
		}
	}
	return (false);
}

static void __noinline
__rw_rlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
    LOCK_FILE_LINE_ARG_DEF)
{
	struct turnstile *ts;
	struct thread *owner;
#ifdef ADAPTIVE_RWLOCKS
	int spintries = 0;
	int i, n;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state = 0;
	int doing_lockprof = 0;
#endif

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(rw__acquire)) {
		if (__rw_rlock_try(rw, td, &v, false LOCK_FILE_LINE_ARG))
			goto out_lockstat;
		doing_lockprof = 1;
		all_time -= lockstat_nsecs(&rw->lock_object);
		state = v;
	}
#endif
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
	state = v;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init_noadapt(&lda);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&rw->lock_object, false,
	    &contested, &waittime);

	for (;;) {
		if (__rw_rlock_try(rw, td, &v, false LOCK_FILE_LINE_ARG))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", rw->lock_object.lo_name);
				do {
					lock_delay(&lda);
					v = RW_READ_VALUE(rw);
					owner = lv_rw_wowner(v);
				} while (owner != NULL && TD_IS_RUNNING(owner));
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else {
			if ((v & RW_LOCK_WRITE_SPINNER) && RW_READERS(v) == 0) {
				MPASS(!__rw_can_read(td, v, false));
				lock_delay_spin(2);
				v = RW_READ_VALUE(rw);
				continue;
			}
			if (spintries < rowner_retries) {
				spintries++;
				KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
				    "spinning", "lockname:\"%s\"",
				    rw->lock_object.lo_name);
				n = RW_READERS(v);
				for (i = 0; i < rowner_loops; i += n) {
					lock_delay_spin(n);
					v = RW_READ_VALUE(rw);
					if (!(v & RW_LOCK_READ))
						break;
					n = RW_READERS(v);
					if (n == 0)
						break;
					if (__rw_can_read(td, v, false))
						break;
				}
#ifdef KDTRACE_HOOKS
				lda.spin_cnt += rowner_loops - i;
#endif
				KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
				    "running");
				if (i < rowner_loops)
					continue;
			}
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present, so
		 * acquire the turnstile lock to begin the process of
		 * blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = RW_READ_VALUE(rw);
retry_ts:
		if (((v & RW_LOCK_WRITE_SPINNER) && RW_READERS(v) == 0) ||
		    __rw_can_read(td, v, false)) {
			turnstile_cancel(ts);
			continue;
		}

		owner = lv_rw_wowner(v);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if (owner != NULL) {
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!__rw_can_read(td, v, false));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set then try
		 * to set it.  If we fail to set it drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
			    v | RW_LOCK_READ_WAITERS))
				goto retry_ts;
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		MPASS(owner == rw_owner(rw));
		turnstile_wait(ts, owner, TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
		v = RW_READ_VALUE(rw);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
out_lockstat:
#endif
	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_READER);
}

void
__rw_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t v;

	td = curthread;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED_TD(td) ||
	    !TD_IS_IDLETHREAD(td),
	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    td, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != td,
	    ("rw_rlock: wlock already held for %s @ %s:%d",
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

	v = RW_READ_VALUE(rw);
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__acquire) ||
	    !__rw_rlock_try(rw, td, &v, true LOCK_FILE_LINE_ARG)))
		__rw_rlock_hard(rw, td, v LOCK_FILE_LINE_ARG);
	else
		lock_profile_obtain_lock_success(&rw->lock_object, false, 0, 0,
		    file, line);

	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	TD_LOCKS_INC(curthread);
}

void
__rw_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	__rw_rlock_int(rw LOCK_FILE_LINE_ARG);
}

int
__rw_try_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));

	x = rw->rw_lock;
	for (;;) {
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}

int
__rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	return (__rw_try_rlock_int(rw LOCK_FILE_LINE_ARG));
}

static bool __always_inline
__rw_runlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp)
{

	for (;;) {
		if (RW_READERS(*vp) > 1 || !(*vp & RW_LOCK_WAITERS)) {
			if (atomic_fcmpset_rel_ptr(&rw->rw_lock, vp,
			    *vp - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)*vp,
					    (void *)(*vp - RW_ONE_READER));
				td->td_rw_rlocks--;
				return (true);
			}
			continue;
		}
		break;
	}
	return (false);
}

static void __noinline
__rw_runlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
    LOCK_FILE_LINE_ARG_DEF)
{
	struct turnstile *ts;
	uintptr_t setv, queue;

	if (SCHEDULER_STOPPED())
		return;

	if (__rw_runlock_try(rw, td, &v))
		goto out_lockstat;

	/*
	 * Ok, we know we have waiters and we think we are the
	 * last reader, so grab the turnstile lock.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = RW_READ_VALUE(rw);
	for (;;) {
		if (__rw_runlock_try(rw, td, &v))
			break;

		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wake up actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		setv = RW_UNLOCKED;
		queue = TS_SHARED_QUEUE;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			setv |= (v & RW_LOCK_READ_WAITERS);
		}
		setv |= (v & RW_LOCK_WRITE_SPINNER);
		if (!atomic_fcmpset_rel_ptr(&rw->rw_lock, &v, setv))
			continue;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts);
		td->td_rw_rlocks--;
		break;
	}
	turnstile_chain_unlock(&rw->lock_object);
out_lockstat:
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
}

void
_rw_runlock_cookie_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t v;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(&rw->rw_lock, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	td = curthread;
	v = RW_READ_VALUE(rw);

	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__release) ||
	    !__rw_runlock_try(rw, td, &v)))
		__rw_runlock_hard(rw, td, v LOCK_FILE_LINE_ARG);
	else
		lock_profile_release_lock(&rw->lock_object, false);

	TD_LOCKS_DEC(curthread);
}

void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	_rw_runlock_cookie_int(rw LOCK_FILE_LINE_ARG);
}

#ifdef ADAPTIVE_RWLOCKS
static inline void
rw_drop_critical(uintptr_t v, bool *in_critical, int *extra_work)
{

	if (v & RW_LOCK_WRITE_SPINNER)
		return;
	if (*in_critical) {
		critical_exit();
		*in_critical = false;
		(*extra_work)--;
	}
}
#else
#define rw_drop_critical(v, in_critical, extra_work) do { } while (0)
#endif

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
__rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t tid;
	struct rwlock *rw;
	struct turnstile *ts;
	struct thread *owner;
#ifdef ADAPTIVE_RWLOCKS
	int spintries = 0;
	int i, n;
	enum { READERS, WRITER } sleep_reason = READERS;
	bool in_critical = false;
#endif
	uintptr_t setv;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state = 0;
	int doing_lockprof = 0;
#endif
	int extra_work = 0;

	tid = (uintptr_t)curthread;
	rw = rwlock2rw(c);

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(rw__acquire)) {
		while (v == RW_UNLOCKED) {
			if (_rw_write_lock_fetch(rw, &v, tid))
				goto out_lockstat;
		}
		extra_work = 1;
		doing_lockprof = 1;
		all_time -= lockstat_nsecs(&rw->lock_object);
		state = v;
	}
#endif
#ifdef LOCK_PROFILING
	extra_work = 1;
	doing_lockprof = 1;
	state = v;
#endif

	if (SCHEDULER_STOPPED())
		return;

	if (__predict_false(v == RW_UNLOCKED))
		v = RW_READ_VALUE(rw);

	if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init_noadapt(&lda);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&rw->lock_object, false,
	    &contested, &waittime);

	for (;;) {
		if (v == RW_UNLOCKED) {
			if (_rw_write_lock_fetch(rw, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_RWLOCKS
		if (v == (RW_LOCK_READ | RW_LOCK_WRITE_SPINNER)) {
			if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid))
				break;
			continue;
		}

		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		if (!(v & RW_LOCK_READ)) {
			rw_drop_critical(v, &in_critical, &extra_work);
			sleep_reason = WRITER;
			owner = lv_rw_wowner(v);
			if (!TD_IS_RUNNING(owner))
				goto ts;
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = RW_READ_VALUE(rw);
				owner = lv_rw_wowner(v);
			} while (owner != NULL && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			continue;
		} else if (RW_READERS(v) > 0) {
			sleep_reason = READERS;
			if (spintries == rowner_retries)
				goto ts;
			if (!(v & RW_LOCK_WRITE_SPINNER)) {
				if (!in_critical) {
					critical_enter();
					in_critical = true;
					extra_work++;
				}
				if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
				    v | RW_LOCK_WRITE_SPINNER)) {
					critical_exit();
					in_critical = false;
					extra_work--;
					continue;
				}
			}
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			n = RW_READERS(v);
			for (i = 0; i < rowner_loops; i += n) {
				lock_delay_spin(n);
				v = RW_READ_VALUE(rw);
				if (!(v & RW_LOCK_WRITE_SPINNER))
					break;
				if (!(v & RW_LOCK_READ))
					break;
				n = RW_READERS(v);
				if (n == 0)
					break;
			}
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += i;
#endif
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			if (i < rowner_loops)
				continue;
		}
ts:
#endif
		ts = turnstile_trywait(&rw->lock_object);
		v = RW_READ_VALUE(rw);
retry_ts:
		owner = lv_rw_wowner(v);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if (owner != NULL) {
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				rw_drop_critical(v, &in_critical, &extra_work);
				continue;
			}
		} else if (RW_READERS(v) > 0 && sleep_reason == WRITER) {
			turnstile_cancel(ts);
			rw_drop_critical(v, &in_critical, &extra_work);
			continue;
		}
#endif
		/*
		 * Check the waiters flags on this rwlock.
		 * If the lock was released without leaving any pending
		 * waiters queued, simply try to acquire it.
		 * If a pending waiters queue is present, claim lock
		 * ownership and preserve the pending queue.
		 */
		setv = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		if ((v & ~setv) == RW_UNLOCKED) {
			setv &= ~RW_LOCK_WRITE_SPINNER;
			if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid | setv)) {
				if (setv)
					turnstile_claim(ts);
				else
					turnstile_cancel(ts);
				break;
			}
			goto retry_ts;
		}

#ifdef ADAPTIVE_RWLOCKS
		if (in_critical) {
			if ((v & RW_LOCK_WRITE_SPINNER) ||
			    !((v & RW_LOCK_WRITE_WAITERS))) {
				setv = v & ~RW_LOCK_WRITE_SPINNER;
				setv |= RW_LOCK_WRITE_WAITERS;
				if (!atomic_fcmpset_ptr(&rw->rw_lock, &v, setv))
					goto retry_ts;
			}
			critical_exit();
			in_critical = false;
			extra_work--;
		} else {
#endif
			/*
			 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
			 * set it.  If we fail to set it, then loop back and try
			 * again.
			 */
			if (!(v & RW_LOCK_WRITE_WAITERS)) {
				if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
				    v | RW_LOCK_WRITE_WAITERS))
					goto retry_ts;
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p set write waiters flag",
					    __func__, rw);
			}
#ifdef ADAPTIVE_RWLOCKS
		}
#endif
		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		MPASS(owner == rw_owner(rw));
		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
		spintries = 0;
#endif
		v = RW_READ_VALUE(rw);
	}
	if (__predict_true(!extra_work))
		return;
#ifdef ADAPTIVE_RWLOCKS
	if (in_critical)
		critical_exit();
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
out_lockstat:
#endif
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_WRITER);
}

/*
 * This function is called if lockstat is active or the first try at releasing
 * a write lock failed.  The latter means that the lock is recursed or one of
 * the 2 waiter bits must be set indicating that at least one thread is waiting
 * on this lock.
 */
void
__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t tid, setv;
	int queue;

	tid = (uintptr_t)curthread;
	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);
	if (__predict_false(v == tid))
		v = RW_READ_VALUE(rw);

	if (v & RW_LOCK_WRITER_RECURSED) {
		if (--(rw->rw_recurse) == 0)
			atomic_clear_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_WRITER);
	if (v == tid && _rw_write_unlock(rw, tid))
		return;

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);

	/*
	 * Use the same algorithm as sx locks for now.  Prefer waking up shared
	 * waiters if we have any over writers.  This is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting, we wake up the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	setv = RW_UNLOCKED;
	v = RW_READ_VALUE(rw);
	queue = TS_SHARED_QUEUE;
	if (v & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		setv |= (v & RW_LOCK_READ_WAITERS);
	}
	atomic_store_rel_ptr(&rw->rw_lock, setv);

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");

	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, queue);
	turnstile_unpend(ts);
	turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
__rw_try_upgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t v, setv, tid;
	struct turnstile *ts;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(&rw->rw_lock, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	v = RW_READ_VALUE(rw);
	for (;;) {
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = RW_READ_VALUE(rw);
retry_ts:
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		setv = tid | (v & RW_LOCK_WAITERS);
		success = atomic_fcmpset_ptr(&rw->rw_lock, &v, setv);
		if (success) {
			if (v & RW_LOCK_WAITERS)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		goto retry_ts;
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(rw__upgrade, rw);
	}
	return (success);
}

int
__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	return (__rw_try_upgrade_int(rw LOCK_FILE_LINE_ARG));
}

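/*
 * Illustrative use of the non-blocking upgrade above (a sketch only;
 * 'data_lock' is a made-up example name): when the upgrade fails, the read
 * lock is still held, and the usual fallback is to drop it, take the write
 * lock, and re-validate any state examined under the read lock:
 *
 *	rw_rlock(&data_lock);
 *	if (!rw_try_upgrade(&data_lock)) {
 *		rw_runlock(&data_lock);
 *		rw_wlock(&data_lock);
 *		... re-check state; another thread may have run ...
 *	}
 *	... write side ...
 *	rw_wunlock(&data_lock);
 */
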
/*
 * Downgrade a write lock into a single read lock.
 */
void
__rw_downgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(&rw->rw_lock, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving waiters flag
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(rw__downgrade, rw);
}

void
__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	__rw_downgrade_int(rw LOCK_FILE_LINE_ARG);
}

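/*
 * Illustrative use of the downgrade above (a sketch only; 'data_lock' is a
 * made-up example name): publish an update under the write lock, then keep
 * a read hold while acting on it without blocking other readers:
 *
 *	rw_wlock(&data_lock);
 *	... install new state ...
 *	rw_downgrade(&data_lock);
 *	... read the state just installed ...
 *	rw_runlock(&data_lock);
 */
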
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef __rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
__rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock, fail.  We can't reliably check
		 * whether we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_rwlock(const struct lock_object *lock)
{
	const struct rwlock *rw;
	struct thread *td;

	rw = (const struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (rw_recursed(rw))
			db_printf(" recursed: %u\n", rw->rw_recurse);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif