xref: /freebsd/sys/kern/kern_rwlock.c (revision 8b25e8410533a6e69cceff910546b2dc485a5059)
1 /*-
2  * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /*
28  * Machine independent bits of reader/writer lock implementation.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "opt_ddb.h"
35 #include "opt_hwpmc_hooks.h"
36 #include "opt_no_adaptive_rwlocks.h"
37 
38 #include <sys/param.h>
39 #include <sys/kdb.h>
40 #include <sys/ktr.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/mutex.h>
44 #include <sys/proc.h>
45 #include <sys/rwlock.h>
46 #include <sys/sched.h>
47 #include <sys/smp.h>
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50 #include <sys/turnstile.h>
51 
52 #include <machine/cpu.h>
53 
54 #if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
55 #define	ADAPTIVE_RWLOCKS
56 #endif
57 
58 #ifdef HWPMC_HOOKS
59 #include <sys/pmckern.h>
60 PMC_SOFT_DECLARE( , , lock, failed);
61 #endif
62 
63 /*
64  * Return the rwlock address when the lock cookie address is provided.
65  * This functionality assumes that struct rwlock has a member named rw_lock.
66  */
67 #define	rwlock2rw(c)	(__containerof(c, struct rwlock, rw_lock))
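
/*
 * Illustrative note (not part of the original file): the "lock cookie"
 * passed to the _rw_*() KPI below is the address of the rw_lock member
 * itself, so __containerof() recovers the enclosing rwlock.  For any
 * properly initialized lock "rw":
 *
 *	MPASS(rwlock2rw(&rw->rw_lock) == rw);
 */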
68 
69 #ifdef DDB
70 #include <ddb/ddb.h>
71 
72 static void	db_show_rwlock(const struct lock_object *lock);
73 #endif
74 static void	assert_rw(const struct lock_object *lock, int what);
75 static void	lock_rw(struct lock_object *lock, uintptr_t how);
76 #ifdef KDTRACE_HOOKS
77 static int	owner_rw(const struct lock_object *lock, struct thread **owner);
78 #endif
79 static uintptr_t unlock_rw(struct lock_object *lock);
80 
81 struct lock_class lock_class_rw = {
82 	.lc_name = "rw",
83 	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
84 	.lc_assert = assert_rw,
85 #ifdef DDB
86 	.lc_ddb_show = db_show_rwlock,
87 #endif
88 	.lc_lock = lock_rw,
89 	.lc_unlock = unlock_rw,
90 #ifdef KDTRACE_HOOKS
91 	.lc_owner = owner_rw,
92 #endif
93 };
94 
95 #ifdef ADAPTIVE_RWLOCKS
96 static int rowner_retries = 10;
97 static int rowner_loops = 10000;
98 static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
99     "rwlock debugging");
100 SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
101 SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
102 
103 static struct lock_delay_config rw_delay = {
104 	.initial	= 1000,
105 	.step		= 500,
106 	.min		= 100,
107 	.max		= 5000,
108 };
109 
110 SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_initial, CTLFLAG_RW, &rw_delay.initial,
111     0, "");
112 SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_step, CTLFLAG_RW, &rw_delay.step,
113     0, "");
114 SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_min, CTLFLAG_RW, &rw_delay.min,
115     0, "");
116 SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max,
117     0, "");
118 
119 static void
120 rw_delay_sysinit(void *dummy)
121 {
122 
123 	rw_delay.initial = mp_ncpus * 25;
124 	rw_delay.step = (mp_ncpus * 25) / 2;
125 	rw_delay.min = mp_ncpus * 5;
126 	rw_delay.max = mp_ncpus * 25 * 10;
127 }
128 LOCK_DELAY_SYSINIT(rw_delay_sysinit);
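
/*
 * Worked example (not part of the original file): with mp_ncpus == 8 the
 * sysinit above sets rw_delay to initial = 200, step = 100, min = 40 and
 * max = 2000, so lock_delay() busy-waits on the order of a few hundred
 * cpu_spinwait() iterations per failed attempt, growing toward max as
 * contention persists.  The sysctls above let these values be retuned at
 * runtime.
 */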
129 #endif
130 
131 /*
132  * Return a pointer to the owning thread if the lock is write-locked or
133  * NULL if the lock is unlocked or read-locked.
134  */
135 
136 #define	lv_rw_wowner(v)							\
137 	((v) & RW_LOCK_READ ? NULL :					\
138 	 (struct thread *)RW_OWNER((v)))
139 
140 #define	rw_wowner(rw)	lv_rw_wowner(RW_READ_VALUE(rw))
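
/*
 * Illustrative note (not part of the original file): the entire lock state
 * lives in the single rw_lock word.  When RW_LOCK_READ is clear the word
 * holds the owning thread pointer (with the waiter flag bits masked off by
 * RW_OWNER()); when it is set the word holds a reader count extracted by
 * RW_READERS().  For example:
 *
 *	v = RW_READ_VALUE(rw);
 *	if (v & RW_LOCK_READ)
 *		nreaders = RW_READERS(v);		(shared)
 *	else
 *		owner = (struct thread *)RW_OWNER(v);	(exclusive)
 */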
141 
142 /*
143  * Returns whether the write owner is recursed.  Write ownership is not
144  * assured here and should be checked beforehand.
145  */
146 #define	rw_recursed(rw)		((rw)->rw_recurse != 0)
147 
148 /*
149  * Return true if curthread holds the lock.
150  */
151 #define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)
152 
153 /*
154  * Return a pointer to the owning thread for this lock who should receive
155  * any priority lent by threads that block on this lock.  Currently this
156  * is identical to rw_wowner().
157  */
158 #define	rw_owner(rw)		rw_wowner(rw)
159 
160 #ifndef INVARIANTS
161 #define	__rw_assert(c, what, file, line)
162 #endif
163 
164 void
165 assert_rw(const struct lock_object *lock, int what)
166 {
167 
168 	rw_assert((const struct rwlock *)lock, what);
169 }
170 
171 void
172 lock_rw(struct lock_object *lock, uintptr_t how)
173 {
174 	struct rwlock *rw;
175 
176 	rw = (struct rwlock *)lock;
177 	if (how)
178 		rw_rlock(rw);
179 	else
180 		rw_wlock(rw);
181 }
182 
183 uintptr_t
184 unlock_rw(struct lock_object *lock)
185 {
186 	struct rwlock *rw;
187 
188 	rw = (struct rwlock *)lock;
189 	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
190 	if (rw->rw_lock & RW_LOCK_READ) {
191 		rw_runlock(rw);
192 		return (1);
193 	} else {
194 		rw_wunlock(rw);
195 		return (0);
196 	}
197 }
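
/*
 * Illustrative sketch (not part of the original file): how a lock_class
 * consumer can transparently drop and reacquire an rwlock through the
 * generic interface above.  unlock_rw() reports 1 for a read lock and 0
 * for a write lock, and lock_rw() takes that value back as "how".  The
 * function below is hypothetical.
 */
static __inline void
example_drop_and_reacquire(struct lock_object *lo)
{
	struct lock_class *class;
	uintptr_t how;

	class = LOCK_CLASS(lo);
	how = class->lc_unlock(lo);
	/* ... work (or sleep) without the lock held ... */
	class->lc_lock(lo, how);
}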
198 
199 #ifdef KDTRACE_HOOKS
200 int
201 owner_rw(const struct lock_object *lock, struct thread **owner)
202 {
203 	const struct rwlock *rw = (const struct rwlock *)lock;
204 	uintptr_t x = rw->rw_lock;
205 
206 	*owner = rw_wowner(rw);
207 	return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
208 	    (*owner != NULL));
209 }
210 #endif
211 
212 void
213 _rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
214 {
215 	struct rwlock *rw;
216 	int flags;
217 
218 	rw = rwlock2rw(c);
219 
220 	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
221 	    RW_RECURSE | RW_NEW)) == 0);
222 	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
223 	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
224 	    &rw->rw_lock));
225 
226 	flags = LO_UPGRADABLE;
227 	if (opts & RW_DUPOK)
228 		flags |= LO_DUPOK;
229 	if (opts & RW_NOPROFILE)
230 		flags |= LO_NOPROFILE;
231 	if (!(opts & RW_NOWITNESS))
232 		flags |= LO_WITNESS;
233 	if (opts & RW_RECURSE)
234 		flags |= LO_RECURSABLE;
235 	if (opts & RW_QUIET)
236 		flags |= LO_QUIET;
237 	if (opts & RW_NEW)
238 		flags |= LO_NEW;
239 
240 	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
241 	rw->rw_lock = RW_UNLOCKED;
242 	rw->rw_recurse = 0;
243 }
244 
245 void
246 _rw_destroy(volatile uintptr_t *c)
247 {
248 	struct rwlock *rw;
249 
250 	rw = rwlock2rw(c);
251 
252 	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
253 	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
254 	rw->rw_lock = RW_DESTROYED;
255 	lock_destroy(&rw->lock_object);
256 }
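
/*
 * Illustrative sketch (not part of the original file): typical consumer
 * use of the rwlock(9) KPI implemented in this file.  The softc structure
 * and its fields are hypothetical.
 */
struct example_softc {
	struct rwlock	sc_lock;
	int		sc_count;
};

static __inline void
example_rwlock_usage(struct example_softc *sc)
{

	rw_init(&sc->sc_lock, "example softc lock");

	rw_wlock(&sc->sc_lock);			/* exclusive, may modify */
	sc->sc_count++;
	rw_wunlock(&sc->sc_lock);

	rw_rlock(&sc->sc_lock);			/* shared, read-only */
	(void)sc->sc_count;
	rw_runlock(&sc->sc_lock);

	rw_destroy(&sc->sc_lock);
}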
257 
258 void
259 rw_sysinit(void *arg)
260 {
261 	struct rw_args *args = arg;
262 
263 	rw_init((struct rwlock *)args->ra_rw, args->ra_desc);
264 }
265 
266 void
267 rw_sysinit_flags(void *arg)
268 {
269 	struct rw_args_flags *args = arg;
270 
271 	rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
272 	    args->ra_flags);
273 }
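
/*
 * Illustrative sketch (not part of the original file): statically
 * allocated locks are normally initialized at boot through the hooks
 * above via the RW_SYSINIT() macro; "example_lock" is hypothetical.
 *
 *	static struct rwlock example_lock;
 *	RW_SYSINIT(example_lock_init, &example_lock, "example global lock");
 */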
274 
275 int
276 _rw_wowned(const volatile uintptr_t *c)
277 {
278 
279 	return (rw_wowner(rwlock2rw(c)) == curthread);
280 }
281 
282 void
283 _rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
284 {
285 	struct rwlock *rw;
286 
287 	if (SCHEDULER_STOPPED())
288 		return;
289 
290 	rw = rwlock2rw(c);
291 
292 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
293 	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
294 	    curthread, rw->lock_object.lo_name, file, line));
295 	KASSERT(rw->rw_lock != RW_DESTROYED,
296 	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
297 	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
298 	    line, NULL);
299 	__rw_wlock(rw, curthread, file, line);
300 	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
301 	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
302 	TD_LOCKS_INC(curthread);
303 }
304 
305 int
306 __rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
307 {
308 	struct rwlock *rw;
309 	int rval;
310 
311 	if (SCHEDULER_STOPPED())
312 		return (1);
313 
314 	rw = rwlock2rw(c);
315 
316 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
317 	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
318 	    curthread, rw->lock_object.lo_name, file, line));
319 	KASSERT(rw->rw_lock != RW_DESTROYED,
320 	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
321 
322 	if (rw_wlocked(rw) &&
323 	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
324 		rw->rw_recurse++;
325 		rval = 1;
326 	} else
327 		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
328 		    (uintptr_t)curthread);
329 
330 	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
331 	if (rval) {
332 		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
333 		    file, line);
334 		if (!rw_recursed(rw))
335 			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
336 			    rw, 0, 0, file, line, LOCKSTAT_WRITER);
337 		TD_LOCKS_INC(curthread);
338 	}
339 	return (rval);
340 }
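
/*
 * Illustrative sketch (not part of the original file): rw_try_wlock() is
 * useful when a writer can defer its work rather than sleep on contention.
 * example_try_flush() and its arguments are hypothetical.
 */
static __inline int
example_try_flush(struct rwlock *rwp, int *pending)
{

	if (!rw_try_wlock(rwp)) {
		*pending = 1;		/* busy; leave the work for later */
		return (0);
	}
	*pending = 0;
	/* ... mutate the protected state while held exclusively ... */
	rw_wunlock(rwp);
	return (1);
}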
341 
342 void
343 _rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
344 {
345 	struct rwlock *rw;
346 
347 	if (SCHEDULER_STOPPED())
348 		return;
349 
350 	rw = rwlock2rw(c);
351 
352 	KASSERT(rw->rw_lock != RW_DESTROYED,
353 	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
354 	__rw_assert(c, RA_WLOCKED, file, line);
355 	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
356 	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
357 	    line);
358 	__rw_wunlock(rw, curthread, file, line);
359 	TD_LOCKS_DEC(curthread);
360 }
361 
362 /*
363  * Determines whether a new reader can acquire a lock.  Succeeds if the
364  * reader already owns a read lock and the lock is locked for read, to
365  * prevent deadlock from reader recursion.  Also succeeds if the lock
366  * is unlocked and has no write waiters or spinners.  Failing in all
367  * other cases gives waiting writers priority over new readers.
368  */
369 #define	RW_CAN_READ(_rw)						\
370     ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &	\
371     (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==	\
372     RW_LOCK_READ)
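
/*
 * Illustrative note (not part of the original file): RW_CAN_READ() accepts
 * two cases.  A thread already holding read locks (td_rw_rlocks != 0) may
 * acquire any read-locked word, avoiding deadlock on read recursion.
 * Otherwise the word must have RW_LOCK_READ set with both
 * RW_LOCK_WRITE_WAITERS and RW_LOCK_WRITE_SPINNER clear, so new readers
 * queue up behind pending writers; RW_LOCK_READ_WAITERS alone does not
 * block a new reader.
 */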
373 
374 void
375 __rw_rlock(volatile uintptr_t *c, const char *file, int line)
376 {
377 	struct rwlock *rw;
378 	struct turnstile *ts;
379 #ifdef ADAPTIVE_RWLOCKS
380 	volatile struct thread *owner;
381 	int spintries = 0;
382 	int i;
383 #endif
384 #ifdef LOCK_PROFILING
385 	uint64_t waittime = 0;
386 	int contested = 0;
387 #endif
388 	uintptr_t v;
389 #if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
390 	struct lock_delay_arg lda;
391 #endif
392 #ifdef KDTRACE_HOOKS
393 	uintptr_t state;
394 	u_int sleep_cnt = 0;
395 	int64_t sleep_time = 0;
396 	int64_t all_time = 0;
397 #endif
398 
399 	if (SCHEDULER_STOPPED())
400 		return;
401 
402 #if defined(ADAPTIVE_RWLOCKS)
403 	lock_delay_arg_init(&lda, &rw_delay);
404 #elif defined(KDTRACE_HOOKS)
405 	lock_delay_arg_init(&lda, NULL);
406 #endif
407 	rw = rwlock2rw(c);
408 
409 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
410 	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
411 	    curthread, rw->lock_object.lo_name, file, line));
412 	KASSERT(rw->rw_lock != RW_DESTROYED,
413 	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
414 	KASSERT(rw_wowner(rw) != curthread,
415 	    ("rw_rlock: wlock already held for %s @ %s:%d",
416 	    rw->lock_object.lo_name, file, line));
417 	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);
418 
419 #ifdef KDTRACE_HOOKS
420 	all_time -= lockstat_nsecs(&rw->lock_object);
421 #endif
422 	v = RW_READ_VALUE(rw);
423 #ifdef KDTRACE_HOOKS
424 	state = v;
425 #endif
426 	for (;;) {
427 		/*
428 		 * Handle the easy case.  If no other thread has a write
429 		 * lock, then try to bump up the count of read locks.  Note
430 		 * that we have to preserve the current state of the
431 		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
432 		 * read lock, then rw_lock must have changed, so restart
433 		 * the loop.  Note that this handles the case of a
434 		 * completely unlocked rwlock since such a lock is encoded
435 		 * as a read lock with no waiters.
436 		 */
437 		if (RW_CAN_READ(v)) {
438 			/*
439 			 * The RW_LOCK_READ_WAITERS flag should only be set
440 			 * if the lock has been unlocked and write waiters
441 			 * were present.
442 			 */
443 			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
444 			    v + RW_ONE_READER)) {
445 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
446 					CTR4(KTR_LOCK,
447 					    "%s: %p succeed %p -> %p", __func__,
448 					    rw, (void *)v,
449 					    (void *)(v + RW_ONE_READER));
450 				break;
451 			}
452 			v = RW_READ_VALUE(rw);
453 			continue;
454 		}
455 #ifdef KDTRACE_HOOKS
456 		lda.spin_cnt++;
457 #endif
458 #ifdef HWPMC_HOOKS
459 		PMC_SOFT_CALL( , , lock, failed);
460 #endif
461 		lock_profile_obtain_lock_failed(&rw->lock_object,
462 		    &contested, &waittime);
463 
464 #ifdef ADAPTIVE_RWLOCKS
465 		/*
466 		 * If the owner is running on another CPU, spin until
467 		 * the owner stops running or the state of the lock
468 		 * changes.
469 		 */
470 		if ((v & RW_LOCK_READ) == 0) {
471 			owner = (struct thread *)RW_OWNER(v);
472 			if (TD_IS_RUNNING(owner)) {
473 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
474 					CTR3(KTR_LOCK,
475 					    "%s: spinning on %p held by %p",
476 					    __func__, rw, owner);
477 				KTR_STATE1(KTR_SCHED, "thread",
478 				    sched_tdname(curthread), "spinning",
479 				    "lockname:\"%s\"", rw->lock_object.lo_name);
480 				do {
481 					lock_delay(&lda);
482 					v = RW_READ_VALUE(rw);
483 					owner = lv_rw_wowner(v);
484 				} while (owner != NULL && TD_IS_RUNNING(owner));
485 				KTR_STATE0(KTR_SCHED, "thread",
486 				    sched_tdname(curthread), "running");
487 				continue;
488 			}
489 		} else if (spintries < rowner_retries) {
490 			spintries++;
491 			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
492 			    "spinning", "lockname:\"%s\"",
493 			    rw->lock_object.lo_name);
494 			for (i = 0; i < rowner_loops; i++) {
495 				v = RW_READ_VALUE(rw);
496 				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
497 					break;
498 				cpu_spinwait();
499 			}
500 			v = RW_READ_VALUE(rw);
501 #ifdef KDTRACE_HOOKS
502 			lda.spin_cnt += rowner_loops - i;
503 #endif
504 			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
505 			    "running");
506 			if (i != rowner_loops)
507 				continue;
508 		}
509 #endif
510 
511 		/*
512 		 * Okay, now it's the hard case.  Some other thread already
513 		 * has a write lock or there are write waiters present,
514 		 * acquire the turnstile lock so we can begin the process
515 		 * of blocking.
516 		 */
517 		ts = turnstile_trywait(&rw->lock_object);
518 
519 		/*
520 		 * The lock might have been released while we spun, so
521 		 * recheck its state and restart the loop if needed.
522 		 */
523 		v = RW_READ_VALUE(rw);
524 		if (RW_CAN_READ(v)) {
525 			turnstile_cancel(ts);
526 			continue;
527 		}
528 
529 #ifdef ADAPTIVE_RWLOCKS
530 		/*
531 		 * The current lock owner might have started executing
532 		 * on another CPU (or the lock could have changed
533 		 * owners) while we were waiting on the turnstile
534 		 * chain lock.  If so, drop the turnstile lock and try
535 		 * again.
536 		 */
537 		if ((v & RW_LOCK_READ) == 0) {
538 			owner = (struct thread *)RW_OWNER(v);
539 			if (TD_IS_RUNNING(owner)) {
540 				turnstile_cancel(ts);
541 				continue;
542 			}
543 		}
544 #endif
545 
546 		/*
547 		 * The lock is held in write mode or it already has waiters.
548 		 */
549 		MPASS(!RW_CAN_READ(v));
550 
551 		/*
552 		 * If the RW_LOCK_READ_WAITERS flag is already set, then
553 		 * we can go ahead and block.  If it is not set then try
554 		 * to set it.  If we fail to set it, drop the turnstile
555 		 * lock and restart the loop.
556 		 */
557 		if (!(v & RW_LOCK_READ_WAITERS)) {
558 			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
559 			    v | RW_LOCK_READ_WAITERS)) {
560 				turnstile_cancel(ts);
561 				v = RW_READ_VALUE(rw);
562 				continue;
563 			}
564 			if (LOCK_LOG_TEST(&rw->lock_object, 0))
565 				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
566 				    __func__, rw);
567 		}
568 
569 		/*
570 		 * We were unable to acquire the lock and the read waiters
571 		 * flag is set, so we must block on the turnstile.
572 		 */
573 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
574 			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
575 			    rw);
576 #ifdef KDTRACE_HOOKS
577 		sleep_time -= lockstat_nsecs(&rw->lock_object);
578 #endif
579 		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
580 #ifdef KDTRACE_HOOKS
581 		sleep_time += lockstat_nsecs(&rw->lock_object);
582 		sleep_cnt++;
583 #endif
584 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
585 			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
586 			    __func__, rw);
587 		v = RW_READ_VALUE(rw);
588 	}
589 #ifdef KDTRACE_HOOKS
590 	all_time += lockstat_nsecs(&rw->lock_object);
591 	if (sleep_time)
592 		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
593 		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
594 		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
595 
596 	/* Record only the loops spinning and not sleeping. */
597 	if (lda.spin_cnt > sleep_cnt)
598 		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
599 		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
600 		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
601 #endif
602 	/*
603 	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
604 	 * however.  Turnstiles don't like owners changing between calls to
605 	 * turnstile_wait() currently.
606 	 */
607 	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
608 	    waittime, file, line, LOCKSTAT_READER);
609 	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
610 	WITNESS_LOCK(&rw->lock_object, 0, file, line);
611 	TD_LOCKS_INC(curthread);
612 	curthread->td_rw_rlocks++;
613 }
614 
615 int
616 __rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
617 {
618 	struct rwlock *rw;
619 	uintptr_t x;
620 
621 	if (SCHEDULER_STOPPED())
622 		return (1);
623 
624 	rw = rwlock2rw(c);
625 
626 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
627 	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
628 	    curthread, rw->lock_object.lo_name, file, line));
629 
630 	for (;;) {
631 		x = rw->rw_lock;
632 		KASSERT(rw->rw_lock != RW_DESTROYED,
633 		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
634 		if (!(x & RW_LOCK_READ))
635 			break;
636 		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
637 			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
638 			    line);
639 			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
640 			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
641 			    rw, 0, 0, file, line, LOCKSTAT_READER);
642 			TD_LOCKS_INC(curthread);
643 			curthread->td_rw_rlocks++;
644 			return (1);
645 		}
646 	}
647 
648 	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
649 	return (0);
650 }
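
/*
 * Illustrative sketch (not part of the original file): a non-blocking
 * reader built on the try path above; example_peek() and its arguments
 * are hypothetical.
 */
static __inline int
example_peek(struct rwlock *rwp, const int *shared, int *out)
{

	if (!rw_try_rlock(rwp))
		return (0);		/* contended; caller may retry later */
	*out = *shared;			/* read the protected state */
	rw_runlock(rwp);
	return (1);
}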
651 
652 void
653 _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
654 {
655 	struct rwlock *rw;
656 	struct turnstile *ts;
657 	uintptr_t x, v, queue;
658 
659 	if (SCHEDULER_STOPPED())
660 		return;
661 
662 	rw = rwlock2rw(c);
663 
664 	KASSERT(rw->rw_lock != RW_DESTROYED,
665 	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
666 	__rw_assert(c, RA_RLOCKED, file, line);
667 	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
668 	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);
669 
670 	/* TODO: drop "owner of record" here. */
671 	x = RW_READ_VALUE(rw);
672 	for (;;) {
673 		/*
674 		 * See if there is more than one read lock held.  If so,
675 		 * just drop one and return.
676 		 */
677 		if (RW_READERS(x) > 1) {
678 			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
679 			    x - RW_ONE_READER)) {
680 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
681 					CTR4(KTR_LOCK,
682 					    "%s: %p succeeded %p -> %p",
683 					    __func__, rw, (void *)x,
684 					    (void *)(x - RW_ONE_READER));
685 				break;
686 			}
687 			x = RW_READ_VALUE(rw);
688 			continue;
689 		}
690 		/*
691 		 * If there aren't any waiters for a write lock, then try
692 		 * to drop it quickly.
693 		 */
694 		if (!(x & RW_LOCK_WAITERS)) {
695 			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
696 			    RW_READERS_LOCK(1));
697 			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
698 			    RW_UNLOCKED)) {
699 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
700 					CTR2(KTR_LOCK, "%s: %p last succeeded",
701 					    __func__, rw);
702 				break;
703 			}
704 			x = RW_READ_VALUE(rw);
705 			continue;
706 		}
707 		/*
708 		 * Ok, we know we have waiters and we think we are the
709 		 * last reader, so grab the turnstile lock.
710 		 */
711 		turnstile_chain_lock(&rw->lock_object);
712 		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
713 		MPASS(v & RW_LOCK_WAITERS);
714 
715 		/*
716 		 * Try to drop our lock, leaving the lock in an unlocked
717 		 * state.
718 		 *
719 		 * If you wanted to do explicit lock handoff you'd have to
720 		 * do it here.  You'd also want to use turnstile_signal()
721 		 * and you'd have to handle the race where a higher
722 		 * priority thread blocks on the write lock before the
723 		 * thread you wake up actually runs and have the new thread
724 		 * "steal" the lock.  For now it's a lot simpler to just
725 		 * wake up all of the waiters.
726 		 *
726 		 *
727 		 * As above, if we fail, then another thread might have
728 		 * acquired a read lock, so drop the turnstile lock and
729 		 * restart.
730 		 */
731 		x = RW_UNLOCKED;
732 		if (v & RW_LOCK_WRITE_WAITERS) {
733 			queue = TS_EXCLUSIVE_QUEUE;
734 			x |= (v & RW_LOCK_READ_WAITERS);
735 		} else
736 			queue = TS_SHARED_QUEUE;
737 		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
738 		    x)) {
739 			turnstile_chain_unlock(&rw->lock_object);
740 			x = RW_READ_VALUE(rw);
741 			continue;
742 		}
743 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
744 			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
745 			    __func__, rw);
746 
747 		/*
748 		 * Ok.  The lock is released and all that's left is to
749 		 * wake up the waiters.  Note that the lock might not be
750 		 * free anymore, but in that case the writers will just
751 		 * block again if they run before the new lock holder(s)
752 		 * release the lock.
753 		 */
754 		ts = turnstile_lookup(&rw->lock_object);
755 		MPASS(ts != NULL);
756 		turnstile_broadcast(ts, queue);
757 		turnstile_unpend(ts, TS_SHARED_LOCK);
758 		turnstile_chain_unlock(&rw->lock_object);
759 		break;
760 	}
761 	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
762 	TD_LOCKS_DEC(curthread);
763 	curthread->td_rw_rlocks--;
764 }
765 
766 /*
767  * This function is called when we are unable to obtain a write lock on the
768  * first try.  This means that at least one other thread holds either a
769  * read or write lock.
770  */
771 void
772 __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
773     int line)
774 {
775 	struct rwlock *rw;
776 	struct turnstile *ts;
777 #ifdef ADAPTIVE_RWLOCKS
778 	volatile struct thread *owner;
779 	int spintries = 0;
780 	int i;
781 #endif
782 	uintptr_t v, x;
783 #ifdef LOCK_PROFILING
784 	uint64_t waittime = 0;
785 	int contested = 0;
786 #endif
787 #if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
788 	struct lock_delay_arg lda;
789 #endif
790 #ifdef KDTRACE_HOOKS
791 	uintptr_t state;
792 	u_int sleep_cnt = 0;
793 	int64_t sleep_time = 0;
794 	int64_t all_time = 0;
795 #endif
796 
797 	if (SCHEDULER_STOPPED())
798 		return;
799 
800 #if defined(ADAPTIVE_RWLOCKS)
801 	lock_delay_arg_init(&lda, &rw_delay);
802 #elif defined(KDTRACE_HOOKS)
803 	lock_delay_arg_init(&lda, NULL);
804 #endif
805 	rw = rwlock2rw(c);
806 	v = RW_READ_VALUE(rw);
807 
808 	if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
809 		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
810 		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
811 		    __func__, rw->lock_object.lo_name, file, line));
812 		rw->rw_recurse++;
813 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
814 			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
815 		return;
816 	}
817 
818 	if (LOCK_LOG_TEST(&rw->lock_object, 0))
819 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
820 		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
821 
822 #ifdef KDTRACE_HOOKS
823 	all_time -= lockstat_nsecs(&rw->lock_object);
824 	state = v;
825 #endif
826 	for (;;) {
827 		if (v == RW_UNLOCKED) {
828 			if (_rw_write_lock(rw, tid))
829 				break;
830 			v = RW_READ_VALUE(rw);
831 			continue;
832 		}
833 #ifdef KDTRACE_HOOKS
834 		lda.spin_cnt++;
835 #endif
836 #ifdef HWPMC_HOOKS
837 		PMC_SOFT_CALL( , , lock, failed);
838 #endif
839 		lock_profile_obtain_lock_failed(&rw->lock_object,
840 		    &contested, &waittime);
841 #ifdef ADAPTIVE_RWLOCKS
842 		/*
843 		 * If the lock is write locked and the owner is
844 		 * running on another CPU, spin until the owner stops
845 		 * running or the state of the lock changes.
846 		 */
847 		owner = lv_rw_wowner(v);
848 		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
849 			if (LOCK_LOG_TEST(&rw->lock_object, 0))
850 				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
851 				    __func__, rw, owner);
852 			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
853 			    "spinning", "lockname:\"%s\"",
854 			    rw->lock_object.lo_name);
855 			do {
856 				lock_delay(&lda);
857 				v = RW_READ_VALUE(rw);
858 				owner = lv_rw_wowner(v);
859 			} while (owner != NULL && TD_IS_RUNNING(owner));
860 			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
861 			    "running");
862 			continue;
863 		}
864 		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
865 		    spintries < rowner_retries) {
866 			if (!(v & RW_LOCK_WRITE_SPINNER)) {
867 				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
868 				    v | RW_LOCK_WRITE_SPINNER)) {
869 					v = RW_READ_VALUE(rw);
870 					continue;
871 				}
872 			}
873 			spintries++;
874 			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
875 			    "spinning", "lockname:\"%s\"",
876 			    rw->lock_object.lo_name);
877 			for (i = 0; i < rowner_loops; i++) {
878 				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
879 					break;
880 				cpu_spinwait();
881 			}
882 			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
883 			    "running");
884 			v = RW_READ_VALUE(rw);
885 #ifdef KDTRACE_HOOKS
886 			lda.spin_cnt += rowner_loops - i;
887 #endif
888 			if (i != rowner_loops)
889 				continue;
890 		}
891 #endif
892 		ts = turnstile_trywait(&rw->lock_object);
893 		v = RW_READ_VALUE(rw);
894 
895 #ifdef ADAPTIVE_RWLOCKS
896 		/*
897 		 * The current lock owner might have started executing
898 		 * on another CPU (or the lock could have changed
899 		 * owners) while we were waiting on the turnstile
900 		 * chain lock.  If so, drop the turnstile lock and try
901 		 * again.
902 		 */
903 		if (!(v & RW_LOCK_READ)) {
904 			owner = (struct thread *)RW_OWNER(v);
905 			if (TD_IS_RUNNING(owner)) {
906 				turnstile_cancel(ts);
907 				continue;
908 			}
909 		}
910 #endif
911 		/*
912 		 * Check the waiter flags for this rwlock.  If the lock was
913 		 * released without leaving any pending waiters queued,
914 		 * simply try to acquire it.
915 		 * If a pending waiters queue is present, claim lock
916 		 * ownership and preserve the pending queue.
917 		 */
918 		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
919 		if ((v & ~x) == RW_UNLOCKED) {
920 			x &= ~RW_LOCK_WRITE_SPINNER;
921 			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
922 				if (x)
923 					turnstile_claim(ts);
924 				else
925 					turnstile_cancel(ts);
926 				break;
927 			}
928 			turnstile_cancel(ts);
929 			v = RW_READ_VALUE(rw);
930 			continue;
931 		}
932 		/*
933 		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
934 		 * set it.  If we fail to set it, then loop back and try
935 		 * again.
936 		 */
937 		if (!(v & RW_LOCK_WRITE_WAITERS)) {
938 			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
939 			    v | RW_LOCK_WRITE_WAITERS)) {
940 				turnstile_cancel(ts);
941 				v = RW_READ_VALUE(rw);
942 				continue;
943 			}
944 			if (LOCK_LOG_TEST(&rw->lock_object, 0))
945 				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
946 				    __func__, rw);
947 		}
948 		/*
949 		 * We were unable to acquire the lock and the write waiters
950 		 * flag is set, so we must block on the turnstile.
951 		 */
952 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
953 			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
954 			    rw);
955 #ifdef KDTRACE_HOOKS
956 		sleep_time -= lockstat_nsecs(&rw->lock_object);
957 #endif
958 		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
959 #ifdef KDTRACE_HOOKS
960 		sleep_time += lockstat_nsecs(&rw->lock_object);
961 		sleep_cnt++;
962 #endif
963 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
964 			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
965 			    __func__, rw);
966 #ifdef ADAPTIVE_RWLOCKS
967 		spintries = 0;
968 #endif
969 		v = RW_READ_VALUE(rw);
970 	}
971 #ifdef KDTRACE_HOOKS
972 	all_time += lockstat_nsecs(&rw->lock_object);
973 	if (sleep_time)
974 		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
975 		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
976 		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
977 
978 	/* Record only the loops spinning and not sleeping. */
979 	if (lda.spin_cnt > sleep_cnt)
980 		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
981 		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
982 		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
983 #endif
984 	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
985 	    waittime, file, line, LOCKSTAT_WRITER);
986 }
987 
988 /*
989  * This function is called if the first try at releasing a write lock failed.
990  * This means that one of the two waiter bits must be set, indicating that at
991  * least one thread is waiting on this lock.
992  */
993 void
994 __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
995     int line)
996 {
997 	struct rwlock *rw;
998 	struct turnstile *ts;
999 	uintptr_t v;
1000 	int queue;
1001 
1002 	if (SCHEDULER_STOPPED())
1003 		return;
1004 
1005 	rw = rwlock2rw(c);
1006 
1007 	if (rw_wlocked(rw) && rw_recursed(rw)) {
1008 		rw->rw_recurse--;
1009 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
1010 			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
1011 		return;
1012 	}
1013 
1014 	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
1015 	    ("%s: neither of the waiter flags are set", __func__));
1016 
1017 	if (LOCK_LOG_TEST(&rw->lock_object, 0))
1018 		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);
1019 
1020 	turnstile_chain_lock(&rw->lock_object);
1021 	ts = turnstile_lookup(&rw->lock_object);
1022 	MPASS(ts != NULL);
1023 
1024 	/*
1025 	 * Use the same algorithm as sx locks for now.  Prefer waking shared
1026 	 * waiters, if there are any, over writers.  This is probably not ideal.
1027 	 *
1028 	 * 'v' is the value we are going to write back to rw_lock.  If we
1029 	 * have waiters on both queues, we need to preserve the state of
1030 	 * the waiter flag for the queue we don't wake up.  For now this is
1031 	 * hardcoded for the algorithm mentioned above.
1032 	 *
1033 	 * In the case of both readers and writers waiting we wake up the
1034 	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
1035 	 * new writer comes in before a reader it will claim the lock up
1036 	 * above.  There is probably a potential priority inversion in
1037 	 * there that could be worked around either by waking both queues
1038 	 * of waiters or doing some complicated lock handoff gymnastics.
1039 	 */
1040 	v = RW_UNLOCKED;
1041 	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
1042 		queue = TS_EXCLUSIVE_QUEUE;
1043 		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
1044 	} else
1045 		queue = TS_SHARED_QUEUE;
1046 
1047 	/* Wake up all waiters for the specific queue. */
1048 	if (LOCK_LOG_TEST(&rw->lock_object, 0))
1049 		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
1050 		    queue == TS_SHARED_QUEUE ? "read" : "write");
1051 	turnstile_broadcast(ts, queue);
1052 	atomic_store_rel_ptr(&rw->rw_lock, v);
1053 	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
1054 	turnstile_chain_unlock(&rw->lock_object);
1055 }
1056 
1057 /*
1058  * Attempt to do a non-blocking upgrade from a read lock to a write
1059  * lock.  This will only succeed if this thread holds a single read
1060  * lock.  Returns true if the upgrade succeeded and false otherwise.
1061  */
1062 int
1063 __rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
1064 {
1065 	struct rwlock *rw;
1066 	uintptr_t v, x, tid;
1067 	struct turnstile *ts;
1068 	int success;
1069 
1070 	if (SCHEDULER_STOPPED())
1071 		return (1);
1072 
1073 	rw = rwlock2rw(c);
1074 
1075 	KASSERT(rw->rw_lock != RW_DESTROYED,
1076 	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
1077 	__rw_assert(c, RA_RLOCKED, file, line);
1078 
1079 	/*
1080 	 * Attempt to switch from one reader to a writer.  If there
1081 	 * are any write waiters, then we will have to lock the
1082 	 * turnstile first to prevent races with another writer
1083 	 * calling turnstile_wait() before we have claimed this
1084 	 * turnstile.  So, do the simple case of no waiters first.
1085 	 */
1086 	tid = (uintptr_t)curthread;
1087 	success = 0;
1088 	for (;;) {
1089 		v = rw->rw_lock;
1090 		if (RW_READERS(v) > 1)
1091 			break;
1092 		if (!(v & RW_LOCK_WAITERS)) {
1093 			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
1094 			if (!success)
1095 				continue;
1096 			break;
1097 		}
1098 
1099 		/*
1100 		 * Ok, we think we have waiters, so lock the turnstile.
1101 		 */
1102 		ts = turnstile_trywait(&rw->lock_object);
1103 		v = rw->rw_lock;
1104 		if (RW_READERS(v) > 1) {
1105 			turnstile_cancel(ts);
1106 			break;
1107 		}
1108 		/*
1109 		 * Try to switch from one reader to a writer again.  This time
1110 		 * we honor the current state of the waiters flags.
1111 		 * If we obtain the lock with the flags set, then claim
1112 		 * ownership of the turnstile.
1113 		 */
1114 		x = rw->rw_lock & RW_LOCK_WAITERS;
1115 		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
1116 		if (success) {
1117 			if (x)
1118 				turnstile_claim(ts);
1119 			else
1120 				turnstile_cancel(ts);
1121 			break;
1122 		}
1123 		turnstile_cancel(ts);
1124 	}
1125 	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
1126 	if (success) {
1127 		curthread->td_rw_rlocks--;
1128 		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
1129 		    file, line);
1130 		LOCKSTAT_RECORD0(rw__upgrade, rw);
1131 	}
1132 	return (success);
1133 }
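
/*
 * Illustrative sketch (not part of the original file): the common pattern
 * around rw_try_upgrade().  On failure the read lock is still held, so the
 * caller drops it, takes the write lock and revalidates before modifying.
 * example_update() and "dirty" are hypothetical.
 */
static __inline void
example_update(struct rwlock *rwp, int *dirty)
{

	rw_rlock(rwp);
	if (!*dirty) {
		rw_runlock(rwp);
		return;
	}
	if (!rw_try_upgrade(rwp)) {
		rw_runlock(rwp);
		rw_wlock(rwp);
		/* The state may have changed while unlocked; recheck. */
	}
	if (*dirty)
		*dirty = 0;
	rw_wunlock(rwp);
}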
1134 
1135 /*
1136  * Downgrade a write lock into a single read lock.
1137  */
1138 void
1139 __rw_downgrade(volatile uintptr_t *c, const char *file, int line)
1140 {
1141 	struct rwlock *rw;
1142 	struct turnstile *ts;
1143 	uintptr_t tid, v;
1144 	int rwait, wwait;
1145 
1146 	if (SCHEDULER_STOPPED())
1147 		return;
1148 
1149 	rw = rwlock2rw(c);
1150 
1151 	KASSERT(rw->rw_lock != RW_DESTROYED,
1152 	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
1153 	__rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
1154 #ifndef INVARIANTS
1155 	if (rw_recursed(rw))
1156 		panic("downgrade of a recursed lock");
1157 #endif
1158 
1159 	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);
1160 
1161 	/*
1162 	 * Convert from a writer to a single reader.  First we handle
1163 	 * the easy case with no waiters.  If there are any waiters, we
1164 	 * lock the turnstile and "disown" the lock.
1165 	 */
1166 	tid = (uintptr_t)curthread;
1167 	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
1168 		goto out;
1169 
1170 	/*
1171 	 * Ok, we think we have waiters, so lock the turnstile so we can
1172 	 * read the waiter flags without any races.
1173 	 */
1174 	turnstile_chain_lock(&rw->lock_object);
1175 	v = rw->rw_lock & RW_LOCK_WAITERS;
1176 	rwait = v & RW_LOCK_READ_WAITERS;
1177 	wwait = v & RW_LOCK_WRITE_WAITERS;
1178 	MPASS(rwait | wwait);
1179 
1180 	/*
1181 	 * Downgrade from a write lock while preserving waiters flag
1182 	 * and give up ownership of the turnstile.
1183 	 */
1184 	ts = turnstile_lookup(&rw->lock_object);
1185 	MPASS(ts != NULL);
1186 	if (!wwait)
1187 		v &= ~RW_LOCK_READ_WAITERS;
1188 	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
1189 	/*
1190 	 * Wake other readers if there are no writers pending.  Otherwise they
1191 	 * won't be able to acquire the lock anyway.
1192 	 */
1193 	if (rwait && !wwait) {
1194 		turnstile_broadcast(ts, TS_SHARED_QUEUE);
1195 		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
1196 	} else
1197 		turnstile_disown(ts);
1198 	turnstile_chain_unlock(&rw->lock_object);
1199 out:
1200 	curthread->td_rw_rlocks++;
1201 	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
1202 	LOCKSTAT_RECORD0(rw__downgrade, rw);
1203 }
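
/*
 * Illustrative sketch (not part of the original file): rw_downgrade() lets
 * a writer finish with a long read-only phase without excluding other
 * readers for its whole duration; example_rebuild() is hypothetical.
 */
static __inline void
example_rebuild(struct rwlock *rwp)
{

	rw_wlock(rwp);
	/* ... rebuild the protected data exclusively ... */
	rw_downgrade(rwp);	/* now held shared; other readers may enter */
	/* ... lengthy read-only use of the new data ... */
	rw_runlock(rwp);
}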
1204 
1205 #ifdef INVARIANT_SUPPORT
1206 #ifndef INVARIANTS
1207 #undef __rw_assert
1208 #endif
1209 
1210 /*
1211  * In the non-WITNESS case, rw_assert() can only detect that at least
1212  * *some* thread owns an rlock, but it cannot guarantee that *this*
1213  * thread owns an rlock.
1214  */
1215 void
1216 __rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
1217 {
1218 	const struct rwlock *rw;
1219 
1220 	if (panicstr != NULL)
1221 		return;
1222 
1223 	rw = rwlock2rw(c);
1224 
1225 	switch (what) {
1226 	case RA_LOCKED:
1227 	case RA_LOCKED | RA_RECURSED:
1228 	case RA_LOCKED | RA_NOTRECURSED:
1229 	case RA_RLOCKED:
1230 	case RA_RLOCKED | RA_RECURSED:
1231 	case RA_RLOCKED | RA_NOTRECURSED:
1232 #ifdef WITNESS
1233 		witness_assert(&rw->lock_object, what, file, line);
1234 #else
1235 		/*
1236 		 * If some other thread has a write lock or we have one
1237 		 * and are asserting a read lock, fail.  Also, if no one
1238 		 * has a lock at all, fail.
1239 		 */
1240 		if (rw->rw_lock == RW_UNLOCKED ||
1241 		    (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
1242 		    rw_wowner(rw) != curthread)))
1243 			panic("Lock %s not %slocked @ %s:%d\n",
1244 			    rw->lock_object.lo_name, (what & RA_RLOCKED) ?
1245 			    "read " : "", file, line);
1246 
1247 		if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
1248 			if (rw_recursed(rw)) {
1249 				if (what & RA_NOTRECURSED)
1250 					panic("Lock %s recursed @ %s:%d\n",
1251 					    rw->lock_object.lo_name, file,
1252 					    line);
1253 			} else if (what & RA_RECURSED)
1254 				panic("Lock %s not recursed @ %s:%d\n",
1255 				    rw->lock_object.lo_name, file, line);
1256 		}
1257 #endif
1258 		break;
1259 	case RA_WLOCKED:
1260 	case RA_WLOCKED | RA_RECURSED:
1261 	case RA_WLOCKED | RA_NOTRECURSED:
1262 		if (rw_wowner(rw) != curthread)
1263 			panic("Lock %s not exclusively locked @ %s:%d\n",
1264 			    rw->lock_object.lo_name, file, line);
1265 		if (rw_recursed(rw)) {
1266 			if (what & RA_NOTRECURSED)
1267 				panic("Lock %s recursed @ %s:%d\n",
1268 				    rw->lock_object.lo_name, file, line);
1269 		} else if (what & RA_RECURSED)
1270 			panic("Lock %s not recursed @ %s:%d\n",
1271 			    rw->lock_object.lo_name, file, line);
1272 		break;
1273 	case RA_UNLOCKED:
1274 #ifdef WITNESS
1275 		witness_assert(&rw->lock_object, what, file, line);
1276 #else
1277 		/*
1278 		 * If we hold a write lock, fail.  We can't reliably check
1279 		 * to see if we hold a read lock or not.
1280 		 */
1281 		if (rw_wowner(rw) == curthread)
1282 			panic("Lock %s exclusively locked @ %s:%d\n",
1283 			    rw->lock_object.lo_name, file, line);
1284 #endif
1285 		break;
1286 	default:
1287 		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
1288 		    line);
1289 	}
1290 }
1291 #endif /* INVARIANT_SUPPORT */
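
/*
 * Illustrative sketch (not part of the original file): consumers typically
 * assert the expected lock state on entry to internal functions; with
 * INVARIANTS enabled rw_assert() reaches the __rw_assert() checks above
 * and otherwise expands to nothing.  example_modify() is hypothetical.
 */
static __inline void
example_modify(struct rwlock *rwp, int *counter)
{

	rw_assert(rwp, RA_WLOCKED);
	(*counter)++;
}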
1292 
1293 #ifdef DDB
1294 void
1295 db_show_rwlock(const struct lock_object *lock)
1296 {
1297 	const struct rwlock *rw;
1298 	struct thread *td;
1299 
1300 	rw = (const struct rwlock *)lock;
1301 
1302 	db_printf(" state: ");
1303 	if (rw->rw_lock == RW_UNLOCKED)
1304 		db_printf("UNLOCKED\n");
1305 	else if (rw->rw_lock == RW_DESTROYED) {
1306 		db_printf("DESTROYED\n");
1307 		return;
1308 	} else if (rw->rw_lock & RW_LOCK_READ)
1309 		db_printf("RLOCK: %ju locks\n",
1310 		    (uintmax_t)(RW_READERS(rw->rw_lock)));
1311 	else {
1312 		td = rw_wowner(rw);
1313 		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1314 		    td->td_tid, td->td_proc->p_pid, td->td_name);
1315 		if (rw_recursed(rw))
1316 			db_printf(" recursed: %u\n", rw->rw_recurse);
1317 	}
1318 	db_printf(" waiters: ");
1319 	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
1320 	case RW_LOCK_READ_WAITERS:
1321 		db_printf("readers\n");
1322 		break;
1323 	case RW_LOCK_WRITE_WAITERS:
1324 		db_printf("writers\n");
1325 		break;
1326 	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
1327 		db_printf("readers and writers\n");
1328 		break;
1329 	default:
1330 		db_printf("none\n");
1331 		break;
1332 	}
1333 }
1334 
1335 #endif
1336