xref: /freebsd/sys/kern/kern_rwlock.c (revision 907b59d76938e654f0d040a888e8dfca3de1e222)
1 /*-
2  * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /*
28  * Machine independent bits of reader/writer lock implementation.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "opt_ddb.h"
35 #include "opt_hwpmc_hooks.h"
36 #include "opt_no_adaptive_rwlocks.h"
37 
38 #include <sys/param.h>
39 #include <sys/kdb.h>
40 #include <sys/ktr.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/mutex.h>
44 #include <sys/proc.h>
45 #include <sys/rwlock.h>
46 #include <sys/sched.h>
47 #include <sys/sysctl.h>
48 #include <sys/systm.h>
49 #include <sys/turnstile.h>
50 
51 #include <machine/cpu.h>
52 
53 #if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
54 #define	ADAPTIVE_RWLOCKS
55 #endif
56 
57 #ifdef HWPMC_HOOKS
58 #include <sys/pmckern.h>
59 PMC_SOFT_DECLARE( , , lock, failed);
60 #endif
61 
62 /*
63  * Return the rwlock address when the lock cookie address is provided.
64  * This assumes that struct rwlock has a member named rw_lock.
65  */
66 #define	rwlock2rw(c)	(__containerof(c, struct rwlock, rw_lock))
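
/*
 * Illustrative sketch only, not part of the implementation: the cookie
 * passed to the _rw_*() functions is the address of the rw_lock member
 * itself, so recovering the containing lock is plain pointer arithmetic.
 * With the usual __containerof() definition the macro expands to roughly
 * the following ("example" is a hypothetical lock):
 *
 *	struct rwlock example;
 *	volatile uintptr_t *c = &example.rw_lock;
 *	struct rwlock *rw;
 *
 *	rw = (struct rwlock *)((char *)c - offsetof(struct rwlock, rw_lock));
 *	MPASS(rw == &example);
 */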
67 
68 #ifdef ADAPTIVE_RWLOCKS
69 static int rowner_retries = 10;
70 static int rowner_loops = 10000;
71 static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
72     "rwlock debugging");
73 SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
74 SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
75 #endif
76 
77 #ifdef DDB
78 #include <ddb/ddb.h>
79 
80 static void	db_show_rwlock(const struct lock_object *lock);
81 #endif
82 static void	assert_rw(const struct lock_object *lock, int what);
83 static void	lock_rw(struct lock_object *lock, uintptr_t how);
84 #ifdef KDTRACE_HOOKS
85 static int	owner_rw(const struct lock_object *lock, struct thread **owner);
86 #endif
87 static uintptr_t unlock_rw(struct lock_object *lock);
88 
89 struct lock_class lock_class_rw = {
90 	.lc_name = "rw",
91 	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
92 	.lc_assert = assert_rw,
93 #ifdef DDB
94 	.lc_ddb_show = db_show_rwlock,
95 #endif
96 	.lc_lock = lock_rw,
97 	.lc_unlock = unlock_rw,
98 #ifdef KDTRACE_HOOKS
99 	.lc_owner = owner_rw,
100 #endif
101 };
102 
103 /*
104  * Return a pointer to the owning thread if the lock is write-locked or
105  * NULL if the lock is unlocked or read-locked.
106  */
107 #define	rw_wowner(rw)							\
108 	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
109 	    (struct thread *)RW_OWNER((rw)->rw_lock))
110 
111 /*
112  * Returns whether a write owner is recursed.  Write ownership is not
113  * assured here and should be checked beforehand.
114  */
115 #define	rw_recursed(rw)		((rw)->rw_recurse != 0)
116 
117 /*
118  * Return true if curthread holds the lock.
119  */
120 #define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)
121 
122 /*
123  * Return a pointer to the owning thread for this lock that should receive
124  * any priority lent by threads that block on this lock.  Currently this
125  * is identical to rw_wowner().
126  */
127 #define	rw_owner(rw)		rw_wowner(rw)
128 
129 #ifndef INVARIANTS
130 #define	__rw_assert(c, what, file, line)
131 #endif
132 
133 void
134 assert_rw(const struct lock_object *lock, int what)
135 {
136 
137 	rw_assert((const struct rwlock *)lock, what);
138 }
139 
140 void
141 lock_rw(struct lock_object *lock, uintptr_t how)
142 {
143 	struct rwlock *rw;
144 
145 	rw = (struct rwlock *)lock;
146 	if (how)
147 		rw_rlock(rw);
148 	else
149 		rw_wlock(rw);
150 }
151 
152 uintptr_t
153 unlock_rw(struct lock_object *lock)
154 {
155 	struct rwlock *rw;
156 
157 	rw = (struct rwlock *)lock;
158 	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
159 	if (rw->rw_lock & RW_LOCK_READ) {
160 		rw_runlock(rw);
161 		return (1);
162 	} else {
163 		rw_wunlock(rw);
164 		return (0);
165 	}
166 }
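
/*
 * lock_rw() and unlock_rw() implement the lock class methods used by
 * code such as the sleep queues to drop and later re-acquire whatever
 * lock a thread happens to hold.  unlock_rw() returns a "how" cookie
 * (1 for a read lock, 0 for a write lock) that a later lock_rw() call
 * consumes to restore the same mode.  A hedged sketch of the round trip
 * through the generic lock_class interface:
 *
 *	uintptr_t how;
 *
 *	how = LOCK_CLASS(&rw->lock_object)->lc_unlock(&rw->lock_object);
 *	...
 *	LOCK_CLASS(&rw->lock_object)->lc_lock(&rw->lock_object, how);
 */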
167 
168 #ifdef KDTRACE_HOOKS
169 int
170 owner_rw(const struct lock_object *lock, struct thread **owner)
171 {
172 	const struct rwlock *rw = (const struct rwlock *)lock;
173 	uintptr_t x = rw->rw_lock;
174 
175 	*owner = rw_wowner(rw);
176 	return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
177 	    (*owner != NULL));
178 }
179 #endif
180 
181 void
182 _rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
183 {
184 	struct rwlock *rw;
185 	int flags;
186 
187 	rw = rwlock2rw(c);
188 
189 	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
190 	    RW_RECURSE | RW_NEW)) == 0);
191 	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
192 	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
193 	    &rw->rw_lock));
194 
195 	flags = LO_UPGRADABLE;
196 	if (opts & RW_DUPOK)
197 		flags |= LO_DUPOK;
198 	if (opts & RW_NOPROFILE)
199 		flags |= LO_NOPROFILE;
200 	if (!(opts & RW_NOWITNESS))
201 		flags |= LO_WITNESS;
202 	if (opts & RW_RECURSE)
203 		flags |= LO_RECURSABLE;
204 	if (opts & RW_QUIET)
205 		flags |= LO_QUIET;
206 	if (opts & RW_NEW)
207 		flags |= LO_NEW;
208 
209 	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
210 	rw->rw_lock = RW_UNLOCKED;
211 	rw->rw_recurse = 0;
212 }
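
/*
 * A hedged usage sketch for consumers of this API; the "foo" names are
 * hypothetical and not part of this file:
 *
 *	static struct rwlock foo_lock;
 *	static int foo_count;
 *
 *	rw_init(&foo_lock, "foo lock");
 *
 *	rw_rlock(&foo_lock);		(read-side section)
 *	(void)foo_count;
 *	rw_runlock(&foo_lock);
 *
 *	rw_wlock(&foo_lock);		(write-side section)
 *	foo_count++;
 *	rw_wunlock(&foo_lock);
 *
 *	rw_destroy(&foo_lock);
 */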
213 
214 void
215 _rw_destroy(volatile uintptr_t *c)
216 {
217 	struct rwlock *rw;
218 
219 	rw = rwlock2rw(c);
220 
221 	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
222 	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
223 	rw->rw_lock = RW_DESTROYED;
224 	lock_destroy(&rw->lock_object);
225 }
226 
227 void
228 rw_sysinit(void *arg)
229 {
230 	struct rw_args *args = arg;
231 
232 	rw_init((struct rwlock *)args->ra_rw, args->ra_desc);
233 }
234 
235 void
236 rw_sysinit_flags(void *arg)
237 {
238 	struct rw_args_flags *args = arg;
239 
240 	rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
241 	    args->ra_flags);
242 }
243 
244 int
245 _rw_wowned(const volatile uintptr_t *c)
246 {
247 
248 	return (rw_wowner(rwlock2rw(c)) == curthread);
249 }
250 
251 void
252 _rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
253 {
254 	struct rwlock *rw;
255 
256 	if (SCHEDULER_STOPPED())
257 		return;
258 
259 	rw = rwlock2rw(c);
260 
261 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
262 	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
263 	    curthread, rw->lock_object.lo_name, file, line));
264 	KASSERT(rw->rw_lock != RW_DESTROYED,
265 	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
266 	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
267 	    line, NULL);
268 	__rw_wlock(rw, curthread, file, line);
269 	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
270 	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
271 	TD_LOCKS_INC(curthread);
272 }
273 
274 int
275 __rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
276 {
277 	struct rwlock *rw;
278 	int rval;
279 
280 	if (SCHEDULER_STOPPED())
281 		return (1);
282 
283 	rw = rwlock2rw(c);
284 
285 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
286 	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
287 	    curthread, rw->lock_object.lo_name, file, line));
288 	KASSERT(rw->rw_lock != RW_DESTROYED,
289 	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
290 
291 	if (rw_wlocked(rw) &&
292 	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
293 		rw->rw_recurse++;
294 		rval = 1;
295 	} else
296 		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
297 		    (uintptr_t)curthread);
298 
299 	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
300 	if (rval) {
301 		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
302 		    file, line);
303 		if (!rw_recursed(rw))
304 			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
305 			    rw, 0, 0, file, line, LOCKSTAT_WRITER);
306 		TD_LOCKS_INC(curthread);
307 	}
308 	return (rval);
309 }
310 
311 void
312 _rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
313 {
314 	struct rwlock *rw;
315 
316 	if (SCHEDULER_STOPPED())
317 		return;
318 
319 	rw = rwlock2rw(c);
320 
321 	KASSERT(rw->rw_lock != RW_DESTROYED,
322 	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
323 	__rw_assert(c, RA_WLOCKED, file, line);
324 	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
325 	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
326 	    line);
327 	__rw_wunlock(rw, curthread, file, line);
328 	TD_LOCKS_DEC(curthread);
329 }
330 
331 /*
332  * Determines whether a new reader can acquire a lock.  Succeeds if the
333  * reader already owns a read lock and the lock is locked for read, in
334  * order to prevent deadlock from reader recursion.  Also succeeds if the
335  * lock is unlocked and has no write waiters or spinners.  Otherwise it
336  * fails, which gives writers priority over readers.
337  */
338 #define	RW_CAN_READ(_rw)						\
339     ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &	\
340     (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==	\
341     RW_LOCK_READ)
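
/*
 * A few example lock words and the resulting RW_CAN_READ() outcome, as a
 * sketch of the policy above (assuming the flag layout in <sys/rwlock.h>):
 *
 *	RW_UNLOCKED (READ bit set, zero readers)	-> readable
 *	RW_READERS_LOCK(3)				-> readable
 *	RW_READERS_LOCK(3) | RW_LOCK_WRITE_WAITERS	-> readable only if
 *				curthread already holds read locks
 *	owned by a writer (READ bit clear)		-> not readable
 */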
342 
343 void
344 __rw_rlock(volatile uintptr_t *c, const char *file, int line)
345 {
346 	struct rwlock *rw;
347 	struct turnstile *ts;
348 #ifdef ADAPTIVE_RWLOCKS
349 	volatile struct thread *owner;
350 	int spintries = 0;
351 	int i;
352 #endif
353 #ifdef LOCK_PROFILING
354 	uint64_t waittime = 0;
355 	int contested = 0;
356 #endif
357 	uintptr_t v;
358 #ifdef KDTRACE_HOOKS
359 	uintptr_t state;
360 	uint64_t spin_cnt = 0;
361 	uint64_t sleep_cnt = 0;
362 	int64_t sleep_time = 0;
363 	int64_t all_time = 0;
364 #endif
365 
366 	if (SCHEDULER_STOPPED())
367 		return;
368 
369 	rw = rwlock2rw(c);
370 
371 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
372 	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
373 	    curthread, rw->lock_object.lo_name, file, line));
374 	KASSERT(rw->rw_lock != RW_DESTROYED,
375 	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
376 	KASSERT(rw_wowner(rw) != curthread,
377 	    ("rw_rlock: wlock already held for %s @ %s:%d",
378 	    rw->lock_object.lo_name, file, line));
379 	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);
380 
381 #ifdef KDTRACE_HOOKS
382 	all_time -= lockstat_nsecs(&rw->lock_object);
383 	state = rw->rw_lock;
384 #endif
385 	for (;;) {
386 		/*
387 		 * Handle the easy case.  If no other thread has a write
388 		 * lock, then try to bump up the count of read locks.  Note
389 		 * that we have to preserve the current state of the
390 		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
391 		 * read lock, then rw_lock must have changed, so restart
392 		 * the loop.  Note that this handles the case of a
393 		 * completely unlocked rwlock since such a lock is encoded
394 		 * as a read lock with no waiters.
395 		 */
396 		v = rw->rw_lock;
397 		if (RW_CAN_READ(v)) {
398 			/*
399 			 * The RW_LOCK_READ_WAITERS flag should only be set
400 			 * if the lock has been unlocked and write waiters
401 			 * were present.
402 			 */
403 			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
404 			    v + RW_ONE_READER)) {
405 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
406 					CTR4(KTR_LOCK,
407 					    "%s: %p succeed %p -> %p", __func__,
408 					    rw, (void *)v,
409 					    (void *)(v + RW_ONE_READER));
410 				break;
411 			}
412 			continue;
413 		}
414 #ifdef KDTRACE_HOOKS
415 		spin_cnt++;
416 #endif
417 #ifdef HWPMC_HOOKS
418 		PMC_SOFT_CALL( , , lock, failed);
419 #endif
420 		lock_profile_obtain_lock_failed(&rw->lock_object,
421 		    &contested, &waittime);
422 
423 #ifdef ADAPTIVE_RWLOCKS
424 		/*
425 		 * If the owner is running on another CPU, spin until
426 		 * the owner stops running or the state of the lock
427 		 * changes.
428 		 */
429 		if ((v & RW_LOCK_READ) == 0) {
430 			owner = (struct thread *)RW_OWNER(v);
431 			if (TD_IS_RUNNING(owner)) {
432 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
433 					CTR3(KTR_LOCK,
434 					    "%s: spinning on %p held by %p",
435 					    __func__, rw, owner);
436 				KTR_STATE1(KTR_SCHED, "thread",
437 				    sched_tdname(curthread), "spinning",
438 				    "lockname:\"%s\"", rw->lock_object.lo_name);
439 				while ((struct thread*)RW_OWNER(rw->rw_lock) ==
440 				    owner && TD_IS_RUNNING(owner)) {
441 					cpu_spinwait();
442 #ifdef KDTRACE_HOOKS
443 					spin_cnt++;
444 #endif
445 				}
446 				KTR_STATE0(KTR_SCHED, "thread",
447 				    sched_tdname(curthread), "running");
448 				continue;
449 			}
450 		} else if (spintries < rowner_retries) {
451 			spintries++;
452 			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
453 			    "spinning", "lockname:\"%s\"",
454 			    rw->lock_object.lo_name);
455 			for (i = 0; i < rowner_loops; i++) {
456 				v = rw->rw_lock;
457 				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
458 					break;
459 				cpu_spinwait();
460 			}
461 #ifdef KDTRACE_HOOKS
462 			spin_cnt += rowner_loops - i;
463 #endif
464 			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
465 			    "running");
466 			if (i != rowner_loops)
467 				continue;
468 		}
469 #endif
470 
471 		/*
472 		 * Okay, now it's the hard case.  Some other thread already
473 		 * has a write lock or there are write waiters present; acquire
474 		 * acquire the turnstile lock so we can begin the process
475 		 * of blocking.
476 		 */
477 		ts = turnstile_trywait(&rw->lock_object);
478 
479 		/*
480 		 * The lock might have been released while we spun, so
481 		 * recheck its state and restart the loop if needed.
482 		 */
483 		v = rw->rw_lock;
484 		if (RW_CAN_READ(v)) {
485 			turnstile_cancel(ts);
486 			continue;
487 		}
488 
489 #ifdef ADAPTIVE_RWLOCKS
490 		/*
491 		 * The current lock owner might have started executing
492 		 * on another CPU (or the lock could have changed
493 		 * owners) while we were waiting on the turnstile
494 		 * chain lock.  If so, drop the turnstile lock and try
495 		 * again.
496 		 */
497 		if ((v & RW_LOCK_READ) == 0) {
498 			owner = (struct thread *)RW_OWNER(v);
499 			if (TD_IS_RUNNING(owner)) {
500 				turnstile_cancel(ts);
501 				continue;
502 			}
503 		}
504 #endif
505 
506 		/*
507 		 * The lock is held in write mode or it already has waiters.
508 		 */
509 		MPASS(!RW_CAN_READ(v));
510 
511 		/*
512 		 * If the RW_LOCK_READ_WAITERS flag is already set, then
513 		 * we can go ahead and block.  If it is not set then try
514 		 * to set it.  If we fail to set it drop the turnstile
515 		 * lock and restart the loop.
516 		 */
517 		if (!(v & RW_LOCK_READ_WAITERS)) {
518 			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
519 			    v | RW_LOCK_READ_WAITERS)) {
520 				turnstile_cancel(ts);
521 				continue;
522 			}
523 			if (LOCK_LOG_TEST(&rw->lock_object, 0))
524 				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
525 				    __func__, rw);
526 		}
527 
528 		/*
529 		 * We were unable to acquire the lock and the read waiters
530 		 * flag is set, so we must block on the turnstile.
531 		 */
532 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
533 			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
534 			    rw);
535 #ifdef KDTRACE_HOOKS
536 		sleep_time -= lockstat_nsecs(&rw->lock_object);
537 #endif
538 		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
539 #ifdef KDTRACE_HOOKS
540 		sleep_time += lockstat_nsecs(&rw->lock_object);
541 		sleep_cnt++;
542 #endif
543 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
544 			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
545 			    __func__, rw);
546 	}
547 #ifdef KDTRACE_HOOKS
548 	all_time += lockstat_nsecs(&rw->lock_object);
549 	if (sleep_time)
550 		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
551 		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
552 		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
553 
554 	/* Record only the loops spinning and not sleeping. */
555 	if (spin_cnt > sleep_cnt)
556 		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
557 		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
558 		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
559 #endif
560 	/*
561 	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
562 	 * however.  turnstiles don't like owners changing between calls to
563 	 * turnstile_wait() currently.
564 	 */
565 	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
566 	    waittime, file, line, LOCKSTAT_READER);
567 	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
568 	WITNESS_LOCK(&rw->lock_object, 0, file, line);
569 	TD_LOCKS_INC(curthread);
570 	curthread->td_rw_rlocks++;
571 }
572 
573 int
574 __rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
575 {
576 	struct rwlock *rw;
577 	uintptr_t x;
578 
579 	if (SCHEDULER_STOPPED())
580 		return (1);
581 
582 	rw = rwlock2rw(c);
583 
584 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
585 	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
586 	    curthread, rw->lock_object.lo_name, file, line));
587 
588 	for (;;) {
589 		x = rw->rw_lock;
590 		KASSERT(rw->rw_lock != RW_DESTROYED,
591 		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
592 		if (!(x & RW_LOCK_READ))
593 			break;
594 		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
595 			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
596 			    line);
597 			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
598 			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
599 			    rw, 0, 0, file, line, LOCKSTAT_READER);
600 			TD_LOCKS_INC(curthread);
601 			curthread->td_rw_rlocks++;
602 			return (1);
603 		}
604 	}
605 
606 	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
607 	return (0);
608 }
609 
610 void
611 _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
612 {
613 	struct rwlock *rw;
614 	struct turnstile *ts;
615 	uintptr_t x, v, queue;
616 
617 	if (SCHEDULER_STOPPED())
618 		return;
619 
620 	rw = rwlock2rw(c);
621 
622 	KASSERT(rw->rw_lock != RW_DESTROYED,
623 	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
624 	__rw_assert(c, RA_RLOCKED, file, line);
625 	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
626 	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);
627 
628 	/* TODO: drop "owner of record" here. */
629 
630 	for (;;) {
631 		/*
632 		 * See if there is more than one read lock held.  If so,
633 		 * just drop one and return.
634 		 */
635 		x = rw->rw_lock;
636 		if (RW_READERS(x) > 1) {
637 			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
638 			    x - RW_ONE_READER)) {
639 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
640 					CTR4(KTR_LOCK,
641 					    "%s: %p succeeded %p -> %p",
642 					    __func__, rw, (void *)x,
643 					    (void *)(x - RW_ONE_READER));
644 				break;
645 			}
646 			continue;
647 		}
648 		/*
649 		 * If there aren't any waiters for a write lock, then try
650 		 * to drop it quickly.
651 		 */
652 		if (!(x & RW_LOCK_WAITERS)) {
653 			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
654 			    RW_READERS_LOCK(1));
655 			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
656 			    RW_UNLOCKED)) {
657 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
658 					CTR2(KTR_LOCK, "%s: %p last succeeded",
659 					    __func__, rw);
660 				break;
661 			}
662 			continue;
663 		}
664 		/*
665 		 * Ok, we know we have waiters and we think we are the
666 		 * last reader, so grab the turnstile lock.
667 		 */
668 		turnstile_chain_lock(&rw->lock_object);
669 		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
670 		MPASS(v & RW_LOCK_WAITERS);
671 
672 		/*
673 		 * Try to drop our lock, leaving the lock in an unlocked
674 		 * state.
675 		 *
676 		 * If you wanted to do explicit lock handoff you'd have to
677 		 * do it here.  You'd also want to use turnstile_signal()
678 		 * and you'd have to handle the race where a higher
679 		 * priority thread blocks on the write lock before the
680 		 * thread you wakeup actually runs and have the new thread
681 		 * thread you wake up actually runs, and have the new thread
682 		 * wakeup all of the waiters.
683 		 *
684 		 * As above, if we fail, then another thread might have
685 		 * acquired a read lock, so drop the turnstile lock and
686 		 * restart.
687 		 */
688 		x = RW_UNLOCKED;
689 		if (v & RW_LOCK_WRITE_WAITERS) {
690 			queue = TS_EXCLUSIVE_QUEUE;
691 			x |= (v & RW_LOCK_READ_WAITERS);
692 		} else
693 			queue = TS_SHARED_QUEUE;
694 		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
695 		    x)) {
696 			turnstile_chain_unlock(&rw->lock_object);
697 			continue;
698 		}
699 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
700 			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
701 			    __func__, rw);
702 
703 		/*
704 		 * Ok.  The lock is released and all that's left is to
705 		 * wake up the waiters.  Note that the lock might not be
706 		 * free anymore, but in that case the writers will just
707 		 * block again if they run before the new lock holder(s)
708 		 * release the lock.
709 		 */
710 		ts = turnstile_lookup(&rw->lock_object);
711 		MPASS(ts != NULL);
712 		turnstile_broadcast(ts, queue);
713 		turnstile_unpend(ts, TS_SHARED_LOCK);
714 		turnstile_chain_unlock(&rw->lock_object);
715 		break;
716 	}
717 	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
718 	TD_LOCKS_DEC(curthread);
719 	curthread->td_rw_rlocks--;
720 }
721 
722 /*
723  * This function is called when we are unable to obtain a write lock on the
724  * first try.  This means that at least one other thread holds either a
725  * read or write lock.
726  */
727 void
728 __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
729     int line)
730 {
731 	struct rwlock *rw;
732 	struct turnstile *ts;
733 #ifdef ADAPTIVE_RWLOCKS
734 	volatile struct thread *owner;
735 	int spintries = 0;
736 	int i;
737 #endif
738 	uintptr_t v, x;
739 #ifdef LOCK_PROFILING
740 	uint64_t waittime = 0;
741 	int contested = 0;
742 #endif
743 #ifdef KDTRACE_HOOKS
744 	uintptr_t state;
745 	uint64_t spin_cnt = 0;
746 	uint64_t sleep_cnt = 0;
747 	int64_t sleep_time = 0;
748 	int64_t all_time = 0;
749 #endif
750 
751 	if (SCHEDULER_STOPPED())
752 		return;
753 
754 	rw = rwlock2rw(c);
755 
756 	if (rw_wlocked(rw)) {
757 		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
758 		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
759 		    __func__, rw->lock_object.lo_name, file, line));
760 		rw->rw_recurse++;
761 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
762 			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
763 		return;
764 	}
765 
766 	if (LOCK_LOG_TEST(&rw->lock_object, 0))
767 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
768 		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
769 
770 #ifdef KDTRACE_HOOKS
771 	all_time -= lockstat_nsecs(&rw->lock_object);
772 	state = rw->rw_lock;
773 #endif
774 	for (;;) {
775 		if (rw->rw_lock == RW_UNLOCKED && _rw_write_lock(rw, tid))
776 			break;
777 #ifdef KDTRACE_HOOKS
778 		spin_cnt++;
779 #endif
780 #ifdef HWPMC_HOOKS
781 		PMC_SOFT_CALL( , , lock, failed);
782 #endif
783 		lock_profile_obtain_lock_failed(&rw->lock_object,
784 		    &contested, &waittime);
785 #ifdef ADAPTIVE_RWLOCKS
786 		/*
787 		 * If the lock is write locked and the owner is
788 		 * running on another CPU, spin until the owner stops
789 		 * running or the state of the lock changes.
790 		 */
791 		v = rw->rw_lock;
792 		owner = (struct thread *)RW_OWNER(v);
793 		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
794 			if (LOCK_LOG_TEST(&rw->lock_object, 0))
795 				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
796 				    __func__, rw, owner);
797 			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
798 			    "spinning", "lockname:\"%s\"",
799 			    rw->lock_object.lo_name);
800 			while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
801 			    TD_IS_RUNNING(owner)) {
802 				cpu_spinwait();
803 #ifdef KDTRACE_HOOKS
804 				spin_cnt++;
805 #endif
806 			}
807 			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
808 			    "running");
809 			continue;
810 		}
811 		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
812 		    spintries < rowner_retries) {
813 			if (!(v & RW_LOCK_WRITE_SPINNER)) {
814 				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
815 				    v | RW_LOCK_WRITE_SPINNER)) {
816 					continue;
817 				}
818 			}
819 			spintries++;
820 			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
821 			    "spinning", "lockname:\"%s\"",
822 			    rw->lock_object.lo_name);
823 			for (i = 0; i < rowner_loops; i++) {
824 				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
825 					break;
826 				cpu_spinwait();
827 			}
828 			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
829 			    "running");
830 #ifdef KDTRACE_HOOKS
831 			spin_cnt += rowner_loops - i;
832 #endif
833 			if (i != rowner_loops)
834 				continue;
835 		}
836 #endif
837 		ts = turnstile_trywait(&rw->lock_object);
838 		v = rw->rw_lock;
839 
840 #ifdef ADAPTIVE_RWLOCKS
841 		/*
842 		 * The current lock owner might have started executing
843 		 * on another CPU (or the lock could have changed
844 		 * owners) while we were waiting on the turnstile
845 		 * chain lock.  If so, drop the turnstile lock and try
846 		 * again.
847 		 */
848 		if (!(v & RW_LOCK_READ)) {
849 			owner = (struct thread *)RW_OWNER(v);
850 			if (TD_IS_RUNNING(owner)) {
851 				turnstile_cancel(ts);
852 				continue;
853 			}
854 		}
855 #endif
856 		/*
857 		 * Check the waiters flags on this rwlock.
858 		 * If the lock was released without leaving any pending
859 		 * waiters queue, simply try to acquire it.
860 		 * If a pending waiters queue is present, claim lock
861 		 * ownership and preserve the pending queue.
862 		 */
863 		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
864 		if ((v & ~x) == RW_UNLOCKED) {
865 			x &= ~RW_LOCK_WRITE_SPINNER;
866 			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
867 				if (x)
868 					turnstile_claim(ts);
869 				else
870 					turnstile_cancel(ts);
871 				break;
872 			}
873 			turnstile_cancel(ts);
874 			continue;
875 		}
876 		/*
877 		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
878 		 * set it.  If we fail to set it, then loop back and try
879 		 * again.
880 		 */
881 		if (!(v & RW_LOCK_WRITE_WAITERS)) {
882 			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
883 			    v | RW_LOCK_WRITE_WAITERS)) {
884 				turnstile_cancel(ts);
885 				continue;
886 			}
887 			if (LOCK_LOG_TEST(&rw->lock_object, 0))
888 				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
889 				    __func__, rw);
890 		}
891 		/*
892 		 * We were unable to acquire the lock and the write waiters
893 		 * flag is set, so we must block on the turnstile.
894 		 */
895 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
896 			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
897 			    rw);
898 #ifdef KDTRACE_HOOKS
899 		sleep_time -= lockstat_nsecs(&rw->lock_object);
900 #endif
901 		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
902 #ifdef KDTRACE_HOOKS
903 		sleep_time += lockstat_nsecs(&rw->lock_object);
904 		sleep_cnt++;
905 #endif
906 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
907 			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
908 			    __func__, rw);
909 #ifdef ADAPTIVE_RWLOCKS
910 		spintries = 0;
911 #endif
912 	}
913 #ifdef KDTRACE_HOOKS
914 	all_time += lockstat_nsecs(&rw->lock_object);
915 	if (sleep_time)
916 		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
917 		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
918 		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
919 
920 	/* Record only the loops spinning and not sleeping. */
921 	if (spin_cnt > sleep_cnt)
922 		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
923 		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
924 		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
925 #endif
926 	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
927 	    waittime, file, line, LOCKSTAT_WRITER);
928 }
929 
930 /*
931  * This function is called if the first try at releasing a write lock failed.
932  * This means that one of the 2 waiter bits must be set indicating that at
933  * least one thread is waiting on this lock.
934  */
935 void
936 __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
937     int line)
938 {
939 	struct rwlock *rw;
940 	struct turnstile *ts;
941 	uintptr_t v;
942 	int queue;
943 
944 	if (SCHEDULER_STOPPED())
945 		return;
946 
947 	rw = rwlock2rw(c);
948 
949 	if (rw_wlocked(rw) && rw_recursed(rw)) {
950 		rw->rw_recurse--;
951 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
952 			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
953 		return;
954 	}
955 
956 	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
957 	    ("%s: neither of the waiter flags are set", __func__));
958 
959 	if (LOCK_LOG_TEST(&rw->lock_object, 0))
960 		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);
961 
962 	turnstile_chain_lock(&rw->lock_object);
963 	ts = turnstile_lookup(&rw->lock_object);
964 	MPASS(ts != NULL);
965 
966 	/*
967 	 * Use the same algorithm as sx locks for now.  Prefer waking up shared
968 	 * waiters over writers if we have any.  This is probably not ideal.
969 	 *
970 	 * 'v' is the value we are going to write back to rw_lock.  If we
971 	 * have waiters on both queues, we need to preserve the state of
972 	 * the waiter flag for the queue we don't wake up.  For now this is
973 	 * hardcoded for the algorithm mentioned above.
974 	 *
975 	 * In the case of both readers and writers waiting we wakeup the
976 	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
977 	 * new writer comes in before a reader it will claim the lock up
978 	 * above.  There is probably a potential priority inversion in
979 	 * there that could be worked around either by waking both queues
980 	 * of waiters or doing some complicated lock handoff gymnastics.
981 	 */
982 	v = RW_UNLOCKED;
983 	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
984 		queue = TS_EXCLUSIVE_QUEUE;
985 		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
986 	} else
987 		queue = TS_SHARED_QUEUE;
988 
989 	/* Wake up all waiters for the specific queue. */
990 	if (LOCK_LOG_TEST(&rw->lock_object, 0))
991 		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
992 		    queue == TS_SHARED_QUEUE ? "read" : "write");
993 	turnstile_broadcast(ts, queue);
994 	atomic_store_rel_ptr(&rw->rw_lock, v);
995 	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
996 	turnstile_chain_unlock(&rw->lock_object);
997 }
998 
999 /*
1000  * Attempt to do a non-blocking upgrade from a read lock to a write
1001  * lock.  This will only succeed if this thread holds a single read
1002  * lock.  Returns true if the upgrade succeeded and false otherwise.
1003  */
1004 int
1005 __rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
1006 {
1007 	struct rwlock *rw;
1008 	uintptr_t v, x, tid;
1009 	struct turnstile *ts;
1010 	int success;
1011 
1012 	if (SCHEDULER_STOPPED())
1013 		return (1);
1014 
1015 	rw = rwlock2rw(c);
1016 
1017 	KASSERT(rw->rw_lock != RW_DESTROYED,
1018 	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
1019 	__rw_assert(c, RA_RLOCKED, file, line);
1020 
1021 	/*
1022 	 * Attempt to switch from one reader to a writer.  If there
1023 	 * are any write waiters, then we will have to lock the
1024 	 * turnstile first to prevent races with another writer
1025 	 * calling turnstile_wait() before we have claimed this
1026 	 * turnstile.  So, do the simple case of no waiters first.
1027 	 */
1028 	tid = (uintptr_t)curthread;
1029 	success = 0;
1030 	for (;;) {
1031 		v = rw->rw_lock;
1032 		if (RW_READERS(v) > 1)
1033 			break;
1034 		if (!(v & RW_LOCK_WAITERS)) {
1035 			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
1036 			if (!success)
1037 				continue;
1038 			break;
1039 		}
1040 
1041 		/*
1042 		 * Ok, we think we have waiters, so lock the turnstile.
1043 		 */
1044 		ts = turnstile_trywait(&rw->lock_object);
1045 		v = rw->rw_lock;
1046 		if (RW_READERS(v) > 1) {
1047 			turnstile_cancel(ts);
1048 			break;
1049 		}
1050 		/*
1051 		 * Try to switch from one reader to a writer again.  This time
1052 		 * we honor the current state of the waiters flags.
1053 		 * If we obtain the lock with the flags set, then claim
1054 		 * ownership of the turnstile.
1055 		 */
1056 		x = rw->rw_lock & RW_LOCK_WAITERS;
1057 		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
1058 		if (success) {
1059 			if (x)
1060 				turnstile_claim(ts);
1061 			else
1062 				turnstile_cancel(ts);
1063 			break;
1064 		}
1065 		turnstile_cancel(ts);
1066 	}
1067 	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
1068 	if (success) {
1069 		curthread->td_rw_rlocks--;
1070 		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
1071 		    file, line);
1072 		LOCKSTAT_RECORD0(rw__upgrade, rw);
1073 	}
1074 	return (success);
1075 }
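
/*
 * A hedged sketch of the common lookup-then-modify pattern built on
 * rw_try_upgrade() and rw_downgrade(); foo_lock, foo_find() and
 * foo_insert() are hypothetical:
 *
 *	rw_rlock(&foo_lock);
 *	if (foo_find(key) == NULL) {
 *		if (!rw_try_upgrade(&foo_lock)) {
 *			rw_runlock(&foo_lock);
 *			rw_wlock(&foo_lock);
 *			... re-check; another thread may have raced us ...
 *		}
 *		foo_insert(key);
 *		rw_downgrade(&foo_lock);
 *	}
 *	... remaining read-side work ...
 *	rw_runlock(&foo_lock);
 */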
1076 
1077 /*
1078  * Downgrade a write lock into a single read lock.
1079  */
1080 void
1081 __rw_downgrade(volatile uintptr_t *c, const char *file, int line)
1082 {
1083 	struct rwlock *rw;
1084 	struct turnstile *ts;
1085 	uintptr_t tid, v;
1086 	int rwait, wwait;
1087 
1088 	if (SCHEDULER_STOPPED())
1089 		return;
1090 
1091 	rw = rwlock2rw(c);
1092 
1093 	KASSERT(rw->rw_lock != RW_DESTROYED,
1094 	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
1095 	__rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
1096 #ifndef INVARIANTS
1097 	if (rw_recursed(rw))
1098 		panic("downgrade of a recursed lock");
1099 #endif
1100 
1101 	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);
1102 
1103 	/*
1104 	 * Convert from a writer to a single reader.  First we handle
1105 	 * the easy case with no waiters.  If there are any waiters, we
1106 	 * lock the turnstile and "disown" the lock.
1107 	 */
1108 	tid = (uintptr_t)curthread;
1109 	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
1110 		goto out;
1111 
1112 	/*
1113 	 * Ok, we think we have waiters, so lock the turnstile so we can
1114 	 * read the waiter flags without any races.
1115 	 */
1116 	turnstile_chain_lock(&rw->lock_object);
1117 	v = rw->rw_lock & RW_LOCK_WAITERS;
1118 	rwait = v & RW_LOCK_READ_WAITERS;
1119 	wwait = v & RW_LOCK_WRITE_WAITERS;
1120 	MPASS(rwait | wwait);
1121 
1122 	/*
1123 	 * Downgrade from a write lock while preserving waiters flag
1124 	 * and give up ownership of the turnstile.
1125 	 */
1126 	ts = turnstile_lookup(&rw->lock_object);
1127 	MPASS(ts != NULL);
1128 	if (!wwait)
1129 		v &= ~RW_LOCK_READ_WAITERS;
1130 	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
1131 	/*
1132 	 * Wake other readers if there are no writers pending.  Otherwise they
1133 	 * won't be able to acquire the lock anyway.
1134 	 */
1135 	if (rwait && !wwait) {
1136 		turnstile_broadcast(ts, TS_SHARED_QUEUE);
1137 		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
1138 	} else
1139 		turnstile_disown(ts);
1140 	turnstile_chain_unlock(&rw->lock_object);
1141 out:
1142 	curthread->td_rw_rlocks++;
1143 	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
1144 	LOCKSTAT_RECORD0(rw__downgrade, rw);
1145 }
1146 
1147 #ifdef INVARIANT_SUPPORT
1148 #ifndef INVARIANTS
1149 #undef __rw_assert
1150 #endif
1151 
1152 /*
1153  * In the non-WITNESS case, rw_assert() can only detect that at least
1154  * *some* thread owns an rlock, but it cannot guarantee that *this*
1155  * thread owns an rlock.
1156  */
1157 void
1158 __rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
1159 {
1160 	const struct rwlock *rw;
1161 
1162 	if (panicstr != NULL)
1163 		return;
1164 
1165 	rw = rwlock2rw(c);
1166 
1167 	switch (what) {
1168 	case RA_LOCKED:
1169 	case RA_LOCKED | RA_RECURSED:
1170 	case RA_LOCKED | RA_NOTRECURSED:
1171 	case RA_RLOCKED:
1172 	case RA_RLOCKED | RA_RECURSED:
1173 	case RA_RLOCKED | RA_NOTRECURSED:
1174 #ifdef WITNESS
1175 		witness_assert(&rw->lock_object, what, file, line);
1176 #else
1177 		/*
1178 		 * If some other thread has a write lock or we have one
1179 		 * and are asserting a read lock, fail.  Also, if no one
1180 		 * has a lock at all, fail.
1181 		 */
1182 		if (rw->rw_lock == RW_UNLOCKED ||
1183 		    (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
1184 		    rw_wowner(rw) != curthread)))
1185 			panic("Lock %s not %slocked @ %s:%d\n",
1186 			    rw->lock_object.lo_name, (what & RA_RLOCKED) ?
1187 			    "read " : "", file, line);
1188 
1189 		if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
1190 			if (rw_recursed(rw)) {
1191 				if (what & RA_NOTRECURSED)
1192 					panic("Lock %s recursed @ %s:%d\n",
1193 					    rw->lock_object.lo_name, file,
1194 					    line);
1195 			} else if (what & RA_RECURSED)
1196 				panic("Lock %s not recursed @ %s:%d\n",
1197 				    rw->lock_object.lo_name, file, line);
1198 		}
1199 #endif
1200 		break;
1201 	case RA_WLOCKED:
1202 	case RA_WLOCKED | RA_RECURSED:
1203 	case RA_WLOCKED | RA_NOTRECURSED:
1204 		if (rw_wowner(rw) != curthread)
1205 			panic("Lock %s not exclusively locked @ %s:%d\n",
1206 			    rw->lock_object.lo_name, file, line);
1207 		if (rw_recursed(rw)) {
1208 			if (what & RA_NOTRECURSED)
1209 				panic("Lock %s recursed @ %s:%d\n",
1210 				    rw->lock_object.lo_name, file, line);
1211 		} else if (what & RA_RECURSED)
1212 			panic("Lock %s not recursed @ %s:%d\n",
1213 			    rw->lock_object.lo_name, file, line);
1214 		break;
1215 	case RA_UNLOCKED:
1216 #ifdef WITNESS
1217 		witness_assert(&rw->lock_object, what, file, line);
1218 #else
1219 		/*
1220 		 * If we hold a write lock, fail.  We can't reliably check
1221 		 * to see if we hold a read lock or not.
1222 		 */
1223 		if (rw_wowner(rw) == curthread)
1224 			panic("Lock %s exclusively locked @ %s:%d\n",
1225 			    rw->lock_object.lo_name, file, line);
1226 #endif
1227 		break;
1228 	default:
1229 		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
1230 		    line);
1231 	}
1232 }
1233 #endif /* INVARIANT_SUPPORT */
1234 
1235 #ifdef DDB
1236 void
1237 db_show_rwlock(const struct lock_object *lock)
1238 {
1239 	const struct rwlock *rw;
1240 	struct thread *td;
1241 
1242 	rw = (const struct rwlock *)lock;
1243 
1244 	db_printf(" state: ");
1245 	if (rw->rw_lock == RW_UNLOCKED)
1246 		db_printf("UNLOCKED\n");
1247 	else if (rw->rw_lock == RW_DESTROYED) {
1248 		db_printf("DESTROYED\n");
1249 		return;
1250 	} else if (rw->rw_lock & RW_LOCK_READ)
1251 		db_printf("RLOCK: %ju locks\n",
1252 		    (uintmax_t)(RW_READERS(rw->rw_lock)));
1253 	else {
1254 		td = rw_wowner(rw);
1255 		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1256 		    td->td_tid, td->td_proc->p_pid, td->td_name);
1257 		if (rw_recursed(rw))
1258 			db_printf(" recursed: %u\n", rw->rw_recurse);
1259 	}
1260 	db_printf(" waiters: ");
1261 	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
1262 	case RW_LOCK_READ_WAITERS:
1263 		db_printf("readers\n");
1264 		break;
1265 	case RW_LOCK_WRITE_WAITERS:
1266 		db_printf("writers\n");
1267 		break;
1268 	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
1269 		db_printf("readers and writers\n");
1270 		break;
1271 	default:
1272 		db_printf("none\n");
1273 		break;
1274 	}
1275 }
1276 
1277 #endif
1278