xref: /freebsd/sys/kern/kern_rwlock.c (revision 6574b8ed19b093f0af09501d2c9676c28993cb97)
1 /*-
2  * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /*
28  * Machine independent bits of reader/writer lock implementation.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "opt_ddb.h"
35 #include "opt_hwpmc_hooks.h"
36 #include "opt_no_adaptive_rwlocks.h"
37 
38 #include <sys/param.h>
39 #include <sys/kdb.h>
40 #include <sys/ktr.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/mutex.h>
44 #include <sys/proc.h>
45 #include <sys/rwlock.h>
46 #include <sys/sysctl.h>
47 #include <sys/systm.h>
48 #include <sys/turnstile.h>
49 
50 #include <machine/cpu.h>
51 
52 #if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
53 #define	ADAPTIVE_RWLOCKS
54 #endif
55 
56 #ifdef HWPMC_HOOKS
57 #include <sys/pmckern.h>
58 PMC_SOFT_DECLARE( , , lock, failed);
59 #endif
60 
61 /*
62  * Return the rwlock address when the lock cookie address is provided.
63  * This functionality assumes that struct rwlock has a member named rw_lock.
64  */
65 #define	rwlock2rw(c)	(__containerof(c, struct rwlock, rw_lock))
66 
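/*
 * For illustration: the rw_*() wrapper macros in sys/rwlock.h pass
 * &rw->rw_lock as the lock cookie, so the enclosing lock can always be
 * recovered from it, roughly:
 *
 *	struct rwlock *rw = rwlock2rw(c);	recovers the full lock
 *	MPASS(&rw->rw_lock == c);		the cookie is the lock word
 */
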
67 #ifdef ADAPTIVE_RWLOCKS
68 static int rowner_retries = 10;
69 static int rowner_loops = 10000;
70 static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
71     "rwlock debugging");
72 SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
73 SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
74 #endif
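
/*
 * When ADAPTIVE_RWLOCKS is enabled, the spin knobs above are exported as
 * the debug.rwlock.retry and debug.rwlock.loops sysctls and may be tuned
 * at run time, e.g. (illustrative values):
 *
 *	sysctl debug.rwlock.retry=20
 *	sysctl debug.rwlock.loops=20000
 */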
75 
76 #ifdef DDB
77 #include <ddb/ddb.h>
78 
79 static void	db_show_rwlock(const struct lock_object *lock);
80 #endif
81 static void	assert_rw(const struct lock_object *lock, int what);
82 static void	lock_rw(struct lock_object *lock, uintptr_t how);
83 #ifdef KDTRACE_HOOKS
84 static int	owner_rw(const struct lock_object *lock, struct thread **owner);
85 #endif
86 static uintptr_t unlock_rw(struct lock_object *lock);
87 
88 struct lock_class lock_class_rw = {
89 	.lc_name = "rw",
90 	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
91 	.lc_assert = assert_rw,
92 #ifdef DDB
93 	.lc_ddb_show = db_show_rwlock,
94 #endif
95 	.lc_lock = lock_rw,
96 	.lc_unlock = unlock_rw,
97 #ifdef KDTRACE_HOOKS
98 	.lc_owner = owner_rw,
99 #endif
100 };
101 
102 /*
103  * Return a pointer to the owning thread if the lock is write-locked or
104  * NULL if the lock is unlocked or read-locked.
105  */
106 #define	rw_wowner(rw)							\
107 	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
108 	    (struct thread *)RW_OWNER((rw)->rw_lock))
109 
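/*
 * Rough sketch of the rw_lock word layout assumed here (sys/rwlock.h has
 * the authoritative definitions): the low bits hold the RW_LOCK_READ,
 * RW_LOCK_READ_WAITERS, RW_LOCK_WRITE_WAITERS and RW_LOCK_WRITE_SPINNER
 * flags.  With RW_LOCK_READ set the remaining bits count the readers;
 * with it clear they hold the owning thread pointer.  For example:
 *
 *	RW_UNLOCKED == RW_READERS_LOCK(0)	read bit set, zero readers
 *	RW_READERS_LOCK(2)			read-locked by two readers
 *	(uintptr_t)td | waiter flags		write-locked by thread td
 */
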
110 /*
111  * Return whether the write owner is recursed.  Write ownership is not
112  * assured here and should be checked by the caller beforehand.
113  */
114 #define	rw_recursed(rw)		((rw)->rw_recurse != 0)
115 
116 /*
117  * Return true if curthread holds the lock.
118  */
119 #define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)
120 
121 /*
122  * Return a pointer to the thread that owns this lock and should receive
123  * any priority lent by threads that block on this lock.  Currently this
124  * is identical to rw_wowner().
125  */
126 #define	rw_owner(rw)		rw_wowner(rw)
127 
128 #ifndef INVARIANTS
129 #define	__rw_assert(c, what, file, line)
130 #endif
131 
132 void
133 assert_rw(const struct lock_object *lock, int what)
134 {
135 
136 	rw_assert((const struct rwlock *)lock, what);
137 }
138 
139 void
140 lock_rw(struct lock_object *lock, uintptr_t how)
141 {
142 	struct rwlock *rw;
143 
144 	rw = (struct rwlock *)lock;
145 	if (how)
146 		rw_rlock(rw);
147 	else
148 		rw_wlock(rw);
149 }
150 
151 uintptr_t
152 unlock_rw(struct lock_object *lock)
153 {
154 	struct rwlock *rw;
155 
156 	rw = (struct rwlock *)lock;
157 	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
158 	if (rw->rw_lock & RW_LOCK_READ) {
159 		rw_runlock(rw);
160 		return (1);
161 	} else {
162 		rw_wunlock(rw);
163 		return (0);
164 	}
165 }
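
/*
 * lock_rw() and unlock_rw() are reached only through the lock class
 * methods (lc_lock/lc_unlock), e.g. when a thread sleeps while holding
 * an rwlock via rw_sleep().  A plausible sketch of that use:
 *
 *	how = LOCK_CLASS(lock)->lc_unlock(lock);	drop before sleeping
 *	...sleep...
 *	LOCK_CLASS(lock)->lc_lock(lock, how);		reacquire as before
 */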
166 
167 #ifdef KDTRACE_HOOKS
168 int
169 owner_rw(const struct lock_object *lock, struct thread **owner)
170 {
171 	const struct rwlock *rw = (const struct rwlock *)lock;
172 	uintptr_t x = rw->rw_lock;
173 
174 	*owner = rw_wowner(rw);
175 	return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
176 	    (*owner != NULL));
177 }
178 #endif
179 
180 void
181 _rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
182 {
183 	struct rwlock *rw;
184 	int flags;
185 
186 	rw = rwlock2rw(c);
187 
188 	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
189 	    RW_RECURSE)) == 0);
190 	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
191 	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
192 	    &rw->rw_lock));
193 
194 	flags = LO_UPGRADABLE;
195 	if (opts & RW_DUPOK)
196 		flags |= LO_DUPOK;
197 	if (opts & RW_NOPROFILE)
198 		flags |= LO_NOPROFILE;
199 	if (!(opts & RW_NOWITNESS))
200 		flags |= LO_WITNESS;
201 	if (opts & RW_RECURSE)
202 		flags |= LO_RECURSABLE;
203 	if (opts & RW_QUIET)
204 		flags |= LO_QUIET;
205 
206 	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
207 	rw->rw_lock = RW_UNLOCKED;
208 	rw->rw_recurse = 0;
209 }
210 
211 void
212 _rw_destroy(volatile uintptr_t *c)
213 {
214 	struct rwlock *rw;
215 
216 	rw = rwlock2rw(c);
217 
218 	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
219 	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
220 	rw->rw_lock = RW_DESTROYED;
221 	lock_destroy(&rw->lock_object);
222 }
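
/*
 * Typical consumer lifecycle for the routines above, sketched with a
 * hypothetical lock (see rwlock(9) for the full KPI):
 *
 *	static struct rwlock example_lock;	hypothetical name
 *
 *	rw_init(&example_lock, "example");	once, before first use
 *	rw_rlock(&example_lock);		read-side critical section
 *	rw_runlock(&example_lock);
 *	rw_wlock(&example_lock);		write-side critical section
 *	rw_wunlock(&example_lock);
 *	rw_destroy(&example_lock);		once, after last use
 */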
223 
224 void
225 rw_sysinit(void *arg)
226 {
227 	struct rw_args *args = arg;
228 
229 	rw_init((struct rwlock *)args->ra_rw, args->ra_desc);
230 }
231 
232 void
233 rw_sysinit_flags(void *arg)
234 {
235 	struct rw_args_flags *args = arg;
236 
237 	rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
238 	    args->ra_flags);
239 }
240 
241 int
242 _rw_wowned(const volatile uintptr_t *c)
243 {
244 
245 	return (rw_wowner(rwlock2rw(c)) == curthread);
246 }
247 
248 void
249 _rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
250 {
251 	struct rwlock *rw;
252 
253 	if (SCHEDULER_STOPPED())
254 		return;
255 
256 	rw = rwlock2rw(c);
257 
258 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
259 	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
260 	    curthread, rw->lock_object.lo_name, file, line));
261 	KASSERT(rw->rw_lock != RW_DESTROYED,
262 	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
263 	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
264 	    line, NULL);
265 	__rw_wlock(rw, curthread, file, line);
266 	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
267 	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
268 	curthread->td_locks++;
269 }
270 
271 int
272 __rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
273 {
274 	struct rwlock *rw;
275 	int rval;
276 
277 	if (SCHEDULER_STOPPED())
278 		return (1);
279 
280 	rw = rwlock2rw(c);
281 
282 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
283 	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
284 	    curthread, rw->lock_object.lo_name, file, line));
285 	KASSERT(rw->rw_lock != RW_DESTROYED,
286 	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
287 
288 	if (rw_wlocked(rw) &&
289 	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
290 		rw->rw_recurse++;
291 		rval = 1;
292 	} else
293 		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
294 		    (uintptr_t)curthread);
295 
296 	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
297 	if (rval) {
298 		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
299 		    file, line);
300 		curthread->td_locks++;
301 	}
302 	return (rval);
303 }
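
/*
 * rw_try_wlock() is typically used where blocking is not acceptable, for
 * instance when taking the lock out of the normal lock order.  A sketch
 * with the hypothetical lock from above:
 *
 *	if (rw_try_wlock(&example_lock)) {
 *		...exclusive access...
 *		rw_wunlock(&example_lock);
 *	} else {
 *		...back off, drop conflicting locks and retry, or fail...
 *	}
 */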
304 
305 void
306 _rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
307 {
308 	struct rwlock *rw;
309 
310 	if (SCHEDULER_STOPPED())
311 		return;
312 
313 	rw = rwlock2rw(c);
314 
315 	KASSERT(rw->rw_lock != RW_DESTROYED,
316 	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
317 	__rw_assert(c, RA_WLOCKED, file, line);
318 	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
319 	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
320 	    line);
321 	__rw_wunlock(rw, curthread, file, line);
322 	curthread->td_locks--;
323 }
324 /*
325  * Determines whether a new reader can acquire a lock.  Succeeds if the
326  * reader already holds read locks and the lock is read-locked, to
327  * prevent deadlock from reader recursion.  Also succeeds if the lock
328  * is unlocked or read-locked and has no write waiters or spinners.
329  * Failing otherwise gives pending writers priority over new readers.
330  */
331 #define	RW_CAN_READ(_rw)						\
332     ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &	\
333     (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==	\
334     RW_LOCK_READ)
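
/*
 * For example, with v == RW_READERS_LOCK(2) (two readers, no waiter or
 * spinner flags) RW_CAN_READ(v) is true and a new reader may proceed.
 * With RW_LOCK_WRITE_WAITERS or RW_LOCK_WRITE_SPINNER set it is false,
 * unless curthread already holds read locks (td_rw_rlocks != 0) and the
 * lock is still read-locked, in which case recursion is allowed to avoid
 * deadlock.
 */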
335 
336 void
337 __rw_rlock(volatile uintptr_t *c, const char *file, int line)
338 {
339 	struct rwlock *rw;
340 	struct turnstile *ts;
341 #ifdef ADAPTIVE_RWLOCKS
342 	volatile struct thread *owner;
343 	int spintries = 0;
344 	int i;
345 #endif
346 #ifdef LOCK_PROFILING
347 	uint64_t waittime = 0;
348 	int contested = 0;
349 #endif
350 	uintptr_t v;
351 #ifdef KDTRACE_HOOKS
352 	uint64_t spin_cnt = 0;
353 	uint64_t sleep_cnt = 0;
354 	int64_t sleep_time = 0;
355 #endif
356 
357 	if (SCHEDULER_STOPPED())
358 		return;
359 
360 	rw = rwlock2rw(c);
361 
362 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
363 	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
364 	    curthread, rw->lock_object.lo_name, file, line));
365 	KASSERT(rw->rw_lock != RW_DESTROYED,
366 	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
367 	KASSERT(rw_wowner(rw) != curthread,
368 	    ("rw_rlock: wlock already held for %s @ %s:%d",
369 	    rw->lock_object.lo_name, file, line));
370 	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);
371 
372 	for (;;) {
373 #ifdef KDTRACE_HOOKS
374 		spin_cnt++;
375 #endif
376 		/*
377 		 * Handle the easy case.  If no other thread has a write
378 		 * lock, then try to bump up the count of read locks.  Note
379 		 * that we have to preserve the current state of the
380 		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
381 		 * read lock, then rw_lock must have changed, so restart
382 		 * the loop.  Note that this handles the case of a
383 		 * completely unlocked rwlock since such a lock is encoded
384 		 * as a read lock with no waiters.
385 		 */
386 		v = rw->rw_lock;
387 		if (RW_CAN_READ(v)) {
388 			/*
389 			 * The RW_LOCK_READ_WAITERS flag should only be set
390 			 * if the lock has been unlocked and write waiters
391 			 * were present.
392 			 */
393 			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
394 			    v + RW_ONE_READER)) {
395 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
396 					CTR4(KTR_LOCK,
397 					    "%s: %p succeed %p -> %p", __func__,
398 					    rw, (void *)v,
399 					    (void *)(v + RW_ONE_READER));
400 				break;
401 			}
402 			continue;
403 		}
404 #ifdef HWPMC_HOOKS
405 		PMC_SOFT_CALL( , , lock, failed);
406 #endif
407 		lock_profile_obtain_lock_failed(&rw->lock_object,
408 		    &contested, &waittime);
409 
410 #ifdef ADAPTIVE_RWLOCKS
411 		/*
412 		 * If the owner is running on another CPU, spin until
413 		 * the owner stops running or the state of the lock
414 		 * changes.
415 		 */
416 		if ((v & RW_LOCK_READ) == 0) {
417 			owner = (struct thread *)RW_OWNER(v);
418 			if (TD_IS_RUNNING(owner)) {
419 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
420 					CTR3(KTR_LOCK,
421 					    "%s: spinning on %p held by %p",
422 					    __func__, rw, owner);
423 				while ((struct thread*)RW_OWNER(rw->rw_lock) ==
424 				    owner && TD_IS_RUNNING(owner)) {
425 					cpu_spinwait();
426 #ifdef KDTRACE_HOOKS
427 					spin_cnt++;
428 #endif
429 				}
430 				continue;
431 			}
432 		} else if (spintries < rowner_retries) {
433 			spintries++;
434 			for (i = 0; i < rowner_loops; i++) {
435 				v = rw->rw_lock;
436 				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
437 					break;
438 				cpu_spinwait();
439 			}
440 #ifdef KDTRACE_HOOKS
441 			spin_cnt += rowner_loops - i;
442 #endif
443 			if (i != rowner_loops)
444 				continue;
445 		}
446 #endif
447 
448 		/*
449 		 * Okay, now it's the hard case.  Some other thread already
450 		 * has a write lock or there are write waiters present;
451 		 * acquire the turnstile lock so we can begin the process
452 		 * of blocking.
453 		 */
454 		ts = turnstile_trywait(&rw->lock_object);
455 
456 		/*
457 		 * The lock might have been released while we spun, so
458 		 * recheck its state and restart the loop if needed.
459 		 */
460 		v = rw->rw_lock;
461 		if (RW_CAN_READ(v)) {
462 			turnstile_cancel(ts);
463 			continue;
464 		}
465 
466 #ifdef ADAPTIVE_RWLOCKS
467 		/*
468 		 * The current lock owner might have started executing
469 		 * on another CPU (or the lock could have changed
470 		 * owners) while we were waiting on the turnstile
471 		 * chain lock.  If so, drop the turnstile lock and try
472 		 * again.
473 		 */
474 		if ((v & RW_LOCK_READ) == 0) {
475 			owner = (struct thread *)RW_OWNER(v);
476 			if (TD_IS_RUNNING(owner)) {
477 				turnstile_cancel(ts);
478 				continue;
479 			}
480 		}
481 #endif
482 
483 		/*
484 		 * The lock is held in write mode or it already has waiters.
485 		 */
486 		MPASS(!RW_CAN_READ(v));
487 
488 		/*
489 		 * If the RW_LOCK_READ_WAITERS flag is already set, then
490 		 * we can go ahead and block.  If it is not set then try
491 		 * to set it.  If we fail to set it drop the turnstile
492 		 * lock and restart the loop.
493 		 */
494 		if (!(v & RW_LOCK_READ_WAITERS)) {
495 			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
496 			    v | RW_LOCK_READ_WAITERS)) {
497 				turnstile_cancel(ts);
498 				continue;
499 			}
500 			if (LOCK_LOG_TEST(&rw->lock_object, 0))
501 				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
502 				    __func__, rw);
503 		}
504 
505 		/*
506 		 * We were unable to acquire the lock and the read waiters
507 		 * flag is set, so we must block on the turnstile.
508 		 */
509 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
510 			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
511 			    rw);
512 #ifdef KDTRACE_HOOKS
513 		sleep_time -= lockstat_nsecs();
514 #endif
515 		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
516 #ifdef KDTRACE_HOOKS
517 		sleep_time += lockstat_nsecs();
518 		sleep_cnt++;
519 #endif
520 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
521 			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
522 			    __func__, rw);
523 	}
524 
525 	/*
526 	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
527 	 * however.  turnstiles don't like owners changing between calls to
528 	 * turnstile_wait() currently.
529 	 */
530 	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_RLOCK_ACQUIRE, rw, contested,
531 	    waittime, file, line);
532 	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
533 	WITNESS_LOCK(&rw->lock_object, 0, file, line);
534 	curthread->td_locks++;
535 	curthread->td_rw_rlocks++;
536 #ifdef KDTRACE_HOOKS
537 	if (sleep_time)
538 		LOCKSTAT_RECORD1(LS_RW_RLOCK_BLOCK, rw, sleep_time);
539 
540 	/*
541 	 * Record only the loops spinning and not sleeping.
542 	 */
543 	if (spin_cnt > sleep_cnt)
544 		LOCKSTAT_RECORD1(LS_RW_RLOCK_SPIN, rw, (spin_cnt - sleep_cnt));
545 #endif
546 }
547 
548 int
549 __rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
550 {
551 	struct rwlock *rw;
552 	uintptr_t x;
553 
554 	if (SCHEDULER_STOPPED())
555 		return (1);
556 
557 	rw = rwlock2rw(c);
558 
559 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
560 	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
561 	    curthread, rw->lock_object.lo_name, file, line));
562 
563 	for (;;) {
564 		x = rw->rw_lock;
565 		KASSERT(rw->rw_lock != RW_DESTROYED,
566 		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
567 		if (!(x & RW_LOCK_READ))
568 			break;
569 		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
570 			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
571 			    line);
572 			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
573 			curthread->td_locks++;
574 			curthread->td_rw_rlocks++;
575 			return (1);
576 		}
577 	}
578 
579 	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
580 	return (0);
581 }
582 
583 void
584 _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
585 {
586 	struct rwlock *rw;
587 	struct turnstile *ts;
588 	uintptr_t x, v, queue;
589 
590 	if (SCHEDULER_STOPPED())
591 		return;
592 
593 	rw = rwlock2rw(c);
594 
595 	KASSERT(rw->rw_lock != RW_DESTROYED,
596 	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
597 	__rw_assert(c, RA_RLOCKED, file, line);
598 	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
599 	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);
600 
601 	/* TODO: drop "owner of record" here. */
602 
603 	for (;;) {
604 		/*
605 		 * See if there is more than one read lock held.  If so,
606 		 * just drop one and return.
607 		 */
608 		x = rw->rw_lock;
609 		if (RW_READERS(x) > 1) {
610 			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
611 			    x - RW_ONE_READER)) {
612 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
613 					CTR4(KTR_LOCK,
614 					    "%s: %p succeeded %p -> %p",
615 					    __func__, rw, (void *)x,
616 					    (void *)(x - RW_ONE_READER));
617 				break;
618 			}
619 			continue;
620 		}
621 		/*
622 		 * If there aren't any waiters for a write lock, then try
623 		 * to drop it quickly.
624 		 */
625 		if (!(x & RW_LOCK_WAITERS)) {
626 			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
627 			    RW_READERS_LOCK(1));
628 			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
629 			    RW_UNLOCKED)) {
630 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
631 					CTR2(KTR_LOCK, "%s: %p last succeeded",
632 					    __func__, rw);
633 				break;
634 			}
635 			continue;
636 		}
637 		/*
638 		 * Ok, we know we have waiters and we think we are the
639 		 * last reader, so grab the turnstile lock.
640 		 */
641 		turnstile_chain_lock(&rw->lock_object);
642 		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
643 		MPASS(v & RW_LOCK_WAITERS);
644 
645 		/*
646 		 * Try to drop our lock leaving the lock in an unlocked
647 		 * state.
648 		 *
649 		 * If you wanted to do explicit lock handoff you'd have to
650 		 * do it here.  You'd also want to use turnstile_signal()
651 		 * and you'd have to handle the race where a higher
652 		 * priority thread blocks on the write lock before the
653 		 * thread you wake up actually runs and have the new thread
654 		 * "steal" the lock.  For now it's a lot simpler to just
655 		 * wake up all of the waiters.
656 		 *
657 		 * As above, if we fail, then another thread might have
658 		 * acquired a read lock, so drop the turnstile lock and
659 		 * restart.
660 		 */
661 		x = RW_UNLOCKED;
662 		if (v & RW_LOCK_WRITE_WAITERS) {
663 			queue = TS_EXCLUSIVE_QUEUE;
664 			x |= (v & RW_LOCK_READ_WAITERS);
665 		} else
666 			queue = TS_SHARED_QUEUE;
667 		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
668 		    x)) {
669 			turnstile_chain_unlock(&rw->lock_object);
670 			continue;
671 		}
672 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
673 			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
674 			    __func__, rw);
675 
676 		/*
677 		 * Ok.  The lock is released and all that's left is to
678 		 * wake up the waiters.  Note that the lock might not be
679 		 * free anymore, but in that case the writers will just
680 		 * block again if they run before the new lock holder(s)
681 		 * release the lock.
682 		 */
683 		ts = turnstile_lookup(&rw->lock_object);
684 		MPASS(ts != NULL);
685 		turnstile_broadcast(ts, queue);
686 		turnstile_unpend(ts, TS_SHARED_LOCK);
687 		turnstile_chain_unlock(&rw->lock_object);
688 		break;
689 	}
690 	LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_RUNLOCK_RELEASE, rw);
691 	curthread->td_locks--;
692 	curthread->td_rw_rlocks--;
693 }
694 
695 /*
696  * This function is called when we are unable to obtain a write lock on the
697  * first try.  This means that at least one other thread holds either a
698  * read or write lock.
699  */
700 void
701 __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
702     int line)
703 {
704 	struct rwlock *rw;
705 	struct turnstile *ts;
706 #ifdef ADAPTIVE_RWLOCKS
707 	volatile struct thread *owner;
708 	int spintries = 0;
709 	int i;
710 #endif
711 	uintptr_t v, x;
712 #ifdef LOCK_PROFILING
713 	uint64_t waittime = 0;
714 	int contested = 0;
715 #endif
716 #ifdef KDTRACE_HOOKS
717 	uint64_t spin_cnt = 0;
718 	uint64_t sleep_cnt = 0;
719 	int64_t sleep_time = 0;
720 #endif
721 
722 	if (SCHEDULER_STOPPED())
723 		return;
724 
725 	rw = rwlock2rw(c);
726 
727 	if (rw_wlocked(rw)) {
728 		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
729 		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
730 		    __func__, rw->lock_object.lo_name, file, line));
731 		rw->rw_recurse++;
732 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
733 			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
734 		return;
735 	}
736 
737 	if (LOCK_LOG_TEST(&rw->lock_object, 0))
738 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
739 		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
740 
741 	while (!_rw_write_lock(rw, tid)) {
742 #ifdef KDTRACE_HOOKS
743 		spin_cnt++;
744 #endif
745 #ifdef HWPMC_HOOKS
746 		PMC_SOFT_CALL( , , lock, failed);
747 #endif
748 		lock_profile_obtain_lock_failed(&rw->lock_object,
749 		    &contested, &waittime);
750 #ifdef ADAPTIVE_RWLOCKS
751 		/*
752 		 * If the lock is write locked and the owner is
753 		 * running on another CPU, spin until the owner stops
754 		 * running or the state of the lock changes.
755 		 */
756 		v = rw->rw_lock;
757 		owner = (struct thread *)RW_OWNER(v);
758 		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
759 			if (LOCK_LOG_TEST(&rw->lock_object, 0))
760 				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
761 				    __func__, rw, owner);
762 			while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
763 			    TD_IS_RUNNING(owner)) {
764 				cpu_spinwait();
765 #ifdef KDTRACE_HOOKS
766 				spin_cnt++;
767 #endif
768 			}
769 			continue;
770 		}
771 		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
772 		    spintries < rowner_retries) {
773 			if (!(v & RW_LOCK_WRITE_SPINNER)) {
774 				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
775 				    v | RW_LOCK_WRITE_SPINNER)) {
776 					continue;
777 				}
778 			}
779 			spintries++;
780 			for (i = 0; i < rowner_loops; i++) {
781 				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
782 					break;
783 				cpu_spinwait();
784 			}
785 #ifdef KDTRACE_HOOKS
786 			spin_cnt += rowner_loops - i;
787 #endif
788 			if (i != rowner_loops)
789 				continue;
790 		}
791 #endif
792 		ts = turnstile_trywait(&rw->lock_object);
793 		v = rw->rw_lock;
794 
795 #ifdef ADAPTIVE_RWLOCKS
796 		/*
797 		 * The current lock owner might have started executing
798 		 * on another CPU (or the lock could have changed
799 		 * owners) while we were waiting on the turnstile
800 		 * chain lock.  If so, drop the turnstile lock and try
801 		 * again.
802 		 */
803 		if (!(v & RW_LOCK_READ)) {
804 			owner = (struct thread *)RW_OWNER(v);
805 			if (TD_IS_RUNNING(owner)) {
806 				turnstile_cancel(ts);
807 				continue;
808 			}
809 		}
810 #endif
811 		/*
812 		 * Check the waiter flags for this rwlock.
813 		 * If the lock was released without leaving any pending
814 		 * waiters queued, simply try to acquire it.
815 		 * If waiters are still queued, claim ownership of the
816 		 * lock and preserve the pending queue.
817 		 */
818 		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
819 		if ((v & ~x) == RW_UNLOCKED) {
820 			x &= ~RW_LOCK_WRITE_SPINNER;
821 			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
822 				if (x)
823 					turnstile_claim(ts);
824 				else
825 					turnstile_cancel(ts);
826 				break;
827 			}
828 			turnstile_cancel(ts);
829 			continue;
830 		}
831 		/*
832 		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
833 		 * set it.  If we fail to set it, then loop back and try
834 		 * again.
835 		 */
836 		if (!(v & RW_LOCK_WRITE_WAITERS)) {
837 			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
838 			    v | RW_LOCK_WRITE_WAITERS)) {
839 				turnstile_cancel(ts);
840 				continue;
841 			}
842 			if (LOCK_LOG_TEST(&rw->lock_object, 0))
843 				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
844 				    __func__, rw);
845 		}
846 		/*
847 		 * We were unable to acquire the lock and the write waiters
848 		 * flag is set, so we must block on the turnstile.
849 		 */
850 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
851 			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
852 			    rw);
853 #ifdef KDTRACE_HOOKS
854 		sleep_time -= lockstat_nsecs();
855 #endif
856 		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
857 #ifdef KDTRACE_HOOKS
858 		sleep_time += lockstat_nsecs();
859 		sleep_cnt++;
860 #endif
861 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
862 			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
863 			    __func__, rw);
864 #ifdef ADAPTIVE_RWLOCKS
865 		spintries = 0;
866 #endif
867 	}
868 	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, rw, contested,
869 	    waittime, file, line);
870 #ifdef KDTRACE_HOOKS
871 	if (sleep_time)
872 		LOCKSTAT_RECORD1(LS_RW_WLOCK_BLOCK, rw, sleep_time);
873 
874 	/*
875 	 * Record only the loops spinning and not sleeping.
876 	 */
877 	if (spin_cnt > sleep_cnt)
878 		LOCKSTAT_RECORD1(LS_RW_WLOCK_SPIN, rw, (spin_cnt - sleep_cnt));
879 #endif
880 }
881 
882 /*
883  * This function is called if the first try at releasing a write lock failed.
884  * This means that one of the two waiter bits must be set, indicating that at
885  * least one thread is waiting on this lock.
886  */
887 void
888 __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
889     int line)
890 {
891 	struct rwlock *rw;
892 	struct turnstile *ts;
893 	uintptr_t v;
894 	int queue;
895 
896 	if (SCHEDULER_STOPPED())
897 		return;
898 
899 	rw = rwlock2rw(c);
900 
901 	if (rw_wlocked(rw) && rw_recursed(rw)) {
902 		rw->rw_recurse--;
903 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
904 			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
905 		return;
906 	}
907 
908 	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
909 	    ("%s: neither of the waiter flags are set", __func__));
910 
911 	if (LOCK_LOG_TEST(&rw->lock_object, 0))
912 		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);
913 
914 	turnstile_chain_lock(&rw->lock_object);
915 	ts = turnstile_lookup(&rw->lock_object);
916 	MPASS(ts != NULL);
917 
918 	/*
919 	 * Use the same algorithm as sx locks for now.  Prefer waking up shared
920 	 * waiters, if there are any, over writers.  This is probably not ideal.
921 	 *
922 	 * 'v' is the value we are going to write back to rw_lock.  If we
923 	 * have waiters on both queues, we need to preserve the state of
924 	 * the waiter flag for the queue we don't wake up.  For now this is
925 	 * hardcoded for the algorithm mentioned above.
926 	 *
927 	 * In the case of both readers and writers waiting we wakeup the
928 	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
929 	 * new writer comes in before a reader it will claim the lock up
930 	 * above.  There is probably a potential priority inversion in
931 	 * there that could be worked around either by waking both queues
932 	 * of waiters or doing some complicated lock handoff gymnastics.
933 	 */
934 	v = RW_UNLOCKED;
935 	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
936 		queue = TS_EXCLUSIVE_QUEUE;
937 		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
938 	} else
939 		queue = TS_SHARED_QUEUE;
940 
941 	/* Wake up all waiters for the specific queue. */
942 	if (LOCK_LOG_TEST(&rw->lock_object, 0))
943 		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
944 		    queue == TS_SHARED_QUEUE ? "read" : "write");
945 	turnstile_broadcast(ts, queue);
946 	atomic_store_rel_ptr(&rw->rw_lock, v);
947 	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
948 	turnstile_chain_unlock(&rw->lock_object);
949 }
950 
951 /*
952  * Attempt to do a non-blocking upgrade from a read lock to a write
953  * lock.  This will only succeed if this thread holds a single read
954  * lock.  Returns true if the upgrade succeeded and false otherwise.
955  */
956 int
957 __rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
958 {
959 	struct rwlock *rw;
960 	uintptr_t v, x, tid;
961 	struct turnstile *ts;
962 	int success;
963 
964 	if (SCHEDULER_STOPPED())
965 		return (1);
966 
967 	rw = rwlock2rw(c);
968 
969 	KASSERT(rw->rw_lock != RW_DESTROYED,
970 	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
971 	__rw_assert(c, RA_RLOCKED, file, line);
972 
973 	/*
974 	 * Attempt to switch from one reader to a writer.  If there
975 	 * are any write waiters, then we will have to lock the
976 	 * turnstile first to prevent races with another writer
977 	 * calling turnstile_wait() before we have claimed this
978 	 * turnstile.  So, do the simple case of no waiters first.
979 	 */
980 	tid = (uintptr_t)curthread;
981 	success = 0;
982 	for (;;) {
983 		v = rw->rw_lock;
984 		if (RW_READERS(v) > 1)
985 			break;
986 		if (!(v & RW_LOCK_WAITERS)) {
987 			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
988 			if (!success)
989 				continue;
990 			break;
991 		}
992 
993 		/*
994 		 * Ok, we think we have waiters, so lock the turnstile.
995 		 */
996 		ts = turnstile_trywait(&rw->lock_object);
997 		v = rw->rw_lock;
998 		if (RW_READERS(v) > 1) {
999 			turnstile_cancel(ts);
1000 			break;
1001 		}
1002 		/*
1003 		 * Try to switch from one reader to a writer again.  This time
1004 		 * we honor the current state of the waiters flags.
1005 		 * If we obtain the lock with the flags set, then claim
1006 		 * ownership of the turnstile.
1007 		 */
1008 		x = rw->rw_lock & RW_LOCK_WAITERS;
1009 		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
1010 		if (success) {
1011 			if (x)
1012 				turnstile_claim(ts);
1013 			else
1014 				turnstile_cancel(ts);
1015 			break;
1016 		}
1017 		turnstile_cancel(ts);
1018 	}
1019 	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
1020 	if (success) {
1021 		curthread->td_rw_rlocks--;
1022 		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
1023 		    file, line);
1024 		LOCKSTAT_RECORD0(LS_RW_TRYUPGRADE_UPGRADE, rw);
1025 	}
1026 	return (success);
1027 }
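
/*
 * Because the upgrade can fail, callers typically fall back to dropping
 * the read lock, taking the write lock and revalidating whatever state
 * may have changed in between, e.g. with the hypothetical lock above:
 *
 *	if (!rw_try_upgrade(&example_lock)) {
 *		rw_runlock(&example_lock);
 *		rw_wlock(&example_lock);
 *		...re-check the protected state...
 *	}
 */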
1028 
1029 /*
1030  * Downgrade a write lock into a single read lock.
1031  */
1032 void
1033 __rw_downgrade(volatile uintptr_t *c, const char *file, int line)
1034 {
1035 	struct rwlock *rw;
1036 	struct turnstile *ts;
1037 	uintptr_t tid, v;
1038 	int rwait, wwait;
1039 
1040 	if (SCHEDULER_STOPPED())
1041 		return;
1042 
1043 	rw = rwlock2rw(c);
1044 
1045 	KASSERT(rw->rw_lock != RW_DESTROYED,
1046 	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
1047 	__rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
1048 #ifndef INVARIANTS
1049 	if (rw_recursed(rw))
1050 		panic("downgrade of a recursed lock");
1051 #endif
1052 
1053 	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);
1054 
1055 	/*
1056 	 * Convert from a writer to a single reader.  First we handle
1057 	 * the easy case with no waiters.  If there are any waiters, we
1058 	 * lock the turnstile and "disown" the lock.
1059 	 */
1060 	tid = (uintptr_t)curthread;
1061 	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
1062 		goto out;
1063 
1064 	/*
1065 	 * Ok, we think we have waiters, so lock the turnstile so we can
1066 	 * read the waiter flags without any races.
1067 	 */
1068 	turnstile_chain_lock(&rw->lock_object);
1069 	v = rw->rw_lock & RW_LOCK_WAITERS;
1070 	rwait = v & RW_LOCK_READ_WAITERS;
1071 	wwait = v & RW_LOCK_WRITE_WAITERS;
1072 	MPASS(rwait | wwait);
1073 
1074 	/*
1075 	 * Downgrade from a write lock while preserving waiters flag
1076 	 * and give up ownership of the turnstile.
1077 	 */
1078 	ts = turnstile_lookup(&rw->lock_object);
1079 	MPASS(ts != NULL);
1080 	if (!wwait)
1081 		v &= ~RW_LOCK_READ_WAITERS;
1082 	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
1083 	/*
1084 	 * Wake other readers if there are no writers pending.  Otherwise they
1085 	 * won't be able to acquire the lock anyway.
1086 	 */
1087 	if (rwait && !wwait) {
1088 		turnstile_broadcast(ts, TS_SHARED_QUEUE);
1089 		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
1090 	} else
1091 		turnstile_disown(ts);
1092 	turnstile_chain_unlock(&rw->lock_object);
1093 out:
1094 	curthread->td_rw_rlocks++;
1095 	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
1096 	LOCKSTAT_RECORD0(LS_RW_DOWNGRADE_DOWNGRADE, rw);
1097 }
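
/*
 * rw_downgrade() is typically used once a writer has finished modifying
 * the protected data but still needs a consistent view of it, e.g. with
 * the hypothetical lock above:
 *
 *	rw_wlock(&example_lock);
 *	...modify data...
 *	rw_downgrade(&example_lock);	readers may now enter as well
 *	...keep reading safely...
 *	rw_runlock(&example_lock);
 */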
1098 
1099 #ifdef INVARIANT_SUPPORT
1100 #ifndef INVARIANTS
1101 #undef __rw_assert
1102 #endif
1103 
1104 /*
1105  * In the non-WITNESS case, rw_assert() can only detect that at least
1106  * *some* thread owns an rlock, but it cannot guarantee that *this*
1107  * thread owns an rlock.
1108  */
1109 void
1110 __rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
1111 {
1112 	const struct rwlock *rw;
1113 
1114 	if (panicstr != NULL)
1115 		return;
1116 
1117 	rw = rwlock2rw(c);
1118 
1119 	switch (what) {
1120 	case RA_LOCKED:
1121 	case RA_LOCKED | RA_RECURSED:
1122 	case RA_LOCKED | RA_NOTRECURSED:
1123 	case RA_RLOCKED:
1124 	case RA_RLOCKED | RA_RECURSED:
1125 	case RA_RLOCKED | RA_NOTRECURSED:
1126 #ifdef WITNESS
1127 		witness_assert(&rw->lock_object, what, file, line);
1128 #else
1129 		/*
1130 		 * If some other thread has a write lock or we have one
1131 		 * and are asserting a read lock, fail.  Also, if no one
1132 		 * has a lock at all, fail.
1133 		 */
1134 		if (rw->rw_lock == RW_UNLOCKED ||
1135 		    (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
1136 		    rw_wowner(rw) != curthread)))
1137 			panic("Lock %s not %slocked @ %s:%d\n",
1138 			    rw->lock_object.lo_name, (what & RA_RLOCKED) ?
1139 			    "read " : "", file, line);
1140 
1141 		if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
1142 			if (rw_recursed(rw)) {
1143 				if (what & RA_NOTRECURSED)
1144 					panic("Lock %s recursed @ %s:%d\n",
1145 					    rw->lock_object.lo_name, file,
1146 					    line);
1147 			} else if (what & RA_RECURSED)
1148 				panic("Lock %s not recursed @ %s:%d\n",
1149 				    rw->lock_object.lo_name, file, line);
1150 		}
1151 #endif
1152 		break;
1153 	case RA_WLOCKED:
1154 	case RA_WLOCKED | RA_RECURSED:
1155 	case RA_WLOCKED | RA_NOTRECURSED:
1156 		if (rw_wowner(rw) != curthread)
1157 			panic("Lock %s not exclusively locked @ %s:%d\n",
1158 			    rw->lock_object.lo_name, file, line);
1159 		if (rw_recursed(rw)) {
1160 			if (what & RA_NOTRECURSED)
1161 				panic("Lock %s recursed @ %s:%d\n",
1162 				    rw->lock_object.lo_name, file, line);
1163 		} else if (what & RA_RECURSED)
1164 			panic("Lock %s not recursed @ %s:%d\n",
1165 			    rw->lock_object.lo_name, file, line);
1166 		break;
1167 	case RA_UNLOCKED:
1168 #ifdef WITNESS
1169 		witness_assert(&rw->lock_object, what, file, line);
1170 #else
1171 		/*
1172 		 * If we hold a write lock, fail.  We can't reliably check
1173 		 * to see if we hold a read lock or not.
1174 		 */
1175 		if (rw_wowner(rw) == curthread)
1176 			panic("Lock %s exclusively locked @ %s:%d\n",
1177 			    rw->lock_object.lo_name, file, line);
1178 #endif
1179 		break;
1180 	default:
1181 		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
1182 		    line);
1183 	}
1184 }
1185 #endif /* INVARIANT_SUPPORT */
1186 
1187 #ifdef DDB
1188 void
1189 db_show_rwlock(const struct lock_object *lock)
1190 {
1191 	const struct rwlock *rw;
1192 	struct thread *td;
1193 
1194 	rw = (const struct rwlock *)lock;
1195 
1196 	db_printf(" state: ");
1197 	if (rw->rw_lock == RW_UNLOCKED)
1198 		db_printf("UNLOCKED\n");
1199 	else if (rw->rw_lock == RW_DESTROYED) {
1200 		db_printf("DESTROYED\n");
1201 		return;
1202 	} else if (rw->rw_lock & RW_LOCK_READ)
1203 		db_printf("RLOCK: %ju locks\n",
1204 		    (uintmax_t)(RW_READERS(rw->rw_lock)));
1205 	else {
1206 		td = rw_wowner(rw);
1207 		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1208 		    td->td_tid, td->td_proc->p_pid, td->td_name);
1209 		if (rw_recursed(rw))
1210 			db_printf(" recursed: %u\n", rw->rw_recurse);
1211 	}
1212 	db_printf(" waiters: ");
1213 	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
1214 	case RW_LOCK_READ_WAITERS:
1215 		db_printf("readers\n");
1216 		break;
1217 	case RW_LOCK_WRITE_WAITERS:
1218 		db_printf("writers\n");
1219 		break;
1220 	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
1221 		db_printf("readers and writers\n");
1222 		break;
1223 	default:
1224 		db_printf("none\n");
1225 		break;
1226 	}
1227 }
1228 
1229 #endif
1230