xref: /freebsd/sys/kern/kern_rwlock.c (revision f4b37ed0f8b307b1f3f0f630ca725d68f1dff30d)
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/*
 * Return the rwlock address when the lock cookie address is provided.
 * This functionality assumes that struct rwlock* has a member named rw_lock.
 */
#define	rwlock2rw(c)	(__containerof(c, struct rwlock, rw_lock))
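
/*
 * Illustrative sketch (not used by this file): __containerof() recovers the
 * enclosing structure from a pointer to one of its members, so rwlock2rw()
 * is roughly equivalent to the following pointer arithmetic, ignoring the
 * volatile qualifier, which __containerof() takes care of:
 *
 *	(struct rwlock *)((char *)c - offsetof(struct rwlock, rw_lock))
 */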

#ifdef ADAPTIVE_RWLOCKS
static int rowner_retries = 10;
static int rowner_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
    "rwlock debugging");
SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
#endif
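
/*
 * Because the two knobs above are exported with CTLFLAG_RW, the adaptive
 * spinning behaviour can be tuned at runtime on kernels built with
 * ADAPTIVE_RWLOCKS, e.g. (from userland, values purely illustrative):
 *
 *	# sysctl debug.rwlock.retry=20
 *	# sysctl debug.rwlock.loops=20000
 */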

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(const struct lock_object *lock);
#endif
static void	assert_rw(const struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rw(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rw,
#endif
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))
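
/*
 * For reference (see sys/rwlock.h for the authoritative encoding): rw_lock
 * packs the whole lock state into a single word.  When the lock is
 * write-locked, the word is the owning thread pointer ORed with the waiter
 * and spinner flags and RW_LOCK_READ is clear, which is why RW_OWNER() can
 * recover the owner above.  When the lock is read-locked or unlocked,
 * RW_LOCK_READ is set and the upper bits count the readers, roughly:
 *
 *	unlocked:	RW_UNLOCKED == RW_READERS_LOCK(0)
 *	read-locked:	RW_READERS_LOCK(n) | (waiter flags)
 *	write-locked:	(uintptr_t)owner | (waiter flags)
 */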

/*
 * Returns whether the write owner is recursed.  Write ownership is not
 * assured here and must be checked by the caller beforehand.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the write lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock that should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	__rw_assert(c, what, file, line)
#endif

void
assert_rw(const struct lock_object *lock, int what)
{

	rw_assert((const struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, uintptr_t how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_rlock(rw);
	else
		rw_wlock(rw);
}

uintptr_t
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (1);
	} else {
		rw_wunlock(rw);
		return (0);
	}
}
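
/*
 * The lock_rw()/unlock_rw() methods above let lock-class-agnostic kernel
 * code drop an rwlock and later reacquire it in the same mode.  A minimal
 * sketch, assuming "rw" is currently held as either a read or write lock:
 *
 *	struct lock_class *lc = LOCK_CLASS(&rw->lock_object);
 *	uintptr_t how = lc->lc_unlock(&rw->lock_object);
 *	(run or sleep without the lock held)
 *	lc->lc_lock(&rw->lock_object, how);
 */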

#ifdef KDTRACE_HOOKS
int
owner_rw(const struct lock_object *lock, struct thread **owner)
{
	const struct rwlock *rw = (const struct rwlock *)lock;
	uintptr_t x = rw->rw_lock;

	*owner = rw_wowner(rw);
	return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
	    (*owner != NULL));
}
#endif

void
_rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
{
	struct rwlock *rw;
	int flags;

	rw = rwlock2rw(c);

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE | RW_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
	    &rw->rw_lock));

	flags = LO_UPGRADABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	if (opts & RW_NEW)
		flags |= LO_NEW;

	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
}
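
/*
 * Typical life cycle of an rwlock, shown only as a hedged illustration of
 * the public rw_init()/rw_destroy() and lock/unlock interfaces (the names
 * "example_lock" and "example_data" are hypothetical):
 *
 *	static struct rwlock example_lock;
 *	static int example_data;
 *
 *	rw_init(&example_lock, "example");
 *
 *	rw_rlock(&example_lock);
 *	(void)example_data;
 *	rw_runlock(&example_lock);
 *
 *	rw_wlock(&example_lock);
 *	example_data = 1;
 *	rw_wunlock(&example_lock);
 *
 *	rw_destroy(&example_lock);
 */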

void
_rw_destroy(volatile uintptr_t *c)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init((struct rwlock *)args->ra_rw, args->ra_desc);
}

void
rw_sysinit_flags(void *arg)
{
	struct rw_args_flags *args = arg;

	rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
	    args->ra_flags);
}
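
/*
 * rw_sysinit() and rw_sysinit_flags() are the SYSINIT back ends behind the
 * RW_SYSINIT()/RW_SYSINIT_FLAGS() macros, which let a lock be initialized
 * automatically during boot instead of from explicit code, e.g. (names
 * hypothetical):
 *
 *	static struct rwlock example_lock;
 *	RW_SYSINIT(example_lock_init, &example_lock, "example");
 */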

int
_rw_wowned(const volatile uintptr_t *c)
{

	return (rw_wowner(rwlock2rw(c)) == curthread);
}

void
_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

int
__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) &&
	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!rw_recursed(rw))
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_WRITER);
		curthread->td_locks++;
	}
	return (rval);
}
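
/*
 * A common consumer pattern for the try operation above, useful when a lock
 * must be taken against the normal lock order (purely illustrative, names
 * hypothetical):
 *
 *	if (!rw_try_wlock(&example_lock)) {
 *		(back out, drop the other lock, then take both in order)
 *	}
 */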

void
_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	__rw_wunlock(rw, curthread, file, line);
	curthread->td_locks--;
}

/*
 * Determines whether a new reader can acquire the lock.  Succeeds if the
 * caller already holds a read lock and the lock is read-locked, which
 * prevents deadlock from reader recursion.  Also succeeds if the lock is
 * unlocked and has no write waiters or spinners.  Failing in all other
 * cases gives writers priority over readers.
 */
#define	RW_CAN_READ(_rw)						\
    ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &	\
    (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==	\
    RW_LOCK_READ)
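
/*
 * Worked example of RW_CAN_READ(), informally and in terms of the symbolic
 * lock states sketched earlier: a word equal to RW_UNLOCKED or
 * RW_READERS_LOCK(n) admits a new reader; a word with RW_LOCK_WRITE_WAITERS
 * or RW_LOCK_WRITE_SPINNER set only admits threads that already hold some
 * read lock (curthread->td_rw_rlocks != 0); a write-locked word
 * (RW_LOCK_READ clear) never admits a reader.
 */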

void
__rw_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t v;
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("rw_rlock: wlock already held for %s @ %s:%d",
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
	state = rw->rw_lock;
#endif
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if the lock has been unlocked and write waiters
			 * were present.
			 */
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
			    v + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)v,
					    (void *)(v + RW_ONE_READER));
				break;
			}
			continue;
		}
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", rw->lock_object.lo_name);
				while ((struct thread*)RW_OWNER(rw->rw_lock) ==
				    owner && TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else if (spintries < rowner_retries) {
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
				v = rw->rw_lock;
				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
					break;
				cpu_spinwait();
			}
#ifdef KDTRACE_HOOKS
			spin_cnt += rowner_loops - i;
#endif
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			if (i != rowner_loops)
				continue;
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present;
		 * acquire the turnstile lock so we can begin the process
		 * of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!RW_CAN_READ(v));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set then try
		 * to set it.  If we fail to set it, drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_READER);
	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	curthread->td_locks++;
	curthread->td_rw_rlocks++;
}

int
__rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));

	for (;;) {
		x = rw->rw_lock;
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_READER);
			curthread->td_locks++;
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}

void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t x, v, queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock, leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wake up actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
	curthread->td_locks--;
	curthread->td_rw_rlocks--;
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
__rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
    int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
	uintptr_t v, x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	if (rw_wlocked(rw)) {
		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
	state = rw->rw_lock;
#endif
	while (!_rw_write_lock(rw, tid)) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		v = rw->rw_lock;
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner)) {
				cpu_spinwait();
#ifdef KDTRACE_HOOKS
				spin_cnt++;
#endif
			}
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			continue;
		}
		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
		    spintries < rowner_retries) {
			if (!(v & RW_LOCK_WRITE_SPINNER)) {
				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
				    v | RW_LOCK_WRITE_SPINNER)) {
					continue;
				}
			}
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
					break;
				cpu_spinwait();
			}
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
#ifdef KDTRACE_HOOKS
			spin_cnt += rowner_loops - i;
#endif
			if (i != rowner_loops)
				continue;
		}
#endif
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if (!(v & RW_LOCK_READ)) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif
		/*
		 * Check the waiters flags on this rwlock.  If the lock was
		 * released without leaving any pending waiters queue, simply
		 * try to acquire it.  If a pending waiters queue is present,
		 * claim lock ownership and preserve the pending queue.
		 */
		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		if ((v & ~x) == RW_UNLOCKED) {
			x &= ~RW_LOCK_WRITE_SPINNER;
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
				if (x)
					turnstile_claim(ts);
				else
					turnstile_cancel(ts);
				break;
			}
			turnstile_cancel(ts);
			continue;
		}
		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_cancel(ts);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}
		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
		spintries = 0;
#endif
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_WRITER);
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
    int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	if (rw_wlocked(rw) && rw_recursed(rw)) {
		rw->rw_recurse--;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up shared
	 * waiters, if there are any, over writers.  This is probably not
	 * ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wake up the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
	} else
		queue = TS_SHARED_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(rw__upgrade, rw);
	}
	return (success);
}
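
/*
 * Because the upgrade is non-blocking, callers typically fall back to
 * dropping the read lock and reacquiring the write lock, revalidating any
 * state examined under the read lock.  A hedged sketch (names hypothetical):
 *
 *	rw_rlock(&example_lock);
 *	if (needs_update && !rw_try_upgrade(&example_lock)) {
 *		rw_runlock(&example_lock);
 *		rw_wlock(&example_lock);
 *		(re-check needs_update: the state may have changed)
 *	}
 */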

/*
 * Downgrade a write lock into a single read lock.
 */
void
__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving waiters flag
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(rw__downgrade, rw);
}
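
/*
 * A downgrade lets a writer publish an update and then keep reading without
 * ever releasing the lock.  A hedged sketch (names hypothetical):
 *
 *	rw_wlock(&example_lock);
 *	example_data = 1;
 *	rw_downgrade(&example_lock);
 *	(other readers may now run; our read lock is still held)
 *	rw_runlock(&example_lock);
 */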

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef __rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
__rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct rwlock *rw;

	if (panicstr != NULL)
		return;

	rw = rwlock2rw(c);

	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock, fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */
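
/*
 * Consumers typically use the rw_assert() front end to document and enforce
 * locking requirements at function entry, e.g. (hedged sketch, names
 * hypothetical):
 *
 *	static void
 *	example_modify(struct example_softc *sc)
 *	{
 *
 *		rw_assert(&sc->sc_lock, RA_WLOCKED);
 *		...
 *	}
 */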

#ifdef DDB
void
db_show_rwlock(const struct lock_object *lock)
{
	const struct rwlock *rw;
	struct thread *td;

	rw = (const struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (rw_recursed(rw))
			db_printf(" recursed: %u\n", rw->rw_recurse);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif