Lines matching references to the symbol rw in FreeBSD's sys/kern/kern_rwlock.c. Each entry gives the source line number, the matched source line, and the enclosing definition; lines that introduce rw as a macro or function parameter are tagged "argument", and lines that declare it as a local variable are tagged "local".

138 #define	rw_wowner(rw)	lv_rw_wowner(RW_READ_VALUE(rw))  argument
144 #define rw_recursed(rw) ((rw)->rw_recurse != 0) argument
149 #define rw_wlocked(rw) (rw_wowner((rw)) == curthread) argument
156 #define rw_owner(rw) rw_wowner(rw) argument
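
These owner macros all key off the lock-word encoding: rw_lock either carries RW_LOCK_READ plus a reader count (the unlocked state is that encoding with a count of zero) or the owning thread's pointer plus a few low flag bits. A minimal sketch of that idea, assuming the standard bit layout; example_rw_wowner is a hypothetical illustration, not the real lv_rw_wowner macro:

        /* Hypothetical illustration of the owner encoding (not the real macro). */
        static inline struct thread *
        example_rw_wowner(uintptr_t v)
        {

                if (v & RW_LOCK_READ)           /* unlocked or read-locked */
                        return (NULL);
                /* Write-locked: the word is the owner's tid plus flag bits. */
                return ((struct thread *)RW_OWNER(v));
        }
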
172 struct rwlock *rw; in lock_rw() local
174 rw = (struct rwlock *)lock; in lock_rw()
176 rw_rlock(rw); in lock_rw()
178 rw_wlock(rw); in lock_rw()
184 struct rwlock *rw; in trylock_rw() local
186 rw = (struct rwlock *)lock; in trylock_rw()
188 return (rw_try_rlock(rw)); in trylock_rw()
190 return (rw_try_wlock(rw)); in trylock_rw()
196 struct rwlock *rw; in unlock_rw() local
198 rw = (struct rwlock *)lock; in unlock_rw()
199 rw_assert(rw, RA_LOCKED | LA_NOTRECURSED); in unlock_rw()
200 if (rw->rw_lock & RW_LOCK_READ) { in unlock_rw()
201 rw_runlock(rw); in unlock_rw()
204 rw_wunlock(rw); in unlock_rw()
213 const struct rwlock *rw = (const struct rwlock *)lock; in owner_rw() local
214 uintptr_t x = rw->rw_lock; in owner_rw()
216 *owner = rw_wowner(rw); in owner_rw()
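
lock_rw(), trylock_rw(), unlock_rw() and owner_rw() above are the generic lock-class callbacks registered through lock_class_rw (visible in the _rw_init_flags() lines below). A reconstruction of the unlock callback, pieced together from the lines shown; the exact signature and the 1-for-read/0-for-write return convention are assumptions here:

        /*
         * Reconstructed for illustration from the lines above.  The callback
         * looks at RW_LOCK_READ in the lock word to tell which kind of hold
         * it is releasing.
         */
        static uintptr_t
        example_unlock_rw(struct lock_object *lock)
        {
                struct rwlock *rw;

                rw = (struct rwlock *)lock;
                rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
                if (rw->rw_lock & RW_LOCK_READ) {
                        rw_runlock(rw);
                        return (1);     /* dropped a read hold */
                } else {
                        rw_wunlock(rw);
                        return (0);     /* dropped the write hold */
                }
        }
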
225 struct rwlock *rw; in _rw_init_flags() local
228 rw = rwlock2rw(c); in _rw_init_flags()
232 ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock, in _rw_init_flags()
234 &rw->rw_lock)); in _rw_init_flags()
250 lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags); in _rw_init_flags()
251 rw->rw_lock = RW_UNLOCKED; in _rw_init_flags()
252 rw->rw_recurse = 0; in _rw_init_flags()
258 struct rwlock *rw; in _rw_destroy() local
260 rw = rwlock2rw(c); in _rw_destroy()
262 KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw)); in _rw_destroy()
263 KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw)); in _rw_destroy()
264 rw->rw_lock = RW_DESTROYED; in _rw_destroy()
265 lock_destroy(&rw->lock_object); in _rw_destroy()
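
_rw_init_flags() and _rw_destroy() sit behind the public rw_init(), rw_init_flags() and rw_destroy() interface. A minimal caller-side sketch; the lock and function names are hypothetical:

        #include <sys/param.h>
        #include <sys/lock.h>
        #include <sys/rwlock.h>

        static struct rwlock example_lock;      /* hypothetical lock */

        static void
        example_setup(void)
        {

                /* rw_init_flags() with RW_RECURSE would set LO_RECURSABLE. */
                rw_init(&example_lock, "example");
        }

        static void
        example_teardown(void)
        {

                /* Must be unlocked here, as the KASSERTs above insist. */
                rw_destroy(&example_lock);
        }
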
288 struct rwlock *rw; in _rw_wlock_cookie() local
291 rw = rwlock2rw(c); in _rw_wlock_cookie()
296 curthread, rw->lock_object.lo_name, file, line)); in _rw_wlock_cookie()
297 KASSERT(rw->rw_lock != RW_DESTROYED, in _rw_wlock_cookie()
299 WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file, in _rw_wlock_cookie()
303 if (!_rw_write_lock_fetch(rw, &v, tid)) in _rw_wlock_cookie()
304 _rw_wlock_hard(rw, v, file, line); in _rw_wlock_cookie()
306 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, in _rw_wlock_cookie()
309 LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line); in _rw_wlock_cookie()
310 WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line); in _rw_wlock_cookie()
315 __rw_try_wlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF) in __rw_try_wlock_int()
329 curthread, rw->lock_object.lo_name, file, line)); in __rw_try_wlock_int()
330 KASSERT(rw->rw_lock != RW_DESTROYED, in __rw_try_wlock_int()
337 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid)) in __rw_try_wlock_int()
341 if (v == tid && (rw->lock_object.lo_flags & LO_RECURSABLE)) { in __rw_try_wlock_int()
342 rw->rw_recurse++; in __rw_try_wlock_int()
343 atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED); in __rw_try_wlock_int()
350 LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line); in __rw_try_wlock_int()
352 WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, in __rw_try_wlock_int()
356 rw, 0, 0, file, line, LOCKSTAT_WRITER); in __rw_try_wlock_int()
365 struct rwlock *rw; in __rw_try_wlock() local
367 rw = rwlock2rw(c); in __rw_try_wlock()
368 return (__rw_try_wlock_int(rw LOCK_FILE_LINE_ARG)); in __rw_try_wlock()
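
The recursion branch above (bumping rw_recurse and setting RW_LOCK_WRITER_RECURSED) only fires when the lock was created recursable. A sketch with a hypothetical recursable lock:

        static struct rwlock example_rec_lock;  /* hypothetical */

        static void
        example_recursion(void)
        {

                rw_init_flags(&example_rec_lock, "example_rec", RW_RECURSE);
                rw_wlock(&example_rec_lock);
                rw_wlock(&example_rec_lock);    /* allowed: LO_RECURSABLE is set */
                rw_wunlock(&example_rec_lock);
                rw_wunlock(&example_rec_lock);  /* now fully released */
                rw_destroy(&example_rec_lock);
        }
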
374 struct rwlock *rw; in _rw_wunlock_cookie() local
376 rw = rwlock2rw(c); in _rw_wunlock_cookie()
378 KASSERT(rw->rw_lock != RW_DESTROYED, in _rw_wunlock_cookie()
381 WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line); in _rw_wunlock_cookie()
382 LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file, in _rw_wunlock_cookie()
386 _rw_wunlock_hard(rw, (uintptr_t)curthread, file, line); in _rw_wunlock_cookie()
388 __rw_wunlock(rw, curthread, file, line); in _rw_wunlock_cookie()
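
Taken together, the _rw_wlock_cookie()/_rw_wunlock_cookie() pair is the out-of-line half of rw_wlock()/rw_wunlock(): the fetch above tries to install curthread's tid into an unlocked lock word, and only the contended case falls into _rw_wlock_hard()/_rw_wunlock_hard(). The caller-side pattern, reusing the hypothetical example_lock from the earlier sketch:

        static void
        example_update(void)
        {

                rw_wlock(&example_lock);
                /* ... exclusive access to the protected state ... */
                rw_wunlock(&example_lock);

                /* Non-blocking attempt; returns nonzero on success. */
                if (rw_try_wlock(&example_lock)) {
                        /* ... */
                        rw_wunlock(&example_lock);
                }
        }
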
414 __rw_rlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp, bool fp in __rw_rlock_try() argument
429 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, vp, in __rw_rlock_try()
431 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_rlock_try()
434 rw, (void *)*vp, in __rw_rlock_try()
444 __rw_rlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v in __rw_rlock_hard() argument
472 if (__rw_rlock_try(rw, td, &v, false LOCK_FILE_LINE_ARG)) in __rw_rlock_hard()
475 all_time -= lockstat_nsecs(&rw->lock_object); in __rw_rlock_hard()
495 lock_profile_obtain_lock_failed(&rw->lock_object, false, in __rw_rlock_hard()
498 THREAD_CONTENDS_ON_LOCK(&rw->lock_object); in __rw_rlock_hard()
501 if (__rw_rlock_try(rw, td, &v, false LOCK_FILE_LINE_ARG)) in __rw_rlock_hard()
516 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_rlock_hard()
519 __func__, rw, owner); in __rw_rlock_hard()
522 "lockname:\"%s\"", rw->lock_object.lo_name); in __rw_rlock_hard()
525 v = RW_READ_VALUE(rw); in __rw_rlock_hard()
536 v = RW_READ_VALUE(rw); in __rw_rlock_hard()
543 rw->lock_object.lo_name); in __rw_rlock_hard()
547 v = RW_READ_VALUE(rw); in __rw_rlock_hard()
573 ts = turnstile_trywait(&rw->lock_object); in __rw_rlock_hard()
579 v = RW_READ_VALUE(rw); in __rw_rlock_hard()
617 if (!atomic_fcmpset_ptr(&rw->rw_lock, &v, in __rw_rlock_hard()
620 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_rlock_hard()
622 __func__, rw); in __rw_rlock_hard()
629 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_rlock_hard()
631 rw); in __rw_rlock_hard()
633 sleep_time -= lockstat_nsecs(&rw->lock_object); in __rw_rlock_hard()
635 MPASS(owner == rw_owner(rw)); in __rw_rlock_hard()
638 sleep_time += lockstat_nsecs(&rw->lock_object); in __rw_rlock_hard()
641 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_rlock_hard()
643 __func__, rw); in __rw_rlock_hard()
644 v = RW_READ_VALUE(rw); in __rw_rlock_hard()
646 THREAD_CONTENTION_DONE(&rw->lock_object); in __rw_rlock_hard()
652 all_time += lockstat_nsecs(&rw->lock_object); in __rw_rlock_hard()
654 LOCKSTAT_RECORD4(rw__block, rw, sleep_time, in __rw_rlock_hard()
660 LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time, in __rw_rlock_hard()
670 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested, in __rw_rlock_hard()
675 __rw_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF) in __rw_rlock_int()
685 td, rw->lock_object.lo_name, file, line)); in __rw_rlock_int()
686 KASSERT(rw->rw_lock != RW_DESTROYED, in __rw_rlock_int()
688 KASSERT(rw_wowner(rw) != td, in __rw_rlock_int()
690 rw->lock_object.lo_name, file, line)); in __rw_rlock_int()
691 WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL); in __rw_rlock_int()
693 v = RW_READ_VALUE(rw); in __rw_rlock_int()
695 !__rw_rlock_try(rw, td, &v, true LOCK_FILE_LINE_ARG))) in __rw_rlock_int()
696 __rw_rlock_hard(rw, td, v LOCK_FILE_LINE_ARG); in __rw_rlock_int()
698 lock_profile_obtain_lock_success(&rw->lock_object, false, 0, 0, in __rw_rlock_int()
701 LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line); in __rw_rlock_int()
702 WITNESS_LOCK(&rw->lock_object, 0, file, line); in __rw_rlock_int()
709 struct rwlock *rw; in __rw_rlock() local
711 rw = rwlock2rw(c); in __rw_rlock()
712 __rw_rlock_int(rw LOCK_FILE_LINE_ARG); in __rw_rlock()
716 __rw_try_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF) in __rw_try_rlock_int()
725 curthread, rw->lock_object.lo_name, file, line)); in __rw_try_rlock_int()
727 x = rw->rw_lock; in __rw_try_rlock_int()
729 KASSERT(rw->rw_lock != RW_DESTROYED, in __rw_try_rlock_int()
733 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &x, x + RW_ONE_READER)) { in __rw_try_rlock_int()
734 LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file, in __rw_try_rlock_int()
736 WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line); in __rw_try_rlock_int()
738 rw, 0, 0, file, line, LOCKSTAT_READER); in __rw_try_rlock_int()
745 LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line); in __rw_try_rlock_int()
752 struct rwlock *rw; in __rw_try_rlock() local
754 rw = rwlock2rw(c); in __rw_try_rlock()
755 return (__rw_try_rlock_int(rw LOCK_FILE_LINE_ARG)); in __rw_try_rlock()
759 __rw_runlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp) in __rw_runlock_try() argument
764 if (atomic_fcmpset_rel_ptr(&rw->rw_lock, vp, in __rw_runlock_try()
766 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_runlock_try()
769 __func__, rw, (void *)*vp, in __rw_runlock_try()
782 __rw_runlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v in __rw_runlock_hard() argument
792 if (__rw_runlock_try(rw, td, &v)) in __rw_runlock_hard()
799 turnstile_chain_lock(&rw->lock_object); in __rw_runlock_hard()
800 v = RW_READ_VALUE(rw); in __rw_runlock_hard()
802 if (__rw_runlock_try(rw, td, &v)) in __rw_runlock_hard()
830 if (!atomic_fcmpset_rel_ptr(&rw->rw_lock, &v, setv)) in __rw_runlock_hard()
832 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_runlock_hard()
834 __func__, rw); in __rw_runlock_hard()
843 ts = turnstile_lookup(&rw->lock_object); in __rw_runlock_hard()
846 rw, (void *)passedv, (void *)v); in __rw_runlock_hard()
853 turnstile_chain_unlock(&rw->lock_object); in __rw_runlock_hard()
855 LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER); in __rw_runlock_hard()
859 _rw_runlock_cookie_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF) in _rw_runlock_cookie_int()
864 KASSERT(rw->rw_lock != RW_DESTROYED, in _rw_runlock_cookie_int()
866 __rw_assert(&rw->rw_lock, RA_RLOCKED, file, line); in _rw_runlock_cookie_int()
867 WITNESS_UNLOCK(&rw->lock_object, 0, file, line); in _rw_runlock_cookie_int()
868 LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line); in _rw_runlock_cookie_int()
871 v = RW_READ_VALUE(rw); in _rw_runlock_cookie_int()
874 !__rw_runlock_try(rw, td, &v))) in _rw_runlock_cookie_int()
875 __rw_runlock_hard(rw, td, v LOCK_FILE_LINE_ARG); in _rw_runlock_cookie_int()
877 lock_profile_release_lock(&rw->lock_object, false); in _rw_runlock_cookie_int()
885 struct rwlock *rw; in _rw_runlock_cookie() local
887 rw = rwlock2rw(c); in _rw_runlock_cookie()
888 _rw_runlock_cookie_int(rw LOCK_FILE_LINE_ARG); in _rw_runlock_cookie()
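
__rw_rlock_int() and _rw_runlock_cookie_int() are the shared-lock counterparts: __rw_rlock_try() and __rw_runlock_try() adjust the reader count with a single atomic_fcmpset, and the hard paths only run when a writer or waiter bits intervene. Caller-side usage with the same hypothetical lock:

        static void
        example_lookup(void)
        {

                rw_rlock(&example_lock);
                /* ... read-only access; other readers may hold it concurrently ... */
                rw_runlock(&example_lock);

                if (rw_try_rlock(&example_lock)) {
                        /* ... */
                        rw_runlock(&example_lock);
                }
        }
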
917 struct rwlock *rw; in __rw_wlock_hard() local
946 rw = rwlock2rw(c); in __rw_wlock_hard()
951 if (_rw_write_lock_fetch(rw, &v, tid)) in __rw_wlock_hard()
956 all_time -= lockstat_nsecs(&rw->lock_object); in __rw_wlock_hard()
969 v = RW_READ_VALUE(rw); in __rw_wlock_hard()
972 KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE, in __rw_wlock_hard()
974 __func__, rw->lock_object.lo_name, file, line)); in __rw_wlock_hard()
975 rw->rw_recurse++; in __rw_wlock_hard()
976 atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED); in __rw_wlock_hard()
977 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wlock_hard()
978 CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw); in __rw_wlock_hard()
982 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wlock_hard()
984 rw->lock_object.lo_name, (void *)rw->rw_lock, file, line); in __rw_wlock_hard()
995 lock_profile_obtain_lock_failed(&rw->lock_object, false, in __rw_wlock_hard()
998 THREAD_CONTENDS_ON_LOCK(&rw->lock_object); in __rw_wlock_hard()
1002 if (_rw_write_lock_fetch(rw, &v, tid)) in __rw_wlock_hard()
1012 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid)) in __rw_wlock_hard()
1028 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wlock_hard()
1030 __func__, rw, owner); in __rw_wlock_hard()
1033 rw->lock_object.lo_name); in __rw_wlock_hard()
1036 v = RW_READ_VALUE(rw); in __rw_wlock_hard()
1052 if (!atomic_fcmpset_ptr(&rw->rw_lock, &v, in __rw_wlock_hard()
1063 rw->lock_object.lo_name); in __rw_wlock_hard()
1067 v = RW_READ_VALUE(rw); in __rw_wlock_hard()
1086 ts = turnstile_trywait(&rw->lock_object); in __rw_wlock_hard()
1087 v = RW_READ_VALUE(rw); in __rw_wlock_hard()
1121 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid | setv)) { in __rw_wlock_hard()
1137 if (!atomic_fcmpset_ptr(&rw->rw_lock, &v, setv)) in __rw_wlock_hard()
1151 if (!atomic_fcmpset_ptr(&rw->rw_lock, &v, in __rw_wlock_hard()
1154 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wlock_hard()
1156 __func__, rw); in __rw_wlock_hard()
1165 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wlock_hard()
1167 rw); in __rw_wlock_hard()
1169 sleep_time -= lockstat_nsecs(&rw->lock_object); in __rw_wlock_hard()
1171 MPASS(owner == rw_owner(rw)); in __rw_wlock_hard()
1174 sleep_time += lockstat_nsecs(&rw->lock_object); in __rw_wlock_hard()
1177 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wlock_hard()
1179 __func__, rw); in __rw_wlock_hard()
1183 v = RW_READ_VALUE(rw); in __rw_wlock_hard()
1185 THREAD_CONTENTION_DONE(&rw->lock_object); in __rw_wlock_hard()
1197 all_time += lockstat_nsecs(&rw->lock_object); in __rw_wlock_hard()
1199 LOCKSTAT_RECORD4(rw__block, rw, sleep_time, in __rw_wlock_hard()
1205 LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time, in __rw_wlock_hard()
1210 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested, in __rw_wlock_hard()
1223 struct rwlock *rw; in __rw_wunlock_hard() local
1232 rw = rwlock2rw(c); in __rw_wunlock_hard()
1234 v = RW_READ_VALUE(rw); in __rw_wunlock_hard()
1237 if (--(rw->rw_recurse) == 0) in __rw_wunlock_hard()
1238 atomic_clear_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED); in __rw_wunlock_hard()
1239 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wunlock_hard()
1240 CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw); in __rw_wunlock_hard()
1244 LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_WRITER); in __rw_wunlock_hard()
1245 if (v == tid && _rw_write_unlock(rw, tid)) in __rw_wunlock_hard()
1248 KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS), in __rw_wunlock_hard()
1251 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wunlock_hard()
1252 CTR2(KTR_LOCK, "%s: %p contested", __func__, rw); in __rw_wunlock_hard()
1254 turnstile_chain_lock(&rw->lock_object); in __rw_wunlock_hard()
1274 v = RW_READ_VALUE(rw); in __rw_wunlock_hard()
1280 atomic_store_rel_ptr(&rw->rw_lock, setv); in __rw_wunlock_hard()
1283 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wunlock_hard()
1284 CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw, in __rw_wunlock_hard()
1287 ts = turnstile_lookup(&rw->lock_object); in __rw_wunlock_hard()
1289 panic("got NULL turnstile on rwlock %p passedv %p v %p", rw, in __rw_wunlock_hard()
1294 turnstile_chain_unlock(&rw->lock_object); in __rw_wunlock_hard()
1303 __rw_try_upgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF) in __rw_try_upgrade_int()
1312 KASSERT(rw->rw_lock != RW_DESTROYED, in __rw_try_upgrade_int()
1314 __rw_assert(&rw->rw_lock, RA_RLOCKED, file, line); in __rw_try_upgrade_int()
1325 v = RW_READ_VALUE(rw); in __rw_try_upgrade_int()
1330 success = atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid); in __rw_try_upgrade_int()
1339 ts = turnstile_trywait(&rw->lock_object); in __rw_try_upgrade_int()
1340 v = RW_READ_VALUE(rw); in __rw_try_upgrade_int()
1353 success = atomic_fcmpset_ptr(&rw->rw_lock, &v, setv); in __rw_try_upgrade_int()
1363 LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line); in __rw_try_upgrade_int()
1366 WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, in __rw_try_upgrade_int()
1368 LOCKSTAT_RECORD0(rw__upgrade, rw); in __rw_try_upgrade_int()
1376 struct rwlock *rw; in __rw_try_upgrade() local
1378 rw = rwlock2rw(c); in __rw_try_upgrade()
1379 return (__rw_try_upgrade_int(rw LOCK_FILE_LINE_ARG)); in __rw_try_upgrade()
1386 __rw_downgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF) in __rw_downgrade_int()
1395 KASSERT(rw->rw_lock != RW_DESTROYED, in __rw_downgrade_int()
1397 __rw_assert(&rw->rw_lock, RA_WLOCKED | RA_NOTRECURSED, file, line); in __rw_downgrade_int()
1399 if (rw_recursed(rw)) in __rw_downgrade_int()
1403 WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line); in __rw_downgrade_int()
1411 if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1))) in __rw_downgrade_int()
1418 turnstile_chain_lock(&rw->lock_object); in __rw_downgrade_int()
1419 v = rw->rw_lock & RW_LOCK_WAITERS; in __rw_downgrade_int()
1428 ts = turnstile_lookup(&rw->lock_object); in __rw_downgrade_int()
1432 atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v); in __rw_downgrade_int()
1442 turnstile_chain_unlock(&rw->lock_object); in __rw_downgrade_int()
1445 LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line); in __rw_downgrade_int()
1446 LOCKSTAT_RECORD0(rw__downgrade, rw); in __rw_downgrade_int()
1452 struct rwlock *rw; in __rw_downgrade() local
1454 rw = rwlock2rw(c); in __rw_downgrade()
1455 __rw_downgrade_int(rw LOCK_FILE_LINE_ARG); in __rw_downgrade()
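
rw_try_upgrade() converts a read hold into a write hold in place and can fail (it only succeeds when the caller is the sole reader), while rw_downgrade() always succeeds in the other direction. A sketch of the usual pattern:

        static void
        example_maybe_modify(void)
        {

                rw_rlock(&example_lock);
                if (rw_try_upgrade(&example_lock)) {
                        /* Write-locked now; the read hold was converted in place. */
                        /* ... modify the protected state ... */
                        rw_downgrade(&example_lock);    /* back to a read hold */
                }
                /* On failure the read hold is still held, so this is always valid. */
                rw_runlock(&example_lock);
        }
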
1471 const struct rwlock *rw; in __rw_assert() local
1476 rw = rwlock2rw(c); in __rw_assert()
1486 witness_assert(&rw->lock_object, what, file, line); in __rw_assert()
1493 if (rw->rw_lock == RW_UNLOCKED || in __rw_assert()
1494 (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED || in __rw_assert()
1495 rw_wowner(rw) != curthread))) in __rw_assert()
1497 rw->lock_object.lo_name, (what & RA_RLOCKED) ? in __rw_assert()
1500 if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) { in __rw_assert()
1501 if (rw_recursed(rw)) { in __rw_assert()
1504 rw->lock_object.lo_name, file, in __rw_assert()
1508 rw->lock_object.lo_name, file, line); in __rw_assert()
1515 if (rw_wowner(rw) != curthread) in __rw_assert()
1517 rw->lock_object.lo_name, file, line); in __rw_assert()
1518 if (rw_recursed(rw)) { in __rw_assert()
1521 rw->lock_object.lo_name, file, line); in __rw_assert()
1524 rw->lock_object.lo_name, file, line); in __rw_assert()
1528 witness_assert(&rw->lock_object, what, file, line); in __rw_assert()
1534 if (rw_wowner(rw) == curthread) in __rw_assert()
1536 rw->lock_object.lo_name, file, line); in __rw_assert()
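
__rw_assert() backs the rw_assert() checks that callers place in code which relies on a lock being held; the checks compile away without INVARIANTS. A small hypothetical example:

        static void
        example_modify_locked(void)
        {

                /* The caller must already hold the writer lock. */
                rw_assert(&example_lock, RA_WLOCKED);
                /* ... mutate the protected state ... */
        }
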
1550 const struct rwlock *rw; in db_show_rwlock() local
1553 rw = (const struct rwlock *)lock; in db_show_rwlock()
1556 if (rw->rw_lock == RW_UNLOCKED) in db_show_rwlock()
1558 else if (rw->rw_lock == RW_DESTROYED) { in db_show_rwlock()
1561 } else if (rw->rw_lock & RW_LOCK_READ) in db_show_rwlock()
1563 (uintmax_t)(RW_READERS(rw->rw_lock))); in db_show_rwlock()
1565 td = rw_wowner(rw); in db_show_rwlock()
1568 if (rw_recursed(rw)) in db_show_rwlock()
1569 db_printf(" recursed: %u\n", rw->rw_recurse); in db_show_rwlock()
1572 switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) { in db_show_rwlock()