Lines Matching +full:lock +full:- +full:state

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
34 * deterministic lock granting behavior, so that slocks and xlocks are
37 * Priority propagation will not generally raise the priority of lock holders,
50 #include <sys/lock.h>
73 PMC_SOFT_DECLARE( , , lock, failed);
102 while (_giantcnt--) \
109 * Returns true if an exclusive lock is recursed. It assumes
110 * curthread currently has an exclusive lock.
112 #define sx_recursed(sx) ((sx)->sx_recurse != 0)
114 static void assert_sx(const struct lock_object *lock, int what);
116 static void db_show_sx(const struct lock_object *lock);
118 static void lock_sx(struct lock_object *lock, uintptr_t how);
120 static int owner_sx(const struct lock_object *lock, struct thread **owner);
122 static uintptr_t unlock_sx(struct lock_object *lock);
175 assert_sx(const struct lock_object *lock, int what) in assert_sx() argument
178 sx_assert((const struct sx *)lock, what); in assert_sx()
182 lock_sx(struct lock_object *lock, uintptr_t how) in lock_sx() argument
186 sx = (struct sx *)lock; in lock_sx()
194 unlock_sx(struct lock_object *lock) in unlock_sx() argument
198 sx = (struct sx *)lock; in unlock_sx()
211 owner_sx(const struct lock_object *lock, struct thread **owner) in owner_sx() argument
216 sx = (const struct sx *)lock; in owner_sx()
217 x = sx->sx_lock; in owner_sx()
229 sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags); in sx_sysinit()
239 ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock, in sx_init_flags()
241 &sx->sx_lock)); in sx_init_flags()
257 lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags); in sx_init_flags()
258 sx->sx_lock = SX_LOCK_UNLOCKED; in sx_init_flags()
259 sx->sx_recurse = 0; in sx_init_flags()
266 KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held")); in sx_destroy()
267 KASSERT(sx->sx_recurse == 0, ("sx lock still recursed")); in sx_destroy()
268 sx->sx_lock = SX_LOCK_DESTROYED; in sx_destroy()
269 lock_destroy(&sx->lock_object); in sx_destroy()
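For orientation, here is a minimal sketch of the lifecycle that the sx_init_flags() and sx_destroy() fragments above implement, written against the public sx(9) KPI. The example_sx variable and the setup/teardown functions are illustrative names, not part of this file:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/sx.h>

    static struct sx example_sx;        /* hypothetical lock for the sketches below */

    static void
    example_setup(void)
    {
        /* SX_RECURSE sets LO_RECURSABLE, so exclusive acquisitions may recurse. */
        sx_init_flags(&example_sx, "example sx", SX_RECURSE);
    }

    static void
    example_teardown(void)
    {
        /* The lock must be unheld and unrecursed, or the KASSERTs above fire. */
        sx_destroy(&example_sx);
    }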
284 x = sx->sx_lock; in sx_try_slock_int()
291 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) { in sx_try_slock_int()
292 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line); in sx_try_slock_int()
293 WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line); in sx_try_slock_int()
297 curthread->td_sx_slocks++; in sx_try_slock_int()
302 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line); in sx_try_slock_int()
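A hedged sketch of how a caller might use the trylock path shown above, continuing with the illustrative example_sx; the function name is made up for the example:

    static bool
    example_read_opportunistic(void)
    {
        if (!sx_try_slock(&example_sx))
            return (false);     /* would have blocked; caller takes a fallback path */
        /* ... read the shared data ... */
        sx_sunlock(&example_sx);
        return (true);
    }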
323 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, in _sx_xlock()
325 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file, in _sx_xlock()
329 if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) in _sx_xlock()
335 LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse, in _sx_xlock()
337 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line); in _sx_xlock()
360 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, in sx_try_xlock_int()
367 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) in sx_try_xlock_int()
371 if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) { in sx_try_xlock_int()
372 sx->sx_recurse++; in sx_try_xlock_int()
373 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED); in sx_try_xlock_int()
380 LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line); in sx_try_xlock_int()
382 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, in sx_try_xlock_int()
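The LO_RECURSABLE test above is what allows an owner to re-acquire its own exclusive lock. A brief illustrative sketch, valid only because example_sx was created with SX_RECURSE:

    static void
    example_recurse(void)
    {
        sx_xlock(&example_sx);
        sx_xlock(&example_sx);      /* recurses rather than self-deadlocking */
        sx_xunlock(&example_sx);
        sx_xunlock(&example_sx);    /* the final unlock releases the lock */
    }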
404 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, in _sx_xunlock()
407 WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line); in _sx_xunlock()
408 LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file, in _sx_xunlock()
419 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
420 * This will only succeed if this thread holds a single shared lock.
433 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, in sx_try_upgrade_int()
438 * Try to switch from one shared lock to an exclusive lock. We need in sx_try_upgrade_int()
440 * we will wake up the exclusive waiters when we drop the lock. in sx_try_upgrade_int()
448 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, in sx_try_upgrade_int()
454 LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line); in sx_try_upgrade_int()
456 curthread->td_sx_slocks--; in sx_try_upgrade_int()
457 WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, in sx_try_upgrade_int()
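As the comment above notes, the upgrade succeeds only when this thread holds the sole shared reference. A sketch of the usual caller pattern, again using the illustrative example_sx:

    static void
    example_read_then_maybe_write(void)
    {
        sx_slock(&example_sx);
        /* ... decide the shared data must be modified ... */
        if (sx_try_upgrade(&example_sx)) {
            /* Now exclusive; the lock was never released in between. */
            /* ... modify ... */
            sx_xunlock(&example_sx);
            return;
        }
        /* Other sharers exist: drop, relock exclusively, and re-validate. */
        sx_sunlock(&example_sx);
        sx_xlock(&example_sx);
        /* ... re-check state, since the lock was released for a moment ... */
        sx_xunlock(&example_sx);
    }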
472 * Downgrade an unrecursed exclusive lock into a single shared lock.
482 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, in sx_downgrade_int()
487 panic("downgrade of a recursed lock"); in sx_downgrade_int()
490 WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line); in sx_downgrade_int()
493 * Try to switch from an exclusive lock with no shared waiters in sx_downgrade_int()
495 * exclusive waiters, we don't need to lock the sleep queue so in sx_downgrade_int()
497 * that fails we grab the sleepq lock to keep the flags from in sx_downgrade_int()
500 * We have to lock the sleep queue if there are shared waiters in sx_downgrade_int()
503 x = sx->sx_lock; in sx_downgrade_int()
505 atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) | in sx_downgrade_int()
510 * Lock the sleep queue so we can read the waiters bits in sx_downgrade_int()
513 sleepq_lock(&sx->lock_object); in sx_downgrade_int()
517 * shared lock. If there are any shared waiters, wake them up. in sx_downgrade_int()
519 x = sx->sx_lock; in sx_downgrade_int()
520 atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | in sx_downgrade_int()
523 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, in sx_downgrade_int()
525 sleepq_release(&sx->lock_object); in sx_downgrade_int()
528 curthread->td_sx_slocks++; in sx_downgrade_int()
529 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line); in sx_downgrade_int()
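The matching caller-side pattern for the downgrade path, sketched with the illustrative example_sx; note the panic above if the lock is still recursed:

    static void
    example_update_then_read(void)
    {
        sx_xlock(&example_sx);
        /* ... exclusive-only update; must not be recursed when downgrading ... */
        sx_downgrade(&example_sx);
        /* ... keep reading under a shared lock, letting other readers in ... */
        sx_sunlock(&example_sx);
    }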
550 (*extra_work)--; in sx_drop_critical()
558 * This function represents the so-called 'hard case' for sx_xlock
586 uintptr_t state = 0; in _sx_xlock_hard() local
598 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) in _sx_xlock_hard()
603 all_time -= lockstat_nsecs(&sx->lock_object); in _sx_xlock_hard()
605 state = x; in _sx_xlock_hard()
618 /* If we already hold an exclusive lock, then recurse. */ in _sx_xlock_hard()
620 KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0, in _sx_xlock_hard()
621 ("_sx_xlock_hard: recursed on non-recursive sx %p @ %s:%d\n", in _sx_xlock_hard()
623 sx->sx_recurse++; in _sx_xlock_hard()
624 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED); in _sx_xlock_hard()
625 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_xlock_hard()
630 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_xlock_hard()
631 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__, in _sx_xlock_hard()
632 sx->lock_object.lo_name, (void *)sx->sx_lock, file, line); in _sx_xlock_hard()
641 PMC_SOFT_CALL( , , lock, failed); in _sx_xlock_hard()
643 lock_profile_obtain_lock_failed(&sx->lock_object, false, &contested, in _sx_xlock_hard()
650 THREAD_CONTENDS_ON_LOCK(&sx->lock_object); in _sx_xlock_hard()
654 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) in _sx_xlock_hard()
666 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) in _sx_xlock_hard()
672 * If the lock is write locked and the owner is in _sx_xlock_hard()
674 * running or the state of the lock changes. in _sx_xlock_hard()
682 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_xlock_hard()
687 sx->lock_object.lo_name); in _sx_xlock_hard()
706 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x, in _sx_xlock_hard()
710 extra_work--; in _sx_xlock_hard()
717 sx->lock_object.lo_name); in _sx_xlock_hard()
740 sleepq_lock(&sx->lock_object); in _sx_xlock_hard()
745 * If the lock was released while spinning on the in _sx_xlock_hard()
746 * sleep queue chain lock, try again. in _sx_xlock_hard()
749 sleepq_release(&sx->lock_object); in _sx_xlock_hard()
756 * The current lock owner might have started executing in _sx_xlock_hard()
757 * on another CPU (or the lock could have changed in _sx_xlock_hard()
759 * chain lock. If so, drop the sleep queue lock and try in _sx_xlock_hard()
765 sleepq_release(&sx->lock_object); in _sx_xlock_hard()
771 sleepq_release(&sx->lock_object); in _sx_xlock_hard()
778 * If an exclusive lock was released with both shared in _sx_xlock_hard()
780 * woken up and acquired the lock yet, sx_lock will be in _sx_xlock_hard()
790 if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid | setx)) in _sx_xlock_hard()
792 sleepq_release(&sx->lock_object); in _sx_xlock_hard()
802 * a writer ready to grab the lock. Thus clear the bit since in _sx_xlock_hard()
810 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x, in _sx_xlock_hard()
824 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x, in _sx_xlock_hard()
828 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_xlock_hard()
838 * lock and the exclusive waiters flag is set, we have in _sx_xlock_hard()
841 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_xlock_hard()
846 sleep_time -= lockstat_nsecs(&sx->lock_object); in _sx_xlock_hard()
848 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name, in _sx_xlock_hard()
854 * lock we are waiting for is set. in _sx_xlock_hard()
856 THREAD_CONTENTION_DONE(&sx->lock_object); in _sx_xlock_hard()
858 sleepq_wait(&sx->lock_object, 0); in _sx_xlock_hard()
860 error = sleepq_wait_sig(&sx->lock_object, 0); in _sx_xlock_hard()
861 THREAD_CONTENDS_ON_LOCK(&sx->lock_object); in _sx_xlock_hard()
863 sleep_time += lockstat_nsecs(&sx->lock_object); in _sx_xlock_hard()
867 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_xlock_hard()
873 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_xlock_hard()
878 THREAD_CONTENTION_DONE(&sx->lock_object); in _sx_xlock_hard()
891 all_time += lockstat_nsecs(&sx->lock_object); in _sx_xlock_hard()
894 LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0, in _sx_xlock_hard()
895 (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); in _sx_xlock_hard()
897 LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time, in _sx_xlock_hard()
898 LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0, in _sx_xlock_hard()
899 (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); in _sx_xlock_hard()
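The SX_INTERRUPTIBLE/sleepq_wait_sig() branch of this hard case is what callers reach through sx_xlock_sig(). A brief sketch, assuming the illustrative example_sx from above:

    static int
    example_modify_interruptible(void)
    {
        int error;

        /* Returns nonzero (e.g. EINTR/ERESTART) if the sleep is broken by a signal. */
        error = sx_xlock_sig(&example_sx);
        if (error != 0)
            return (error);
        /* ... modify the protected data ... */
        sx_xunlock(&example_sx);
        return (0);
    }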
909 * This function represents the so-called 'hard case' for sx_xunlock
931 /* The lock is recursed, unrecurse one level. */ in _sx_xunlock_hard()
932 if ((--sx->sx_recurse) == 0) in _sx_xunlock_hard()
933 atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED); in _sx_xunlock_hard()
934 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_xunlock_hard()
941 atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED)) in _sx_xunlock_hard()
944 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_xunlock_hard()
947 sleepq_lock(&sx->lock_object); in _sx_xunlock_hard()
955 * state of the exclusive waiters flag. in _sx_xunlock_hard()
963 sleepq_sleepcnt(&sx->lock_object, SQ_EXCLUSIVE_QUEUE) != 0) { in _sx_xunlock_hard()
967 atomic_store_rel_ptr(&sx->sx_lock, setx); in _sx_xunlock_hard()
970 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_xunlock_hard()
975 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue); in _sx_xunlock_hard()
976 sleepq_release(&sx->lock_object); in _sx_xunlock_hard()
986 if (!fp && td->td_sx_slocks && (x & SX_LOCK_SHARED)) in __sx_can_read()
997 * If no other thread has an exclusive lock then try to bump up in __sx_slock_try()
998 * the count of sharers. Since we have to preserve the state in __sx_slock_try()
1000 * shared lock loop back and retry. in __sx_slock_try()
1003 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp, in __sx_slock_try()
1005 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in __sx_slock_try()
1006 CTR4(KTR_LOCK, "%s: %p succeeded %p -> %p", in __sx_slock_try()
1009 td->td_sx_slocks++; in __sx_slock_try()
1037 uintptr_t state = 0; in _sx_slock_hard() local
1048 all_time -= lockstat_nsecs(&sx->lock_object); in _sx_slock_hard()
1050 state = x; in _sx_slock_hard()
1066 PMC_SOFT_CALL( , , lock, failed); in _sx_slock_hard()
1068 lock_profile_obtain_lock_failed(&sx->lock_object, false, &contested, in _sx_slock_hard()
1075 THREAD_CONTENDS_ON_LOCK(&sx->lock_object); in _sx_slock_hard()
1094 * the owner stops running or the state of the lock in _sx_slock_hard()
1100 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_slock_hard()
1106 "lockname:\"%s\"", sx->lock_object.lo_name); in _sx_slock_hard()
1126 sx->lock_object.lo_name); in _sx_slock_hard()
1151 * Some other thread already has an exclusive lock, so in _sx_slock_hard()
1154 sleepq_lock(&sx->lock_object); in _sx_slock_hard()
1159 sleepq_release(&sx->lock_object); in _sx_slock_hard()
1166 * the owner stops running or the state of the lock in _sx_slock_hard()
1172 sleepq_release(&sx->lock_object); in _sx_slock_hard()
1181 * fail to set it drop the sleep queue lock and loop in _sx_slock_hard()
1185 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x, in _sx_slock_hard()
1188 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_slock_hard()
1194 * Since we have been unable to acquire the shared lock, in _sx_slock_hard()
1197 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_slock_hard()
1202 sleep_time -= lockstat_nsecs(&sx->lock_object); in _sx_slock_hard()
1204 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name, in _sx_slock_hard()
1210 * lock we are waiting for is set. in _sx_slock_hard()
1212 THREAD_CONTENTION_DONE(&sx->lock_object); in _sx_slock_hard()
1214 sleepq_wait(&sx->lock_object, 0); in _sx_slock_hard()
1216 error = sleepq_wait_sig(&sx->lock_object, 0); in _sx_slock_hard()
1217 THREAD_CONTENDS_ON_LOCK(&sx->lock_object); in _sx_slock_hard()
1219 sleep_time += lockstat_nsecs(&sx->lock_object); in _sx_slock_hard()
1223 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_slock_hard()
1229 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_slock_hard()
1234 THREAD_CONTENTION_DONE(&sx->lock_object); in _sx_slock_hard()
1240 all_time += lockstat_nsecs(&sx->lock_object); in _sx_slock_hard()
1243 LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0, in _sx_slock_hard()
1244 (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); in _sx_slock_hard()
1246 LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time, in _sx_slock_hard()
1247 LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0, in _sx_slock_hard()
1248 (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); in _sx_slock_hard()
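As with the exclusive path, the interruptible variant of this shared hard case is reached through sx_slock_sig(); a short sketch, again with the illustrative example_sx:

    static int
    example_read_interruptible(void)
    {
        int error;

        error = sx_slock_sig(&example_sx);  /* may return EINTR/ERESTART */
        if (error != 0)
            return (error);
        /* ... read the shared data ... */
        sx_sunlock(&example_sx);
        return (0);
    }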
1270 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, in _sx_slock_int()
1272 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL); in _sx_slock_int()
1281 lock_profile_obtain_lock_success(&sx->lock_object, false, 0, 0, in _sx_slock_int()
1284 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line); in _sx_slock_int()
1285 WITNESS_LOCK(&sx->lock_object, 0, file, line); in _sx_slock_int()
1304 if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp, in _sx_sunlock_try()
1305 *xp - SX_ONE_SHARER)) { in _sx_sunlock_try()
1306 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_sunlock_try()
1308 "%s: %p succeeded %p -> %p", in _sx_sunlock_try()
1310 (void *)(*xp - SX_ONE_SHARER)); in _sx_sunlock_try()
1311 td->td_sx_slocks--; in _sx_sunlock_try()
1333 sleepq_lock(&sx->lock_object); in _sx_sunlock_hard()
1342 * Note that the state of the lock could have changed, in _sx_sunlock_hard()
1352 if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx)) in _sx_sunlock_hard()
1354 if (LOCK_LOG_TEST(&sx->lock_object, 0)) in _sx_sunlock_hard()
1357 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue); in _sx_sunlock_hard()
1358 td->td_sx_slocks--; in _sx_sunlock_hard()
1361 sleepq_release(&sx->lock_object); in _sx_sunlock_hard()
1372 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, in _sx_sunlock_int()
1375 WITNESS_UNLOCK(&sx->lock_object, 0, file, line); in _sx_sunlock_int()
1376 LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line); in _sx_sunlock_int()
1384 lock_profile_release_lock(&sx->lock_object, false); in _sx_sunlock_int()
1402 * In the non-WITNESS case, sx_assert() can only detect that at least
1427 witness_assert(&sx->lock_object, what, file, line); in _sx_assert()
1430 * If some other thread has an exclusive lock or we in _sx_assert()
1431 * have one and are asserting a shared lock, fail. in _sx_assert()
1432 * Also, if no one has a lock at all, fail. in _sx_assert()
1434 if (sx->sx_lock == SX_LOCK_UNLOCKED || in _sx_assert()
1435 (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked || in _sx_assert()
1437 panic("Lock %s not %slocked @ %s:%d\n", in _sx_assert()
1438 sx->lock_object.lo_name, slocked ? "share " : "", in _sx_assert()
1441 if (!(sx->sx_lock & SX_LOCK_SHARED)) { in _sx_assert()
1444 panic("Lock %s recursed @ %s:%d\n", in _sx_assert()
1445 sx->lock_object.lo_name, file, in _sx_assert()
1448 panic("Lock %s not recursed @ %s:%d\n", in _sx_assert()
1449 sx->lock_object.lo_name, file, line); in _sx_assert()
1457 panic("Lock %s not exclusively locked @ %s:%d\n", in _sx_assert()
1458 sx->lock_object.lo_name, file, line); in _sx_assert()
1461 panic("Lock %s recursed @ %s:%d\n", in _sx_assert()
1462 sx->lock_object.lo_name, file, line); in _sx_assert()
1464 panic("Lock %s not recursed @ %s:%d\n", in _sx_assert()
1465 sx->lock_object.lo_name, file, line); in _sx_assert()
1469 witness_assert(&sx->lock_object, what, file, line); in _sx_assert()
1472 * If we hold an exclusive lock, fail. We can't in _sx_assert()
1473 * reliably check to see if we hold a shared lock or in _sx_assert()
1477 panic("Lock %s exclusively locked @ %s:%d\n", in _sx_assert()
1478 sx->lock_object.lo_name, file, line); in _sx_assert()
1482 panic("Unknown sx lock assertion: %d @ %s:%d", what, file, in _sx_assert()
1490 db_show_sx(const struct lock_object *lock) in db_show_sx() argument
1495 sx = (const struct sx *)lock; in db_show_sx()
1497 db_printf(" state: "); in db_show_sx()
1498 if (sx->sx_lock == SX_LOCK_UNLOCKED) in db_show_sx()
1500 else if (sx->sx_lock == SX_LOCK_DESTROYED) { in db_show_sx()
1503 } else if (sx->sx_lock & SX_LOCK_SHARED) in db_show_sx()
1504 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock)); in db_show_sx()
1508 td->td_tid, td->td_proc->p_pid, td->td_name); in db_show_sx()
1510 db_printf(" recursed: %d\n", sx->sx_recurse); in db_show_sx()
1514 switch(sx->sx_lock & in db_show_sx()
1532 * blocked on an sx lock. If so, output some details and return true.
1533 * If the lock has an exclusive owner, return that in *ownerp.
1541 * Check to see if this thread is blocked on an sx lock. in sx_chain()
1543 * purported lock should have the lock class index of sx, and the lock in sx_chain()
1546 sx = td->td_wchan; in sx_chain()
1547 if (!TD_ON_SLEEPQ(td) || sleepq_type(td->td_wchan) != SLEEPQ_SX || in sx_chain()
1548 LOCK_CLASS(&sx->lock_object) != &lock_class_sx || in sx_chain()
1549 sx->lock_object.lo_name != td->td_wmesg) in sx_chain()
1552 /* We think we have an sx lock, so output some details. */ in sx_chain()
1553 db_printf("blocked on lock %p (%s) \"%s\" ", &sx->lock_object, in sx_chain()
1554 lock_class_sx.lc_name, td->td_wmesg); in sx_chain()
1556 if (sx->sx_lock & SX_LOCK_SHARED) in sx_chain()
1558 (uintmax_t)SX_SHARERS(sx->sx_lock)); in sx_chain()