Lines matching +full:0 +full:m in FreeBSD's sys/kern/kern_mutex.c. Each hit shows the source line number, the matching line, and the enclosing function.
93 #define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
95 #define mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)
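Both predicates compare the raw lock word against a sentinel: mtx_lock holds either a sentinel value (MTX_UNOWNED, MTX_DESTROYED) or the owning thread pointer OR'ed with low flag bits such as MTX_RECURSED and MTX_CONTESTED. A standalone sketch of that encoding, with made-up bit values (the real ones live in sys/sys/mutex.h):

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit values only; not the kernel's. */
#define M_RECURSED	0x1u
#define M_CONTESTED	0x2u
#define M_DESTROYED	0x4u
#define M_UNOWNED	0x8u
#define M_FLAGMASK	0xfu

#define m_unowned(lw)	((lw) == M_UNOWNED)
#define m_destroyed(lw)	((lw) == M_DESTROYED)
#define m_owner(lw)	((lw) & ~(uintptr_t)M_FLAGMASK)

int
main(void)
{
	uintptr_t lw = M_UNOWNED;

	printf("unowned? %d\n", m_unowned(lw));
	lw = (uintptr_t)0xdeadbee0 | M_CONTESTED;	/* fake owner + contested bit */
	printf("owner = %#lx, unowned? %d\n",
	    (unsigned long)m_owner(lw), m_unowned(lw));
	return (0);
}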
(The next four hits are continuation lines of SYSCTL declarations; the last two are the tails of the knobs exporting mtx_spin_delay.base and mtx_spin_delay.max, the spin-mutex backoff tunables.)
152 0, "");
154 0, "");
170 &mtx_spin_delay.base, 0, "");
172 &mtx_spin_delay.max, 0, "");
240 struct mtx *m; in unlock_mtx()
242 m = (struct mtx *)lock; in unlock_mtx()
243 mtx_assert(m, MA_OWNED | MA_NOTRECURSED); in unlock_mtx()
244 mtx_unlock(m); in unlock_mtx()
245 return (0); in unlock_mtx()
251 struct mtx *m; in unlock_spin()
253 m = (struct mtx *)lock; in unlock_spin()
254 mtx_assert(m, MA_OWNED | MA_NOTRECURSED); in unlock_spin()
255 mtx_unlock_spin(m); in unlock_spin()
256 return (0); in unlock_spin()
263 const struct mtx *m; in owner_mtx()
266 m = (const struct mtx *)lock; in owner_mtx()
267 x = m->mtx_lock; in owner_mtx()
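unlock_mtx(), unlock_spin(), and owner_mtx() are not called directly; they are the class methods the generic lock layer invokes through struct lock_class. A hedged sketch of how this file wires them up (abridged; the exact initializer differs across versions):

struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_unlock = unlock_mtx,	/* shown above */
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,		/* shown above */
#endif
};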
280 struct mtx *m; in __mtx_lock_flags()
283 m = mtxlock2mtx(c); in __mtx_lock_flags()
285 KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() || in __mtx_lock_flags()
288 curthread, m->lock_object.lo_name, file, line)); in __mtx_lock_flags()
289 KASSERT(m->mtx_lock != MTX_DESTROYED, in __mtx_lock_flags()
291 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, in __mtx_lock_flags()
292 ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, in __mtx_lock_flags()
294 WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) | in __mtx_lock_flags()
299 if (!_mtx_obtain_lock_fetch(m, &v, tid)) in __mtx_lock_flags()
300 _mtx_lock_sleep(m, v, opts, file, line); in __mtx_lock_flags()
303 m, 0, 0, file, line); in __mtx_lock_flags()
304 LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, in __mtx_lock_flags()
306 WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE, in __mtx_lock_flags()
314 struct mtx *m; in __mtx_unlock_flags()
316 m = mtxlock2mtx(c); in __mtx_unlock_flags()
318 KASSERT(m->mtx_lock != MTX_DESTROYED, in __mtx_unlock_flags()
320 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, in __mtx_unlock_flags()
321 ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, in __mtx_unlock_flags()
323 WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); in __mtx_unlock_flags()
324 LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file, in __mtx_unlock_flags()
326 mtx_assert(m, MA_OWNED); in __mtx_unlock_flags()
331 __mtx_unlock(m, curthread, opts, file, line); in __mtx_unlock_flags()
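__mtx_lock_flags() and __mtx_unlock_flags() are what the mtx_lock()/mtx_unlock() macros expand to. A minimal consumer in the style of mutex(9); the foo_* names are hypothetical:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx foo_mtx;
MTX_SYSINIT(foo_mtx, &foo_mtx, "foo mutex", MTX_DEF);

static int foo_count;

static void
foo_bump(void)
{
	mtx_lock(&foo_mtx);	/* blocks on a turnstile if contended */
	foo_count++;
	mtx_unlock(&foo_mtx);
}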
340 struct mtx *m; in __mtx_lock_spin_flags()
345 m = mtxlock2mtx(c); in __mtx_lock_spin_flags()
347 KASSERT(m->mtx_lock != MTX_DESTROYED, in __mtx_lock_spin_flags()
349 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, in __mtx_lock_spin_flags()
351 m->lock_object.lo_name, file, line)); in __mtx_lock_spin_flags()
352 if (mtx_owned(m)) in __mtx_lock_spin_flags()
353 KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || in __mtx_lock_spin_flags()
354 (opts & MTX_RECURSE) != 0, in __mtx_lock_spin_flags()
356 m->lock_object.lo_name, file, line)); in __mtx_lock_spin_flags()
358 WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE, in __mtx_lock_spin_flags()
364 if (!_mtx_obtain_lock_fetch(m, &v, tid)) in __mtx_lock_spin_flags()
365 _mtx_lock_spin(m, v, opts, file, line); in __mtx_lock_spin_flags()
368 m, 0, 0, file, line); in __mtx_lock_spin_flags()
370 __mtx_lock_spin(m, curthread, opts, file, line); in __mtx_lock_spin_flags()
372 LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, in __mtx_lock_spin_flags()
374 WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); in __mtx_lock_spin_flags()
381 struct mtx *m; in __mtx_trylock_spin_flags()
386 m = mtxlock2mtx(c); in __mtx_trylock_spin_flags()
388 KASSERT(m->mtx_lock != MTX_DESTROYED, in __mtx_trylock_spin_flags()
390 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, in __mtx_trylock_spin_flags()
392 m->lock_object.lo_name, file, line)); in __mtx_trylock_spin_flags()
393 KASSERT((opts & MTX_RECURSE) == 0, in __mtx_trylock_spin_flags()
395 m->lock_object.lo_name, file, line)); in __mtx_trylock_spin_flags()
396 if (__mtx_trylock_spin(m, curthread, opts, file, line)) { in __mtx_trylock_spin_flags()
397 LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line); in __mtx_trylock_spin_flags()
398 WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); in __mtx_trylock_spin_flags()
401 LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line); in __mtx_trylock_spin_flags()
402 return (0); in __mtx_trylock_spin_flags()
409 struct mtx *m; in __mtx_unlock_spin_flags()
411 m = mtxlock2mtx(c); in __mtx_unlock_spin_flags()
413 KASSERT(m->mtx_lock != MTX_DESTROYED, in __mtx_unlock_spin_flags()
415 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, in __mtx_unlock_spin_flags()
417 m->lock_object.lo_name, file, line)); in __mtx_unlock_spin_flags()
418 WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); in __mtx_unlock_spin_flags()
419 LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file, in __mtx_unlock_spin_flags()
421 mtx_assert(m, MA_OWNED); in __mtx_unlock_spin_flags()
423 __mtx_unlock_spin(m); in __mtx_unlock_spin_flags()
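The spin variants above additionally disable interrupts on the local CPU (spinlock_enter() inside __mtx_lock_spin()) and busy-wait rather than block, which makes them usable from filter interrupt handlers. A hypothetical consumer:

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx isr_mtx;
MTX_SYSINIT(isr_mtx, &isr_mtx, "isr mtx", MTX_SPIN);

static int
isr_filter(void *arg)
{
	mtx_lock_spin(&isr_mtx);	/* interrupts off, spins if contended */
	/* ... touch state shared with top-half code ... */
	mtx_unlock_spin(&isr_mtx);
	return (FILTER_HANDLED);
}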
428 * Tries to acquire lock `m.' If this function is called on a mutex that is already owned, it will recursively acquire the lock. in _mtx_trylock_flags_int()
432 _mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF) in _mtx_trylock_flags_int()
437 uint64_t waittime = 0; in _mtx_trylock_flags_int()
438 int contested = 0; in _mtx_trylock_flags_int()
448 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td), in _mtx_trylock_flags_int()
450 curthread, m->lock_object.lo_name, file, line)); in _mtx_trylock_flags_int()
451 KASSERT(m->mtx_lock != MTX_DESTROYED, in _mtx_trylock_flags_int()
453 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, in _mtx_trylock_flags_int()
454 ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, in _mtx_trylock_flags_int()
461 if (_mtx_obtain_lock_fetch(m, &v, tid)) in _mtx_trylock_flags_int()
466 ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || in _mtx_trylock_flags_int()
467 (opts & MTX_RECURSE) != 0)) { in _mtx_trylock_flags_int()
468 m->mtx_recurse++; in _mtx_trylock_flags_int()
469 atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); in _mtx_trylock_flags_int()
473 rval = 0; in _mtx_trylock_flags_int()
479 LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line); in _mtx_trylock_flags_int()
481 WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK, in _mtx_trylock_flags_int()
486 m, contested, waittime, file, line); in _mtx_trylock_flags_int()
495 struct mtx *m; in _mtx_trylock_flags_()
497 m = mtxlock2mtx(c); in _mtx_trylock_flags_()
498 return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG)); in _mtx_trylock_flags_()
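mtx_trylock() never blocks and returns nonzero only on success, so it suits opportunistic paths. A common pattern, reusing the hypothetical foo_mtx from the earlier sketch:

static void
foo_poll(void)
{
	if (!mtx_trylock(&foo_mtx))
		return;			/* contended; retry on the next tick */
	foo_count++;
	mtx_unlock(&foo_mtx);
}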
507 #if LOCK_DEBUG > 0
517 struct mtx *m; in __mtx_lock_sleep()
522 int contested = 0; in __mtx_lock_sleep()
523 uint64_t waittime = 0; in __mtx_lock_sleep()
529 u_int sleep_cnt = 0; in __mtx_lock_sleep()
530 int64_t sleep_time = 0; in __mtx_lock_sleep()
531 int64_t all_time = 0; in __mtx_lock_sleep()
534 int doing_lockprof = 0; in __mtx_lock_sleep()
539 m = mtxlock2mtx(c); in __mtx_lock_sleep()
544 if (_mtx_obtain_lock_fetch(m, &v, tid)) in __mtx_lock_sleep()
548 all_time -= lockstat_nsecs(&m->lock_object); in __mtx_lock_sleep()
559 v = MTX_READ_VALUE(m); in __mtx_lock_sleep()
562 KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || in __mtx_lock_sleep()
563 (opts & MTX_RECURSE) != 0, in __mtx_lock_sleep()
565 m->lock_object.lo_name, file, line)); in __mtx_lock_sleep()
566 #if LOCK_DEBUG > 0 in __mtx_lock_sleep()
569 m->mtx_recurse++; in __mtx_lock_sleep()
570 atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); in __mtx_lock_sleep()
571 if (LOCK_LOG_TEST(&m->lock_object, opts)) in __mtx_lock_sleep()
572 CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m); in __mtx_lock_sleep()
575 #if LOCK_DEBUG > 0 in __mtx_lock_sleep()
588 lock_profile_obtain_lock_failed(&m->lock_object, false, in __mtx_lock_sleep()
590 if (LOCK_LOG_TEST(&m->lock_object, opts)) in __mtx_lock_sleep()
593 m->lock_object.lo_name, (void *)m->mtx_lock, file, line); in __mtx_lock_sleep()
595 THREAD_CONTENDS_ON_LOCK(&m->lock_object); in __mtx_lock_sleep()
599 if (_mtx_obtain_lock_fetch(m, &v, tid)) in __mtx_lock_sleep()
613 if (LOCK_LOG_TEST(&m->lock_object, 0)) in __mtx_lock_sleep()
616 __func__, m, owner); in __mtx_lock_sleep()
620 m->lock_object.lo_name); in __mtx_lock_sleep()
623 v = MTX_READ_VALUE(m); in __mtx_lock_sleep()
633 ts = turnstile_trywait(&m->lock_object); in __mtx_lock_sleep()
634 v = MTX_READ_VALUE(m); in __mtx_lock_sleep()
666 if ((v & MTX_CONTESTED) == 0 && in __mtx_lock_sleep()
667 !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) { in __mtx_lock_sleep()
674 mtx_assert(m, MA_NOTOWNED); in __mtx_lock_sleep()
680 sleep_time -= lockstat_nsecs(&m->lock_object); in __mtx_lock_sleep()
683 owner = mtx_owner(m); in __mtx_lock_sleep()
685 MPASS(owner == mtx_owner(m)); in __mtx_lock_sleep()
688 sleep_time += lockstat_nsecs(&m->lock_object); in __mtx_lock_sleep()
691 v = MTX_READ_VALUE(m); in __mtx_lock_sleep()
693 THREAD_CONTENTION_DONE(&m->lock_object); in __mtx_lock_sleep()
699 all_time += lockstat_nsecs(&m->lock_object); in __mtx_lock_sleep()
701 LOCKSTAT_RECORD1(adaptive__block, m, sleep_time); in __mtx_lock_sleep()
707 LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time); in __mtx_lock_sleep()
710 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested, in __mtx_lock_sleep()
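The hits above trace the adaptive strategy of __mtx_lock_sleep(): retry the CAS, spin while the owner is running on another CPU, otherwise set MTX_CONTESTED and sleep on a turnstile. A compressed userland model of that decision, with the turnstile replaced by a yield stub (all names hypothetical, not the kernel's code):

#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

/* 0 = unowned; otherwise an opaque owner id. */
static _Atomic unsigned long toy_lock;

static bool
owner_running(unsigned long owner)
{
	(void)owner;
	return (true);		/* stand-in for TD_IS_RUNNING(owner) */
}

static void
block_until_released(void)
{
	sched_yield();		/* stand-in for the turnstile sleep */
}

static void
toy_lock_adaptive(unsigned long self)
{
	unsigned long v = 0;

	for (;;) {
		/* Fast path: CAS from unowned to owned. */
		if (atomic_compare_exchange_weak(&toy_lock, &v, self))
			return;
		if (owner_running(v))
			sched_yield();		/* owner on CPU: keep spinning */
		else
			block_until_released();	/* owner off CPU: go to sleep */
		v = 0;				/* re-arm the CAS */
	}
}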
721 #if LOCK_DEBUG > 0
730 struct mtx *m; in _mtx_lock_spin_cookie()
734 int contested = 0; in _mtx_lock_spin_cookie()
735 uint64_t waittime = 0; in _mtx_lock_spin_cookie()
738 int64_t spin_time = 0; in _mtx_lock_spin_cookie()
741 int doing_lockprof = 0; in _mtx_lock_spin_cookie()
745 m = mtxlock2mtx(c); in _mtx_lock_spin_cookie()
750 if (_mtx_obtain_lock_fetch(m, &v, tid)) in _mtx_lock_spin_cookie()
754 spin_time -= lockstat_nsecs(&m->lock_object); in _mtx_lock_spin_cookie()
762 v = MTX_READ_VALUE(m); in _mtx_lock_spin_cookie()
765 m->mtx_recurse++; in _mtx_lock_spin_cookie()
772 if (LOCK_LOG_TEST(&m->lock_object, opts)) in _mtx_lock_spin_cookie()
773 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m); in _mtx_lock_spin_cookie()
775 "spinning", "lockname:\"%s\"", m->lock_object.lo_name); in _mtx_lock_spin_cookie()
782 lock_profile_obtain_lock_failed(&m->lock_object, true, &contested, &waittime); in _mtx_lock_spin_cookie()
786 if (_mtx_obtain_lock_fetch(m, &v, tid)) in _mtx_lock_spin_cookie()
796 _mtx_lock_indefinite_check(m, &lda); in _mtx_lock_spin_cookie()
798 v = MTX_READ_VALUE(m); in _mtx_lock_spin_cookie()
803 if (LOCK_LOG_TEST(&m->lock_object, opts)) in _mtx_lock_spin_cookie()
804 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m); in _mtx_lock_spin_cookie()
813 spin_time += lockstat_nsecs(&m->lock_object); in _mtx_lock_spin_cookie()
814 if (lda.spin_cnt != 0) in _mtx_lock_spin_cookie()
815 LOCKSTAT_RECORD1(spin__spin, m, spin_time); in _mtx_lock_spin_cookie()
818 LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m, in _mtx_lock_spin_cookie()
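_mtx_lock_spin_cookie() polls the lock word read-only and paces itself with lock_delay() to keep the contended cache line quiet between attempts. A rough userland analogue of that backoff loop (illustrative only):

#include <stdatomic.h>

static _Atomic int spinword;

static void
toy_spin_lock(void)
{
	int expected, backoff = 1, i;

	for (;;) {
		expected = 0;
		if (atomic_compare_exchange_weak(&spinword, &expected, 1))
			return;			/* acquired */
		/* Pause without touching the lock's cache line, roughly
		 * doubling the wait after each failure (the kernel uses
		 * lock_delay()/cpu_spin_wait() for this). */
		for (i = 0; i < backoff; i++)
			atomic_signal_fence(memory_order_seq_cst);
		if (backoff < 1024)
			backoff <<= 1;
	}
}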
825 thread_lock_validate(struct mtx *m, int opts, const char *file, int line) in thread_lock_validate()
828 KASSERT(m->mtx_lock != MTX_DESTROYED, in thread_lock_validate()
830 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, in thread_lock_validate()
832 m->lock_object.lo_name, file, line)); in thread_lock_validate()
833 KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) == 0, in thread_lock_validate()
835 m->lock_object.lo_name, file, line)); in thread_lock_validate()
836 WITNESS_CHECKORDER(&m->lock_object, in thread_lock_validate()
840 #define thread_lock_validate(m, opts, file, line) do { } while (0)
844 #if LOCK_DEBUG > 0
852 struct mtx *m; in _thread_lock()
860 m = td->td_lock; in _thread_lock()
861 thread_lock_validate(m, 0, file, line); in _thread_lock()
862 if (__predict_false(m == &blocked_lock)) in _thread_lock()
864 if (__predict_false(!_mtx_obtain_lock(m, tid))) in _thread_lock()
866 if (__predict_true(m == td->td_lock)) { in _thread_lock()
867 WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line); in _thread_lock()
870 _mtx_release_lock_quick(m); in _thread_lock()
874 #if LOCK_DEBUG > 0 in _thread_lock()
877 thread_lock_flags_(td, 0, 0, 0); in _thread_lock()
885 struct mtx *m; in thread_lock_flags_()
889 int contested = 0; in thread_lock_flags_()
890 uint64_t waittime = 0; in thread_lock_flags_()
893 int64_t spin_time = 0; in thread_lock_flags_()
930 m = td->td_lock; in thread_lock_flags_()
931 thread_lock_validate(m, opts, file, line); in thread_lock_flags_()
932 v = MTX_READ_VALUE(m); in thread_lock_flags_()
935 if (_mtx_obtain_lock_fetch(m, &v, tid)) in thread_lock_flags_()
940 lock_profile_obtain_lock_failed(&m->lock_object, true, in thread_lock_flags_()
948 _mtx_lock_indefinite_check(m, &lda); in thread_lock_flags_()
950 if (m != td->td_lock) { in thread_lock_flags_()
954 v = MTX_READ_VALUE(m); in thread_lock_flags_()
958 if (m == td->td_lock) in thread_lock_flags_()
960 _mtx_release_lock_quick(m); in thread_lock_flags_()
962 LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, in thread_lock_flags_()
964 WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); in thread_lock_flags_()
971 spin_time += lockstat_nsecs(&m->lock_object); in thread_lock_flags_()
973 LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m, contested, in thread_lock_flags_()
976 if (lda.spin_cnt != 0) in thread_lock_flags_()
977 LOCKSTAT_RECORD1(thread__spin, m, spin_time); in thread_lock_flags_()
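thread_lock() is the odd one out: td->td_lock is a pointer the scheduler can retarget while a waiter spins, so both loops above re-read it and retry whenever it changes underneath them. Callers simply bracket scheduler state with it, for example (usage sketch; sched_prio() does require the thread lock):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>

static void
foo_set_prio(struct thread *td, u_char pri)
{
	thread_lock(td);	/* acquires whatever td_lock points at now */
	sched_prio(td, pri);	/* scheduler fields require the thread lock */
	thread_unlock(td);
}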
1033 #if LOCK_DEBUG > 0
1042 struct mtx *m; in __mtx_unlock_sleep()
1050 m = mtxlock2mtx(c); in __mtx_unlock_sleep()
1053 v = MTX_READ_VALUE(m); in __mtx_unlock_sleep()
1056 if (--(m->mtx_recurse) == 0) in __mtx_unlock_sleep()
1057 atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED); in __mtx_unlock_sleep()
1058 if (LOCK_LOG_TEST(&m->lock_object, opts)) in __mtx_unlock_sleep()
1059 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m); in __mtx_unlock_sleep()
1063 LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m); in __mtx_unlock_sleep()
1064 if (v == tid && _mtx_release_lock(m, tid)) in __mtx_unlock_sleep()
1071 turnstile_chain_lock(&m->lock_object); in __mtx_unlock_sleep()
1072 _mtx_release_lock_quick(m); in __mtx_unlock_sleep()
1073 ts = turnstile_lookup(&m->lock_object); in __mtx_unlock_sleep()
1075 panic("got NULL turnstile on mutex %p v %p", m, (void *)v); in __mtx_unlock_sleep()
1077 if (LOCK_LOG_TEST(&m->lock_object, opts)) in __mtx_unlock_sleep()
1078 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m); in __mtx_unlock_sleep()
1086 turnstile_chain_unlock(&m->lock_object); in __mtx_unlock_sleep()
1101 const struct mtx *m; in __mtx_assert()
1106 m = mtxlock2mtx(c); in __mtx_assert()
1112 if (!mtx_owned(m)) in __mtx_assert()
1114 m->lock_object.lo_name, file, line); in __mtx_assert()
1115 if (mtx_recursed(m)) { in __mtx_assert()
1116 if ((what & MA_NOTRECURSED) != 0) in __mtx_assert()
1118 m->lock_object.lo_name, file, line); in __mtx_assert()
1119 } else if ((what & MA_RECURSED) != 0) { in __mtx_assert()
1121 m->lock_object.lo_name, file, line); in __mtx_assert()
1125 if (mtx_owned(m)) in __mtx_assert()
1127 m->lock_object.lo_name, file, line); in __mtx_assert()
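mtx_assert() compiles to nothing without INVARIANTS; with it, the MA_* checks above turn locking-protocol violations into panics. Typical use at a function boundary (hypothetical helper):

static void
foo_bump_locked(void)
{
	mtx_assert(&foo_mtx, MA_OWNED);	/* caller must hold foo_mtx */
	foo_count++;
}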
1148 * Mutex initialization routine; initialize lock `m' of type contained in `opts', with name `name.' The optional `type' string is used as a general lock category for witness. in _mtx_init()
1156 struct mtx *m; in _mtx_init()
1160 m = mtxlock2mtx(c); in _mtx_init()
1163 MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0); in _mtx_init()
1164 ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock, in _mtx_init()
1166 &m->mtx_lock)); in _mtx_init()
1173 flags = 0; in _mtx_init()
1178 if ((opts & MTX_NOWITNESS) == 0) in _mtx_init()
1188 lock_init(&m->lock_object, class, name, type, flags); in _mtx_init()
1190 m->mtx_lock = MTX_UNOWNED; in _mtx_init()
1191 m->mtx_recurse = 0; in _mtx_init()
1195 * Remove lock `m' from all_mtx queue. We don't allow MTX_QUIET to be
1203 struct mtx *m; in _mtx_destroy()
1205 m = mtxlock2mtx(c); in _mtx_destroy()
1207 if (!mtx_owned(m)) in _mtx_destroy()
1208 MPASS(mtx_unowned(m)); in _mtx_destroy()
1210 MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0); in _mtx_destroy()
1213 if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) { in _mtx_destroy()
1214 lock_profile_release_lock(&m->lock_object, true); in _mtx_destroy()
1218 lock_profile_release_lock(&m->lock_object, false); in _mtx_destroy()
1222 WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__, in _mtx_destroy()
1226 m->mtx_lock = MTX_DESTROYED; in _mtx_destroy()
1227 lock_destroy(&m->lock_object); in _mtx_destroy()
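_mtx_init() and _mtx_destroy() back the public mtx_init()/mtx_destroy() pair; note above that the mutex may still be owned by the caller at destroy time. A typical attach/detach lifecycle (softc layout hypothetical):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct foo_softc {
	struct mtx sc_mtx;
	/* ... device state ... */
};

static int
foo_attach(struct foo_softc *sc)
{
	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
	return (0);
}

static int
foo_detach(struct foo_softc *sc)
{
	mtx_destroy(&sc->sc_mtx);	/* per above, may even be held here */
	return (0);
}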
1247 blocked_lock.mtx_lock = 0xdeadc0de; /* Always blocked. */ in mutex_init()
1258 _mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap) in _mtx_lock_indefinite_check()
1266 td = mtx_owner(m); in _mtx_lock_indefinite_check()
1273 m, m->lock_object.lo_name, td, td->td_tid); in _mtx_lock_indefinite_check()
1275 witness_display_spinlock(&m->lock_object, td, printf); in _mtx_lock_indefinite_check()
1283 mtx_spin_wait_unlocked(struct mtx *m) in mtx_spin_wait_unlocked()
1287 KASSERT(m->mtx_lock != MTX_DESTROYED, in mtx_spin_wait_unlocked()
1288 ("%s() of destroyed mutex %p", __func__, m)); in mtx_spin_wait_unlocked()
1289 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, in mtx_spin_wait_unlocked()
1290 ("%s() of sleep mutex %p (%s)", __func__, m, in mtx_spin_wait_unlocked()
1291 m->lock_object.lo_name)); in mtx_spin_wait_unlocked()
1292 KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m, in mtx_spin_wait_unlocked()
1293 m->lock_object.lo_name)); in mtx_spin_wait_unlocked()
1295 lda.spin_cnt = 0; in mtx_spin_wait_unlocked()
1297 while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) { in mtx_spin_wait_unlocked()
1302 _mtx_lock_indefinite_check(m, &lda); in mtx_spin_wait_unlocked()
1308 mtx_wait_unlocked(struct mtx *m) in mtx_wait_unlocked()
1313 KASSERT(m->mtx_lock != MTX_DESTROYED, in mtx_wait_unlocked()
1314 ("%s() of destroyed mutex %p", __func__, m)); in mtx_wait_unlocked()
1315 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, in mtx_wait_unlocked()
1316 ("%s() not a sleep mutex %p (%s)", __func__, m, in mtx_wait_unlocked()
1317 m->lock_object.lo_name)); in mtx_wait_unlocked()
1318 KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m, in mtx_wait_unlocked()
1319 m->lock_object.lo_name)); in mtx_wait_unlocked()
1322 v = atomic_load_acq_ptr(&m->mtx_lock); in mtx_wait_unlocked()
1328 mtx_lock(m); in mtx_wait_unlocked()
1329 mtx_unlock(m); in mtx_wait_unlocked()
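mtx_spin_wait_unlocked() and mtx_wait_unlocked() let teardown code wait out a late holder without ever keeping the lock; as the hits above show, the sleep-mutex flavor briefly takes and drops the lock if it must. A hypothetical use, continuing the foo_softc sketch:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

static void
foo_free(struct foo_softc *sc)
{
	/* Let any late holder finish before tearing the lock down. */
	mtx_wait_unlocked(&sc->sc_mtx);
	mtx_destroy(&sc->sc_mtx);
	free(sc, M_DEVBUF);
}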
1341 const struct mtx *m; in db_show_mtx()
1343 m = (const struct mtx *)lock; in db_show_mtx()
1350 if (m->lock_object.lo_flags & LO_RECURSABLE) in db_show_mtx()
1352 if (m->lock_object.lo_flags & LO_DUPOK) in db_show_mtx()
1356 if (mtx_unowned(m)) in db_show_mtx()
1358 else if (mtx_destroyed(m)) in db_show_mtx()
1362 if (m->mtx_lock & MTX_CONTESTED) in db_show_mtx()
1364 if (m->mtx_lock & MTX_RECURSED) in db_show_mtx()
1368 if (!mtx_unowned(m) && !mtx_destroyed(m)) { in db_show_mtx()
1369 td = mtx_owner(m); in db_show_mtx()
1372 if (mtx_recursed(m)) in db_show_mtx()
1373 db_printf(" recursed: %d\n", m->mtx_recurse); in db_show_mtx()