Lines Matching +full:0 +full:m
81 static int mutex_qidx(struct pthread_mutex *m);
82 static bool is_robust_mutex(struct pthread_mutex *m);
83 static bool is_pshared_mutex(struct pthread_mutex *m);
120 mutex_init_link(struct pthread_mutex *m __unused) in mutex_init_link()
124 m->m_qe.tqe_prev = NULL; in mutex_init_link()
125 m->m_qe.tqe_next = NULL; in mutex_init_link()
126 m->m_pqe.tqe_prev = NULL; in mutex_init_link()
127 m->m_pqe.tqe_next = NULL; in mutex_init_link()
132 mutex_assert_is_owned(struct pthread_mutex *m __unused) in mutex_assert_is_owned()
136 if (__predict_false(m->m_qe.tqe_prev == NULL)) in mutex_assert_is_owned()
138 m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next); in mutex_assert_is_owned()
144 struct pthread_mutex *m __unused) in mutex_assert_not_owned()
148 if (__predict_false(m->m_qe.tqe_prev != NULL || in mutex_assert_not_owned()
149 m->m_qe.tqe_next != NULL)) in mutex_assert_not_owned()
151 m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next); in mutex_assert_not_owned()
152 if (__predict_false(is_robust_mutex(m) && in mutex_assert_not_owned()
153 (m->m_lock.m_rb_lnk != 0 || m->m_rb_prev != NULL || in mutex_assert_not_owned()
154 (is_pshared_mutex(m) && curthread->robust_list == in mutex_assert_not_owned()
155 (uintptr_t)&m->m_lock) || in mutex_assert_not_owned()
156 (!is_pshared_mutex(m) && curthread->priv_robust_list == in mutex_assert_not_owned()
157 (uintptr_t)&m->m_lock)))) in mutex_assert_not_owned()
160 m, m->m_lock.m_owner, (void *)m->m_lock.m_rb_lnk, in mutex_assert_not_owned()
161 m->m_rb_prev, (void *)curthread->robust_list, in mutex_assert_not_owned()
167 is_pshared_mutex(struct pthread_mutex *m) in is_pshared_mutex() argument
170 return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0); in is_pshared_mutex()
174 is_robust_mutex(struct pthread_mutex *m) in is_robust_mutex() argument
177 return ((m->m_lock.m_flags & UMUTEX_ROBUST) != 0); in is_robust_mutex()
181 _mutex_enter_robust(struct pthread *curthread, struct pthread_mutex *m) in _mutex_enter_robust() argument
185 if (__predict_false(curthread->inact_mtx != 0)) in _mutex_enter_robust()
188 if (!is_robust_mutex(m)) in _mutex_enter_robust()
189 return (0); in _mutex_enter_robust()
192 curthread->inact_mtx = (uintptr_t)&m->m_lock; in _mutex_enter_robust()
197 _mutex_leave_robust(struct pthread *curthread, struct pthread_mutex *m __unused) in _mutex_leave_robust()
201 if (__predict_false(curthread->inact_mtx != (uintptr_t)&m->m_lock)) in _mutex_leave_robust()
204 curthread->inact_mtx = 0; in _mutex_leave_robust()
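
Note: the _mutex_enter_robust()/_mutex_leave_robust() lines above bracket a robust-lock acquire or release by recording the lock in the thread's inact_mtx slot, so a cleanup pass can finish the operation if the thread dies in the middle. A minimal standalone sketch of just that bookkeeping, using hypothetical xlock/xthread types (in libthr the slot is kernel-visible state, which this model omits):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical, trimmed-down types for illustration only. */
#define XL_ROBUST 0x1u

struct xlock   { uint32_t owner; uint32_t flags; };
struct xthread { uintptr_t inact_mtx; };        /* "in-flight" robust lock */

/*
 * Record the lock we are about to acquire or release so a cleanup pass
 * (in libthr, the kernel) can finish the job if the thread dies here.
 */
static bool
enter_robust(struct xthread *t, struct xlock *l)
{
        assert(t->inact_mtx == 0);              /* no nesting allowed */
        if ((l->flags & XL_ROBUST) == 0)
                return (false);
        t->inact_mtx = (uintptr_t)l;
        return (true);
}

static void
leave_robust(struct xthread *t, struct xlock *l)
{
        assert(t->inact_mtx == (uintptr_t)l);
        t->inact_mtx = 0;
}

int
main(void)
{
        struct xthread t = { 0 };
        struct xlock l = { .owner = 0, .flags = XL_ROBUST };
        bool robust;

        robust = enter_robust(&t, &l);
        /* ... acquire or release l here ... */
        if (robust)
                leave_robust(&t, &l);
        return (0);
}
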
217 return (0); in mutex_check_attr()
242 pmutex->m_count = 0; in mutex_init_body()
243 pmutex->m_spinloops = 0; in mutex_init_body()
244 pmutex->m_yieldloops = 0; in mutex_init_body()
249 pmutex->m_lock.m_flags = 0; in mutex_init_body()
258 pmutex->m_lock.m_ceilings[0] = attr->m_ceiling; in mutex_init_body()
288 if (error != 0) in mutex_init()
296 return (0); in mutex_init()
313 ret = 0; in init_static()
320 set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m) in set_inherited_priority() argument
324 m2 = TAILQ_LAST(&curthread->mq[mutex_qidx(m)], mutex_queue); in set_inherited_priority()
326 m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0]; in set_inherited_priority()
328 m->m_lock.m_ceilings[1] = -1; in set_inherited_priority()
338 .m_ceiling = 0, in shared_mutex_init()
388 if (ret != 0) in __Tthr_mutex_init()
402 return (0); in __Tthr_mutex_init()
413 .m_ceiling = 0, in _pthread_mutex_init_calloc_cb()
420 if (ret == 0) in _pthread_mutex_init_calloc_cb()
439 struct pthread_mutex *m; in queue_fork() local
442 TAILQ_FOREACH(m, qp, m_pqe) { in queue_fork()
443 TAILQ_INSERT_TAIL(q, m, m_qe); in queue_fork()
444 m->m_lock.m_owner = TID(curthread) | bit; in queue_fork()
453 &curthread->mq[TMQ_NORM_PRIV], 0); in _mutex_fork()
458 curthread->robust_list = 0; in _mutex_fork()
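
Note: queue_fork()/_mutex_fork() rebuild the child's "owned mutex" queue from the privately owned queue after fork() and re-stamp each lock word with the surviving thread's TID (the real code also ORs in a flag bit, omitted here), then drop the inherited robust list. A self-contained sketch of the rebuild over <sys/queue.h>, with a trimmed-down, hypothetical struct pm:

#include <stdint.h>
#include <stdio.h>
#include <sys/queue.h>

/* Hypothetical, trimmed-down mutex record. */
struct pm {
        uint32_t owner;                 /* stand-in for m_lock.m_owner */
        TAILQ_ENTRY(pm) all_link;       /* stand-in for m_qe  */
        TAILQ_ENTRY(pm) priv_link;      /* stand-in for m_pqe */
};
TAILQ_HEAD(pm_queue, pm);

/*
 * After fork() only the calling thread survives: rebuild the "all owned"
 * queue from the privately owned queue and re-stamp each lock word with
 * the surviving thread's id.
 */
static void
queue_fork_sketch(struct pm_queue *all, struct pm_queue *priv, uint32_t tid)
{
        struct pm *m;

        TAILQ_INIT(all);
        TAILQ_FOREACH(m, priv, priv_link) {
                TAILQ_INSERT_TAIL(all, m, all_link);
                m->owner = tid;
        }
}

int
main(void)
{
        struct pm_queue all, priv;
        struct pm a = { .owner = 100 }, b = { .owner = 100 };
        struct pm *m;

        TAILQ_INIT(&priv);
        TAILQ_INSERT_TAIL(&priv, &a, priv_link);
        TAILQ_INSERT_TAIL(&priv, &b, priv_link);

        queue_fork_sketch(&all, &priv, 200);    /* pretend 200 is the new TID */
        TAILQ_FOREACH(m, &all, all_link)
                printf("owner=%u\n", (unsigned)m->owner);
        return (0);
}
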
464 pthread_mutex_t m, m1; in _thr_mutex_destroy() local
467 m = *mutex; in _thr_mutex_destroy()
468 if (m < THR_MUTEX_DESTROYED) { in _thr_mutex_destroy()
469 ret = 0; in _thr_mutex_destroy()
470 } else if (m == THR_MUTEX_DESTROYED) { in _thr_mutex_destroy()
473 if (m == THR_PSHARED_PTR) { in _thr_mutex_destroy()
474 m1 = __thr_pshared_offpage(mutex, 0); in _thr_mutex_destroy()
484 return (0); in _thr_mutex_destroy()
486 if (PMUTEX_OWNER_ID(m) != 0 && in _thr_mutex_destroy()
487 (uint32_t)m->m_lock.m_owner != UMUTEX_RB_NOTRECOV) { in _thr_mutex_destroy()
491 mutex_assert_not_owned(_get_curthread(), m); in _thr_mutex_destroy()
492 __thr_free(m); in _thr_mutex_destroy()
493 ret = 0; in _thr_mutex_destroy()
501 mutex_qidx(struct pthread_mutex *m) in mutex_qidx() argument
504 if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) in mutex_qidx()
506 return (is_robust_mutex(m) ? TMQ_ROBUST_PP : TMQ_NORM_PP); in mutex_qidx()
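
Note: mutex_qidx() maps a mutex onto one of several per-thread ownership queues, and enqueue_mutex()/dequeue_mutex() below use "qidx + 1" for the paired private queue. A sketch of the same selection with hypothetical queue indices and placeholder flag bits:

#include <stdint.h>

/*
 * Hypothetical queue indices; each class is paired with a "+1" private
 * queue, matching the "qidx + 1" usage in enqueue_mutex()/dequeue_mutex().
 */
enum {
        Q_NORM,      Q_NORM_PRIV,
        Q_PP,        Q_PP_PRIV,
        Q_ROBUST_PP, Q_ROBUST_PP_PRIV
};

#define F_PRIO_PROTECT  0x1u    /* placeholder for UMUTEX_PRIO_PROTECT */
#define F_ROBUST        0x2u    /* placeholder for UMUTEX_ROBUST */

static int
qidx(uint32_t flags)
{
        if ((flags & F_PRIO_PROTECT) == 0)
                return (Q_NORM);
        return ((flags & F_ROBUST) != 0 ? Q_ROBUST_PP : Q_PP);
}

int
main(void)
{
        return (qidx(F_PRIO_PROTECT | F_ROBUST) == Q_ROBUST_PP ? 0 : 1);
}
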
522 enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m, in enqueue_mutex() argument
531 mutex_assert_not_owned(curthread, m); in enqueue_mutex()
532 qidx = mutex_qidx(m); in enqueue_mutex()
533 TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe); in enqueue_mutex()
534 if (!is_pshared_mutex(m)) in enqueue_mutex()
535 TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe); in enqueue_mutex()
536 if (is_robust_mutex(m)) { in enqueue_mutex()
537 rl = is_pshared_mutex(m) ? &curthread->robust_list : in enqueue_mutex()
539 m->m_rb_prev = NULL; in enqueue_mutex()
540 if (*rl != 0) { in enqueue_mutex()
543 m->m_lock.m_rb_lnk = (uintptr_t)&m1->m_lock; in enqueue_mutex()
544 m1->m_rb_prev = m; in enqueue_mutex()
547 m->m_lock.m_rb_lnk = 0; in enqueue_mutex()
549 *rl = (uintptr_t)&m->m_lock; in enqueue_mutex()
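
Note: the robust-list push in enqueue_mutex() threads the kernel-visible lock words together through m_rb_lnk while the userspace wrappers keep an m_rb_prev back pointer. A standalone model of only that pointer manipulation, with hypothetical klock/pmtx types (the real code keeps separate shared and private list heads, and the list is walked when the thread exits):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: the kernel-visible word lives in struct klock,
 * the userspace wrapper adds a back pointer. */
struct klock { uintptr_t rb_lnk; };             /* next lock in the list */
struct pmtx  { struct klock lock; struct pmtx *rb_prev; };

#define lock_to_pmtx(p) \
        ((struct pmtx *)((char *)(p) - offsetof(struct pmtx, lock)))

/*
 * Push at the head of the robust list.  "head" holds the address of the
 * first struct klock; that is what gets walked on thread exit.
 */
static void
robust_push(uintptr_t *head, struct pmtx *m)
{
        m->rb_prev = NULL;
        if (*head != 0) {
                struct pmtx *first = lock_to_pmtx((struct klock *)*head);

                m->lock.rb_lnk = (uintptr_t)&first->lock;
                first->rb_prev = m;
        } else {
                m->lock.rb_lnk = 0;
        }
        *head = (uintptr_t)&m->lock;
}

int
main(void)
{
        struct pmtx a = { { 0 }, NULL }, b = { { 0 }, NULL };
        uintptr_t head = 0;

        robust_push(&head, &a);
        robust_push(&head, &b);         /* list is now b -> a */
        printf("%d\n", head == (uintptr_t)&b.lock &&
            b.lock.rb_lnk == (uintptr_t)&a.lock);
        return (0);
}
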
554 dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m) in dequeue_mutex() argument
559 mutex_assert_is_owned(m); in dequeue_mutex()
560 qidx = mutex_qidx(m); in dequeue_mutex()
561 if (is_robust_mutex(m)) { in dequeue_mutex()
562 mp = m->m_rb_prev; in dequeue_mutex()
564 if (is_pshared_mutex(m)) { in dequeue_mutex()
565 curthread->robust_list = m->m_lock.m_rb_lnk; in dequeue_mutex()
568 m->m_lock.m_rb_lnk; in dequeue_mutex()
571 mp->m_lock.m_rb_lnk = m->m_lock.m_rb_lnk; in dequeue_mutex()
573 if (m->m_lock.m_rb_lnk != 0) { in dequeue_mutex()
574 mn = __containerof((void *)m->m_lock.m_rb_lnk, in dequeue_mutex()
576 mn->m_rb_prev = m->m_rb_prev; in dequeue_mutex()
578 m->m_lock.m_rb_lnk = 0; in dequeue_mutex()
579 m->m_rb_prev = NULL; in dequeue_mutex()
581 TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe); in dequeue_mutex()
582 if (!is_pshared_mutex(m)) in dequeue_mutex()
583 TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe); in dequeue_mutex()
584 if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0) in dequeue_mutex()
585 set_inherited_priority(curthread, m); in dequeue_mutex()
586 mutex_init_link(m); in dequeue_mutex()
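
Note: dequeue_mutex() undoes the push: fix the predecessor's forward link (or the list head), fix the successor's back pointer, and clear the removed lock's links so mutex_assert_not_owned() can later verify it is off the list. The same hypothetical model as above, showing only the unlink:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct klock { uintptr_t rb_lnk; };
struct pmtx  { struct klock lock; struct pmtx *rb_prev; };

#define lock_to_pmtx(p) \
        ((struct pmtx *)((char *)(p) - offsetof(struct pmtx, lock)))

/*
 * Unlink m: fix the predecessor's forward link (or the head), fix the
 * successor's back pointer, then clear m's own links so an assert can
 * later prove it is off the list.
 */
static void
robust_unlink(uintptr_t *head, struct pmtx *m)
{
        struct pmtx *prev = m->rb_prev;

        if (prev == NULL)
                *head = m->lock.rb_lnk;
        else
                prev->lock.rb_lnk = m->lock.rb_lnk;
        if (m->lock.rb_lnk != 0) {
                struct pmtx *next = lock_to_pmtx((struct klock *)m->lock.rb_lnk);

                next->rb_prev = prev;
        }
        m->lock.rb_lnk = 0;
        m->rb_prev = NULL;
}

int
main(void)
{
        struct pmtx a, b;
        uintptr_t head;

        /* Hand-built two-element list: b -> a. */
        a.lock.rb_lnk = 0;                      a.rb_prev = &b;
        b.lock.rb_lnk = (uintptr_t)&a.lock;     b.rb_prev = NULL;
        head = (uintptr_t)&b.lock;

        robust_unlink(&head, &a);               /* remove the tail */
        printf("%d\n", head == (uintptr_t)&b.lock && b.lock.rb_lnk == 0);
        return (0);
}
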
590 check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m) in check_and_init_mutex() argument
594 *m = *mutex; in check_and_init_mutex()
595 ret = 0; in check_and_init_mutex()
596 if (__predict_false(*m == THR_PSHARED_PTR)) { in check_and_init_mutex()
597 *m = __thr_pshared_offpage(mutex, 0); in check_and_init_mutex()
598 if (*m == NULL) in check_and_init_mutex()
601 shared_mutex_init(*m, NULL); in check_and_init_mutex()
602 } else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) { in check_and_init_mutex()
603 if (*m == THR_MUTEX_DESTROYED) { in check_and_init_mutex()
607 if (ret == 0) in check_and_init_mutex()
608 *m = *mutex; in check_and_init_mutex()
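
Note: check_and_init_mutex() resolves the user-visible pthread_mutex_t (a pointer-sized handle) by comparing it against small sentinel values: a destroyed handle fails, a statically initialized handle is set up lazily, and the pshared sentinel redirects to the off-page object. A much-simplified sketch of the sentinel pattern with hypothetical values (the real path also serializes the lazy initialization and handles the pshared case; both are omitted here):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/*
 * Hypothetical sentinels: libthr itself uses small non-pointer values
 * (static initializer, destroyed, pshared marker) in the handle slot.
 */
struct xmtx { int state; };

#define X_INITIALIZER   ((struct xmtx *)(uintptr_t)1)
#define X_DESTROYED     ((struct xmtx *)(uintptr_t)2)

/*
 * Resolve a user-visible handle (a pointer-sized slot) to a real object,
 * allocating lazily on first use of a statically initialized handle.
 */
static int
resolve(struct xmtx **slot, struct xmtx **out)
{
        struct xmtx *m = *slot;

        if (m == X_DESTROYED)
                return (EINVAL);
        if (m == X_INITIALIZER) {
                m = calloc(1, sizeof(*m));
                if (m == NULL)
                        return (ENOMEM);
                *slot = m;
        }
        *out = m;
        return (0);
}

int
main(void)
{
        struct xmtx *slot = X_INITIALIZER, *m;

        return (resolve(&slot, &m));
}
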
618 struct pthread_mutex *m; in __Tthr_mutex_trylock() local
623 ret = check_and_init_mutex(mutex, &m); in __Tthr_mutex_trylock()
624 if (ret != 0) in __Tthr_mutex_trylock()
628 if (m->m_flags & PMUTEX_FLAG_PRIVATE) in __Tthr_mutex_trylock()
630 robust = _mutex_enter_robust(curthread, m); in __Tthr_mutex_trylock()
631 ret = _thr_umutex_trylock(&m->m_lock, id); in __Tthr_mutex_trylock()
632 if (__predict_true(ret == 0) || ret == EOWNERDEAD) { in __Tthr_mutex_trylock()
633 enqueue_mutex(curthread, m, ret); in __Tthr_mutex_trylock()
635 m->m_lock.m_flags |= UMUTEX_NONCONSISTENT; in __Tthr_mutex_trylock()
636 } else if (PMUTEX_OWNER_ID(m) == id) { in __Tthr_mutex_trylock()
637 ret = mutex_self_trylock(m); in __Tthr_mutex_trylock()
640 _mutex_leave_robust(curthread, m); in __Tthr_mutex_trylock()
641 if (ret != 0 && ret != EOWNERDEAD && in __Tthr_mutex_trylock()
642 (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0) in __Tthr_mutex_trylock()
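
Note: __Tthr_mutex_trylock() is the backend of pthread_mutex_trylock(3); from the application the contract is only the return value. A minimal caller handling the documented cases:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

int
main(void)
{
        int r = pthread_mutex_trylock(&lock);

        if (r == 0) {
                /* got the lock on the fast path */
                pthread_mutex_unlock(&lock);
        } else if (r == EBUSY) {
                /* already held; do not block */
                fprintf(stderr, "busy, skipping\n");
        } else {
                /* e.g. EOWNERDEAD for robust mutexes */
                fprintf(stderr, "trylock: %d\n", r);
                return (1);
        }
        return (0);
}
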
648 mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m, in mutex_lock_sleep() argument
655 if (PMUTEX_OWNER_ID(m) == id) in mutex_lock_sleep()
656 return (mutex_self_lock(m, abstime)); in mutex_lock_sleep()
664 if (__predict_false((m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | in mutex_lock_sleep()
665 UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) != 0)) in mutex_lock_sleep()
671 count = m->m_spinloops; in mutex_lock_sleep()
673 owner = m->m_lock.m_owner; in mutex_lock_sleep()
674 if ((owner & ~UMUTEX_CONTESTED) == 0) { in mutex_lock_sleep()
675 if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, in mutex_lock_sleep()
677 ret = 0; in mutex_lock_sleep()
685 count = m->m_yieldloops; in mutex_lock_sleep()
688 owner = m->m_lock.m_owner; in mutex_lock_sleep()
689 if ((owner & ~UMUTEX_CONTESTED) == 0) { in mutex_lock_sleep()
690 if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, in mutex_lock_sleep()
692 ret = 0; in mutex_lock_sleep()
700 ret = __thr_umutex_lock(&m->m_lock, id); in mutex_lock_sleep()
701 else if (__predict_false(abstime->tv_nsec < 0 || in mutex_lock_sleep()
705 ret = __thr_umutex_timedlock(&m->m_lock, id, abstime); in mutex_lock_sleep()
707 if (ret == 0 || ret == EOWNERDEAD) { in mutex_lock_sleep()
708 enqueue_mutex(curthread, m, ret); in mutex_lock_sleep()
710 m->m_lock.m_flags |= UMUTEX_NONCONSISTENT; in mutex_lock_sleep()
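
Note: for plain mutexes, mutex_lock_sleep() first spins m_spinloops times, then yields m_yieldloops times, retrying an acquiring compare-and-set whenever the owner word looks free, and only then sleeps in the kernel. A standalone sketch of that spin-then-yield ladder using C11 atomics (the real lock word also carries flag bits such as UMUTEX_CONTESTED, which are masked before the CAS and dropped here):

#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical lock word: 0 means free, otherwise the owner's id. */
static bool
try_cas(atomic_uint_fast32_t *word, uint_fast32_t id)
{
        uint_fast32_t expected = 0;

        return (atomic_compare_exchange_strong_explicit(word, &expected, id,
            memory_order_acquire, memory_order_relaxed));
}

/*
 * Spin a bounded number of times, then yield a bounded number of times,
 * then give up so the caller can fall back to sleeping in the kernel.
 */
static bool
spin_then_yield(atomic_uint_fast32_t *word, uint_fast32_t id,
    int spinloops, int yieldloops)
{
        int i;

        for (i = 0; i < spinloops; i++) {
                if (atomic_load_explicit(word, memory_order_relaxed) == 0 &&
                    try_cas(word, id))
                        return (true);
                /* a CPU_SPINWAIT / pause hint would go here */
        }
        for (i = 0; i < yieldloops; i++) {
                sched_yield();
                if (atomic_load_explicit(word, memory_order_relaxed) == 0 &&
                    try_cas(word, id))
                        return (true);
        }
        return (false);                 /* caller blocks in the kernel */
}

int
main(void)
{
        atomic_uint_fast32_t word = 0;

        return (spin_then_yield(&word, 1, 100, 10) ? 0 : 1);
}
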
716 mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime, in mutex_lock_common() argument
722 robust = 0; /* pacify gcc */ in mutex_lock_common()
724 if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE) in mutex_lock_common()
727 robust = _mutex_enter_robust(curthread, m); in mutex_lock_common()
728 ret = _thr_umutex_trylock2(&m->m_lock, TID(curthread)); in mutex_lock_common()
729 if (__predict_true(ret == 0) || ret == EOWNERDEAD) { in mutex_lock_common()
730 enqueue_mutex(curthread, m, ret); in mutex_lock_common()
732 m->m_lock.m_flags |= UMUTEX_NONCONSISTENT; in mutex_lock_common()
734 ret = mutex_lock_sleep(curthread, m, abstime); in mutex_lock_common()
737 _mutex_leave_robust(curthread, m); in mutex_lock_common()
738 if (ret != 0 && ret != EOWNERDEAD && in mutex_lock_common()
739 (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0 && !cvattach) in mutex_lock_common()
747 struct pthread_mutex *m; in __Tthr_mutex_lock() local
751 ret = check_and_init_mutex(mutex, &m); in __Tthr_mutex_lock()
752 if (ret == 0) in __Tthr_mutex_lock()
753 ret = mutex_lock_common(m, NULL, false, false); in __Tthr_mutex_lock()
761 struct pthread_mutex *m; in __pthread_mutex_timedlock() local
765 ret = check_and_init_mutex(mutex, &m); in __pthread_mutex_timedlock()
766 if (ret == 0) in __pthread_mutex_timedlock()
767 ret = mutex_lock_common(m, abstime, false, false); in __pthread_mutex_timedlock()
777 mp = __thr_pshared_offpage(mutex, 0); in _thr_mutex_unlock()
788 _mutex_cv_lock(struct pthread_mutex *m, int count, bool rb_onlist) in _mutex_cv_lock() argument
792 error = mutex_lock_common(m, NULL, true, rb_onlist); in _mutex_cv_lock()
793 if (error == 0 || error == EOWNERDEAD) in _mutex_cv_lock()
794 m->m_count = count; in _mutex_cv_lock()
799 _mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer) in _mutex_cv_unlock() argument
805 *count = m->m_count; in _mutex_cv_unlock()
806 m->m_count = 0; in _mutex_cv_unlock()
807 (void)mutex_unlock_common(m, true, defer); in _mutex_cv_unlock()
808 return (0); in _mutex_cv_unlock()
812 _mutex_cv_attach(struct pthread_mutex *m, int count) in _mutex_cv_attach() argument
817 enqueue_mutex(curthread, m, 0); in _mutex_cv_attach()
818 m->m_count = count; in _mutex_cv_attach()
819 return (0); in _mutex_cv_attach()
829 if ((error = _mutex_owned(curthread, mp)) != 0) in _mutex_cv_detach()
836 mp->m_count = 0; in _mutex_cv_detach()
840 if ((mp->m_flags & PMUTEX_FLAG_DEFERRED) != 0) { in _mutex_cv_detach()
844 deferred = 0; in _mutex_cv_detach()
849 curthread->nwaiter_defer = 0; in _mutex_cv_detach()
851 return (0); in _mutex_cv_detach()
855 mutex_self_trylock(struct pthread_mutex *m) in mutex_self_trylock() argument
859 switch (PMUTEX_TYPE(m->m_flags)) { in mutex_self_trylock()
868 if (m->m_count + 1 > 0) { in mutex_self_trylock()
869 m->m_count++; in mutex_self_trylock()
870 ret = 0; in mutex_self_trylock()
884 mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime) in mutex_self_lock() argument
889 switch (PMUTEX_TYPE(m->m_flags)) { in mutex_self_lock()
893 if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || in mutex_self_lock()
916 ret = 0; in mutex_self_lock()
918 if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || in mutex_self_lock()
929 ts1.tv_nsec = 0; in mutex_self_lock()
937 if (m->m_count + 1 > 0) { in mutex_self_lock()
938 m->m_count++; in mutex_self_lock()
939 ret = 0; in mutex_self_lock()
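
Note: mutex_self_trylock()/mutex_self_lock() handle relocking a mutex the caller already owns: recursive mutexes bump m_count (the "m_count + 1 > 0" test above guards against counter overflow), while the other types report the attempt as busy or, on the blocking path, as a deadlock. A sketch of the trylock side with hypothetical type tags:

#include <errno.h>
#include <limits.h>

/* Hypothetical type tags standing in for PMUTEX_TYPE(m->m_flags). */
enum xtype { X_NORMAL, X_ERRORCHECK, X_RECURSIVE };

static int
self_trylock(enum xtype type, int *count)
{
        switch (type) {
        case X_RECURSIVE:
                if (*count == INT_MAX)
                        return (EAGAIN);        /* counter would overflow */
                (*count)++;
                return (0);
        case X_ERRORCHECK:
        case X_NORMAL:
        default:
                return (EBUSY);                 /* already locked by us */
        }
}

int
main(void)
{
        int count = 0;

        return (self_trylock(X_RECURSIVE, &count));     /* 0; count is 1 */
}
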
953 mutex_unlock_common(struct pthread_mutex *m, bool cv, int *mtx_defer) in mutex_unlock_common() argument
959 if (__predict_false(m <= THR_MUTEX_DESTROYED)) { in mutex_unlock_common()
960 if (m == THR_MUTEX_DESTROYED) in mutex_unlock_common()
971 if (__predict_false(PMUTEX_OWNER_ID(m) != id)) in mutex_unlock_common()
974 error = 0; in mutex_unlock_common()
975 private = (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0; in mutex_unlock_common()
976 if (__predict_false(PMUTEX_TYPE(m->m_flags) == in mutex_unlock_common()
977 PTHREAD_MUTEX_RECURSIVE && m->m_count > 0)) { in mutex_unlock_common()
978 m->m_count--; in mutex_unlock_common()
980 if ((m->m_flags & PMUTEX_FLAG_DEFERRED) != 0) { in mutex_unlock_common()
982 m->m_flags &= ~PMUTEX_FLAG_DEFERRED; in mutex_unlock_common()
984 deferred = 0; in mutex_unlock_common()
986 robust = _mutex_enter_robust(curthread, m); in mutex_unlock_common()
987 dequeue_mutex(curthread, m); in mutex_unlock_common()
988 error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer); in mutex_unlock_common()
993 curthread->nwaiter_defer = 0; in mutex_unlock_common()
998 _mutex_leave_robust(curthread, m); in mutex_unlock_common()
1009 struct pthread_mutex *m; in _pthread_mutex_getprioceiling() local
1012 m = __thr_pshared_offpage(__DECONST(void *, mutex), 0); in _pthread_mutex_getprioceiling()
1013 if (m == NULL) in _pthread_mutex_getprioceiling()
1015 shared_mutex_init(m, NULL); in _pthread_mutex_getprioceiling()
1017 m = *mutex; in _pthread_mutex_getprioceiling()
1018 if (m <= THR_MUTEX_DESTROYED) in _pthread_mutex_getprioceiling()
1021 if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) in _pthread_mutex_getprioceiling()
1023 *prioceiling = m->m_lock.m_ceilings[0]; in _pthread_mutex_getprioceiling()
1024 return (0); in _pthread_mutex_getprioceiling()
1032 struct pthread_mutex *m, *m1, *m2; in _pthread_mutex_setprioceiling() local
1037 m = __thr_pshared_offpage(mutex, 0); in _pthread_mutex_setprioceiling()
1038 if (m == NULL) in _pthread_mutex_setprioceiling()
1040 shared_mutex_init(m, NULL); in _pthread_mutex_setprioceiling()
1042 m = *mutex; in _pthread_mutex_setprioceiling()
1043 if (m <= THR_MUTEX_DESTROYED) in _pthread_mutex_setprioceiling()
1046 if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) in _pthread_mutex_setprioceiling()
1049 ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling); in _pthread_mutex_setprioceiling()
1050 if (ret != 0) in _pthread_mutex_setprioceiling()
1054 if (PMUTEX_OWNER_ID(m) == TID(curthread)) { in _pthread_mutex_setprioceiling()
1055 mutex_assert_is_owned(m); in _pthread_mutex_setprioceiling()
1056 m1 = TAILQ_PREV(m, mutex_queue, m_qe); in _pthread_mutex_setprioceiling()
1057 m2 = TAILQ_NEXT(m, m_qe); in _pthread_mutex_setprioceiling()
1058 if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) || in _pthread_mutex_setprioceiling()
1059 (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) { in _pthread_mutex_setprioceiling()
1060 qidx = mutex_qidx(m); in _pthread_mutex_setprioceiling()
1063 TAILQ_REMOVE(q, m, m_qe); in _pthread_mutex_setprioceiling()
1064 if (!is_pshared_mutex(m)) in _pthread_mutex_setprioceiling()
1065 TAILQ_REMOVE(qp, m, m_pqe); in _pthread_mutex_setprioceiling()
1067 if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) { in _pthread_mutex_setprioceiling()
1068 TAILQ_INSERT_BEFORE(m2, m, m_qe); in _pthread_mutex_setprioceiling()
1069 if (!is_pshared_mutex(m)) { in _pthread_mutex_setprioceiling()
1077 m, m_pqe); in _pthread_mutex_setprioceiling()
1080 m, m_pqe); in _pthread_mutex_setprioceiling()
1083 return (0); in _pthread_mutex_setprioceiling()
1086 TAILQ_INSERT_TAIL(q, m, m_qe); in _pthread_mutex_setprioceiling()
1087 if (!is_pshared_mutex(m)) in _pthread_mutex_setprioceiling()
1088 TAILQ_INSERT_TAIL(qp, m, m_pqe); in _pthread_mutex_setprioceiling()
1091 return (0); in _pthread_mutex_setprioceiling()
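
Note: the re-insertion loop above keeps each priority-protect queue ordered by ceiling (non-decreasing), so set_inherited_priority() can take the ceiling of TAILQ_LAST(). A self-contained sketch of the same sorted insertion over a plain <sys/queue.h> TAILQ, with a hypothetical struct pp:

#include <stdio.h>
#include <sys/queue.h>

/* Hypothetical element carrying just a priority ceiling. */
struct pp {
        unsigned ceiling;
        TAILQ_ENTRY(pp) link;
};
TAILQ_HEAD(pp_queue, pp);

/*
 * Keep the queue sorted by ceiling (non-decreasing): walk forward to the
 * first element with a larger ceiling and insert before it, or append.
 */
static void
insert_sorted(struct pp_queue *q, struct pp *m)
{
        struct pp *it;

        TAILQ_FOREACH(it, q, link) {
                if (it->ceiling > m->ceiling) {
                        TAILQ_INSERT_BEFORE(it, m, link);
                        return;
                }
        }
        TAILQ_INSERT_TAIL(q, m, link);
}

int
main(void)
{
        struct pp_queue q;
        struct pp a = { .ceiling = 10 }, b = { .ceiling = 30 },
            c = { .ceiling = 20 };
        struct pp *it;

        TAILQ_INIT(&q);
        insert_sorted(&q, &a);
        insert_sorted(&q, &b);
        insert_sorted(&q, &c);
        TAILQ_FOREACH(it, &q, link)
                printf("%u\n", it->ceiling);    /* 10 20 30 */
        return (0);
}
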
1097 struct pthread_mutex *m; in _pthread_mutex_getspinloops_np() local
1100 ret = check_and_init_mutex(mutex, &m); in _pthread_mutex_getspinloops_np()
1101 if (ret == 0) in _pthread_mutex_getspinloops_np()
1102 *count = m->m_spinloops; in _pthread_mutex_getspinloops_np()
1109 struct pthread_mutex *m; in __pthread_mutex_setspinloops_np() local
1112 ret = check_and_init_mutex(mutex, &m); in __pthread_mutex_setspinloops_np()
1113 if (ret == 0) in __pthread_mutex_setspinloops_np()
1114 m->m_spinloops = count; in __pthread_mutex_setspinloops_np()
1121 struct pthread_mutex *m; in _pthread_mutex_getyieldloops_np() local
1124 ret = check_and_init_mutex(mutex, &m); in _pthread_mutex_getyieldloops_np()
1125 if (ret == 0) in _pthread_mutex_getyieldloops_np()
1126 *count = m->m_yieldloops; in _pthread_mutex_getyieldloops_np()
1133 struct pthread_mutex *m; in __pthread_mutex_setyieldloops_np() local
1136 ret = check_and_init_mutex(mutex, &m); in __pthread_mutex_setyieldloops_np()
1137 if (ret == 0) in __pthread_mutex_setyieldloops_np()
1138 m->m_yieldloops = count; in __pthread_mutex_setyieldloops_np()
1139 return (0); in __pthread_mutex_setyieldloops_np()
1145 struct pthread_mutex *m; in _pthread_mutex_isowned_np() local
1148 m = __thr_pshared_offpage(mutex, 0); in _pthread_mutex_isowned_np()
1149 if (m == NULL) in _pthread_mutex_isowned_np()
1150 return (0); in _pthread_mutex_isowned_np()
1151 shared_mutex_init(m, NULL); in _pthread_mutex_isowned_np()
1153 m = *mutex; in _pthread_mutex_isowned_np()
1154 if (m <= THR_MUTEX_DESTROYED) in _pthread_mutex_isowned_np()
1155 return (0); in _pthread_mutex_isowned_np()
1157 return (PMUTEX_OWNER_ID(m) == TID(_get_curthread())); in _pthread_mutex_isowned_np()
1171 return (0); in _mutex_owned()
1177 struct pthread_mutex *m; in _Tthr_mutex_consistent() local
1181 m = __thr_pshared_offpage(mutex, 0); in _Tthr_mutex_consistent()
1182 if (m == NULL) in _Tthr_mutex_consistent()
1184 shared_mutex_init(m, NULL); in _Tthr_mutex_consistent()
1186 m = *mutex; in _Tthr_mutex_consistent()
1187 if (m <= THR_MUTEX_DESTROYED) in _Tthr_mutex_consistent()
1191 if ((m->m_lock.m_flags & (UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) != in _Tthr_mutex_consistent()
1194 if (PMUTEX_OWNER_ID(m) != TID(curthread)) in _Tthr_mutex_consistent()
1196 m->m_lock.m_flags &= ~UMUTEX_NONCONSISTENT; in _Tthr_mutex_consistent()
1197 return (0); in _Tthr_mutex_consistent()
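
Note: _Tthr_mutex_consistent() is the backend of pthread_mutex_consistent(3). From the application side, the robust machinery above surfaces as EOWNERDEAD from the lock call; a minimal user of the standard API:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int
main(void)
{
        pthread_mutexattr_t attr;
        pthread_mutex_t m;
        int r;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
        pthread_mutex_init(&m, &attr);
        pthread_mutexattr_destroy(&attr);

        r = pthread_mutex_lock(&m);
        if (r == EOWNERDEAD) {
                /* previous owner died holding the lock: repair the
                 * protected state, then mark the mutex usable again */
                pthread_mutex_consistent(&m);
        } else if (r != 0) {
                fprintf(stderr, "lock: %d\n", r);
                return (1);
        }
        pthread_mutex_unlock(&m);
        pthread_mutex_destroy(&m);
        return (0);
}
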