Lines matching the full-text search terms "-" and "m" in thr_mutex.c (FreeBSD libthr)
1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause
24 * 4. Neither the name of the author nor the names of any co-contributors
49 #include "un-namespace.h"
54 "pthread_mutex is too large for off-page");
81 static int mutex_qidx(struct pthread_mutex *m);
82 static bool is_robust_mutex(struct pthread_mutex *m);
83 static bool is_pshared_mutex(struct pthread_mutex *m);
120 mutex_init_link(struct pthread_mutex *m __unused) in mutex_init_link()
124 m->m_qe.tqe_prev = NULL; in mutex_init_link()
125 m->m_qe.tqe_next = NULL; in mutex_init_link()
126 m->m_pqe.tqe_prev = NULL; in mutex_init_link()
127 m->m_pqe.tqe_next = NULL; in mutex_init_link()
132 mutex_assert_is_owned(struct pthread_mutex *m __unused) in mutex_assert_is_owned()
136 if (__predict_false(m->m_qe.tqe_prev == NULL)) in mutex_assert_is_owned()
138 m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next); in mutex_assert_is_owned()
144 struct pthread_mutex *m __unused) in mutex_assert_not_owned()
148 if (__predict_false(m->m_qe.tqe_prev != NULL || in mutex_assert_not_owned()
149 m->m_qe.tqe_next != NULL)) in mutex_assert_not_owned()
151 m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next); in mutex_assert_not_owned()
152 if (__predict_false(is_robust_mutex(m) && in mutex_assert_not_owned()
153 (m->m_lock.m_rb_lnk != 0 || m->m_rb_prev != NULL || in mutex_assert_not_owned()
154 (is_pshared_mutex(m) && curthread->robust_list == in mutex_assert_not_owned()
155 (uintptr_t)&m->m_lock) || in mutex_assert_not_owned()
156 (!is_pshared_mutex(m) && curthread->priv_robust_list == in mutex_assert_not_owned()
157 (uintptr_t)&m->m_lock)))) in mutex_assert_not_owned()
160 m, m->m_lock.m_owner, (void *)m->m_lock.m_rb_lnk, in mutex_assert_not_owned()
161 m->m_rb_prev, (void *)curthread->robust_list, in mutex_assert_not_owned()
162 (void *)curthread->priv_robust_list); in mutex_assert_not_owned()
167 is_pshared_mutex(struct pthread_mutex *m) in is_pshared_mutex() argument
170 return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0); in is_pshared_mutex()
174 is_robust_mutex(struct pthread_mutex *m) in is_robust_mutex() argument
177 return ((m->m_lock.m_flags & UMUTEX_ROBUST) != 0); in is_robust_mutex()
181 _mutex_enter_robust(struct pthread *curthread, struct pthread_mutex *m) in _mutex_enter_robust() argument
185 if (__predict_false(curthread->inact_mtx != 0)) in _mutex_enter_robust()
188 if (!is_robust_mutex(m)) in _mutex_enter_robust()
192 curthread->inact_mtx = (uintptr_t)&m->m_lock; in _mutex_enter_robust()
197 _mutex_leave_robust(struct pthread *curthread, struct pthread_mutex *m __unused) in _mutex_leave_robust()
201 if (__predict_false(curthread->inact_mtx != (uintptr_t)&m->m_lock)) in _mutex_leave_robust()
204 curthread->inact_mtx = 0; in _mutex_leave_robust()
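
The pair above marks a robust-lock transition as in progress: _mutex_enter_robust records the address of the lock being worked on in curthread->inact_mtx, and _mutex_leave_robust clears it once the lock state is consistent again. Below is a minimal sketch of that publish-then-clear pattern; the names (recovery_ctx, begin_transition, end_transition) are invented and plain assertions stand in for the real error handling. It is not the libthr code.

/*
 * Illustrative sketch: publish an "operation in progress" pointer
 * before touching a lock word, and clear it afterwards, so a
 * recovery path can tell whether a transition was cut short.
 * All names here are invented for the example.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct recovery_ctx {
	uintptr_t inact;	/* address of the lock being changed, or 0 */
};

static void
begin_transition(struct recovery_ctx *ctx, void *lock)
{
	assert(ctx->inact == 0);	/* transitions must not nest */
	ctx->inact = (uintptr_t)lock;
}

static void
end_transition(struct recovery_ctx *ctx, void *lock)
{
	assert(ctx->inact == (uintptr_t)lock);
	ctx->inact = 0;
}

int
main(void)
{
	struct recovery_ctx ctx = { 0 };
	int lockword = 0;

	begin_transition(&ctx, &lockword);
	lockword = 1;			/* the actual lock update */
	end_transition(&ctx, &lockword);
	printf("transition complete, inact=%#jx\n", (uintmax_t)ctx.inact);
	return (0);
}
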
211 if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK || in mutex_check_attr()
212 attr->m_type >= PTHREAD_MUTEX_TYPE_MAX) in mutex_check_attr()
214 if (attr->m_protocol < PTHREAD_PRIO_NONE || in mutex_check_attr()
215 attr->m_protocol > PTHREAD_PRIO_PROTECT) in mutex_check_attr()
227 if (curthread->robust_inited) in mutex_init_robust()
229 rb.robust_list_offset = (uintptr_t)&curthread->robust_list; in mutex_init_robust()
230 rb.robust_priv_list_offset = (uintptr_t)&curthread->priv_robust_list; in mutex_init_robust()
231 rb.robust_inact_offset = (uintptr_t)&curthread->inact_mtx; in mutex_init_robust()
233 curthread->robust_inited = 1; in mutex_init_robust()
241 pmutex->m_flags = attr->m_type; in mutex_init_body()
242 pmutex->m_count = 0; in mutex_init_body()
243 pmutex->m_spinloops = 0; in mutex_init_body()
244 pmutex->m_yieldloops = 0; in mutex_init_body()
246 switch (attr->m_protocol) { in mutex_init_body()
248 pmutex->m_lock.m_owner = UMUTEX_UNOWNED; in mutex_init_body()
249 pmutex->m_lock.m_flags = 0; in mutex_init_body()
252 pmutex->m_lock.m_owner = UMUTEX_UNOWNED; in mutex_init_body()
253 pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT; in mutex_init_body()
256 pmutex->m_lock.m_owner = UMUTEX_CONTESTED; in mutex_init_body()
257 pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT; in mutex_init_body()
258 pmutex->m_lock.m_ceilings[0] = attr->m_ceiling; in mutex_init_body()
261 if (attr->m_pshared == PTHREAD_PROCESS_SHARED) in mutex_init_body()
262 pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED; in mutex_init_body()
263 if (attr->m_robust == PTHREAD_MUTEX_ROBUST) { in mutex_init_body()
265 pmutex->m_lock.m_flags |= UMUTEX_ROBUST; in mutex_init_body()
267 if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) { in mutex_init_body()
268 pmutex->m_spinloops = in mutex_init_body()
270 pmutex->m_yieldloops = _thr_yieldloops; in mutex_init_body()
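
mutex_init_body above translates the attribute's protocol, pshared and robust settings into flag bits and an initial owner value on the underlying lock word, plus spin/yield tuning for adaptive mutexes. The sketch below shows only the shape of that mapping; the TOY_* flag values and toy_lock layout are made up for illustration and do not match the real UMUTEX_* constants.

/*
 * Sketch of the attribute-to-flags mapping idea, with made-up
 * flag values (TOY_*) rather than the real UMUTEX_* constants.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define	TOY_PRIO_INHERIT	0x01u
#define	TOY_PRIO_PROTECT	0x02u
#define	TOY_PSHARED		0x04u
#define	TOY_ROBUST		0x08u

struct toy_lock {
	uint32_t flags;
	uint32_t ceiling;
};

static int
toy_lock_init(struct toy_lock *lk, int protocol, int pshared, int robust,
    int ceiling)
{
	lk->flags = 0;
	lk->ceiling = 0;
	switch (protocol) {
	case PTHREAD_PRIO_NONE:
		break;
	case PTHREAD_PRIO_INHERIT:
		lk->flags |= TOY_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		lk->flags |= TOY_PRIO_PROTECT;
		lk->ceiling = (uint32_t)ceiling;
		break;
	default:
		return (-1);
	}
	if (pshared == PTHREAD_PROCESS_SHARED)
		lk->flags |= TOY_PSHARED;
	if (robust == PTHREAD_MUTEX_ROBUST)
		lk->flags |= TOY_ROBUST;
	return (0);
}

int
main(void)
{
	struct toy_lock lk;

	(void)toy_lock_init(&lk, PTHREAD_PRIO_PROTECT, PTHREAD_PROCESS_SHARED,
	    PTHREAD_MUTEX_ROBUST, 20);
	printf("flags=%#x ceiling=%u\n", lk.flags, lk.ceiling);
	return (0);
}
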
320 set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m) in set_inherited_priority() argument
324 m2 = TAILQ_LAST(&curthread->mq[mutex_qidx(m)], mutex_queue); in set_inherited_priority()
326 m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0]; in set_inherited_priority()
328 m->m_lock.m_ceilings[1] = -1; in set_inherited_priority()
346 * same process-shared mutex. We rely on kernel allocating in shared_mutex_init()
351 switch (pmtx->m_ps) { in shared_mutex_init()
357 if (atomic_cmpset_int(&pmtx->m_ps, in shared_mutex_init()
362 atomic_store_rel_int(&pmtx->m_ps, in shared_mutex_init()
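
shared_mutex_init arbitrates concurrent first-use initialization of a process-shared mutex by compare-and-setting a stage word (pmtx->m_ps). Below is a sketch of the same once-init idea using C11 atomics; the stage names and the shared_obj layout are illustrative, not the libthr ones.

/*
 * Sketch of the compare-and-set "init stage" idea: exactly one caller
 * wins the ALLOC -> BUSY transition and performs the initialization,
 * everyone else spins or yields until the stage reaches DONE.
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

enum { STAGE_ALLOC = 0, STAGE_BUSY = 1, STAGE_DONE = 2 };

struct shared_obj {
	_Atomic int stage;
	int payload;
};

static void
shared_obj_init_once(struct shared_obj *o)
{
	int expected;

	for (;;) {
		switch (atomic_load(&o->stage)) {
		case STAGE_ALLOC:
			expected = STAGE_ALLOC;
			/* Only one caller wins this transition. */
			if (atomic_compare_exchange_strong(&o->stage,
			    &expected, STAGE_BUSY)) {
				o->payload = 42;	/* the real init work */
				atomic_store(&o->stage, STAGE_DONE);
				return;
			}
			break;
		case STAGE_BUSY:
			sched_yield();	/* another caller is initializing */
			break;
		case STAGE_DONE:
			return;
		}
	}
}

int
main(void)
{
	struct shared_obj o = { .stage = STAGE_ALLOC };

	shared_obj_init_once(&o);
	printf("payload=%d stage=%d\n", o.payload, atomic_load(&o.stage));
	return (0);
}
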
392 (*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) { in __Tthr_mutex_init()
421 (*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE; in _pthread_mutex_init_calloc_cb()
439 struct pthread_mutex *m; in queue_fork() local
442 TAILQ_FOREACH(m, qp, m_pqe) { in queue_fork()
443 TAILQ_INSERT_TAIL(q, m, m_qe); in queue_fork()
444 m->m_lock.m_owner = TID(curthread) | bit; in queue_fork()
452 queue_fork(curthread, &curthread->mq[TMQ_NORM], in _mutex_fork()
453 &curthread->mq[TMQ_NORM_PRIV], 0); in _mutex_fork()
454 queue_fork(curthread, &curthread->mq[TMQ_NORM_PP], in _mutex_fork()
455 &curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED); in _mutex_fork()
456 queue_fork(curthread, &curthread->mq[TMQ_ROBUST_PP], in _mutex_fork()
457 &curthread->mq[TMQ_ROBUST_PP_PRIV], UMUTEX_CONTESTED); in _mutex_fork()
458 curthread->robust_list = 0; in _mutex_fork()
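
queue_fork and _mutex_fork walk the private mutex queues in the child after fork, rewrite each held lock's owner word for the one surviving thread, and reset the robust list head. The sketch below shows the rewrite-the-owner-field idea over a simplified list; the toy_mutex layout and the refix_owned_locks helper are invented.

/*
 * Sketch of the post-fork fixup idea: the child rewrites the owner
 * field of every lock the forking thread held, because only that
 * thread survives in the child process. Simplified lock layout.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_mutex {
	uint32_t owner;		/* owner tid plus flag bits */
	struct toy_mutex *next;
};

static void
refix_owned_locks(struct toy_mutex *head, uint32_t new_tid, uint32_t flags)
{
	struct toy_mutex *m;

	for (m = head; m != NULL; m = m->next)
		m->owner = new_tid | flags;
}

int
main(void)
{
	struct toy_mutex a = { 0x1001, NULL }, b = { 0x1001, &a };

	refix_owned_locks(&b, 0x2002, 0);	/* pretend the tid changed */
	printf("a.owner=%#x b.owner=%#x\n", a.owner, b.owner);
	return (0);
}
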
464 pthread_mutex_t m, m1; in _thr_mutex_destroy() local
467 m = *mutex; in _thr_mutex_destroy()
468 if (m < THR_MUTEX_DESTROYED) { in _thr_mutex_destroy()
470 } else if (m == THR_MUTEX_DESTROYED) { in _thr_mutex_destroy()
473 if (m == THR_PSHARED_PTR) { in _thr_mutex_destroy()
476 if ((uint32_t)m1->m_lock.m_owner != in _thr_mutex_destroy()
486 if (PMUTEX_OWNER_ID(m) != 0 && in _thr_mutex_destroy()
487 (uint32_t)m->m_lock.m_owner != UMUTEX_RB_NOTRECOV) { in _thr_mutex_destroy()
491 mutex_assert_not_owned(_get_curthread(), m); in _thr_mutex_destroy()
492 __thr_free(m); in _thr_mutex_destroy()
501 mutex_qidx(struct pthread_mutex *m) in mutex_qidx() argument
504 if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) in mutex_qidx()
506 return (is_robust_mutex(m) ? TMQ_ROBUST_PP : TMQ_NORM_PP); in mutex_qidx()
511 * thread-private linkage of the locked mutexes and on the robust
522 enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m, in enqueue_mutex() argument
531 mutex_assert_not_owned(curthread, m); in enqueue_mutex()
532 qidx = mutex_qidx(m); in enqueue_mutex()
533 TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe); in enqueue_mutex()
534 if (!is_pshared_mutex(m)) in enqueue_mutex()
535 TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe); in enqueue_mutex()
536 if (is_robust_mutex(m)) { in enqueue_mutex()
537 rl = is_pshared_mutex(m) ? &curthread->robust_list : in enqueue_mutex()
538 &curthread->priv_robust_list; in enqueue_mutex()
539 m->m_rb_prev = NULL; in enqueue_mutex()
543 m->m_lock.m_rb_lnk = (uintptr_t)&m1->m_lock; in enqueue_mutex()
544 m1->m_rb_prev = m; in enqueue_mutex()
547 m->m_lock.m_rb_lnk = 0; in enqueue_mutex()
549 *rl = (uintptr_t)&m->m_lock; in enqueue_mutex()
554 dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m) in dequeue_mutex() argument
559 mutex_assert_is_owned(m); in dequeue_mutex()
560 qidx = mutex_qidx(m); in dequeue_mutex()
561 if (is_robust_mutex(m)) { in dequeue_mutex()
562 mp = m->m_rb_prev; in dequeue_mutex()
564 if (is_pshared_mutex(m)) { in dequeue_mutex()
565 curthread->robust_list = m->m_lock.m_rb_lnk; in dequeue_mutex()
567 curthread->priv_robust_list = in dequeue_mutex()
568 m->m_lock.m_rb_lnk; in dequeue_mutex()
571 mp->m_lock.m_rb_lnk = m->m_lock.m_rb_lnk; in dequeue_mutex()
573 if (m->m_lock.m_rb_lnk != 0) { in dequeue_mutex()
574 mn = __containerof((void *)m->m_lock.m_rb_lnk, in dequeue_mutex()
576 mn->m_rb_prev = m->m_rb_prev; in dequeue_mutex()
578 m->m_lock.m_rb_lnk = 0; in dequeue_mutex()
579 m->m_rb_prev = NULL; in dequeue_mutex()
581 TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe); in dequeue_mutex()
582 if (!is_pshared_mutex(m)) in dequeue_mutex()
583 TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe); in dequeue_mutex()
584 if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0) in dequeue_mutex()
585 set_inherited_priority(curthread, m); in dequeue_mutex()
586 mutex_init_link(m); in dequeue_mutex()
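
Besides the TAILQ entries, enqueue_mutex and dequeue_mutex maintain an intrusive robust list: the forward link (m_lock.m_rb_lnk) sits next to the lock word so the kernel can walk it, while the back link (m_rb_prev) stays in the userspace wrapper, giving O(1) head insertion and removal. A minimal sketch of that insert/unlink pattern follows, using plain pointers and invented names instead of the real fields.

/*
 * Sketch of the head-insert / O(1) unlink pattern behind the robust
 * list. "next" stands in for m_lock.m_rb_lnk, "prev" for m_rb_prev.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct rnode {
	struct rnode *next;
	struct rnode *prev;
	int id;
};

static void
rlist_insert_head(struct rnode **head, struct rnode *n)
{
	n->prev = NULL;
	n->next = *head;
	if (*head != NULL)
		(*head)->prev = n;
	*head = n;
}

static void
rlist_remove(struct rnode **head, struct rnode *n)
{
	if (n->prev == NULL) {
		assert(*head == n);
		*head = n->next;
	} else
		n->prev->next = n->next;
	if (n->next != NULL)
		n->next->prev = n->prev;
	n->next = n->prev = NULL;
}

int
main(void)
{
	struct rnode a = { .id = 1 }, b = { .id = 2 }, *head = NULL, *p;

	rlist_insert_head(&head, &a);
	rlist_insert_head(&head, &b);	/* newest lock goes to the front */
	rlist_remove(&head, &a);	/* unlock in any order is O(1) */
	for (p = head; p != NULL; p = p->next)
		printf("id=%d\n", p->id);
	return (0);
}
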
590 check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m) in check_and_init_mutex() argument
594 *m = *mutex; in check_and_init_mutex()
596 if (__predict_false(*m == THR_PSHARED_PTR)) { in check_and_init_mutex()
597 *m = __thr_pshared_offpage(mutex, 0); in check_and_init_mutex()
598 if (*m == NULL) in check_and_init_mutex()
601 shared_mutex_init(*m, NULL); in check_and_init_mutex()
602 } else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) { in check_and_init_mutex()
603 if (*m == THR_MUTEX_DESTROYED) { in check_and_init_mutex()
608 *m = *mutex; in check_and_init_mutex()
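
check_and_init_mutex resolves a pthread_mutex_t handle that may still hold a sentinel value: THR_PSHARED_PTR redirects to the shared off-page object, and values at or below THR_MUTEX_DESTROYED either report a destroyed mutex or trigger lazy initialization of a statically initialized one. The sketch below shows the sentinel-check-then-allocate shape of that logic; the HANDLE_* values and resolve_handle helper are invented, and the real code additionally makes the lazy initialization thread-safe.

/*
 * Sketch of the sentinel-pointer check: a handle word either holds a
 * real object pointer or one of a few small sentinel values. Sentinel
 * values and names here are invented for illustration.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int value;
};

#define	HANDLE_INITIALIZER	((struct obj *)1)
#define	HANDLE_DESTROYED	((struct obj *)2)
#define	HANDLE_MAX_SENTINEL	((struct obj *)2)

static int
resolve_handle(struct obj **handle, struct obj **out)
{
	struct obj *o = *handle;

	if (o <= HANDLE_MAX_SENTINEL) {
		if (o == HANDLE_DESTROYED)
			return (EINVAL);
		/* Statically initialized: allocate the real object now. */
		o = calloc(1, sizeof(*o));
		if (o == NULL)
			return (ENOMEM);
		*handle = o;
	}
	*out = o;
	return (0);
}

int
main(void)
{
	struct obj *handle = HANDLE_INITIALIZER, *o = NULL;

	if (resolve_handle(&handle, &o) == 0) {
		printf("resolved %p, value=%d\n", (void *)o, o->value);
		free(o);
	}
	return (0);
}
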
618 struct pthread_mutex *m; in __Tthr_mutex_trylock() local
623 ret = check_and_init_mutex(mutex, &m); in __Tthr_mutex_trylock()
628 if (m->m_flags & PMUTEX_FLAG_PRIVATE) in __Tthr_mutex_trylock()
630 robust = _mutex_enter_robust(curthread, m); in __Tthr_mutex_trylock()
631 ret = _thr_umutex_trylock(&m->m_lock, id); in __Tthr_mutex_trylock()
633 enqueue_mutex(curthread, m, ret); in __Tthr_mutex_trylock()
635 m->m_lock.m_flags |= UMUTEX_NONCONSISTENT; in __Tthr_mutex_trylock()
636 } else if (PMUTEX_OWNER_ID(m) == id) { in __Tthr_mutex_trylock()
637 ret = mutex_self_trylock(m); in __Tthr_mutex_trylock()
640 _mutex_leave_robust(curthread, m); in __Tthr_mutex_trylock()
642 (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0) in __Tthr_mutex_trylock()
648 mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m, in mutex_lock_sleep() argument
655 if (PMUTEX_OWNER_ID(m) == id) in mutex_lock_sleep()
656 return (mutex_self_lock(m, abstime)); in mutex_lock_sleep()
664 if (__predict_false((m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | in mutex_lock_sleep()
671 count = m->m_spinloops; in mutex_lock_sleep()
672 while (count--) { in mutex_lock_sleep()
673 owner = m->m_lock.m_owner; in mutex_lock_sleep()
675 if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, in mutex_lock_sleep()
685 count = m->m_yieldloops; in mutex_lock_sleep()
686 while (count--) { in mutex_lock_sleep()
688 owner = m->m_lock.m_owner; in mutex_lock_sleep()
690 if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, in mutex_lock_sleep()
700 ret = __thr_umutex_lock(&m->m_lock, id); in mutex_lock_sleep()
701 else if (__predict_false(abstime->tv_nsec < 0 || in mutex_lock_sleep()
702 abstime->tv_nsec >= 1000000000)) in mutex_lock_sleep()
705 ret = __thr_umutex_timedlock(&m->m_lock, id, abstime); in mutex_lock_sleep()
708 enqueue_mutex(curthread, m, ret); in mutex_lock_sleep()
710 m->m_lock.m_flags |= UMUTEX_NONCONSISTENT; in mutex_lock_sleep()
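
mutex_lock_sleep implements the adaptive strategy: spin on the owner field for m_spinloops iterations, then yield between attempts for m_yieldloops iterations, and only then block in the kernel, with or without a timeout. Below is a self-contained sketch of that spin/yield/sleep ladder using C11 atomics; the tuning constants are arbitrary and a short nanosleep stands in for the umutex blocking call.

/*
 * Sketch of the adaptive spin -> yield -> sleep lock acquisition.
 * Constants and names are illustrative, not the libthr values.
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define	SPIN_LOOPS	200
#define	YIELD_LOOPS	64

static int
adaptive_lock(_Atomic uint32_t *owner, uint32_t self)
{
	uint32_t expected;
	int i;

	/* Stage 1: spin, hoping the owner releases the lock soon. */
	for (i = 0; i < SPIN_LOOPS; i++) {
		expected = 0;
		if (atomic_compare_exchange_weak(owner, &expected, self))
			return (0);
	}
	/* Stage 2: yield the CPU between attempts. */
	for (i = 0; i < YIELD_LOOPS; i++) {
		sched_yield();
		expected = 0;
		if (atomic_compare_exchange_weak(owner, &expected, self))
			return (0);
	}
	/* Stage 3: sleep; a real implementation blocks in the kernel. */
	for (;;) {
		struct timespec ts = { 0, 1000000 };	/* 1 ms */

		expected = 0;
		if (atomic_compare_exchange_weak(owner, &expected, self))
			return (0);
		nanosleep(&ts, NULL);
	}
}

int
main(void)
{
	_Atomic uint32_t owner = 0;

	adaptive_lock(&owner, 1);
	printf("locked, owner=%u\n", (unsigned)atomic_load(&owner));
	return (0);
}
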
716 mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime, in mutex_lock_common() argument
724 if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE) in mutex_lock_common()
727 robust = _mutex_enter_robust(curthread, m); in mutex_lock_common()
728 ret = _thr_umutex_trylock2(&m->m_lock, TID(curthread)); in mutex_lock_common()
730 enqueue_mutex(curthread, m, ret); in mutex_lock_common()
732 m->m_lock.m_flags |= UMUTEX_NONCONSISTENT; in mutex_lock_common()
734 ret = mutex_lock_sleep(curthread, m, abstime); in mutex_lock_common()
737 _mutex_leave_robust(curthread, m); in mutex_lock_common()
739 (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0 && !cvattach) in mutex_lock_common()
747 struct pthread_mutex *m; in __Tthr_mutex_lock() local
751 ret = check_and_init_mutex(mutex, &m); in __Tthr_mutex_lock()
753 ret = mutex_lock_common(m, NULL, false, false); in __Tthr_mutex_lock()
761 struct pthread_mutex *m; in __pthread_mutex_timedlock() local
765 ret = check_and_init_mutex(mutex, &m); in __pthread_mutex_timedlock()
767 ret = mutex_lock_common(m, abstime, false, false); in __pthread_mutex_timedlock()
788 _mutex_cv_lock(struct pthread_mutex *m, int count, bool rb_onlist) in _mutex_cv_lock() argument
792 error = mutex_lock_common(m, NULL, true, rb_onlist); in _mutex_cv_lock()
794 m->m_count = count; in _mutex_cv_lock()
799 _mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer) in _mutex_cv_unlock() argument
805 *count = m->m_count; in _mutex_cv_unlock()
806 m->m_count = 0; in _mutex_cv_unlock()
807 (void)mutex_unlock_common(m, true, defer); in _mutex_cv_unlock()
812 _mutex_cv_attach(struct pthread_mutex *m, int count) in _mutex_cv_attach() argument
817 enqueue_mutex(curthread, m, 0); in _mutex_cv_attach()
818 m->m_count = count; in _mutex_cv_attach()
835 *recurse = mp->m_count; in _mutex_cv_detach()
836 mp->m_count = 0; in _mutex_cv_detach()
839 /* Will this happen in real-world ? */ in _mutex_cv_detach()
840 if ((mp->m_flags & PMUTEX_FLAG_DEFERRED) != 0) { in _mutex_cv_detach()
842 mp->m_flags &= ~PMUTEX_FLAG_DEFERRED; in _mutex_cv_detach()
847 _thr_wake_all(curthread->defer_waiters, in _mutex_cv_detach()
848 curthread->nwaiter_defer); in _mutex_cv_detach()
849 curthread->nwaiter_defer = 0; in _mutex_cv_detach()
855 mutex_self_trylock(struct pthread_mutex *m) in mutex_self_trylock() argument
859 switch (PMUTEX_TYPE(m->m_flags)) { in mutex_self_trylock()
868 if (m->m_count + 1 > 0) { in mutex_self_trylock()
869 m->m_count++; in mutex_self_trylock()
884 mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime) in mutex_self_lock() argument
889 switch (PMUTEX_TYPE(m->m_flags)) { in mutex_self_lock()
893 if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || in mutex_self_lock()
894 abstime->tv_nsec >= 1000000000) { in mutex_self_lock()
918 if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || in mutex_self_lock()
919 abstime->tv_nsec >= 1000000000) { in mutex_self_lock()
937 if (m->m_count + 1 > 0) { in mutex_self_lock()
938 m->m_count++; in mutex_self_lock()
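
mutex_self_trylock and mutex_self_lock handle the owner re-locking its own mutex: recursive mutexes bump m_count, and the m_count + 1 > 0 test rejects a wrap-around of the recursion counter. A small sketch of that bookkeeping follows; the rec_state struct and return conventions are simplified.

/*
 * Sketch of recursive self-lock counting with an overflow guard.
 */
#include <errno.h>
#include <stdio.h>

struct rec_state {
	int count;	/* extra acquisitions beyond the first */
};

static int
self_relock(struct rec_state *st)
{
	if (st->count + 1 > 0) {
		st->count++;
		return (0);
	}
	return (EAGAIN);	/* recursion counter would overflow */
}

static int
self_unlock(struct rec_state *st)
{
	if (st->count > 0) {
		st->count--;	/* still held by the owner */
		return (0);
	}
	return (1);		/* last reference: really release */
}

int
main(void)
{
	struct rec_state st = { 0 };

	(void)self_relock(&st);
	(void)self_relock(&st);
	while (self_unlock(&st) == 0)
		;
	printf("fully unlocked, count=%d\n", st.count);
	return (0);
}
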
953 mutex_unlock_common(struct pthread_mutex *m, bool cv, int *mtx_defer) in mutex_unlock_common() argument
959 if (__predict_false(m <= THR_MUTEX_DESTROYED)) { in mutex_unlock_common()
960 if (m == THR_MUTEX_DESTROYED) in mutex_unlock_common()
971 if (__predict_false(PMUTEX_OWNER_ID(m) != id)) in mutex_unlock_common()
975 private = (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0; in mutex_unlock_common()
976 if (__predict_false(PMUTEX_TYPE(m->m_flags) == in mutex_unlock_common()
977 PTHREAD_MUTEX_RECURSIVE && m->m_count > 0)) { in mutex_unlock_common()
978 m->m_count--; in mutex_unlock_common()
980 if ((m->m_flags & PMUTEX_FLAG_DEFERRED) != 0) { in mutex_unlock_common()
982 m->m_flags &= ~PMUTEX_FLAG_DEFERRED; in mutex_unlock_common()
986 robust = _mutex_enter_robust(curthread, m); in mutex_unlock_common()
987 dequeue_mutex(curthread, m); in mutex_unlock_common()
988 error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer); in mutex_unlock_common()
991 _thr_wake_all(curthread->defer_waiters, in mutex_unlock_common()
992 curthread->nwaiter_defer); in mutex_unlock_common()
993 curthread->nwaiter_defer = 0; in mutex_unlock_common()
998 _mutex_leave_robust(curthread, m); in mutex_unlock_common()
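
mutex_unlock_common releases the lock word and, when wakeups were deferred (PMUTEX_FLAG_DEFERRED), flushes the thread's queued waiters in one batch via _thr_wake_all only after the lock word has been released. The sketch below shows the queue-then-flush idea with an invented defer_ctx; printing stands in for the actual wake call.

/*
 * Sketch of deferred wakeup batching: wakeups requested while a lock
 * is held are queued per thread and issued together after release.
 */
#include <stdio.h>

#define	MAX_DEFER	8

struct defer_ctx {
	unsigned *waiters[MAX_DEFER];
	int nwaiters;
};

static void
defer_wake(struct defer_ctx *ctx, unsigned *addr)
{
	if (ctx->nwaiters < MAX_DEFER)
		ctx->waiters[ctx->nwaiters++] = addr;
}

static void
flush_wakes(struct defer_ctx *ctx)
{
	int i;

	/* A real implementation would issue one wake per address. */
	for (i = 0; i < ctx->nwaiters; i++)
		printf("wake %p\n", (void *)ctx->waiters[i]);
	ctx->nwaiters = 0;
}

int
main(void)
{
	struct defer_ctx ctx = { .nwaiters = 0 };
	unsigned a = 0, b = 0;

	defer_wake(&ctx, &a);	/* queued while the mutex is still held */
	defer_wake(&ctx, &b);
	/* ... release the lock word here ... */
	flush_wakes(&ctx);	/* then wake everyone in one go */
	return (0);
}
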
1009 struct pthread_mutex *m; in _pthread_mutex_getprioceiling() local
1012 m = __thr_pshared_offpage(__DECONST(void *, mutex), 0); in _pthread_mutex_getprioceiling()
1013 if (m == NULL) in _pthread_mutex_getprioceiling()
1015 shared_mutex_init(m, NULL); in _pthread_mutex_getprioceiling()
1017 m = *mutex; in _pthread_mutex_getprioceiling()
1018 if (m <= THR_MUTEX_DESTROYED) in _pthread_mutex_getprioceiling()
1021 if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) in _pthread_mutex_getprioceiling()
1023 *prioceiling = m->m_lock.m_ceilings[0]; in _pthread_mutex_getprioceiling()
1032 struct pthread_mutex *m, *m1, *m2; in _pthread_mutex_setprioceiling() local
1037 m = __thr_pshared_offpage(mutex, 0); in _pthread_mutex_setprioceiling()
1038 if (m == NULL) in _pthread_mutex_setprioceiling()
1040 shared_mutex_init(m, NULL); in _pthread_mutex_setprioceiling()
1042 m = *mutex; in _pthread_mutex_setprioceiling()
1043 if (m <= THR_MUTEX_DESTROYED) in _pthread_mutex_setprioceiling()
1046 if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) in _pthread_mutex_setprioceiling()
1049 ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling); in _pthread_mutex_setprioceiling()
1054 if (PMUTEX_OWNER_ID(m) == TID(curthread)) { in _pthread_mutex_setprioceiling()
1055 mutex_assert_is_owned(m); in _pthread_mutex_setprioceiling()
1056 m1 = TAILQ_PREV(m, mutex_queue, m_qe); in _pthread_mutex_setprioceiling()
1057 m2 = TAILQ_NEXT(m, m_qe); in _pthread_mutex_setprioceiling()
1058 if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) || in _pthread_mutex_setprioceiling()
1059 (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) { in _pthread_mutex_setprioceiling()
1060 qidx = mutex_qidx(m); in _pthread_mutex_setprioceiling()
1061 q = &curthread->mq[qidx]; in _pthread_mutex_setprioceiling()
1062 qp = &curthread->mq[qidx + 1]; in _pthread_mutex_setprioceiling()
1063 TAILQ_REMOVE(q, m, m_qe); in _pthread_mutex_setprioceiling()
1064 if (!is_pshared_mutex(m)) in _pthread_mutex_setprioceiling()
1065 TAILQ_REMOVE(qp, m, m_pqe); in _pthread_mutex_setprioceiling()
1067 if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) { in _pthread_mutex_setprioceiling()
1068 TAILQ_INSERT_BEFORE(m2, m, m_qe); in _pthread_mutex_setprioceiling()
1069 if (!is_pshared_mutex(m)) { in _pthread_mutex_setprioceiling()
1077 m, m_pqe); in _pthread_mutex_setprioceiling()
1080 m, m_pqe); in _pthread_mutex_setprioceiling()
1086 TAILQ_INSERT_TAIL(q, m, m_qe); in _pthread_mutex_setprioceiling()
1087 if (!is_pshared_mutex(m)) in _pthread_mutex_setprioceiling()
1088 TAILQ_INSERT_TAIL(qp, m, m_pqe); in _pthread_mutex_setprioceiling()
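
After changing a ceiling, _pthread_mutex_setprioceiling may have to remove the mutex from the per-thread priority-protected queue and reinsert it so the queue stays ordered by ceiling, which set_inherited_priority relies on when it takes the last entry. The following sketch shows that unlink-and-reinsert-sorted step on a simplified singly linked list; the pp_lock type and helper are invented.

/*
 * Sketch of repositioning an entry after its ceiling changes so the
 * list stays sorted by ceiling.
 */
#include <stdio.h>

struct pp_lock {
	unsigned ceiling;
	struct pp_lock *next;
};

static void
pp_reinsert_sorted(struct pp_lock **head, struct pp_lock *m)
{
	struct pp_lock **pp;

	/* Unlink m wherever it currently sits. */
	for (pp = head; *pp != NULL; pp = &(*pp)->next)
		if (*pp == m) {
			*pp = m->next;
			break;
		}
	/* Reinsert before the first entry with a larger ceiling. */
	for (pp = head; *pp != NULL && (*pp)->ceiling <= m->ceiling;
	    pp = &(*pp)->next)
		;
	m->next = *pp;
	*pp = m;
}

int
main(void)
{
	struct pp_lock a = { 10, NULL }, b = { 20, NULL }, c = { 30, NULL };
	struct pp_lock *head = &a, *p;

	a.next = &b;
	b.next = &c;
	b.ceiling = 40;			/* ceiling raised on b */
	pp_reinsert_sorted(&head, &b);	/* b now moves after c */
	for (p = head; p != NULL; p = p->next)
		printf("%u ", p->ceiling);
	printf("\n");
	return (0);
}
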
1097 struct pthread_mutex *m; in _pthread_mutex_getspinloops_np() local
1100 ret = check_and_init_mutex(mutex, &m); in _pthread_mutex_getspinloops_np()
1102 *count = m->m_spinloops; in _pthread_mutex_getspinloops_np()
1109 struct pthread_mutex *m; in __pthread_mutex_setspinloops_np() local
1112 ret = check_and_init_mutex(mutex, &m); in __pthread_mutex_setspinloops_np()
1114 m->m_spinloops = count; in __pthread_mutex_setspinloops_np()
1121 struct pthread_mutex *m; in _pthread_mutex_getyieldloops_np() local
1124 ret = check_and_init_mutex(mutex, &m); in _pthread_mutex_getyieldloops_np()
1126 *count = m->m_yieldloops; in _pthread_mutex_getyieldloops_np()
1133 struct pthread_mutex *m; in __pthread_mutex_setyieldloops_np() local
1136 ret = check_and_init_mutex(mutex, &m); in __pthread_mutex_setyieldloops_np()
1138 m->m_yieldloops = count; in __pthread_mutex_setyieldloops_np()
1145 struct pthread_mutex *m; in _pthread_mutex_isowned_np() local
1148 m = __thr_pshared_offpage(mutex, 0); in _pthread_mutex_isowned_np()
1149 if (m == NULL) in _pthread_mutex_isowned_np()
1151 shared_mutex_init(m, NULL); in _pthread_mutex_isowned_np()
1153 m = *mutex; in _pthread_mutex_isowned_np()
1154 if (m <= THR_MUTEX_DESTROYED) in _pthread_mutex_isowned_np()
1157 return (PMUTEX_OWNER_ID(m) == TID(_get_curthread())); in _pthread_mutex_isowned_np()
1177 struct pthread_mutex *m; in _Tthr_mutex_consistent() local
1181 m = __thr_pshared_offpage(mutex, 0); in _Tthr_mutex_consistent()
1182 if (m == NULL) in _Tthr_mutex_consistent()
1184 shared_mutex_init(m, NULL); in _Tthr_mutex_consistent()
1186 m = *mutex; in _Tthr_mutex_consistent()
1187 if (m <= THR_MUTEX_DESTROYED) in _Tthr_mutex_consistent()
1191 if ((m->m_lock.m_flags & (UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) != in _Tthr_mutex_consistent()
1194 if (PMUTEX_OWNER_ID(m) != TID(curthread)) in _Tthr_mutex_consistent()
1196 m->m_lock.m_flags &= ~UMUTEX_NONCONSISTENT; in _Tthr_mutex_consistent()