xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision 9718f18471f700b271eb898c764e02b7fcd3766f)
/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * Copyright (c) 2015, 2016 The FreeBSD Foundation
 *
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"

_Static_assert(sizeof(struct pthread_mutex) <= THR_PAGE_SIZE_MIN,
    "pthread_mutex is too large for off-page");

/*
 * For adaptive mutexes, the number of times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	2000
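
/*
 * Illustrative sketch (not part of libthr): an application opts into
 * the adaptive spin path implemented in mutex_lock_sleep() below by
 * requesting this non-portable mutex type at init time:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t mtx;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
 *	pthread_mutex_init(&mtx, &attr);
 *
 * The spin count defaults to MUTEX_ADAPTIVE_SPINS (or the process-wide
 * _thr_spinloops tunable) and can be adjusted per-mutex with
 * pthread_mutex_setspinloops_np(), declared below.
 */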

/*
 * Prototypes
 */
int	__pthread_mutex_timedlock(pthread_mutex_t * __restrict mutex,
		const struct timespec * __restrict abstime);
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
				const struct timespec *abstime);
static int	mutex_unlock_common(struct pthread_mutex *, bool, int *);
static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
				const struct timespec *);
static void	mutex_init_robust(struct pthread *curthread);
static int	mutex_qidx(struct pthread_mutex *m);
static bool	is_robust_mutex(struct pthread_mutex *m);
static bool	is_pshared_mutex(struct pthread_mutex *m);

__weak_reference(__Tthr_mutex_init, pthread_mutex_init);
__weak_reference(__Tthr_mutex_init, __pthread_mutex_init);
__strong_reference(__Tthr_mutex_init, _pthread_mutex_init);
__weak_reference(__Tthr_mutex_lock, pthread_mutex_lock);
__weak_reference(__Tthr_mutex_lock, __pthread_mutex_lock);
__strong_reference(__Tthr_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__Tthr_mutex_trylock, pthread_mutex_trylock);
__weak_reference(__Tthr_mutex_trylock, __pthread_mutex_trylock);
__strong_reference(__Tthr_mutex_trylock, _pthread_mutex_trylock);
__weak_reference(_Tthr_mutex_consistent, pthread_mutex_consistent);
__weak_reference(_Tthr_mutex_consistent, _pthread_mutex_consistent);
__strong_reference(_Tthr_mutex_consistent, __pthread_mutex_consistent);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_thr_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_thr_mutex_destroy, _pthread_mutex_destroy);
__weak_reference(_thr_mutex_unlock, pthread_mutex_unlock);
__weak_reference(_thr_mutex_unlock, _pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

static void
mutex_init_link(struct pthread_mutex *m __unused)
{

#if defined(_PTHREADS_INVARIANTS)
	m->m_qe.tqe_prev = NULL;
	m->m_qe.tqe_next = NULL;
	m->m_pqe.tqe_prev = NULL;
	m->m_pqe.tqe_next = NULL;
#endif
}

static void
mutex_assert_is_owned(struct pthread_mutex *m __unused)
{

#if defined(_PTHREADS_INVARIANTS)
	if (__predict_false(m->m_qe.tqe_prev == NULL))
		PANIC("mutex %p own %#x is not on list %p %p",
		    m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next);
#endif
}

static void
mutex_assert_not_owned(struct pthread *curthread __unused,
    struct pthread_mutex *m __unused)
{

#if defined(_PTHREADS_INVARIANTS)
	if (__predict_false(m->m_qe.tqe_prev != NULL ||
	    m->m_qe.tqe_next != NULL))
		PANIC("mutex %p own %#x is on list %p %p",
		    m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next);
	if (__predict_false(is_robust_mutex(m) &&
	    (m->m_lock.m_rb_lnk != 0 || m->m_rb_prev != NULL ||
	    (is_pshared_mutex(m) && curthread->robust_list ==
	    (uintptr_t)&m->m_lock) ||
	    (!is_pshared_mutex(m) && curthread->priv_robust_list ==
	    (uintptr_t)&m->m_lock))))
		PANIC(
    "mutex %p own %#x is on robust linkage %p %p head %p phead %p",
		    m, m->m_lock.m_owner, (void *)m->m_lock.m_rb_lnk,
		    m->m_rb_prev, (void *)curthread->robust_list,
		    (void *)curthread->priv_robust_list);
#endif
}

static bool
is_pshared_mutex(struct pthread_mutex *m)
{

	return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0);
}

static bool
is_robust_mutex(struct pthread_mutex *m)
{

	return ((m->m_lock.m_flags & UMUTEX_ROBUST) != 0);
}

int
_mutex_enter_robust(struct pthread *curthread, struct pthread_mutex *m)
{

#if defined(_PTHREADS_INVARIANTS)
	if (__predict_false(curthread->inact_mtx != 0))
		PANIC("inact_mtx enter");
#endif
	if (!is_robust_mutex(m))
		return (0);

	mutex_init_robust(curthread);
	curthread->inact_mtx = (uintptr_t)&m->m_lock;
	return (1);
}

void
_mutex_leave_robust(struct pthread *curthread, struct pthread_mutex *m __unused)
{

#if defined(_PTHREADS_INVARIANTS)
	if (__predict_false(curthread->inact_mtx != (uintptr_t)&m->m_lock))
		PANIC("inact_mtx leave");
#endif
	curthread->inact_mtx = 0;
}
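
/*
 * Illustrative sketch (not part of libthr): the robust bookkeeping
 * above implements the POSIX robust-mutex protocol.  An application
 * sharing a robust mutex typically recovers from owner death so:
 *
 *	pthread_mutexattr_t attr;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
 *	pthread_mutex_init(&mtx, &attr);
 *	...
 *	error = pthread_mutex_lock(&mtx);
 *	if (error == EOWNERDEAD) {
 *		repair_state();			// hypothetical helper
 *		pthread_mutex_consistent(&mtx);	// otherwise the unlock
 *	}					// marks it unrecoverable
 *	pthread_mutex_unlock(&mtx);
 */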

static int
mutex_check_attr(const struct pthread_mutex_attr *attr)
{

	if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
	    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
		return (EINVAL);
	if (attr->m_protocol < PTHREAD_PRIO_NONE ||
	    attr->m_protocol > PTHREAD_PRIO_PROTECT)
		return (EINVAL);
	return (0);
}

static void
mutex_init_robust(struct pthread *curthread)
{
	struct umtx_robust_lists_params rb;

	if (curthread == NULL)
		curthread = _get_curthread();
	if (curthread->robust_inited)
		return;
	rb.robust_list_offset = (uintptr_t)&curthread->robust_list;
	rb.robust_priv_list_offset = (uintptr_t)&curthread->priv_robust_list;
	rb.robust_inact_offset = (uintptr_t)&curthread->inact_mtx;
	_umtx_op(NULL, UMTX_OP_ROBUST_LISTS, sizeof(rb), &rb, NULL);
	curthread->robust_inited = 1;
}

static void
mutex_init_body(struct pthread_mutex *pmutex,
    const struct pthread_mutex_attr *attr)
{

	pmutex->m_flags = attr->m_type;
	pmutex->m_count = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	mutex_init_link(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
		break;
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	}
	if (attr->m_pshared == PTHREAD_PROCESS_SHARED)
		pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED;
	if (attr->m_robust == PTHREAD_MUTEX_ROBUST) {
		mutex_init_robust(NULL);
		pmutex->m_lock.m_flags |= UMUTEX_ROBUST;
	}
	if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}
}

static int
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;
	int error;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = mutex_attr;
		error = mutex_check_attr(attr);
		if (error != 0)
			return (error);
	}
	if ((pmutex = (pthread_mutex_t)calloc_cb(1,
	    sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);
	mutex_init_body(pmutex, attr);
	*mutex = pmutex;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == THR_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_default,
		    __thr_calloc);
	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
		    __thr_calloc);
	else
		ret = 0;
	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}
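
/*
 * Illustrative note (not part of libthr): init_static() is what makes
 * statically initialized mutexes work lazily, e.g.:
 *
 *	static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
 *
 * The first lock or trylock of such a mutex reaches
 * check_and_init_mutex(), which sees the initializer token and calls
 * init_static() to allocate the real mutex under _mutex_static_lock.
 */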

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->mq[mutex_qidx(m)], mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

static void
shared_mutex_init(struct pthread_mutex *pmtx, const struct
    pthread_mutex_attr *mutex_attr)
{
	static const struct pthread_mutex_attr foobar_mutex_attr = {
		.m_type = PTHREAD_MUTEX_DEFAULT,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0,
		.m_pshared = PTHREAD_PROCESS_SHARED,
		.m_robust = PTHREAD_MUTEX_STALLED,
	};
	bool done;

	/*
	 * Hack to allow multiple pthread_mutex_init() calls on the
	 * same process-shared mutex.  We rely on the kernel allocating
	 * a zeroed offpage for the mutex, i.e. the
	 * PMUTEX_INITSTAGE_ALLOC value must be zero.
	 */
	for (done = false; !done;) {
		switch (pmtx->m_ps) {
		case PMUTEX_INITSTAGE_DONE:
			atomic_thread_fence_acq();
			done = true;
			break;
		case PMUTEX_INITSTAGE_ALLOC:
			if (atomic_cmpset_int(&pmtx->m_ps,
			    PMUTEX_INITSTAGE_ALLOC, PMUTEX_INITSTAGE_BUSY)) {
				if (mutex_attr == NULL)
					mutex_attr = &foobar_mutex_attr;
				mutex_init_body(pmtx, mutex_attr);
				atomic_store_rel_int(&pmtx->m_ps,
				    PMUTEX_INITSTAGE_DONE);
				done = true;
			}
			break;
		case PMUTEX_INITSTAGE_BUSY:
			_pthread_yield();
			break;
		default:
			PANIC("corrupted offpage");
			break;
		}
	}
}
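
/*
 * Illustrative sketch (not part of libthr): the init-stage protocol
 * above lets several processes race to initialize the same
 * process-shared mutex, e.g. one living in a MAP_SHARED region:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t *mtx;	// points into shared memory
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	pthread_mutex_init(mtx, &attr);	// safe in each process
 *
 * Whichever caller wins the ALLOC -> BUSY transition initializes the
 * offpage; the others yield until they observe DONE.
 */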

int
__Tthr_mutex_init(pthread_mutex_t * __restrict mutex,
    const pthread_mutexattr_t * __restrict mutex_attr)
{
	struct pthread_mutex *pmtx;
	int ret;

	_thr_check_init();

	if (mutex_attr != NULL) {
		ret = mutex_check_attr(*mutex_attr);
		if (ret != 0)
			return (ret);
	}
	if (mutex_attr == NULL ||
	    (*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) {
		__thr_malloc_init();
		return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL,
		    __thr_calloc));
	}
	pmtx = __thr_pshared_offpage(__DECONST(void *, mutex), 1);
	if (pmtx == NULL)
		return (EFAULT);
	*mutex = THR_PSHARED_PTR;
	shared_mutex_init(pmtx, *mutex_attr);
	return (0);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0,
		.m_pshared = PTHREAD_PROCESS_PRIVATE,
		.m_robust = PTHREAD_MUTEX_STALLED,
	};
	int ret;

	ret = mutex_init(mutex, &attr, calloc_cb);
	if (ret == 0)
		(*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
	return (ret);
}

/*
 * Fix mutex ownership for the child process.
 *
 * Process-private mutex ownership is transmitted from the forking
 * thread to the child process.
 *
 * Process-shared mutexes should not be inherited because their owner
 * is the forking thread, which lives in the parent process; they are
 * removed from the owned mutex list.
 */
static void
queue_fork(struct pthread *curthread, struct mutex_queue *q,
    struct mutex_queue *qp, uint bit)
{
	struct pthread_mutex *m;

	TAILQ_INIT(q);
	TAILQ_FOREACH(m, qp, m_pqe) {
		TAILQ_INSERT_TAIL(q, m, m_qe);
		m->m_lock.m_owner = TID(curthread) | bit;
	}
}

void
_mutex_fork(struct pthread *curthread)
{

	queue_fork(curthread, &curthread->mq[TMQ_NORM],
	    &curthread->mq[TMQ_NORM_PRIV], 0);
	queue_fork(curthread, &curthread->mq[TMQ_NORM_PP],
	    &curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED);
	queue_fork(curthread, &curthread->mq[TMQ_ROBUST_PP],
	    &curthread->mq[TMQ_ROBUST_PP_PRIV], UMUTEX_CONTESTED);
	curthread->robust_list = 0;
}
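
/*
 * Illustrative note (not part of libthr): after fork() the child has
 * exactly one thread, so each owned-mutex queue is rebuilt from the
 * corresponding private queue alone, and m_owner is rewritten to the
 * child thread's TID (with UMUTEX_CONTESTED set on the
 * priority-protected queues).  Process-shared mutexes simply drop off
 * the lists, matching the comment above queue_fork().
 */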

int
_thr_mutex_destroy(pthread_mutex_t *mutex)
{
	pthread_mutex_t m, m1;
	int ret;

	m = *mutex;
	if (m < THR_MUTEX_DESTROYED) {
		ret = 0;
	} else if (m == THR_MUTEX_DESTROYED) {
		ret = EINVAL;
	} else {
		if (m == THR_PSHARED_PTR) {
			m1 = __thr_pshared_offpage(mutex, 0);
			if (m1 != NULL) {
				if ((uint32_t)m1->m_lock.m_owner !=
				    UMUTEX_RB_OWNERDEAD) {
					mutex_assert_not_owned(
					    _get_curthread(), m1);
				}
				__thr_pshared_destroy(mutex);
			}
			*mutex = THR_MUTEX_DESTROYED;
			return (0);
		}
		if (PMUTEX_OWNER_ID(m) != 0 &&
		    (uint32_t)m->m_lock.m_owner != UMUTEX_RB_NOTRECOV) {
			ret = EBUSY;
		} else {
			*mutex = THR_MUTEX_DESTROYED;
			mutex_assert_not_owned(_get_curthread(), m);
			__thr_free(m);
			ret = 0;
		}
	}

	return (ret);
}

static int
mutex_qidx(struct pthread_mutex *m)
{

	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (TMQ_NORM);
	return (is_robust_mutex(m) ? TMQ_ROBUST_PP : TMQ_NORM_PP);
}

/*
 * Both enqueue_mutex() and dequeue_mutex() operate on the
 * thread-private linkage of the locked mutexes and on the robust
 * linkage.
 *
 * The robust list, as seen by the kernel, must stay consistent even
 * if a thread terminates at an arbitrary moment.  Since both enqueue
 * and dequeue on the list walked by the kernel consist of rewriting a
 * single forward pointer, they are safe.  On the other hand, the
 * rewrite of the back pointer is not atomic WRT the forward one, but
 * the kernel does not care.
 */
static void
enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m,
    int error)
{
	struct pthread_mutex *m1;
	uintptr_t *rl;
	int qidx;

	/* Add to the list of owned mutexes: */
	if (error != EOWNERDEAD)
		mutex_assert_not_owned(curthread, m);
	qidx = mutex_qidx(m);
	TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe);
	if (!is_pshared_mutex(m))
		TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe);
	if (is_robust_mutex(m)) {
		rl = is_pshared_mutex(m) ? &curthread->robust_list :
		    &curthread->priv_robust_list;
		m->m_rb_prev = NULL;
		if (*rl != 0) {
			m1 = __containerof((void *)*rl,
			    struct pthread_mutex, m_lock);
			m->m_lock.m_rb_lnk = (uintptr_t)&m1->m_lock;
			m1->m_rb_prev = m;
		} else {
			m1 = NULL;
			m->m_lock.m_rb_lnk = 0;
		}
		*rl = (uintptr_t)&m->m_lock;
	}
}
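
/*
 * Illustrative note (not part of libthr): enqueue_mutex() pushes a
 * robust mutex at the head of the kernel-visible singly-linked list
 * (*rl), so the list runs from most- to least-recently acquired.
 * dequeue_mutex() below can unlink from the middle because userspace
 * additionally maintains the m_rb_prev back pointer, which the kernel
 * never follows.
 */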

static void
dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *mp, *mn;
	int qidx;

	mutex_assert_is_owned(m);
	qidx = mutex_qidx(m);
	if (is_robust_mutex(m)) {
		mp = m->m_rb_prev;
		if (mp == NULL) {
			if (is_pshared_mutex(m)) {
				curthread->robust_list = m->m_lock.m_rb_lnk;
			} else {
				curthread->priv_robust_list =
				    m->m_lock.m_rb_lnk;
			}
		} else {
			mp->m_lock.m_rb_lnk = m->m_lock.m_rb_lnk;
		}
		if (m->m_lock.m_rb_lnk != 0) {
			mn = __containerof((void *)m->m_lock.m_rb_lnk,
			    struct pthread_mutex, m_lock);
			mn->m_rb_prev = m->m_rb_prev;
		}
		m->m_lock.m_rb_lnk = 0;
		m->m_rb_prev = NULL;
	}
	TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe);
	if (!is_pshared_mutex(m))
		TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0)
		set_inherited_priority(curthread, m);
	mutex_init_link(m);
}

static int
check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m)
{
	int ret;

	*m = *mutex;
	ret = 0;
	if (__predict_false(*m == THR_PSHARED_PTR)) {
		*m = __thr_pshared_offpage(mutex, 0);
		if (*m == NULL)
			ret = EINVAL;
		else
			shared_mutex_init(*m, NULL);
	} else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) {
		if (*m == THR_MUTEX_DESTROYED) {
			ret = EINVAL;
		} else {
			ret = init_static(_get_curthread(), mutex);
			if (ret == 0)
				*m = *mutex;
		}
	}
	return (ret);
}

int
__Tthr_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread;
	struct pthread_mutex *m;
	uint32_t id;
	int ret, robust;

	_thr_check_init();
	ret = check_and_init_mutex(mutex, &m);
	if (ret != 0)
		return (ret);
	curthread = _get_curthread();
	id = TID(curthread);
	if (m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	robust = _mutex_enter_robust(curthread, m);
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (__predict_true(ret == 0) || ret == EOWNERDEAD) {
		enqueue_mutex(curthread, m, ret);
		if (ret == EOWNERDEAD)
			m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
	} else if (PMUTEX_OWNER_ID(m) == id) {
		ret = mutex_self_trylock(m);
	} /* else {} */
	if (robust)
		_mutex_leave_robust(curthread, m);
	if (ret != 0 && ret != EOWNERDEAD &&
	    (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
    const struct timespec *abstime)
{
	uint32_t id, owner;
	int count, ret;

	id = TID(curthread);
	if (PMUTEX_OWNER_ID(m) == id)
		return (mutex_self_lock(m, abstime));

	/*
	 * For adaptive mutexes, spin for a bit in the expectation
	 * that if the application requests this mutex type then
	 * the lock is likely to be released quickly and it is
	 * faster than entering the kernel.
	 */
	if (__predict_false((m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT |
	    UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) != 0))
		goto sleep_in_kernel;

	if (!_thr_is_smp)
		goto yield_loop;

	count = m->m_spinloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,
			    id | owner)) {
				ret = 0;
				goto done;
			}
		}
		CPU_SPINWAIT;
	}

yield_loop:
	count = m->m_yieldloops;
	while (count--) {
		_sched_yield();
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,
			    id | owner)) {
				ret = 0;
				goto done;
			}
		}
	}

sleep_in_kernel:
	if (abstime == NULL)
		ret = __thr_umutex_lock(&m->m_lock, id);
	else if (__predict_false(abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000))
		ret = EINVAL;
	else
		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
done:
	if (ret == 0 || ret == EOWNERDEAD) {
		enqueue_mutex(curthread, m, ret);
		if (ret == EOWNERDEAD)
			m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
	}
	return (ret);
}

static __always_inline int
mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime,
    bool cvattach, bool rb_onlist)
{
	struct pthread *curthread;
	int ret, robust;

	robust = 0;	/* pacify gcc */
	curthread = _get_curthread();
	if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	if (!rb_onlist)
		robust = _mutex_enter_robust(curthread, m);
	ret = _thr_umutex_trylock2(&m->m_lock, TID(curthread));
	if (__predict_true(ret == 0) || ret == EOWNERDEAD) {
		enqueue_mutex(curthread, m, ret);
		if (ret == EOWNERDEAD)
			m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
	} else {
		ret = mutex_lock_sleep(curthread, m, abstime);
	}
	if (!rb_onlist && robust)
		_mutex_leave_robust(curthread, m);
	if (ret != 0 && ret != EOWNERDEAD &&
	    (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0 && !cvattach)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__Tthr_mutex_lock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	int ret;

	_thr_check_init();
	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		ret = mutex_lock_common(m, NULL, false, false);
	return (ret);
}

int
__pthread_mutex_timedlock(pthread_mutex_t * __restrict mutex,
    const struct timespec * __restrict abstime)
{
	struct pthread_mutex *m;
	int ret;

	_thr_check_init();
	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		ret = mutex_lock_common(m, abstime, false, false);
	return (ret);
}

int
_thr_mutex_unlock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *mp;

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
		shared_mutex_init(mp, NULL);
	} else {
		mp = *mutex;
	}
	return (mutex_unlock_common(mp, false, NULL));
}

int
_mutex_cv_lock(struct pthread_mutex *m, int count, bool rb_onlist)
{
	int error;

	error = mutex_lock_common(m, NULL, true, rb_onlist);
	if (error == 0 || error == EOWNERDEAD)
		m->m_count = count;
	return (error);
}

int
_mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer)
{

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_count = 0;
	(void)mutex_unlock_common(m, true, defer);
	return (0);
}

int
_mutex_cv_attach(struct pthread_mutex *m, int count)
{
	struct pthread *curthread;

	curthread = _get_curthread();
	enqueue_mutex(curthread, m, 0);
	m->m_count = count;
	return (0);
}

int
_mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
{
	struct pthread *curthread;
	int deferred, error;

	curthread = _get_curthread();
	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*recurse = mp->m_count;
	mp->m_count = 0;
	dequeue_mutex(curthread, mp);

	/* Will this happen in the real world? */
	if ((mp->m_flags & PMUTEX_FLAG_DEFERRED) != 0) {
		deferred = 1;
		mp->m_flags &= ~PMUTEX_FLAG_DEFERRED;
	} else
		deferred = 0;

	if (deferred) {
		_thr_wake_all(curthread->defer_waiters,
		    curthread->nwaiter_defer);
		curthread->nwaiter_defer = 0;
	}
	return (0);
}

static int
mutex_self_trylock(struct pthread_mutex *m)
{
	int ret;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
{
	struct timespec	ts1, ts2;
	int ret;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}
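
/*
 * Illustrative sketch (not part of libthr): the per-type self-lock
 * behavior above is visible to applications, e.g.:
 *
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
 *	pthread_mutex_init(&mtx, &attr);
 *	pthread_mutex_lock(&mtx);
 *	assert(pthread_mutex_lock(&mtx) == EDEADLK);
 *
 * A NORMAL-type mutex instead deadlocks by design: an untimed relock
 * sleeps forever in the nanosleep() loop above, while a timed relock
 * waits out the timeout and returns ETIMEDOUT.
 */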

static __always_inline int
mutex_unlock_common(struct pthread_mutex *m, bool cv, int *mtx_defer)
{
	struct pthread *curthread;
	uint32_t id;
	int deferred, error, private, robust;

	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
		if (m == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}

	curthread = _get_curthread();
	id = TID(curthread);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(PMUTEX_OWNER_ID(m) != id))
		return (EPERM);

	error = 0;
	private = (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0;
	if (__predict_false(PMUTEX_TYPE(m->m_flags) ==
	    PTHREAD_MUTEX_RECURSIVE && m->m_count > 0)) {
		m->m_count--;
	} else {
		if ((m->m_flags & PMUTEX_FLAG_DEFERRED) != 0) {
			deferred = 1;
			m->m_flags &= ~PMUTEX_FLAG_DEFERRED;
		} else
			deferred = 0;

		robust = _mutex_enter_robust(curthread, m);
		dequeue_mutex(curthread, m);
		error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);
		if (deferred) {
			if (mtx_defer == NULL) {
				_thr_wake_all(curthread->defer_waiters,
				    curthread->nwaiter_defer);
				curthread->nwaiter_defer = 0;
			} else
				*mtx_defer = 1;
		}
		if (robust)
			_mutex_leave_robust(curthread, m);
	}
	if (!cv && private)
		THR_CRITICAL_LEAVE(curthread);
	return (error);
}
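
/*
 * Illustrative note (not part of libthr): PMUTEX_FLAG_DEFERRED marks a
 * mutex for which condition-variable wakeups were queued on
 * curthread->defer_waiters instead of being issued immediately.  The
 * unlock path above then performs one batched _thr_wake_all() after
 * the lock is released, so woken threads do not immediately block on
 * a mutex that is still held.
 */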
1004360a5194SJeff Roberson 
10059ad4b644SDavid Xu int
_pthread_mutex_getprioceiling(const pthread_mutex_t * __restrict mutex,int * __restrict prioceiling)1006b6413b6dSPedro F. Giffuni _pthread_mutex_getprioceiling(const pthread_mutex_t * __restrict mutex,
1007b6413b6dSPedro F. Giffuni     int * __restrict prioceiling)
1008a091d823SDavid Xu {
1009bbb64c21SDavid Xu 	struct pthread_mutex *m;
1010a091d823SDavid Xu 
10111bdbd705SKonstantin Belousov 	if (*mutex == THR_PSHARED_PTR) {
1012b6413b6dSPedro F. Giffuni 		m = __thr_pshared_offpage(__DECONST(void *, mutex), 0);
10131bdbd705SKonstantin Belousov 		if (m == NULL)
10141bdbd705SKonstantin Belousov 			return (EINVAL);
10156044c03aSKonstantin Belousov 		shared_mutex_init(m, NULL);
10161bdbd705SKonstantin Belousov 	} else {
1017bbb64c21SDavid Xu 		m = *mutex;
10181bdbd705SKonstantin Belousov 		if (m <= THR_MUTEX_DESTROYED)
10191bdbd705SKonstantin Belousov 			return (EINVAL);
102037a6356bSDavid Xu 	}
10211bdbd705SKonstantin Belousov 	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
10221bdbd705SKonstantin Belousov 		return (EINVAL);
10231bdbd705SKonstantin Belousov 	*prioceiling = m->m_lock.m_ceilings[0];
10241bdbd705SKonstantin Belousov 	return (0);
1025a091d823SDavid Xu }
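/*
 * Editor's note: an illustrative sketch (not part of the original
 * source).  The UMUTEX_PRIO_PROTECT check above means the ceiling can
 * only be queried on a PTHREAD_PRIO_PROTECT mutex; any other protocol
 * yields EINVAL.
 */
#include <pthread.h>

static int
query_ceiling(int *ceiling)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t mtx;
	int ret;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
	pthread_mutexattr_setprioceiling(&attr, 10);
	pthread_mutex_init(&mtx, &attr);
	pthread_mutexattr_destroy(&attr);

	ret = pthread_mutex_getprioceiling(&mtx, ceiling); /* *ceiling == 10 */
	pthread_mutex_destroy(&mtx);
	return (ret);
}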
1026a091d823SDavid Xu 
10279ad4b644SDavid Xu int
1028b6413b6dSPedro F. Giffuni _pthread_mutex_setprioceiling(pthread_mutex_t * __restrict mutex,
1029b6413b6dSPedro F. Giffuni     int ceiling, int * __restrict old_ceiling)
1030360a5194SJeff Roberson {
10311bdbd705SKonstantin Belousov 	struct pthread *curthread;
10325470bb56SDavid Xu 	struct pthread_mutex *m, *m1, *m2;
10331bdbd705SKonstantin Belousov 	struct mutex_queue *q, *qp;
10342a339d9eSKonstantin Belousov 	int qidx, ret;
1035360a5194SJeff Roberson 
10361bdbd705SKonstantin Belousov 	if (*mutex == THR_PSHARED_PTR) {
10371bdbd705SKonstantin Belousov 		m = __thr_pshared_offpage(mutex, 0);
10381bdbd705SKonstantin Belousov 		if (m == NULL)
10391bdbd705SKonstantin Belousov 			return (EINVAL);
10406044c03aSKonstantin Belousov 		shared_mutex_init(m, NULL);
10411bdbd705SKonstantin Belousov 	} else {
10425470bb56SDavid Xu 		m = *mutex;
10431bdbd705SKonstantin Belousov 		if (m <= THR_MUTEX_DESTROYED)
10441bdbd705SKonstantin Belousov 			return (EINVAL);
10451bdbd705SKonstantin Belousov 	}
10461bdbd705SKonstantin Belousov 	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
10475470bb56SDavid Xu 		return (EINVAL);
10485470bb56SDavid Xu 
10495470bb56SDavid Xu 	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
10505470bb56SDavid Xu 	if (ret != 0)
10519ad4b644SDavid Xu 		return (ret);
10525470bb56SDavid Xu 
10531bdbd705SKonstantin Belousov 	curthread = _get_curthread();
10542a339d9eSKonstantin Belousov 	if (PMUTEX_OWNER_ID(m) == TID(curthread)) {
10551bdbd705SKonstantin Belousov 		mutex_assert_is_owned(m);
10565470bb56SDavid Xu 		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
10575470bb56SDavid Xu 		m2 = TAILQ_NEXT(m, m_qe);
10589e1ddd5fSDavid Xu 		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
10599e1ddd5fSDavid Xu 		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
10602a339d9eSKonstantin Belousov 			qidx = mutex_qidx(m);
10612a339d9eSKonstantin Belousov 			q = &curthread->mq[qidx];
10622a339d9eSKonstantin Belousov 			qp = &curthread->mq[qidx + 1];
10631bdbd705SKonstantin Belousov 			TAILQ_REMOVE(q, m, m_qe);
10641bdbd705SKonstantin Belousov 			if (!is_pshared_mutex(m))
10651bdbd705SKonstantin Belousov 				TAILQ_REMOVE(qp, m, m_pqe);
10661bdbd705SKonstantin Belousov 			TAILQ_FOREACH(m2, q, m_qe) {
10679e1ddd5fSDavid Xu 				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
10685470bb56SDavid Xu 					TAILQ_INSERT_BEFORE(m2, m, m_qe);
10691bdbd705SKonstantin Belousov 					if (!is_pshared_mutex(m)) {
10701bdbd705SKonstantin Belousov 						while (m2 != NULL &&
10711bdbd705SKonstantin Belousov 						    is_pshared_mutex(m2)) {
10721bdbd705SKonstantin Belousov 							m2 = TAILQ_PREV(m2,
10731bdbd705SKonstantin Belousov 							    mutex_queue, m_qe);
10741bdbd705SKonstantin Belousov 						}
10751bdbd705SKonstantin Belousov 						if (m2 == NULL) {
10761bdbd705SKonstantin Belousov 							TAILQ_INSERT_HEAD(qp,
10771bdbd705SKonstantin Belousov 							    m, m_pqe);
10781bdbd705SKonstantin Belousov 						} else {
10791bdbd705SKonstantin Belousov 							TAILQ_INSERT_BEFORE(m2,
10801bdbd705SKonstantin Belousov 							    m, m_pqe);
10811bdbd705SKonstantin Belousov 						}
10821bdbd705SKonstantin Belousov 					}
10835470bb56SDavid Xu 					return (0);
10845470bb56SDavid Xu 				}
10855470bb56SDavid Xu 			}
10861bdbd705SKonstantin Belousov 			TAILQ_INSERT_TAIL(q, m, m_qe);
10871bdbd705SKonstantin Belousov 			if (!is_pshared_mutex(m))
10881bdbd705SKonstantin Belousov 				TAILQ_INSERT_TAIL(qp, m, m_pqe);
10895470bb56SDavid Xu 		}
1090da20a63dSDavid Xu 	}
10915470bb56SDavid Xu 	return (0);
1092c984b5a7SMike Makonnen }
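/*
 * Editor's note: an illustrative sketch (not part of the original
 * source).  When the calling thread already owns the mutex, the code
 * above also re-sorts the owner's mutex queues so they stay ordered
 * by ceiling; the caller simply sees the old ceiling reported back.
 */
#include <pthread.h>

static int
raise_ceiling(pthread_mutex_t *mtx, int new_ceiling)
{
	int old_ceiling;

	return (pthread_mutex_setprioceiling(mtx, new_ceiling,
	    &old_ceiling));
}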
1093093fcf16SDavid Xu 
1094093fcf16SDavid Xu int
1095093fcf16SDavid Xu _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
1096093fcf16SDavid Xu {
1097bbb64c21SDavid Xu 	struct pthread_mutex *m;
10981bdbd705SKonstantin Belousov 	int ret;
1099bbb64c21SDavid Xu 
11001bdbd705SKonstantin Belousov 	ret = check_and_init_mutex(mutex, &m);
11011bdbd705SKonstantin Belousov 	if (ret == 0)
1102bbb64c21SDavid Xu 		*count = m->m_spinloops;
11031bdbd705SKonstantin Belousov 	return (ret);
1104093fcf16SDavid Xu }
1105093fcf16SDavid Xu 
1106093fcf16SDavid Xu int
1107093fcf16SDavid Xu __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
1108093fcf16SDavid Xu {
1109bbb64c21SDavid Xu 	struct pthread_mutex *m;
11101bdbd705SKonstantin Belousov 	int ret;
1111093fcf16SDavid Xu 
11121bdbd705SKonstantin Belousov 	ret = check_and_init_mutex(mutex, &m);
11131bdbd705SKonstantin Belousov 	if (ret == 0)
1114bbb64c21SDavid Xu 		m->m_spinloops = count;
11151bdbd705SKonstantin Belousov 	return (ret);
1116093fcf16SDavid Xu }
1117093fcf16SDavid Xu 
1118093fcf16SDavid Xu int
1119093fcf16SDavid Xu _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
1120093fcf16SDavid Xu {
1121bbb64c21SDavid Xu 	struct pthread_mutex *m;
11221bdbd705SKonstantin Belousov 	int ret;
1123bbb64c21SDavid Xu 
11241bdbd705SKonstantin Belousov 	ret = check_and_init_mutex(mutex, &m);
11251bdbd705SKonstantin Belousov 	if (ret == 0)
1126bbb64c21SDavid Xu 		*count = m->m_yieldloops;
11271bdbd705SKonstantin Belousov 	return (ret);
1128093fcf16SDavid Xu }
1129093fcf16SDavid Xu 
1130093fcf16SDavid Xu int
1131093fcf16SDavid Xu __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
1132093fcf16SDavid Xu {
1133bbb64c21SDavid Xu 	struct pthread_mutex *m;
11341bdbd705SKonstantin Belousov 	int ret;
1135093fcf16SDavid Xu 
11361bdbd705SKonstantin Belousov 	ret = check_and_init_mutex(mutex, &m);
11371bdbd705SKonstantin Belousov 	if (ret == 0)
1138bbb64c21SDavid Xu 		m->m_yieldloops = count;
1139093fcf16SDavid Xu 	return (ret);
1140093fcf16SDavid Xu }
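/*
 * Editor's note: an illustrative sketch (not part of the original
 * source) of the FreeBSD-specific tuning knobs wrapped by the four
 * functions above.  The counts are per-mutex adaptive-spin hints
 * consumed on the contended-lock path, not hard guarantees.
 */
#include <pthread.h>
#include <pthread_np.h>

static void
tune_mutex(pthread_mutex_t *mtx)
{
	int spins, yields;

	if (pthread_mutex_getspinloops_np(mtx, &spins) == 0)
		(void)pthread_mutex_setspinloops_np(mtx, spins * 2);
	if (pthread_mutex_getyieldloops_np(mtx, &yields) == 0)
		(void)pthread_mutex_setyieldloops_np(mtx, yields + 10);
}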
11415fd410a7SDag-Erling Smørgrav 
11425fd410a7SDag-Erling Smørgrav int
11431cbdac26SDag-Erling Smørgrav _pthread_mutex_isowned_np(pthread_mutex_t *mutex)
11445fd410a7SDag-Erling Smørgrav {
1145bbb64c21SDavid Xu 	struct pthread_mutex *m;
11465fd410a7SDag-Erling Smørgrav 
11471bdbd705SKonstantin Belousov 	if (*mutex == THR_PSHARED_PTR) {
11481bdbd705SKonstantin Belousov 		m = __thr_pshared_offpage(mutex, 0);
11491bdbd705SKonstantin Belousov 		if (m == NULL)
11501bdbd705SKonstantin Belousov 			return (0);
11516044c03aSKonstantin Belousov 		shared_mutex_init(m, NULL);
11521bdbd705SKonstantin Belousov 	} else {
1153bbb64c21SDavid Xu 		m = *mutex;
1154bbb64c21SDavid Xu 		if (m <= THR_MUTEX_DESTROYED)
1155bbb64c21SDavid Xu 			return (0);
11561bdbd705SKonstantin Belousov 	}
11572a339d9eSKonstantin Belousov 	return (PMUTEX_OWNER_ID(m) == TID(_get_curthread()));
11585fd410a7SDag-Erling Smørgrav }
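/*
 * Editor's note: an illustrative sketch (not part of the original
 * source).  pthread_mutex_isowned_np() answers a yes/no question, so
 * the code above returns 0 (rather than EINVAL or EPERM) when the
 * mutex is destroyed or the pshared lookup fails; it is intended for
 * assertions like this one.
 */
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>

static void
must_hold(pthread_mutex_t *mtx)
{
	assert(pthread_mutex_isowned_np(mtx) != 0);
}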
1159d1078b0bSDavid Xu 
1160d1078b0bSDavid Xu int
1161d1078b0bSDavid Xu _mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
1162d1078b0bSDavid Xu {
11632a339d9eSKonstantin Belousov 
1164d1078b0bSDavid Xu 	if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
1165d1078b0bSDavid Xu 		if (mp == THR_MUTEX_DESTROYED)
1166d1078b0bSDavid Xu 			return (EINVAL);
1167d1078b0bSDavid Xu 		return (EPERM);
1168d1078b0bSDavid Xu 	}
11692a339d9eSKonstantin Belousov 	if (PMUTEX_OWNER_ID(mp) != TID(curthread))
1170d1078b0bSDavid Xu 		return (EPERM);
1171d1078b0bSDavid Xu 	return (0);
1172d1078b0bSDavid Xu }
11732a339d9eSKonstantin Belousov 
11742a339d9eSKonstantin Belousov int
11750ab1bfc7SKonstantin Belousov _Tthr_mutex_consistent(pthread_mutex_t *mutex)
11762a339d9eSKonstantin Belousov {
11772a339d9eSKonstantin Belousov 	struct pthread_mutex *m;
11782a339d9eSKonstantin Belousov 	struct pthread *curthread;
11792a339d9eSKonstantin Belousov 
11802a339d9eSKonstantin Belousov 	if (*mutex == THR_PSHARED_PTR) {
11812a339d9eSKonstantin Belousov 		m = __thr_pshared_offpage(mutex, 0);
11822a339d9eSKonstantin Belousov 		if (m == NULL)
11832a339d9eSKonstantin Belousov 			return (EINVAL);
11842a339d9eSKonstantin Belousov 		shared_mutex_init(m, NULL);
11852a339d9eSKonstantin Belousov 	} else {
11862a339d9eSKonstantin Belousov 		m = *mutex;
11872a339d9eSKonstantin Belousov 		if (m <= THR_MUTEX_DESTROYED)
11882a339d9eSKonstantin Belousov 			return (EINVAL);
11892a339d9eSKonstantin Belousov 	}
11902a339d9eSKonstantin Belousov 	curthread = _get_curthread();
11912a339d9eSKonstantin Belousov 	if ((m->m_lock.m_flags & (UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) !=
11922a339d9eSKonstantin Belousov 	    (UMUTEX_ROBUST | UMUTEX_NONCONSISTENT))
11932a339d9eSKonstantin Belousov 		return (EINVAL);
11942a339d9eSKonstantin Belousov 	if (PMUTEX_OWNER_ID(m) != TID(curthread))
11952a339d9eSKonstantin Belousov 		return (EPERM);
11962a339d9eSKonstantin Belousov 	m->m_lock.m_flags &= ~UMUTEX_NONCONSISTENT;
11972a339d9eSKonstantin Belousov 	return (0);
11982a339d9eSKonstantin Belousov }
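/*
 * Editor's note: an illustrative sketch (not part of the original
 * source) of the robust-mutex recovery sequence this function backs
 * (it implements pthread_mutex_consistent()).  Consistency can only
 * be restored by the current owner, and only while the lock is still
 * marked UMUTEX_NONCONSISTENT.
 */
#include <errno.h>
#include <pthread.h>

static int
lock_robust(pthread_mutex_t *mtx)
{
	int ret;

	ret = pthread_mutex_lock(mtx);
	if (ret == EOWNERDEAD) {
		/* Repair the protected state, then mark the lock usable. */
		ret = pthread_mutex_consistent(mtx);
	}
	return (ret);
}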
1199