xref: /freebsd/lib/libthr/thread/thr_umtx.h (revision 2a339d9e3dc129f0b0b79c2cb8d2bb0386fb0f5f)
/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _THR_FBSD_UMTX_H_
#define _THR_FBSD_UMTX_H_

#include <strings.h>
#include <sys/umtx.h>

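/*
 * Static initializers for struct umutex and struct urwlock.  Two umutex
 * variants are needed because the 32-bit ABI layout of struct umutex
 * carries an extra padding word that the LP64 layout does not.
 */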
#ifdef __LP64__
#define DEFAULT_UMUTEX	{0,0,{0,0},0,{0,0}}
#else
#define DEFAULT_UMUTEX	{0,0,{0,0},0,0,{0,0}}
#endif
#define DEFAULT_URWLOCK {0,0,0,0,{0,0,0,0}}

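/*
 * Out-of-line slow paths.  These helpers may enter the kernel through the
 * _umtx_op(2) system call when the userland fast path cannot make progress.
 */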
int _umtx_op_err(void *, int op, u_long, void *, void *) __hidden;
int __thr_umutex_lock(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
	const struct timespec *timeout) __hidden;
int __thr_umutex_unlock(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_trylock(struct umutex *mtx) __hidden;
int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
	uint32_t *oldceiling) __hidden;

void _thr_umutex_init(struct umutex *mtx) __hidden;
void _thr_urwlock_init(struct urwlock *rwl) __hidden;

int _thr_umtx_wait(volatile long *mtx, long exp,
	const struct timespec *timeout) __hidden;
int _thr_umtx_wait_uint(volatile u_int *mtx, u_int exp,
	const struct timespec *timeout, int shared) __hidden;
int _thr_umtx_timedwait_uint(volatile u_int *mtx, u_int exp, int clockid,
	const struct timespec *timeout, int shared) __hidden;
int _thr_umtx_wake(volatile void *mtx, int count, int shared) __hidden;
int _thr_ucond_wait(struct ucond *cv, struct umutex *m,
	const struct timespec *timeout, int flags) __hidden;
void _thr_ucond_init(struct ucond *cv) __hidden;
int _thr_ucond_signal(struct ucond *cv) __hidden;
int _thr_ucond_broadcast(struct ucond *cv) __hidden;

int __thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
	const struct timespec *tsp) __hidden;
int __thr_rwlock_wrlock(struct urwlock *rwlock,
	const struct timespec *tsp) __hidden;
int __thr_rwlock_unlock(struct urwlock *rwlock) __hidden;

/* For internal use only. */
void _thr_rwl_rdlock(struct urwlock *rwlock) __hidden;
void _thr_rwl_wrlock(struct urwlock *rwlock) __hidden;
void _thr_rwl_unlock(struct urwlock *rwlock) __hidden;

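/*
 * Try to lock a umutex without blocking.  Returns 0 on success,
 * EOWNERDEAD if a robust mutex whose previous owner died was taken over,
 * ENOTRECOVERABLE if the robust mutex was marked unrecoverable, and
 * EBUSY if the mutex is held.  Priority-protected mutexes fall back to
 * the __thr_umutex_trylock() slow path.
 */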
static inline int
_thr_umutex_trylock(struct umutex *mtx, uint32_t id)
{

	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
		return (0);
	if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_OWNERDEAD) &&
	    atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_RB_OWNERDEAD,
	    id | UMUTEX_CONTESTED))
		return (EOWNERDEAD);
	if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_NOTRECOV))
		return (ENOTRECOVERABLE);
	if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EBUSY);
	return (__thr_umutex_trylock(mtx));
}

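/*
 * Non-blocking lock attempt that, unlike _thr_umutex_trylock(), will also
 * take a contested but unowned mutex (m_owner == UMUTEX_CONTESTED) when it
 * uses neither priority protection nor priority inheritance, preserving the
 * contested bit.  Robust-mutex owner-died and unrecoverable states are
 * handled as above.  Never calls into the kernel; returns EBUSY on failure.
 */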
static inline int
_thr_umutex_trylock2(struct umutex *mtx, uint32_t id)
{

	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0)
		return (0);
	if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED &&
	    __predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
	    UMUTEX_PRIO_INHERIT)) == 0) &&
	    atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED,
	    id | UMUTEX_CONTESTED))
		return (0);
	if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_OWNERDEAD) &&
	    atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_RB_OWNERDEAD,
	    id | UMUTEX_CONTESTED))
		return (EOWNERDEAD);
	if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_NOTRECOV))
		return (ENOTRECOVERABLE);
	return (EBUSY);
}

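/*
 * Lock a umutex: try the userland fast path first and fall back to the
 * sleeping slow path, __thr_umutex_lock(), on contention.
 */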
static inline int
_thr_umutex_lock(struct umutex *mtx, uint32_t id)
{

	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_lock(mtx, id));
}

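/*
 * As above, but the __thr_umutex_lock_spin() slow path spins in userland
 * for a while before sleeping.
 */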
static inline int
_thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{

	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_lock_spin(mtx, id));
}

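/*
 * Lock a umutex with an optional timeout; the timeout handling lives in
 * the __thr_umutex_timedlock() slow path.
 */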
static inline int
_thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
    const struct timespec *timeout)
{

	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_timedlock(mtx, id, timeout));
}

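/*
 * Unlock a umutex.  Priority-protected and priority-inheriting mutexes go
 * through the kernel when the release CAS fails.  For plain mutexes the
 * owner is cleared in userland; if the mutex was contested, waiters are
 * woken with UMTX_OP_MUTEX_WAKE2, or, when the caller passed a non-NULL
 * "defer" pointer and the mutex is consistent, *defer is set and the
 * wakeup is left to the caller.  A mutex flagged UMUTEX_NONCONSISTENT is
 * released into the UMUTEX_RB_NOTRECOV state.  Returns EPERM if the
 * caller does not own the mutex.
 */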
static inline int
_thr_umutex_unlock2(struct umutex *mtx, uint32_t id, int *defer)
{
	uint32_t flags, owner;
	bool noncst;

	flags = mtx->m_flags;
	noncst = (flags & UMUTEX_NONCONSISTENT) != 0;

	if ((flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0) {
		if (atomic_cmpset_rel_32(&mtx->m_owner, id, noncst ?
		    UMUTEX_RB_NOTRECOV : UMUTEX_UNOWNED))
			return (0);
		return (__thr_umutex_unlock(mtx, id));
	}

	do {
		owner = mtx->m_owner;
		if (__predict_false((owner & ~UMUTEX_CONTESTED) != id))
			return (EPERM);
	} while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner, owner,
	    noncst ? UMUTEX_RB_NOTRECOV : UMUTEX_UNOWNED)));
	if ((owner & UMUTEX_CONTESTED) != 0) {
		if (defer == NULL || noncst)
			(void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2,
			    flags, 0, 0);
		else
			*defer = 1;
	}
	return (0);
}

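/*
 * Unlock a umutex without deferring the wakeup of waiters.
 */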
static inline int
_thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{

	return (_thr_umutex_unlock2(mtx, id, NULL));
}

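/*
 * Try to acquire a read lock without blocking.  Unless the rwlock prefers
 * readers, the attempt also fails while writers are waiting, so that
 * writers are not starved.  Returns EAGAIN when the reader count would
 * exceed URWLOCK_MAX_READERS and EBUSY when the lock cannot be taken.
 */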
static inline int
_thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags)
{
	int32_t state, wrflags;

	if ((flags & URWLOCK_PREFER_READER) != 0 ||
	    (rwlock->rw_flags & URWLOCK_PREFER_READER) != 0)
		wrflags = URWLOCK_WRITE_OWNER;
	else
		wrflags = URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS;
	state = rwlock->rw_state;
	while (!(state & wrflags)) {
		if (__predict_false(URWLOCK_READER_COUNT(state) ==
		    URWLOCK_MAX_READERS))
			return (EAGAIN);
		if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state + 1))
			return (0);
		state = rwlock->rw_state;
	}

	return (EBUSY);
}

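/*
 * Try to acquire the write lock without blocking; succeeds only when there
 * is no write owner and no active readers, otherwise returns EBUSY.
 */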
static inline int
_thr_rwlock_trywrlock(struct urwlock *rwlock)
{
	int32_t state;

	state = rwlock->rw_state;
	while ((state & URWLOCK_WRITE_OWNER) == 0 &&
	    URWLOCK_READER_COUNT(state) == 0) {
		if (atomic_cmpset_acq_32(&rwlock->rw_state, state,
		    state | URWLOCK_WRITE_OWNER))
			return (0);
		state = rwlock->rw_state;
	}

	return (EBUSY);
}

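/*
 * Read-lock with an optional timeout: userland fast path first, then the
 * __thr_rwlock_rdlock() slow path, which can sleep in the kernel.
 */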
static inline int
_thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
{

	if (_thr_rwlock_tryrdlock(rwlock, flags) == 0)
		return (0);
	return (__thr_rwlock_rdlock(rwlock, flags, tsp));
}

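/*
 * Write-lock with an optional timeout, falling back to
 * __thr_rwlock_wrlock() when the try path fails.
 */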
static inline int
_thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
{

	if (_thr_rwlock_trywrlock(rwlock) == 0)
		return (0);
	return (__thr_rwlock_wrlock(rwlock, tsp));
}

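/*
 * Release a read or write lock.  The uncontested cases are handled with a
 * single CAS in userland; the last reader (or the writer) falls back to
 * __thr_rwlock_unlock() when waiters must be woken.  Returns EPERM if the
 * lock is not held.
 */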
static inline int
_thr_rwlock_unlock(struct urwlock *rwlock)
{
	int32_t state;

	state = rwlock->rw_state;
	if ((state & URWLOCK_WRITE_OWNER) != 0) {
		if (atomic_cmpset_rel_32(&rwlock->rw_state,
		    URWLOCK_WRITE_OWNER, 0))
			return (0);
	} else {
		for (;;) {
			if (__predict_false(URWLOCK_READER_COUNT(state) == 0))
				return (EPERM);
			if (!((state & (URWLOCK_WRITE_WAITERS |
			    URWLOCK_READ_WAITERS)) != 0 &&
			    URWLOCK_READER_COUNT(state) == 1)) {
				if (atomic_cmpset_rel_32(&rwlock->rw_state,
				    state, state - 1))
					return (0);
				state = rwlock->rw_state;
			} else {
				break;
			}
		}
	}
	return (__thr_rwlock_unlock(rwlock));
}
#endif