/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _THR_FBSD_UMTX_H_
#define _THR_FBSD_UMTX_H_

#include <strings.h>
#include <sys/umtx.h>

#ifdef __LP64__
#define DEFAULT_UMUTEX	{0,0,{0,0},0,{0,0}}
#else
#define DEFAULT_UMUTEX	{0,0,{0,0},0,0,{0,0}}
#endif
#define DEFAULT_URWLOCK	{0,0,0,0,{0,0,0,0}}

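/*
 * Both initializers describe an unowned lock; they match what
 * _thr_umutex_init() and _thr_urwlock_init() store.  A minimal usage
 * sketch (the variable names are hypothetical):
 *
 *	static struct umutex m = DEFAULT_UMUTEX;
 *	static struct urwlock rwl = DEFAULT_URWLOCK;
 */
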
int __thr_umutex_lock(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) __hidden;
int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
    const struct timespec *timeout) __hidden;
int __thr_umutex_unlock(struct umutex *mtx) __hidden;
int __thr_umutex_trylock(struct umutex *mtx) __hidden;
int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
    uint32_t *oldceiling) __hidden;

void _thr_umutex_init(struct umutex *mtx) __hidden;
void _thr_urwlock_init(struct urwlock *rwl) __hidden;

int _thr_umtx_wait(volatile long *mtx, long exp,
    const struct timespec *timeout) __hidden;
int _thr_umtx_wait_uint(volatile u_int *mtx, u_int exp,
    const struct timespec *timeout, int shared) __hidden;
int _thr_umtx_timedwait_uint(volatile u_int *mtx, u_int exp, int clockid,
    const struct timespec *timeout, int shared) __hidden;
int _thr_umtx_wake(volatile void *mtx, int count, int shared) __hidden;
int _thr_ucond_wait(struct ucond *cv, struct umutex *m,
    const struct timespec *timeout, int flags) __hidden;
void _thr_ucond_init(struct ucond *cv) __hidden;
int _thr_ucond_signal(struct ucond *cv) __hidden;
int _thr_ucond_broadcast(struct ucond *cv) __hidden;

int __thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
    const struct timespec *tsp) __hidden;
int __thr_rwlock_wrlock(struct urwlock *rwlock,
    const struct timespec *tsp) __hidden;
int __thr_rwlock_unlock(struct urwlock *rwlock) __hidden;

/* Internal use only. */
void _thr_rwl_rdlock(struct urwlock *rwlock) __hidden;
void _thr_rwl_wrlock(struct urwlock *rwlock) __hidden;
void _thr_rwl_unlock(struct urwlock *rwlock) __hidden;
static inline int
_thr_umutex_trylock(struct umutex *mtx, uint32_t id)
{

	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
		return (0);
	if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_OWNERDEAD) &&
	    atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_RB_OWNERDEAD,
	    id | UMUTEX_CONTESTED))
		return (EOWNERDEAD);
	if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_NOTRECOV))
		return (ENOTRECOVERABLE);
	if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EBUSY);
	return (__thr_umutex_trylock(mtx));
}
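
/*
 * Caller sketch (illustrative only, not part of this header): "id" is
 * the caller's thread id, TID(curthread) elsewhere in libthr.  Robust
 * mutexes add two outcomes beyond plain success or failure:
 *
 *	error = _thr_umutex_trylock(&m, id);
 *	0		 locked
 *	EOWNERDEAD	 locked, but the previous owner died; the caller
 *			 should make the protected state consistent
 *	ENOTRECOVERABLE	 the mutex is permanently unusable
 *	EBUSY		 owned by another thread
 */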

static inline int
_thr_umutex_trylock2(struct umutex *mtx, uint32_t id)
{

	if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0)
		return (0);
	if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED &&
	    __predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
	    UMUTEX_PRIO_INHERIT)) == 0) &&
	    atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED,
	    id | UMUTEX_CONTESTED))
		return (0);
	if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_OWNERDEAD) &&
	    atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_RB_OWNERDEAD,
	    id | UMUTEX_CONTESTED))
		return (EOWNERDEAD);
	if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_NOTRECOV))
		return (ENOTRECOVERABLE);
	return (EBUSY);
}
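
/*
 * _thr_umutex_trylock2() is the fast path used by the lock wrappers
 * below.  Unlike _thr_umutex_trylock() above, for mutexes without a
 * priority protocol it will also take a lock whose owner word is the
 * bare UMUTEX_CONTESTED value, keeping the contested bit set so that
 * the eventual unlock still wakes any sleeping waiters.
 */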

static inline int
_thr_umutex_lock(struct umutex *mtx, uint32_t id)
{

	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_lock(mtx, id));
}

static inline int
_thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{

	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_lock_spin(mtx, id));
}

static inline int
_thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
    const struct timespec *timeout)
{

	if (_thr_umutex_trylock2(mtx, id) == 0)
		return (0);
	return (__thr_umutex_timedlock(mtx, id, timeout));
}
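
/*
 * Timed-acquire sketch (a hypothetical caller; the timeout is
 * interpreted by __thr_umutex_timedlock(), which reports ETIMEDOUT on
 * expiry):
 *
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	int error;
 *
 *	error = _thr_umutex_timedlock(&m, id, &ts);
 *	if (error == ETIMEDOUT)
 *		give up on the lock
 */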

static inline int
_thr_umutex_unlock2(struct umutex *mtx, uint32_t id, int *defer)
{
	uint32_t flags, owner;
	bool noncst;

	flags = mtx->m_flags;
	noncst = (flags & UMUTEX_NONCONSISTENT) != 0;

	if ((flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0) {
		if (atomic_cmpset_rel_32(&mtx->m_owner, id, noncst ?
		    UMUTEX_RB_NOTRECOV : UMUTEX_UNOWNED))
			return (0);
		return (__thr_umutex_unlock(mtx));
	}

	do {
		owner = mtx->m_owner;
		if (__predict_false((owner & ~UMUTEX_CONTESTED) != id))
			return (EPERM);
	} while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner, owner,
	    noncst ? UMUTEX_RB_NOTRECOV : UMUTEX_UNOWNED)));
	if ((owner & UMUTEX_CONTESTED) != 0) {
		if (defer == NULL || noncst)
			(void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2,
			    flags, 0, 0);
		else
			*defer = 1;
	}
	return (0);
}
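
/*
 * The "defer" out parameter lets a caller postpone the wakeup of a
 * contested mutex: when it is non-NULL (and the mutex is consistent),
 * the unlock merely sets *defer instead of entering the kernel, and
 * the caller is expected to wake the waiters itself, which allows
 * several wakeups to be batched.  A sketch:
 *
 *	int defer = 0;
 *
 *	error = _thr_umutex_unlock2(&m, id, &defer);
 *	if (error == 0 && defer != 0)
 *		wake the waiters sleeping on &m
 */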

static inline int
_thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{

	return (_thr_umutex_unlock2(mtx, id, NULL));
}

static inline int
_thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags)
{
	int32_t state, wrflags;

	if ((flags & URWLOCK_PREFER_READER) != 0 ||
	    (rwlock->rw_flags & URWLOCK_PREFER_READER) != 0)
		wrflags = URWLOCK_WRITE_OWNER;
	else
		wrflags = URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS;
	state = rwlock->rw_state;
	while (!(state & wrflags)) {
		if (__predict_false(URWLOCK_READER_COUNT(state) ==
		    URWLOCK_MAX_READERS))
			return (EAGAIN);
		if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state + 1))
			return (0);
		state = rwlock->rw_state;
	}

	return (EBUSY);
}
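
/*
 * Fairness note: unless URWLOCK_PREFER_READER is set, either in the
 * caller's flags or on the lock itself, URWLOCK_WRITE_WAITERS is part
 * of wrflags above, so a queued writer blocks new readers and readers
 * cannot starve writers.
 */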

static inline int
_thr_rwlock_trywrlock(struct urwlock *rwlock)
{
	int32_t state;

	state = rwlock->rw_state;
	while ((state & URWLOCK_WRITE_OWNER) == 0 &&
	    URWLOCK_READER_COUNT(state) == 0) {
		if (atomic_cmpset_acq_32(&rwlock->rw_state, state,
		    state | URWLOCK_WRITE_OWNER))
			return (0);
		state = rwlock->rw_state;
	}

	return (EBUSY);
}

static inline int
_thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
{

	if (_thr_rwlock_tryrdlock(rwlock, flags) == 0)
		return (0);
	return (__thr_rwlock_rdlock(rwlock, flags, tsp));
}

static inline int
_thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
{

	if (_thr_rwlock_trywrlock(rwlock) == 0)
		return (0);
	return (__thr_rwlock_wrlock(rwlock, tsp));
}

static inline int
_thr_rwlock_unlock(struct urwlock *rwlock)
{
	int32_t state;

	state = rwlock->rw_state;
	if ((state & URWLOCK_WRITE_OWNER) != 0) {
		if (atomic_cmpset_rel_32(&rwlock->rw_state,
		    URWLOCK_WRITE_OWNER, 0))
			return (0);
	} else {
		for (;;) {
			if (__predict_false(URWLOCK_READER_COUNT(state) == 0))
				return (EPERM);
			if (!((state & (URWLOCK_WRITE_WAITERS |
			    URWLOCK_READ_WAITERS)) != 0 &&
			    URWLOCK_READER_COUNT(state) == 1)) {
				if (atomic_cmpset_rel_32(&rwlock->rw_state,
				    state, state - 1))
					return (0);
				state = rwlock->rw_state;
			} else {
				break;
			}
		}
	}
	return (__thr_rwlock_unlock(rwlock));
}
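
/*
 * Typical read-side pairing (a sketch; a NULL tsp requests an
 * untimed wait):
 *
 *	if (_thr_rwlock_rdlock(&rwl, 0, NULL) == 0) {
 *		read under the lock
 *		_thr_rwlock_unlock(&rwl);
 *	}
 */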
#endif