/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "thr_private.h"
#include "thr_umtx.h"

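/*
 * Reset a umutex (and, below, a urwlock) to its default unlocked state
 * by copying a statically initialized template over the object.
 */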
void
_thr_umutex_init(struct umutex *mtx)
{
	static const struct umutex default_mtx = DEFAULT_UMUTEX;

	*mtx = default_mtx;
}

void
_thr_urwlock_init(struct urwlock *rwl)
{
	static const struct urwlock default_rwl = DEFAULT_URWLOCK;

	*rwl = default_rwl;
}

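/*
 * Lock a umutex, sleeping in the kernel while it is held by another
 * thread.  Priority-protected and priority-inheriting mutexes are
 * always locked through the kernel; for plain mutexes the fast path is
 * a userland compare-and-set, with UMTX_OP_MUTEX_WAIT as the fallback.
 * The robust-mutex sentinel owners are translated into EOWNERDEAD and
 * ENOTRECOVERABLE.  _umtx_op_err() is the syscall stub that returns
 * the error number directly instead of setting errno.
 */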
int
__thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;

	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
		return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));

	for (;;) {
		owner = mtx->m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0 &&
		    atomic_cmpset_acq_32(&mtx->m_owner, owner, id | owner))
			return (0);
		if (owner == UMUTEX_RB_OWNERDEAD &&
		    atomic_cmpset_acq_32(&mtx->m_owner, owner,
		    id | UMUTEX_CONTESTED))
			return (EOWNERDEAD);
		if (owner == UMUTEX_RB_NOTRECOV)
			return (ENOTRECOVERABLE);

		/* wait in kernel */
		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
	}
}

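/*
 * Adaptive variant of __thr_umutex_lock(): on SMP machines, retry the
 * userland compare-and-set up to SPINLOOPS times before each sleep in
 * the kernel, on the theory that the current owner is running on
 * another CPU and will release the mutex shortly.
 */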
#define SPINLOOPS 1000

int
__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
	uint32_t owner;
	int count;

	if (!_thr_is_smp)
		return (__thr_umutex_lock(mtx, id));
	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
		return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));

	for (;;) {
		count = SPINLOOPS;
		while (count--) {
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | owner))
				return (0);
			if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | UMUTEX_CONTESTED))
				return (EOWNERDEAD);
			if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
				return (ENOTRECOVERABLE);
			CPU_SPINWAIT;
		}

		/* wait in kernel */
		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
	}
}

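/*
 * Lock a umutex with an optional absolute CLOCK_REALTIME deadline.
 * A NULL abstime means wait forever; otherwise the deadline is passed
 * to the kernel as a struct _umtx_time with UMTX_ABSTIME set, using
 * the _umtx_op(2) convention of carrying the structure size in uaddr
 * and the pointer in uaddr2.
 */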
int
__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
    const struct timespec *abstime)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;
	uint32_t owner;
	int ret;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = CLOCK_REALTIME;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	for (;;) {
		if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
		    UMUTEX_PRIO_INHERIT)) == 0) {
			/* try to lock it */
			owner = mtx->m_owner;
			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | owner))
				return (0);
			if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
			    id | UMUTEX_CONTESTED))
				return (EOWNERDEAD);
			if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
				return (ENOTRECOVERABLE);
			/* wait in kernel */
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0,
			    (void *)tm_size, __DECONST(void *, tm_p));
		} else {
			ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0,
			    (void *)tm_size, __DECONST(void *, tm_p));
			if (ret == 0 || ret == EOWNERDEAD ||
			    ret == ENOTRECOVERABLE)
				break;
		}
		if (ret == ETIMEDOUT)
			break;
	}
	return (ret);
}

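/*
 * Hand the unlock off to the kernel, which clears the owner and wakes
 * a waiter if the mutex is contested.
 */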
int
__thr_umutex_unlock(struct umutex *mtx)
{

	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0));
}

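/* Try to lock a umutex through the kernel without blocking. */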
int
__thr_umutex_trylock(struct umutex *mtx)
{

	return (_umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0));
}

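/*
 * Set the priority ceiling of a PRIO_PROTECT umutex; the previous
 * ceiling is returned through oldceiling when it is non-NULL.
 */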
int
__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
    uint32_t *oldceiling)
{

	return (_umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0));
}

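/*
 * Sleep as long as the word at mtx still holds the value id, with an
 * optional relative timeout.  A typical caller loops, e.g. (sketch;
 * 'gate' and BUSY are hypothetical):
 *
 *	while (gate == BUSY)
 *		_thr_umtx_wait(&gate, BUSY, NULL);
 *
 * A relative timeout that is already zero or negative is reported as
 * ETIMEDOUT without entering the kernel.
 */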
int
_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
{

	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return (_umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
	    __DECONST(void *, timeout)));
}

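/*
 * Same as _thr_umtx_wait() but for a 32-bit word; 'shared' selects
 * between the process-shared and process-private kernel wait channels.
 */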
int
_thr_umtx_wait_uint(volatile u_int *mtx, u_int id,
    const struct timespec *timeout, int shared)
{

	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
	    UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
	    __DECONST(void *, timeout)));
}

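/*
 * Wait on a 32-bit word with an optional absolute deadline measured
 * against the given clock, encoded as a struct _umtx_time.
 */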
int
_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
    const struct timespec *abstime, int shared)
{
	struct _umtx_time *tm_p, timeout;
	size_t tm_size;

	if (abstime == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._clockid = clockid;
		timeout._flags = UMTX_ABSTIME;
		timeout._timeout = *abstime;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}

	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
	    UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id,
	    (void *)tm_size, __DECONST(void *, tm_p)));
}

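/* Wake up to nr_wakeup threads sleeping on the word at mtx. */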
int
_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
{

	return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
	    UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, nr_wakeup, 0, 0));
}

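/* A zero-filled ucond is the valid, initialized state. */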
void
_thr_ucond_init(struct ucond *cv)
{

	bzero(cv, sizeof(struct ucond));
}

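/*
 * Wait on a ucond; the kernel releases the umutex 'm' and queues the
 * thread atomically.  The mutex is not reacquired on return, so the
 * caller must relock it.  An already-expired relative timeout still
 * unlocks the mutex, leaving the caller in the same state as a wait
 * that timed out in the kernel.
 */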
int
_thr_ucond_wait(struct ucond *cv, struct umutex *m,
    const struct timespec *timeout, int flags)
{
	struct pthread *curthread;

	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0))) {
		curthread = _get_curthread();
		_thr_umutex_unlock(m, TID(curthread));
		return (ETIMEDOUT);
	}
	return (_umtx_op_err(cv, UMTX_OP_CV_WAIT, flags, m,
	    __DECONST(void *, timeout)));
}

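/*
 * Signal and broadcast skip the syscall entirely when c_has_waiters is
 * clear, keeping the common uncontested signal path cheap.
 */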
int
_thr_ucond_signal(struct ucond *cv)
{

	if (!cv->c_has_waiters)
		return (0);
	return (_umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL));
}

int
_thr_ucond_broadcast(struct ucond *cv)
{

	if (!cv->c_has_waiters)
		return (0);
	return (_umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL));
}

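/*
 * Kernel read lock of a urwlock with an optional absolute
 * CLOCK_REALTIME deadline; 'flags' may request reader preference
 * (URWLOCK_PREFER_READER).
 */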
int
__thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
    const struct timespec *tsp)
{
	struct _umtx_time timeout, *tm_p;
	size_t tm_size;

	if (tsp == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._timeout = *tsp;
		timeout._flags = UMTX_ABSTIME;
		timeout._clockid = CLOCK_REALTIME;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}
	return (_umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags,
	    (void *)tm_size, tm_p));
}

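/* Write-lock counterpart of __thr_rwlock_rdlock(). */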
int
__thr_rwlock_wrlock(struct urwlock *rwlock, const struct timespec *tsp)
{
	struct _umtx_time timeout, *tm_p;
	size_t tm_size;

	if (tsp == NULL) {
		tm_p = NULL;
		tm_size = 0;
	} else {
		timeout._timeout = *tsp;
		timeout._flags = UMTX_ABSTIME;
		timeout._clockid = CLOCK_REALTIME;
		tm_p = &timeout;
		tm_size = sizeof(timeout);
	}
	return (_umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, (void *)tm_size,
	    tm_p));
}

int
__thr_rwlock_unlock(struct urwlock *rwlock)
{

	return (_umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL));
}

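/*
 * Wrappers for libthr-internal locks: retry on EINTR and treat any
 * other failure as fatal, since these locks protect library state and
 * are not expected to fail.
 */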
void
_thr_rwl_rdlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0)
			return;
		ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("rdlock error");
	}
}

void
_thr_rwl_wrlock(struct urwlock *rwlock)
{
	int ret;

	for (;;) {
		if (_thr_rwlock_trywrlock(rwlock) == 0)
			return;
		ret = __thr_rwlock_wrlock(rwlock, NULL);
		if (ret == 0)
			return;
		if (ret != EINTR)
			PANIC("wrlock error");
	}
}

void
_thr_rwl_unlock(struct urwlock *rwlock)
{

	if (_thr_rwlock_unlock(rwlock))
		PANIC("unlock error");
}