/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

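/*
 * Note (editorial, hedged): the size bound below presumably exists
 * because process-shared rwlocks are backed by a separate shared
 * "off-page" mapping obtained via __thr_pshared_offpage(), so the
 * structure must fit within the smallest supported page size.
 */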
_Static_assert(sizeof(struct pthread_rwlock) <= THR_PAGE_SIZE_MIN,
    "pthread_rwlock is too large for off-page");

__weak_reference(_thr_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_thr_rwlock_destroy, _pthread_rwlock_destroy);
__weak_reference(_thr_rwlock_init, pthread_rwlock_init);
__weak_reference(_thr_rwlock_init, _pthread_rwlock_init);
__weak_reference(_Tthr_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_Tthr_rwlock_rdlock, _pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_Tthr_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_Tthr_rwlock_tryrdlock, _pthread_rwlock_tryrdlock);
__weak_reference(_Tthr_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_Tthr_rwlock_trywrlock, _pthread_rwlock_trywrlock);
__weak_reference(_Tthr_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_Tthr_rwlock_unlock, _pthread_rwlock_unlock);
__weak_reference(_Tthr_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_Tthr_rwlock_wrlock, _pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock);
static int init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out);

static __always_inline int
check_and_init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
	if (__predict_false(*rwlock == THR_PSHARED_PTR ||
	    *rwlock <= THR_RWLOCK_DESTROYED))
		return (init_rwlock(rwlock, rwlock_out));
	*rwlock_out = *rwlock;
	return (0);
}

static int __noinline
init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
	pthread_rwlock_t prwlock;
	int ret;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else if ((prwlock = *rwlock) <= THR_RWLOCK_DESTROYED) {
		if (prwlock == THR_RWLOCK_INITIALIZER) {
			ret = init_static(_get_curthread(), rwlock);
			if (ret != 0)
				return (ret);
		} else if (prwlock == THR_RWLOCK_DESTROYED) {
			return (EINVAL);
		}
		prwlock = *rwlock;
	}
	*rwlock_out = prwlock;
	return (0);
}

static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t prwlock;

	if (attr == NULL || *attr == NULL ||
	    (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
		prwlock = aligned_alloc(CACHE_LINE_SIZE,
		    roundup(sizeof(struct pthread_rwlock), CACHE_LINE_SIZE));
		if (prwlock == NULL)
			return (ENOMEM);
		memset(prwlock, 0, sizeof(struct pthread_rwlock));
		*rwlock = prwlock;
	} else {
		prwlock = __thr_pshared_offpage(rwlock, 1);
		if (prwlock == NULL)
			return (EFAULT);
		prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
		*rwlock = THR_PSHARED_PTR;
	}
	return (0);
}
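
/*
 * Usage sketch (illustrative only, standard POSIX calls): a
 * process-shared attribute selects the off-page path above, leaving
 * THR_PSHARED_PTR in the caller's pthread_rwlock_t:
 *
 *	pthread_rwlockattr_t attr;
 *	pthread_rwlock_t lock;
 *
 *	pthread_rwlockattr_init(&attr);
 *	pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	pthread_rwlock_init(&lock, &attr);	-- lock now holds THR_PSHARED_PTR
 */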

int
_thr_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	prwlock = *rwlock;
	if (prwlock == THR_RWLOCK_INITIALIZER)
		ret = 0;
	else if (prwlock == THR_RWLOCK_DESTROYED)
		ret = EINVAL;
	else if (prwlock == THR_PSHARED_PTR) {
		*rwlock = THR_RWLOCK_DESTROYED;
		__thr_pshared_destroy(rwlock);
		ret = 0;
	} else {
		*rwlock = THR_RWLOCK_DESTROYED;
		free(prwlock);
		ret = 0;
	}
	return (ret);
}

static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == THR_RWLOCK_INITIALIZER)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}
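
/*
 * Usage sketch (illustrative only): a statically initialized rwlock is
 * not allocated until its first use; the first locking call funnels
 * through init_static() above, serialized by _rwlock_static_lock:
 *
 *	static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
 *
 *	pthread_rwlock_rdlock(&lock);	-- allocation happens here
 *	pthread_rwlock_unlock(&lock);
 */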

int
_thr_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{

	_thr_check_init();
	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread. If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock. If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking. So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock. I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}
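
	/*
	 * Illustrative deadlock (assumes writer preference without the
	 * URWLOCK_PREFER_READER override chosen above):
	 *
	 *	T1: rdlock(&l)	-- succeeds, T1 is a reader
	 *	T2: wrlock(&l)	-- blocks, writers now preferred
	 *	T1: rdlock(&l)	-- would queue behind T2
	 *
	 * T1 cannot proceed until T2 gets the lock, and T2 cannot get
	 * the lock until T1 drops its first rdlock, so the recursive
	 * rdlock must be granted immediately to break the cycle.
	 */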

	/*
	 * POSIX says the validity of the abstime parameter need not be
	 * checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* Go to the kernel and lock it. */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
		if (ret != EINTR)
			break;

		/* If interrupted, try to lock it in userland again. */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			break;
		}
	}
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}
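
/*
 * Usage sketch (illustrative only): per POSIX, abstime is an absolute
 * deadline against CLOCK_REALTIME, not a relative interval, so a
 * caller wanting roughly a one-second timeout would do:
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 1;
 *	if (pthread_rwlock_timedrdlock(&lock, &ts) == ETIMEDOUT)
 *		-- handle the timeout
 */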

int
_Tthr_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	_thr_check_init();
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
	_thr_check_init();
	return (rwlock_rdlock_common(rwlock, abstime));
}

int
_Tthr_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	_thr_check_init();
	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	curthread = _get_curthread();
	if (curthread->rdlock_count) {
		/*
		 * Prefer readers for the same recursive-rdlock reason
		 * described in the comment in rwlock_rdlock_common()
		 * above.
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_Tthr_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int ret;

	_thr_check_init();
	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	curthread = _get_curthread();
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = TID(curthread);
	return (ret);
}

static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	/*
	 * POSIX says the validity of the abstime parameter need not be
	 * checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = TID(curthread);
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* Go to the kernel and lock it. */
		ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
		if (ret == 0) {
			prwlock->owner = TID(curthread);
			break;
		}

		if (ret != EINTR)
			break;

		/* If interrupted, try to lock it in userland again. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = TID(curthread);
			break;
		}
	}
	return (ret);
}

int
_Tthr_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	_thr_check_init();
	return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
	_thr_check_init();
	return (rwlock_wrlock_common(rwlock, abstime));
}

int
_Tthr_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else {
		prwlock = *rwlock;
	}

	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
		return (EINVAL);

	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != TID(curthread)))
			return (EPERM);
		prwlock->owner = 0;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}
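
/*
 * Behavior sketch (illustrative only): the owner check above means a
 * write lock can only be released by the thread that acquired it:
 *
 *	thread A: pthread_rwlock_wrlock(&lock);	-- owner = TID(A)
 *	thread B: pthread_rwlock_unlock(&lock);	-- returns EPERM
 *	thread A: pthread_rwlock_unlock(&lock);	-- returns 0
 *
 * Read locks record no owner, so no equivalent check is possible for
 * readers; only the per-thread rdlock_count is adjusted.
 */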