/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

_Static_assert(sizeof(struct pthread_rwlock) <= THR_PAGE_SIZE_MIN,
    "pthread_rwlock is too large for off-page");

__weak_reference(_thr_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_thr_rwlock_destroy, _pthread_rwlock_destroy);
__weak_reference(_thr_rwlock_init, pthread_rwlock_init);
__weak_reference(_thr_rwlock_init, _pthread_rwlock_init);
__weak_reference(_Tthr_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_Tthr_rwlock_rdlock, _pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_Tthr_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_Tthr_rwlock_tryrdlock, _pthread_rwlock_tryrdlock);
__weak_reference(_Tthr_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_Tthr_rwlock_trywrlock, _pthread_rwlock_trywrlock);
__weak_reference(_Tthr_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_Tthr_rwlock_unlock, _pthread_rwlock_unlock);
__weak_reference(_Tthr_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_Tthr_rwlock_wrlock, _pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
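
/*
 * The __weak_reference() pairs above publish each implementation under
 * both its POSIX name and the legacy underscore-prefixed name, so plain
 * application calls resolve to the functions in this file.  A minimal
 * caller, as a hedged sketch (the names "lk", "counter", "setup", and
 * "read_counter" are illustrative only, not part of this file):
 *
 *	#include <pthread.h>
 *
 *	pthread_rwlock_t lk;
 *	int counter;
 *
 *	void
 *	setup(void)
 *	{
 *		pthread_rwlock_init(&lk, NULL);	// -> _thr_rwlock_init
 *	}
 *
 *	int
 *	read_counter(void)
 *	{
 *		int v;
 *
 *		pthread_rwlock_rdlock(&lk);	// -> _Tthr_rwlock_rdlock
 *		v = counter;
 *		pthread_rwlock_unlock(&lk);	// -> _Tthr_rwlock_unlock
 *		return (v);
 *	}
 */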

static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock);
static int init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out);

static int __always_inline
check_and_init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
	if (__predict_false(*rwlock == THR_PSHARED_PTR ||
	    *rwlock <= THR_RWLOCK_DESTROYED))
		return (init_rwlock(rwlock, rwlock_out));
	*rwlock_out = *rwlock;
	return (0);
}

static int __noinline
init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
	pthread_rwlock_t prwlock;
	int ret;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else if ((prwlock = *rwlock) <= THR_RWLOCK_DESTROYED) {
		if (prwlock == THR_RWLOCK_INITIALIZER) {
			ret = init_static(_get_curthread(), rwlock);
			if (ret != 0)
				return (ret);
		} else if (prwlock == THR_RWLOCK_DESTROYED) {
			return (EINVAL);
		}
		prwlock = *rwlock;
	}
	*rwlock_out = prwlock;
	return (0);
}
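
/*
 * A hedged sketch of the lazy initialization handled above: a rwlock
 * created with the static initializer takes the slow path exactly once
 * (illustrative caller code, not compiled into this file):
 *
 *	static pthread_rwlock_t lk = PTHREAD_RWLOCK_INITIALIZER;
 *
 *	// First pthread_rwlock_rdlock(&lk): lk still holds
 *	// THR_RWLOCK_INITIALIZER, so check_and_init_rwlock() calls
 *	// init_rwlock() -> init_static(), which allocates the real
 *	// lock and stores its address in lk.  Every later call sees
 *	// that address and takes the single-comparison fast path.
 *	pthread_rwlock_rdlock(&lk);
 *	pthread_rwlock_unlock(&lk);
 */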

static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t prwlock;

	if (attr == NULL || *attr == NULL ||
	    (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
		prwlock = aligned_alloc(CACHE_LINE_SIZE,
		    roundup(sizeof(struct pthread_rwlock), CACHE_LINE_SIZE));
		if (prwlock == NULL)
			return (ENOMEM);
		memset(prwlock, 0, sizeof(struct pthread_rwlock));
		*rwlock = prwlock;
	} else {
		prwlock = __thr_pshared_offpage(rwlock, 1);
		if (prwlock == NULL)
			return (EFAULT);
		prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
		*rwlock = THR_PSHARED_PTR;
	}
	return (0);
}
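
/*
 * Hedged usage sketch for the process-shared branch above: the
 * pthread_rwlock_t word itself must live in memory visible to every
 * participating process; libthr then stores THR_PSHARED_PTR in it and
 * keeps the real struct on a shared off-page looked up by address.
 * Illustrative only; error handling elided; needs <sys/mman.h>:
 *
 *	pthread_rwlock_t *lk;
 *	pthread_rwlockattr_t attr;
 *
 *	lk = mmap(NULL, sizeof(*lk), PROT_READ | PROT_WRITE,
 *	    MAP_SHARED | MAP_ANON, -1, 0);
 *	pthread_rwlockattr_init(&attr);
 *	pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	pthread_rwlock_init(lk, &attr);
 *	// lk may now be used by any process sharing this mapping,
 *	// e.g. children created with fork().
 */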

int
_thr_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	prwlock = *rwlock;
	if (prwlock == THR_RWLOCK_INITIALIZER)
		ret = 0;
	else if (prwlock == THR_RWLOCK_DESTROYED)
		ret = EINVAL;
	else if (prwlock == THR_PSHARED_PTR) {
		*rwlock = THR_RWLOCK_DESTROYED;
		__thr_pshared_destroy(rwlock);
		ret = 0;
	} else {
		*rwlock = THR_RWLOCK_DESTROYED;
		free(prwlock);
		ret = 0;
	}
	return (ret);
}
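
/*
 * The destroy state machine above, as a sketch:
 *
 *	pthread_rwlock_t lk;
 *
 *	pthread_rwlock_init(&lk, NULL);	// lk -> allocated struct
 *	pthread_rwlock_destroy(&lk);	// frees it, sets lk to
 *					// THR_RWLOCK_DESTROYED, returns 0
 *	pthread_rwlock_destroy(&lk);	// EINVAL: already destroyed
 *
 * Destroying a lock still equal to THR_RWLOCK_INITIALIZER succeeds
 * without doing anything, since nothing was ever allocated for it.
 */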

static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == THR_RWLOCK_INITIALIZER)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

int
_thr_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{

	_thr_check_init();
	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	/*
	 * POSIX says the validity of the abstime parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* go to the kernel and lock it */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			break;
		}
	}
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}
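
/*
 * The interleaving the URWLOCK_PREFER_READER escape above avoids, as a
 * sketch (T1 and T2 are threads; without the flag, step 3 would queue
 * T1 behind the preferred writer while T1 still holds a read lock):
 *
 *	T1: pthread_rwlock_rdlock(&lk);	// 1. granted
 *	T2: pthread_rwlock_wrlock(&lk);	// 2. blocks; writers preferred
 *	T1: pthread_rwlock_rdlock(&lk);	// 3. recursive read; must be
 *					//    granted or T1 and T2
 *					//    deadlock forever
 */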

int
_Tthr_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	_thr_check_init();
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
	_thr_check_init();
	return (rwlock_rdlock_common(rwlock, abstime));
}
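
/*
 * Usage sketch for the timed variant: abstime is an absolute
 * CLOCK_REALTIME deadline, not a relative interval.  Assuming the
 * caller wants to wait roughly one second (illustrative only):
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 1;
 *	if (pthread_rwlock_timedrdlock(&lk, &ts) == ETIMEDOUT)
 *		;	// lock not acquired within the deadline
 */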

int
_Tthr_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	_thr_check_init();
	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	curthread = _get_curthread();
	if (curthread->rdlock_count) {
		/*
		 * Permit recursive rdlocks for the reason explained in
		 * the comment in rwlock_rdlock_common() above.
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_Tthr_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int ret;

	_thr_check_init();
	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	curthread = _get_curthread();
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = TID(curthread);
	return (ret);
}

static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	/*
	 * POSIX says the validity of the abstime parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = TID(curthread);
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* go to the kernel and lock it */
		ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
		if (ret == 0) {
			prwlock->owner = TID(curthread);
			break;
		}

		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = TID(curthread);
			break;
		}
	}
	return (ret);
}

int
_Tthr_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	_thr_check_init();
	return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
	_thr_check_init();
	return (rwlock_wrlock_common(rwlock, abstime));
}

int
_Tthr_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else {
		prwlock = *rwlock;
	}

	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
		return (EINVAL);

	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != TID(curthread)))
			return (EPERM);
		prwlock->owner = 0;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}
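
/*
 * The ownership check above in sketch form: only the writer that holds
 * the lock may release it.
 *
 *	T1: pthread_rwlock_wrlock(&lk);	// owner = TID(T1)
 *	T2: pthread_rwlock_unlock(&lk);	// EPERM: T2 is not the owner
 *	T1: pthread_rwlock_unlock(&lk);	// 0
 */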
397