/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

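/*
 * A process-shared rwlock is backed by a page-sized "off-page" object,
 * so the structure must fit within the smallest supported page size.
 */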
_Static_assert(sizeof(struct pthread_rwlock) <= THR_PAGE_SIZE_MIN,
    "pthread_rwlock is too large for off-page");

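/* Export the POSIX names as weak aliases of the internal entry points. */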
__weak_reference(_thr_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_thr_rwlock_destroy, _pthread_rwlock_destroy);
__weak_reference(_thr_rwlock_init, pthread_rwlock_init);
__weak_reference(_thr_rwlock_init, _pthread_rwlock_init);
__weak_reference(_Tthr_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_Tthr_rwlock_rdlock, _pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_Tthr_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_Tthr_rwlock_tryrdlock, _pthread_rwlock_tryrdlock);
__weak_reference(_Tthr_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_Tthr_rwlock_trywrlock, _pthread_rwlock_trywrlock);
__weak_reference(_Tthr_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_Tthr_rwlock_unlock, _pthread_rwlock_unlock);
__weak_reference(_Tthr_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_Tthr_rwlock_wrlock, _pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock);
static int init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out);

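/*
 * Fast path: if *rwlock already points at an initialized private lock,
 * hand it back directly; otherwise take the out-of-line slow path in
 * init_rwlock() to resolve pshared pointers and static initializers.
 */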
static int __always_inline
check_and_init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
	if (__predict_false(*rwlock == THR_PSHARED_PTR ||
	    *rwlock <= THR_RWLOCK_DESTROYED))
		return (init_rwlock(rwlock, rwlock_out));
	*rwlock_out = *rwlock;
	return (0);
}

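/*
 * Slow path: resolve the off-page object for process-shared locks,
 * lazily initialize a lock still set to THR_RWLOCK_INITIALIZER, and
 * reject one that has already been destroyed.
 */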
static int __noinline
init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
	pthread_rwlock_t prwlock;
	int ret;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else if ((prwlock = *rwlock) <= THR_RWLOCK_DESTROYED) {
		if (prwlock == THR_RWLOCK_INITIALIZER) {
			ret = init_static(_get_curthread(), rwlock);
			if (ret != 0)
				return (ret);
		} else if (prwlock == THR_RWLOCK_DESTROYED) {
			return (EINVAL);
		}
		prwlock = *rwlock;
	}
	*rwlock_out = prwlock;
	return (0);
}

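/*
 * Allocate the lock: a cache-line-aligned private object for
 * process-private locks, or the shared off-page object for
 * process-shared ones.
 */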
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t prwlock;

	if (attr == NULL || *attr == NULL ||
	    (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
		prwlock = aligned_alloc(CACHE_LINE_SIZE,
		    roundup(sizeof(struct pthread_rwlock), CACHE_LINE_SIZE));
		if (prwlock == NULL)
			return (ENOMEM);
		memset(prwlock, 0, sizeof(struct pthread_rwlock));
		*rwlock = prwlock;
	} else {
		prwlock = __thr_pshared_offpage(rwlock, 1);
		if (prwlock == NULL)
			return (EFAULT);
		prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
		*rwlock = THR_PSHARED_PTR;
	}
	return (0);
}

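/*
 * Example (hypothetical caller, not part of this file): the off-page
 * branch of rwlock_init() above is reached via the
 * PTHREAD_PROCESS_SHARED attribute, typically for a lock placed in a
 * MAP_SHARED mapping:
 *
 *	pthread_rwlockattr_t attr;
 *	pthread_rwlock_t *rw;		// points into shared memory
 *
 *	pthread_rwlockattr_init(&attr);
 *	pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	pthread_rwlock_init(rw, &attr);	// *rw is set to THR_PSHARED_PTR
 *	pthread_rwlockattr_destroy(&attr);
 */
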
int
_thr_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	prwlock = *rwlock;
	if (prwlock == THR_RWLOCK_INITIALIZER)
		ret = 0;
	else if (prwlock == THR_RWLOCK_DESTROYED)
		ret = EINVAL;
	else if (prwlock == THR_PSHARED_PTR) {
		*rwlock = THR_RWLOCK_DESTROYED;
		__thr_pshared_destroy(rwlock);
		ret = 0;
	} else {
		*rwlock = THR_RWLOCK_DESTROYED;
		free(prwlock);
		ret = 0;
	}
	return (ret);
}

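/*
 * Lazily initialize a lock that was statically initialized.  The global
 * _rwlock_static_lock serializes racing threads, and the state is
 * re-checked under it so only one thread performs the initialization.
 */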
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == THR_RWLOCK_INITIALIZER)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

int
_thr_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{

	_thr_check_init();
	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

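/*
 * Common read-lock path: try the cheap userland acquire first and only
 * then sleep in the kernel, retrying the userland acquire whenever the
 * sleep is interrupted by a signal.
 */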
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	if (curthread->rdlock_count) {
		/*
		 * Rather than track every rdlock held by a thread, or
		 * every thread holding a given rdlock, we keep a simple
		 * per-thread count of rdlocks held.  A thread that
		 * already holds a rdlock may be attempting a recursive
		 * rdlock; if blocked writers were given precedence, the
		 * thread would deadlock against itself.  Preferring
		 * readers in that case avoids the deadlock.
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	/*
	 * POSIX says the validity of the abstime parameter need not
	 * be checked if the lock can be acquired immediately.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* Go to the kernel and wait for the lock. */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
		if (ret != EINTR)
			break;

		/* If interrupted, try to acquire it in userland again. */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			break;
		}
	}
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

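/*
 * Example (hypothetical caller, not part of this file): the
 * rdlock_count logic in rwlock_rdlock_common() lets a thread that
 * already holds a read lock take another one even with a writer queued:
 *
 *	pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
 *
 *	pthread_rwlock_rdlock(&lock);	// first read lock
 *	pthread_rwlock_rdlock(&lock);	// recursive read lock; readers
 *					// are preferred here, so a queued
 *					// writer cannot deadlock us
 *	pthread_rwlock_unlock(&lock);	// one unlock per rdlock
 *	pthread_rwlock_unlock(&lock);
 */
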
int
_Tthr_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	_thr_check_init();
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
	_thr_check_init();
	return (rwlock_rdlock_common(rwlock, abstime));
}

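/*
 * Example (hypothetical caller, not part of this file): the timed
 * variants take an absolute CLOCK_REALTIME deadline rather than a
 * relative interval, so callers usually derive it from clock_gettime():
 *
 *	struct timespec abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 5;	// give up after roughly five seconds
 *	if (pthread_rwlock_timedrdlock(&lock, &abstime) == ETIMEDOUT)
 *		...		// the lock was not acquired in time
 */
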
int
_Tthr_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	_thr_check_init();
	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	curthread = _get_curthread();
	if (curthread->rdlock_count) {
		/*
		 * Prefer readers when the thread already holds rdlocks,
		 * to avoid a self-deadlock on a recursive rdlock; see
		 * the comment in rwlock_rdlock_common().
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_Tthr_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int ret;

	_thr_check_init();
	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	curthread = _get_curthread();
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = TID(curthread);
	return (ret);
}

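/*
 * Common write-lock path.  On a successful acquire the owner's TID is
 * recorded so that unlock can detect a caller that does not hold the
 * write lock.
 */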
static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	/*
	 * POSIX says the validity of the abstime parameter need not
	 * be checked if the lock can be acquired immediately.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = TID(curthread);
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* Go to the kernel and wait for the lock. */
		ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
		if (ret == 0) {
			prwlock->owner = TID(curthread);
			break;
		}

		if (ret != EINTR)
			break;

		/* If interrupted, try to acquire it in userland again. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = TID(curthread);
			break;
		}
	}
	return (ret);
}

int
_Tthr_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	_thr_check_init();
	return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
	_thr_check_init();
	return (rwlock_wrlock_common(rwlock, abstime));
}

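/*
 * Release either kind of hold: a write unlock requires the caller to be
 * the recorded owner (EPERM otherwise), while a read unlock decrements
 * the per-thread rdlock count used by the recursion heuristic above.
 */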
int
_Tthr_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else {
		prwlock = *rwlock;
	}

	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
		return (EINVAL);

	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != TID(curthread)))
			return (EPERM);
		prwlock->owner = 0;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}
395