/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

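/*
 * Export the standard pthread_rwlock_*() names as weak aliases of the
 * _pthread_rwlock_*() implementations defined in this file.
 */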
__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

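/*
 * Convert a statically initialized rwlock (PTHREAD_RWLOCK_INITIALIZER)
 * into a fully allocated one on first use, and reject locks that have
 * already been destroyed.  Illustrative application-level usage (not
 * part of this file):
 *
 *	static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
 *	pthread_rwlock_rdlock(&lock);	(first use runs init_static())
 *	pthread_rwlock_unlock(&lock);
 */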
#define CHECK_AND_INIT_RWLOCK							\
	if (__predict_false((prwlock = (*rwlock)) <= THR_RWLOCK_DESTROYED)) {	\
		if (prwlock == THR_RWLOCK_INITIALIZER) {			\
			int ret;						\
			ret = init_static(_get_curthread(), rwlock);		\
			if (ret)						\
				return (ret);					\
		} else if (prwlock == THR_RWLOCK_DESTROYED) {			\
			return (EINVAL);					\
		}								\
		prwlock = *rwlock;						\
	}

/*
 * Prototypes
 */

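/*
 * Allocate and initialize a new rwlock object.  The reader-preference
 * flag is set when the attribute asks for PTHREAD_RWLOCK_PREFER_READER_NP.
 */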
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t prwlock;

	prwlock = (pthread_rwlock_t)calloc(1, sizeof(struct pthread_rwlock));
	if (prwlock == NULL)
		return (ENOMEM);
	if (attr != NULL)
		prwlock->kind = (*attr)->kind;
	else
		prwlock->kind = PTHREAD_RWLOCK_DEFAULT_NP;
	if (prwlock->kind == PTHREAD_RWLOCK_PREFER_READER_NP)
		prwlock->lock.rw_flags |= URWLOCK_PREFER_READER;
	*rwlock = prwlock;
	return (0);
}

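/*
 * Destroying a lock that is still statically initialized is a no-op;
 * otherwise the caller's variable is marked THR_RWLOCK_DESTROYED before
 * the storage is freed, so any later use fails with EINVAL.
 */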
int
_pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	prwlock = *rwlock;
	if (prwlock == THR_RWLOCK_INITIALIZER)
		ret = 0;
	else if (prwlock == THR_RWLOCK_DESTROYED)
		ret = EINVAL;
	else {
		*rwlock = THR_RWLOCK_DESTROYED;

		free(prwlock);
		ret = 0;
	}
	return (ret);
}

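/*
 * First use of a statically initialized rwlock: allocate the real lock
 * under _rwlock_static_lock so that racing threads do not both
 * initialize it.
 */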
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == THR_RWLOCK_INITIALIZER)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

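/*
 * pthread_rwlock_init(3) entry point; any previous contents of *rwlock
 * are simply overwritten.
 */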
int
_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

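/*
 * Common read-lock path shared by pthread_rwlock_rdlock() and
 * pthread_rwlock_timedrdlock(); abstime == NULL means wait without a
 * timeout.
 */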
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	struct timespec ts, ts2, *tsp;
	int flags;
	int ret;

	CHECK_AND_INIT_RWLOCK

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	/*
	 * POSIX says the validity of the abstime parameter need not be
	 * checked if the lock can be acquired immediately.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
		(abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

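	/*
	 * Convert the absolute deadline into the time remaining and pass
	 * that to __thr_rwlock_rdlock(); the remaining time is recomputed
	 * on every iteration, including after an EINTR retry, and
	 * ETIMEDOUT is returned once the deadline has passed.
	 */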
	for (;;) {
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			if (ts2.tv_sec < 0 ||
			    (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
				return (ETIMEDOUT);
			tsp = &ts2;
		} else
			tsp = NULL;

		/* go to the kernel and lock it */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, tsp);
		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			break;
		}
	}
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
	 const struct timespec *abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}

int
_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	CHECK_AND_INIT_RWLOCK

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

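/*
 * Non-blocking write lock.  On success the owning thread is recorded so
 * that the unlock and recursive-wrlock checks can verify ownership.
 */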
int
_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	CHECK_AND_INIT_RWLOCK

	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = curthread;
	return (ret);
}

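/*
 * Common write-lock path shared by pthread_rwlock_wrlock() and
 * pthread_rwlock_timedwrlock().  A thread that already owns the lock is
 * allowed to recurse only for the PTHREAD_RWLOCK_PREFER_WRITER_NP kind;
 * the recursion depth is tracked in prwlock->recurse.
 */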
static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	struct timespec ts, ts2, *tsp;
	int ret;

	CHECK_AND_INIT_RWLOCK

	if (__predict_false(prwlock->owner == curthread)) {
		if (__predict_false(
			prwlock->kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)) {
			prwlock->recurse++;
			return (0);
		}
	}

	/*
	 * POSIX says the validity of the abstime parameter need not be
	 * checked if the lock can be acquired immediately.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = curthread;
		return (ret);
	}

	if (__predict_false(abstime &&
		(abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		if (abstime != NULL) {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			if (ts2.tv_sec < 0 ||
			    (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
				return (ETIMEDOUT);
			tsp = &ts2;
		} else
			tsp = NULL;

		/* go to the kernel and lock it */
		ret = __thr_rwlock_wrlock(&prwlock->lock, tsp);
		if (ret == 0) {
			prwlock->owner = curthread;
			break;
		}

		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = curthread;
			break;
		}
	}
	return (ret);
}

int
_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_wrlock_common(rwlock, abstime));
}

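/*
 * Release either a read or a write lock.  The URWLOCK_WRITE_OWNER bit in
 * the lock state tells which kind is held: write unlocks verify that the
 * caller is the owner (and unwind recursion first), read unlocks drop the
 * per-thread rdlock_count used by the rdlock paths above.
 */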
int
_pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	prwlock = *rwlock;

	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
		return (EINVAL);

	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != curthread))
			return (EPERM);
		if (__predict_false(
			prwlock->kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)) {
			if (prwlock->recurse > 0) {
				prwlock->recurse--;
				return (0);
			}
		}
		prwlock->owner = NULL;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}