/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

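/*
 * Each pthread_rwlock_* entry point is exported as a weak alias for
 * the corresponding _pthread_rwlock_* implementation defined below.
 */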
__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

/*
 * Prototypes
 */

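/*
 * Allocate and zero the lock structure behind *rwlock.  The attribute
 * argument is accepted for the POSIX interface but is currently unused.
 */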
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
{
	pthread_rwlock_t prwlock;

	prwlock = (pthread_rwlock_t)calloc(1, sizeof(struct pthread_rwlock));
	if (prwlock == NULL)
		return (ENOMEM);
	*rwlock = prwlock;
	return (0);
}

int
_pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	int ret;

	if (rwlock == NULL)
		ret = EINVAL;
	else {
		pthread_rwlock_t prwlock;

		prwlock = *rwlock;
		*rwlock = NULL;

		free(prwlock);
		ret = 0;
	}
	return (ret);
}

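/*
 * Lazily initialize a rwlock that was created with
 * PTHREAD_RWLOCK_INITIALIZER.  The global _rwlock_static_lock
 * serializes threads racing to perform the first-use initialization.
 */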
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == NULL)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

int
_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}
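
/*
 * Usage sketch (illustrative only, not part of this file): a rwlock
 * may be set up either way; the static form is initialized on first
 * use via init_static().
 *
 *	pthread_rwlock_t a = PTHREAD_RWLOCK_INITIALIZER;
 *
 *	pthread_rwlock_t b;
 *	pthread_rwlock_init(&b, NULL);
 */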

static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	struct timespec ts, ts2, *tsp;
	int flags;
	int ret;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}
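
	/*
	 * Illustrative scenario (added commentary): thread A holds this
	 * lock for reading, thread B blocks waiting to write, and A then
	 * calls rdlock() again.  With writer preference, A would queue
	 * behind B while B waits for A to release its first read hold,
	 * deadlocking both.  URWLOCK_PREFER_READER lets A's second
	 * rdlock succeed instead.
	 */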

	/*
	 * POSIX says that the validity of the abstime parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

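	/*
	 * Added commentary: the kernel sleep takes a relative timeout,
	 * so each retry recomputes the time remaining until the caller's
	 * absolute deadline before sleeping again.
	 */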
	for (;;) {
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			if (ts2.tv_sec < 0 ||
			    (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
				return (ETIMEDOUT);
			tsp = &ts2;
		} else
			tsp = NULL;

		/* go to the kernel and lock it */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, tsp);
		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			break;
		}
	}
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}
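
/*
 * Usage sketch (illustrative only): wait at most two seconds for a
 * read lock.  The deadline is absolute and measured against
 * CLOCK_REALTIME, matching the clock_gettime() call above.
 *
 *	struct timespec deadline;
 *	clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 2;
 *	if (pthread_rwlock_timedrdlock(&lock, &deadline) == ETIMEDOUT)
 *		...give up...
 */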

int
_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	if (curthread->rdlock_count) {
		/*
		 * See the comment in rwlock_rdlock_common() for why a
		 * thread that already holds rdlocks must be given reader
		 * preference to avoid deadlocking against blocked writers.
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = curthread;
	return (ret);
}

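/*
 * Common path for wrlock and timedwrlock: try the userland fast path
 * first, then sleep in the kernel, rechecking the caller's absolute
 * deadline on every retry.  The owning thread is recorded on success
 * so that _pthread_rwlock_unlock() can enforce its EPERM check.
 */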
static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	struct timespec ts, ts2, *tsp;
	int ret;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/*
	 * POSIX says that the validity of the abstime parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = curthread;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		if (abstime != NULL) {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			if (ts2.tv_sec < 0 ||
			    (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
				return (ETIMEDOUT);
			tsp = &ts2;
		} else
			tsp = NULL;

		/* go to the kernel and lock it */
		ret = __thr_rwlock_wrlock(&prwlock->lock, tsp);
		if (ret == 0) {
			prwlock->owner = curthread;
			break;
		}

		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = curthread;
			break;
		}
	}
	return (ret);
}

int
_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_wrlock_common(rwlock, abstime));
}

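/*
 * A write unlock is validated against the recorded owner; a read
 * unlock simply drops the per-thread rdlock count.  The rw_state word
 * is sampled before releasing the lock so we still know afterwards
 * which kind of lock was held.
 */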
int
_pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	if (__predict_false(prwlock == NULL))
		return (EINVAL);

	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != curthread))
			return (EPERM);
		prwlock->owner = NULL;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}
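
/*
 * Typical usage (illustrative only):
 *
 *	pthread_rwlock_rdlock(&lock);
 *	...read shared state...
 *	pthread_rwlock_unlock(&lock);
 *
 *	pthread_rwlock_wrlock(&lock);
 *	...modify shared state...
 *	pthread_rwlock_unlock(&lock);
 */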