/* xref: /freebsd/lib/libthr/thread/thr_rwlock.c (revision 1e413cf93298b5b97441a21d9a50fdcd0ee9945e) */
/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

/* maximum number of times a read lock may be obtained */
#define	MAX_READ_LOCKS		(INT_MAX - 1)

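/*
 * Lock state encoding used throughout this file (the fields live in
 * struct pthread_rwlock):
 *
 *	state == 0	unlocked
 *	state  > 0	read-locked; the value is the number of read locks held
 *	state  < 0	write-locked (always set to -1 here)
 *
 * blocked_writers counts threads sleeping in wrlock(); readers normally
 * defer to them so that writers are not starved.
 */
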
__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
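
/*
 * The __weak_reference() lines above export each _pthread_rwlock_*
 * implementation under its public pthread_rwlock_* name as a weak alias.
 * Illustrative caller-side usage (not part of this file, error handling
 * omitted):
 *
 *	pthread_rwlock_t lock;
 *
 *	pthread_rwlock_init(&lock, NULL);
 *	pthread_rwlock_rdlock(&lock);		(shared access)
 *	pthread_rwlock_unlock(&lock);
 *	pthread_rwlock_wrlock(&lock);		(exclusive access)
 *	pthread_rwlock_unlock(&lock);
 *	pthread_rwlock_destroy(&lock);
 */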

/*
 * Internal helpers
 */

static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
{
	pthread_rwlock_t prwlock;
	int ret;

	/* allocate rwlock object */
	prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));

	if (prwlock == NULL)
		return (ENOMEM);

	/* initialize the lock */
	if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
		free(prwlock);
	else {
		/* initialize the read condition signal */
		ret = _pthread_cond_init(&prwlock->read_signal, NULL);

		if (ret != 0) {
			_pthread_mutex_destroy(&prwlock->lock);
			free(prwlock);
		} else {
			/* initialize the write condition signal */
			ret = _pthread_cond_init(&prwlock->write_signal, NULL);

			if (ret != 0) {
				_pthread_cond_destroy(&prwlock->read_signal);
				_pthread_mutex_destroy(&prwlock->lock);
				free(prwlock);
			} else {
				/* success */
				prwlock->state = 0;
				prwlock->blocked_writers = 0;
				*rwlock = prwlock;
			}
		}
	}

	return (ret);
}

int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
	int ret;

	if (rwlock == NULL)
		ret = EINVAL;
	else {
		pthread_rwlock_t prwlock;

		prwlock = *rwlock;

		_pthread_mutex_destroy(&prwlock->lock);
		_pthread_cond_destroy(&prwlock->read_signal);
		_pthread_cond_destroy(&prwlock->write_signal);
		free(prwlock);

		*rwlock = NULL;

		ret = 0;
	}
	return (ret);
}

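/*
 * A statically initialized rwlock is simply a NULL pointer; the first
 * locking call routes through here to allocate the real object.  The
 * process-wide _rwlock_static_lock serializes concurrent first uses of
 * the same rwlock.
 */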
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == NULL)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

int
_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

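/*
 * Common path for rdlock() and timedrdlock().  A NULL abstime means wait
 * indefinitely.  Readers defer to blocked writers unless the calling
 * thread already holds read locks (see the comment below).
 */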
static int
rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	/* check lock count */
	if (prwlock->state == MAX_READ_LOCKS) {
		_pthread_mutex_unlock(&prwlock->lock);
		return (EAGAIN);
	}

	if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
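		/*
		 * Example: thread A already holds a read lock when thread B
		 * blocks in wrlock().  If A's second rdlock() waited for B,
		 * neither could ever make progress, since B needs A to drop
		 * its first read lock.
		 */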
		;	/* nothing needed */
	} else {
		/* give writers priority over readers */
		while (prwlock->blocked_writers || prwlock->state < 0) {
			if (abstime)
				ret = _pthread_cond_timedwait
				    (&prwlock->read_signal,
				    &prwlock->lock, abstime);
			else
				ret = _pthread_cond_wait(&prwlock->read_signal,
				    &prwlock->lock);
			if (ret != 0) {
				/* can't do a whole lot if this fails */
				_pthread_mutex_unlock(&prwlock->lock);
				return (ret);
			}
		}
	}

	curthread->rdlock_count++;
	prwlock->state++; /* indicate we are locked for reading */

	/*
	 * Something is really wrong if this call fails.  Returning
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
	 const struct timespec *abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}

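/*
 * Non-blocking read lock: instead of waiting, fail with EBUSY when a
 * writer holds the lock or writers are queued, or with EAGAIN when the
 * read-lock count has reached MAX_READ_LOCKS.
 */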
int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	if (prwlock->state == MAX_READ_LOCKS)
		ret = EAGAIN;
	else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/* see comment for pthread_rwlock_rdlock() */
		curthread->rdlock_count++;
		prwlock->state++;
	}
	/* give writers priority over readers */
	else if (prwlock->blocked_writers || prwlock->state < 0)
		ret = EBUSY;
	else {
		curthread->rdlock_count++;
		prwlock->state++; /* indicate we are locked for reading */
	}

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

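/*
 * Non-blocking write lock: fail with EBUSY if the lock is held in any
 * mode (readers or a writer).
 */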
int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	if (prwlock->state != 0)
		ret = EBUSY;
	else
		/* indicate we are locked for writing */
		prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

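/*
 * Release the lock.  state > 0 means a read lock is being released: the
 * count drops by one and the last reader wakes one blocked writer, if
 * any.  state < 0 means the write lock is being released: a blocked
 * writer is preferred, otherwise all waiting readers are woken.
 * state == 0 means the lock is not held, so EINVAL is returned.
 */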
int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	if (prwlock == NULL)
		return (EINVAL);

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	if (prwlock->state > 0) {
		curthread->rdlock_count--;
		prwlock->state--;
		if (prwlock->state == 0 && prwlock->blocked_writers)
			ret = _pthread_cond_signal(&prwlock->write_signal);
	} else if (prwlock->state < 0) {
		prwlock->state = 0;

		if (prwlock->blocked_writers)
			ret = _pthread_cond_signal(&prwlock->write_signal);
		else
			ret = _pthread_cond_broadcast(&prwlock->read_signal);
	} else
		ret = EINVAL;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

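/*
 * Common path for wrlock() and timedwrlock().  The caller sleeps on
 * write_signal, with blocked_writers raised while it waits, until no
 * reader or writer holds the lock; a NULL abstime means wait
 * indefinitely.
 */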
static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	while (prwlock->state != 0) {
		prwlock->blocked_writers++;

		if (abstime != NULL)
			ret = _pthread_cond_timedwait(&prwlock->write_signal,
			    &prwlock->lock, abstime);
		else
			ret = _pthread_cond_wait(&prwlock->write_signal,
			    &prwlock->lock);
		if (ret != 0) {
			prwlock->blocked_writers--;
			_pthread_mutex_unlock(&prwlock->lock);
			return (ret);
		}

		prwlock->blocked_writers--;
	}

	/* indicate we are locked for writing */
	prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common (rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_wrlock_common (rwlock, abstime));
}