xref: /freebsd/lib/libthr/thread/thr_rwlock.c (revision d37ea99837e6ad50837fd9fe1771ddf1c3ba6002)
/*-
 * Copyright (c) 1998 Alex Nash
 * Copyright (c) 2004 Michael Telahun Makonnen
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include <pthread.h>
#include "thr_private.h"

/* maximum number of times a read lock may be obtained */
#define	MAX_READ_LOCKS		(INT_MAX - 1)

/*
 * For distinguishing operations on read and write locks.
 */
enum rwlock_type {RWT_READ, RWT_WRITE};

/* Support for statically initialized read-write locks. */
static struct umtx init_lock = UMTX_INITIALIZER;

__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);

static int	insert_rwlock(struct pthread_rwlock *, enum rwlock_type);
static int	rwlock_init_static(struct pthread_rwlock **rwlock);
static int	rwlock_rdlock_common(pthread_rwlock_t *, int,
		    const struct timespec *);
static int	rwlock_wrlock_common(pthread_rwlock_t *, int,
		    const struct timespec *);

int
_pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;

	if (rwlock == NULL || *rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	if (prwlock->state != 0)
		return (EBUSY);

	pthread_mutex_destroy(&prwlock->lock);
	pthread_cond_destroy(&prwlock->read_signal);
	pthread_cond_destroy(&prwlock->write_signal);
	free(prwlock);

	*rwlock = NULL;

	return (0);
}

int
_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t	prwlock;
	int			ret;

	/* allocate rwlock object */
	prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));

	if (prwlock == NULL) {
		ret = ENOMEM;
		goto out;
	}

	/* initialize the lock */
	if ((ret = pthread_mutex_init(&prwlock->lock, NULL)) != 0)
		goto out;

	/* initialize the read condition signal */
	if ((ret = pthread_cond_init(&prwlock->read_signal, NULL)) != 0)
		goto out_readcond;

	/* initialize the write condition signal */
	if ((ret = pthread_cond_init(&prwlock->write_signal, NULL)) != 0)
		goto out_writecond;

	/* success */
	prwlock->state		 = 0;
	prwlock->blocked_writers = 0;

	*rwlock = prwlock;
	return (0);

out_writecond:
	/* the write condition variable failed; undo the read one */
	pthread_cond_destroy(&prwlock->read_signal);
out_readcond:
	/* the read condition variable failed; undo the mutex */
	pthread_mutex_destroy(&prwlock->lock);
out:
	if (prwlock != NULL)
		free(prwlock);
	return (ret);
}

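/*
 * Usage sketch (illustrative only, not part of the library; kept under
 * #if 0 so it is never compiled): the dynamic lifecycle implemented by
 * the two functions above.  Destroying a lock that is still held fails
 * with EBUSY because 'state' is nonzero.
 */
#if 0
static void
example_lifecycle(void)
{
	pthread_rwlock_t lock;
	int error;

	if (pthread_rwlock_init(&lock, NULL) != 0)
		return;
	pthread_rwlock_rdlock(&lock);
	error = pthread_rwlock_destroy(&lock);	/* error == EBUSY */
	pthread_rwlock_unlock(&lock);
	error = pthread_rwlock_destroy(&lock);	/* error == 0 */
	(void)error;
}
#endif
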
/*
 * If nonblocking is 0 this function will wait on the lock.  If
 * it is nonzero it will return immediately with EBUSY.
 */
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, int nonblocking,
    const struct timespec *timeout)
{
	struct rwlock_held	*rh;
	pthread_rwlock_t	prwlock;
	int			ret;

	rh = NULL;
	if (rwlock == NULL)
		return (EINVAL);

	/*
	 * Check for validity of the timeout parameter.
	 */
	if (timeout != NULL &&
	    (timeout->tv_nsec < 0 || timeout->tv_nsec >= 1000000000))
		return (EINVAL);

	if ((ret = rwlock_init_static(rwlock)) != 0)
		return (ret);
	prwlock = *rwlock;

	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	/* check lock count */
	if (prwlock->state == MAX_READ_LOCKS) {
		pthread_mutex_unlock(&prwlock->lock);
		return (EAGAIN);
	}

	/* give writers priority over readers */
	while (prwlock->blocked_writers || prwlock->state < 0) {
		if (nonblocking) {
			pthread_mutex_unlock(&prwlock->lock);
			return (EBUSY);
		}

		/*
		 * If this lock is already held for writing we have
		 * a deadlock situation.
		 */
		if (curthread->rwlockList != NULL && prwlock->state < 0) {
			LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
				if (rh->rh_rwlock == prwlock &&
				    rh->rh_wrcount > 0) {
					pthread_mutex_unlock(&prwlock->lock);
					return (EDEADLK);
				}
			}
		}
		if (timeout == NULL)
			ret = pthread_cond_wait(&prwlock->read_signal,
			    &prwlock->lock);
		else
			ret = pthread_cond_timedwait(&prwlock->read_signal,
			    &prwlock->lock, timeout);

		if (ret != 0 && ret != EINTR) {
			/* can't do a whole lot if this fails */
			pthread_mutex_unlock(&prwlock->lock);
			return (ret);
		}
	}

	++prwlock->state; /* indicate we are locked for reading */
	ret = insert_rwlock(prwlock, RWT_READ);
	if (ret != 0) {
		pthread_mutex_unlock(&prwlock->lock);
		return (ret);
	}

	/*
	 * Something is really wrong if this call fails.  Returning
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	pthread_mutex_unlock(&prwlock->lock);

	return (0);
}

int
_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, 0, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
    const struct timespec *timeout)
{
	return (rwlock_rdlock_common(rwlock, 0, timeout));
}

int
_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, 1, NULL));
}

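/*
 * Usage sketch (illustrative only, not part of the library; kept under
 * #if 0 so it is never compiled): how the blocking, nonblocking, and
 * timed read-lock entry points above differ for a caller.  The timed
 * variant takes an absolute CLOCK_REALTIME deadline, which is handed
 * through to pthread_cond_timedwait().  Assumes <time.h> for
 * clock_gettime().
 */
#if 0
static void
example_read_lock(pthread_rwlock_t *lock)
{
	struct timespec abstime;

	/* Blocks while a writer holds the lock or is queued for it. */
	pthread_rwlock_rdlock(lock);
	pthread_rwlock_unlock(lock);

	/* Returns EBUSY instead of blocking. */
	if (pthread_rwlock_tryrdlock(lock) == 0)
		pthread_rwlock_unlock(lock);

	/* Gives up with ETIMEDOUT once the deadline passes. */
	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += 1;
	if (pthread_rwlock_timedrdlock(lock, &abstime) == 0)
		pthread_rwlock_unlock(lock);
}
#endif
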
int
_pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct rwlock_held	*rh;
	pthread_rwlock_t	prwlock;
	int			ret;

	rh = NULL;
	if (rwlock == NULL || *rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	if (curthread->rwlockList != NULL) {
		LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
			if (rh->rh_rwlock == prwlock)
				break;
		}
	}
	if (rh == NULL) {
		ret = EPERM;
		goto out;
	}
	if (prwlock->state > 0) {
		rh->rh_rdcount--;
		if (rh->rh_rdcount == 0) {
			LIST_REMOVE(rh, rh_link);
			free(rh);
		}
		if (--prwlock->state == 0 && prwlock->blocked_writers)
			ret = pthread_cond_signal(&prwlock->write_signal);
	} else if (prwlock->state < 0) {
		rh->rh_wrcount--;
		if (rh->rh_wrcount == 0) {
			LIST_REMOVE(rh, rh_link);
			free(rh);
		}
		prwlock->state = 0;
		if (prwlock->blocked_writers)
			ret = pthread_cond_signal(&prwlock->write_signal);
		else
			ret = pthread_cond_broadcast(&prwlock->read_signal);
	} else {
		/*
		 * No thread holds this lock. We should never get here.
		 */
		PTHREAD_ASSERT(0, "state=0 on read-write lock held by thread");
		ret = EPERM;
		goto out;
	}

out:
	/* see the comment on this in rwlock_rdlock_common */
	pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

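/*
 * Usage sketch (illustrative only, not part of the library; kept under
 * #if 0 so it is never compiled): because the unlock path above looks
 * the lock up in curthread's rwlockList, a thread that never acquired
 * the lock gets EPERM back instead of corrupting 'state'.
 */
#if 0
static void
example_unlock_by_nonowner(pthread_rwlock_t *lock)
{
	int error;

	/* This thread holds no lock on 'lock'. */
	error = pthread_rwlock_unlock(lock);	/* error == EPERM */
	(void)error;
}
#endif
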
int
_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common(rwlock, 0, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
    const struct timespec *timeout)
{
	return (rwlock_wrlock_common(rwlock, 0, timeout));
}

int
_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common(rwlock, 1, NULL));
}

/*
 * If nonblocking is 0 this function will wait on the lock.  If
 * it is nonzero it will return immediately with EBUSY.
 */
static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, int nonblocking,
    const struct timespec *timeout)
{
	struct rwlock_held	*rh;
	pthread_rwlock_t	prwlock;
	int			ret;

	rh = NULL;
	if (rwlock == NULL)
		return (EINVAL);

	/*
	 * Check the timeout value for validity.
	 */
	if (timeout != NULL &&
	    (timeout->tv_nsec < 0 || timeout->tv_nsec >= 1000000000))
		return (EINVAL);

	if ((ret = rwlock_init_static(rwlock)) != 0)
		return (ret);
	prwlock = *rwlock;

	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	while (prwlock->state != 0) {
		if (nonblocking) {
			pthread_mutex_unlock(&prwlock->lock);
			return (EBUSY);
		}

		/*
		 * If this thread already holds the lock for reading
		 * or writing we have a deadlock situation.
		 */
		if (curthread->rwlockList != NULL) {
			LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
				if (rh->rh_rwlock == prwlock &&
				    (rh->rh_rdcount > 0 || rh->rh_wrcount > 0)) {
					pthread_mutex_unlock(&prwlock->lock);
					return (EDEADLK);
				}
			}
		}

		++prwlock->blocked_writers;

		if (timeout == NULL)
			ret = pthread_cond_wait(&prwlock->write_signal,
			    &prwlock->lock);
		else
			ret = pthread_cond_timedwait(&prwlock->write_signal,
			    &prwlock->lock, timeout);

		if (ret != 0 && ret != EINTR) {
			--prwlock->blocked_writers;
			pthread_mutex_unlock(&prwlock->lock);
			return (ret);
		}

		--prwlock->blocked_writers;
	}

	/* indicate we are locked for writing */
	prwlock->state = -1;
	ret = insert_rwlock(prwlock, RWT_WRITE);
	if (ret != 0) {
		pthread_mutex_unlock(&prwlock->lock);
		return (ret);
	}

	/* see the comment on this in rwlock_rdlock_common */
	pthread_mutex_unlock(&prwlock->lock);

	return (0);
}

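/*
 * Usage sketch (illustrative only, not part of the library; kept under
 * #if 0 so it is never compiled): the rwlockList walk above means a
 * thread that re-requests a lock it already holds is refused with
 * EDEADLK rather than blocking on itself forever.
 */
#if 0
static void
example_deadlock_detection(pthread_rwlock_t *lock)
{
	int error;

	pthread_rwlock_wrlock(lock);
	error = pthread_rwlock_wrlock(lock);	/* error == EDEADLK */
	(void)error;
	pthread_rwlock_unlock(lock);
}
#endif
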
static int
insert_rwlock(struct pthread_rwlock *prwlock, enum rwlock_type rwt)
{
	struct rwlock_held *rh;

	/*
	 * Initialize the rwlock list in the thread. Although this function
	 * may be called for many read-write locks, the initialization
	 * of the head happens only once during the lifetime of
	 * the thread.
	 */
	if (curthread->rwlockList == NULL) {
		curthread->rwlockList =
		    (struct rwlock_listhead *)malloc(sizeof(struct rwlock_listhead));
		if (curthread->rwlockList == NULL) {
			return (ENOMEM);
		}
		LIST_INIT(curthread->rwlockList);
	}

	LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
		if (rh->rh_rwlock == prwlock) {
			if (rwt == RWT_READ)
				rh->rh_rdcount++;
			else if (rwt == RWT_WRITE)
				rh->rh_wrcount++;
			return (0);
		}
	}

	/*
	 * This is the first time we're holding this lock,
	 * so create a new entry.
	 */
	rh = (struct rwlock_held *)malloc(sizeof(struct rwlock_held));
	if (rh == NULL)
		return (ENOMEM);
	rh->rh_rwlock = prwlock;
	rh->rh_rdcount = 0;
	rh->rh_wrcount = 0;
	if (rwt == RWT_READ)
		rh->rh_rdcount = 1;
	else if (rwt == RWT_WRITE)
		rh->rh_wrcount = 1;
	LIST_INSERT_HEAD(curthread->rwlockList, rh, rh_link);
	return (0);
}

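/*
 * Usage sketch (illustrative only, not part of the library; kept under
 * #if 0 so it is never compiled): insert_rwlock() keeps one list entry
 * per lock with separate read and write counts, so recursive read locks
 * taken by one thread are tracked and must each be matched by an
 * unlock.  (If a writer were queued, the second rdlock would block
 * instead; this sketch assumes no contending writers.)
 */
#if 0
static void
example_recursive_read(pthread_rwlock_t *lock)
{
	pthread_rwlock_rdlock(lock);	/* rh_rdcount == 1, state == 1 */
	pthread_rwlock_rdlock(lock);	/* rh_rdcount == 2, state == 2 */
	pthread_rwlock_unlock(lock);	/* rh_rdcount == 1, state == 1 */
	pthread_rwlock_unlock(lock);	/* entry freed, state == 0 */
}
#endif
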
/*
 * There are consumers of rwlocks, including our own libc, that depend on
 * PTHREAD_RWLOCK_INITIALIZER doing for rwlocks what
 * PTHREAD_MUTEX_INITIALIZER does for statically initialized mutexes.
 * This symbol was dropped in The Open Group Base Specifications Issue 6
 * and does not exist in IEEE Std 1003.1-2003, but it should still be
 * supported for backwards compatibility.
 */
static int
rwlock_init_static(struct pthread_rwlock **rwlock)
{
	int error;

	/*
	 * The initial check is done without locks so as not to
	 * pessimize the common path.
	 */
	error = 0;
	if (*rwlock == PTHREAD_RWLOCK_INITIALIZER) {
		UMTX_LOCK(&init_lock);
		if (*rwlock == PTHREAD_RWLOCK_INITIALIZER)
			error = _pthread_rwlock_init(rwlock, NULL);
		UMTX_UNLOCK(&init_lock);
	}
	return (error);
}
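
/*
 * Usage sketch (illustrative only, not part of the library; kept under
 * #if 0 so it is never compiled): the backwards-compatible static
 * initializer described above.  The first lock operation passes through
 * rwlock_init_static(), which allocates and initializes the real lock
 * object exactly once.
 */
#if 0
static pthread_rwlock_t config_lock = PTHREAD_RWLOCK_INITIALIZER;

static void
example_static_initializer(void)
{
	pthread_rwlock_rdlock(&config_lock);	/* triggers lazy init */
	pthread_rwlock_unlock(&config_lock);
}
#endif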
478