xref: /freebsd/lib/libthr/thread/thr_rwlock.c (revision 012e32c66529ba74a80b7a02da6c983d5a2c2f70)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

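/*
 * Process-shared rwlocks are kept on a separate shared "off-page"
 * rather than inside the application-visible pthread_rwlock_t word
 * (see __thr_pshared_offpage() below), so the structure must fit
 * within the smallest supported page size.
 */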
_Static_assert(sizeof(struct pthread_rwlock) <= THR_PAGE_SIZE_MIN,
    "pthread_rwlock is too large for off-page");

__weak_reference(_thr_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_thr_rwlock_destroy, _pthread_rwlock_destroy);
__weak_reference(_thr_rwlock_init, pthread_rwlock_init);
__weak_reference(_thr_rwlock_init, _pthread_rwlock_init);
__weak_reference(_Tthr_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_Tthr_rwlock_rdlock, _pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_Tthr_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_Tthr_rwlock_tryrdlock, _pthread_rwlock_tryrdlock);
__weak_reference(_Tthr_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_Tthr_rwlock_trywrlock, _pthread_rwlock_trywrlock);
__weak_reference(_Tthr_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_Tthr_rwlock_unlock, _pthread_rwlock_unlock);
__weak_reference(_Tthr_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_Tthr_rwlock_wrlock, _pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock);
static int init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out);

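/*
 * Translate the user-visible pthread_rwlock_t to the backing
 * struct pthread_rwlock.  The common case, an already-initialized
 * process-private lock, stays inline; process-shared handles and
 * locks still holding a static-initializer or destroyed value fall
 * through to the out-of-line init_rwlock() slow path.
 */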
static __always_inline int
check_and_init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
	if (__predict_false(*rwlock == THR_PSHARED_PTR ||
	    *rwlock <= THR_RWLOCK_DESTROYED))
		return (init_rwlock(rwlock, rwlock_out));
	*rwlock_out = *rwlock;
	return (0);
}

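/*
 * Slow path: look up the off-page object for a process-shared lock,
 * lazily initialize a statically initialized lock, and reject a
 * destroyed lock with EINVAL.
 */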
static int __noinline
init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
	pthread_rwlock_t prwlock;
	int ret;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else if ((prwlock = *rwlock) <= THR_RWLOCK_DESTROYED) {
		if (prwlock == THR_RWLOCK_INITIALIZER) {
			ret = init_static(_get_curthread(), rwlock);
			if (ret != 0)
				return (ret);
		} else if (prwlock == THR_RWLOCK_DESTROYED) {
			return (EINVAL);
		}
		prwlock = *rwlock;
	}
	*rwlock_out = prwlock;
	return (0);
}

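/*
 * Allocate and initialize lock storage.  A process-private lock gets a
 * cache-line-aligned heap allocation to avoid false sharing; a
 * process-shared lock lives on the off-page, with the user word set to
 * the THR_PSHARED_PTR marker.
 */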
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t prwlock;

	if (attr == NULL || *attr == NULL ||
	    (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
		prwlock = aligned_alloc(CACHE_LINE_SIZE,
		    roundup(sizeof(struct pthread_rwlock), CACHE_LINE_SIZE));
		if (prwlock == NULL)
			return (ENOMEM);
		memset(prwlock, 0, sizeof(struct pthread_rwlock));
		*rwlock = prwlock;
	} else {
		prwlock = __thr_pshared_offpage(rwlock, 1);
		if (prwlock == NULL)
			return (EFAULT);
		prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
		*rwlock = THR_PSHARED_PTR;
	}
	return (0);
}

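/*
 * Mark the user word THR_RWLOCK_DESTROYED before releasing the backing
 * storage, so later use of the stale handle fails with EINVAL.
 * Destroying a lock that is still in its static-initializer state is a
 * no-op.
 */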
int
_thr_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	prwlock = *rwlock;
	if (prwlock == THR_RWLOCK_INITIALIZER)
		ret = 0;
	else if (prwlock == THR_RWLOCK_DESTROYED)
		ret = EINVAL;
	else if (prwlock == THR_PSHARED_PTR) {
		*rwlock = THR_RWLOCK_DESTROYED;
		__thr_pshared_destroy(rwlock);
		ret = 0;
	} else {
		*rwlock = THR_RWLOCK_DESTROYED;
		free(prwlock);
		ret = 0;
	}
	return (ret);
}

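/*
 * One-time initialization of a statically initialized lock, serialized
 * by _rwlock_static_lock; the initializer value is re-checked under the
 * lock so that racing threads initialize it only once.
 */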
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == THR_RWLOCK_INITIALIZER)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

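/* The user word is cleared first so rwlock_init() sees a fresh lock. */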
int
_thr_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	_thr_check_init();
	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

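/*
 * Common read-lock path: try the userland fast path first, then block
 * in the kernel, retrying the userland lock whenever the sleep is
 * interrupted by a signal.
 */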
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}
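	/*
	 * Illustrative sketch (not part of the original source): the
	 * reader-preference escape hatch above permits, for example:
	 *
	 *	pthread_rwlock_rdlock(&lk);	// first read lock succeeds
	 *	...				// another thread blocks in wrlock
	 *	pthread_rwlock_rdlock(&lk);	// recursive read lock still
	 *					// succeeds instead of deadlocking
	 */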

	/*
	 * POSIX says that the validity of the abstime parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* Go to the kernel and lock it there. */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
		if (ret != EINTR)
			break;

		/* If interrupted, try to lock it in userland again. */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			break;
		}
	}
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_Tthr_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	_thr_check_init();
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
	_thr_check_init();
	return (rwlock_rdlock_common(rwlock, abstime));
}

int
_Tthr_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	_thr_check_init();
	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	curthread = _get_curthread();
	if (curthread->rdlock_count) {
		/*
		 * See the comment in rwlock_rdlock_common() for why
		 * reader preference is requested when the thread
		 * already holds one or more read locks.
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_Tthr_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int ret;

	_thr_check_init();
	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	curthread = _get_curthread();
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = TID(curthread);
	return (ret);
}

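/*
 * Common write-lock path: the mirror image of rwlock_rdlock_common().
 * On success the owning thread's TID is recorded so that unlock can
 * later verify write ownership.
 */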
static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	/*
	 * POSIX says that the validity of the abstime parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = TID(curthread);
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* Go to the kernel and lock it there. */
		ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
		if (ret == 0) {
			prwlock->owner = TID(curthread);
			break;
		}

		if (ret != EINTR)
			break;

		/* If interrupted, try to lock it in userland again. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = TID(curthread);
			break;
		}
	}
	return (ret);
}

int
_Tthr_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	_thr_check_init();
	return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
	_thr_check_init();
	return (rwlock_wrlock_common(rwlock, abstime));
}

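/*
 * The rw_state word is snapshotted before unlocking: if
 * URWLOCK_WRITE_OWNER is set we held the lock as a writer, so the
 * recorded owner is verified and cleared; otherwise we held it as a
 * reader, and the per-thread rdlock count is decremented after a
 * successful unlock.
 */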
int
_Tthr_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else {
		prwlock = *rwlock;
	}

	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
		return (EINVAL);

	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != TID(curthread)))
			return (EPERM);
		prwlock->owner = 0;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}