xref: /freebsd/lib/libthr/thread/thr_cond.c (revision 2a339d9e3dc129f0b0b79c2cb8d2bb0386fb0f5f)
1bb535300SJeff Roberson /*
2a091d823SDavid Xu  * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
31bdbd705SKonstantin Belousov  * Copyright (c) 2015 The FreeBSD Foundation
4bb535300SJeff Roberson  * All rights reserved.
5bb535300SJeff Roberson  *
61bdbd705SKonstantin Belousov  * Portions of this software were developed by Konstantin Belousov
71bdbd705SKonstantin Belousov  * under sponsorship from the FreeBSD Foundation.
81bdbd705SKonstantin Belousov  *
9bb535300SJeff Roberson  * Redistribution and use in source and binary forms, with or without
10bb535300SJeff Roberson  * modification, are permitted provided that the following conditions
11bb535300SJeff Roberson  * are met:
12bb535300SJeff Roberson  * 1. Redistributions of source code must retain the above copyright
13a091d823SDavid Xu  *    notice unmodified, this list of conditions, and the following
14a091d823SDavid Xu  *    disclaimer.
15bb535300SJeff Roberson  * 2. Redistributions in binary form must reproduce the above copyright
16bb535300SJeff Roberson  *    notice, this list of conditions and the following disclaimer in the
17bb535300SJeff Roberson  *    documentation and/or other materials provided with the distribution.
18bb535300SJeff Roberson  *
19a091d823SDavid Xu  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20a091d823SDavid Xu  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21a091d823SDavid Xu  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22a091d823SDavid Xu  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23a091d823SDavid Xu  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24a091d823SDavid Xu  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25a091d823SDavid Xu  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26a091d823SDavid Xu  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27a091d823SDavid Xu  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28a091d823SDavid Xu  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29bb535300SJeff Roberson  */
30a091d823SDavid Xu 
3132793011SKonstantin Belousov #include <sys/cdefs.h>
3232793011SKonstantin Belousov __FBSDID("$FreeBSD$");
3332793011SKonstantin Belousov 
3437a6356bSDavid Xu #include "namespace.h"
35bb535300SJeff Roberson #include <stdlib.h>
36bb535300SJeff Roberson #include <errno.h>
37bb535300SJeff Roberson #include <string.h>
38bb535300SJeff Roberson #include <pthread.h>
39a091d823SDavid Xu #include <limits.h>
4037a6356bSDavid Xu #include "un-namespace.h"
41bb535300SJeff Roberson 
42a091d823SDavid Xu #include "thr_private.h"
4341f2bd85SMike Makonnen 
449e821f27SKonstantin Belousov _Static_assert(sizeof(struct pthread_cond) <= PAGE_SIZE,
459e821f27SKonstantin Belousov     "pthread_cond too large");
469e821f27SKonstantin Belousov 
4741f2bd85SMike Makonnen /*
48bb535300SJeff Roberson  * Prototypes
49bb535300SJeff Roberson  */
5037a6356bSDavid Xu int	__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
5137a6356bSDavid Xu int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
5237a6356bSDavid Xu 		       const struct timespec * abstime);
53a091d823SDavid Xu static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
54a091d823SDavid Xu static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
55a091d823SDavid Xu 		    const struct timespec *abstime, int cancel);
56d1078b0bSDavid Xu static int cond_signal_common(pthread_cond_t *cond);
57d1078b0bSDavid Xu static int cond_broadcast_common(pthread_cond_t *cond);
58a091d823SDavid Xu 
59a091d823SDavid Xu /*
60a091d823SDavid Xu  * Double underscore versions are cancellation points.  Single underscore
61a091d823SDavid Xu  * versions are not and are provided for libc internal usage (which
62a091d823SDavid Xu  * shouldn't introduce cancellation points).
63a091d823SDavid Xu  */
64a091d823SDavid Xu __weak_reference(__pthread_cond_wait, pthread_cond_wait);
65a091d823SDavid Xu __weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);
66bb535300SJeff Roberson 
67bb535300SJeff Roberson __weak_reference(_pthread_cond_init, pthread_cond_init);
68bb535300SJeff Roberson __weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
69bb535300SJeff Roberson __weak_reference(_pthread_cond_signal, pthread_cond_signal);
70bb535300SJeff Roberson __weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
71bb535300SJeff Roberson 
72d1078b0bSDavid Xu #define CV_PSHARED(cvp)	(((cvp)->__flags & USYNC_PROCESS_SHARED) != 0)
73d1078b0bSDavid Xu 
741bdbd705SKonstantin Belousov static void
751bdbd705SKonstantin Belousov cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
761bdbd705SKonstantin Belousov {
771bdbd705SKonstantin Belousov 
781bdbd705SKonstantin Belousov 	if (cattr == NULL) {
791bdbd705SKonstantin Belousov 		cvp->__clock_id = CLOCK_REALTIME;
801bdbd705SKonstantin Belousov 	} else {
811bdbd705SKonstantin Belousov 		if (cattr->c_pshared)
821bdbd705SKonstantin Belousov 			cvp->__flags |= USYNC_PROCESS_SHARED;
831bdbd705SKonstantin Belousov 		cvp->__clock_id = cattr->c_clockid;
841bdbd705SKonstantin Belousov 	}
851bdbd705SKonstantin Belousov }
861bdbd705SKonstantin Belousov 
87a091d823SDavid Xu static int
88a091d823SDavid Xu cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
89bb535300SJeff Roberson {
90d1078b0bSDavid Xu 	struct pthread_cond *cvp;
911bdbd705SKonstantin Belousov 	const struct pthread_cond_attr *cattr;
921bdbd705SKonstantin Belousov 	int pshared;
93a091d823SDavid Xu 
941bdbd705SKonstantin Belousov 	cattr = cond_attr != NULL ? *cond_attr : NULL;
951bdbd705SKonstantin Belousov 	if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
961bdbd705SKonstantin Belousov 		pshared = 0;
971bdbd705SKonstantin Belousov 		cvp = calloc(1, sizeof(struct pthread_cond));
981bdbd705SKonstantin Belousov 		if (cvp == NULL)
991bdbd705SKonstantin Belousov 			return (ENOMEM);
100a091d823SDavid Xu 	} else {
1011bdbd705SKonstantin Belousov 		pshared = 1;
1021bdbd705SKonstantin Belousov 		cvp = __thr_pshared_offpage(cond, 1);
1031bdbd705SKonstantin Belousov 		if (cvp == NULL)
1041bdbd705SKonstantin Belousov 			return (EFAULT);
1051bdbd705SKonstantin Belousov 	}
1061bdbd705SKonstantin Belousov 
107a091d823SDavid Xu 	/*
108a091d823SDavid Xu 	 * Initialise the condition variable structure:
109a091d823SDavid Xu 	 */
1101bdbd705SKonstantin Belousov 	cond_init_body(cvp, cattr);
1111bdbd705SKonstantin Belousov 	*cond = pshared ? THR_PSHARED_PTR : cvp;
1121bdbd705SKonstantin Belousov 	return (0);
113a091d823SDavid Xu }
114a091d823SDavid Xu 
/*
 * Lazily initialize a statically initialized condition variable on its
 * first use.  _cond_static_lock serializes concurrent first-use
 * initializations so only one thread performs cond_init().
 */
static int
init_static(struct pthread *thread, pthread_cond_t *cond)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

	/* Re-check under the lock: another thread may have won the race. */
	if (*cond == NULL)
		ret = cond_init(cond, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_cond_static_lock);

	return (ret);
}
131bb535300SJeff Roberson 
/*
 * Resolve the pthread_cond_t handle *cond into the local variable
 * `cvp'.  Handles three special cases: a process-shared condvar
 * (looked up in the pshared off-page), a statically initialized
 * condvar (initialized here on first use), and an already-destroyed
 * one (EINVAL).  NOTE: expands to code that may `return' from the
 * calling function.
 */
#define CHECK_AND_INIT_COND							\
	if (*cond == THR_PSHARED_PTR) {						\
		cvp = __thr_pshared_offpage(cond, 0);				\
		if (cvp == NULL)						\
			return (EINVAL);					\
	} else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) {	\
		if (cvp == THR_COND_INITIALIZER) {				\
			int ret;						\
			ret = init_static(_get_curthread(), cond);		\
			if (ret)						\
				return (ret);					\
		} else if (cvp == THR_COND_DESTROYED) {				\
			return (EINVAL);					\
		}								\
		cvp = *cond;							\
	}
148bbb64c21SDavid Xu 
149bb535300SJeff Roberson int
150bb535300SJeff Roberson _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
151bb535300SJeff Roberson {
152bb535300SJeff Roberson 
153a091d823SDavid Xu 	*cond = NULL;
154a091d823SDavid Xu 	return (cond_init(cond, cond_attr));
155bb535300SJeff Roberson }
156bb535300SJeff Roberson 
157bb535300SJeff Roberson int
158bb535300SJeff Roberson _pthread_cond_destroy(pthread_cond_t *cond)
159bb535300SJeff Roberson {
160d1078b0bSDavid Xu 	struct pthread_cond *cvp;
1611bdbd705SKonstantin Belousov 	int error;
162bb535300SJeff Roberson 
163d1078b0bSDavid Xu 	error = 0;
1641bdbd705SKonstantin Belousov 	if (*cond == THR_PSHARED_PTR) {
1651bdbd705SKonstantin Belousov 		cvp = __thr_pshared_offpage(cond, 0);
1661bdbd705SKonstantin Belousov 		if (cvp != NULL)
1671bdbd705SKonstantin Belousov 			__thr_pshared_destroy(cond);
1681bdbd705SKonstantin Belousov 		*cond = THR_COND_DESTROYED;
1691bdbd705SKonstantin Belousov 	} else if ((cvp = *cond) == THR_COND_INITIALIZER) {
1701bdbd705SKonstantin Belousov 		/* nothing */
1711bdbd705SKonstantin Belousov 	} else if (cvp == THR_COND_DESTROYED) {
172d1078b0bSDavid Xu 		error = EINVAL;
1731bdbd705SKonstantin Belousov 	} else {
174d1078b0bSDavid Xu 		cvp = *cond;
175bbb64c21SDavid Xu 		*cond = THR_COND_DESTROYED;
176d1078b0bSDavid Xu 		free(cvp);
177a091d823SDavid Xu 	}
178d1078b0bSDavid Xu 	return (error);
179a091d823SDavid Xu }
180a091d823SDavid Xu 
/*
 * Kernel-assisted wait path: the thread blocks in the kernel via
 * _thr_ucond_wait() after detaching the mutex, and reacquires the
 * mutex on the way out.
 *
 * Cancellation behavior:
 *   Thread may be canceled at start, if thread is canceled, it means it
 *   did not get a wakeup from pthread_cond_signal(), otherwise, it is
 *   not canceled.
 *   Thread cancellation never cause wakeup from pthread_cond_signal()
 *   to be lost.
 */
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	int error, error2, recurse, robust;

	curthread = _get_curthread();
	/* Enter the robust-mutex protocol first; remember if we did. */
	robust = _mutex_enter_robust(curthread, mp);

	/* Detach saves the recursion count; the kernel unlocks m_lock. */
	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0) {
		if (robust)
			_mutex_leave_robust(curthread, mp);
		return (error);
	}

	if (cancel)
		_thr_cancel_enter2(curthread, 0);
	error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
	    (struct umutex *)&mp->m_lock, abstime, CVWAIT_ABSTIME |
	    CVWAIT_CLOCKID);
	if (cancel)
		_thr_cancel_leave(curthread, 0);

	/*
	 * Note that PP mutex and ROBUST mutex may return
	 * interesting error codes.
	 */
	if (error == 0) {
		error2 = _mutex_cv_lock(mp, recurse, true);
	} else if (error == EINTR || error == ETIMEDOUT) {
		error2 = _mutex_cv_lock(mp, recurse, true);
		/*
		 * Do not do cancellation on EOWNERDEAD there.  The
		 * cancellation cleanup handler will use the protected
		 * state and unlock the mutex without making the state
		 * consistent and the state will be unrecoverable.
		 */
		if (error2 == 0 && cancel)
			_thr_testcancel(curthread);

		/* EINTR is not reported to the caller; the wait succeeded. */
		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		_mutex_cv_attach(mp, recurse);
		if (cancel)
			_thr_testcancel(curthread);
		error2 = 0;
	}
	if (robust)
		_mutex_leave_robust(curthread, mp);
	/* Mutex-relock failures take precedence over the wait result. */
	return (error2 != 0 ? error2 : error);
}
244d1078b0bSDavid Xu 
/*
 * Userland wait path.
 *
 * Thread waits in userland queue whenever possible, when thread
 * is signaled or broadcasted, it is removed from the queue, and
 * is saved in curthread's defer_waiters[] buffer, but won't be
 * woken up until mutex is unlocked.
 */

static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	struct sleepqueue *sq;
	int deferred, error, error2, recurse;

	curthread = _get_curthread();
	if (curthread->wchan != NULL)
		PANIC("thread was already on queue.");

	if (cancel)
		_thr_testcancel(curthread);

	_sleepq_lock(cvp);
	/*
	 * set __has_user_waiters before unlocking mutex, this allows
	 * us to check it without locking in pthread_cond_signal().
	 */
	cvp->__has_user_waiters = 1;
	deferred = 0;
	(void)_mutex_cv_unlock(mp, &recurse, &deferred);
	curthread->mutex_obj = mp;
	_sleepq_add(cvp, curthread);
	for(;;) {
		_thr_clear_wake(curthread);
		_sleepq_unlock(cvp);
		/*
		 * If the unlock deferred a wakeup of mutex waiters, issue
		 * it now, outside the sleepqueue lock.
		 */
		if (deferred) {
			deferred = 0;
			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
				(void)_umtx_op_err(&mp->m_lock,
				    UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
				    0, 0);
		}
		/* Flush any condvar wakeups we deferred while signaling. */
		if (curthread->nwaiter_defer > 0) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}

		if (cancel)
			_thr_cancel_enter2(curthread, 0);
		error = _thr_sleep(curthread, cvp->__clock_id, abstime);
		if (cancel)
			_thr_cancel_leave(curthread, 0);

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			/* Removed from the queue by signal/broadcast: woken. */
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			/* Canceled while still queued: dequeue ourselves. */
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters = _sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			error2 = _mutex_cv_lock(mp, recurse, false);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else /* this should not happen */
				return (error2);
		} else if (error == ETIMEDOUT) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			break;
		}
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	/* Reacquire the mutex; its error only matters if the wait was OK. */
	error2 = _mutex_cv_lock(mp, recurse, false);
	if (error == 0)
		error = error2;
	return (error);
}
327d1078b0bSDavid Xu 
/*
 * Common entry for all cond-wait variants: resolve the condvar and
 * mutex handles, verify mutex ownership, then dispatch to the kernel
 * or userland wait path.  `cancel' selects cancellation-point behavior.
 */
static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	const struct timespec *abstime, int cancel)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	int	error;

	/* Resolves `cvp'; may return EINVAL or init a static condvar. */
	CHECK_AND_INIT_COND

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
	} else {
		mp = *mutex;
	}

	/* The caller must hold the mutex. */
	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	/*
	 * Real-time scheduling, priority-protect/inherit or shared
	 * mutexes, and shared condvars must use the kernel wait path;
	 * everything else can wait in the userland sleepqueue.
	 */
	if (curthread->attr.sched_policy != SCHED_OTHER ||
	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
	    USYNC_PROCESS_SHARED)) != 0 ||
	    (cvp->__flags & USYNC_PROCESS_SHARED) != 0)
		return (cond_wait_kernel(cvp, mp, abstime, cancel));
	else
		return (cond_wait_user(cvp, mp, abstime, cancel));
}
358bb535300SJeff Roberson 
/*
 * Non-cancellation-point variant of pthread_cond_wait(), for libc
 * internal use.
 */
int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}
365bb535300SJeff Roberson 
/*
 * pthread_cond_wait(3): the exported variant, which is a cancellation
 * point.
 */
int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}
372bb535300SJeff Roberson 
/*
 * Non-cancellation-point variant of pthread_cond_timedwait(), for libc
 * internal use.  Rejects a missing or malformed absolute timeout with
 * EINVAL before waiting.
 */
int
_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		       const struct timespec * abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 ||
	    abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}
384a091d823SDavid Xu 
/*
 * pthread_cond_timedwait(3): the exported variant, which is a
 * cancellation point.  Rejects a missing or malformed absolute timeout
 * with EINVAL before waiting.
 */
int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		       const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 ||
	    abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}
396dd3b229eSMike Makonnen 
/*
 * Wake one waiter.  Kernel-side waiters are signaled via
 * _thr_ucond_signal(); a userland waiter is dequeued and either woken
 * immediately or, when it waits on a mutex this thread currently owns,
 * queued in defer_waiters[] so the wakeup happens after we unlock.
 */
static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread *td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int	*waddr;
	int	pshared;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal((struct ucond *)&cvp->__has_kern_waiters);

	/* Shared condvars have no userland queue; nothing more to do. */
	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	/* NOTE(review): redundant — curthread was initialized above. */
	curthread = _get_curthread();
	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		/*
		 * We own the waiter's mutex: defer the wakeup until the
		 * mutex is unlocked, flushing the buffer if it is full.
		 */
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	/* Wake outside the sleepqueue lock. */
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}
450d1078b0bSDavid Xu 
/* Accumulator passed to drop_cb() while draining a sleepqueue. */
struct broadcast_arg {
	struct pthread *curthread;	/* broadcasting thread */
	unsigned int *waddrs[MAX_DEFER_WAITERS]; /* wake addresses to flush */
	int count;			/* valid entries in waddrs[] */
};

/*
 * Per-waiter callback for _sleepq_drop(): waiters whose mutex the
 * broadcasting thread owns are deferred in defer_waiters[]; the rest
 * are batched in ba->waddrs[] and woken in chunks.
 */
static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		/* Defer until our unlock; flush the buffer if full. */
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		/* Wake immediately, batching up to MAX_DEFER_WAITERS. */
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}
482d1078b0bSDavid Xu 
/*
 * Wake all waiters: signal kernel-side waiters, then drain the
 * userland sleepqueue through drop_cb() and flush any remaining
 * batched wakeups.
 */
static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int    pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_broadcast((struct ucond *)&cvp->__has_kern_waiters);

	/* Shared condvars have no userland queue; nothing more to do. */
	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}
	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	/* Flush the last batch outside the sleepqueue lock. */
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}
520bb535300SJeff Roberson 
/* pthread_cond_signal(3): wake one thread waiting on the condvar. */
int
_pthread_cond_signal(pthread_cond_t * cond)
{

	return (cond_signal_common(cond));
}
527bb535300SJeff Roberson 
/* pthread_cond_broadcast(3): wake every thread waiting on the condvar. */
int
_pthread_cond_broadcast(pthread_cond_t * cond)
{

	return (cond_broadcast_common(cond));
}
534