xref: /freebsd/lib/libthr/thread/thr_cond.c (revision c7904405a8d47f64c3b0e73158572e2dc8ef0217)
15e53a4f9SPedro F. Giffuni /*-
25e53a4f9SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
35e53a4f9SPedro F. Giffuni  *
4a091d823SDavid Xu  * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
51bdbd705SKonstantin Belousov  * Copyright (c) 2015 The FreeBSD Foundation
6bb535300SJeff Roberson  * All rights reserved.
7bb535300SJeff Roberson  *
81bdbd705SKonstantin Belousov  * Portions of this software were developed by Konstantin Belousov
91bdbd705SKonstantin Belousov  * under sponsorship from the FreeBSD Foundation.
101bdbd705SKonstantin Belousov  *
11bb535300SJeff Roberson  * Redistribution and use in source and binary forms, with or without
12bb535300SJeff Roberson  * modification, are permitted provided that the following conditions
13bb535300SJeff Roberson  * are met:
14bb535300SJeff Roberson  * 1. Redistributions of source code must retain the above copyright
15a091d823SDavid Xu  *    notice unmodified, this list of conditions, and the following
16a091d823SDavid Xu  *    disclaimer.
17bb535300SJeff Roberson  * 2. Redistributions in binary form must reproduce the above copyright
18bb535300SJeff Roberson  *    notice, this list of conditions and the following disclaimer in the
19bb535300SJeff Roberson  *    documentation and/or other materials provided with the distribution.
20bb535300SJeff Roberson  *
21a091d823SDavid Xu  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22a091d823SDavid Xu  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23a091d823SDavid Xu  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24a091d823SDavid Xu  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25a091d823SDavid Xu  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26a091d823SDavid Xu  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27a091d823SDavid Xu  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28a091d823SDavid Xu  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29a091d823SDavid Xu  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30a091d823SDavid Xu  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31bb535300SJeff Roberson  */
32a091d823SDavid Xu 
3332793011SKonstantin Belousov #include <sys/cdefs.h>
3432793011SKonstantin Belousov __FBSDID("$FreeBSD$");
3532793011SKonstantin Belousov 
3637a6356bSDavid Xu #include "namespace.h"
37bb535300SJeff Roberson #include <stdlib.h>
38bb535300SJeff Roberson #include <errno.h>
39bb535300SJeff Roberson #include <string.h>
40bb535300SJeff Roberson #include <pthread.h>
41a091d823SDavid Xu #include <limits.h>
4237a6356bSDavid Xu #include "un-namespace.h"
43bb535300SJeff Roberson 
44a091d823SDavid Xu #include "thr_private.h"
4541f2bd85SMike Makonnen 
46*c7904405SAndrew Turner _Static_assert(sizeof(struct pthread_cond) <= THR_PAGE_SIZE_MIN,
479e821f27SKonstantin Belousov     "pthread_cond too large");
489e821f27SKonstantin Belousov 
4941f2bd85SMike Makonnen /*
50bb535300SJeff Roberson  * Prototypes
51bb535300SJeff Roberson  */
5237a6356bSDavid Xu int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
5337a6356bSDavid Xu 		       const struct timespec * abstime);
54a091d823SDavid Xu static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
55a091d823SDavid Xu static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
56a091d823SDavid Xu 		    const struct timespec *abstime, int cancel);
57d1078b0bSDavid Xu static int cond_signal_common(pthread_cond_t *cond);
58d1078b0bSDavid Xu static int cond_broadcast_common(pthread_cond_t *cond);
59a091d823SDavid Xu 
60a091d823SDavid Xu /*
61a091d823SDavid Xu  * Double underscore versions are cancellation points.  Single underscore
62a091d823SDavid Xu  * versions are not and are provided for libc internal usage (which
63a091d823SDavid Xu  * shouldn't introduce cancellation points).
64a091d823SDavid Xu  */
650ab1bfc7SKonstantin Belousov __weak_reference(__thr_cond_wait, pthread_cond_wait);
660ab1bfc7SKonstantin Belousov __weak_reference(__thr_cond_wait, __pthread_cond_wait);
670ab1bfc7SKonstantin Belousov __weak_reference(_thr_cond_wait, _pthread_cond_wait);
68a091d823SDavid Xu __weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);
690ab1bfc7SKonstantin Belousov __weak_reference(_thr_cond_init, pthread_cond_init);
700ab1bfc7SKonstantin Belousov __weak_reference(_thr_cond_init, _pthread_cond_init);
710ab1bfc7SKonstantin Belousov __weak_reference(_thr_cond_destroy, pthread_cond_destroy);
720ab1bfc7SKonstantin Belousov __weak_reference(_thr_cond_destroy, _pthread_cond_destroy);
730ab1bfc7SKonstantin Belousov __weak_reference(_thr_cond_signal, pthread_cond_signal);
740ab1bfc7SKonstantin Belousov __weak_reference(_thr_cond_signal, _pthread_cond_signal);
750ab1bfc7SKonstantin Belousov __weak_reference(_thr_cond_broadcast, pthread_cond_broadcast);
760ab1bfc7SKonstantin Belousov __weak_reference(_thr_cond_broadcast, _pthread_cond_broadcast);
77bb535300SJeff Roberson 
786180f50bSKonstantin Belousov #define CV_PSHARED(cvp)	(((cvp)->kcond.c_flags & USYNC_PROCESS_SHARED) != 0)
79d1078b0bSDavid Xu 
/*
 * Fill in the condvar state from an attribute object.  With no attribute
 * the clock defaults to CLOCK_REALTIME; otherwise the attribute's clock is
 * copied and, for PTHREAD_PROCESS_SHARED condvars, the kernel-visible
 * USYNC_PROCESS_SHARED flag is set on kcond so umtx treats it as shared.
 */
801bdbd705SKonstantin Belousov static void
811bdbd705SKonstantin Belousov cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
821bdbd705SKonstantin Belousov {
831bdbd705SKonstantin Belousov 
841bdbd705SKonstantin Belousov 	if (cattr == NULL) {
856180f50bSKonstantin Belousov 		cvp->kcond.c_clockid = CLOCK_REALTIME;
861bdbd705SKonstantin Belousov 	} else {
871bdbd705SKonstantin Belousov 		if (cattr->c_pshared)
886180f50bSKonstantin Belousov 			cvp->kcond.c_flags |= USYNC_PROCESS_SHARED;
896180f50bSKonstantin Belousov 		cvp->kcond.c_clockid = cattr->c_clockid;
901bdbd705SKonstantin Belousov 	}
911bdbd705SKonstantin Belousov }
921bdbd705SKonstantin Belousov 
/*
 * Allocate and initialize a condition variable.
 *
 * Process-private condvars are heap-allocated (calloc zeroes kcond) and the
 * user-visible pthread_cond_t stores the pointer directly.  Process-shared
 * condvars live in an off-page obtained from __thr_pshared_offpage() and the
 * user-visible word is set to the THR_PSHARED_PTR sentinel instead.
 *
 * Returns 0, ENOMEM on allocation failure, or EFAULT if the shared off-page
 * cannot be mapped.
 */
93a091d823SDavid Xu static int
94a091d823SDavid Xu cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
95bb535300SJeff Roberson {
96d1078b0bSDavid Xu 	struct pthread_cond *cvp;
971bdbd705SKonstantin Belousov 	const struct pthread_cond_attr *cattr;
981bdbd705SKonstantin Belousov 	int pshared;
99a091d823SDavid Xu 
1001bdbd705SKonstantin Belousov 	cattr = cond_attr != NULL ? *cond_attr : NULL;
1011bdbd705SKonstantin Belousov 	if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
1021bdbd705SKonstantin Belousov 		pshared = 0;
1031bdbd705SKonstantin Belousov 		cvp = calloc(1, sizeof(struct pthread_cond));
1041bdbd705SKonstantin Belousov 		if (cvp == NULL)
1051bdbd705SKonstantin Belousov 			return (ENOMEM);
106a091d823SDavid Xu 	} else {
1071bdbd705SKonstantin Belousov 		pshared = 1;
1081bdbd705SKonstantin Belousov 		cvp = __thr_pshared_offpage(cond, 1);
1091bdbd705SKonstantin Belousov 		if (cvp == NULL)
1101bdbd705SKonstantin Belousov 			return (EFAULT);
1111bdbd705SKonstantin Belousov 	}
1121bdbd705SKonstantin Belousov 
113a091d823SDavid Xu 	/*
114a091d823SDavid Xu 	 * Initialise the condition variable structure:
115a091d823SDavid Xu 	 */
1161bdbd705SKonstantin Belousov 	cond_init_body(cvp, cattr);
1171bdbd705SKonstantin Belousov 	*cond = pshared ? THR_PSHARED_PTR : cvp;
1181bdbd705SKonstantin Belousov 	return (0);
119a091d823SDavid Xu }
120a091d823SDavid Xu 
/*
 * Lazily initialize a statically-initialized (PTHREAD_COND_INITIALIZER)
 * condvar on first use.  The global _cond_static_lock serializes racing
 * first users; the *cond == NULL re-check under the lock makes the
 * initialization happen exactly once.
 */
121a091d823SDavid Xu static int
122a091d823SDavid Xu init_static(struct pthread *thread, pthread_cond_t *cond)
123a091d823SDavid Xu {
124a091d823SDavid Xu 	int ret;
125a091d823SDavid Xu 
126a091d823SDavid Xu 	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);
127bb535300SJeff Roberson 
128bb535300SJeff Roberson 	if (*cond == NULL)
129a091d823SDavid Xu 		ret = cond_init(cond, NULL);
130a091d823SDavid Xu 	else
131a091d823SDavid Xu 		ret = 0;
132bb535300SJeff Roberson 
133a091d823SDavid Xu 	THR_LOCK_RELEASE(thread, &_cond_static_lock);
134bb535300SJeff Roberson 
135a091d823SDavid Xu 	return (ret);
136bb535300SJeff Roberson }
137bb535300SJeff Roberson 
/*
 * Resolve the user-visible pthread_cond_t word into a usable cvp pointer:
 *  - THR_PSHARED_PTR: look up the process-shared off-page (EINVAL if gone);
 *  - THR_COND_INITIALIZER: perform lazy static initialization, then reload;
 *  - THR_COND_DESTROYED: fail with EINVAL;
 *  - otherwise *cond already is the struct pthread_cond pointer.
 * Expects "pthread_cond_t *cond" and "struct pthread_cond *cvp" in scope
 * and may return from the enclosing function on error.
 */
138bbb64c21SDavid Xu #define CHECK_AND_INIT_COND							\
1391bdbd705SKonstantin Belousov 	if (*cond == THR_PSHARED_PTR) {						\
1401bdbd705SKonstantin Belousov 		cvp = __thr_pshared_offpage(cond, 0);				\
1411bdbd705SKonstantin Belousov 		if (cvp == NULL)						\
1421bdbd705SKonstantin Belousov 			return (EINVAL);					\
1431bdbd705SKonstantin Belousov 	} else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) {	\
144d1078b0bSDavid Xu 		if (cvp == THR_COND_INITIALIZER) {				\
145bbb64c21SDavid Xu 			int ret;						\
146bbb64c21SDavid Xu 			ret = init_static(_get_curthread(), cond);		\
147bbb64c21SDavid Xu 			if (ret)						\
148bbb64c21SDavid Xu 				return (ret);					\
149d1078b0bSDavid Xu 		} else if (cvp == THR_COND_DESTROYED) {				\
150bbb64c21SDavid Xu 			return (EINVAL);					\
151bbb64c21SDavid Xu 		}								\
152d1078b0bSDavid Xu 		cvp = *cond;							\
153bbb64c21SDavid Xu 	}
154bbb64c21SDavid Xu 
/*
 * pthread_cond_init(3) entry point: discard any previous contents of
 * *cond and build a fresh condvar from the (possibly NULL) attribute.
 */
155bb535300SJeff Roberson int
1560ab1bfc7SKonstantin Belousov _thr_cond_init(pthread_cond_t * __restrict cond,
157b6413b6dSPedro F. Giffuni     const pthread_condattr_t * __restrict cond_attr)
158bb535300SJeff Roberson {
159bb535300SJeff Roberson 
160a091d823SDavid Xu 	*cond = NULL;
161a091d823SDavid Xu 	return (cond_init(cond, cond_attr));
162bb535300SJeff Roberson }
163bb535300SJeff Roberson 
/*
 * pthread_cond_destroy(3) entry point.
 *
 * Refuses (EBUSY) to destroy a condvar that still has kernel- or user-side
 * waiters; destroying a never-used static initializer is a no-op; a second
 * destroy returns EINVAL.  On success *cond is replaced with the
 * THR_COND_DESTROYED sentinel so later use is detectable.
 */
164bb535300SJeff Roberson int
1650ab1bfc7SKonstantin Belousov _thr_cond_destroy(pthread_cond_t *cond)
166bb535300SJeff Roberson {
167d1078b0bSDavid Xu 	struct pthread_cond *cvp;
1681bdbd705SKonstantin Belousov 	int error;
169bb535300SJeff Roberson 
170d1078b0bSDavid Xu 	error = 0;
1711bdbd705SKonstantin Belousov 	if (*cond == THR_PSHARED_PTR) {
1721bdbd705SKonstantin Belousov 		cvp = __thr_pshared_offpage(cond, 0);
173b16150eaSMark Johnston 		if (cvp != NULL) {
174b16150eaSMark Johnston 			if (cvp->kcond.c_has_waiters)
175b16150eaSMark Johnston 				error = EBUSY;
176b16150eaSMark Johnston 			else
1771bdbd705SKonstantin Belousov 				__thr_pshared_destroy(cond);
178b16150eaSMark Johnston 		}
179b16150eaSMark Johnston 		if (error == 0)
1801bdbd705SKonstantin Belousov 			*cond = THR_COND_DESTROYED;
1811bdbd705SKonstantin Belousov 	} else if ((cvp = *cond) == THR_COND_INITIALIZER) {
1821bdbd705SKonstantin Belousov 		/* nothing */
1831bdbd705SKonstantin Belousov 	} else if (cvp == THR_COND_DESTROYED) {
184d1078b0bSDavid Xu 		error = EINVAL;
1851bdbd705SKonstantin Belousov 	} else {
186d1078b0bSDavid Xu 		cvp = *cond;
187b16150eaSMark Johnston 		if (cvp->__has_user_waiters || cvp->kcond.c_has_waiters)
188b16150eaSMark Johnston 			error = EBUSY;
189b16150eaSMark Johnston 		else {
190bbb64c21SDavid Xu 			*cond = THR_COND_DESTROYED;
191d1078b0bSDavid Xu 			free(cvp);
192a091d823SDavid Xu 		}
193b16150eaSMark Johnston 	}
194d1078b0bSDavid Xu 	return (error);
195a091d823SDavid Xu }
196a091d823SDavid Xu 
197635f917aSDavid Xu /*
198585bf8aeSRui Paulo  * Cancellation behavior:
199635f917aSDavid Xu  *   Thread may be canceled at start, if thread is canceled, it means it
200635f917aSDavid Xu  *   did not get a wakeup from pthread_cond_signal(), otherwise, it is
201635f917aSDavid Xu  *   not canceled.
202635f917aSDavid Xu  *   Thread cancellation never cause wakeup from pthread_cond_signal()
203635f917aSDavid Xu  *   to be lost.
204635f917aSDavid Xu  */
/*
 * Kernel-assisted wait path: the thread sleeps inside the kernel via
 * _thr_ucond_wait(), which atomically unlocks the umtx lock word.  Used for
 * pshared condvars, real-time scheduling, and PP/PI/robust mutexes (see
 * cond_wait_common for the selection).
 *
 * The mutex is "detached" (ownership and recursion count saved) before the
 * sleep and re-acquired afterwards; for robust mutexes the robust-list
 * enter/leave brackets the whole operation.  The combined result favors the
 * re-lock error (error2) over the wait error, and EINTR from the wait is
 * reported as a successful (spurious) wakeup per POSIX.
 */
205a091d823SDavid Xu static int
206d1078b0bSDavid Xu cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
207d1078b0bSDavid Xu     const struct timespec *abstime, int cancel)
208d1078b0bSDavid Xu {
2092a339d9eSKonstantin Belousov 	struct pthread *curthread;
2102a339d9eSKonstantin Belousov 	int error, error2, recurse, robust;
2112a339d9eSKonstantin Belousov 
2122a339d9eSKonstantin Belousov 	curthread = _get_curthread();
2132a339d9eSKonstantin Belousov 	robust = _mutex_enter_robust(curthread, mp);
214d1078b0bSDavid Xu 
215d1078b0bSDavid Xu 	error = _mutex_cv_detach(mp, &recurse);
2162a339d9eSKonstantin Belousov 	if (error != 0) {
2172a339d9eSKonstantin Belousov 		if (robust)
2182a339d9eSKonstantin Belousov 			_mutex_leave_robust(curthread, mp);
219d1078b0bSDavid Xu 		return (error);
2202a339d9eSKonstantin Belousov 	}
221d1078b0bSDavid Xu 
2222a339d9eSKonstantin Belousov 	if (cancel)
223d1078b0bSDavid Xu 		_thr_cancel_enter2(curthread, 0);
2246180f50bSKonstantin Belousov 	error = _thr_ucond_wait(&cvp->kcond, &mp->m_lock, abstime,
2256180f50bSKonstantin Belousov 	    CVWAIT_ABSTIME | CVWAIT_CLOCKID);
2262a339d9eSKonstantin Belousov 	if (cancel)
227d1078b0bSDavid Xu 		_thr_cancel_leave(curthread, 0);
228d1078b0bSDavid Xu 
229d1078b0bSDavid Xu 	/*
230d1078b0bSDavid Xu 	 * Note that PP mutex and ROBUST mutex may return
231d1078b0bSDavid Xu 	 * interesting error codes.
232d1078b0bSDavid Xu 	 */
233d1078b0bSDavid Xu 	if (error == 0) {
2342a339d9eSKonstantin Belousov 		error2 = _mutex_cv_lock(mp, recurse, true);
235d1078b0bSDavid Xu 	} else if (error == EINTR || error == ETIMEDOUT) {
2362a339d9eSKonstantin Belousov 		error2 = _mutex_cv_lock(mp, recurse, true);
2372a339d9eSKonstantin Belousov 		/*
2382a339d9eSKonstantin Belousov 		 * Do not do cancellation on EOWNERDEAD there.  The
2392a339d9eSKonstantin Belousov 		 * cancellation cleanup handler will use the protected
2402a339d9eSKonstantin Belousov 		 * state and unlock the mutex without making the state
2412a339d9eSKonstantin Belousov 		 * consistent and the state will be unrecoverable.
2422a339d9eSKonstantin Belousov 		 */
2434dafad49SKonstantin Belousov 		if (error2 == 0 && cancel) {
2444dafad49SKonstantin Belousov 			if (robust) {
2454dafad49SKonstantin Belousov 				_mutex_leave_robust(curthread, mp);
2464dafad49SKonstantin Belousov 				robust = false;
2474dafad49SKonstantin Belousov 			}
248d1078b0bSDavid Xu 			_thr_testcancel(curthread);
2494dafad49SKonstantin Belousov 		}
2502a339d9eSKonstantin Belousov 
251d1078b0bSDavid Xu 		if (error == EINTR)
252d1078b0bSDavid Xu 			error = 0;
253d1078b0bSDavid Xu 	} else {
254d1078b0bSDavid Xu 		/* We know that it didn't unlock the mutex. */
2552a339d9eSKonstantin Belousov 		_mutex_cv_attach(mp, recurse);
2564dafad49SKonstantin Belousov 		if (cancel) {
2574dafad49SKonstantin Belousov 			if (robust) {
2584dafad49SKonstantin Belousov 				_mutex_leave_robust(curthread, mp);
2594dafad49SKonstantin Belousov 				robust = false;
2604dafad49SKonstantin Belousov 			}
261d1078b0bSDavid Xu 			_thr_testcancel(curthread);
2624dafad49SKonstantin Belousov 		}
2632a339d9eSKonstantin Belousov 		error2 = 0;
264d1078b0bSDavid Xu 	}
2652a339d9eSKonstantin Belousov 	if (robust)
2662a339d9eSKonstantin Belousov 		_mutex_leave_robust(curthread, mp);
267d1078b0bSDavid Xu 	return (error2 != 0 ? error2 : error);
268d1078b0bSDavid Xu }
269d1078b0bSDavid Xu 
270d1078b0bSDavid Xu /*
271d1078b0bSDavid Xu  * Thread waits in userland queue whenever possible, when thread
272d1078b0bSDavid Xu  * is signaled or broadcasted, it is removed from the queue, and
273d1078b0bSDavid Xu  * is saved in curthread's defer_waiters[] buffer, but won't be
274d1078b0bSDavid Xu  * woken up until mutex is unlocked.
275d1078b0bSDavid Xu  */
276d1078b0bSDavid Xu 
/*
 * Userland wait path: the thread parks itself on the condvar's sleepqueue
 * and sleeps on its private wake address, avoiding a kernel condvar queue.
 *
 * Protocol: under the sleepqueue lock, __has_user_waiters is set before the
 * mutex is unlocked so cond_signal_common() can test it without taking the
 * lock; the mutex unlock may hand back "deferred" wakeup work (contested
 * mutex wake plus deferred waiter addresses) which is performed after the
 * queue lock is dropped.  On wakeup, being off the queue (wchan == NULL)
 * means we were signalled; otherwise cancellation and timeout remove us
 * explicitly, updating __has_user_waiters from the remove result.  In all
 * exits the mutex is re-locked with the saved recursion count.
 */
277d1078b0bSDavid Xu static int
278d1078b0bSDavid Xu cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
279d1078b0bSDavid Xu     const struct timespec *abstime, int cancel)
280d1078b0bSDavid Xu {
2812a339d9eSKonstantin Belousov 	struct pthread *curthread;
282d1078b0bSDavid Xu 	struct sleepqueue *sq;
2832a339d9eSKonstantin Belousov 	int deferred, error, error2, recurse;
284d1078b0bSDavid Xu 
2852a339d9eSKonstantin Belousov 	curthread = _get_curthread();
286d1078b0bSDavid Xu 	if (curthread->wchan != NULL)
287c72ef5eaSConrad Meyer 		PANIC("thread %p was already on queue.", curthread);
288d1078b0bSDavid Xu 
289d1078b0bSDavid Xu 	if (cancel)
290d1078b0bSDavid Xu 		_thr_testcancel(curthread);
291d1078b0bSDavid Xu 
292d1078b0bSDavid Xu 	_sleepq_lock(cvp);
293d1078b0bSDavid Xu 	/*
294d1078b0bSDavid Xu 	 * set __has_user_waiters before unlocking mutex, this allows
295d1078b0bSDavid Xu 	 * us to check it without locking in pthread_cond_signal().
296d1078b0bSDavid Xu 	 */
297d1078b0bSDavid Xu 	cvp->__has_user_waiters = 1;
2982a339d9eSKonstantin Belousov 	deferred = 0;
2992a339d9eSKonstantin Belousov 	(void)_mutex_cv_unlock(mp, &recurse, &deferred);
300d1078b0bSDavid Xu 	curthread->mutex_obj = mp;
301d1078b0bSDavid Xu 	_sleepq_add(cvp, curthread);
302d1078b0bSDavid Xu 	for(;;) {
303d1078b0bSDavid Xu 		_thr_clear_wake(curthread);
304d1078b0bSDavid Xu 		_sleepq_unlock(cvp);
3052a339d9eSKonstantin Belousov 		if (deferred) {
3062a339d9eSKonstantin Belousov 			deferred = 0;
307e220a13aSDavid Xu 			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
3082a339d9eSKonstantin Belousov 				(void)_umtx_op_err(&mp->m_lock,
3092a339d9eSKonstantin Belousov 				    UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
3102a339d9eSKonstantin Belousov 				    0, 0);
311e220a13aSDavid Xu 		}
312e220a13aSDavid Xu 		if (curthread->nwaiter_defer > 0) {
313e220a13aSDavid Xu 			_thr_wake_all(curthread->defer_waiters,
314e220a13aSDavid Xu 			    curthread->nwaiter_defer);
315e220a13aSDavid Xu 			curthread->nwaiter_defer = 0;
316e220a13aSDavid Xu 		}
317d1078b0bSDavid Xu 
3182a339d9eSKonstantin Belousov 		if (cancel)
319d1078b0bSDavid Xu 			_thr_cancel_enter2(curthread, 0);
3206180f50bSKonstantin Belousov 		error = _thr_sleep(curthread, cvp->kcond.c_clockid, abstime);
3212a339d9eSKonstantin Belousov 		if (cancel)
322d1078b0bSDavid Xu 			_thr_cancel_leave(curthread, 0);
323d1078b0bSDavid Xu 
324d1078b0bSDavid Xu 		_sleepq_lock(cvp);
325d1078b0bSDavid Xu 		if (curthread->wchan == NULL) {
326d1078b0bSDavid Xu 			error = 0;
327d1078b0bSDavid Xu 			break;
328d1078b0bSDavid Xu 		} else if (cancel && SHOULD_CANCEL(curthread)) {
329d1078b0bSDavid Xu 			sq = _sleepq_lookup(cvp);
3302a339d9eSKonstantin Belousov 			cvp->__has_user_waiters = _sleepq_remove(sq, curthread);
331d1078b0bSDavid Xu 			_sleepq_unlock(cvp);
332d1078b0bSDavid Xu 			curthread->mutex_obj = NULL;
3332a339d9eSKonstantin Belousov 			error2 = _mutex_cv_lock(mp, recurse, false);
334d1078b0bSDavid Xu 			if (!THR_IN_CRITICAL(curthread))
335d1078b0bSDavid Xu 				_pthread_exit(PTHREAD_CANCELED);
336d1078b0bSDavid Xu 			else /* this should not happen */
3372a339d9eSKonstantin Belousov 				return (error2);
338d1078b0bSDavid Xu 		} else if (error == ETIMEDOUT) {
339d1078b0bSDavid Xu 			sq = _sleepq_lookup(cvp);
340d1078b0bSDavid Xu 			cvp->__has_user_waiters =
341d1078b0bSDavid Xu 			    _sleepq_remove(sq, curthread);
342d1078b0bSDavid Xu 			break;
343d1078b0bSDavid Xu 		}
344d1078b0bSDavid Xu 	}
345d1078b0bSDavid Xu 	_sleepq_unlock(cvp);
346d1078b0bSDavid Xu 	curthread->mutex_obj = NULL;
3472a339d9eSKonstantin Belousov 	error2 = _mutex_cv_lock(mp, recurse, false);
3482a339d9eSKonstantin Belousov 	if (error == 0)
3492a339d9eSKonstantin Belousov 		error = error2;
350d1078b0bSDavid Xu 	return (error);
351d1078b0bSDavid Xu }
352d1078b0bSDavid Xu 
/*
 * Shared implementation for all four wait entry points.  Resolves the
 * condvar (lazy init / pshared lookup via CHECK_AND_INIT_COND) and the
 * mutex, verifies the caller owns the mutex, then dispatches: the kernel
 * wait path is used for non-SCHED_OTHER threads, PP/PI/process-shared
 * mutexes, or pshared condvars; everything else takes the cheaper
 * userland sleepqueue path.  "cancel" selects whether the wait is a
 * cancellation point.
 */
353d1078b0bSDavid Xu static int
354a091d823SDavid Xu cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
355a091d823SDavid Xu 	const struct timespec *abstime, int cancel)
356a091d823SDavid Xu {
357a091d823SDavid Xu 	struct pthread	*curthread = _get_curthread();
358d1078b0bSDavid Xu 	struct pthread_cond *cvp;
359d1078b0bSDavid Xu 	struct pthread_mutex *mp;
360d1078b0bSDavid Xu 	int	error;
361a091d823SDavid Xu 
362bbb64c21SDavid Xu 	CHECK_AND_INIT_COND
363635f917aSDavid Xu 
3641bdbd705SKonstantin Belousov 	if (*mutex == THR_PSHARED_PTR) {
3651bdbd705SKonstantin Belousov 		mp = __thr_pshared_offpage(mutex, 0);
3661bdbd705SKonstantin Belousov 		if (mp == NULL)
3671bdbd705SKonstantin Belousov 			return (EINVAL);
3681bdbd705SKonstantin Belousov 	} else {
369d1078b0bSDavid Xu 		mp = *mutex;
3701bdbd705SKonstantin Belousov 	}
3712bd2c907SDavid Xu 
372d1078b0bSDavid Xu 	if ((error = _mutex_owned(curthread, mp)) != 0)
373d1078b0bSDavid Xu 		return (error);
374a091d823SDavid Xu 
375d1078b0bSDavid Xu 	if (curthread->attr.sched_policy != SCHED_OTHER ||
376d1078b0bSDavid Xu 	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
3776180f50bSKonstantin Belousov 	    USYNC_PROCESS_SHARED)) != 0 || CV_PSHARED(cvp))
3782a339d9eSKonstantin Belousov 		return (cond_wait_kernel(cvp, mp, abstime, cancel));
379d1078b0bSDavid Xu 	else
3802a339d9eSKonstantin Belousov 		return (cond_wait_user(cvp, mp, abstime, cancel));
382bb535300SJeff Roberson 
/* libc-internal pthread_cond_wait(): no cancellation point (cancel = 0). */
383bb535300SJeff Roberson int
3840ab1bfc7SKonstantin Belousov _thr_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
385bb535300SJeff Roberson {
386bb535300SJeff Roberson 
387a091d823SDavid Xu 	return (cond_wait_common(cond, mutex, NULL, 0));
388a091d823SDavid Xu }
389bb535300SJeff Roberson 
390a091d823SDavid Xu int
3910ab1bfc7SKonstantin Belousov __thr_cond_wait(pthread_cond_t * __restrict cond,
392b6413b6dSPedro F. Giffuni     pthread_mutex_t * __restrict mutex)
393a091d823SDavid Xu {
394bb535300SJeff Roberson 
395a091d823SDavid Xu 	return (cond_wait_common(cond, mutex, NULL, 1));
396bb535300SJeff Roberson }
397bb535300SJeff Roberson 
/*
 * libc-internal pthread_cond_timedwait(): validates the absolute timeout
 * (EINVAL on NULL or out-of-range tv_sec/tv_nsec) and waits without being
 * a cancellation point.
 */
397bb535300SJeff Roberson int
3980ab1bfc7SKonstantin Belousov _thr_cond_timedwait(pthread_cond_t * __restrict cond,
399b6413b6dSPedro F. Giffuni     pthread_mutex_t * __restrict mutex,
400b6413b6dSPedro F. Giffuni     const struct timespec * __restrict abstime)
401bb535300SJeff Roberson {
402a091d823SDavid Xu 
403a091d823SDavid Xu 	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
404a091d823SDavid Xu 	    abstime->tv_nsec >= 1000000000)
405dd3b229eSMike Makonnen 		return (EINVAL);
406dd3b229eSMike Makonnen 
407a091d823SDavid Xu 	return (cond_wait_common(cond, mutex, abstime, 0));
408a091d823SDavid Xu }
410a091d823SDavid Xu 
/*
 * Application-visible pthread_cond_timedwait(): same timeout validation as
 * above, but this variant is a cancellation point.
 */
410a091d823SDavid Xu int
411a091d823SDavid Xu __pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
412a091d823SDavid Xu 		       const struct timespec *abstime)
413a091d823SDavid Xu {
414a091d823SDavid Xu 
415a091d823SDavid Xu 	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
416a091d823SDavid Xu 	    abstime->tv_nsec >= 1000000000)
417a091d823SDavid Xu 		return (EINVAL);
418a091d823SDavid Xu 
419a091d823SDavid Xu 	return (cond_wait_common(cond, mutex, abstime, 1));
420dd3b229eSMike Makonnen }
422dd3b229eSMike Makonnen 
/*
 * Wake one waiter.  First wakes any kernel-side waiter via
 * _thr_ucond_signal(); then, for process-private condvars that have
 * userland sleepqueue waiters, removes the first queued thread.  If the
 * signalling thread itself owns that waiter's mutex, the wakeup is
 * deferred (stashed in defer_waiters[] and flushed on mutex unlock, or
 * here when the buffer is full) so the waiter does not wake only to block
 * on the mutex; otherwise the waiter's wake address is set immediately
 * after dropping the sleepqueue lock.  Always returns 0.
 */
422dd3b229eSMike Makonnen static int
423d1078b0bSDavid Xu cond_signal_common(pthread_cond_t *cond)
424dd3b229eSMike Makonnen {
425a091d823SDavid Xu 	struct pthread	*curthread = _get_curthread();
426d1078b0bSDavid Xu 	struct pthread *td;
427d1078b0bSDavid Xu 	struct pthread_cond *cvp;
428d1078b0bSDavid Xu 	struct pthread_mutex *mp;
429d1078b0bSDavid Xu 	struct sleepqueue *sq;
430d1078b0bSDavid Xu 	int	*waddr;
431d1078b0bSDavid Xu 	int	pshared;
432bb535300SJeff Roberson 
433bb535300SJeff Roberson 	/*
434bb535300SJeff Roberson 	 * If the condition variable is statically initialized, perform dynamic
435bb535300SJeff Roberson 	 * initialization.
436bb535300SJeff Roberson 	 */
437bbb64c21SDavid Xu 	CHECK_AND_INIT_COND
438bb535300SJeff Roberson 
439d1078b0bSDavid Xu 	pshared = CV_PSHARED(cvp);
440d1078b0bSDavid Xu 
4416180f50bSKonstantin Belousov 	_thr_ucond_signal(&cvp->kcond);
442d1078b0bSDavid Xu 
443d1078b0bSDavid Xu 	if (pshared || cvp->__has_user_waiters == 0)
444d1078b0bSDavid Xu 		return (0);
445d1078b0bSDavid Xu 
446d1078b0bSDavid Xu 	curthread = _get_curthread();
447d1078b0bSDavid Xu 	waddr = NULL;
448d1078b0bSDavid Xu 	_sleepq_lock(cvp);
449d1078b0bSDavid Xu 	sq = _sleepq_lookup(cvp);
450d1078b0bSDavid Xu 	if (sq == NULL) {
451d1078b0bSDavid Xu 		_sleepq_unlock(cvp);
452d1078b0bSDavid Xu 		return (0);
453d1078b0bSDavid Xu 	}
454d1078b0bSDavid Xu 
455d1078b0bSDavid Xu 	td = _sleepq_first(sq);
456d1078b0bSDavid Xu 	mp = td->mutex_obj;
457d1078b0bSDavid Xu 	cvp->__has_user_waiters = _sleepq_remove(sq, td);
4582a339d9eSKonstantin Belousov 	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
459d1078b0bSDavid Xu 		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
460d1078b0bSDavid Xu 			_thr_wake_all(curthread->defer_waiters,
461d1078b0bSDavid Xu 			    curthread->nwaiter_defer);
462d1078b0bSDavid Xu 			curthread->nwaiter_defer = 0;
463d1078b0bSDavid Xu 		}
464d1078b0bSDavid Xu 		curthread->defer_waiters[curthread->nwaiter_defer++] =
465d1078b0bSDavid Xu 		    &td->wake_addr->value;
4662a339d9eSKonstantin Belousov 		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
467d1078b0bSDavid Xu 	} else {
468d1078b0bSDavid Xu 		waddr = &td->wake_addr->value;
469d1078b0bSDavid Xu 	}
470d1078b0bSDavid Xu 	_sleepq_unlock(cvp);
471d1078b0bSDavid Xu 	if (waddr != NULL)
472d1078b0bSDavid Xu 		_thr_set_wake(waddr);
473d1078b0bSDavid Xu 	return (0);
474d1078b0bSDavid Xu }
476d1078b0bSDavid Xu 
/*
 * Per-broadcast accumulator passed to drop_cb(): collects up to
 * MAX_DEFER_WAITERS wake addresses of waiters whose mutex the caller does
 * NOT own, so they can be woken in batches.
 */
476d1078b0bSDavid Xu struct broadcast_arg {
477d1078b0bSDavid Xu 	struct pthread *curthread;
478d1078b0bSDavid Xu 	unsigned int *waddrs[MAX_DEFER_WAITERS];
479d1078b0bSDavid Xu 	int count;
480d1078b0bSDavid Xu };
482d1078b0bSDavid Xu 
/*
 * _sleepq_drop() callback for broadcast: for each dropped waiter, either
 * defer its wakeup on the broadcasting thread (when that thread owns the
 * waiter's mutex — mirrors the deferral in cond_signal_common) or batch
 * its wake address into ba->waddrs, flushing either buffer when full.
 */
482d1078b0bSDavid Xu static void
483d1078b0bSDavid Xu drop_cb(struct pthread *td, void *arg)
484d1078b0bSDavid Xu {
485d1078b0bSDavid Xu 	struct broadcast_arg *ba = arg;
486d1078b0bSDavid Xu 	struct pthread_mutex *mp;
487d1078b0bSDavid Xu 	struct pthread *curthread = ba->curthread;
488d1078b0bSDavid Xu 
489d1078b0bSDavid Xu 	mp = td->mutex_obj;
4902a339d9eSKonstantin Belousov 	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
491d1078b0bSDavid Xu 		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
492d1078b0bSDavid Xu 			_thr_wake_all(curthread->defer_waiters,
493d1078b0bSDavid Xu 			    curthread->nwaiter_defer);
494d1078b0bSDavid Xu 			curthread->nwaiter_defer = 0;
495d1078b0bSDavid Xu 		}
496d1078b0bSDavid Xu 		curthread->defer_waiters[curthread->nwaiter_defer++] =
497d1078b0bSDavid Xu 		    &td->wake_addr->value;
4982a339d9eSKonstantin Belousov 		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
499d1078b0bSDavid Xu 	} else {
500d1078b0bSDavid Xu 		if (ba->count >= MAX_DEFER_WAITERS) {
501d1078b0bSDavid Xu 			_thr_wake_all(ba->waddrs, ba->count);
502d1078b0bSDavid Xu 			ba->count = 0;
503d1078b0bSDavid Xu 		}
504d1078b0bSDavid Xu 		ba->waddrs[ba->count++] = &td->wake_addr->value;
505d1078b0bSDavid Xu 	}
506d1078b0bSDavid Xu }
508d1078b0bSDavid Xu 
/*
 * Wake all waiters: kernel-side waiters via _thr_ucond_broadcast(), then —
 * for process-private condvars with userland waiters — drain the entire
 * sleepqueue through drop_cb(), clear __has_user_waiters, and flush any
 * remaining batched wake addresses after the queue lock is released.
 * Always returns 0.
 */
508d1078b0bSDavid Xu static int
509d1078b0bSDavid Xu cond_broadcast_common(pthread_cond_t *cond)
510d1078b0bSDavid Xu {
511d1078b0bSDavid Xu 	int    pshared;
512d1078b0bSDavid Xu 	struct pthread_cond *cvp;
513d1078b0bSDavid Xu 	struct sleepqueue *sq;
514d1078b0bSDavid Xu 	struct broadcast_arg ba;
515d1078b0bSDavid Xu 
516d1078b0bSDavid Xu 	/*
517d1078b0bSDavid Xu 	 * If the condition variable is statically initialized, perform dynamic
518d1078b0bSDavid Xu 	 * initialization.
519d1078b0bSDavid Xu 	 */
520d1078b0bSDavid Xu 	CHECK_AND_INIT_COND
521d1078b0bSDavid Xu 
522d1078b0bSDavid Xu 	pshared = CV_PSHARED(cvp);
523d1078b0bSDavid Xu 
5246180f50bSKonstantin Belousov 	_thr_ucond_broadcast(&cvp->kcond);
525d1078b0bSDavid Xu 
526d1078b0bSDavid Xu 	if (pshared || cvp->__has_user_waiters == 0)
527d1078b0bSDavid Xu 		return (0);
528d1078b0bSDavid Xu 
529d1078b0bSDavid Xu 	ba.curthread = _get_curthread();
530d1078b0bSDavid Xu 	ba.count = 0;
531d1078b0bSDavid Xu 
532d1078b0bSDavid Xu 	_sleepq_lock(cvp);
533d1078b0bSDavid Xu 	sq = _sleepq_lookup(cvp);
534d1078b0bSDavid Xu 	if (sq == NULL) {
535d1078b0bSDavid Xu 		_sleepq_unlock(cvp);
536d1078b0bSDavid Xu 		return (0);
537d1078b0bSDavid Xu 	}
538d1078b0bSDavid Xu 	_sleepq_drop(sq, drop_cb, &ba);
539d1078b0bSDavid Xu 	cvp->__has_user_waiters = 0;
540d1078b0bSDavid Xu 	_sleepq_unlock(cvp);
541d1078b0bSDavid Xu 	if (ba.count > 0)
542d1078b0bSDavid Xu 		_thr_wake_all(ba.waddrs, ba.count);
543d1078b0bSDavid Xu 	return (0);
544d1078b0bSDavid Xu }
546bb535300SJeff Roberson 
/* pthread_cond_signal(3) entry point; thin wrapper, not a cancellation point. */
547bb535300SJeff Roberson int
5480ab1bfc7SKonstantin Belousov _thr_cond_signal(pthread_cond_t * cond)
549bb535300SJeff Roberson {
550a091d823SDavid Xu 
551d1078b0bSDavid Xu 	return (cond_signal_common(cond));
552bb535300SJeff Roberson }
553bb535300SJeff Roberson 
/* pthread_cond_broadcast(3) entry point; thin wrapper, not a cancellation point. */
553bb535300SJeff Roberson int
5540ab1bfc7SKonstantin Belousov _thr_cond_broadcast(pthread_cond_t * cond)
555bb535300SJeff Roberson {
556a091d823SDavid Xu 
557d1078b0bSDavid Xu 	return (cond_broadcast_common(cond));
558a224a391SMike Makonnen }
560