15e53a4f9SPedro F. Giffuni /*-
24d846d26SWarner Losh * SPDX-License-Identifier: BSD-2-Clause
35e53a4f9SPedro F. Giffuni *
4a091d823SDavid Xu * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
51bdbd705SKonstantin Belousov * Copyright (c) 2015 The FreeBSD Foundation
6bb535300SJeff Roberson * All rights reserved.
7bb535300SJeff Roberson *
81bdbd705SKonstantin Belousov * Portions of this software were developed by Konstantin Belousov
91bdbd705SKonstantin Belousov * under sponsorship from the FreeBSD Foundation.
101bdbd705SKonstantin Belousov *
11bb535300SJeff Roberson * Redistribution and use in source and binary forms, with or without
12bb535300SJeff Roberson * modification, are permitted provided that the following conditions
13bb535300SJeff Roberson * are met:
14bb535300SJeff Roberson * 1. Redistributions of source code must retain the above copyright
15a091d823SDavid Xu * notice unmodified, this list of conditions, and the following
16a091d823SDavid Xu * disclaimer.
17bb535300SJeff Roberson * 2. Redistributions in binary form must reproduce the above copyright
18bb535300SJeff Roberson * notice, this list of conditions and the following disclaimer in the
19bb535300SJeff Roberson * documentation and/or other materials provided with the distribution.
20bb535300SJeff Roberson *
21a091d823SDavid Xu * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22a091d823SDavid Xu * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23a091d823SDavid Xu * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24a091d823SDavid Xu * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25a091d823SDavid Xu * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26a091d823SDavid Xu * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27a091d823SDavid Xu * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28a091d823SDavid Xu * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29a091d823SDavid Xu * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30a091d823SDavid Xu * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31bb535300SJeff Roberson */
32a091d823SDavid Xu
3337a6356bSDavid Xu #include "namespace.h"
34bb535300SJeff Roberson #include <stdlib.h>
35bb535300SJeff Roberson #include <errno.h>
36bb535300SJeff Roberson #include <string.h>
37bb535300SJeff Roberson #include <pthread.h>
38a091d823SDavid Xu #include <limits.h>
3937a6356bSDavid Xu #include "un-namespace.h"
40bb535300SJeff Roberson
41a091d823SDavid Xu #include "thr_private.h"
4241f2bd85SMike Makonnen
/*
 * struct pthread_cond must fit within the smallest supported page size:
 * process-shared condvars are stored in a dedicated off-process page
 * (see __thr_pshared_offpage() usage below).
 */
_Static_assert(sizeof(struct pthread_cond) <= THR_PAGE_SIZE_MIN,
    "pthread_cond too large");

/*
 * Prototypes
 */
int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		       const struct timespec * abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
		    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);
56a091d823SDavid Xu
/*
 * Double underscore versions are cancellation points.  Single underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
 *
 * The public pthread_* names are weak aliases so that applications may
 * interpose their own definitions.
 */
__weak_reference(__thr_cond_wait, pthread_cond_wait);
__weak_reference(__thr_cond_wait, __pthread_cond_wait);
__weak_reference(_thr_cond_wait, _pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);
__weak_reference(_thr_cond_timedwait, _pthread_cond_timedwait);
__weak_reference(_thr_cond_init, pthread_cond_init);
__weak_reference(_thr_cond_init, _pthread_cond_init);
__weak_reference(_thr_cond_destroy, pthread_cond_destroy);
__weak_reference(_thr_cond_destroy, _pthread_cond_destroy);
__weak_reference(_thr_cond_signal, pthread_cond_signal);
__weak_reference(_thr_cond_signal, _pthread_cond_signal);
__weak_reference(_thr_cond_broadcast, pthread_cond_broadcast);
__weak_reference(_thr_cond_broadcast, _pthread_cond_broadcast);

/* True if the condvar was initialized process-shared. */
#define CV_PSHARED(cvp)	(((cvp)->kcond.c_flags & USYNC_PROCESS_SHARED) != 0)
77d1078b0bSDavid Xu
781bdbd705SKonstantin Belousov static void
cond_init_body(struct pthread_cond * cvp,const struct pthread_cond_attr * cattr)791bdbd705SKonstantin Belousov cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
801bdbd705SKonstantin Belousov {
811bdbd705SKonstantin Belousov
821bdbd705SKonstantin Belousov if (cattr == NULL) {
836180f50bSKonstantin Belousov cvp->kcond.c_clockid = CLOCK_REALTIME;
841bdbd705SKonstantin Belousov } else {
851bdbd705SKonstantin Belousov if (cattr->c_pshared)
866180f50bSKonstantin Belousov cvp->kcond.c_flags |= USYNC_PROCESS_SHARED;
876180f50bSKonstantin Belousov cvp->kcond.c_clockid = cattr->c_clockid;
881bdbd705SKonstantin Belousov }
891bdbd705SKonstantin Belousov }
901bdbd705SKonstantin Belousov
91a091d823SDavid Xu static int
cond_init(pthread_cond_t * cond,const pthread_condattr_t * cond_attr)92a091d823SDavid Xu cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
93bb535300SJeff Roberson {
94d1078b0bSDavid Xu struct pthread_cond *cvp;
951bdbd705SKonstantin Belousov const struct pthread_cond_attr *cattr;
961bdbd705SKonstantin Belousov int pshared;
97a091d823SDavid Xu
981bdbd705SKonstantin Belousov cattr = cond_attr != NULL ? *cond_attr : NULL;
991bdbd705SKonstantin Belousov if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
1001bdbd705SKonstantin Belousov pshared = 0;
1011bdbd705SKonstantin Belousov cvp = calloc(1, sizeof(struct pthread_cond));
1021bdbd705SKonstantin Belousov if (cvp == NULL)
1031bdbd705SKonstantin Belousov return (ENOMEM);
104a091d823SDavid Xu } else {
1051bdbd705SKonstantin Belousov pshared = 1;
1061bdbd705SKonstantin Belousov cvp = __thr_pshared_offpage(cond, 1);
1071bdbd705SKonstantin Belousov if (cvp == NULL)
1081bdbd705SKonstantin Belousov return (EFAULT);
1091bdbd705SKonstantin Belousov }
1101bdbd705SKonstantin Belousov
111a091d823SDavid Xu /*
112a091d823SDavid Xu * Initialise the condition variable structure:
113a091d823SDavid Xu */
1141bdbd705SKonstantin Belousov cond_init_body(cvp, cattr);
1151bdbd705SKonstantin Belousov *cond = pshared ? THR_PSHARED_PTR : cvp;
1161bdbd705SKonstantin Belousov return (0);
117a091d823SDavid Xu }
118a091d823SDavid Xu
119a091d823SDavid Xu static int
init_static(struct pthread * thread,pthread_cond_t * cond)120a091d823SDavid Xu init_static(struct pthread *thread, pthread_cond_t *cond)
121a091d823SDavid Xu {
122a091d823SDavid Xu int ret;
123a091d823SDavid Xu
124a091d823SDavid Xu THR_LOCK_ACQUIRE(thread, &_cond_static_lock);
125bb535300SJeff Roberson
126bb535300SJeff Roberson if (*cond == NULL)
127a091d823SDavid Xu ret = cond_init(cond, NULL);
128a091d823SDavid Xu else
129a091d823SDavid Xu ret = 0;
130bb535300SJeff Roberson
131a091d823SDavid Xu THR_LOCK_RELEASE(thread, &_cond_static_lock);
132bb535300SJeff Roberson
133a091d823SDavid Xu return (ret);
134bb535300SJeff Roberson }
135bb535300SJeff Roberson
/*
 * Resolve *cond into the local 'cvp' pointer, handling the special
 * cases: a process-shared condvar stored off-page, a statically
 * initialized condvar that still needs dynamic initialization, and an
 * already-destroyed condvar (EINVAL).  This is a macro rather than a
 * function because it must be able to return from the caller.
 */
#define CHECK_AND_INIT_COND						\
	if (*cond == THR_PSHARED_PTR) {					\
		cvp = __thr_pshared_offpage(cond, 0);			\
		if (cvp == NULL)					\
			return (EINVAL);				\
	} else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) {	\
		if (cvp == THR_COND_INITIALIZER) {			\
			int ret;					\
			ret = init_static(_get_curthread(), cond);	\
			if (ret)					\
				return (ret);				\
		} else if (cvp == THR_COND_DESTROYED) {			\
			return (EINVAL);				\
		}							\
		cvp = *cond;						\
	}

152bbb64c21SDavid Xu
/*
 * pthread_cond_init(3): initialize a condition variable with the given
 * attributes.  *cond is cleared first so cond_init() starts from a
 * known state.
 */
int
_thr_cond_init(pthread_cond_t * __restrict cond,
    const pthread_condattr_t * __restrict cond_attr)
{

	*cond = NULL;
	return (cond_init(cond, cond_attr));
}
161bb535300SJeff Roberson
162bb535300SJeff Roberson int
_thr_cond_destroy(pthread_cond_t * cond)1630ab1bfc7SKonstantin Belousov _thr_cond_destroy(pthread_cond_t *cond)
164bb535300SJeff Roberson {
165d1078b0bSDavid Xu struct pthread_cond *cvp;
1661bdbd705SKonstantin Belousov int error;
167bb535300SJeff Roberson
168d1078b0bSDavid Xu error = 0;
1691bdbd705SKonstantin Belousov if (*cond == THR_PSHARED_PTR) {
1701bdbd705SKonstantin Belousov cvp = __thr_pshared_offpage(cond, 0);
171b16150eaSMark Johnston if (cvp != NULL) {
172b16150eaSMark Johnston if (cvp->kcond.c_has_waiters)
173b16150eaSMark Johnston error = EBUSY;
174b16150eaSMark Johnston else
1751bdbd705SKonstantin Belousov __thr_pshared_destroy(cond);
176b16150eaSMark Johnston }
177b16150eaSMark Johnston if (error == 0)
1781bdbd705SKonstantin Belousov *cond = THR_COND_DESTROYED;
1791bdbd705SKonstantin Belousov } else if ((cvp = *cond) == THR_COND_INITIALIZER) {
1801bdbd705SKonstantin Belousov /* nothing */
1811bdbd705SKonstantin Belousov } else if (cvp == THR_COND_DESTROYED) {
182d1078b0bSDavid Xu error = EINVAL;
1831bdbd705SKonstantin Belousov } else {
184d1078b0bSDavid Xu cvp = *cond;
185b16150eaSMark Johnston if (cvp->__has_user_waiters || cvp->kcond.c_has_waiters)
186b16150eaSMark Johnston error = EBUSY;
187b16150eaSMark Johnston else {
188bbb64c21SDavid Xu *cond = THR_COND_DESTROYED;
189d1078b0bSDavid Xu free(cvp);
190a091d823SDavid Xu }
191b16150eaSMark Johnston }
192d1078b0bSDavid Xu return (error);
193a091d823SDavid Xu }
194a091d823SDavid Xu
/*
 * Cancellation behavior:
 *   Thread may be canceled at start, if thread is canceled, it means it
 *   did not get a wakeup from pthread_cond_signal(), otherwise, it is
 *   not canceled.
 *   Thread cancellation never cause wakeup from pthread_cond_signal()
 *   to be lost.
 */

/*
 * Wait on the condvar through the kernel (_thr_ucond_wait).  Used when
 * the condvar or mutex is process-shared, the mutex has priority
 * protocol bits set, or the thread is not SCHED_OTHER (see
 * cond_wait_common()).  The mutex is detached from the thread before
 * sleeping and re-locked afterwards; recursion count is preserved
 * across the wait.  Returns 0, ETIMEDOUT, or a mutex re-lock error.
 */
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	int error, error2, recurse, robust;

	curthread = _get_curthread();
	robust = _mutex_enter_robust(curthread, mp);

	/* Hand the mutex to the kernel wait; remember recursion depth. */
	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0) {
		if (robust)
			_mutex_leave_robust(curthread, mp);
		return (error);
	}

	if (cancel)
		_thr_cancel_enter2(curthread, 0);
	error = _thr_ucond_wait(&cvp->kcond, &mp->m_lock, abstime,
	    CVWAIT_ABSTIME | CVWAIT_CLOCKID);
	if (cancel)
		_thr_cancel_leave(curthread, 0);

	/*
	 * Note that PP mutex and ROBUST mutex may return
	 * interesting error codes.
	 */
	if (error == 0) {
		/* Woken normally; kernel released the mutex, re-take it. */
		error2 = _mutex_cv_lock(mp, recurse, true);
	} else if (error == EINTR || error == ETIMEDOUT) {
		/* Kernel released the mutex before failing the wait. */
		error2 = _mutex_cv_lock(mp, recurse, true);
		/*
		 * Do not do cancellation on EOWNERDEAD there.  The
		 * cancellation cleanup handler will use the protected
		 * state and unlock the mutex without making the state
		 * consistent and the state will be unrecoverable.
		 */
		if (error2 == 0 && cancel) {
			if (robust) {
				_mutex_leave_robust(curthread, mp);
				robust = false;
			}
			_thr_testcancel(curthread);
		}

		/* EINTR is not reported to the caller; retry semantics. */
		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		_mutex_cv_attach(mp, recurse);
		if (cancel) {
			if (robust) {
				_mutex_leave_robust(curthread, mp);
				robust = false;
			}
			_thr_testcancel(curthread);
		}
		error2 = 0;
	}
	if (robust)
		_mutex_leave_robust(curthread, mp);
	/* Prefer the mutex re-lock error over the wait error. */
	return (error2 != 0 ? error2 : error);
}
267d1078b0bSDavid Xu
/*
 * Thread waits in userland queue whenever possible, when thread
 * is signaled or broadcasted, it is removed from the queue, and
 * is saved in curthread's defer_waiters[] buffer, but won't be
 * woken up until mutex is unlocked.
 */

/*
 * Userland-queue wait path.  The thread enqueues itself on the
 * condvar's sleepqueue, releases the mutex, and sleeps on its private
 * wake address via _thr_sleep().  Wakers remove it from the queue
 * (wchan becomes NULL) before setting the wake address, so an empty
 * wchan after sleeping means a genuine signal/broadcast.
 */
static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	struct sleepqueue *sq;
	int deferred, error, error2, recurse;

	curthread = _get_curthread();
	if (curthread->wchan != NULL)
		PANIC("thread %p was already on queue.", curthread);

	if (cancel)
		_thr_testcancel(curthread);

	_sleepq_lock(cvp);
	/*
	 * set __has_user_waiters before unlocking mutex, this allows
	 * us to check it without locking in pthread_cond_signal().
	 */
	cvp->__has_user_waiters = 1;
	deferred = 0;
	(void)_mutex_cv_unlock(mp, &recurse, &deferred);
	curthread->mutex_obj = mp;
	_sleepq_add(cvp, curthread);
	for(;;) {
		_thr_clear_wake(curthread);
		_sleepq_unlock(cvp);
		if (deferred) {
			/*
			 * The mutex unlock was deferred; wake a contender
			 * now, unless the kernel already sees contention.
			 */
			deferred = 0;
			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
				(void)_umtx_op_err(&mp->m_lock,
				    UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
				    0, 0);
		}
		/* Flush wakeups we deferred on behalf of other waiters. */
		if (curthread->nwaiter_defer > 0) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}

		if (cancel)
			_thr_cancel_enter2(curthread, 0);
		error = _thr_sleep(curthread, cvp->kcond.c_clockid, abstime);
		if (cancel)
			_thr_cancel_leave(curthread, 0);

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			/* A waker dequeued us: real signal/broadcast. */
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			/* Canceled while still queued: dequeue ourselves. */
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters = _sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			error2 = _mutex_cv_lock(mp, recurse, false);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else /* this should not happen */
				return (error2);
		} else if (error == ETIMEDOUT) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			break;
		}
		/* Spurious wakeup: loop and sleep again. */
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	/* Re-acquire the mutex; its error wins only on a clean wait. */
	error2 = _mutex_cv_lock(mp, recurse, false);
	if (error == 0)
		error = error2;
	return (error);
}
350d1078b0bSDavid Xu
351d1078b0bSDavid Xu static int
cond_wait_common(pthread_cond_t * cond,pthread_mutex_t * mutex,const struct timespec * abstime,int cancel)352a091d823SDavid Xu cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
353a091d823SDavid Xu const struct timespec *abstime, int cancel)
354a091d823SDavid Xu {
355a091d823SDavid Xu struct pthread *curthread = _get_curthread();
356d1078b0bSDavid Xu struct pthread_cond *cvp;
357d1078b0bSDavid Xu struct pthread_mutex *mp;
358d1078b0bSDavid Xu int error;
359a091d823SDavid Xu
360bbb64c21SDavid Xu CHECK_AND_INIT_COND
361635f917aSDavid Xu
3621bdbd705SKonstantin Belousov if (*mutex == THR_PSHARED_PTR) {
3631bdbd705SKonstantin Belousov mp = __thr_pshared_offpage(mutex, 0);
3641bdbd705SKonstantin Belousov if (mp == NULL)
3651bdbd705SKonstantin Belousov return (EINVAL);
3661bdbd705SKonstantin Belousov } else {
367d1078b0bSDavid Xu mp = *mutex;
3681bdbd705SKonstantin Belousov }
3692bd2c907SDavid Xu
370d1078b0bSDavid Xu if ((error = _mutex_owned(curthread, mp)) != 0)
371d1078b0bSDavid Xu return (error);
372a091d823SDavid Xu
373d1078b0bSDavid Xu if (curthread->attr.sched_policy != SCHED_OTHER ||
374d1078b0bSDavid Xu (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
3756180f50bSKonstantin Belousov USYNC_PROCESS_SHARED)) != 0 || CV_PSHARED(cvp))
3762a339d9eSKonstantin Belousov return (cond_wait_kernel(cvp, mp, abstime, cancel));
377d1078b0bSDavid Xu else
3782a339d9eSKonstantin Belousov return (cond_wait_user(cvp, mp, abstime, cancel));
379bb535300SJeff Roberson }
380bb535300SJeff Roberson
/*
 * pthread_cond_wait(3), non-cancellation-point flavor for libc
 * internal use.
 */
int
_thr_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}
387bb535300SJeff Roberson
388a091d823SDavid Xu int
__thr_cond_wait(pthread_cond_t * __restrict cond,pthread_mutex_t * __restrict mutex)3890ab1bfc7SKonstantin Belousov __thr_cond_wait(pthread_cond_t * __restrict cond,
390b6413b6dSPedro F. Giffuni pthread_mutex_t * __restrict mutex)
391a091d823SDavid Xu {
392bb535300SJeff Roberson
393a091d823SDavid Xu return (cond_wait_common(cond, mutex, NULL, 1));
394bb535300SJeff Roberson }
395bb535300SJeff Roberson
396bb535300SJeff Roberson int
_thr_cond_timedwait(pthread_cond_t * __restrict cond,pthread_mutex_t * __restrict mutex,const struct timespec * __restrict abstime)3970ab1bfc7SKonstantin Belousov _thr_cond_timedwait(pthread_cond_t * __restrict cond,
398b6413b6dSPedro F. Giffuni pthread_mutex_t * __restrict mutex,
399b6413b6dSPedro F. Giffuni const struct timespec * __restrict abstime)
400bb535300SJeff Roberson {
401a091d823SDavid Xu
402a091d823SDavid Xu if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
403a091d823SDavid Xu abstime->tv_nsec >= 1000000000)
404dd3b229eSMike Makonnen return (EINVAL);
405dd3b229eSMike Makonnen
406a091d823SDavid Xu return (cond_wait_common(cond, mutex, abstime, 0));
407a091d823SDavid Xu }
408a091d823SDavid Xu
409a091d823SDavid Xu int
__pthread_cond_timedwait(pthread_cond_t * cond,pthread_mutex_t * mutex,const struct timespec * abstime)410a091d823SDavid Xu __pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
411a091d823SDavid Xu const struct timespec *abstime)
412a091d823SDavid Xu {
413a091d823SDavid Xu
414a091d823SDavid Xu if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
415a091d823SDavid Xu abstime->tv_nsec >= 1000000000)
416a091d823SDavid Xu return (EINVAL);
417a091d823SDavid Xu
418a091d823SDavid Xu return (cond_wait_common(cond, mutex, abstime, 1));
419dd3b229eSMike Makonnen }
420dd3b229eSMike Makonnen
/*
 * Wake one waiter.  Kernel-side waiters are signaled through
 * _thr_ucond_signal(); then, for process-private condvars, the first
 * thread on the userland sleepqueue is dequeued.  If that waiter's
 * mutex is held by the caller, its wakeup is deferred into
 * defer_waiters[] so it fires only when the mutex is unlocked,
 * avoiding an immediate block on the still-held mutex.
 */
static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread *curthread = _get_curthread();
	struct pthread *td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int *waddr;
	int pshared;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal(&cvp->kcond);

	/* pshared condvars never use the userland sleepqueue. */
	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	curthread = _get_curthread();
	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		/* We hold the waiter's mutex: defer the wakeup. */
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			/* Buffer full; flush it before recording more. */
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		/* Mutex not ours: wake the thread right away. */
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}
474d1078b0bSDavid Xu
/* State threaded through drop_cb() while draining a broadcast. */
struct broadcast_arg {
	struct pthread *curthread;	/* broadcasting thread */
	unsigned int *waddrs[MAX_DEFER_WAITERS];	/* immediate wakeups */
	int count;			/* valid entries in waddrs[] */
};
480d1078b0bSDavid Xu
/*
 * Per-waiter callback for _sleepq_drop() during broadcast.  Mirrors the
 * deferral logic of cond_signal_common(): waiters whose mutex the
 * broadcaster holds get their wakeup deferred to mutex unlock; all
 * others are batched into ba->waddrs[] and woken in bulk.
 */
static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		/* Defer: waking now would just block on our mutex. */
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		/* Flush the batch when the fixed-size buffer fills up. */
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}
506d1078b0bSDavid Xu
/*
 * Wake all waiters: signal kernel-side waiters via
 * _thr_ucond_broadcast(), then drain the entire userland sleepqueue
 * through drop_cb(), which sorts each waiter into deferred or
 * immediate wakeup.
 */
static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_broadcast(&cvp->kcond);

	/* pshared condvars never use the userland sleepqueue. */
	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}
	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	/* Wake any non-deferred waiters still batched in ba. */
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}
544bb535300SJeff Roberson
545bb535300SJeff Roberson int
_thr_cond_signal(pthread_cond_t * cond)5460ab1bfc7SKonstantin Belousov _thr_cond_signal(pthread_cond_t * cond)
547bb535300SJeff Roberson {
548a091d823SDavid Xu
549d1078b0bSDavid Xu return (cond_signal_common(cond));
550bb535300SJeff Roberson }
551bb535300SJeff Roberson
552bb535300SJeff Roberson int
_thr_cond_broadcast(pthread_cond_t * cond)5530ab1bfc7SKonstantin Belousov _thr_cond_broadcast(pthread_cond_t * cond)
554bb535300SJeff Roberson {
555a091d823SDavid Xu
556d1078b0bSDavid Xu return (cond_broadcast_common(cond));
557a224a391SMike Makonnen }
558