/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include "un-namespace.h"

#include "thr_private.h"

/* A process-shared condvar must fit on its single shared offpage. */
_Static_assert(sizeof(struct pthread_cond) <= PAGE_SIZE,
    "pthread_cond too large");

/*
 * Prototypes
 */
int	__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec * abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);

/*
 * Double underscore versions are cancellation points.  Single underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
 */
__weak_reference(__pthread_cond_wait, pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);

/* True when the condvar was created process-shared. */
#define CV_PSHARED(cvp)	(((cvp)->kcond.c_flags & USYNC_PROCESS_SHARED) != 0)

/*
 * Fill in the kernel-visible condvar word from the attributes.
 * A NULL cattr means the defaults: process-private, CLOCK_REALTIME.
 */
static void
cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
{

	if (cattr == NULL) {
		cvp->kcond.c_clockid = CLOCK_REALTIME;
	} else {
		if (cattr->c_pshared)
			cvp->kcond.c_flags |= USYNC_PROCESS_SHARED;
		cvp->kcond.c_clockid = cattr->c_clockid;
	}
}

/*
 * Allocate and initialize a condvar.  A process-private condvar lives in
 * heap memory and *cond points straight at it; a process-shared condvar
 * lives on the shared offpage and *cond is set to the THR_PSHARED_PTR
 * sentinel instead, so every user must translate through
 * __thr_pshared_offpage().
 *
 * Returns 0, ENOMEM (private allocation failed), or EFAULT (offpage
 * creation failed).
 */
static int
cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	struct pthread_cond *cvp;
	const struct pthread_cond_attr *cattr;
	int pshared;

	cattr = cond_attr != NULL ? *cond_attr : NULL;
	if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
		pshared = 0;
		cvp = calloc(1, sizeof(struct pthread_cond));
		if (cvp == NULL)
			return (ENOMEM);
	} else {
		pshared = 1;
		cvp = __thr_pshared_offpage(cond, 1);
		if (cvp == NULL)
			return (EFAULT);
	}

	/*
	 * Initialise the condition variable structure:
	 */
	cond_init_body(cvp, cattr);
	*cond = pshared ? THR_PSHARED_PTR : cvp;
	return (0);
}

/*
 * One-time dynamic initialization for a statically initialized condvar.
 * Serialized by _cond_static_lock; *cond is re-checked under the lock so
 * only one of several racing first users performs the allocation.
 */
static int
init_static(struct pthread *thread, pthread_cond_t *cond)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

	if (*cond == NULL)
		ret = cond_init(cond, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_cond_static_lock);

	return (ret);
}

/*
 * Resolve *cond into the local 'cvp' pointer: translate the
 * process-shared sentinel through the offpage, lazily initialize a
 * statically initialized condvar, and reject a destroyed one.
 * NOTE: the expansion may execute 'return' from the enclosing function
 * (EINVAL, or the error from init_static()).
 */
#define CHECK_AND_INIT_COND						\
	if (*cond == THR_PSHARED_PTR) {					\
		cvp = __thr_pshared_offpage(cond, 0);			\
		if (cvp == NULL)					\
			return (EINVAL);				\
	} else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) { \
		if (cvp == THR_COND_INITIALIZER) {			\
			int ret;					\
			ret = init_static(_get_curthread(), cond);	\
			if (ret)					\
				return (ret);				\
		} else if (cvp == THR_COND_DESTROYED) {			\
			return (EINVAL);				\
		}							\
		cvp = *cond;						\
	}

int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{

	*cond = NULL;
	return (cond_init(cond, cond_attr));
}

int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond *cvp;
	int error;

	error = 0;
	if (*cond == THR_PSHARED_PTR) {
		/* Process-shared: tear down the shared offpage, if any. */
		cvp = __thr_pshared_offpage(cond, 0);
		if (cvp != NULL)
			__thr_pshared_destroy(cond);
		*cond = THR_COND_DESTROYED;
	} else if ((cvp = *cond) == THR_COND_INITIALIZER) {
		/* Statically initialized but never used: nothing to free. */
	} else if (cvp == THR_COND_DESTROYED) {
		/* Destroying twice is an application error. */
		error = EINVAL;
	} else {
		cvp = *cond;
		*cond = THR_COND_DESTROYED;
		free(cvp);
	}
	return (error);
}

/*
 * Cancellation behavior:
 *   Thread may be canceled at start, if thread is canceled, it means it
 *   did not get a wakeup from pthread_cond_signal(), otherwise, it is
 *   not canceled.
 *   Thread cancellation never cause wakeup from pthread_cond_signal()
 *   to be lost.
 */

/*
 * Kernel-side wait path: detach the mutex from the thread (saving its
 * recursion count), block in the kernel on the ucond, then re-acquire
 * the mutex before returning.  Chosen by cond_wait_common() whenever
 * the mutex or condvar cannot use the userland sleep queue.
 */
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	int error, error2, recurse, robust;

	curthread = _get_curthread();
	robust = _mutex_enter_robust(curthread, mp);

	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0) {
		if (robust)
			_mutex_leave_robust(curthread, mp);
		return (error);
	}

	if (cancel)
		_thr_cancel_enter2(curthread, 0);
	error = _thr_ucond_wait(&cvp->kcond, &mp->m_lock, abstime,
	    CVWAIT_ABSTIME | CVWAIT_CLOCKID);
	if (cancel)
		_thr_cancel_leave(curthread, 0);

	/*
	 * Note that PP mutex and ROBUST mutex may return
	 * interesting error codes.
	 */
	if (error == 0) {
		error2 = _mutex_cv_lock(mp, recurse, true);
	} else if (error == EINTR || error == ETIMEDOUT) {
		error2 = _mutex_cv_lock(mp, recurse, true);
		/*
		 * Do not do cancellation on EOWNERDEAD there.  The
		 * cancellation cleanup handler will use the protected
		 * state and unlock the mutex without making the state
		 * consistent and the state will be unrecoverable.
		 */
		if (error2 == 0 && cancel)
			_thr_testcancel(curthread);

		/* EINTR is not reported to the caller; the wait simply
		 * re-acquired the mutex. */
		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		_mutex_cv_attach(mp, recurse);
		if (cancel)
			_thr_testcancel(curthread);
		error2 = 0;
	}
	if (robust)
		_mutex_leave_robust(curthread, mp);
	/* Mutex re-lock failures take precedence over the wait error. */
	return (error2 != 0 ? error2 : error);
}

/*
 * Thread waits in userland queue whenever possible, when thread
 * is signaled or broadcasted, it is removed from the queue, and
 * is saved in curthread's defer_waiters[] buffer, but won't be
 * woken up until mutex is unlocked.
 */

static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	struct sleepqueue *sq;
	int deferred, error, error2, recurse;

	curthread = _get_curthread();
	if (curthread->wchan != NULL)
		PANIC("thread was already on queue.");

	if (cancel)
		_thr_testcancel(curthread);

	_sleepq_lock(cvp);
	/*
	 * set __has_user_waiters before unlocking mutex, this allows
	 * us to check it without locking in pthread_cond_signal().
	 */
	cvp->__has_user_waiters = 1;
	deferred = 0;
	(void)_mutex_cv_unlock(mp, &recurse, &deferred);
	curthread->mutex_obj = mp;
	_sleepq_add(cvp, curthread);
	for(;;) {
		_thr_clear_wake(curthread);
		_sleepq_unlock(cvp);
		if (deferred) {
			/*
			 * The unlock deferred a wakeup of the mutex
			 * waiters; issue it now unless the kernel
			 * already sees the mutex as contested.
			 */
			deferred = 0;
			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
				(void)_umtx_op_err(&mp->m_lock,
				    UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
				    0, 0);
		}
		/* Wake any waiters whose wakeup we deferred earlier. */
		if (curthread->nwaiter_defer > 0) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}

		if (cancel)
			_thr_cancel_enter2(curthread, 0);
		error = _thr_sleep(curthread, cvp->kcond.c_clockid, abstime);
		if (cancel)
			_thr_cancel_leave(curthread, 0);

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			/* Removed from the queue: a real wakeup. */
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters = _sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			error2 = _mutex_cv_lock(mp, recurse, false);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else /* this should not happen */
				return (error2);
		} else if (error == ETIMEDOUT) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			break;
		}
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	error2 = _mutex_cv_lock(mp, recurse, false);
	if (error == 0)
		error = error2;
	return (error);
}

/*
 * Common body of pthread_cond_wait() and pthread_cond_timedwait().
 * Non-SCHED_OTHER threads, mutexes with a kernel-visible protocol
 * (priority protect/inherit or process-shared) and process-shared
 * condvars must wait in the kernel; everything else takes the cheaper
 * userland sleep-queue path.
 */
static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	int error;

	CHECK_AND_INIT_COND

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
	} else {
		mp = *mutex;
	}

	/* The caller must hold the mutex (EPERM otherwise). */
	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	if (curthread->attr.sched_policy != SCHED_OTHER ||
	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
	    USYNC_PROCESS_SHARED)) != 0 || CV_PSHARED(cvp))
		return (cond_wait_kernel(cvp, mp, abstime, cancel));
	else
		return (cond_wait_user(cvp, mp, abstime, cancel));
}

int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}

int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}

int
_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec * abstime)
{

	/* Reject a malformed abstime before waiting, per POSIX. */
	if
(abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{

	/* Reject a malformed abstime before waiting, per POSIX. */
	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}

/*
 * Wake one waiter.  Kernel-side waiters are signaled through the ucond;
 * a userland sleep-queue waiter is dequeued here, and if the current
 * thread owns the waiter's mutex its wakeup is deferred until the mutex
 * is unlocked (avoids waking a thread that would immediately block on
 * the mutex we still hold).
 */
static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread *curthread = _get_curthread();
	struct pthread *td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int *waddr;
	int pshared;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal(&cvp->kcond);

	/*
	 * Pshared condvars never have userland waiters; otherwise the
	 * unlocked flag check is safe because waiters set it before
	 * releasing the mutex (see cond_wait_user()).
	 */
	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	curthread = _get_curthread();
	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		/* Defer the wakeup; flush the buffer if it is full. */
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}

/* Accumulator passed to drop_cb() while draining the sleep queue. */
struct broadcast_arg {
	struct pthread *curthread;
	unsigned int *waddrs[MAX_DEFER_WAITERS];
	int count;
};

/*
 * _sleepq_drop() callback: collect each dequeued waiter's wake address.
 * Waiters whose mutex the current thread owns are deferred until mutex
 * unlock (as in cond_signal_common()); the rest are batched in 'ba' and
 * flushed whenever the buffer fills.
 */
static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}

/*
 * Wake all waiters: signal the kernel ucond, then drain the entire
 * userland sleep queue through drop_cb() and wake the batched waiters.
 */
static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_broadcast(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}
	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}

int
_pthread_cond_signal(pthread_cond_t * cond)
{

	return (cond_signal_common(cond));
}

int
_pthread_cond_broadcast(pthread_cond_t * cond)
{

	return (cond_broadcast_common(cond));
}