/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include "un-namespace.h"

#include "thr_private.h"

_Static_assert(sizeof(struct pthread_cond) <= PAGE_SIZE,
    "pthread_cond too large");

/*
 * Prototypes
 */
int	__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);

/*
 * Double underscore versions are cancellation points.  Single underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
 */
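/*
 * Concretely: an application call to pthread_cond_wait() binds, through
 * the weak references below, to __pthread_cond_wait() and is therefore a
 * cancellation point, while libc-internal code calls the
 * _pthread_cond_wait() alias and never acts on a pending cancellation
 * request while it sleeps.
 */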
__weak_reference(__pthread_cond_wait, pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);

#define	CV_PSHARED(cvp)	(((cvp)->kcond.c_flags & USYNC_PROCESS_SHARED) != 0)

static void
cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
{

	if (cattr == NULL) {
		cvp->kcond.c_clockid = CLOCK_REALTIME;
	} else {
		if (cattr->c_pshared)
			cvp->kcond.c_flags |= USYNC_PROCESS_SHARED;
		cvp->kcond.c_clockid = cattr->c_clockid;
	}
}

static int
cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	struct pthread_cond *cvp;
	const struct pthread_cond_attr *cattr;
	int pshared;

	cattr = cond_attr != NULL ? *cond_attr : NULL;
	if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
		pshared = 0;
		cvp = calloc(1, sizeof(struct pthread_cond));
		if (cvp == NULL)
			return (ENOMEM);
	} else {
		pshared = 1;
		cvp = __thr_pshared_offpage(cond, 1);
		if (cvp == NULL)
			return (EFAULT);
	}

	/*
	 * Initialise the condition variable structure:
	 */
	cond_init_body(cvp, cattr);
	*cond = pshared ? THR_PSHARED_PTR : cvp;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_cond_t *cond)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

	if (*cond == NULL)
		ret = cond_init(cond, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_cond_static_lock);

	return (ret);
}

#define	CHECK_AND_INIT_COND						\
	if (*cond == THR_PSHARED_PTR) {					\
		cvp = __thr_pshared_offpage(cond, 0);			\
		if (cvp == NULL)					\
			return (EINVAL);				\
	} else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) { \
		if (cvp == THR_COND_INITIALIZER) {			\
			int ret;					\
			ret = init_static(_get_curthread(), cond);	\
			if (ret)					\
				return (ret);				\
		} else if (cvp == THR_COND_DESTROYED) {			\
			return (EINVAL);				\
		}							\
		cvp = *cond;						\
	}

int
_pthread_cond_init(pthread_cond_t * __restrict cond,
    const pthread_condattr_t * __restrict cond_attr)
{

	*cond = NULL;
	return (cond_init(cond, cond_attr));
}

int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond *cvp;
	int error;

	error = 0;
	if (*cond == THR_PSHARED_PTR) {
		cvp = __thr_pshared_offpage(cond, 0);
		if (cvp != NULL) {
			if (cvp->kcond.c_has_waiters)
				error = EBUSY;
			else
				__thr_pshared_destroy(cond);
		}
		if (error == 0)
			*cond = THR_COND_DESTROYED;
	} else if ((cvp = *cond) == THR_COND_INITIALIZER) {
		/* nothing */
	} else if (cvp == THR_COND_DESTROYED) {
		error = EINVAL;
	} else {
		cvp = *cond;
		if (cvp->__has_user_waiters || cvp->kcond.c_has_waiters)
			error = EBUSY;
		else {
			*cond = THR_COND_DESTROYED;
			free(cvp);
		}
	}
	return (error);
}

/*
 * Cancellation behavior:
 *   A thread may be canceled at the start of the wait; if it is canceled,
 *   it did not consume a wakeup from pthread_cond_signal().  Otherwise,
 *   it is not canceled.
 *   Thread cancellation never causes a wakeup from pthread_cond_signal()
 *   to be lost.
 */
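/*
 * A minimal sketch of the waiter pattern these rules protect
 * (illustrative only; qlock, qcond, queue_empty(), dequeue() and the
 * unlock_qlock() cleanup handler are hypothetical application names):
 *
 *	pthread_mutex_lock(&qlock);
 *	pthread_cleanup_push(unlock_qlock, &qlock);
 *	while (queue_empty())
 *		pthread_cond_wait(&qcond, &qlock);	(cancellation point)
 *	dequeue();
 *	pthread_cleanup_pop(1);
 *
 * If the waiter is canceled inside pthread_cond_wait(), it holds qlock
 * when the cleanup handler runs, and by the rules above the cancellation
 * cannot have consumed a pthread_cond_signal() wakeup that another
 * waiter needed.
 */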
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	int error, error2, recurse, robust;

	curthread = _get_curthread();
	robust = _mutex_enter_robust(curthread, mp);

	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0) {
		if (robust)
			_mutex_leave_robust(curthread, mp);
		return (error);
	}

	if (cancel)
		_thr_cancel_enter2(curthread, 0);
	error = _thr_ucond_wait(&cvp->kcond, &mp->m_lock, abstime,
	    CVWAIT_ABSTIME | CVWAIT_CLOCKID);
	if (cancel)
		_thr_cancel_leave(curthread, 0);

	/*
	 * Note that priority-protected (PP) and robust mutexes may
	 * return interesting error codes.
	 */
	if (error == 0) {
		error2 = _mutex_cv_lock(mp, recurse, true);
	} else if (error == EINTR || error == ETIMEDOUT) {
		error2 = _mutex_cv_lock(mp, recurse, true);
		/*
		 * Do not act on cancellation after EOWNERDEAD here: the
		 * cancellation cleanup handler would use the protected
		 * state and unlock the mutex without making the state
		 * consistent, leaving the state unrecoverable.
		 */
		if (error2 == 0 && cancel) {
			if (robust) {
				_mutex_leave_robust(curthread, mp);
				robust = false;
			}
			_thr_testcancel(curthread);
		}

		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		_mutex_cv_attach(mp, recurse);
		if (cancel) {
			if (robust) {
				_mutex_leave_robust(curthread, mp);
				robust = false;
			}
			_thr_testcancel(curthread);
		}
		error2 = 0;
	}
	if (robust)
		_mutex_leave_robust(curthread, mp);
	return (error2 != 0 ? error2 : error);
}

/*
 * A thread waits on the userland queue whenever possible.  When the
 * thread is signaled or broadcast, it is removed from the queue and its
 * wake address is saved in the signaler's defer_waiters[] buffer, but it
 * is not woken up until the mutex is unlocked.
 */
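/*
 * Deferring those wakeups matters: if a waiter were woken while the
 * signaling thread still held the mutex, it would be scheduled only to
 * block again immediately on that mutex.  Batching the wake addresses in
 * defer_waiters[] and releasing them after the unlock avoids the wasted
 * context switch.
 */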
static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	struct sleepqueue *sq;
	int deferred, error, error2, recurse;

	curthread = _get_curthread();
	if (curthread->wchan != NULL)
		PANIC("thread %p was already on queue.", curthread);

	if (cancel)
		_thr_testcancel(curthread);

	_sleepq_lock(cvp);
	/*
	 * Set __has_user_waiters before unlocking the mutex; this allows
	 * us to check it without locking in pthread_cond_signal().
	 */
	cvp->__has_user_waiters = 1;
	deferred = 0;
	(void)_mutex_cv_unlock(mp, &recurse, &deferred);
	curthread->mutex_obj = mp;
	_sleepq_add(cvp, curthread);
	for (;;) {
		_thr_clear_wake(curthread);
		_sleepq_unlock(cvp);
		if (deferred) {
			deferred = 0;
			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
				(void)_umtx_op_err(&mp->m_lock,
				    UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
				    0, 0);
		}
		if (curthread->nwaiter_defer > 0) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}

		if (cancel)
			_thr_cancel_enter2(curthread, 0);
		error = _thr_sleep(curthread, cvp->kcond.c_clockid, abstime);
		if (cancel)
			_thr_cancel_leave(curthread, 0);

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters = _sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			error2 = _mutex_cv_lock(mp, recurse, false);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else /* this should not happen */
				return (error2);
		} else if (error == ETIMEDOUT) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			break;
		}
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	error2 = _mutex_cv_lock(mp, recurse, false);
	if (error == 0)
		error = error2;
	return (error);
}

static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	int error;

	CHECK_AND_INIT_COND

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
	} else {
		mp = *mutex;
	}

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	if (curthread->attr.sched_policy != SCHED_OTHER ||
	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
	    USYNC_PROCESS_SHARED)) != 0 || CV_PSHARED(cvp))
		return (cond_wait_kernel(cvp, mp, abstime, cancel));
	else
		return (cond_wait_user(cvp, mp, abstime, cancel));
}

int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}

int
__pthread_cond_wait(pthread_cond_t * __restrict cond,
    pthread_mutex_t * __restrict mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}

int
_pthread_cond_timedwait(pthread_cond_t * __restrict cond,
    pthread_mutex_t * __restrict mutex,
    const struct timespec * __restrict abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}
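/*
 * The clock against which abstime is interpreted is the one recorded in
 * kcond.c_clockid at initialization time.  A minimal usage sketch with
 * the standard attribute API (cv, m, attr and ts are illustrative
 * application names):
 *
 *	pthread_condattr_t attr;
 *	pthread_cond_t cv;
 *	struct timespec ts;
 *
 *	pthread_condattr_init(&attr);
 *	pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
 *	pthread_cond_init(&cv, &attr);
 *	...
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *	ts.tv_sec += 1;			(abstime is absolute, not an interval)
 *	error = pthread_cond_timedwait(&cv, &m, &ts);
 *	(with m locked; returns ETIMEDOUT once the deadline passes)
 */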
static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread *curthread = _get_curthread();
	struct pthread *td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int *waddr;
	int pshared;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	curthread = _get_curthread();
	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}

struct broadcast_arg {
	struct pthread *curthread;
	unsigned int *waddrs[MAX_DEFER_WAITERS];
	int count;
};

static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}

static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_broadcast(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}
	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}

int
_pthread_cond_signal(pthread_cond_t * cond)
{

	return (cond_signal_common(cond));
}

int
_pthread_cond_broadcast(pthread_cond_t * cond)
{

	return (cond_broadcast_common(cond));
}
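/*
 * On the userland fast path above, signaling or broadcasting while
 * holding the mutex the waiters used lets the wakeups be deferred
 * (PMUTEX_FLAG_DEFERRED) until the matching unlock, e.g. (m, cv and
 * ready are illustrative application names):
 *
 *	pthread_mutex_lock(&m);
 *	ready = 1;
 *	pthread_cond_broadcast(&cv);	(owner matches: wakeups deferred)
 *	pthread_mutex_unlock(&m);	(waiters actually released here)
 */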