/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include "un-namespace.h"

#include "thr_private.h"

_Static_assert(sizeof(struct pthread_cond) <= PAGE_SIZE,
    "pthread_cond too large");

/*
 * Prototypes
 */
int	__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);
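
/*
 * Usage sketch (illustrative only; the caller code and the names
 * queue_lock, queue_cv and navail are hypothetical): the canonical
 * pattern served by the entry points below.  The wait belongs in a
 * predicate loop, since a waiter can resume without the predicate
 * holding.  A condvar initialized with PTHREAD_COND_INITIALIZER is
 * set up on first use via init_static() below.
 *
 *	#include <pthread.h>
 *
 *	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t queue_cv = PTHREAD_COND_INITIALIZER;
 *	static int navail;
 *
 *	static int
 *	consume(void)
 *	{
 *		int item;
 *
 *		pthread_mutex_lock(&queue_lock);
 *		while (navail == 0)
 *			pthread_cond_wait(&queue_cv, &queue_lock);
 *		item = --navail;
 *		pthread_mutex_unlock(&queue_lock);
 *		return (item);
 *	}
 */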

/*
 * Double underscore versions are cancellation points.  Single underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
 */
__weak_reference(__pthread_cond_wait, pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);

#define	CV_PSHARED(cvp)	(((cvp)->kcond.c_flags & USYNC_PROCESS_SHARED) != 0)

static void
cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
{

	if (cattr == NULL) {
		cvp->kcond.c_clockid = CLOCK_REALTIME;
	} else {
		if (cattr->c_pshared)
			cvp->kcond.c_flags |= USYNC_PROCESS_SHARED;
		cvp->kcond.c_clockid = cattr->c_clockid;
	}
}

static int
cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	struct pthread_cond *cvp;
	const struct pthread_cond_attr *cattr;
	int pshared;

	cattr = cond_attr != NULL ? *cond_attr : NULL;
	if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
		pshared = 0;
		cvp = calloc(1, sizeof(struct pthread_cond));
		if (cvp == NULL)
			return (ENOMEM);
	} else {
		pshared = 1;
		cvp = __thr_pshared_offpage(cond, 1);
		if (cvp == NULL)
			return (EFAULT);
	}

	/*
	 * Initialise the condition variable structure.
	 */
	cond_init_body(cvp, cattr);
	*cond = pshared ? THR_PSHARED_PTR : cvp;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_cond_t *cond)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

	if (*cond == NULL)
		ret = cond_init(cond, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_cond_static_lock);

	return (ret);
}

#define	CHECK_AND_INIT_COND						\
	if (*cond == THR_PSHARED_PTR) {					\
		cvp = __thr_pshared_offpage(cond, 0);			\
		if (cvp == NULL)					\
			return (EINVAL);				\
	} else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) { \
		if (cvp == THR_COND_INITIALIZER) {			\
			int ret;					\
			ret = init_static(_get_curthread(), cond);	\
			if (ret)					\
				return (ret);				\
		} else if (cvp == THR_COND_DESTROYED) {			\
			return (EINVAL);				\
		}							\
		cvp = *cond;						\
	}

int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{

	*cond = NULL;
	return (cond_init(cond, cond_attr));
}

int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond *cvp;
	int error;

	error = 0;
	if (*cond == THR_PSHARED_PTR) {
		cvp = __thr_pshared_offpage(cond, 0);
		if (cvp != NULL)
			__thr_pshared_destroy(cond);
		*cond = THR_COND_DESTROYED;
	} else if ((cvp = *cond) == THR_COND_INITIALIZER) {
		/* nothing */
	} else if (cvp == THR_COND_DESTROYED) {
		error = EINVAL;
	} else {
		cvp = *cond;
		*cond = THR_COND_DESTROYED;
		free(cvp);
	}
	return (error);
}
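
/*
 * Attribute sketch (illustrative only; error handling elided): how a
 * caller reaches the two branches of cond_init() above.  A
 * PTHREAD_PROCESS_SHARED attribute selects the off-page object via
 * __thr_pshared_offpage(); pthread_condattr_setclock() selects the
 * clock stored in kcond.c_clockid and used by timed waits.  For real
 * cross-process use the pthread_cond_t must itself live in shared
 * memory.
 *
 *	pthread_condattr_t attr;
 *	pthread_cond_t shared_cv, mono_cv;
 *
 *	pthread_condattr_init(&attr);
 *	pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	pthread_cond_init(&shared_cv, &attr);
 *
 *	pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
 *	pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
 *	pthread_cond_init(&mono_cv, &attr);
 *	pthread_condattr_destroy(&attr);
 */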

/*
 * Cancellation behavior:
 *   A thread may be canceled at the start of the wait.  If it is
 *   canceled, it did not consume a wakeup from pthread_cond_signal();
 *   conversely, a thread that consumed a wakeup is not canceled.
 *   Cancellation never causes a wakeup from pthread_cond_signal() to
 *   be lost.
 */
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	int error, error2, recurse, robust;

	curthread = _get_curthread();
	robust = _mutex_enter_robust(curthread, mp);

	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0) {
		if (robust)
			_mutex_leave_robust(curthread, mp);
		return (error);
	}

	if (cancel)
		_thr_cancel_enter2(curthread, 0);
	error = _thr_ucond_wait(&cvp->kcond, &mp->m_lock, abstime,
	    CVWAIT_ABSTIME | CVWAIT_CLOCKID);
	if (cancel)
		_thr_cancel_leave(curthread, 0);

	/*
	 * Note that PP and robust mutexes may return interesting
	 * error codes here.
	 */
	if (error == 0) {
		error2 = _mutex_cv_lock(mp, recurse, true);
	} else if (error == EINTR || error == ETIMEDOUT) {
		error2 = _mutex_cv_lock(mp, recurse, true);
		/*
		 * Do not act on cancellation after EOWNERDEAD here.
		 * The cancellation cleanup handler would use the
		 * protected state and unlock the mutex without making
		 * the state consistent, leaving the state
		 * unrecoverable.
		 */
		if (error2 == 0 && cancel) {
			if (robust) {
				_mutex_leave_robust(curthread, mp);
				robust = false;
			}
			_thr_testcancel(curthread);
		}

		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		_mutex_cv_attach(mp, recurse);
		if (cancel) {
			if (robust) {
				_mutex_leave_robust(curthread, mp);
				robust = false;
			}
			_thr_testcancel(curthread);
		}
		error2 = 0;
	}
	if (robust)
		_mutex_leave_robust(curthread, mp);
	return (error2 != 0 ? error2 : error);
}
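
/*
 * Timed-wait sketch (illustrative only; mono_cv is from the attribute
 * sketch above, "lock" and "ready" are hypothetical): the abstime seen
 * by cond_wait_kernel() is an absolute time measured on the clock
 * stored at init time, which is why the kernel wait above passes
 * CVWAIT_ABSTIME | CVWAIT_CLOCKID.
 *
 *	struct timespec abstime;
 *	int error;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &abstime);
 *	abstime.tv_sec += 5;
 *	pthread_mutex_lock(&lock);
 *	while (!ready) {
 *		error = pthread_cond_timedwait(&mono_cv, &lock, &abstime);
 *		if (error == ETIMEDOUT)
 *			break;
 *	}
 *	pthread_mutex_unlock(&lock);
 */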

/*
 * The thread waits on a userland queue whenever possible.  When the
 * thread is signaled or broadcast, it is removed from the queue and its
 * wake address is saved in the waking thread's defer_waiters[] buffer,
 * but it is not actually woken until the mutex is unlocked.
 */
static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	struct sleepqueue *sq;
	int deferred, error, error2, recurse;

	curthread = _get_curthread();
	if (curthread->wchan != NULL)
		PANIC("thread %p was already on queue.", curthread);

	if (cancel)
		_thr_testcancel(curthread);

	_sleepq_lock(cvp);
	/*
	 * Set __has_user_waiters before unlocking the mutex; this
	 * allows pthread_cond_signal() to check it without taking
	 * the sleepqueue lock.
	 */
	cvp->__has_user_waiters = 1;
	deferred = 0;
	(void)_mutex_cv_unlock(mp, &recurse, &deferred);
	curthread->mutex_obj = mp;
	_sleepq_add(cvp, curthread);
	for (;;) {
		_thr_clear_wake(curthread);
		_sleepq_unlock(cvp);
		if (deferred) {
			deferred = 0;
			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
				(void)_umtx_op_err(&mp->m_lock,
				    UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
				    0, 0);
		}
		if (curthread->nwaiter_defer > 0) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}

		if (cancel)
			_thr_cancel_enter2(curthread, 0);
		error = _thr_sleep(curthread, cvp->kcond.c_clockid, abstime);
		if (cancel)
			_thr_cancel_leave(curthread, 0);

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters = _sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			error2 = _mutex_cv_lock(mp, recurse, false);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else	/* this should not happen */
				return (error2);
		} else if (error == ETIMEDOUT) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			break;
		}
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	error2 = _mutex_cv_lock(mp, recurse, false);
	if (error == 0)
		error = error2;
	return (error);
}

static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	int error;

	CHECK_AND_INIT_COND

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
	} else {
		mp = *mutex;
	}

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	if (curthread->attr.sched_policy != SCHED_OTHER ||
	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
	    USYNC_PROCESS_SHARED)) != 0 || CV_PSHARED(cvp))
		return (cond_wait_kernel(cvp, mp, abstime, cancel));
	else
		return (cond_wait_user(cvp, mp, abstime, cancel));
}

int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}

int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}

int
_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}
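
/*
 * Dispatch sketch (illustrative only): cond_wait_common() above takes
 * the kernel path for priority-protected, priority-inheriting, or
 * process-shared mutexes, for process-shared condvars, and for threads
 * not running SCHED_OTHER.  A priority-inheriting mutex, for example,
 * sets UMUTEX_PRIO_INHERIT in m_lock.m_flags and so forces every wait
 * using it through cond_wait_kernel():
 *
 *	pthread_mutexattr_t mattr;
 *	pthread_mutex_t pi_lock;
 *
 *	pthread_mutexattr_init(&mattr);
 *	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
 *	pthread_mutex_init(&pi_lock, &mattr);
 *	pthread_mutexattr_destroy(&mattr);
 */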

static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread *curthread = _get_curthread();
	struct pthread *td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int *waddr;
	int pshared;

	/*
	 * If the condition variable is statically initialized, perform
	 * the dynamic initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}

struct broadcast_arg {
	struct pthread *curthread;
	unsigned int *waddrs[MAX_DEFER_WAITERS];
	int count;
};

static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}

static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform
	 * the dynamic initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_broadcast(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}
	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}

int
_pthread_cond_signal(pthread_cond_t *cond)
{

	return (cond_signal_common(cond));
}

int
_pthread_cond_broadcast(pthread_cond_t *cond)
{

	return (cond_broadcast_common(cond));
}
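
/*
 * Wakeup-deferral sketch (illustrative only; names from the usage
 * sketch at the top of the file): the deferral logic in
 * cond_signal_common() and drop_cb() above targets the common pattern
 * of signaling with the mutex still held.  The waiter is dequeued
 * immediately, but its wake is delayed until the mutex is released, so
 * it is not woken only to block on the mutex again.
 *
 *	pthread_mutex_lock(&queue_lock);
 *	navail++;
 *	pthread_cond_signal(&queue_cv);
 *	pthread_mutex_unlock(&queue_lock);
 */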