/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include "un-namespace.h"

#include "thr_private.h"

_Static_assert(sizeof(struct pthread_cond) <= PAGE_SIZE,
    "pthread_cond too large");

/*
 * Prototypes
 */
int	__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);
/*
 * Double underscore versions are cancellation points.  Single underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
 */
__weak_reference(__pthread_cond_wait, pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);

#define CV_PSHARED(cvp)	(((cvp)->kcond.c_flags & USYNC_PROCESS_SHARED) != 0)

static void
cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
{

	if (cattr == NULL) {
		cvp->kcond.c_clockid = CLOCK_REALTIME;
	} else {
		if (cattr->c_pshared)
			cvp->kcond.c_flags |= USYNC_PROCESS_SHARED;
		cvp->kcond.c_clockid = cattr->c_clockid;
	}
}

static int
cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	struct pthread_cond *cvp;
	const struct pthread_cond_attr *cattr;
	int pshared;

	cattr = cond_attr != NULL ? *cond_attr : NULL;
	if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
		pshared = 0;
		cvp = calloc(1, sizeof(struct pthread_cond));
		if (cvp == NULL)
			return (ENOMEM);
	} else {
		pshared = 1;
		cvp = __thr_pshared_offpage(cond, 1);
		if (cvp == NULL)
			return (EFAULT);
	}

	/*
	 * Initialise the condition variable structure:
	 */
	cond_init_body(cvp, cattr);
	*cond = pshared ? THR_PSHARED_PTR : cvp;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_cond_t *cond)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

	if (*cond == NULL)
		ret = cond_init(cond, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_cond_static_lock);

	return (ret);
}

#define CHECK_AND_INIT_COND						\
	if (*cond == THR_PSHARED_PTR) {					\
		cvp = __thr_pshared_offpage(cond, 0);			\
		if (cvp == NULL)					\
			return (EINVAL);				\
	} else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) { \
		if (cvp == THR_COND_INITIALIZER) {			\
			int ret;					\
			ret = init_static(_get_curthread(), cond);	\
			if (ret)					\
				return (ret);				\
		} else if (cvp == THR_COND_DESTROYED) {			\
			return (EINVAL);				\
		}							\
		cvp = *cond;						\
	}

int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{

	*cond = NULL;
	return (cond_init(cond, cond_attr));
}

int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond *cvp;
	int error;

	error = 0;
	if (*cond == THR_PSHARED_PTR) {
		cvp = __thr_pshared_offpage(cond, 0);
		if (cvp != NULL)
			__thr_pshared_destroy(cond);
		*cond = THR_COND_DESTROYED;
	} else if ((cvp = *cond) == THR_COND_INITIALIZER) {
		/* nothing */
	} else if (cvp == THR_COND_DESTROYED) {
		error = EINVAL;
	} else {
		cvp = *cond;
		*cond = THR_COND_DESTROYED;
		free(cvp);
	}
	return (error);
}
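/*
 * Illustrative sketch (not part of libthr, hence guarded out): how the
 * attribute paths in cond_init() above are exercised from application code.
 * A default-initialized condvar takes the calloc() path; requesting
 * PTHREAD_PROCESS_SHARED takes the __thr_pshared_offpage() path so the
 * kernel-visible state lives in shared memory.  The example_* names are
 * hypothetical.
 */
#if 0
#include <pthread.h>
#include <time.h>

static pthread_cond_t example_cv;

static int
example_cond_setup(void)
{
	pthread_condattr_t attr;
	int error;

	if ((error = pthread_condattr_init(&attr)) != 0)
		return (error);
	/* Time waits against CLOCK_MONOTONIC instead of CLOCK_REALTIME. */
	(void)pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
	/* Allow the condvar to be placed in inter-process shared memory. */
	(void)pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	error = pthread_cond_init(&example_cv, &attr);
	(void)pthread_condattr_destroy(&attr);
	return (error);
}
#endif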
/*
 * Cancellation behavior:
 *   The thread may be canceled at the start of the wait.  If the thread is
 *   canceled then, it did not consume a wakeup from pthread_cond_signal();
 *   otherwise, it is not canceled.
 *   Thread cancellation never causes a wakeup from pthread_cond_signal()
 *   to be lost.
 */
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	int error, error2, recurse, robust;

	curthread = _get_curthread();
	robust = _mutex_enter_robust(curthread, mp);

	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0) {
		if (robust)
			_mutex_leave_robust(curthread, mp);
		return (error);
	}

	if (cancel)
		_thr_cancel_enter2(curthread, 0);
	error = _thr_ucond_wait(&cvp->kcond, &mp->m_lock, abstime,
	    CVWAIT_ABSTIME | CVWAIT_CLOCKID);
	if (cancel)
		_thr_cancel_leave(curthread, 0);

	/*
	 * Note that PP mutex and ROBUST mutex may return
	 * interesting error codes.
	 */
	if (error == 0) {
		error2 = _mutex_cv_lock(mp, recurse, true);
	} else if (error == EINTR || error == ETIMEDOUT) {
		error2 = _mutex_cv_lock(mp, recurse, true);
		/*
		 * Do not do cancellation on EOWNERDEAD here.  The
		 * cancellation cleanup handler would use the protected
		 * state and unlock the mutex without making the state
		 * consistent, leaving the state unrecoverable.
		 */
		if (error2 == 0 && cancel) {
			if (robust) {
				_mutex_leave_robust(curthread, mp);
				robust = false;
			}
			_thr_testcancel(curthread);
		}

		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		_mutex_cv_attach(mp, recurse);
		if (cancel) {
			if (robust) {
				_mutex_leave_robust(curthread, mp);
				robust = false;
			}
			_thr_testcancel(curthread);
		}
		error2 = 0;
	}
	if (robust)
		_mutex_leave_robust(curthread, mp);
	return (error2 != 0 ? error2 : error);
}
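/*
 * Illustrative sketch (not part of libthr, hence guarded out): because a
 * canceled waiter reacquires the mutex before acting on the cancellation
 * (see cond_wait_kernel() above), the canonical cleanup handler may assume
 * the mutex is held and must unlock it.  The example_* names are
 * hypothetical.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t example_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t example_cv = PTHREAD_COND_INITIALIZER;
static int example_ready;

static void
example_cleanup(void *arg)
{

	(void)arg;
	/* Runs with the mutex held, even if the wait was canceled. */
	(void)pthread_mutex_unlock(&example_mtx);
}

static void
example_wait(void)
{

	(void)pthread_mutex_lock(&example_mtx);
	pthread_cleanup_push(example_cleanup, NULL);
	while (!example_ready)
		(void)pthread_cond_wait(&example_cv, &example_mtx);
	pthread_cleanup_pop(1);		/* Unlock via the cleanup handler. */
}
#endif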
/*
 * The thread waits on a userland queue whenever possible.  When the
 * thread is signaled or broadcast, it is removed from the queue and
 * saved in the signaling thread's defer_waiters[] buffer, but it is
 * not woken up until the mutex is unlocked.
 */

static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	struct sleepqueue *sq;
	int deferred, error, error2, recurse;

	curthread = _get_curthread();
	if (curthread->wchan != NULL)
		PANIC("thread %p was already on queue.", curthread);

	if (cancel)
		_thr_testcancel(curthread);

	_sleepq_lock(cvp);
	/*
	 * Set __has_user_waiters before unlocking the mutex; this allows
	 * us to check it without locking in pthread_cond_signal().
	 */
	cvp->__has_user_waiters = 1;
	deferred = 0;
	(void)_mutex_cv_unlock(mp, &recurse, &deferred);
	curthread->mutex_obj = mp;
	_sleepq_add(cvp, curthread);
	for (;;) {
		_thr_clear_wake(curthread);
		_sleepq_unlock(cvp);
		if (deferred) {
			deferred = 0;
			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
				(void)_umtx_op_err(&mp->m_lock,
				    UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
				    0, 0);
		}
		if (curthread->nwaiter_defer > 0) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}

		if (cancel)
			_thr_cancel_enter2(curthread, 0);
		error = _thr_sleep(curthread, cvp->kcond.c_clockid, abstime);
		if (cancel)
			_thr_cancel_leave(curthread, 0);

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters = _sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			error2 = _mutex_cv_lock(mp, recurse, false);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else /* this should not happen */
				return (error2);
		} else if (error == ETIMEDOUT) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			break;
		}
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	error2 = _mutex_cv_lock(mp, recurse, false);
	if (error == 0)
		error = error2;
	return (error);
}

static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	int error;

	CHECK_AND_INIT_COND

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
	} else {
		mp = *mutex;
	}

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	if (curthread->attr.sched_policy != SCHED_OTHER ||
	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
	    USYNC_PROCESS_SHARED)) != 0 || CV_PSHARED(cvp))
		return (cond_wait_kernel(cvp, mp, abstime, cancel));
	else
		return (cond_wait_user(cvp, mp, abstime, cancel));
}

int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}

int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}

int
_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}
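/*
 * Illustrative sketch (not part of libthr, hence guarded out):
 * pthread_cond_timedwait() takes an absolute deadline, validated above,
 * measured against the condvar's clock (CLOCK_REALTIME unless changed via
 * pthread_condattr_setclock()).  A relative timeout therefore has to be
 * converted by the caller.  example_timedwait() is a hypothetical helper;
 * the caller is assumed to hold mtx.
 */
#if 0
#include <pthread.h>
#include <time.h>

static int
example_timedwait(pthread_cond_t *cv, pthread_mutex_t *mtx, int *ready,
    long timeout_ms)
{
	struct timespec abstime;
	int error;

	(void)clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += timeout_ms / 1000;
	abstime.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (abstime.tv_nsec >= 1000000000L) {	/* keep tv_nsec in range */
		abstime.tv_sec++;
		abstime.tv_nsec -= 1000000000L;
	}

	error = 0;
	while (!*ready && error == 0)
		error = pthread_cond_timedwait(cv, mtx, &abstime);
	return (error);		/* 0, or ETIMEDOUT on deadline expiry */
}
#endif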
static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread *curthread = _get_curthread();
	struct pthread *td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int *waddr;
	int pshared;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}

struct broadcast_arg {
	struct pthread *curthread;
	unsigned int *waddrs[MAX_DEFER_WAITERS];
	int count;
};

static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}

static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_broadcast(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}
	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}

int
_pthread_cond_signal(pthread_cond_t *cond)
{

	return (cond_signal_common(cond));
}

int
_pthread_cond_broadcast(pthread_cond_t *cond)
{

	return (cond_broadcast_common(cond));
}
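/*
 * Illustrative sketch (not part of libthr, hence guarded out): the
 * deferred-wakeup path in cond_signal_common() and drop_cb() above targets
 * the common pattern of signaling while holding the mutex.  The waiter's
 * wake address is queued in defer_waiters[] and the wakeup is issued when
 * the mutex is released, so the awakened thread does not immediately block
 * on the still-held mutex.  The example_* names are hypothetical.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t example_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t example_cv = PTHREAD_COND_INITIALIZER;
static int example_ready;

static void
example_produce(void)
{

	(void)pthread_mutex_lock(&example_mtx);
	example_ready = 1;
	/*
	 * Signal while the mutex is held: on the userland path libthr
	 * defers the actual wakeup until the unlock below.
	 */
	(void)pthread_cond_signal(&example_cv);
	(void)pthread_mutex_unlock(&example_mtx);
}
#endif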