/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include "un-namespace.h"

#include "thr_private.h"

_Static_assert(sizeof(struct pthread_cond) <= PAGE_SIZE,
    "pthread_cond too large");

/*
 * Prototypes
 */
int	__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);

/*
 * Double underscore versions are cancellation points.  Single underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
 */
__weak_reference(__pthread_cond_wait, pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
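
/*
 * Illustrative sketch (not part of the library, kept out of compilation):
 * because the double underscore wait above is a cancellation point, an
 * application that may be canceled while blocked in pthread_cond_wait()
 * typically pairs the wait with a cleanup handler.  POSIX guarantees the
 * mutex is reacquired before the handler runs, so unlocking it there is
 * safe.  The names queue_mtx, queue_cv, and queue_len are hypothetical.
 */
#if 0
static pthread_mutex_t queue_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_cv = PTHREAD_COND_INITIALIZER;
static int queue_len;

static void
wait_cleanup(void *arg)
{
	/* Runs only if the waiter is canceled; the mutex is held here. */
	pthread_mutex_unlock(arg);
}

static void
wait_for_item(void)
{
	pthread_mutex_lock(&queue_mtx);
	pthread_cleanup_push(wait_cleanup, &queue_mtx);
	while (queue_len == 0)
		pthread_cond_wait(&queue_cv, &queue_mtx); /* cancel point */
	pthread_cleanup_pop(0);
	queue_len--;
	pthread_mutex_unlock(&queue_mtx);
}
#endif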

#define CV_PSHARED(cvp)	(((cvp)->__flags & USYNC_PROCESS_SHARED) != 0)

static void
cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
{

	if (cattr == NULL) {
		cvp->__clock_id = CLOCK_REALTIME;
	} else {
		if (cattr->c_pshared)
			cvp->__flags |= USYNC_PROCESS_SHARED;
		cvp->__clock_id = cattr->c_clockid;
	}
}

static int
cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	struct pthread_cond *cvp;
	const struct pthread_cond_attr *cattr;
	int pshared;

	cattr = cond_attr != NULL ? *cond_attr : NULL;
	if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
		pshared = 0;
		cvp = calloc(1, sizeof(struct pthread_cond));
		if (cvp == NULL)
			return (ENOMEM);
	} else {
		pshared = 1;
		cvp = __thr_pshared_offpage(cond, 1);
		if (cvp == NULL)
			return (EFAULT);
	}

	/*
	 * Initialise the condition variable structure:
	 */
	cond_init_body(cvp, cattr);
	*cond = pshared ? THR_PSHARED_PTR : cvp;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_cond_t *cond)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

	if (*cond == NULL)
		ret = cond_init(cond, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_cond_static_lock);

	return (ret);
}

#define CHECK_AND_INIT_COND						\
	if (*cond == THR_PSHARED_PTR) {					\
		cvp = __thr_pshared_offpage(cond, 0);			\
		if (cvp == NULL)					\
			return (EINVAL);				\
	} else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) { \
		if (cvp == THR_COND_INITIALIZER) {			\
			int ret;					\
			ret = init_static(_get_curthread(), cond);	\
			if (ret)					\
				return (ret);				\
		} else if (cvp == THR_COND_DESTROYED) {			\
			return (EINVAL);				\
		}							\
		cvp = *cond;						\
	}

int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{

	*cond = NULL;
	return (cond_init(cond, cond_attr));
}

int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond *cvp;
	int error;

	error = 0;
	if (*cond == THR_PSHARED_PTR) {
		cvp = __thr_pshared_offpage(cond, 0);
		if (cvp != NULL)
			__thr_pshared_destroy(cond);
		*cond = THR_COND_DESTROYED;
	} else if ((cvp = *cond) == THR_COND_INITIALIZER) {
		/* nothing */
	} else if (cvp == THR_COND_DESTROYED) {
		error = EINVAL;
	} else {
		cvp = *cond;
		*cond = THR_COND_DESTROYED;
		free(cvp);
	}
	return (error);
}
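
/*
 * Illustrative sketch (not part of the library, kept out of compilation):
 * how the two allocation paths in cond_init() are reached from the public
 * API.  A private condvar gets a calloc'ed structure; requesting
 * PTHREAD_PROCESS_SHARED makes the initializer store THR_PSHARED_PTR and
 * keep the real object on the shared off-page.  The helper name below is
 * hypothetical.
 */
#if 0
static int
make_monotonic_shared_cv(pthread_cond_t *cv)
{
	pthread_condattr_t attr;
	int error;

	error = pthread_condattr_init(&attr);
	if (error != 0)
		return (error);
	/* Waiters will time out against CLOCK_MONOTONIC (cvp->__clock_id). */
	(void)pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
	/* Selects the __thr_pshared_offpage() path in cond_init(). */
	(void)pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	error = pthread_cond_init(cv, &attr);
	(void)pthread_condattr_destroy(&attr);
	return (error);
}
#endif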

/*
 * Cancellation behavior:
 *   The thread may be canceled at the start of the wait.  If the thread
 *   is canceled, then it did not consume a wakeup from
 *   pthread_cond_signal(); otherwise it is not canceled.
 *   Thread cancellation never causes a wakeup from pthread_cond_signal()
 *   to be lost.
 */
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	int error, error2, recurse, robust;

	curthread = _get_curthread();
	robust = _mutex_enter_robust(curthread, mp);

	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0) {
		if (robust)
			_mutex_leave_robust(curthread, mp);
		return (error);
	}

	if (cancel)
		_thr_cancel_enter2(curthread, 0);
	error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
	    (struct umutex *)&mp->m_lock, abstime, CVWAIT_ABSTIME |
	    CVWAIT_CLOCKID);
	if (cancel)
		_thr_cancel_leave(curthread, 0);

	/*
	 * Note that priority-protected (PP) and robust mutexes may
	 * return interesting error codes.
	 */
	if (error == 0) {
		error2 = _mutex_cv_lock(mp, recurse, true);
	} else if (error == EINTR || error == ETIMEDOUT) {
		error2 = _mutex_cv_lock(mp, recurse, true);
		/*
		 * Do not act on cancellation here if EOWNERDEAD was
		 * returned.  The cancellation cleanup handler would use
		 * the protected state and unlock the mutex without making
		 * the state consistent, leaving the state unrecoverable.
		 */
		if (error2 == 0 && cancel)
			_thr_testcancel(curthread);

		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		_mutex_cv_attach(mp, recurse);
		if (cancel)
			_thr_testcancel(curthread);
		error2 = 0;
	}
	if (robust)
		_mutex_leave_robust(curthread, mp);
	return (error2 != 0 ? error2 : error);
}
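
/*
 * Illustrative sketch (not part of the library, kept out of compilation):
 * the reacquisition logic above is what lets the canonical caller-side
 * pattern work.  The mutex is relocked on every return path, including
 * timeout, so the caller can always recheck its predicate under the lock.
 * The names done_mtx, done_cv, and done are hypothetical.
 */
#if 0
static int
wait_until_done(pthread_mutex_t *done_mtx, pthread_cond_t *done_cv,
    int *done, const struct timespec *deadline)
{
	int error = 0;

	pthread_mutex_lock(done_mtx);
	while (!*done && error == 0) {
		/* Returns with done_mtx held, even on ETIMEDOUT. */
		error = pthread_cond_timedwait(done_cv, done_mtx, deadline);
	}
	pthread_mutex_unlock(done_mtx);
	return (error);	/* 0, or ETIMEDOUT if the deadline passed. */
}
#endif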

/*
 * The thread waits on a userland queue whenever possible.  When the
 * thread is signaled or broadcast to, it is removed from the queue and
 * its wake address is saved in the signaling thread's defer_waiters[]
 * buffer, but it is not woken up until the mutex is unlocked.
 */
static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	struct sleepqueue *sq;
	int deferred, error, error2, recurse;

	curthread = _get_curthread();
	if (curthread->wchan != NULL)
		PANIC("thread was already on queue.");

	if (cancel)
		_thr_testcancel(curthread);

	_sleepq_lock(cvp);
	/*
	 * Set __has_user_waiters before unlocking the mutex; this allows
	 * us to check it without locking in pthread_cond_signal().
	 */
	cvp->__has_user_waiters = 1;
	deferred = 0;
	(void)_mutex_cv_unlock(mp, &recurse, &deferred);
	curthread->mutex_obj = mp;
	_sleepq_add(cvp, curthread);
	for (;;) {
		_thr_clear_wake(curthread);
		_sleepq_unlock(cvp);
		if (deferred) {
			deferred = 0;
			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
				(void)_umtx_op_err(&mp->m_lock,
				    UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
				    0, 0);
		}
		if (curthread->nwaiter_defer > 0) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}

		if (cancel)
			_thr_cancel_enter2(curthread, 0);
		error = _thr_sleep(curthread, cvp->__clock_id, abstime);
		if (cancel)
			_thr_cancel_leave(curthread, 0);

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters = _sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			error2 = _mutex_cv_lock(mp, recurse, false);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else /* this should not happen */
				return (error2);
		} else if (error == ETIMEDOUT) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			break;
		}
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	error2 = _mutex_cv_lock(mp, recurse, false);
	if (error == 0)
		error = error2;
	return (error);
}

static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	int error;

	CHECK_AND_INIT_COND

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
	} else {
		mp = *mutex;
	}

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	if (curthread->attr.sched_policy != SCHED_OTHER ||
	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
	    USYNC_PROCESS_SHARED)) != 0 ||
	    (cvp->__flags & USYNC_PROCESS_SHARED) != 0)
		return (cond_wait_kernel(cvp, mp, abstime, cancel));
	else
		return (cond_wait_user(cvp, mp, abstime, cancel));
}

int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}

int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}

int
_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}
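
/*
 * Illustrative sketch (not part of the library, kept out of compilation):
 * the wrappers above take an absolute timespec measured against the
 * condvar's clock.  A caller converts a relative timeout by sampling that
 * clock and adding to it; the condvar here is assumed to have been
 * initialized with pthread_condattr_setclock(..., CLOCK_MONOTONIC).  The
 * helper name is hypothetical.
 */
#if 0
static void
deadline_after_ms(struct timespec *abstime, long ms)
{
	clock_gettime(CLOCK_MONOTONIC, abstime);
	abstime->tv_sec += ms / 1000;
	abstime->tv_nsec += (ms % 1000) * 1000000L;
	if (abstime->tv_nsec >= 1000000000L) {
		abstime->tv_sec++;
		abstime->tv_nsec -= 1000000000L;
	}
}
#endif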
static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread *curthread = _get_curthread();
	struct pthread *td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int *waddr;
	int pshared;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal((struct ucond *)&cvp->__has_kern_waiters);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}

struct broadcast_arg {
	struct pthread *curthread;
	unsigned int *waddrs[MAX_DEFER_WAITERS];
	int count;
};

static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}

static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_broadcast((struct ucond *)&cvp->__has_kern_waiters);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}
	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}

int
_pthread_cond_signal(pthread_cond_t *cond)
{

	return (cond_signal_common(cond));
}

int
_pthread_cond_broadcast(pthread_cond_t *cond)
{

	return (cond_broadcast_common(cond));
}
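
/*
 * Illustrative sketch (not part of the library, kept out of compilation):
 * the PMUTEX_FLAG_DEFERRED handling above is why signaling with the mutex
 * held is cheap here.  The waiter's wake address is parked in
 * defer_waiters[] and the actual wake happens when the signaler unlocks
 * the mutex, so the waiter does not wake up only to block on the
 * still-held lock.  The names q_mtx, q_cv, and q_len are hypothetical.
 */
#if 0
static pthread_mutex_t q_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_cv = PTHREAD_COND_INITIALIZER;
static int q_len;

static void
produce_one(void)
{
	pthread_mutex_lock(&q_mtx);
	q_len++;
	/* Signaler holds q_mtx: libthr defers the wake to the unlock. */
	pthread_cond_signal(&q_cv);
	pthread_mutex_unlock(&q_mtx);
}
#endif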