/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include "un-namespace.h"

#include "thr_private.h"

_Static_assert(sizeof(struct pthread_cond) <= PAGE_SIZE,
    "pthread_cond too large");

/*
 * Prototypes
 */
int	__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);

/*
 * Double underscore versions are cancellation points.  Single underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
 */
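/*
 * Editor's illustration (a hedged sketch, not library code): the weak
 * references below make the double-underscore, cancellable functions the
 * public symbols, while libc-internal callers use the single-underscore
 * names to avoid introducing cancellation points.  Assuming hypothetical
 * internal objects "m", "cv", and "ready", internal code would wait as:
 *
 *	_pthread_mutex_lock(&m);
 *	while (!ready)
 *		_pthread_cond_wait(&cv, &m);	(never a cancellation point)
 *	_pthread_mutex_unlock(&m);
 */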
__weak_reference(__pthread_cond_wait, pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);

#define	CV_PSHARED(cvp)	(((cvp)->kcond.c_flags & USYNC_PROCESS_SHARED) != 0)

static void
cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
{

	if (cattr == NULL) {
		cvp->kcond.c_clockid = CLOCK_REALTIME;
	} else {
		if (cattr->c_pshared)
			cvp->kcond.c_flags |= USYNC_PROCESS_SHARED;
		cvp->kcond.c_clockid = cattr->c_clockid;
	}
}

static int
cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	struct pthread_cond *cvp;
	const struct pthread_cond_attr *cattr;
	int pshared;

	cattr = cond_attr != NULL ? *cond_attr : NULL;
	if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
		pshared = 0;
		cvp = calloc(1, sizeof(struct pthread_cond));
		if (cvp == NULL)
			return (ENOMEM);
	} else {
		pshared = 1;
		cvp = __thr_pshared_offpage(cond, 1);
		if (cvp == NULL)
			return (EFAULT);
	}

	/*
	 * Initialise the condition variable structure:
	 */
	cond_init_body(cvp, cattr);
	*cond = pshared ? THR_PSHARED_PTR : cvp;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_cond_t *cond)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

	if (*cond == NULL)
		ret = cond_init(cond, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_cond_static_lock);

	return (ret);
}

#define	CHECK_AND_INIT_COND						\
	if (*cond == THR_PSHARED_PTR) {					\
		cvp = __thr_pshared_offpage(cond, 0);			\
		if (cvp == NULL)					\
			return (EINVAL);				\
	} else if (__predict_false((cvp = (*cond)) <=			\
	    THR_COND_DESTROYED)) {					\
		if (cvp == THR_COND_INITIALIZER) {			\
			int ret;					\
			ret = init_static(_get_curthread(), cond);	\
			if (ret)					\
				return (ret);				\
		} else if (cvp == THR_COND_DESTROYED) {			\
			return (EINVAL);				\
		}							\
		cvp = *cond;						\
	}

int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{

	*cond = NULL;
	return (cond_init(cond, cond_attr));
}

int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond *cvp;
	int error;

	error = 0;
	if (*cond == THR_PSHARED_PTR) {
		cvp = __thr_pshared_offpage(cond, 0);
		if (cvp != NULL)
			__thr_pshared_destroy(cond);
		*cond = THR_COND_DESTROYED;
	} else if ((cvp = *cond) == THR_COND_INITIALIZER) {
		/* nothing */
	} else if (cvp == THR_COND_DESTROYED) {
		error = EINVAL;
	} else {
		cvp = *cond;
		*cond = THR_COND_DESTROYED;
		free(cvp);
	}
	return (error);
}

/*
 * Cancellation behavior:
 *   A thread may be canceled at the start of a wait.  If the thread is
 *   canceled, it did not consume a wakeup from pthread_cond_signal();
 *   otherwise, it is not canceled.
 *   Thread cancellation never causes a wakeup from pthread_cond_signal()
 *   to be lost.
 */
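/*
 * Editor's illustration of the guarantee above (a hedged sketch, not
 * library code): a canceled waiter re-acquires the mutex before acting
 * on the cancellation, so application code can release it from a cleanup
 * handler.  "m", "cv", and "ready" are assumed caller-defined:
 *
 *	static void
 *	unlock_cb(void *arg)
 *	{
 *		pthread_mutex_unlock(arg);
 *	}
 *
 *	pthread_mutex_lock(&m);
 *	pthread_cleanup_push(unlock_cb, &m);
 *	while (!ready)
 *		pthread_cond_wait(&cv, &m);
 *	pthread_cleanup_pop(1);
 */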
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	int error, error2, recurse, robust;

	curthread = _get_curthread();
	robust = _mutex_enter_robust(curthread, mp);

	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0) {
		if (robust)
			_mutex_leave_robust(curthread, mp);
		return (error);
	}

	if (cancel)
		_thr_cancel_enter2(curthread, 0);
	error = _thr_ucond_wait(&cvp->kcond, &mp->m_lock, abstime,
	    CVWAIT_ABSTIME | CVWAIT_CLOCKID);
	if (cancel)
		_thr_cancel_leave(curthread, 0);

	/*
	 * Note that PP mutex and ROBUST mutex may return
	 * interesting error codes.
	 */
	if (error == 0) {
		error2 = _mutex_cv_lock(mp, recurse, true);
	} else if (error == EINTR || error == ETIMEDOUT) {
		error2 = _mutex_cv_lock(mp, recurse, true);
		/*
		 * Do not do cancellation on EOWNERDEAD there.  The
		 * cancellation cleanup handler will use the protected
		 * state and unlock the mutex without making the state
		 * consistent, and the state will be unrecoverable.
		 */
		if (error2 == 0 && cancel)
			_thr_testcancel(curthread);

		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		_mutex_cv_attach(mp, recurse);
		if (cancel)
			_thr_testcancel(curthread);
		error2 = 0;
	}
	if (robust)
		_mutex_leave_robust(curthread, mp);
	return (error2 != 0 ? error2 : error);
}
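/*
 * Editor's illustration (a hedged sketch, not library code): the kernel
 * path above lets a wait on a robust mutex report EOWNERDEAD when the
 * previous owner died.  Per POSIX, the caller is expected to repair the
 * shared state and call pthread_mutex_consistent() before unlocking.
 * "m", "cv", "ready", and repair_state() are assumed caller-defined:
 *
 *	int error;
 *
 *	error = pthread_mutex_lock(&m);
 *	if (error == EOWNERDEAD) {
 *		repair_state();
 *		pthread_mutex_consistent(&m);
 *	}
 *	while (!ready) {
 *		error = pthread_cond_wait(&cv, &m);
 *		if (error == EOWNERDEAD) {
 *			repair_state();
 *			pthread_mutex_consistent(&m);
 *		}
 *	}
 *	pthread_mutex_unlock(&m);
 */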
/*
 * The thread waits on a userland queue whenever possible.  When the
 * thread is signaled or broadcast to, it is removed from the queue and
 * saved in curthread's defer_waiters[] buffer, but it is not woken up
 * until the mutex is unlocked.
 */

static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread;
	struct sleepqueue *sq;
	int deferred, error, error2, recurse;

	curthread = _get_curthread();
	if (curthread->wchan != NULL)
		PANIC("thread %p was already on queue.", curthread);

	if (cancel)
		_thr_testcancel(curthread);

	_sleepq_lock(cvp);
	/*
	 * Set __has_user_waiters before unlocking the mutex; this allows
	 * us to check it without locking in pthread_cond_signal().
	 */
	cvp->__has_user_waiters = 1;
	deferred = 0;
	(void)_mutex_cv_unlock(mp, &recurse, &deferred);
	curthread->mutex_obj = mp;
	_sleepq_add(cvp, curthread);
	for (;;) {
		_thr_clear_wake(curthread);
		_sleepq_unlock(cvp);
		if (deferred) {
			deferred = 0;
			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
				(void)_umtx_op_err(&mp->m_lock,
				    UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
				    0, 0);
		}
		if (curthread->nwaiter_defer > 0) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}

		if (cancel)
			_thr_cancel_enter2(curthread, 0);
		error = _thr_sleep(curthread, cvp->kcond.c_clockid, abstime);
		if (cancel)
			_thr_cancel_leave(curthread, 0);

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			error2 = _mutex_cv_lock(mp, recurse, false);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else /* this should not happen */
				return (error2);
		} else if (error == ETIMEDOUT) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			break;
		}
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	error2 = _mutex_cv_lock(mp, recurse, false);
	if (error == 0)
		error = error2;
	return (error);
}

static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	int error;

	CHECK_AND_INIT_COND

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
	} else {
		mp = *mutex;
	}

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	if (curthread->attr.sched_policy != SCHED_OTHER ||
	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
	    USYNC_PROCESS_SHARED)) != 0 || CV_PSHARED(cvp))
		return (cond_wait_kernel(cvp, mp, abstime, cancel));
	else
		return (cond_wait_user(cvp, mp, abstime, cancel));
}

int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}

int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}

int
_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}
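/*
 * Editor's illustration (a hedged sketch, not library code): abstime is
 * an absolute deadline measured against the condvar's clock
 * (kcond.c_clockid, set via pthread_condattr_setclock()).  A caller
 * waiting roughly five seconds on a monotonic-clock condvar might do
 * the following, assuming caller-defined "m" and "ready":
 *
 *	pthread_condattr_t attr;
 *	pthread_cond_t cv;
 *	struct timespec deadline;
 *	int error;
 *
 *	pthread_condattr_init(&attr);
 *	pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
 *	pthread_cond_init(&cv, &attr);
 *
 *	clock_gettime(CLOCK_MONOTONIC, &deadline);
 *	deadline.tv_sec += 5;
 *	pthread_mutex_lock(&m);
 *	error = 0;
 *	while (!ready && error != ETIMEDOUT)
 *		error = pthread_cond_timedwait(&cv, &m, &deadline);
 *	pthread_mutex_unlock(&m);
 */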
static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread *curthread = _get_curthread();
	struct pthread *td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int *waddr;
	int pshared;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}

struct broadcast_arg {
	struct pthread *curthread;
	unsigned int *waddrs[MAX_DEFER_WAITERS];
	int count;
};

static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERRED;
	} else {
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}

static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_broadcast(&cvp->kcond);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}
	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}

int
_pthread_cond_signal(pthread_cond_t *cond)
{

	return (cond_signal_common(cond));
}

int
_pthread_cond_broadcast(pthread_cond_t *cond)
{

	return (cond_broadcast_common(cond));
}
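/*
 * Editor's note (a hedged sketch, not library code): the deferral logic
 * above means that signaling while holding the associated mutex does not
 * wake the waiter immediately; the wakeup is batched in defer_waiters[]
 * and issued when the mutex is unlocked, avoiding a futile wakeup into a
 * still-held lock.  A producer can therefore signal under the lock
 * without penalty.  "m", "cv", "queue", "item", and enqueue() are
 * assumed caller-defined:
 *
 *	pthread_mutex_lock(&m);
 *	enqueue(&queue, item);
 *	pthread_cond_signal(&cv);	(wakeup deferred until the unlock)
 *	pthread_mutex_unlock(&m);
 */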