/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include "un-namespace.h"

#include "thr_private.h"

_Static_assert(sizeof(struct pthread_cond) <= PAGE_SIZE,
    "pthread_cond too large");

/*
 * Prototypes
 */
int	__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);

/*
 * Double underscore versions are cancellation points.  Single underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
 */
__weak_reference(__pthread_cond_wait, pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
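
/*
 * Illustrative sketch only, not part of libthr: creating a process-shared
 * condition variable, which drives cond_init() below into the
 * __thr_pshared_offpage() path (the application-visible pthread_cond_t is
 * then set to THR_PSHARED_PTR instead of a pointer to malloc'ed storage,
 * so the handle itself must live in shared memory).  The example_* name
 * and the guard macro THR_COND_EXAMPLES are hypothetical.
 */
#ifdef THR_COND_EXAMPLES
#include <sys/mman.h>

static pthread_cond_t *
example_pshared_cond(void)
{
	pthread_condattr_t ca;
	pthread_cond_t *cv;

	/* The pthread_cond_t handle must be placed in shared memory. */
	cv = mmap(NULL, sizeof(*cv), PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_SHARED, -1, 0);
	if (cv == MAP_FAILED)
		return (NULL);
	pthread_condattr_init(&ca);
	pthread_condattr_setpshared(&ca, PTHREAD_PROCESS_SHARED);
	pthread_cond_init(cv, &ca);	/* takes the pshared branch */
	pthread_condattr_destroy(&ca);
	return (cv);
}
#endif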

#define CV_PSHARED(cvp)	(((cvp)->__flags & USYNC_PROCESS_SHARED) != 0)

static void
cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
{

	if (cattr == NULL) {
		cvp->__clock_id = CLOCK_REALTIME;
	} else {
		if (cattr->c_pshared)
			cvp->__flags |= USYNC_PROCESS_SHARED;
		cvp->__clock_id = cattr->c_clockid;
	}
}

static int
cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	struct pthread_cond *cvp;
	const struct pthread_cond_attr *cattr;
	int pshared;

	cattr = cond_attr != NULL ? *cond_attr : NULL;
	if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
		pshared = 0;
		cvp = calloc(1, sizeof(struct pthread_cond));
		if (cvp == NULL)
			return (ENOMEM);
	} else {
		pshared = 1;
		cvp = __thr_pshared_offpage(cond, 1);
		if (cvp == NULL)
			return (EFAULT);
	}

	/*
	 * Initialize the condition variable structure.
	 */
	cond_init_body(cvp, cattr);
	*cond = pshared ? THR_PSHARED_PTR : cvp;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_cond_t *cond)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

	if (*cond == NULL)
		ret = cond_init(cond, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_cond_static_lock);

	return (ret);
}

#define CHECK_AND_INIT_COND						\
	if (*cond == THR_PSHARED_PTR) {					\
		cvp = __thr_pshared_offpage(cond, 0);			\
		if (cvp == NULL)					\
			return (EINVAL);				\
	} else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) { \
		if (cvp == THR_COND_INITIALIZER) {			\
			int ret;					\
			ret = init_static(_get_curthread(), cond);	\
			if (ret)					\
				return (ret);				\
		} else if (cvp == THR_COND_DESTROYED) {			\
			return (EINVAL);				\
		}							\
		cvp = *cond;						\
	}

int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{

	*cond = NULL;
	return (cond_init(cond, cond_attr));
}

int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond *cvp;
	int error;

	error = 0;
	if (*cond == THR_PSHARED_PTR) {
		cvp = __thr_pshared_offpage(cond, 0);
		if (cvp != NULL)
			__thr_pshared_destroy(cond);
		*cond = THR_COND_DESTROYED;
	} else if ((cvp = *cond) == THR_COND_INITIALIZER) {
		/* nothing */
	} else if (cvp == THR_COND_DESTROYED) {
		error = EINVAL;
	} else {
		cvp = *cond;
		*cond = THR_COND_DESTROYED;
		free(cvp);
	}
	return (error);
}

/*
 * Cancellation behavior:
 *   A thread may be canceled at the start of the wait.  If the thread is
 *   canceled, it did not consume a wakeup from pthread_cond_signal();
 *   otherwise, it is not canceled.  Thread cancellation never causes a
 *   wakeup from pthread_cond_signal() to be lost.
 */
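
/*
 * Illustrative sketch only, not part of libthr: what the cancellation
 * contract above means for a caller.  If the waiter is canceled inside
 * pthread_cond_wait(), the mutex is re-acquired before cancellation
 * cleanup handlers run, so the handler below can safely unlock it, and a
 * pthread_cond_signal() aimed at this waiter is never lost.  The
 * example_* names and the guard macro THR_COND_EXAMPLES are hypothetical.
 */
#ifdef THR_COND_EXAMPLES
static void
example_unlock_cb(void *arg)
{

	pthread_mutex_unlock(arg);
}

static void
example_cancelable_wait(pthread_cond_t *cv, pthread_mutex_t *mp, int *ready)
{

	pthread_mutex_lock(mp);
	pthread_cleanup_push(example_unlock_cb, mp);
	while (*ready == 0)
		pthread_cond_wait(cv, mp);	/* cancellation point */
	pthread_cleanup_pop(1);			/* pop handler and unlock */
}
#endif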
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	int recurse;
	int error, error2 = 0;

	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0)
		return (error);

	if (cancel) {
		_thr_cancel_enter2(curthread, 0);
		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
		    (struct umutex *)&mp->m_lock, abstime,
		    CVWAIT_ABSTIME | CVWAIT_CLOCKID);
		_thr_cancel_leave(curthread, 0);
	} else {
		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
		    (struct umutex *)&mp->m_lock, abstime,
		    CVWAIT_ABSTIME | CVWAIT_CLOCKID);
	}

	/*
	 * Note that priority-protected (PP) and robust mutexes may return
	 * interesting error codes when relocked here.
	 */
	if (error == 0) {
		error2 = _mutex_cv_lock(mp, recurse);
	} else if (error == EINTR || error == ETIMEDOUT) {
		error2 = _mutex_cv_lock(mp, recurse);
		if (error2 == 0 && cancel)
			_thr_testcancel(curthread);
		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		error2 = _mutex_cv_attach(mp, recurse);
		if (error2 == 0 && cancel)
			_thr_testcancel(curthread);
	}
	return (error2 != 0 ? error2 : error);
}
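
/*
 * Illustrative sketch only, not part of libthr: one of the "interesting
 * error codes" mentioned in cond_wait_kernel().  With a robust mutex
 * (initialized with PTHREAD_MUTEX_ROBUST), the relock on the way out of
 * the wait can report EOWNERDEAD; the caller then owns the mutex but must
 * repair the protected state and mark the mutex consistent before relying
 * on it.  The example_* name and the guard macro THR_COND_EXAMPLES are
 * hypothetical.
 */
#ifdef THR_COND_EXAMPLES
static int
example_robust_wait(pthread_cond_t *cv, pthread_mutex_t *mp, int *ready)
{
	int error;

	error = pthread_mutex_lock(mp);
	if (error == EOWNERDEAD) {
		/* Previous owner died; recover state, mark consistent. */
		pthread_mutex_consistent(mp);
		error = 0;
	} else if (error != 0)
		return (error);
	while (*ready == 0) {
		error = pthread_cond_wait(cv, mp);
		if (error == EOWNERDEAD) {
			pthread_mutex_consistent(mp);
			error = 0;
		} else if (error != 0)
			break;
	}
	pthread_mutex_unlock(mp);
	return (error);
}
#endif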

/*
 * The thread waits on a userland queue whenever possible.  When the
 * thread is signaled or broadcast, it is removed from the queue and its
 * wake address is saved in the waker's defer_waiters[] buffer, but the
 * thread is not woken up until the mutex is unlocked.
 */
static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	struct sleepqueue *sq;
	int recurse;
	int error;
	int defered;

	if (curthread->wchan != NULL)
		PANIC("thread was already on queue.");

	if (cancel)
		_thr_testcancel(curthread);

	_sleepq_lock(cvp);
	/*
	 * Set __has_user_waiters before unlocking the mutex; this allows
	 * pthread_cond_signal() to check the flag without taking the
	 * sleepqueue lock.
	 */
	cvp->__has_user_waiters = 1;
	defered = 0;
	(void)_mutex_cv_unlock(mp, &recurse, &defered);
	curthread->mutex_obj = mp;
	_sleepq_add(cvp, curthread);
	for (;;) {
		_thr_clear_wake(curthread);
		_sleepq_unlock(cvp);
		if (defered) {
			defered = 0;
			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
				(void)_umtx_op_err(&mp->m_lock,
				    UMTX_OP_MUTEX_WAKE2,
				    mp->m_lock.m_flags, 0, 0);
		}
		if (curthread->nwaiter_defer > 0) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}

		if (cancel) {
			_thr_cancel_enter2(curthread, 0);
			error = _thr_sleep(curthread, cvp->__clock_id, abstime);
			_thr_cancel_leave(curthread, 0);
		} else {
			error = _thr_sleep(curthread, cvp->__clock_id, abstime);
		}

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			_mutex_cv_lock(mp, recurse);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else /* this should not happen */
				return (0);
		} else if (error == ETIMEDOUT) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			break;
		}
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	_mutex_cv_lock(mp, recurse);
	return (error);
}

static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	int error;

	CHECK_AND_INIT_COND

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
	} else {
		mp = *mutex;
	}

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	if (curthread->attr.sched_policy != SCHED_OTHER ||
	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
	    USYNC_PROCESS_SHARED)) != 0 ||
	    (cvp->__flags & USYNC_PROCESS_SHARED) != 0)
		return (cond_wait_kernel(cvp, mp, abstime, cancel));
	else
		return (cond_wait_user(cvp, mp, abstime, cancel));
}

int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}

int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}

int
_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}
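
/*
 * Illustrative sketch only, not part of libthr: caller-side use of the
 * timedwait path above.  abstime is an absolute deadline measured against
 * the condvar's clock (CLOCK_MONOTONIC here, assuming cv was initialized
 * with pthread_condattr_setclock(..., CLOCK_MONOTONIC)); the
 * CVWAIT_ABSTIME/CVWAIT_CLOCKID flags passed to the kernel carry the same
 * information.  The example_* name and the guard macro THR_COND_EXAMPLES
 * are hypothetical.
 */
#ifdef THR_COND_EXAMPLES
#include <time.h>

static int
example_timed_wait(pthread_cond_t *cv, pthread_mutex_t *mp, int *ready,
    time_t timeout_sec)
{
	struct timespec abstime;
	int error;

	clock_gettime(CLOCK_MONOTONIC, &abstime);
	abstime.tv_sec += timeout_sec;	/* relative timeout -> deadline */

	error = 0;
	pthread_mutex_lock(mp);
	while (*ready == 0 && error == 0)
		error = pthread_cond_timedwait(cv, mp, &abstime);
	pthread_mutex_unlock(mp);
	return (error);		/* 0, or ETIMEDOUT once the deadline passes */
}
#endif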
static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread *curthread = _get_curthread();
	struct pthread *td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int *waddr;
	int pshared;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal((struct ucond *)&cvp->__has_kern_waiters);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
	if (mp->m_owner == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERED;
	} else {
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}

struct broadcast_arg {
	struct pthread *curthread;
	unsigned int *waddrs[MAX_DEFER_WAITERS];
	int count;
};

static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (mp->m_owner == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERED;
	} else {
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}

static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_broadcast((struct ucond *)&cvp->__has_kern_waiters);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}
	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}

int
_pthread_cond_signal(pthread_cond_t *cond)
{

	return (cond_signal_common(cond));
}

int
_pthread_cond_broadcast(pthread_cond_t *cond)
{

	return (cond_broadcast_common(cond));
}
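
/*
 * Illustrative sketch only, not part of libthr: the calling pattern the
 * deferred-wakeup machinery above optimizes.  Signaling while still
 * holding the mutex does not wake the waiter immediately; its wake
 * address is parked in defer_waiters[] and the wakeup is issued when the
 * mutex is unlocked, so the waiter never wakes only to block again on the
 * still-held mutex.  The example_* name and the guard macro
 * THR_COND_EXAMPLES are hypothetical.
 */
#ifdef THR_COND_EXAMPLES
static void
example_producer(pthread_cond_t *cv, pthread_mutex_t *mp, int *ready)
{

	pthread_mutex_lock(mp);
	*ready = 1;
	pthread_cond_signal(cv);	/* wakeup deferred: mutex still held */
	pthread_mutex_unlock(mp);	/* deferred wakeup is issued here */
}
#endif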