/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include "un-namespace.h"

#include "thr_private.h"

/*
 * Prototypes
 */
int	__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);

/*
 * Double underscore versions are cancellation points.  Single underscore
 * versions are not and are provided for libc internal usage (which
 * must not introduce cancellation points).
 */
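/*
 * Illustrative sketch of the convention above (hypothetical names cv
 * and m, assuming only the standard POSIX API): an application call
 * such as
 *
 *	pthread_cond_wait(&cv, &m);
 *
 * binds through the weak references below to __pthread_cond_wait()
 * and is therefore a cancellation point, while libc-internal code
 * calls _pthread_cond_wait() directly and cannot be cancelled there.
 */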
__weak_reference(__pthread_cond_wait, pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);

#define CV_PSHARED(cvp)	(((cvp)->__flags & USYNC_PROCESS_SHARED) != 0)

static int
cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	struct pthread_cond *cvp;
	int error = 0;

	if ((cvp = (pthread_cond_t)
	    calloc(1, sizeof(struct pthread_cond))) == NULL) {
		error = ENOMEM;
	} else {
		/*
		 * Initialise the condition variable structure:
		 */
		if (cond_attr == NULL || *cond_attr == NULL) {
			cvp->__clock_id = CLOCK_REALTIME;
		} else {
			if ((*cond_attr)->c_pshared)
				cvp->__flags |= USYNC_PROCESS_SHARED;
			cvp->__clock_id = (*cond_attr)->c_clockid;
		}
		*cond = cvp;
	}
	return (error);
}

static int
init_static(struct pthread *thread, pthread_cond_t *cond)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

	if (*cond == NULL)
		ret = cond_init(cond, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_cond_static_lock);

	return (ret);
}

#define CHECK_AND_INIT_COND						\
	if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) {	\
		if (cvp == THR_COND_INITIALIZER) {			\
			int ret;					\
			ret = init_static(_get_curthread(), cond);	\
			if (ret)					\
				return (ret);				\
		} else if (cvp == THR_COND_DESTROYED) {			\
			return (EINVAL);				\
		}							\
		cvp = *cond;						\
	}

int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{

	*cond = NULL;
	return (cond_init(cond, cond_attr));
}

int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond *cvp;
	int error = 0;

	if ((cvp = *cond) == THR_COND_INITIALIZER)
		error = 0;
	else if (cvp == THR_COND_DESTROYED)
		error = EINVAL;
	else {
		cvp = *cond;
		*cond = THR_COND_DESTROYED;

		/*
		 * Free the memory allocated for the condition
		 * variable structure:
		 */
		free(cvp);
	}
	return (error);
}
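/*
 * Illustrative sketch of the two initialization paths above
 * (hypothetical variable names):
 *
 *	static pthread_cond_t cv1 = PTHREAD_COND_INITIALIZER;
 *
 * leaves cv1 equal to THR_COND_INITIALIZER until the first operation
 * on it reaches CHECK_AND_INIT_COND, which calls init_static() to
 * allocate the real structure under _cond_static_lock, whereas
 *
 *	pthread_cond_t cv2;
 *	int error = pthread_cond_init(&cv2, NULL);
 *
 * allocates it immediately via cond_init() and may fail with ENOMEM.
 */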
/*
 * Cancellation behavior:
 *   A thread may be canceled at the start of the wait; if it is
 *   canceled, it did not consume a wakeup from pthread_cond_signal().
 *   Thread cancellation never causes a wakeup from
 *   pthread_cond_signal() to be lost.
 */
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
	const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	int recurse;
	int error, error2 = 0;

	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0)
		return (error);

	if (cancel) {
		_thr_cancel_enter2(curthread, 0);
		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
		    (struct umutex *)&mp->m_lock, abstime,
		    CVWAIT_ABSTIME|CVWAIT_CLOCKID);
		_thr_cancel_leave(curthread, 0);
	} else {
		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
		    (struct umutex *)&mp->m_lock, abstime,
		    CVWAIT_ABSTIME|CVWAIT_CLOCKID);
	}

	/*
	 * Note that PP mutexes and ROBUST mutexes may return
	 * interesting error codes.
	 */
	if (error == 0) {
		error2 = _mutex_cv_lock(mp, recurse);
	} else if (error == EINTR || error == ETIMEDOUT) {
		error2 = _mutex_cv_lock(mp, recurse);
		if (error2 == 0 && cancel)
			_thr_testcancel(curthread);
		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		error2 = _mutex_cv_attach(mp, recurse);
		if (error2 == 0 && cancel)
			_thr_testcancel(curthread);
	}
	return (error2 != 0 ? error2 : error);
}

/*
 * The thread waits on a userland queue whenever possible.  When the
 * thread is signaled or broadcast, it is removed from the queue and
 * saved in the signaling thread's defer_waiters[] buffer, but it is
 * not woken up until the mutex is unlocked.
 */
static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
	const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	struct sleepqueue *sq;
	int recurse;
	int error;
	int defered;

	if (curthread->wchan != NULL)
		PANIC("thread was already on queue.");

	if (cancel)
		_thr_testcancel(curthread);

	_sleepq_lock(cvp);
	/*
	 * Set __has_user_waiters before unlocking the mutex; this allows
	 * pthread_cond_signal() to check it without taking the sleepqueue
	 * lock.
	 */
	cvp->__has_user_waiters = 1;
	defered = 0;
	(void)_mutex_cv_unlock(mp, &recurse, &defered);
	curthread->mutex_obj = mp;
	_sleepq_add(cvp, curthread);
	for (;;) {
		_thr_clear_wake(curthread);
		_sleepq_unlock(cvp);
		if (defered) {
			defered = 0;
			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
				(void)_umtx_op_err(&mp->m_lock,
				    UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
				    0, 0);
		}
		if (curthread->nwaiter_defer > 0) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}

		if (cancel) {
			_thr_cancel_enter2(curthread, 0);
			error = _thr_sleep(curthread, cvp->__clock_id, abstime);
			_thr_cancel_leave(curthread, 0);
		} else {
			error = _thr_sleep(curthread, cvp->__clock_id, abstime);
		}

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			_mutex_cv_lock(mp, recurse);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else /* this should not happen */
				return (0);
		} else if (error == ETIMEDOUT) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			break;
		}
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	_mutex_cv_lock(mp, recurse);
	return (error);
}

static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	int error;

	CHECK_AND_INIT_COND

	mp = *mutex;

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	if (curthread->attr.sched_policy != SCHED_OTHER ||
	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT|UMUTEX_PRIO_INHERIT|
	    USYNC_PROCESS_SHARED)) != 0 ||
	    (cvp->__flags & USYNC_PROCESS_SHARED) != 0)
		return (cond_wait_kernel(cvp, mp, abstime, cancel));
	else
		return (cond_wait_user(cvp, mp, abstime, cancel));
}
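/*
 * Illustrative sketch of the path selection in cond_wait_common()
 * (hypothetical names): a SCHED_OTHER thread waiting on a
 * process-private condvar with a plain mutex takes cond_wait_user();
 * requesting priority inheritance on the mutex forces the kernel path:
 *
 *	pthread_mutexattr_t ma;
 *	pthread_mutexattr_init(&ma);
 *	pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_INHERIT);
 *	pthread_mutex_init(&m, &ma);
 *	pthread_cond_wait(&cv, &m);	-> cond_wait_kernel()
 */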
int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}

int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}

int
_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}

static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread *curthread = _get_curthread();
	struct pthread *td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int *waddr;
	int pshared;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal((struct ucond *)&cvp->__has_kern_waiters);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
	if (mp->m_owner == curthread) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERED;
	} else {
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}

struct broadcast_arg {
	struct pthread *curthread;
	unsigned int *waddrs[MAX_DEFER_WAITERS];
	int count;
};

static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (mp->m_owner == curthread) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERED;
	} else {
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}
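/*
 * Illustrative note on the deferral in cond_signal_common() and
 * drop_cb() above (hypothetical names): when the signaling thread
 * still owns the waiter's mutex, waking the waiter immediately would
 * only make it block on the mutex again, so the wakeup is queued in
 * defer_waiters[] and performed once the mutex is released:
 *
 *	pthread_mutex_lock(&m);
 *	ready = 1;
 *	pthread_cond_signal(&cv);	wakeup is deferred here
 *	pthread_mutex_unlock(&m);	waiter actually runs from here
 */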
static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_broadcast((struct ucond *)&cvp->__has_kern_waiters);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}
	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}

int
_pthread_cond_signal(pthread_cond_t *cond)
{

	return (cond_signal_common(cond));
}

int
_pthread_cond_broadcast(pthread_cond_t *cond)
{

	return (cond_broadcast_common(cond));
}
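/*
 * End-to-end usage sketch for the routines above (hypothetical names,
 * assuming only the standard POSIX API): the canonical predicate loop
 * that tolerates spurious wakeups.
 *
 *	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
 *	static int ready;
 *
 *	// waiter
 *	pthread_mutex_lock(&m);
 *	while (!ready)
 *		pthread_cond_wait(&cv, &m);
 *	pthread_mutex_unlock(&m);
 *
 *	// signaler
 *	pthread_mutex_lock(&m);
 *	ready = 1;
 *	pthread_cond_signal(&cv);
 *	pthread_mutex_unlock(&m);
 */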