/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include "un-namespace.h"

#include "thr_private.h"

/*
 * Prototypes
 */
int	__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);

/*
 * Double-underscore versions are cancellation points.  Single-underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
 */
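/*
 * Illustration of the naming scheme above (a caller-side sketch, not
 * code in this file): the public name is a weak reference to the
 * cancellation-point flavor, while libc internals call the
 * single-underscore flavor directly so they never act on a pending
 * cancellation request:
 *
 *	// application code; resolves to __pthread_cond_wait(), which
 *	// is a cancellation point
 *	pthread_cond_wait(&cv, &mtx);
 *
 *	// libc-internal code; never a cancellation point
 *	_pthread_cond_wait(&cv, &mtx);
 */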
__weak_reference(__pthread_cond_wait, pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);

#define CV_PSHARED(cvp)	(((cvp)->__flags & USYNC_PROCESS_SHARED) != 0)

static int
cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	struct pthread_cond *cvp;
	int error = 0;

	if ((cvp = (pthread_cond_t)
	    calloc(1, sizeof(struct pthread_cond))) == NULL) {
		error = ENOMEM;
	} else {
		/*
		 * Initialise the condition variable structure:
		 */
		if (cond_attr == NULL || *cond_attr == NULL) {
			cvp->__clock_id = CLOCK_REALTIME;
		} else {
			if ((*cond_attr)->c_pshared)
				cvp->__flags |= USYNC_PROCESS_SHARED;
			cvp->__clock_id = (*cond_attr)->c_clockid;
		}
		*cond = cvp;
	}
	return (error);
}

static int
init_static(struct pthread *thread, pthread_cond_t *cond)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

	if (*cond == NULL)
		ret = cond_init(cond, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_cond_static_lock);

	return (ret);
}

#define CHECK_AND_INIT_COND						\
	if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) {	\
		if (cvp == THR_COND_INITIALIZER) {			\
			int ret;					\
			ret = init_static(_get_curthread(), cond);	\
			if (ret)					\
				return (ret);				\
		} else if (cvp == THR_COND_DESTROYED) {			\
			return (EINVAL);				\
		}							\
		cvp = *cond;						\
	}

int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{

	*cond = NULL;
	return (cond_init(cond, cond_attr));
}

int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond *cvp;
	int error = 0;

	if ((cvp = *cond) == THR_COND_INITIALIZER)
		error = 0;
	else if (cvp == THR_COND_DESTROYED)
		error = EINVAL;
	else {
		cvp = *cond;
		*cond = THR_COND_DESTROYED;

		/*
		 * Free the memory allocated for the condition
		 * variable structure:
		 */
		free(cvp);
	}
	return (error);
}

/*
 * Cancellation behavior:
 *   A thread may be canceled at the start of the wait.  If the thread is
 *   canceled, that means it did not consume a wakeup from
 *   pthread_cond_signal(); otherwise, it is not canceled.
 *   Thread cancellation never causes a wakeup from pthread_cond_signal()
 *   to be lost.
 */
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
	const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	int recurse;
	int error, error2 = 0;

	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0)
		return (error);

	if (cancel) {
		_thr_cancel_enter2(curthread, 0);
		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
		    (struct umutex *)&mp->m_lock, abstime,
		    CVWAIT_ABSTIME|CVWAIT_CLOCKID);
		_thr_cancel_leave(curthread, 0);
	} else {
		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
		    (struct umutex *)&mp->m_lock, abstime,
		    CVWAIT_ABSTIME|CVWAIT_CLOCKID);
	}

	/*
	 * Note that PP mutex and ROBUST mutex may return
	 * interesting error codes.
	 */
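	/*
	 * For example, at the POSIX level (the exact set here depends on
	 * _mutex_cv_lock()): re-acquiring a robust mutex may fail with
	 * EOWNERDEAD or ENOTRECOVERABLE, and locking a priority-protected
	 * mutex may fail with EINVAL when the caller's priority exceeds
	 * the ceiling.  That is why error2 must be propagated even when
	 * the wait itself succeeded.
	 */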
	if (error == 0) {
		error2 = _mutex_cv_lock(mp, recurse);
	} else if (error == EINTR || error == ETIMEDOUT) {
		error2 = _mutex_cv_lock(mp, recurse);
		if (error2 == 0 && cancel)
			_thr_testcancel(curthread);
		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		error2 = _mutex_cv_attach(mp, recurse);
		if (error2 == 0 && cancel)
			_thr_testcancel(curthread);
	}
	return (error2 != 0 ? error2 : error);
}

/*
 * The thread waits on a userland queue whenever possible.  When the
 * thread is signaled or broadcast, it is removed from the queue and
 * saved in the signaling thread's defer_waiters[] buffer, but it is
 * not woken up until the mutex is unlocked.
 */
static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
	const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	struct sleepqueue *sq;
	int recurse;
	int error;

	if (curthread->wchan != NULL)
		PANIC("thread was already on queue.");

	if (cancel)
		_thr_testcancel(curthread);

	_sleepq_lock(cvp);
	/*
	 * Set __has_user_waiters before unlocking the mutex; this allows
	 * us to check it without locking in pthread_cond_signal().
	 */
	cvp->__has_user_waiters = 1;
	curthread->will_sleep = 1;
	(void)_mutex_cv_unlock(mp, &recurse);
	curthread->mutex_obj = mp;
	_sleepq_add(cvp, curthread);
	for (;;) {
		_thr_clear_wake(curthread);
		_sleepq_unlock(cvp);

		if (cancel) {
			_thr_cancel_enter2(curthread, 0);
			error = _thr_sleep(curthread, cvp->__clock_id, abstime);
			_thr_cancel_leave(curthread, 0);
		} else {
			error = _thr_sleep(curthread, cvp->__clock_id, abstime);
		}

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			_mutex_cv_lock(mp, recurse);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else /* this should not happen */
				return (0);
		} else if (error == ETIMEDOUT) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
			    _sleepq_remove(sq, curthread);
			break;
		}
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	_mutex_cv_lock(mp, recurse);
	return (error);
}

static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	int error;

	CHECK_AND_INIT_COND

	mp = *mutex;

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	if (curthread->attr.sched_policy != SCHED_OTHER ||
	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT|UMUTEX_PRIO_INHERIT|
	    USYNC_PROCESS_SHARED)) != 0 ||
	    (cvp->__flags & USYNC_PROCESS_SHARED) != 0)
		return (cond_wait_kernel(cvp, mp, abstime, cancel));
	else
		return (cond_wait_user(cvp, mp, abstime, cancel));
}

int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}

int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}
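/*
 * The timed-wait entry points below take an *absolute* timeout measured
 * against the clock selected at pthread_condattr_setclock() time
 * (CLOCK_REALTIME by default; see cond_init() above).  A minimal
 * caller-side sketch, illustrative only and not part of this file:
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 5;		// give up at most 5 seconds from now
 *	int err = pthread_cond_timedwait(&cv, &mtx, &ts);
 *	if (err == ETIMEDOUT)
 *		;		// deadline passed; mutex is re-acquired
 */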
int
_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}

static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread *curthread = _get_curthread();
	struct pthread *td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int *waddr;
	int pshared;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal((struct ucond *)&cvp->__has_kern_waiters);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	curthread = _get_curthread();
	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
	if (mp->m_owner == curthread) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERED;
	} else {
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}

struct broadcast_arg {
	struct pthread *curthread;
	unsigned int *waddrs[MAX_DEFER_WAITERS];
	int count;
};

static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (mp->m_owner == curthread) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
		    &td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERED;
	} else {
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}
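/*
 * Design note on the deferral in drop_cb() and cond_signal_common():
 * if the signaling thread still owns the waiter's mutex, waking the
 * waiter immediately would only make it block again on that mutex.
 * Instead, the wake address is stashed in defer_waiters[] and
 * PMUTEX_FLAG_DEFERED is set, so the real wakeup is issued when the
 * mutex is unlocked.  Waiters sleeping on some other mutex are woken
 * directly, in batches of at most MAX_DEFER_WAITERS.
 */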
static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_broadcast((struct ucond *)&cvp->__has_kern_waiters);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}
	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}

int
_pthread_cond_signal(pthread_cond_t *cond)
{

	return (cond_signal_common(cond));
}

int
_pthread_cond_broadcast(pthread_cond_t *cond)
{

	return (cond_broadcast_common(cond));
}
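/*
 * Usage sketch for the API implemented above (the standard POSIX idiom,
 * illustrative only; not code from this library).  The predicate must be
 * re-checked in a loop because pthread_cond_wait() may wake spuriously.
 * Note that signaling while holding the mutex, as below, is exactly the
 * case the deferred-wakeup path optimizes:
 *
 *	pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
 *	pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
 *	int ready = 0;
 *
 *	// consumer
 *	pthread_mutex_lock(&mtx);
 *	while (!ready)
 *		pthread_cond_wait(&cv, &mtx);
 *	pthread_mutex_unlock(&mtx);
 *
 *	// producer
 *	pthread_mutex_lock(&mtx);
 *	ready = 1;
 *	pthread_cond_signal(&cv);
 *	pthread_mutex_unlock(&mtx);
 */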