/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}

	if ((pmutex = (pthread_mutex_t)
	    malloc(sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	_thr_umtx_init(&pmutex->m_lock);
	pmutex->m_type = attr->m_type;
	pmutex->m_protocol = attr->m_protocol;
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	if (attr->m_protocol == PTHREAD_PRIO_PROTECT)
		pmutex->m_prio = attr->m_ceiling;
	else
		pmutex->m_prio = -1;
	pmutex->m_saved_prio = 0;
	MUTEX_INIT_LINK(pmutex);
	*mutex = pmutex;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}
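
/*
 * Illustrative sketch, not part of this file: how an application
 * reaches mutex_init() above through the standard attribute API.
 * pthread_mutex_init() resolves (via the weak reference) to
 * __pthread_mutex_init(), which calls mutex_init() with private = 0.
 * Error handling is elided.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */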
static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0);
}

int
_mutex_reinit(pthread_mutex_t *mutex)
{
	_thr_umtx_init(&(*mutex)->m_lock);
	MUTEX_INIT_LINK(*mutex);
	(*mutex)->m_owner = NULL;
	(*mutex)->m_count = 0;
	(*mutex)->m_refcount = 0;
	(*mutex)->m_prio = 0;
	(*mutex)->m_saved_prio = 0;
	return (0);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited here: their
	 * owner is the forking thread, which lives in the parent
	 * process, so they would have to be removed from the owned
	 * mutex list.  Process-shared mutexes are currently not
	 * supported, so this is not a concern yet.
	 */
	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock = (umtx_t)curthread->tid;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If that fails, the mutex is in use.
		 */
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret)
			return (ret);

		/*
		 * Check the other mutex fields to see if this mutex is
		 * in use.  This mostly matters for priority mutex types,
		 * or when condition variables still reference it.
		 */
		if ((*mutex)->m_owner != NULL || (*mutex)->m_refcount != 0) {
			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL.
			 */
			m = *mutex;
			*mutex = NULL;

			THR_UMTX_UNLOCK(curthread, &m->m_lock);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes. */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}
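
/*
 * Illustrative sketch, not part of this file: the non-blocking usage
 * pattern served by mutex_trylock_common().  A trylock that finds the
 * mutex held by another thread returns EBUSY instead of sleeping; a
 * trylock on a mutex the caller already owns is routed to
 * mutex_self_trylock() below.
 *
 *	if (pthread_mutex_trylock(&m) == 0) {
 *		... critical section ...
 *		pthread_mutex_unlock(&m);
 *	} else {
 *		... lock busy, do other work and retry later ...
 *	}
 */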
int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization, marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct timespec ts, ts2;
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		if (abstime == NULL) {
			THR_UMTX_LOCK(curthread, &m->m_lock);
			ret = 0;
		} else if (__predict_false(
		    abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
		    abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = THR_UMTX_TIMEDLOCK(curthread, &m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth doing.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
		if (ret == 0) {
			m->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(m);
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
		}
	}
	return (ret);
}
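
/*
 * Illustrative sketch, not part of this file: mutex_lock_common()
 * expects an absolute CLOCK_REALTIME deadline and converts it into
 * the relative timeout that THR_UMTX_TIMEDLOCK() consumes.  A caller
 * would typically build the deadline like this (error handling
 * elided):
 *
 *	struct timespec abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 5;	(give up roughly five seconds from now)
 *	if (pthread_mutex_timedlock(&m, &abstime) == ETIMEDOUT)
 *		... the deadline passed without acquiring the lock ...
 */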
int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization, marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization, marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}
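
/*
 * Illustrative sketch, not part of this file: _mutex_cv_lock() above
 * and _mutex_cv_unlock() below are the hooks the condition variable
 * code is assumed to use to drop and reacquire the caller's mutex
 * around a wait while preserving a recursive lock count, roughly:
 *
 *	int count;
 *
 *	_mutex_cv_unlock(mutex, &count);	(release, save m_count)
 *	... block on the condition variable ...
 *	_mutex_cv_lock(mutex, count);		(reacquire, restore it)
 *
 * The m_refcount bumped in _mutex_cv_unlock() is what makes
 * _pthread_mutex_destroy() return EBUSY while waiters still
 * reference the mutex.
 */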
static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check that the running thread is the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	if (__predict_false(
	    m->m_type == PTHREAD_MUTEX_RECURSIVE &&
	    m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		MUTEX_INIT_LINK(m);
		THR_UMTX_UNLOCK(curthread, &m->m_lock);
	}
	return (0);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check that the running thread is the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	MUTEX_INIT_LINK(m);
	THR_UMTX_UNLOCK(curthread, &m->m_lock);
	return (0);
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex *m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_prio;
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int prioceiling, int *old_ceiling)
{
	int ret = 0;
	int tmp;

	if (*mutex == NULL)
		ret = EINVAL;
	else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
		ret = EINVAL;
	else if ((ret = _pthread_mutex_lock(mutex)) == 0) {
		tmp = (*mutex)->m_prio;
		(*mutex)->m_prio = prioceiling;
		ret = _pthread_mutex_unlock(mutex);

		/* Return the old ceiling. */
		*old_ceiling = tmp;
	}
	return (ret);
}
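
/*
 * Illustrative sketch, not part of this file: querying and changing
 * the ceiling of a PTHREAD_PRIO_PROTECT mutex with the two functions
 * above.  Both return EINVAL for any other protocol; error handling
 * is elided.
 *
 *	int cur, old;
 *
 *	pthread_mutex_getprioceiling(&m, &cur);
 *	pthread_mutex_setprioceiling(&m, cur + 1, &old);
 *	(old now holds the previous ceiling, i.e. cur)
 */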