/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <time.h>
#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define _MUTEX_INIT_LINK(m) do {                        \
        (m)->m_qe.tqe_prev = NULL;                      \
        (m)->m_qe.tqe_next = NULL;                      \
} while (0)
#define _MUTEX_ASSERT_IS_OWNED(m) do {                  \
        if ((m)->m_qe.tqe_prev == NULL)                 \
                PANIC("mutex is not on list");          \
} while (0)
#define _MUTEX_ASSERT_NOT_OWNED(m) do {                 \
        if (((m)->m_qe.tqe_prev != NULL) ||             \
            ((m)->m_qe.tqe_next != NULL))               \
                PANIC("mutex is on list");              \
} while (0)
#else
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * Prototypes
 */
static void             acquire_mutex(struct pthread_mutex *, struct pthread *);
static int              get_mcontested(pthread_mutex_t,
                            const struct timespec *);
static void             mutex_attach_to_next_pthread(struct pthread_mutex *);
static int              mutex_init(pthread_mutex_t *, int);
static int              mutex_lock_common(pthread_mutex_t *, int,
                            const struct timespec *);
static inline int       mutex_self_lock(pthread_mutex_t, int);
static inline int       mutex_unlock_common(pthread_mutex_t *, int);
static inline pthread_t mutex_queue_deq(pthread_mutex_t);
static inline void      mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void      mutex_queue_enq(pthread_mutex_t, pthread_t);
static void             restore_prio_inheritance(struct pthread *);
static void             restore_prio_protection(struct pthread *);

static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;

static struct pthread_mutex_attr static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t static_mattr = &static_mutex_attr;

/* Single underscore versions provided for libc internal usage: */
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_unlock, pthread_mutex_unlock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_timedlock, pthread_mutex_timedlock);

/*
 * Reinitialize a private mutex; this is only used for internal mutexes.
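 *
 * ("Private" mutexes are the ones the threads library and libc create for
 * their own use; _mutex_reinit() marks them MUTEX_FLAGS_PRIVATE so that
 * _mutex_unlock_private() below can walk a thread's mutexq and release any
 * of them that are still held.)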
 */
int
_mutex_reinit(pthread_mutex_t *mutex)
{
        int ret = 0;

        if (mutex == NULL)
                ret = EINVAL;
        else if (*mutex == PTHREAD_MUTEX_INITIALIZER)
                ret = _pthread_mutex_init(mutex, NULL);
        else {
                /*
                 * Initialize the mutex structure:
                 */
                (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
                (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
                TAILQ_INIT(&(*mutex)->m_queue);
                (*mutex)->m_owner = NULL;
                (*mutex)->m_data.m_count = 0;
                (*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE;
                (*mutex)->m_refcount = 0;
                (*mutex)->m_prio = 0;
                (*mutex)->m_saved_prio = 0;
                _MUTEX_INIT_LINK(*mutex);
                memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
        }
        return (ret);
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
        struct pthread_mutex_attr default_attr = {PTHREAD_MUTEX_ERRORCHECK,
            PTHREAD_PRIO_NONE, PTHREAD_MAX_PRIORITY, 0 };
        struct pthread_mutex_attr *attr;

        if (mutex_attr == NULL) {
                attr = &default_attr;
        } else {
                /*
                 * Check that the given mutex attribute is valid.
                 */
                if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
                    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
                        return (EINVAL);
                else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
                    ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
                        return (EINVAL);
                attr = *mutex_attr;
        }
        if ((*mutex =
            (pthread_mutex_t)malloc(sizeof(struct pthread_mutex))) == NULL)
                return (ENOMEM);
        memset((void *)(*mutex), 0, sizeof(struct pthread_mutex));

        /* Initialise the rest of the mutex: */
        TAILQ_INIT(&(*mutex)->m_queue);
        _MUTEX_INIT_LINK(*mutex);
        (*mutex)->m_protocol = attr->m_protocol;
        (*mutex)->m_flags = (attr->m_flags | MUTEX_FLAGS_INITED);
        (*mutex)->m_type = attr->m_type;
        if ((*mutex)->m_protocol == PTHREAD_PRIO_PROTECT)
                (*mutex)->m_prio = attr->m_ceiling;
        return (0);
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
        if (mutex == NULL)
                return (EINVAL);

        /*
         * If this mutex was statically initialized, don't bother
         * initializing it in order to destroy it immediately.
         */
        if (*mutex == PTHREAD_MUTEX_INITIALIZER)
                return (0);

        /* Lock the mutex structure: */
        _SPINLOCK(&(*mutex)->lock);

        /*
         * Check to see if this mutex is in use:
         */
        if (((*mutex)->m_owner != NULL) ||
            (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
            ((*mutex)->m_refcount != 0)) {
                /* Unlock the mutex structure: */
                _SPINUNLOCK(&(*mutex)->lock);
                return (EBUSY);
        }

        /*
         * Free the memory allocated for the mutex
         * structure:
         */
        _MUTEX_ASSERT_NOT_OWNED(*mutex);
        _SPINUNLOCK(&(*mutex)->lock);
        free(*mutex);

        /*
         * Leave the caller's pointer NULL now that
         * the mutex has been destroyed:
         */
        *mutex = NULL;

        return (0);
}

static int
mutex_init(pthread_mutex_t *mutex, int private)
{
        pthread_mutexattr_t *pma;
        int error;

        error = 0;
        pma = private ? &static_mattr : NULL;
        _SPINLOCK(&static_init_lock);
        if (*mutex == PTHREAD_MUTEX_INITIALIZER)
                error = _pthread_mutex_init(mutex, pma);
        _SPINUNLOCK(&static_init_lock);
        return (error);
}

/*
 * Acquires a mutex for the current thread.  The caller must
 * lock the mutex before calling this function.
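 *
 * A minimal sketch of the expected calling pattern (illustrative only;
 * this is how mutex_lock_common() below uses it):
 *
 *      _SPINLOCK(&(*mutex)->lock);
 *      if ((*mutex)->m_owner == NULL)
 *              acquire_mutex(*mutex, curthread);
 *      _SPINUNLOCK(&(*mutex)->lock);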
 */
static void
acquire_mutex(struct pthread_mutex *mtx, struct pthread *ptd)
{
        mtx->m_owner = ptd;
        _MUTEX_ASSERT_NOT_OWNED(mtx);
        _thread_critical_enter(ptd);
        TAILQ_INSERT_TAIL(&ptd->mutexq, mtx, m_qe);
        _thread_critical_exit(ptd);
}

/*
 * Releases a mutex from the current thread.  The caller must hold the
 * mutex's lock.  The next thread on the queue, if any, becomes the new
 * owner and is left locked by the current thread; the caller must take
 * care to unlock it.
 */
static void
mutex_attach_to_next_pthread(struct pthread_mutex *mtx)
{
        struct pthread *ptd;

        _MUTEX_ASSERT_IS_OWNED(mtx);
        TAILQ_REMOVE(&mtx->m_owner->mutexq, (mtx), m_qe);
        _MUTEX_INIT_LINK(mtx);

        /*
         * Dequeue the next thread waiting for this mutex and attach
         * the mutex to it.  The thread will already be locked.
         */
        if ((ptd = mutex_queue_deq(mtx)) != NULL) {
                TAILQ_INSERT_TAIL(&ptd->mutexq, mtx, m_qe);
                ptd->data.mutex = NULL;
                PTHREAD_NEW_STATE(ptd, PS_RUNNING);
        }
        mtx->m_owner = ptd;
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
        int ret = 0;

        if (mutex == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
            (ret = mutex_init(mutex, 0)) == 0)
                ret = mutex_lock_common(mutex, 1, NULL);

        return (ret);
}

/*
 * Libc internal.
 */
int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
        int ret = 0;

        _thread_sigblock();

        if (mutex == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization marking the mutex private (delete safe):
         */
        else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
            (ret = mutex_init(mutex, 1)) == 0)
                ret = mutex_lock_common(mutex, 1, NULL);

        if (ret != 0)
                _thread_sigunblock();

        return (ret);
}

static int
mutex_lock_common(pthread_mutex_t *mutex, int nonblock,
    const struct timespec *abstime)
{
        int error;

        error = 0;
        PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
            "Uninitialized mutex in mutex_lock_common");
        PTHREAD_ASSERT(((*mutex)->m_protocol >= PTHREAD_PRIO_NONE &&
            (*mutex)->m_protocol <= PTHREAD_PRIO_PROTECT),
            "Invalid mutex protocol");
        pthread_testcancel();
        _SPINLOCK(&(*mutex)->lock);

        /*
         * If the mutex was statically allocated, properly
         * initialize the tail queue.
         */
        if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
                TAILQ_INIT(&(*mutex)->m_queue);
                (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
                _MUTEX_INIT_LINK(*mutex);
        }

retry:
        /*
         * If the mutex is a priority protected mutex the thread's
         * priority may not be higher than that of the mutex.
         */
        if ((*mutex)->m_protocol == PTHREAD_PRIO_PROTECT &&
            curthread->active_priority > (*mutex)->m_prio) {
                _SPINUNLOCK(&(*mutex)->lock);
                return (EINVAL);
        }
        if ((*mutex)->m_owner == NULL) {
                /*
                 * Mutex is currently unowned.
                 */
                acquire_mutex(*mutex, curthread);
        } else if ((*mutex)->m_owner == curthread) {
                /*
                 * Mutex is owned by curthread.  We must test against
                 * certain conditions in such a case.
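                 * What happens depends on the mutex type (see
                 * mutex_self_lock()): ERRORCHECK returns EDEADLK (EBUSY
                 * for a nonblocking attempt), NORMAL deliberately
                 * deadlocks the thread, and RECURSIVE just bumps the
                 * lock count.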
                 */
                if ((error = mutex_self_lock((*mutex), nonblock)) != 0) {
                        _SPINUNLOCK(&(*mutex)->lock);
                        return (error);
                }
        } else {
                if (nonblock) {
                        error = EBUSY;
                        goto out;
                }

                /*
                 * Another thread owns the mutex.  This thread must
                 * wait for that thread to unlock the mutex.  This
                 * thread must not return to the caller if it was
                 * interrupted by a signal.
                 */
                error = get_mcontested(*mutex, abstime);
                if (error == EINTR)
                        goto retry;
                else if (error == ETIMEDOUT)
                        goto out;
        }

        /*
         * The mutex is now owned by curthread.
         */
        _thread_critical_enter(curthread);

        /*
         * The mutex's priority may have changed while waiting for it.
         */
        if ((*mutex)->m_protocol == PTHREAD_PRIO_PROTECT &&
            curthread->active_priority > (*mutex)->m_prio) {
                mutex_attach_to_next_pthread(*mutex);
                if ((*mutex)->m_owner != NULL)
                        _thread_critical_exit((*mutex)->m_owner);
                _thread_critical_exit(curthread);
                _SPINUNLOCK(&(*mutex)->lock);
                return (EINVAL);
        }

        switch ((*mutex)->m_protocol) {
        case PTHREAD_PRIO_INHERIT:
                curthread->prio_inherit_count++;
                break;
        case PTHREAD_PRIO_PROTECT:
                PTHREAD_ASSERT((curthread->active_priority <=
                    (*mutex)->m_prio), "priority protection violation");
                curthread->prio_protect_count++;
                if ((*mutex)->m_prio > curthread->active_priority) {
                        curthread->inherited_priority = (*mutex)->m_prio;
                        curthread->active_priority = (*mutex)->m_prio;
                }
                break;
        default:
                /* Nothing */
                break;
        }
        _thread_critical_exit(curthread);
out:
        _SPINUNLOCK(&(*mutex)->lock);
        pthread_testcancel();
        return (error);
}

/*
 * Caller must lock thread.
 */
void
adjust_prio_inheritance(struct pthread *ptd)
{
        struct pthread_mutex *tempMtx;
        struct pthread *tempTd;

        /*
         * Scan the wait queues of the mutexes this thread owns and
         * execute at the higher of the thread's current priority or
         * the priority of the highest priority thread waiting on any
         * of those mutexes.  Note: the highest priority thread on a
         * queue is always at the head of the queue.
         */
        TAILQ_FOREACH(tempMtx, &ptd->mutexq, m_qe) {
                if (tempMtx->m_protocol != PTHREAD_PRIO_INHERIT)
                        continue;

                /*
                 * XXX LOR with respect to tempMtx and ptd.
                 * Order should be: 1. mutex
                 *                  2. pthread
                 */
                _SPINLOCK(&tempMtx->lock);

                tempTd = TAILQ_FIRST(&tempMtx->m_queue);
                if (tempTd != NULL) {
                        UMTX_LOCK(&tempTd->lock);
                        if (tempTd->active_priority > ptd->active_priority) {
                                ptd->inherited_priority =
                                    tempTd->active_priority;
                                ptd->active_priority =
                                    tempTd->active_priority;
                        }
                        UMTX_UNLOCK(&tempTd->lock);
                }
                _SPINUNLOCK(&tempMtx->lock);
        }
}

/*
 * Caller must lock thread.
 */
static void
restore_prio_inheritance(struct pthread *ptd)
{
        ptd->inherited_priority = PTHREAD_MIN_PRIORITY;
        ptd->active_priority = ptd->base_priority;
        adjust_prio_inheritance(ptd);
}

/*
 * Caller must lock thread.
 */
void
adjust_prio_protection(struct pthread *ptd)
{
        struct pthread_mutex *tempMtx;

        /*
         * The thread shall execute at the higher of its priority or
         * the highest priority ceiling of all the priority protection
         * mutexes it owns.
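         *
         * For example (illustrative figures only): a thread with
         * base_priority 10 that owns a PTHREAD_PRIO_PROTECT mutex whose
         * ceiling (m_prio) is 20 runs with active_priority 20 until
         * restore_prio_protection() drops it back to its base priority.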
         */
        TAILQ_FOREACH(tempMtx, &ptd->mutexq, m_qe) {
                if (tempMtx->m_protocol != PTHREAD_PRIO_PROTECT)
                        continue;
                if (ptd->active_priority < tempMtx->m_prio) {
                        ptd->inherited_priority = tempMtx->m_prio;
                        ptd->active_priority = tempMtx->m_prio;
                }
        }
}

/*
 * Caller must lock thread.
 */
static void
restore_prio_protection(struct pthread *ptd)
{
        ptd->inherited_priority = PTHREAD_MIN_PRIORITY;
        ptd->active_priority = ptd->base_priority;
        adjust_prio_protection(ptd);
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
        int ret = 0;

        if (_thread_initial == NULL)
                _thread_init();

        if (mutex == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
            ((ret = mutex_init(mutex, 0)) == 0))
                ret = mutex_lock_common(mutex, 0, NULL);

        return (ret);
}

/*
 * Libc internal.
 */
int
_pthread_mutex_lock(pthread_mutex_t *mutex)
{
        int ret = 0;

        if (_thread_initial == NULL)
                _thread_init();

        _thread_sigblock();

        if (mutex == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization marking it private (delete safe):
         */
        else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
            ((ret = mutex_init(mutex, 1)) == 0))
                ret = mutex_lock_common(mutex, 0, NULL);

        if (ret != 0)
                _thread_sigunblock();

        return (ret);
}

int
_pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
{
        int error;

        error = 0;
        if (_thread_initial == NULL)
                _thread_init();

        /*
         * Initialize it if it's a valid statically inited mutex.
         */
        if (mutex == NULL)
                error = EINVAL;
        else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
            ((error = mutex_init(mutex, 0)) == 0))
                error = mutex_lock_common(mutex, 0, abstime);

        PTHREAD_ASSERT(error != EINTR,
            "According to SUSv3 this function shall not return an error code of EINTR");
        return (error);
}

int
__pthread_mutex_unlock(pthread_mutex_t *mutex)
{
        return (mutex_unlock_common(mutex, /* add reference */ 0));
}

/*
 * Libc internal.
 */
int
_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
        int error;

        if ((error = mutex_unlock_common(mutex, /* add reference */ 0)) == 0)
                _thread_sigunblock();
        return (error);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex)
{
        return (mutex_unlock_common(mutex, /* add reference */ 1));
}

int
_mutex_cv_lock(pthread_mutex_t *mutex)
{
        int ret;

        if ((ret = _pthread_mutex_lock(mutex)) == 0)
                (*mutex)->m_refcount--;
        return (ret);
}

/*
 * Caller must lock mutex and then disable signals and lock curthread.
 */
static inline int
mutex_self_lock(pthread_mutex_t mutex, int noblock)
{
        switch (mutex->m_type) {
        case PTHREAD_MUTEX_ERRORCHECK:
                /*
                 * POSIX specifies that mutexes should return EDEADLK if a
                 * recursive lock is detected.
                 */
                if (noblock)
                        return (EBUSY);
                return (EDEADLK);
                break;

        case PTHREAD_MUTEX_NORMAL:
                /*
                 * What SS2 defines as a 'normal' mutex.  Intentionally
                 * deadlock on attempts to get a lock you already own.
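                 * The thread is put in PS_DEADLOCK and suspended without
                 * ever being resumed; a nonblocking (trylock) attempt
                 * returns EBUSY instead.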
                 */
                if (noblock)
                        return (EBUSY);
                PTHREAD_SET_STATE(curthread, PS_DEADLOCK);
                _SPINUNLOCK(&(mutex)->lock);
                _thread_suspend(curthread, NULL);
                PANIC("Shouldn't resume here?\n");
                break;

        case PTHREAD_MUTEX_RECURSIVE:
                /* Increment the lock count: */
                mutex->m_data.m_count++;
                break;

        default:
                /* Trap invalid mutex types: */
                return (EINVAL);
        }
        return (0);
}

static inline int
mutex_unlock_common(pthread_mutex_t *mutex, int add_reference)
{
        /*
         * Error checking.
         */
        if (*mutex == NULL)
                return (EINVAL);
        if ((*mutex)->m_owner != curthread)
                return (EPERM);
        PTHREAD_ASSERT(((*mutex)->m_protocol >= PTHREAD_PRIO_NONE &&
            (*mutex)->m_protocol <= PTHREAD_PRIO_PROTECT),
            "Invalid mutex protocol");

        _SPINLOCK(&(*mutex)->lock);
        if ((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) {
                (*mutex)->m_data.m_count--;
                PTHREAD_ASSERT((*mutex)->m_data.m_count >= 0,
                    "The mutex recurse count cannot be less than zero");
                if ((*mutex)->m_data.m_count > 0) {
                        _SPINUNLOCK(&(*mutex)->lock);
                        return (0);
                }
        }

        /*
         * Release the mutex from this thread and attach it to
         * the next thread in the queue, if there is one waiting.
         */
        _thread_critical_enter(curthread);
        mutex_attach_to_next_pthread(*mutex);
        if ((*mutex)->m_owner != NULL)
                _thread_critical_exit((*mutex)->m_owner);
        if (add_reference != 0) {
                /* Increment the reference count: */
                (*mutex)->m_refcount++;
        }
        _SPINUNLOCK(&(*mutex)->lock);

        /*
         * Fix priority of the thread that just released the mutex.
         */
        switch ((*mutex)->m_protocol) {
        case PTHREAD_PRIO_INHERIT:
                curthread->prio_inherit_count--;
                PTHREAD_ASSERT(curthread->prio_inherit_count >= 0,
                    "priority inheritance counter cannot be less than zero");
                restore_prio_inheritance(curthread);
                if (curthread->prio_protect_count > 0)
                        restore_prio_protection(curthread);
                break;
        case PTHREAD_PRIO_PROTECT:
                curthread->prio_protect_count--;
                PTHREAD_ASSERT(curthread->prio_protect_count >= 0,
                    "priority protection counter cannot be less than zero");
                restore_prio_protection(curthread);
                if (curthread->prio_inherit_count > 0)
                        restore_prio_inheritance(curthread);
                break;
        default:
                /* Nothing */
                break;
        }
        _thread_critical_exit(curthread);
        return (0);
}

void
_mutex_unlock_private(pthread_t pthread)
{
        struct pthread_mutex *m, *m_next;

        for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
                m_next = TAILQ_NEXT(m, m_qe);
                if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
                        _pthread_mutex_unlock(&m);
        }
}

void
_mutex_lock_backout(pthread_t pthread)
{
        struct pthread_mutex *mutex;

        mutex = pthread->data.mutex;
        if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {

                mutex_queue_remove(mutex, pthread);

                /* This thread is no longer waiting for the mutex: */
                pthread->data.mutex = NULL;

        }
}

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.  This function will return with the thread locked.
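 * Threads that have been cancelled, or that are no longer in
 * PS_MUTEX_WAIT, are skipped (they are removed from the queue and
 * unlocked again); NULL is returned if no eligible waiter remains.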
 */
static inline pthread_t
mutex_queue_deq(pthread_mutex_t mutex)
{
        pthread_t pthread;

        while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
                _thread_critical_enter(pthread);
                TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
                pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;

                /*
                 * Only exit the loop if the thread hasn't been
                 * cancelled.
                 */
                if ((pthread->cancelflags & PTHREAD_CANCELLING) == 0 &&
                    pthread->state == PS_MUTEX_WAIT)
                        break;
                else
                        _thread_critical_exit(pthread);
        }

        return (pthread);
}

/*
 * Remove a waiting thread from a mutex queue in descending priority order.
 */
static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
        if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
                TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
                pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
        }
}

/*
 * Enqueue a waiting thread to a queue in descending priority order.
 */
static inline void
mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
        pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
        char *name;

        name = pthread->name ? pthread->name : "unknown";
        if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0)
                _thread_printf(2, "Thread (%s:%u) already on condq\n",
                    name, pthread->uniqueid);
        if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0)
                _thread_printf(2, "Thread (%s:%u) already on mutexq\n",
                    name, pthread->uniqueid);
        PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
        /*
         * For the common case of all threads having equal priority,
         * we perform a quick check against the priority of the thread
         * at the tail of the queue.
         */
        if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
                TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
        else {
                tid = TAILQ_FIRST(&mutex->m_queue);
                while (pthread->active_priority <= tid->active_priority)
                        tid = TAILQ_NEXT(tid, sqe);
                TAILQ_INSERT_BEFORE(tid, pthread, sqe);
        }
        if (mutex->m_protocol == PTHREAD_PRIO_INHERIT &&
            pthread == TAILQ_FIRST(&mutex->m_queue)) {
                UMTX_LOCK(&mutex->m_owner->lock);
                if (pthread->active_priority >
                    mutex->m_owner->active_priority) {
                        mutex->m_owner->inherited_priority =
                            pthread->active_priority;
                        mutex->m_owner->active_priority =
                            pthread->active_priority;
                }
                UMTX_UNLOCK(&mutex->m_owner->lock);
        }
        pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
}

/*
 * Caller must lock mutex and pthread.
 */
void
readjust_priorities(struct pthread *pthread, struct pthread_mutex *mtx)
{
        if (pthread->state == PS_MUTEX_WAIT) {
                mutex_queue_remove(mtx, pthread);
                mutex_queue_enq(mtx, pthread);
                UMTX_LOCK(&mtx->m_owner->lock);
                adjust_prio_inheritance(mtx->m_owner);
                if (mtx->m_owner->prio_protect_count > 0)
                        adjust_prio_protection(mtx->m_owner);
                UMTX_UNLOCK(&mtx->m_owner->lock);
        }
        if (pthread->prio_inherit_count > 0)
                adjust_prio_inheritance(pthread);
        if (pthread->prio_protect_count > 0)
                adjust_prio_protection(pthread);
}

/*
 * Returns with the lock owned and on the thread's mutexq.  If
 * the mutex is currently owned by another thread it will sleep
 * until it is available.
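 *
 * Returns 0 once ownership has been obtained, ETIMEDOUT if the abstime
 * timeout expired first, or EINTR if the sleep was interrupted by a
 * signal (in which case mutex_lock_common() retries the lock).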
 */
static int
get_mcontested(pthread_mutex_t mutexp, const struct timespec *abstime)
{
        int error;

        /*
         * If the timeout is invalid this thread is not allowed
         * to block.
         */
        if (abstime != NULL) {
                if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                        return (EINVAL);
        }

        /*
         * Put this thread on the mutex's list of waiting threads.
         * The lock on the thread ensures atomic (as far as other
         * threads are concerned) setting of the thread state with
         * its status on the mutex queue.
         */
        _thread_critical_enter(curthread);
        mutex_queue_enq(mutexp, curthread);
        do {
                PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT);
                curthread->data.mutex = mutexp;
                _thread_critical_exit(curthread);
                _SPINUNLOCK(&mutexp->lock);
                error = _thread_suspend(curthread, abstime);
                if (error != 0 && error != EAGAIN && error != EINTR)
                        PANIC("Cannot suspend on mutex.");
                _SPINLOCK(&mutexp->lock);
                _thread_critical_enter(curthread);
                if (error == EAGAIN) {
                        /*
                         * Between the timeout and when the mutex was
                         * locked the previous owner may have released
                         * the mutex to this thread.  Or not.
                         */
                        if (mutexp->m_owner == curthread) {
                                error = 0;
                        } else {
                                _mutex_lock_backout(curthread);
                                curthread->state = PS_RUNNING;
                                error = ETIMEDOUT;
                        }
                }
        } while ((curthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0);
        _thread_critical_exit(curthread);
        return (error);
}
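
/*
 * Illustrative application-level usage of the interfaces implemented in
 * this file; a minimal sketch only (the identifiers "lock", "count" and
 * "bump" are example names, not part of the library).  The first
 * pthread_mutex_lock() on the statically initialized mutex performs the
 * dynamic initialization handled by mutex_init() above:
 *
 *      #include <pthread.h>
 *
 *      static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *      static int count;
 *
 *      void
 *      bump(void)
 *      {
 *              pthread_mutex_lock(&lock);
 *              count++;
 *              pthread_mutex_unlock(&lock);
 *      }
 */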