/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define _MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define _MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define _MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * Prototypes
 */
static int		get_muncontested(pthread_mutex_t, int);
static void		get_mcontested(pthread_mutex_t);
static int		mutex_lock_common(pthread_mutex_t *, int);
static inline int	mutex_self_trylock(pthread_mutex_t);
static inline int	mutex_self_lock(pthread_mutex_t);
static inline int	mutex_unlock_common(pthread_mutex_t *, int);
static void		mutex_priority_adjust(pthread_mutex_t);
static void		mutex_rescan_owned(pthread_t, pthread_mutex_t);
static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);

static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;

static struct pthread_mutex_attr static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t static_mattr = &static_mutex_attr;

/* Single underscore versions provided for libc internal usage: */
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

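/*
 * A note on the aliasing scheme above (editorial sketch, not part of the
 * original source): __weak_reference(sym, alias) makes "alias" a weak
 * symbol resolving to "sym", so an application call such as
 *
 *	pthread_mutex_lock(&m);
 *
 * resolves to __pthread_mutex_lock() here, while code inside libc calls
 * the single-underscore _pthread_mutex_lock() directly.  For lock and
 * trylock the two differ only in how they complete static initialization:
 * the libc-internal versions mark the mutex private (delete safe), as
 * seen below.
 */
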
/*
 * Reinitialize a private mutex; this is only used for internal mutexes.
 */
int
_mutex_reinit(pthread_mutex_t *mutex)
{
	int	ret = 0;

	if (mutex == NULL)
		ret = EINVAL;
	else if (*mutex == NULL)
		ret = _pthread_mutex_init(mutex, NULL);
	else {
		/*
		 * Initialize the mutex structure:
		 */
		(*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
		(*mutex)->m_protocol = PTHREAD_PRIO_NONE;
		TAILQ_INIT(&(*mutex)->m_queue);
		(*mutex)->m_owner = NULL;
		(*mutex)->m_data.m_count = 0;
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE;
		(*mutex)->m_refcount = 0;
		(*mutex)->m_prio = 0;
		(*mutex)->m_saved_prio = 0;
		_MUTEX_INIT_LINK(*mutex);
		memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
	}
	return (ret);
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	enum pthread_mutextype	type;
	int		protocol;
	int		ceiling;
	int		flags;
	pthread_mutex_t	pmutex;
	int		ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/* Check if default mutex attributes: */
	if (mutex_attr == NULL || *mutex_attr == NULL) {
		/* Default to an (error checking) POSIX mutex: */
		type = PTHREAD_MUTEX_ERRORCHECK;
		protocol = PTHREAD_PRIO_NONE;
		ceiling = PTHREAD_MAX_PRIORITY;
		flags = 0;
	}

	/* Check mutex type: */
	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
	    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
		/* Return an invalid argument error: */
		ret = EINVAL;

	/*
	 * Check mutex protocol; valid protocols range from
	 * PTHREAD_PRIO_NONE to PTHREAD_PRIO_PROTECT:
	 */
	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
	    ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
		/* Return an invalid argument error: */
		ret = EINVAL;

	else {
		/* Use the requested mutex type and protocol: */
		type = (*mutex_attr)->m_type;
		protocol = (*mutex_attr)->m_protocol;
		ceiling = (*mutex_attr)->m_ceiling;
		flags = (*mutex_attr)->m_flags;
	}

	/* Check no errors so far: */
	if (ret == 0) {
		if ((pmutex = (pthread_mutex_t)
		    malloc(sizeof(struct pthread_mutex))) == NULL)
			ret = ENOMEM;
		else {
			/* Set the mutex flags: */
			pmutex->m_flags = flags;

			/* Process according to mutex type: */
			switch (type) {
			/* case PTHREAD_MUTEX_DEFAULT: */
			case PTHREAD_MUTEX_ERRORCHECK:
			case PTHREAD_MUTEX_NORMAL:
				/* Nothing to do here. */
				break;

			/* Single UNIX Spec 2 recursive mutex: */
			case PTHREAD_MUTEX_RECURSIVE:
				/* Reset the mutex count: */
				pmutex->m_data.m_count = 0;
				break;

			/* Trap invalid mutex types: */
			default:
				/* Return an invalid argument error: */
				ret = EINVAL;
				break;
			}
			if (ret == 0) {
				/* Initialize the rest of the mutex: */
				TAILQ_INIT(&pmutex->m_queue);
				pmutex->m_flags |= MUTEX_FLAGS_INITED;
				pmutex->m_owner = NULL;
				pmutex->m_type = type;
				pmutex->m_protocol = protocol;
				pmutex->m_refcount = 0;
				if (protocol == PTHREAD_PRIO_PROTECT)
					pmutex->m_prio = ceiling;
				else
					pmutex->m_prio = 0;
				pmutex->m_saved_prio = 0;
				_MUTEX_INIT_LINK(pmutex);
				memset(&pmutex->lock, 0, sizeof(pmutex->lock));
				*mutex = pmutex;
			} else {
				free(pmutex);
				*mutex = NULL;
			}
		}
	}
	/* Return the completion status: */
	return (ret);
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	int	ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * Check to see if this mutex is in use:
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			ret = EBUSY;

			/* Unlock the mutex structure: */
			_SPINUNLOCK(&(*mutex)->lock);
		}
		else {
			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			_MUTEX_ASSERT_NOT_OWNED(*mutex);

			/* Unlock the mutex structure: */
			_SPINUNLOCK(&(*mutex)->lock);

			free(*mutex);

			/*
			 * Leave the caller's pointer NULL now that
			 * the mutex has been destroyed:
			 */
			*mutex = NULL;
		}
	}

	/* Return the completion status: */
	return (ret);
}

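/*
 * Illustrative application-side usage of the two functions above
 * (editorial sketch; not part of the library).  A recursive mutex is
 * requested via an attribute object, used, and destroyed once unlocked
 * and unreferenced:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutexattr_destroy(&attr);
 *	...
 *	pthread_mutex_destroy(&m);
 *
 * Note that _pthread_mutex_destroy() returns EBUSY while the mutex has
 * an owner, waiters, or a non-zero condition-variable reference count.
 */
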
static int
init_static(pthread_mutex_t *mutex)
{
	int	ret;

	_SPINLOCK(&static_init_lock);

	if (*mutex == NULL)
		ret = _pthread_mutex_init(mutex, NULL);
	else
		ret = 0;

	_SPINUNLOCK(&static_init_lock);

	return (ret);
}

static int
init_static_private(pthread_mutex_t *mutex)
{
	int	ret;

	_SPINLOCK(&static_init_lock);

	if (*mutex == NULL)
		ret = _pthread_mutex_init(mutex, &static_mattr);
	else
		ret = 0;

	_SPINUNLOCK(&static_init_lock);

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	int	ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*mutex != NULL) || (ret = init_static(mutex)) == 0)
		ret = mutex_lock_common(mutex, 1);

	return (ret);
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	int	ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	else if ((*mutex != NULL) || (ret = init_static_private(mutex)) == 0)
		ret = mutex_lock_common(mutex, 1);

	return (ret);
}

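/*
 * Editorial sketch of the trylock contract implemented above
 * (hypothetical caller code, not part of the library): a statically
 * initialized mutex is dynamically initialized on first use, and EBUSY
 * signals contention rather than an error:
 *
 *	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
 *
 *	if (pthread_mutex_trylock(&m) == 0) {
 *		... critical section ...
 *		pthread_mutex_unlock(&m);
 *	} else {
 *		... do other work and retry later ...
 *	}
 */
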
static int
mutex_lock_common(pthread_mutex_t *mutex, int nonblock)
{
	int	ret, error, inCancel;

	ret = error = inCancel = 0;

	PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_lock_common");

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		/* _thread_kern_sig_defer(); */

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*mutex)->m_queue);
			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
			_MUTEX_INIT_LINK(*mutex);
		}

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			if ((error = get_muncontested(*mutex, nonblock)) == -1) {
				if (nonblock) {
					ret = EBUSY;
					break;
				} else {
					get_mcontested(*mutex);
				}
			} else
				ret = error;
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			if ((error = get_muncontested(*mutex, nonblock)) == 0) {
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 */
				(*mutex)->m_prio = curthread->active_priority;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;
			} else if (error == -1) {
				if (nonblock) {
					ret = EBUSY;
					break;
				} else {
					get_mcontested(*mutex);
				}

				if (curthread->active_priority >
				    (*mutex)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(*mutex);
			} else {
				ret = error;
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*mutex)->m_prio)
				ret = EINVAL;

			if ((error = get_muncontested(*mutex, nonblock)) == 0) {
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority:
				 */
				curthread->active_priority = (*mutex)->m_prio;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;
			} else if (error == -1) {
				if (nonblock) {
					ret = EBUSY;
					break;
				}

				/* Clear any previous error: */
				curthread->error = 0;

				get_mcontested(*mutex);

				/*
				 * The thread's priority may have changed while
				 * waiting for the mutex, causing a ceiling
				 * violation.
				 */
				ret = curthread->error;
				curthread->error = 0;
			} else {
				ret = error;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/*
		 * Check to see if this thread was interrupted and
		 * is still in the mutex queue of waiting threads:
		 */
		if (curthread->cancelflags & PTHREAD_CANCELLING) {
			if (!nonblock)
				mutex_queue_remove(*mutex, curthread);
			inCancel = 1;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		/* _thread_kern_sig_undefer(); */
		if (inCancel) {
			pthread_testcancel();
			PANIC("Canceled thread came back.\n");
		}
	} while ((*mutex)->m_owner != curthread && ret == 0);

	/* Return the completion status: */
	return (ret);
}

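/*
 * Editorial summary of the locking paths above (not part of the original
 * source).  For an uncontested mutex, get_muncontested() grants ownership
 * immediately; for a contested one, get_mcontested() blocks the caller.
 * The protocol cases differ only in their priority bookkeeping, e.g. for
 * a PTHREAD_PRIO_PROTECT mutex with ceiling 20:
 *
 *	- a thread running at active priority 25 gets EINVAL (ceiling
 *	  violation);
 *	- a thread at priority 10 acquires the mutex and runs at the
 *	  ceiling priority 20 until it unlocks.
 */
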
int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	int	ret = 0;

	if (_thread_initial == NULL)
		_thread_init();

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*mutex != NULL) || ((ret = init_static(mutex)) == 0))
		ret = mutex_lock_common(mutex, 0);

	return (ret);
}

int
_pthread_mutex_lock(pthread_mutex_t *mutex)
{
	int	ret = 0;

	if (_thread_initial == NULL)
		_thread_init();

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	else if ((*mutex != NULL) || ((ret = init_static_private(mutex)) == 0))
		ret = mutex_lock_common(mutex, 0);

	return (ret);
}

int
_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	return (mutex_unlock_common(mutex, /* add reference */ 0));
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex)
{
	return (mutex_unlock_common(mutex, /* add reference */ 1));
}

int
_mutex_cv_lock(pthread_mutex_t *mutex)
{
	int	ret;

	if ((ret = _pthread_mutex_lock(mutex)) == 0)
		(*mutex)->m_refcount--;
	return (ret);
}

static inline int
mutex_self_trylock(pthread_mutex_t mutex)
{
	int	ret = 0;

	switch (mutex->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		/*
		 * POSIX specifies EDEADLK for a blocking recursive lock
		 * of an error-checking mutex; for a trylock, however,
		 * the mutex is simply busy:
		 */
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		mutex->m_data.m_count++;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static inline int
mutex_self_lock(pthread_mutex_t mutex)
{
	int	ret = 0;

	switch (mutex->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if a
		 * recursive lock is detected.
		 */
		ret = EDEADLK;
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		/* XXX Sched lock. */
		PTHREAD_SET_STATE(curthread, PS_DEADLOCK);
		_SPINUNLOCK(&(mutex)->lock);
		_thread_suspend(curthread, NULL);
		PANIC("Shouldn't resume here?\n");
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		mutex->m_data.m_count++;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

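/*
 * Editorial sketch of how the condition variable hooks above are used
 * (hypothetical library-internal caller, not application code).  The
 * reference count keeps a mutex from being destroyed while a condition
 * wait has it temporarily released:
 *
 *	_mutex_cv_unlock(&m);	release m, m_refcount++
 *	... sleep on the condition variable ...
 *	_mutex_cv_lock(&m);	reacquire m, m_refcount--
 *
 * While m_refcount is non-zero, _pthread_mutex_destroy() refuses the
 * mutex with EBUSY.
 */
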
static inline int
mutex_unlock_common(pthread_mutex_t *mutex, int add_reference)
{
	int	ret = 0;

	if (mutex == NULL || *mutex == NULL) {
		ret = EINVAL;
	} else {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		/* _thread_kern_sig_defer(); */

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of
				 * threads waiting on the mutex.  The deq
				 * function will have already locked it
				 * for us.
				 */
				if (((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) {
					/* Make the new owner runnable: */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					/*
					 * Add the mutex to the thread's list of
					 * owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
				    (*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of threads
				 * waiting on the mutex.  It will already be
				 * locked for us.
				 */
				if (((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) == NULL)
					/* This mutex has no priority. */
					(*mutex)->m_prio = 0;
				else {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the thread's list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Set the priority of the mutex.  Since
					 * our waiting threads are in descending
					 * priority order, the priority of the
					 * mutex becomes the active priority of
					 * the thread we just dequeued.
					 */
					(*mutex)->m_prio =
					    (*mutex)->m_owner->active_priority;

					/*
					 * Save the owning thread's inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
					    (*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread's inherited priority
					 * now becomes its active priority (the
					 * priority of the mutex).
					 */
					(*mutex)->m_owner->inherited_priority =
					    (*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
				    (*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Enter a loop to find a waiting thread whose
				 * active priority will not cause a ceiling
				 * violation.  It will already be locked for us.
				 */
				while ((((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) &&
				    ((*mutex)->m_owner->active_priority >
				    (*mutex)->m_prio)) {
					/*
					 * Either the mutex ceiling priority has
					 * been lowered and/or this thread's
					 * priority has been raised subsequent
					 * to the thread being queued on the
					 * waiting list.
					 */
					(*mutex)->m_owner->error = EINVAL;
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
					/*
					 * The thread is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					_thread_critical_exit((*mutex)->m_owner);
				}

				/* Check for a new owner: */
				if ((*mutex)->m_owner != NULL) {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the thread's list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Save the owning thread's inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
					    (*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread inherits the
					 * ceiling priority of the mutex and
					 * executes at that priority:
					 */
					(*mutex)->m_owner->inherited_priority =
					    (*mutex)->m_prio;
					(*mutex)->m_owner->active_priority =
					    (*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0)) {
			/* Increment the reference count: */
			(*mutex)->m_refcount++;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		/* _thread_kern_sig_undefer(); */
	}

	/* Return the completion status: */
	return (ret);
}

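/*
 * Editorial example of the priority-inheritance handoff implemented above
 * (not part of the original source).  Suppose thread A (base priority 10)
 * owns a PTHREAD_PRIO_INHERIT mutex and thread B (priority 20) blocks on
 * it:
 *
 *	- while B waits, A runs with inherited priority 20 (the mutex
 *	  priority), so A cannot be starved by mid-priority threads;
 *	- on unlock, A's inherited priority is restored from m_saved_prio
 *	  and B is dequeued, made runnable, and becomes the owner, with
 *	  the mutex priority reset to B's active priority.
 */
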
/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called while thread scheduling is deferred.
 */
void
_mutex_notify_priochange(pthread_t pthread)
{
	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 */
		mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also effect a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (pthread->state == PS_MUTEX_WAIT) {
		/* Lock the mutex structure: */
		_SPINLOCK(&pthread->data.mutex->lock);

		/*
		 * Check to make sure this thread is still in the same state
		 * (the spinlock above can yield the CPU to another thread):
		 */
		if (pthread->state == PS_MUTEX_WAIT) {
			/*
			 * Remove and reinsert this thread into the list of
			 * waiting threads to preserve decreasing priority
			 * order.
			 */
			mutex_queue_remove(pthread->data.mutex, pthread);
			mutex_queue_enq(pthread->data.mutex, pthread);

			if (pthread->data.mutex->m_protocol ==
			    PTHREAD_PRIO_INHERIT) {
				/* Adjust priorities: */
				mutex_priority_adjust(pthread->data.mutex);
			}
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&pthread->data.mutex->lock);
	}
}

/*
 * Called when a new thread is added to the mutex waiting queue or
 * when the priority of a thread already in the mutex waiting queue
 * changes.
 */
static void
mutex_priority_adjust(pthread_mutex_t mutex)
{
	pthread_t	pthread_next, pthread = mutex->m_owner;
	int		temp_prio;
	pthread_mutex_t	m = mutex;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);	/* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	while (m != NULL) {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all the mutexes this thread has
		 * locked since taking this mutex.  This also has a
		 * potential side-effect of changing the thread's priority.
		 */
		mutex_rescan_owned(pthread, m);

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    (pthread->state == PS_MUTEX_WAIT) &&
		    (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Grab the mutex this thread is waiting on: */
			m = pthread->data.mutex;

			/*
			 * The priority for this thread has changed.  Remove
			 * and reinsert this thread into the list of waiting
			 * threads to preserve decreasing priority order.
			 */
			mutex_queue_remove(m, pthread);
			mutex_queue_enq(m, pthread);

			/* Grab the waiting thread with highest priority: */
			pthread_next = TAILQ_FIRST(&m->m_queue);

			/*
			 * Calculate the mutex priority as the maximum of the
			 * highest active priority of any waiting threads and
			 * the owning thread's active priority.
			 */
			temp_prio = MAX(pthread_next->active_priority,
			    MAX(m->m_saved_prio, m->m_owner->base_priority));

			if (temp_prio != m->m_prio) {
				/*
				 * The priority needs to be propagated to the
				 * mutex this thread is waiting on and up to
				 * the owner of that mutex.
				 */
				m->m_prio = temp_prio;
				pthread = m->m_owner;
			}
			else
				/* We're done: */
				m = NULL;

		}
		else
			/* We're done: */
			m = NULL;
	}
}

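/*
 * Editorial walk-through of the propagation loop above (illustrative
 * only).  If thread A waits on mutex M1 owned by B, and B in turn waits
 * on M2 owned by C, then raising A's priority first raises M1's priority,
 * then B's active priority via mutex_rescan_owned(), then M2's priority,
 * and finally C's, following the owner chain until a mutex priority is
 * found to be unchanged.
 */
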
static void
mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
{
	int		active_prio, inherited_prio;
	pthread_mutex_t	m;
	pthread_t	pthread_next;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	}
	else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	while (m != NULL) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				    pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}

		/* Advance to the next mutex owned by this thread: */
		m = TAILQ_NEXT(m, m_qe);
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
#if 0
		/*
		 * If this thread is in the priority queue, it must be
		 * removed and reinserted for its new priority.
		 */
		if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
			/*
			 * Remove the thread from the priority queue
			 * before changing its priority:
			 */
			PTHREAD_PRIOQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_HEAD(pthread);
			}
			else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_TAIL(pthread);
			}
		}
		else {
			/* Set the new active priority. */
			pthread->active_priority = active_prio;
		}
#endif
		pthread->active_priority = active_prio;
	}
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

void
_mutex_lock_backout(pthread_t pthread)
{
	struct pthread_mutex	*mutex;

	/*
	 * Defer signals to protect the scheduling queues from
	 * access by the signal handler:
	 */
	/* _thread_kern_sig_defer(); */

	/* XXX - Necessary to obey lock order */
	_SPINLOCK(&pthread->lock);
	mutex = pthread->data.mutex;
	_SPINUNLOCK(&pthread->lock);

	_SPINLOCK(&mutex->lock);

	_thread_critical_enter(pthread);
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
		mutex_queue_remove(mutex, pthread);

		/* This thread is no longer waiting for the mutex: */
		pthread->data.mutex = NULL;
	}
	/*
	 * Undefer and handle pending signals, yielding if
	 * necessary:
	 */
	/* _thread_kern_sig_undefer(); */

	_thread_critical_exit(pthread);
	_SPINUNLOCK(&mutex->lock);
}

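/*
 * Editorial note on _mutex_unlock_private() above (illustrative, not
 * from the original source): it is intended for teardown paths such as
 * thread exit, where every private (libc-internal) mutex still on the
 * thread's mutexq must be released.  The list is walked with a saved
 * m_next pointer because _pthread_mutex_unlock() removes the mutex from
 * the very queue being traversed.
 */
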
/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.  This function will return with the thread locked.
 */
static inline pthread_t
mutex_queue_deq(pthread_mutex_t mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		_thread_critical_enter(pthread);
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		if ((pthread->cancelflags & PTHREAD_CANCELLING) == 0 &&
		    pthread->state == PS_MUTEX_WAIT)
			break;
		else
			_thread_critical_exit(pthread);
	}

	return (pthread);
}

/*
 * Remove a waiting thread from a mutex queue in descending priority order.
 */
static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
	}
}

/*
 * Enqueue a waiting thread to a queue in descending priority order.
 */
static inline void
mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
	char *name;

	name = pthread->name ? pthread->name : "unknown";
	if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0)
		_thread_printf(2, "Thread (%s:%u) already on condq\n",
		    name, pthread->uniqueid);
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0)
		_thread_printf(2, "Thread (%s:%u) already on mutexq\n",
		    name, pthread->uniqueid);
	PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);

	/*
	 * For the common case of all threads having equal priority,
	 * we perform a quick check against the priority of the thread
	 * at the tail of the queue.
	 */
	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
	else {
		tid = TAILQ_FIRST(&mutex->m_queue);
		while (pthread->active_priority <= tid->active_priority)
			tid = TAILQ_NEXT(tid, sqe);
		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
	}
	pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
}

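/*
 * Editorial example of the queue ordering maintained above (illustrative
 * only).  Threads enqueue in descending active-priority order, with ties
 * kept FIFO: after enqueueing threads with priorities 15, 20, and 15,
 * the queue reads 20, 15, 15, and mutex_queue_deq() hands the mutex to
 * the priority-20 waiter first.
 */
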
/*
 * Returns 0, with the lock owned and on the thread's mutexq, if the
 * mutex was unowned.  Returns -1 if the mutex is owned by another
 * thread, or the result of the self lock/trylock if the calling thread
 * already owns it.
 */
static int
get_muncontested(pthread_mutex_t mutexp, int nonblock)
{
	if (mutexp->m_owner != NULL && mutexp->m_owner != curthread) {
		return (-1);
	} else if (mutexp->m_owner == curthread) {
		if (nonblock)
			return (mutex_self_trylock(mutexp));
		else
			return (mutex_self_lock(mutexp));
	}

	/*
	 * The mutex belongs to this thread now.  Mark it as
	 * such.  Add it to the list of mutexes owned by this
	 * thread.
	 */
	mutexp->m_owner = curthread;
	_MUTEX_ASSERT_NOT_OWNED(mutexp);
	TAILQ_INSERT_TAIL(&curthread->mutexq, mutexp, m_qe);
	return (0);
}

/*
 * Returns with the lock owned and on the thread's mutexq.  If
 * the mutex is currently owned by another thread it will sleep
 * until it is available.
 */
static void
get_mcontested(pthread_mutex_t mutexp)
{
	_thread_critical_enter(curthread);

	/*
	 * Put this thread on the mutex's list of waiting threads.
	 * The lock on the thread ensures atomic (as far as other
	 * threads are concerned) setting of the thread state with
	 * its status on the mutex queue.
	 */
	do {
		mutex_queue_enq(mutexp, curthread);
		PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT);
		curthread->data.mutex = mutexp;
		_thread_critical_exit(curthread);
		_SPINUNLOCK(&mutexp->lock);
		_thread_suspend(curthread, NULL);

		_SPINLOCK(&mutexp->lock);
		_thread_critical_enter(curthread);
	} while ((curthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0);

	_thread_critical_exit(curthread);
}