/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define _MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define _MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define _MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * Prototypes
 */
static int		get_muncontested(pthread_mutex_t, int);
static void		get_mcontested(pthread_mutex_t);
static int		mutex_init(pthread_mutex_t *, int);
static int		mutex_lock_common(pthread_mutex_t *, int);
static inline int	mutex_self_trylock(pthread_mutex_t);
static inline int	mutex_self_lock(pthread_mutex_t);
static inline int	mutex_unlock_common(pthread_mutex_t *, int);
static void		mutex_priority_adjust(pthread_mutex_t);
static void		mutex_rescan_owned(pthread_t, pthread_mutex_t);
static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);

static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;

static struct pthread_mutex_attr	static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t		static_mattr = &static_mutex_attr;

/*
 * The double underscore versions are the application entry points;
 * single underscore versions are provided for libc internal usage:
 */
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_unlock, pthread_mutex_unlock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);

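/*
 * Usage sketch (illustrative only, kept out of the build): a mutex
 * declared with PTHREAD_MUTEX_INITIALIZER is not allocated until it
 * is first used.  The first lock or trylock call sees the sentinel
 * value and performs the deferred initialization under
 * static_init_lock; see mutex_init() below.
 */
#if 0
static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;

static void
example(void)
{
	/* The first lock triggers the dynamic initialization. */
	pthread_mutex_lock(&example_lock);
	/* ... critical section ... */
	pthread_mutex_unlock(&example_lock);
}
#endif
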
/*
 * Reinitialize a private mutex; this is only used for internal mutexes.
 */
int
_mutex_reinit(pthread_mutex_t *mutex)
{
	int	ret = 0;

	if (mutex == NULL)
		ret = EINVAL;
	else if (*mutex == PTHREAD_MUTEX_INITIALIZER)
		ret = _pthread_mutex_init(mutex, NULL);
	else {
		/*
		 * Initialize the mutex structure:
		 */
		(*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
		(*mutex)->m_protocol = PTHREAD_PRIO_NONE;
		TAILQ_INIT(&(*mutex)->m_queue);
		(*mutex)->m_owner = NULL;
		(*mutex)->m_data.m_count = 0;
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE;
		(*mutex)->m_refcount = 0;
		(*mutex)->m_prio = 0;
		(*mutex)->m_saved_prio = 0;
		_MUTEX_INIT_LINK(*mutex);
		memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
	}
	return (ret);
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	enum pthread_mutextype	type;
	int		protocol;
	int		ceiling;
	int		flags;
	pthread_mutex_t	pmutex;
	int		ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/* Check if default mutex attributes: */
	if (mutex_attr == NULL || *mutex_attr == NULL) {
		/* Default to an (error checking) POSIX mutex: */
		type = PTHREAD_MUTEX_ERRORCHECK;
		protocol = PTHREAD_PRIO_NONE;
		ceiling = PTHREAD_MAX_PRIORITY;
		flags = 0;
	}

	/* Check mutex type: */
	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
	    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
		/* Return an invalid argument error: */
		ret = EINVAL;

	/* Check mutex protocol: */
	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
	    ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
		/* Return an invalid argument error: */
		ret = EINVAL;

	else {
		/* Use the requested mutex type and protocol: */
		type = (*mutex_attr)->m_type;
		protocol = (*mutex_attr)->m_protocol;
		ceiling = (*mutex_attr)->m_ceiling;
		flags = (*mutex_attr)->m_flags;
	}

	/* Check no errors so far: */
	if (ret == 0) {
		if ((pmutex = (pthread_mutex_t)
		    malloc(sizeof(struct pthread_mutex))) == NULL)
			ret = ENOMEM;
		else {
			/* Set the mutex flags: */
			pmutex->m_flags = flags;

			/* Process according to mutex type: */
			switch (type) {
			/* case PTHREAD_MUTEX_DEFAULT: */
			case PTHREAD_MUTEX_ERRORCHECK:
			case PTHREAD_MUTEX_NORMAL:
				/* Nothing to do here. */
				break;

			/* Single UNIX Spec 2 recursive mutex: */
			case PTHREAD_MUTEX_RECURSIVE:
				/* Reset the mutex count: */
				pmutex->m_data.m_count = 0;
				break;

			/* Trap invalid mutex types: */
			default:
				/* Return an invalid argument error: */
				ret = EINVAL;
				break;
			}
			if (ret == 0) {
				/* Initialise the rest of the mutex: */
				TAILQ_INIT(&pmutex->m_queue);
				pmutex->m_flags |= MUTEX_FLAGS_INITED;
				pmutex->m_owner = NULL;
				pmutex->m_type = type;
				pmutex->m_protocol = protocol;
				pmutex->m_refcount = 0;
				if (protocol == PTHREAD_PRIO_PROTECT)
					pmutex->m_prio = ceiling;
				else
					pmutex->m_prio = 0;
				pmutex->m_saved_prio = 0;
				_MUTEX_INIT_LINK(pmutex);
				memset(&pmutex->lock, 0, sizeof(pmutex->lock));
				*mutex = pmutex;
			} else {
				free(pmutex);
				*mutex = NULL;
			}
		}
	}
	/* Return the completion status: */
	return (ret);
}

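/*
 * Usage sketch (illustrative only, kept out of the build): requesting
 * a recursive mutex through a mutex attribute object.  The attribute
 * calls are the standard pthread API, implemented elsewhere in this
 * library.
 */
#if 0
static void
example_recursive(void)
{
	pthread_mutex_t m;
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);
}
#endif
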
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	int	ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * Check to see if this mutex is in use:
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			ret = EBUSY;

			/* Unlock the mutex structure: */
			_SPINUNLOCK(&(*mutex)->lock);
		}
		else {
			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			_MUTEX_ASSERT_NOT_OWNED(*mutex);

			/* Unlock the mutex structure: */
			_SPINUNLOCK(&(*mutex)->lock);

			free(*mutex);

			/*
			 * Leave the caller's pointer NULL now that
			 * the mutex has been destroyed:
			 */
			*mutex = NULL;
		}
	}

	/* Return the completion status: */
	return (ret);
}

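/*
 * Usage sketch (illustrative only, kept out of the build): a mutex
 * that is locked, has waiters, or is referenced by a condition
 * variable cannot be destroyed.
 */
#if 0
static void
example_destroy(pthread_mutex_t *m)
{
	pthread_mutex_lock(m);
	assert(pthread_mutex_destroy(m) == EBUSY);	/* still locked */
	pthread_mutex_unlock(m);
	assert(pthread_mutex_destroy(m) == 0);		/* *m is now NULL */
}
#endif
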
static int
mutex_init(pthread_mutex_t *mutex, int private)
{
	pthread_mutexattr_t *pma;
	int error;

	error = 0;
	pma = private ? &static_mattr : NULL;
	_SPINLOCK(&static_init_lock);
	if (*mutex == PTHREAD_MUTEX_INITIALIZER)
		error = _pthread_mutex_init(mutex, pma);
	_SPINUNLOCK(&static_init_lock);
	return (error);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	int	ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
	    (ret = mutex_init(mutex, 0)) == 0)
		ret = mutex_lock_common(mutex, 1);

	return (ret);
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	int	ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
	    (ret = mutex_init(mutex, 1)) == 0)
		ret = mutex_lock_common(mutex, 1);

	return (ret);
}

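/*
 * Usage sketch (illustrative only, kept out of the build): trylock
 * never blocks; EBUSY means another thread holds the mutex.
 */
#if 0
static void
example_trylock(pthread_mutex_t *m)
{
	if (pthread_mutex_trylock(m) == 0) {
		/* ... got the lock without sleeping ... */
		pthread_mutex_unlock(m);
	} else {
		/* EBUSY: do other work and retry later. */
	}
}
#endif
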
static int
mutex_lock_common(pthread_mutex_t *mutex, int nonblock)
{
	int ret, error, inCancel;

	ret = error = inCancel = 0;

	PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_lock_common");

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		/* _thread_kern_sig_defer(); */

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*mutex)->m_queue);
			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
			_MUTEX_INIT_LINK(*mutex);
		}

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			if ((error = get_muncontested(*mutex, nonblock)) == -1) {
				if (nonblock) {
					ret = EBUSY;
					break;
				} else {
					get_mcontested(*mutex);
				}
			} else
				ret = error;
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			if ((error = get_muncontested(*mutex, nonblock)) == 0) {
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 */
				(*mutex)->m_prio = curthread->active_priority;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;
			} else if (error == -1) {
				if (nonblock) {
					ret = EBUSY;
					break;
				} else {
					get_mcontested(*mutex);
				}

				if (curthread->active_priority >
				    (*mutex)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(*mutex);
			} else {
				ret = error;
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*mutex)->m_prio)
				ret = EINVAL;

			if ((error = get_muncontested(*mutex, nonblock)) == 0) {
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority:
				 */
				curthread->active_priority = (*mutex)->m_prio;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;
			} else if (error == -1) {
				if (nonblock) {
					ret = EBUSY;
					break;
				}

				/* Clear any previous error: */
				curthread->error = 0;

				get_mcontested(*mutex);

				/*
				 * The thread's priority may have changed while
				 * waiting for the mutex, causing a ceiling
				 * violation.
				 */
				ret = curthread->error;
				curthread->error = 0;
			} else {
				ret = error;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/*
		 * Check to see if this thread was interrupted and
		 * is still in the mutex queue of waiting threads:
		 */
		if (curthread->cancelflags & PTHREAD_CANCELLING) {
			if (!nonblock)
				mutex_queue_remove(*mutex, curthread);
			inCancel = 1;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		/* _thread_kern_sig_undefer(); */
		if (inCancel) {
			pthread_testcancel();
			PANIC("Canceled thread came back.\n");
		}
	} while ((*mutex)->m_owner != curthread && ret == 0);

	/* Return the completion status: */
	return (ret);
}

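/*
 * Usage sketch (illustrative only, kept out of the build): selecting a
 * locking protocol with the standard attribute calls.  Under
 * PTHREAD_PRIO_PROTECT the ceiling must be at least the priority of
 * every thread that will lock the mutex; the ceiling check above
 * returns EINVAL otherwise.
 */
#if 0
static void
example_ceiling(pthread_mutex_t *m)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
	pthread_mutexattr_setprioceiling(&attr, PTHREAD_MAX_PRIORITY);
	pthread_mutex_init(m, &attr);
	pthread_mutexattr_destroy(&attr);
}
#endif
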
int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	int	ret = 0;

	if (_thread_initial == NULL)
		_thread_init();

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
	    ((ret = mutex_init(mutex, 0)) == 0))
		ret = mutex_lock_common(mutex, 0);

	return (ret);
}

/*
 * Libc internal.
 */
int
_pthread_mutex_lock(pthread_mutex_t *mutex)
{
	int	ret = 0;

	if (_thread_initial == NULL)
		_thread_init();

	_thread_sigblock();

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
	    ((ret = mutex_init(mutex, 1)) == 0))
		ret = mutex_lock_common(mutex, 0);

	if (ret != 0)
		_thread_sigunblock();

	return (ret);
}

int
__pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	return (mutex_unlock_common(mutex, /* add reference */ 0));
}

/*
 * Libc internal.
 */
int
_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	int error;

	if ((error = mutex_unlock_common(mutex, /* add reference */ 0)) == 0)
		_thread_sigunblock();
	return (error);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex)
{
	return (mutex_unlock_common(mutex, /* add reference */ 1));
}

int
_mutex_cv_lock(pthread_mutex_t *mutex)
{
	int	ret;

	if ((ret = _pthread_mutex_lock(mutex)) == 0)
		(*mutex)->m_refcount--;
	return (ret);
}

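/*
 * Note (illustrative): _mutex_cv_unlock() and _mutex_cv_lock() are the
 * internal entry points used by the condition variable code.  The
 * reference count they maintain keeps the mutex from being destroyed
 * while a wait is in progress; pthread_cond_wait(cv, &m) roughly does:
 *
 *	_mutex_cv_unlock(&m);	(m_refcount++, lock released)
 *	... sleep on cv ...
 *	_mutex_cv_lock(&m);	(lock reacquired, m_refcount--)
 */
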
static inline int
mutex_self_trylock(pthread_mutex_t mutex)
{
	int	ret = 0;

	switch (mutex->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		/*
		 * POSIX specifies that pthread_mutex_trylock() should
		 * return EBUSY when the mutex is already locked.
		 */
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		mutex->m_data.m_count++;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static inline int
mutex_self_lock(pthread_mutex_t mutex)
{
	int	ret = 0;

	switch (mutex->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if a
		 * recursive lock is detected.
		 */
		ret = EDEADLK;
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		/* XXX Sched lock. */
		PTHREAD_SET_STATE(curthread, PS_DEADLOCK);
		_SPINUNLOCK(&(mutex)->lock);
		_thread_suspend(curthread, NULL);
		PANIC("Shouldn't resume here?\n");
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		mutex->m_data.m_count++;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

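/*
 * Summary (illustrative): what a thread sees when it relocks a mutex
 * it already owns, per the two handlers above:
 *
 *	Type				lock		trylock
 *	PTHREAD_MUTEX_ERRORCHECK	EDEADLK		EBUSY
 *	PTHREAD_MUTEX_RECURSIVE		0 (count++)	0 (count++)
 *	PTHREAD_MUTEX_NORMAL		deadlocks	EBUSY
 */
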
static inline int
mutex_unlock_common(pthread_mutex_t *mutex, int add_reference)
{
	int	ret = 0;

	if (mutex == NULL || *mutex == NULL) {
		ret = EINVAL;
	} else {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		/* _thread_kern_sig_defer(); */

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of
				 * threads waiting on the mutex.  The deq
				 * function will have already locked it
				 * for us.
				 */
				if (((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) {
					/* Make the new owner runnable: */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					/*
					 * Add the mutex to the thread's list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
				    (*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of threads
				 * waiting on the mutex.  It will already be
				 * locked for us.
				 */
				if (((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) == NULL)
					/* This mutex has no priority. */
					(*mutex)->m_prio = 0;
				else {
					/*
					 * Track number of priority mutexes
					 * owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the thread's list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Set the priority of the mutex.  Since
					 * our waiting threads are in descending
					 * priority order, the priority of the
					 * mutex becomes the active priority of
					 * the thread we just dequeued.
					 */
					(*mutex)->m_prio =
					    (*mutex)->m_owner->active_priority;

					/*
					 * Save the owning thread's inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
					    (*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread's inherited priority
					 * now becomes its active priority (the
					 * priority of the mutex).
					 */
					(*mutex)->m_owner->inherited_priority =
					    (*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
				    (*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Enter a loop to find a waiting thread whose
				 * active priority will not cause a ceiling
				 * violation.  It will already be locked for us.
				 */
				while ((((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) &&
				    ((*mutex)->m_owner->active_priority >
				    (*mutex)->m_prio)) {
					/*
					 * Either the mutex ceiling priority has
					 * been lowered and/or this thread's
					 * priority has been raised subsequent
					 * to the thread being queued on the
					 * waiting list.
					 */
					(*mutex)->m_owner->error = EINVAL;
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
					/*
					 * The thread is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					_thread_critical_exit((*mutex)->m_owner);
				}

				/* Check for a new owner: */
				if ((*mutex)->m_owner != NULL) {
					/*
					 * Track number of priority mutexes
					 * owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the thread's list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Save the owning thread's inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
					    (*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread inherits the
					 * ceiling priority of the mutex and
					 * executes at that priority:
					 */
					(*mutex)->m_owner->inherited_priority =
					    (*mutex)->m_prio;
					(*mutex)->m_owner->active_priority =
					    (*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0)) {
			/* Increment the reference count: */
			(*mutex)->m_refcount++;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		/* _thread_kern_sig_undefer(); */
	}

	/* Return the completion status: */
	return (ret);
}

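/*
 * Usage sketch (illustrative only, kept out of the build): all three
 * protocols above refuse an unlock from a thread that does not own
 * the mutex.
 */
#if 0
static void
example_not_owner(pthread_mutex_t *m)
{
	/* Thread A: */
	pthread_mutex_lock(m);
	/* Thread B, concurrently: */
	assert(pthread_mutex_unlock(m) == EPERM);	/* A still owns it */
}
#endif
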
/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called while thread scheduling is deferred.
 */
void
_mutex_notify_priochange(pthread_t pthread)
{
	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 */
		mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (pthread->state == PS_MUTEX_WAIT) {
		/* Lock the mutex structure: */
		_SPINLOCK(&pthread->data.mutex->lock);

		/*
		 * Check to make sure this thread is still in the same state
		 * (the spinlock above can yield the CPU to another thread):
		 */
		if (pthread->state == PS_MUTEX_WAIT) {
			/*
			 * Remove and reinsert this thread into the list of
			 * waiting threads to preserve decreasing priority
			 * order.
			 */
			mutex_queue_remove(pthread->data.mutex, pthread);
			mutex_queue_enq(pthread->data.mutex, pthread);

			if (pthread->data.mutex->m_protocol ==
			    PTHREAD_PRIO_INHERIT) {
				/* Adjust priorities: */
				mutex_priority_adjust(pthread->data.mutex);
			}
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&pthread->data.mutex->lock);
	}
}

/*
 * Called when a new thread is added to the mutex waiting queue or
 * when a thread that is already in the mutex waiting queue changes
 * priority.
 */
static void
mutex_priority_adjust(pthread_mutex_t mutex)
{
	pthread_t	pthread_next, pthread = mutex->m_owner;
	int		temp_prio;
	pthread_mutex_t	m = mutex;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);	/* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	while (m != NULL) {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all the mutexes this thread has
		 * locked since taking this mutex.  This also has a
		 * potential side-effect of changing the thread's priority.
		 */
		mutex_rescan_owned(pthread, m);

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    (pthread->state == PS_MUTEX_WAIT) &&
		    (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Grab the mutex this thread is waiting on: */
			m = pthread->data.mutex;

			/*
			 * The priority for this thread has changed.  Remove
			 * and reinsert this thread into the list of waiting
			 * threads to preserve decreasing priority order.
			 */
			mutex_queue_remove(m, pthread);
			mutex_queue_enq(m, pthread);

			/* Grab the waiting thread with highest priority: */
			pthread_next = TAILQ_FIRST(&m->m_queue);

			/*
			 * Calculate the mutex priority as the maximum of the
			 * highest active priority of any waiting threads and
			 * the owning thread's active priority.
			 */
			temp_prio = MAX(pthread_next->active_priority,
			    MAX(m->m_saved_prio, m->m_owner->base_priority));

			if (temp_prio != m->m_prio) {
				/*
				 * The priority needs to be propagated to the
				 * mutex this thread is waiting on and up to
				 * the owner of that mutex.
				 */
				m->m_prio = temp_prio;
				pthread = m->m_owner;
			}
			else
				/* We're done: */
				m = NULL;
		}
		else
			/* We're done: */
			m = NULL;
	}
}

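/*
 * Worked example (illustrative): suppose low-priority T1 owns M1,
 * medium-priority T2 owns M2 and is blocked on M1, and high-priority
 * T3 now blocks on M2.  M2's priority rises to T3's active priority
 * and T2 inherits it; the loop above then walks the chain, raising
 * M1's priority and finally T1's, so T1 runs at T3's priority until
 * it releases M1.
 */
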
static void
mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
{
	int		active_prio, inherited_prio;
	pthread_mutex_t	m;
	pthread_t	pthread_next;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	}
	else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	while (m != NULL) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				    pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}

		/* Advance to the next mutex owned by this thread: */
		m = TAILQ_NEXT(m, m_qe);
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
#if 0
		/*
		 * If this thread is in the priority queue, it must be
		 * removed and reinserted for its new priority.
		 */
		if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
			/*
			 * Remove the thread from the priority queue
			 * before changing its priority:
			 */
			PTHREAD_PRIOQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_HEAD(pthread);
			}
			else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_TAIL(pthread);
			}
		}
		else {
			/* Set the new active priority. */
			pthread->active_priority = active_prio;
		}
#endif
		pthread->active_priority = active_prio;
	}
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

void
_mutex_lock_backout(pthread_t pthread)
{
	struct pthread_mutex	*mutex;

	/*
	 * Defer signals to protect the scheduling queues from
	 * access by the signal handler:
	 */
	/* _thread_kern_sig_defer(); */

	/* XXX - Necessary to obey lock order */
	THR_LOCK(&pthread->lock);
	mutex = pthread->data.mutex;
	THR_UNLOCK(&pthread->lock);

	_SPINLOCK(&mutex->lock);

	_thread_critical_enter(pthread);
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
		mutex_queue_remove(mutex, pthread);

		/* This thread is no longer waiting for the mutex: */
		pthread->data.mutex = NULL;
	}
	/*
	 * Undefer and handle pending signals, yielding if
	 * necessary:
	 */
	/* _thread_kern_sig_undefer(); */

	_thread_critical_exit(pthread);
	_SPINUNLOCK(&mutex->lock);
}

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.  This function will return with the thread locked.
 */
static inline pthread_t
mutex_queue_deq(pthread_mutex_t mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		_thread_critical_enter(pthread);
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		if ((pthread->cancelflags & PTHREAD_CANCELLING) == 0 &&
		    pthread->state == PS_MUTEX_WAIT)
			break;
		else
			_thread_critical_exit(pthread);
	}

	return (pthread);
}

/*
 * Remove a waiting thread from a mutex queue in descending priority order.
 */
static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
	}
}

/*
 * Enqueue a waiting thread to a queue in descending priority order.
 */
static inline void
mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
	char *name;

	name = pthread->name ? pthread->name : "unknown";
	if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0)
		_thread_printf(2, "Thread (%s:%u) already on condq\n",
		    name, pthread->uniqueid);
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0)
		_thread_printf(2, "Thread (%s:%u) already on mutexq\n",
		    name, pthread->uniqueid);
	PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
	/*
	 * For the common case of all threads having equal priority,
	 * we perform a quick check against the priority of the thread
	 * at the tail of the queue.
	 */
	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
	else {
		tid = TAILQ_FIRST(&mutex->m_queue);
		while (pthread->active_priority <= tid->active_priority)
			tid = TAILQ_NEXT(tid, sqe);
		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
	}
	pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
}

/*
 * Returns with the lock owned and on the owning thread's mutexq if
 * the mutex is currently unowned, in which case 0 is returned.
 * Returns -1 if the mutex is owned by another thread, or an error
 * number if the calling thread already owns it.
 */
static int
get_muncontested(pthread_mutex_t mutexp, int nonblock)
{
	if (mutexp->m_owner != NULL && mutexp->m_owner != curthread) {
		return (-1);
	} else if (mutexp->m_owner == curthread) {
		if (nonblock)
			return (mutex_self_trylock(mutexp));
		else
			return (mutex_self_lock(mutexp));
	}

	/*
	 * The mutex belongs to this thread now.  Mark it as
	 * such.  Add it to the list of mutexes owned by this
	 * thread.
	 */
	mutexp->m_owner = curthread;
	_MUTEX_ASSERT_NOT_OWNED(mutexp);
	TAILQ_INSERT_TAIL(&curthread->mutexq, mutexp, m_qe);
	return (0);
}

/*
 * Returns with the lock owned and on the thread's mutexq.  If
 * the mutex is currently owned by another thread it will sleep
 * until it is available.
 */
static void
get_mcontested(pthread_mutex_t mutexp)
{
	int error;

	_thread_critical_enter(curthread);

	/*
	 * Put this thread on the mutex's list of waiting threads.
	 * The lock on the thread ensures atomic (as far as other
	 * threads are concerned) setting of the thread state with
	 * its status on the mutex queue.
	 */
	mutex_queue_enq(mutexp, curthread);
	do {
		PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT);
		curthread->data.mutex = mutexp;
		_thread_critical_exit(curthread);
		_SPINUNLOCK(&mutexp->lock);
		error = _thread_suspend(curthread, NULL);
		if (error != 0 && error != EAGAIN && error != EINTR)
			PANIC("Cannot suspend on mutex.");

		_SPINLOCK(&mutexp->lock);
		_thread_critical_enter(curthread);
	} while ((curthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0);

	_thread_critical_exit(curthread);
}