/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define _MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define _MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define _MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * Prototypes
 */
static int		get_muncontested(pthread_mutex_t, int);
static void		get_mcontested(pthread_mutex_t);
static int		mutex_init(pthread_mutex_t *, int);
static int		mutex_lock_common(pthread_mutex_t *, int);
static inline int	mutex_self_trylock(pthread_mutex_t);
static inline int	mutex_self_lock(pthread_mutex_t);
static inline int	mutex_unlock_common(pthread_mutex_t *, int);
static void		mutex_priority_adjust(pthread_mutex_t);
static void		mutex_rescan_owned(pthread_t, pthread_mutex_t);
static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);

static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;

static struct pthread_mutex_attr static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t static_mattr = &static_mutex_attr;

/* Single underscore versions provided for libc internal usage: */
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_unlock, pthread_mutex_unlock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
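/*
 * Illustrative note (not part of this file's logic): the
 * __weak_reference() uses above make the standard POSIX names weak
 * aliases for the implementations here, so an application calling
 * pthread_mutex_lock() reaches __pthread_mutex_lock(), while libc can
 * bind directly to the single-underscore versions.  Roughly, and
 * assuming a GCC-style toolchain, the effect is similar to:
 *
 *	int __pthread_mutex_lock(pthread_mutex_t *);
 *	int pthread_mutex_lock(pthread_mutex_t *)
 *	    __attribute__((__weak__, __alias__("__pthread_mutex_lock")));
 */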
/*
 * Reinitialize a private mutex; this is only used for internal mutexes.
 */
int
_mutex_reinit(pthread_mutex_t *mutex)
{
	int ret = 0;

	if (mutex == NULL)
		ret = EINVAL;
	else if (*mutex == PTHREAD_MUTEX_INITIALIZER)
		ret = _pthread_mutex_init(mutex, NULL);
	else {
		/*
		 * Initialize the mutex structure:
		 */
		(*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
		(*mutex)->m_protocol = PTHREAD_PRIO_NONE;
		TAILQ_INIT(&(*mutex)->m_queue);
		(*mutex)->m_owner = NULL;
		(*mutex)->m_data.m_count = 0;
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE;
		(*mutex)->m_refcount = 0;
		(*mutex)->m_prio = 0;
		(*mutex)->m_saved_prio = 0;
		_MUTEX_INIT_LINK(*mutex);
		memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
	}
	return (ret);
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	enum pthread_mutextype type;
	int protocol;
	int ceiling;
	int flags;
	pthread_mutex_t pmutex;
	int ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/* Check if default mutex attributes: */
	if (mutex_attr == NULL || *mutex_attr == NULL) {
		/* Default to an error-checking POSIX mutex: */
		type = PTHREAD_MUTEX_ERRORCHECK;
		protocol = PTHREAD_PRIO_NONE;
		ceiling = PTHREAD_MAX_PRIORITY;
		flags = 0;
	}

	/* Check mutex type: */
	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
	    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
		/* Return an invalid argument error: */
		ret = EINVAL;

	/* Check mutex protocol: */
	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
	    ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
		/* Return an invalid argument error: */
		ret = EINVAL;

	else {
		/* Use the requested mutex type and protocol: */
		type = (*mutex_attr)->m_type;
		protocol = (*mutex_attr)->m_protocol;
		ceiling = (*mutex_attr)->m_ceiling;
		flags = (*mutex_attr)->m_flags;
	}

	/* Check no errors so far: */
	if (ret == 0) {
		if ((pmutex = (pthread_mutex_t)
		    malloc(sizeof(struct pthread_mutex))) == NULL)
			ret = ENOMEM;
		else {
			/* Set the mutex flags: */
			pmutex->m_flags = flags;

			/* Process according to mutex type: */
			switch (type) {
			/* case PTHREAD_MUTEX_DEFAULT: */
			case PTHREAD_MUTEX_ERRORCHECK:
			case PTHREAD_MUTEX_NORMAL:
				/* Nothing to do here. */
				break;

			/* Single UNIX Spec 2 recursive mutex: */
			case PTHREAD_MUTEX_RECURSIVE:
				/* Reset the mutex count: */
				pmutex->m_data.m_count = 0;
				break;

			/* Trap invalid mutex types: */
			default:
				/* Return an invalid argument error: */
				ret = EINVAL;
				break;
			}
			if (ret == 0) {
				/* Initialise the rest of the mutex: */
				TAILQ_INIT(&pmutex->m_queue);
				pmutex->m_flags |= MUTEX_FLAGS_INITED;
				pmutex->m_owner = NULL;
				pmutex->m_type = type;
				pmutex->m_protocol = protocol;
				pmutex->m_refcount = 0;
				if (protocol == PTHREAD_PRIO_PROTECT)
					pmutex->m_prio = ceiling;
				else
					pmutex->m_prio = 0;
				pmutex->m_saved_prio = 0;
				_MUTEX_INIT_LINK(pmutex);
				memset(&pmutex->lock, 0, sizeof(pmutex->lock));
				*mutex = pmutex;
			} else {
				free(pmutex);
				*mutex = NULL;
			}
		}
	}
	/* Return the completion status: */
	return (ret);
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * Check to see if this mutex is in use:
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			ret = EBUSY;

			/* Unlock the mutex structure: */
			_SPINUNLOCK(&(*mutex)->lock);
		}
		else {
			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			_MUTEX_ASSERT_NOT_OWNED(*mutex);

			/* Unlock the mutex structure: */
			_SPINUNLOCK(&(*mutex)->lock);

			free(*mutex);

			/*
			 * Leave the caller's pointer NULL now that
			 * the mutex has been destroyed:
			 */
			*mutex = NULL;
		}
	}

	/* Return the completion status: */
	return (ret);
}
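/*
 * A minimal usage sketch (illustrative only, not compiled into the
 * library): the init/destroy pair above backs the standard
 * pthread_mutex_init()/pthread_mutex_destroy() entry points.
 */
#if 0
static int
example_mutex_usage(void)
{
	pthread_mutex_t m;
	int error;

	/* NULL attributes select the default (error-checking) type. */
	if ((error = pthread_mutex_init(&m, NULL)) != 0)
		return (error);
	pthread_mutex_lock(&m);
	/* ... critical section ... */
	pthread_mutex_unlock(&m);
	/* Destroy fails with EBUSY while the mutex is owned, waited
	   on, or referenced by a condition wait. */
	return (pthread_mutex_destroy(&m));
}
#endif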
static int
mutex_init(pthread_mutex_t *mutex, int private)
{
	pthread_mutexattr_t *pma;
	int error;

	error = 0;
	pma = private ? &static_mattr : NULL;
	_SPINLOCK(&static_init_lock);
	if (*mutex == PTHREAD_MUTEX_INITIALIZER)
		error = _pthread_mutex_init(mutex, pma);
	_SPINUNLOCK(&static_init_lock);
	return (error);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	int ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
	    (ret = mutex_init(mutex, 0)) == 0)
		ret = mutex_lock_common(mutex, 1);

	return (ret);
}

/*
 * Libc internal.
 */
int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	int ret = 0;

	_thread_sigblock();

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
	    (ret = mutex_init(mutex, 1)) == 0)
		ret = mutex_lock_common(mutex, 1);

	if (ret != 0)
		_thread_sigunblock();

	return (ret);
}
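/*
 * Illustrative sketch (not part of the library): a statically
 * initialized mutex holds the sentinel value PTHREAD_MUTEX_INITIALIZER
 * until its first use, at which point the lock and trylock entry
 * points above call mutex_init() to allocate the real structure under
 * static_init_lock.
 */
#if 0
static pthread_mutex_t example_static_mutex = PTHREAD_MUTEX_INITIALIZER;

static void
example_static_usage(void)
{
	/* The first lock triggers the dynamic initialization. */
	pthread_mutex_lock(&example_static_mutex);
	pthread_mutex_unlock(&example_static_mutex);
}
#endif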
static int
mutex_lock_common(pthread_mutex_t *mutex, int nonblock)
{
	int ret, error, inCancel;

	ret = error = inCancel = 0;

	PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_lock_common");

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		/* _thread_kern_sig_defer(); */

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*mutex)->m_queue);
			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
			_MUTEX_INIT_LINK(*mutex);
		}

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			if ((error = get_muncontested(*mutex, nonblock)) == -1) {
				if (nonblock) {
					ret = EBUSY;
					break;
				} else {
					get_mcontested(*mutex);
				}
			} else
				ret = error;
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			if ((error = get_muncontested(*mutex, nonblock)) == 0) {
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 */
				(*mutex)->m_prio = curthread->active_priority;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;
			} else if (error == -1) {
				if (nonblock) {
					ret = EBUSY;
					break;
				} else {
					get_mcontested(*mutex);
				}

				if (curthread->active_priority >
				    (*mutex)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(*mutex);
			} else {
				ret = error;
			}
			break;
		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*mutex)->m_prio)
				ret = EINVAL;

			if ((error = get_muncontested(*mutex, nonblock)) == 0) {
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority:
				 */
				curthread->active_priority = (*mutex)->m_prio;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;
			} else if (error == -1) {
				if (nonblock) {
					ret = EBUSY;
					break;
				}

				/* Clear any previous error: */
				curthread->error = 0;

				get_mcontested(*mutex);

				/*
				 * The thread's priority may have changed while
				 * waiting for the mutex, causing a ceiling
				 * violation.
				 */
				ret = curthread->error;
				curthread->error = 0;
			} else {
				ret = error;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/*
		 * Check to see if this thread was interrupted and
		 * is still in the mutex queue of waiting threads:
		 */
		if (curthread->cancelflags & PTHREAD_CANCELLING) {
			if (!nonblock)
				mutex_queue_remove(*mutex, curthread);
			inCancel = 1;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		/* _thread_kern_sig_undefer(); */
		if (inCancel) {
			pthread_testcancel();
			PANIC("Canceled thread came back.\n");
		}
	} while ((*mutex)->m_owner != curthread && ret == 0);

	/* Return the completion status: */
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	int ret = 0;

	if (_thread_initial == NULL)
		_thread_init();

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
	    ((ret = mutex_init(mutex, 0)) == 0))
		ret = mutex_lock_common(mutex, 0);

	return (ret);
}

/*
 * Libc internal.
 */
int
_pthread_mutex_lock(pthread_mutex_t *mutex)
{
	int ret = 0;

	if (_thread_initial == NULL)
		_thread_init();

	_thread_sigblock();

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
	    ((ret = mutex_init(mutex, 1)) == 0))
		ret = mutex_lock_common(mutex, 0);

	if (ret != 0)
		_thread_sigunblock();

	return (ret);
}

int
__pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	return (mutex_unlock_common(mutex, /* add reference */ 0));
}

/*
 * Libc internal.
 */
int
_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	int error;

	if ((error = mutex_unlock_common(mutex, /* add reference */ 0)) == 0)
		_thread_sigunblock();
	return (error);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex)
{
	return (mutex_unlock_common(mutex, /* add reference */ 1));
}

int
_mutex_cv_lock(pthread_mutex_t *mutex)
{
	int ret;

	if ((ret = _pthread_mutex_lock(mutex)) == 0)
		(*mutex)->m_refcount--;
	return (ret);
}
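/*
 * Illustrative sketch (assumed caller, not part of this file): the
 * condition variable code pairs _mutex_cv_unlock()/_mutex_cv_lock() so
 * that m_refcount stays nonzero for the duration of a wait.  That
 * keeps pthread_mutex_destroy() returning EBUSY even while the waiter
 * does not own the mutex.
 */
#if 0
static void
example_cond_wait_protocol(pthread_mutex_t *m)
{
	/* Release the mutex but take a reference on it: */
	_mutex_cv_unlock(m);
	/* ... sleep until the condition variable is signaled ... */
	/* Reacquire the mutex and drop the reference: */
	_mutex_cv_lock(m);
}
#endif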
static inline int
mutex_self_trylock(pthread_mutex_t mutex)
{
	int ret = 0;

	switch (mutex->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		/*
		 * POSIX specifies EDEADLK for a detected recursive
		 * lock, but a trylock on an already-owned mutex simply
		 * fails with EBUSY:
		 */
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		mutex->m_data.m_count++;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static inline int
mutex_self_lock(pthread_mutex_t mutex)
{
	int ret = 0;

	switch (mutex->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if
		 * a recursive lock is detected.
		 */
		ret = EDEADLK;
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SUSv2 defines as a 'normal' mutex: intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		/* XXX Sched lock. */
		PTHREAD_SET_STATE(curthread, PS_DEADLOCK);
		_SPINUNLOCK(&(mutex)->lock);
		_thread_suspend(curthread, NULL);
		PANIC("Shouldn't resume here?\n");
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		mutex->m_data.m_count++;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}
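/*
 * Illustrative sketch (not part of the library): how a second lock
 * attempt by the owning thread behaves for each mutex type, per the
 * two functions above.
 */
#if 0
static void
example_self_lock_behavior(void)
{
	pthread_mutex_t m;
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutex_init(&m, &attr);

	pthread_mutex_lock(&m);
	pthread_mutex_lock(&m);		/* recursive: m_count becomes 1 */
	pthread_mutex_unlock(&m);
	pthread_mutex_unlock(&m);

	/*
	 * For PTHREAD_MUTEX_ERRORCHECK the second lock would return
	 * EDEADLK; for PTHREAD_MUTEX_NORMAL it deadlocks by design.
	 */
}
#endif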
static inline int
mutex_unlock_common(pthread_mutex_t *mutex, int add_reference)
{
	int ret = 0;

	if (mutex == NULL || *mutex == NULL) {
		ret = EINVAL;
	} else {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		/* _thread_kern_sig_defer(); */

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of
				 * threads waiting on the mutex.  The deq
				 * function will have already locked it
				 * for us.
				 */
				if (((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) {
					/* Make the new owner runnable: */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					/*
					 * Add the mutex to the thread's list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ?
				    EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
				    (*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of threads
				 * waiting on the mutex.  It will already be
				 * locked for us.
				 */
				if (((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) == NULL)
					/* This mutex has no priority. */
					(*mutex)->m_prio = 0;
				else {
					/*
					 * Track number of priority mutexes
					 * owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the thread's list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Set the priority of the mutex.  Since
					 * our waiting threads are in descending
					 * priority order, the priority of the
					 * mutex becomes the active priority of
					 * the thread we just dequeued.
					 */
					(*mutex)->m_prio =
					    (*mutex)->m_owner->active_priority;

					/*
					 * Save the owning thread's inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
					    (*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread's inherited priority
					 * now becomes its active priority (the
					 * priority of the mutex).
					 */
					(*mutex)->m_owner->inherited_priority =
					    (*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;
		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
				    (*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Enter a loop to find a waiting thread whose
				 * active priority will not cause a ceiling
				 * violation.  It will already be locked for us.
				 */
				while ((((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) &&
				    ((*mutex)->m_owner->active_priority >
				    (*mutex)->m_prio)) {
					/*
					 * Either the mutex ceiling priority has
					 * been lowered and/or this thread's
					 * priority has been raised subsequent
					 * to the thread being queued on the
					 * waiting list.
					 */
					(*mutex)->m_owner->error = EINVAL;
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
					/*
					 * The thread is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					_thread_critical_exit((*mutex)->m_owner);
				}

				/* Check for a new owner: */
				if ((*mutex)->m_owner != NULL) {
					/*
					 * Track number of priority mutexes
					 * owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the thread's list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Save the owning thread's inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
					    (*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread inherits the
					 * ceiling priority of the mutex and
					 * executes at that priority:
					 */
					(*mutex)->m_owner->inherited_priority =
					    (*mutex)->m_prio;
					(*mutex)->m_owner->active_priority =
					    (*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0)) {
			/* Increment the reference count: */
			(*mutex)->m_refcount++;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		/* _thread_kern_sig_undefer(); */
	}

	/* Return the completion status: */
	return (ret);
}
/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called while thread scheduling is deferred.
 */
void
_mutex_notify_priochange(pthread_t pthread)
{
	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 */
		mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (pthread->state == PS_MUTEX_WAIT) {
		/* Lock the mutex structure: */
		_SPINLOCK(&pthread->data.mutex->lock);

		/*
		 * Check to make sure this thread is still in the same state
		 * (the spinlock above can yield the CPU to another thread):
		 */
		if (pthread->state == PS_MUTEX_WAIT) {
			/*
			 * Remove and reinsert this thread into the list of
			 * waiting threads to preserve decreasing priority
			 * order.
			 */
			mutex_queue_remove(pthread->data.mutex, pthread);
			mutex_queue_enq(pthread->data.mutex, pthread);

			if (pthread->data.mutex->m_protocol ==
			    PTHREAD_PRIO_INHERIT) {
				/* Adjust priorities: */
				mutex_priority_adjust(pthread->data.mutex);
			}
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&pthread->data.mutex->lock);
	}
}
/*
 * Called when a new thread is added to the mutex waiting queue, or
 * when a thread that is already in the mutex waiting queue changes
 * priority.
 */
static void
mutex_priority_adjust(pthread_mutex_t mutex)
{
	pthread_t pthread_next, pthread = mutex->m_owner;
	int temp_prio;
	pthread_mutex_t m = mutex;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);	/* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	while (m != NULL) {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all the mutexes this thread has
		 * locked since taking this mutex.  This also has a
		 * potential side-effect of changing the thread's priority.
		 */
		mutex_rescan_owned(pthread, m);

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    (pthread->state == PS_MUTEX_WAIT) &&
		    (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Grab the mutex this thread is waiting on: */
			m = pthread->data.mutex;

			/*
			 * The priority for this thread has changed.  Remove
			 * and reinsert this thread into the list of waiting
			 * threads to preserve decreasing priority order.
			 */
			mutex_queue_remove(m, pthread);
			mutex_queue_enq(m, pthread);

			/* Grab the waiting thread with highest priority: */
			pthread_next = TAILQ_FIRST(&m->m_queue);

			/*
			 * Calculate the mutex priority as the maximum of the
			 * highest active priority of any waiting threads and
			 * the owning thread's active priority.
			 */
			temp_prio = MAX(pthread_next->active_priority,
			    MAX(m->m_saved_prio, m->m_owner->base_priority));

			if (temp_prio != m->m_prio) {
				/*
				 * The priority needs to be propagated to the
				 * mutex this thread is waiting on and up to
				 * the owner of that mutex.
				 */
				m->m_prio = temp_prio;
				pthread = m->m_owner;
			}
			else
				/* We're done: */
				m = NULL;
		}
		else
			/* We're done: */
			m = NULL;
	}
}
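/*
 * Worked example for the function above (illustrative numbers, higher
 * value = higher priority): thread L (base priority 5) owns
 * inheritance mutex M1 and is itself blocked on M2, owned by thread L2
 * (base 5).  When thread H (priority 20) blocks on M1, the adjustment
 * sets M1->m_prio = 20, rescans L's owned mutexes so that L runs at
 * active priority 20, then follows L's wait link to M2 and repeats,
 * so L2 also runs at 20 until it releases M2.  The loop terminates
 * when a recomputed mutex priority is unchanged or the current owner
 * is not itself waiting on a priority inheritance mutex.
 */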
static void
mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
{
	int active_prio, inherited_prio;
	pthread_mutex_t m;
	pthread_t pthread_next;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	}
	else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	while (m != NULL) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				    pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}

		/* Advance to the next mutex owned by this thread: */
		m = TAILQ_NEXT(m, m_qe);
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
#if 0
		/*
		 * If this thread is in the priority queue, it must be
		 * removed and reinserted for its new priority.
		 */
		if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
			/*
			 * Remove the thread from the priority queue
			 * before changing its priority:
			 */
			PTHREAD_PRIOQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_HEAD(pthread);
			}
			else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_TAIL(pthread);
			}
		}
		else {
			/* Set the new active priority. */
			pthread->active_priority = active_prio;
		}
#endif
		pthread->active_priority = active_prio;
	}
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex *m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

void
_mutex_lock_backout(pthread_t pthread)
{
	struct pthread_mutex *mutex;

	/*
	 * Defer signals to protect the scheduling queues from
	 * access by the signal handler:
	 */
	/* _thread_kern_sig_defer(); */

	/* XXX - Necessary to obey lock order */
	UMTX_LOCK(&pthread->lock);
	mutex = pthread->data.mutex;
	UMTX_UNLOCK(&pthread->lock);

	_SPINLOCK(&mutex->lock);

	_thread_critical_enter(pthread);
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
		mutex_queue_remove(mutex, pthread);

		/* This thread is no longer waiting for the mutex: */
		pthread->data.mutex = NULL;
	}
	/*
	 * Undefer and handle pending signals, yielding if
	 * necessary:
	 */
	/* _thread_kern_sig_undefer(); */

	_thread_critical_exit(pthread);
	_SPINUNLOCK(&mutex->lock);
}
/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.  This function will return with the thread locked.
 */
static inline pthread_t
mutex_queue_deq(pthread_mutex_t mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		_thread_critical_enter(pthread);
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		if ((pthread->cancelflags & PTHREAD_CANCELLING) == 0 &&
		    pthread->state == PS_MUTEX_WAIT)
			break;
		else
			_thread_critical_exit(pthread);
	}

	return (pthread);
}

/*
 * Remove a waiting thread from a mutex queue in descending priority order.
 */
static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
	}
}

/*
 * Enqueue a waiting thread to a queue in descending priority order.
 */
static inline void
mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
	char *name;

	name = pthread->name ? pthread->name : "unknown";
	if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0)
		_thread_printf(2, "Thread (%s:%u) already on condq\n",
		    name, pthread->uniqueid);
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0)
		_thread_printf(2, "Thread (%s:%u) already on mutexq\n",
		    name, pthread->uniqueid);
	PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
	/*
	 * For the common case of all threads having equal priority,
	 * we perform a quick check against the priority of the thread
	 * at the tail of the queue.
	 */
	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
	else {
		tid = TAILQ_FIRST(&mutex->m_queue);
		while (pthread->active_priority <= tid->active_priority)
			tid = TAILQ_NEXT(tid, sqe);
		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
	}
	pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
}
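/*
 * Illustrative ordering example for mutex_queue_enq() above: with
 * waiters of active priorities [20, 10, 10] already queued, a new
 * waiter of priority 10 is appended after the existing 10s (the <=
 * comparisons preserve FIFO order among equal priorities), while a
 * new waiter of priority 15 is inserted between the 20 and the 10s.
 */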
/*
 * Try to acquire an uncontested mutex.  Returns 0 with the lock owned
 * and on the thread's mutexq if the mutex was unowned; returns -1 if
 * the mutex is owned by another thread; otherwise returns the result
 * of the self-lock handling when the calling thread already owns it.
 */
static int
get_muncontested(pthread_mutex_t mutexp, int nonblock)
{
	if (mutexp->m_owner != NULL && mutexp->m_owner != curthread) {
		return (-1);
	} else if (mutexp->m_owner == curthread) {
		if (nonblock)
			return (mutex_self_trylock(mutexp));
		else
			return (mutex_self_lock(mutexp));
	}

	/*
	 * The mutex belongs to this thread now.  Mark it as
	 * such.  Add it to the list of mutexes owned by this
	 * thread.
	 */
	mutexp->m_owner = curthread;
	_MUTEX_ASSERT_NOT_OWNED(mutexp);
	TAILQ_INSERT_TAIL(&curthread->mutexq, mutexp, m_qe);
	return (0);
}

/*
 * Returns with the lock owned and on the thread's mutexq.  If
 * the mutex is currently owned by another thread it will sleep
 * until it is available.
 */
static void
get_mcontested(pthread_mutex_t mutexp)
{
	int error;

	_thread_critical_enter(curthread);

	/*
	 * Put this thread on the mutex's list of waiting threads.
	 * The lock on the thread ensures atomic (as far as other
	 * threads are concerned) setting of the thread state with
	 * its status on the mutex queue.
	 */
	mutex_queue_enq(mutexp, curthread);
	do {
		PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT);
		curthread->data.mutex = mutexp;
		_thread_critical_exit(curthread);
		_SPINUNLOCK(&mutexp->lock);
		error = _thread_suspend(curthread, NULL);
		if (error != 0 && error != EAGAIN && error != EINTR)
			PANIC("Cannot suspend on mutex.");

		_SPINLOCK(&mutexp->lock);
		_thread_critical_enter(curthread);
	} while ((curthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0);

	_thread_critical_exit(curthread);
}