1 /* 2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by John Birrell. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * $FreeBSD$ 33 */ 34 #include <stdlib.h> 35 #include <errno.h> 36 #include <string.h> 37 #include <sys/param.h> 38 #include <sys/queue.h> 39 #include <pthread.h> 40 #include <time.h> 41 #include "thr_private.h" 42 43 #if defined(_PTHREADS_INVARIANTS) 44 #define _MUTEX_INIT_LINK(m) do { \ 45 (m)->m_qe.tqe_prev = NULL; \ 46 (m)->m_qe.tqe_next = NULL; \ 47 } while (0) 48 #define _MUTEX_ASSERT_IS_OWNED(m) do { \ 49 if ((m)->m_qe.tqe_prev == NULL) \ 50 PANIC("mutex is not on list"); \ 51 } while (0) 52 #define _MUTEX_ASSERT_NOT_OWNED(m) do { \ 53 if (((m)->m_qe.tqe_prev != NULL) || \ 54 ((m)->m_qe.tqe_next != NULL)) \ 55 PANIC("mutex is on list"); \ 56 } while (0) 57 #else 58 #define _MUTEX_INIT_LINK(m) 59 #define _MUTEX_ASSERT_IS_OWNED(m) 60 #define _MUTEX_ASSERT_NOT_OWNED(m) 61 #endif 62 63 64 /* 65 * Prototypes 66 */ 67 static int get_muncontested(pthread_mutex_t, int); 68 static int get_mcontested(pthread_mutex_t, 69 const struct timespec *); 70 static int mutex_init(pthread_mutex_t *, int); 71 static int mutex_lock_common(pthread_mutex_t *, int, 72 const struct timespec *); 73 static inline int mutex_self_trylock(pthread_mutex_t); 74 static inline int mutex_self_lock(pthread_mutex_t); 75 static inline int mutex_unlock_common(pthread_mutex_t *, int); 76 static void mutex_priority_adjust(pthread_mutex_t); 77 static void mutex_rescan_owned (pthread_t, pthread_mutex_t); 78 static inline pthread_t mutex_queue_deq(pthread_mutex_t); 79 static inline void mutex_queue_remove(pthread_mutex_t, pthread_t); 80 static inline void mutex_queue_enq(pthread_mutex_t, pthread_t); 81 82 83 static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER; 84 85 static struct pthread_mutex_attr static_mutex_attr = 86 PTHREAD_MUTEXATTR_STATIC_INITIALIZER; 87 static pthread_mutexattr_t static_mattr = &static_mutex_attr; 88 89 /* Single underscore versions provided for libc internal usage: */ 90 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock); 91 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock); 92 __weak_reference(__pthread_mutex_unlock, pthread_mutex_unlock); 93 94 /* No difference between libc and application usage of these: */ 95 __weak_reference(_pthread_mutex_init, pthread_mutex_init); 96 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy); 97 __weak_reference(_pthread_mutex_timedlock, pthread_mutex_timedlock); 98 99 100 /* 101 * Reinitialize a private mutex; this is only used for internal mutexes. 102 */ 103 int 104 _mutex_reinit(pthread_mutex_t * mutex) 105 { 106 int ret = 0; 107 108 if (mutex == NULL) 109 ret = EINVAL; 110 else if (*mutex == PTHREAD_MUTEX_INITIALIZER) 111 ret = _pthread_mutex_init(mutex, NULL); 112 else { 113 /* 114 * Initialize the mutex structure: 115 */ 116 (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT; 117 (*mutex)->m_protocol = PTHREAD_PRIO_NONE; 118 TAILQ_INIT(&(*mutex)->m_queue); 119 (*mutex)->m_owner = NULL; 120 (*mutex)->m_data.m_count = 0; 121 (*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE; 122 (*mutex)->m_refcount = 0; 123 (*mutex)->m_prio = 0; 124 (*mutex)->m_saved_prio = 0; 125 _MUTEX_INIT_LINK(*mutex); 126 memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock)); 127 } 128 return (ret); 129 } 130 131 int 132 _pthread_mutex_init(pthread_mutex_t * mutex, 133 const pthread_mutexattr_t * mutex_attr) 134 { 135 struct pthread_mutex_attr default_attr = {PTHREAD_MUTEX_ERRORCHECK, 136 PTHREAD_PRIO_NONE, PTHREAD_MAX_PRIORITY, 0 }; 137 struct pthread_mutex_attr *attr; 138 139 if (mutex_attr == NULL) { 140 attr = &default_attr; 141 } else { 142 /* 143 * Check that the given mutex attribute is valid. 144 */ 145 if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) || 146 ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX)) 147 return (EINVAL); 148 else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) || 149 ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE)) 150 return (EINVAL); 151 attr = *mutex_attr; 152 } 153 if ((*mutex = 154 (pthread_mutex_t)malloc(sizeof(struct pthread_mutex))) == NULL) 155 return (ENOMEM); 156 memset((void *)(*mutex), 0, sizeof(struct pthread_mutex)); 157 158 /* Initialise the rest of the mutex: */ 159 TAILQ_INIT(&(*mutex)->m_queue); 160 _MUTEX_INIT_LINK(*mutex); 161 (*mutex)->m_protocol = attr->m_protocol; 162 (*mutex)->m_flags = (attr->m_flags | MUTEX_FLAGS_INITED); 163 (*mutex)->m_type = attr->m_type; 164 if ((*mutex)->m_protocol == PTHREAD_PRIO_PROTECT) 165 (*mutex)->m_prio = attr->m_ceiling; 166 return (0); 167 } 168 169 int 170 _pthread_mutex_destroy(pthread_mutex_t * mutex) 171 { 172 if (mutex == NULL) 173 return (EINVAL); 174 175 /* 176 * If this mutex was statically initialized, don't bother 177 * initializing it in order to destroy it immediately. 178 */ 179 if (*mutex == PTHREAD_MUTEX_INITIALIZER) 180 return (0); 181 182 /* Lock the mutex structure: */ 183 _SPINLOCK(&(*mutex)->lock); 184 185 /* 186 * Check to see if this mutex is in use: 187 */ 188 if (((*mutex)->m_owner != NULL) || 189 (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) || 190 ((*mutex)->m_refcount != 0)) { 191 /* Unlock the mutex structure: */ 192 _SPINUNLOCK(&(*mutex)->lock); 193 return (EBUSY); 194 } 195 196 /* 197 * Free the memory allocated for the mutex 198 * structure: 199 */ 200 _MUTEX_ASSERT_NOT_OWNED(*mutex); 201 _SPINUNLOCK(&(*mutex)->lock); 202 free(*mutex); 203 204 /* 205 * Leave the caller's pointer NULL now that 206 * the mutex has been destroyed: 207 */ 208 *mutex = NULL; 209 210 return (0); 211 } 212 213 static int 214 mutex_init(pthread_mutex_t *mutex, int private) 215 { 216 pthread_mutexattr_t *pma; 217 int error; 218 219 error = 0; 220 pma = private ? &static_mattr : NULL; 221 _SPINLOCK(&static_init_lock); 222 if (*mutex == PTHREAD_MUTEX_INITIALIZER) 223 error = _pthread_mutex_init(mutex, pma); 224 _SPINUNLOCK(&static_init_lock); 225 return (error); 226 } 227 228 int 229 __pthread_mutex_trylock(pthread_mutex_t *mutex) 230 { 231 int ret = 0; 232 233 if (mutex == NULL) 234 ret = EINVAL; 235 236 /* 237 * If the mutex is statically initialized, perform the dynamic 238 * initialization: 239 */ 240 else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) || 241 (ret = mutex_init(mutex, 0)) == 0) 242 ret = mutex_lock_common(mutex, 1, NULL); 243 244 return (ret); 245 } 246 247 /* 248 * Libc internal. 249 */ 250 int 251 _pthread_mutex_trylock(pthread_mutex_t *mutex) 252 { 253 int ret = 0; 254 255 _thread_sigblock(); 256 257 if (mutex == NULL) 258 ret = EINVAL; 259 260 /* 261 * If the mutex is statically initialized, perform the dynamic 262 * initialization marking the mutex private (delete safe): 263 */ 264 else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) || 265 (ret = mutex_init(mutex, 1)) == 0) 266 ret = mutex_lock_common(mutex, 1, NULL); 267 268 if (ret != 0) 269 _thread_sigunblock(); 270 271 return (ret); 272 } 273 274 static int 275 mutex_lock_common(pthread_mutex_t * mutex, int nonblock, 276 const struct timespec *abstime) 277 { 278 int ret, error, inCancel; 279 280 ret = error = inCancel = 0; 281 282 PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL), 283 "Uninitialized mutex in mutex_lock_common"); 284 285 /* 286 * Enter a loop waiting to become the mutex owner. We need a 287 * loop in case the waiting thread is interrupted by a signal 288 * to execute a signal handler. It is not (currently) possible 289 * to remain in the waiting queue while running a handler. 290 * Instead, the thread is interrupted and backed out of the 291 * waiting queue prior to executing the signal handler. 292 */ 293 do { 294 /* 295 * Defer signals to protect the scheduling queues from 296 * access by the signal handler: 297 */ 298 /* _thread_kern_sig_defer(); */ 299 300 /* Lock the mutex structure: */ 301 _SPINLOCK(&(*mutex)->lock); 302 303 /* 304 * If the mutex was statically allocated, properly 305 * initialize the tail queue. 306 */ 307 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { 308 TAILQ_INIT(&(*mutex)->m_queue); 309 (*mutex)->m_flags |= MUTEX_FLAGS_INITED; 310 _MUTEX_INIT_LINK(*mutex); 311 } 312 313 /* Process according to mutex type: */ 314 switch ((*mutex)->m_protocol) { 315 /* Default POSIX mutex: */ 316 case PTHREAD_PRIO_NONE: 317 if ((error = get_muncontested(*mutex, nonblock)) == -1) 318 if (nonblock) { 319 ret = EBUSY; 320 break; 321 } else { 322 error = get_mcontested(*mutex, abstime); 323 ret = (error != EINTR) ? error : ret; 324 } 325 else 326 ret = error; 327 break; 328 329 /* POSIX priority inheritence mutex: */ 330 case PTHREAD_PRIO_INHERIT: 331 if ((error = get_muncontested(*mutex, nonblock)) == 0) { 332 /* Track number of priority mutexes owned: */ 333 curthread->priority_mutex_count++; 334 335 /* 336 * The mutex takes on attributes of the 337 * running thread when there are no waiters. 338 */ 339 (*mutex)->m_prio = curthread->active_priority; 340 (*mutex)->m_saved_prio = 341 curthread->inherited_priority; 342 curthread->inherited_priority = 343 (*mutex)->m_prio; 344 } else if (error == -1) { 345 if (nonblock) { 346 ret = EBUSY; 347 break; 348 } else { 349 error = get_mcontested(*mutex, abstime); 350 ret = (error != EINTR) ? error : ret; 351 } 352 if (error == 0) { 353 if (curthread->active_priority > 354 (*mutex)->m_prio) 355 /* Adjust priorities: */ 356 mutex_priority_adjust(*mutex); 357 } else if (error == ETIMEDOUT) { 358 /* XXX - mutex priorities don't work */ 359 } 360 } else { 361 ret = error; 362 } 363 break; 364 365 /* POSIX priority protection mutex: */ 366 case PTHREAD_PRIO_PROTECT: 367 /* Check for a priority ceiling violation: */ 368 if (curthread->active_priority > (*mutex)->m_prio) 369 ret = EINVAL; 370 371 if ((error = get_muncontested(*mutex, nonblock)) == 0) { 372 /* Track number of priority mutexes owned: */ 373 curthread->priority_mutex_count++; 374 375 /* 376 * The running thread inherits the ceiling 377 * priority of the mutex and executes at that 378 * priority: 379 */ 380 curthread->active_priority = (*mutex)->m_prio; 381 (*mutex)->m_saved_prio = 382 curthread->inherited_priority; 383 curthread->inherited_priority = 384 (*mutex)->m_prio; 385 } else if (error == -1) { 386 if (nonblock) { 387 ret = EBUSY; 388 break; 389 } 390 391 /* Clear any previous error: */ 392 curthread->error = 0; 393 394 error = get_mcontested(*mutex, abstime); 395 ret = (error != EINTR) ? error : ret; 396 397 /* 398 * The threads priority may have changed while 399 * waiting for the mutex causing a ceiling 400 * violation. 401 */ 402 if (error == 0) { 403 ret = curthread->error; 404 curthread->error = 0; 405 } 406 } else { 407 ret = error; 408 } 409 break; 410 411 /* Trap invalid mutex types: */ 412 default: 413 /* Return an invalid argument error: */ 414 ret = EINVAL; 415 break; 416 } 417 418 /* 419 * Check to see if this thread was interrupted and 420 * is still in the mutex queue of waiting threads: 421 */ 422 if (curthread->cancelflags & PTHREAD_CANCELLING) { 423 if (!nonblock) 424 mutex_queue_remove(*mutex, curthread); 425 inCancel=1; 426 } 427 428 /* Unlock the mutex structure: */ 429 _SPINUNLOCK(&(*mutex)->lock); 430 431 /* 432 * Undefer and handle pending signals, yielding if 433 * necessary: 434 */ 435 /* _thread_kern_sig_undefer(); */ 436 if (inCancel) { 437 pthread_testcancel(); 438 PANIC("Canceled thread came back.\n"); 439 } 440 } while ((*mutex)->m_owner != curthread && ret == 0); 441 442 /* Return the completion status: */ 443 return (ret); 444 } 445 446 int 447 __pthread_mutex_lock(pthread_mutex_t *mutex) 448 { 449 int ret = 0; 450 451 if (_thread_initial == NULL) 452 _thread_init(); 453 454 if (mutex == NULL) 455 ret = EINVAL; 456 457 /* 458 * If the mutex is statically initialized, perform the dynamic 459 * initialization: 460 */ 461 else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) || 462 ((ret = mutex_init(mutex, 0)) == 0)) 463 ret = mutex_lock_common(mutex, 0, NULL); 464 465 return (ret); 466 } 467 468 /* 469 * Libc internal. 470 */ 471 int 472 _pthread_mutex_lock(pthread_mutex_t *mutex) 473 { 474 int ret = 0; 475 476 if (_thread_initial == NULL) 477 _thread_init(); 478 479 _thread_sigblock(); 480 481 if (mutex == NULL) 482 ret = EINVAL; 483 484 /* 485 * If the mutex is statically initialized, perform the dynamic 486 * initialization marking it private (delete safe): 487 */ 488 else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) || 489 ((ret = mutex_init(mutex, 1)) == 0)) 490 ret = mutex_lock_common(mutex, 0, NULL); 491 492 if (ret != 0) 493 _thread_sigunblock(); 494 495 return (ret); 496 } 497 498 int 499 _pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime) 500 { 501 int error; 502 503 error = 0; 504 if (_thread_initial == NULL) 505 _thread_init(); 506 507 /* 508 * Initialize it if it's a valid statically inited mutex. 509 */ 510 if (mutex == NULL) 511 error = EINVAL; 512 else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) || 513 ((error = mutex_init(mutex, 0)) == 0)) 514 error = mutex_lock_common(mutex, 0, abstime); 515 516 PTHREAD_ASSERT(error != EINTR, "According to SUSv3 this function shall not return an error code of EINTR"); 517 return (error); 518 } 519 520 int 521 __pthread_mutex_unlock(pthread_mutex_t * mutex) 522 { 523 return (mutex_unlock_common(mutex, /* add reference */ 0)); 524 } 525 526 /* 527 * Libc internal 528 */ 529 int 530 _pthread_mutex_unlock(pthread_mutex_t * mutex) 531 { 532 int error; 533 if ((error = mutex_unlock_common(mutex, /* add reference */ 0)) == 0) 534 _thread_sigunblock(); 535 return (error); 536 } 537 538 int 539 _mutex_cv_unlock(pthread_mutex_t * mutex) 540 { 541 return (mutex_unlock_common(mutex, /* add reference */ 1)); 542 } 543 544 int 545 _mutex_cv_lock(pthread_mutex_t * mutex) 546 { 547 int ret; 548 if ((ret = _pthread_mutex_lock(mutex)) == 0) 549 (*mutex)->m_refcount--; 550 return (ret); 551 } 552 553 static inline int 554 mutex_self_trylock(pthread_mutex_t mutex) 555 { 556 int ret = 0; 557 558 switch (mutex->m_type) { 559 560 /* case PTHREAD_MUTEX_DEFAULT: */ 561 case PTHREAD_MUTEX_ERRORCHECK: 562 case PTHREAD_MUTEX_NORMAL: 563 /* 564 * POSIX specifies that mutexes should return EDEADLK if a 565 * recursive lock is detected. 566 */ 567 ret = EBUSY; 568 break; 569 570 case PTHREAD_MUTEX_RECURSIVE: 571 /* Increment the lock count: */ 572 mutex->m_data.m_count++; 573 break; 574 575 default: 576 /* Trap invalid mutex types; */ 577 ret = EINVAL; 578 } 579 580 return (ret); 581 } 582 583 static inline int 584 mutex_self_lock(pthread_mutex_t mutex) 585 { 586 int ret = 0; 587 588 switch (mutex->m_type) { 589 /* case PTHREAD_MUTEX_DEFAULT: */ 590 case PTHREAD_MUTEX_ERRORCHECK: 591 /* 592 * POSIX specifies that mutexes should return EDEADLK if a 593 * recursive lock is detected. 594 */ 595 ret = EDEADLK; 596 break; 597 598 case PTHREAD_MUTEX_NORMAL: 599 /* 600 * What SS2 define as a 'normal' mutex. Intentionally 601 * deadlock on attempts to get a lock you already own. 602 */ 603 /* XXX Sched lock. */ 604 PTHREAD_SET_STATE(curthread, PS_DEADLOCK); 605 _SPINUNLOCK(&(mutex)->lock); 606 _thread_suspend(curthread, NULL); 607 PANIC("Shouldn't resume here?\n"); 608 break; 609 610 case PTHREAD_MUTEX_RECURSIVE: 611 /* Increment the lock count: */ 612 mutex->m_data.m_count++; 613 break; 614 615 default: 616 /* Trap invalid mutex types; */ 617 ret = EINVAL; 618 } 619 620 return (ret); 621 } 622 623 static inline int 624 mutex_unlock_common(pthread_mutex_t * mutex, int add_reference) 625 { 626 int ret = 0; 627 628 if (mutex == NULL || *mutex == NULL) { 629 ret = EINVAL; 630 } else { 631 /* 632 * Defer signals to protect the scheduling queues from 633 * access by the signal handler: 634 */ 635 /* _thread_kern_sig_defer(); */ 636 637 /* Lock the mutex structure: */ 638 _SPINLOCK(&(*mutex)->lock); 639 640 /* Process according to mutex type: */ 641 switch ((*mutex)->m_protocol) { 642 /* Default POSIX mutex: */ 643 case PTHREAD_PRIO_NONE: 644 /* 645 * Check if the running thread is not the owner of the 646 * mutex: 647 */ 648 if ((*mutex)->m_owner != curthread) { 649 /* 650 * Return an invalid argument error for no 651 * owner and a permission error otherwise: 652 */ 653 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; 654 } 655 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && 656 ((*mutex)->m_data.m_count > 0)) { 657 /* Decrement the count: */ 658 (*mutex)->m_data.m_count--; 659 } else { 660 /* 661 * Clear the count in case this is recursive 662 * mutex. 663 */ 664 (*mutex)->m_data.m_count = 0; 665 666 /* Remove the mutex from the threads queue. */ 667 _MUTEX_ASSERT_IS_OWNED(*mutex); 668 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, 669 (*mutex), m_qe); 670 _MUTEX_INIT_LINK(*mutex); 671 672 /* 673 * Get the next thread from the queue of 674 * threads waiting on the mutex. The deq 675 * function will have already locked it 676 * for us. 677 */ 678 if (((*mutex)->m_owner = 679 mutex_queue_deq(*mutex)) != NULL) { 680 /* Make the new owner runnable: */ 681 /* XXXTHR sched lock. */ 682 PTHREAD_NEW_STATE((*mutex)->m_owner, 683 PS_RUNNING); 684 685 /* 686 * Add the mutex to the threads list of 687 * owned mutexes: 688 */ 689 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, 690 (*mutex), m_qe); 691 692 /* 693 * The owner is no longer waiting for 694 * this mutex: 695 */ 696 (*mutex)->m_owner->data.mutex = NULL; 697 _thread_critical_exit((*mutex)->m_owner); 698 } 699 } 700 break; 701 702 /* POSIX priority inheritence mutex: */ 703 case PTHREAD_PRIO_INHERIT: 704 /* 705 * Check if the running thread is not the owner of the 706 * mutex: 707 */ 708 if ((*mutex)->m_owner != curthread) { 709 /* 710 * Return an invalid argument error for no 711 * owner and a permission error otherwise: 712 */ 713 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; 714 } 715 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && 716 ((*mutex)->m_data.m_count > 0)) { 717 /* Decrement the count: */ 718 (*mutex)->m_data.m_count--; 719 } else { 720 /* 721 * Clear the count in case this is recursive 722 * mutex. 723 */ 724 (*mutex)->m_data.m_count = 0; 725 726 /* 727 * Restore the threads inherited priority and 728 * recompute the active priority (being careful 729 * not to override changes in the threads base 730 * priority subsequent to locking the mutex). 731 */ 732 curthread->inherited_priority = 733 (*mutex)->m_saved_prio; 734 curthread->active_priority = 735 MAX(curthread->inherited_priority, 736 curthread->base_priority); 737 738 /* 739 * This thread now owns one less priority mutex. 740 */ 741 curthread->priority_mutex_count--; 742 743 /* Remove the mutex from the threads queue. */ 744 _MUTEX_ASSERT_IS_OWNED(*mutex); 745 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, 746 (*mutex), m_qe); 747 _MUTEX_INIT_LINK(*mutex); 748 749 /* 750 * Get the next thread from the queue of threads 751 * waiting on the mutex. It will already be 752 * locked for us. 753 */ 754 if (((*mutex)->m_owner = 755 mutex_queue_deq(*mutex)) == NULL) 756 /* This mutex has no priority. */ 757 (*mutex)->m_prio = 0; 758 else { 759 /* 760 * Track number of priority mutexes owned: 761 */ 762 (*mutex)->m_owner->priority_mutex_count++; 763 764 /* 765 * Add the mutex to the threads list 766 * of owned mutexes: 767 */ 768 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, 769 (*mutex), m_qe); 770 771 /* 772 * The owner is no longer waiting for 773 * this mutex: 774 */ 775 (*mutex)->m_owner->data.mutex = NULL; 776 777 /* 778 * Set the priority of the mutex. Since 779 * our waiting threads are in descending 780 * priority order, the priority of the 781 * mutex becomes the active priority of 782 * the thread we just dequeued. 783 */ 784 (*mutex)->m_prio = 785 (*mutex)->m_owner->active_priority; 786 787 /* 788 * Save the owning threads inherited 789 * priority: 790 */ 791 (*mutex)->m_saved_prio = 792 (*mutex)->m_owner->inherited_priority; 793 794 /* 795 * The owning threads inherited priority 796 * now becomes his active priority (the 797 * priority of the mutex). 798 */ 799 (*mutex)->m_owner->inherited_priority = 800 (*mutex)->m_prio; 801 802 /* 803 * Make the new owner runnable: 804 */ 805 /* XXXTHR sched lock. */ 806 PTHREAD_NEW_STATE((*mutex)->m_owner, 807 PS_RUNNING); 808 809 _thread_critical_exit((*mutex)->m_owner); 810 } 811 } 812 break; 813 814 /* POSIX priority ceiling mutex: */ 815 case PTHREAD_PRIO_PROTECT: 816 /* 817 * Check if the running thread is not the owner of the 818 * mutex: 819 */ 820 if ((*mutex)->m_owner != curthread) { 821 /* 822 * Return an invalid argument error for no 823 * owner and a permission error otherwise: 824 */ 825 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; 826 } 827 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && 828 ((*mutex)->m_data.m_count > 0)) { 829 /* Decrement the count: */ 830 (*mutex)->m_data.m_count--; 831 } else { 832 /* 833 * Clear the count in case this is recursive 834 * mutex. 835 */ 836 (*mutex)->m_data.m_count = 0; 837 838 /* 839 * Restore the threads inherited priority and 840 * recompute the active priority (being careful 841 * not to override changes in the threads base 842 * priority subsequent to locking the mutex). 843 */ 844 curthread->inherited_priority = 845 (*mutex)->m_saved_prio; 846 curthread->active_priority = 847 MAX(curthread->inherited_priority, 848 curthread->base_priority); 849 850 /* 851 * This thread now owns one less priority mutex. 852 */ 853 curthread->priority_mutex_count--; 854 855 /* Remove the mutex from the threads queue. */ 856 _MUTEX_ASSERT_IS_OWNED(*mutex); 857 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, 858 (*mutex), m_qe); 859 _MUTEX_INIT_LINK(*mutex); 860 861 /* 862 * Enter a loop to find a waiting thread whose 863 * active priority will not cause a ceiling 864 * violation. It will already be locked for us. 865 */ 866 while ((((*mutex)->m_owner = 867 mutex_queue_deq(*mutex)) != NULL) && 868 ((*mutex)->m_owner->active_priority > 869 (*mutex)->m_prio)) { 870 /* 871 * Either the mutex ceiling priority 872 * been lowered and/or this threads 873 * priority has been raised subsequent 874 * to this thread being queued on the 875 * waiting list. 876 */ 877 (*mutex)->m_owner->error = EINVAL; 878 PTHREAD_NEW_STATE((*mutex)->m_owner, 879 PS_RUNNING); 880 /* 881 * The thread is no longer waiting for 882 * this mutex: 883 */ 884 (*mutex)->m_owner->data.mutex = NULL; 885 886 _thread_critical_exit((*mutex)->m_owner); 887 } 888 889 /* Check for a new owner: */ 890 if ((*mutex)->m_owner != NULL) { 891 /* 892 * Track number of priority mutexes owned: 893 */ 894 (*mutex)->m_owner->priority_mutex_count++; 895 896 /* 897 * Add the mutex to the threads list 898 * of owned mutexes: 899 */ 900 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, 901 (*mutex), m_qe); 902 903 /* 904 * The owner is no longer waiting for 905 * this mutex: 906 */ 907 (*mutex)->m_owner->data.mutex = NULL; 908 909 /* 910 * Save the owning threads inherited 911 * priority: 912 */ 913 (*mutex)->m_saved_prio = 914 (*mutex)->m_owner->inherited_priority; 915 916 /* 917 * The owning thread inherits the 918 * ceiling priority of the mutex and 919 * executes at that priority: 920 */ 921 (*mutex)->m_owner->inherited_priority = 922 (*mutex)->m_prio; 923 (*mutex)->m_owner->active_priority = 924 (*mutex)->m_prio; 925 926 /* 927 * Make the new owner runnable: 928 */ 929 /* XXXTHR sched lock. */ 930 PTHREAD_NEW_STATE((*mutex)->m_owner, 931 PS_RUNNING); 932 933 _thread_critical_exit((*mutex)->m_owner); 934 } 935 } 936 break; 937 938 /* Trap invalid mutex types: */ 939 default: 940 /* Return an invalid argument error: */ 941 ret = EINVAL; 942 break; 943 } 944 945 if ((ret == 0) && (add_reference != 0)) { 946 /* Increment the reference count: */ 947 (*mutex)->m_refcount++; 948 } 949 950 /* Unlock the mutex structure: */ 951 _SPINUNLOCK(&(*mutex)->lock); 952 953 /* 954 * Undefer and handle pending signals, yielding if 955 * necessary: 956 */ 957 /* _thread_kern_sig_undefer(); */ 958 } 959 960 /* Return the completion status: */ 961 return (ret); 962 } 963 964 965 /* 966 * This function is called when a change in base priority occurs for 967 * a thread that is holding or waiting for a priority protection or 968 * inheritence mutex. A change in a threads base priority can effect 969 * changes to active priorities of other threads and to the ordering 970 * of mutex locking by waiting threads. 971 * 972 * This must be called while thread scheduling is deferred. 973 */ 974 void 975 _mutex_notify_priochange(pthread_t pthread) 976 { 977 /* Adjust the priorites of any owned priority mutexes: */ 978 if (pthread->priority_mutex_count > 0) { 979 /* 980 * Rescan the mutexes owned by this thread and correct 981 * their priorities to account for this threads change 982 * in priority. This has the side effect of changing 983 * the threads active priority. 984 */ 985 mutex_rescan_owned(pthread, /* rescan all owned */ NULL); 986 } 987 988 /* 989 * If this thread is waiting on a priority inheritence mutex, 990 * check for priority adjustments. A change in priority can 991 * also effect a ceiling violation(*) for a thread waiting on 992 * a priority protection mutex; we don't perform the check here 993 * as it is done in pthread_mutex_unlock. 994 * 995 * (*) It should be noted that a priority change to a thread 996 * _after_ taking and owning a priority ceiling mutex 997 * does not affect ownership of that mutex; the ceiling 998 * priority is only checked before mutex ownership occurs. 999 */ 1000 if (pthread->state == PS_MUTEX_WAIT) { 1001 /* Lock the mutex structure: */ 1002 _SPINLOCK(&pthread->data.mutex->lock); 1003 1004 /* 1005 * Check to make sure this thread is still in the same state 1006 * (the spinlock above can yield the CPU to another thread): 1007 */ 1008 if (pthread->state == PS_MUTEX_WAIT) { 1009 /* 1010 * Remove and reinsert this thread into the list of 1011 * waiting threads to preserve decreasing priority 1012 * order. 1013 */ 1014 mutex_queue_remove(pthread->data.mutex, pthread); 1015 mutex_queue_enq(pthread->data.mutex, pthread); 1016 1017 if (pthread->data.mutex->m_protocol == 1018 PTHREAD_PRIO_INHERIT) { 1019 /* Adjust priorities: */ 1020 mutex_priority_adjust(pthread->data.mutex); 1021 } 1022 } 1023 1024 /* Unlock the mutex structure: */ 1025 _SPINUNLOCK(&pthread->data.mutex->lock); 1026 } 1027 } 1028 1029 /* 1030 * Called when a new thread is added to the mutex waiting queue or 1031 * when a threads priority changes that is already in the mutex 1032 * waiting queue. 1033 */ 1034 static void 1035 mutex_priority_adjust(pthread_mutex_t mutex) 1036 { 1037 pthread_t pthread_next, pthread = mutex->m_owner; 1038 int temp_prio; 1039 pthread_mutex_t m = mutex; 1040 1041 /* 1042 * Calculate the mutex priority as the maximum of the highest 1043 * active priority of any waiting threads and the owning threads 1044 * active priority(*). 1045 * 1046 * (*) Because the owning threads current active priority may 1047 * reflect priority inherited from this mutex (and the mutex 1048 * priority may have changed) we must recalculate the active 1049 * priority based on the threads saved inherited priority 1050 * and its base priority. 1051 */ 1052 pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */ 1053 temp_prio = MAX(pthread_next->active_priority, 1054 MAX(m->m_saved_prio, pthread->base_priority)); 1055 1056 /* See if this mutex really needs adjusting: */ 1057 if (temp_prio == m->m_prio) 1058 /* No need to propagate the priority: */ 1059 return; 1060 1061 /* Set new priority of the mutex: */ 1062 m->m_prio = temp_prio; 1063 1064 while (m != NULL) { 1065 /* 1066 * Save the threads priority before rescanning the 1067 * owned mutexes: 1068 */ 1069 temp_prio = pthread->active_priority; 1070 1071 /* 1072 * Fix the priorities for all the mutexes this thread has 1073 * locked since taking this mutex. This also has a 1074 * potential side-effect of changing the threads priority. 1075 */ 1076 mutex_rescan_owned(pthread, m); 1077 1078 /* 1079 * If the thread is currently waiting on a mutex, check 1080 * to see if the threads new priority has affected the 1081 * priority of the mutex. 1082 */ 1083 if ((temp_prio != pthread->active_priority) && 1084 (pthread->state == PS_MUTEX_WAIT) && 1085 (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) { 1086 /* Grab the mutex this thread is waiting on: */ 1087 m = pthread->data.mutex; 1088 1089 /* 1090 * The priority for this thread has changed. Remove 1091 * and reinsert this thread into the list of waiting 1092 * threads to preserve decreasing priority order. 1093 */ 1094 mutex_queue_remove(m, pthread); 1095 mutex_queue_enq(m, pthread); 1096 1097 /* Grab the waiting thread with highest priority: */ 1098 pthread_next = TAILQ_FIRST(&m->m_queue); 1099 1100 /* 1101 * Calculate the mutex priority as the maximum of the 1102 * highest active priority of any waiting threads and 1103 * the owning threads active priority. 1104 */ 1105 temp_prio = MAX(pthread_next->active_priority, 1106 MAX(m->m_saved_prio, m->m_owner->base_priority)); 1107 1108 if (temp_prio != m->m_prio) { 1109 /* 1110 * The priority needs to be propagated to the 1111 * mutex this thread is waiting on and up to 1112 * the owner of that mutex. 1113 */ 1114 m->m_prio = temp_prio; 1115 pthread = m->m_owner; 1116 } 1117 else 1118 /* We're done: */ 1119 m = NULL; 1120 1121 } 1122 else 1123 /* We're done: */ 1124 m = NULL; 1125 } 1126 } 1127 1128 static void 1129 mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex) 1130 { 1131 int active_prio, inherited_prio; 1132 pthread_mutex_t m; 1133 pthread_t pthread_next; 1134 1135 /* 1136 * Start walking the mutexes the thread has taken since 1137 * taking this mutex. 1138 */ 1139 if (mutex == NULL) { 1140 /* 1141 * A null mutex means start at the beginning of the owned 1142 * mutex list. 1143 */ 1144 m = TAILQ_FIRST(&pthread->mutexq); 1145 1146 /* There is no inherited priority yet. */ 1147 inherited_prio = 0; 1148 } 1149 else { 1150 /* 1151 * The caller wants to start after a specific mutex. It 1152 * is assumed that this mutex is a priority inheritence 1153 * mutex and that its priority has been correctly 1154 * calculated. 1155 */ 1156 m = TAILQ_NEXT(mutex, m_qe); 1157 1158 /* Start inheriting priority from the specified mutex. */ 1159 inherited_prio = mutex->m_prio; 1160 } 1161 active_prio = MAX(inherited_prio, pthread->base_priority); 1162 1163 while (m != NULL) { 1164 /* 1165 * We only want to deal with priority inheritence 1166 * mutexes. This might be optimized by only placing 1167 * priority inheritence mutexes into the owned mutex 1168 * list, but it may prove to be useful having all 1169 * owned mutexes in this list. Consider a thread 1170 * exiting while holding mutexes... 1171 */ 1172 if (m->m_protocol == PTHREAD_PRIO_INHERIT) { 1173 /* 1174 * Fix the owners saved (inherited) priority to 1175 * reflect the priority of the previous mutex. 1176 */ 1177 m->m_saved_prio = inherited_prio; 1178 1179 if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL) 1180 /* Recalculate the priority of the mutex: */ 1181 m->m_prio = MAX(active_prio, 1182 pthread_next->active_priority); 1183 else 1184 m->m_prio = active_prio; 1185 1186 /* Recalculate new inherited and active priorities: */ 1187 inherited_prio = m->m_prio; 1188 active_prio = MAX(m->m_prio, pthread->base_priority); 1189 } 1190 1191 /* Advance to the next mutex owned by this thread: */ 1192 m = TAILQ_NEXT(m, m_qe); 1193 } 1194 1195 /* 1196 * Fix the threads inherited priority and recalculate its 1197 * active priority. 1198 */ 1199 pthread->inherited_priority = inherited_prio; 1200 active_prio = MAX(inherited_prio, pthread->base_priority); 1201 1202 if (active_prio != pthread->active_priority) { 1203 #if 0 1204 /* 1205 * If this thread is in the priority queue, it must be 1206 * removed and reinserted for its new priority. 1207 */ 1208 if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) { 1209 /* 1210 * Remove the thread from the priority queue 1211 * before changing its priority: 1212 */ 1213 PTHREAD_PRIOQ_REMOVE(pthread); 1214 1215 /* 1216 * POSIX states that if the priority is being 1217 * lowered, the thread must be inserted at the 1218 * head of the queue for its priority if it owns 1219 * any priority protection or inheritence mutexes. 1220 */ 1221 if ((active_prio < pthread->active_priority) && 1222 (pthread->priority_mutex_count > 0)) { 1223 /* Set the new active priority. */ 1224 pthread->active_priority = active_prio; 1225 1226 PTHREAD_PRIOQ_INSERT_HEAD(pthread); 1227 } 1228 else { 1229 /* Set the new active priority. */ 1230 pthread->active_priority = active_prio; 1231 1232 PTHREAD_PRIOQ_INSERT_TAIL(pthread); 1233 } 1234 } 1235 else { 1236 /* Set the new active priority. */ 1237 pthread->active_priority = active_prio; 1238 } 1239 #endif 1240 pthread->active_priority = active_prio; 1241 } 1242 } 1243 1244 void 1245 _mutex_unlock_private(pthread_t pthread) 1246 { 1247 struct pthread_mutex *m, *m_next; 1248 1249 for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) { 1250 m_next = TAILQ_NEXT(m, m_qe); 1251 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0) 1252 _pthread_mutex_unlock(&m); 1253 } 1254 } 1255 1256 void 1257 _mutex_lock_backout(pthread_t pthread) 1258 { 1259 struct pthread_mutex *mutex; 1260 1261 mutex = pthread->data.mutex; 1262 if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) { 1263 1264 mutex_queue_remove(mutex, pthread); 1265 1266 /* This thread is no longer waiting for the mutex: */ 1267 pthread->data.mutex = NULL; 1268 1269 } 1270 } 1271 1272 /* 1273 * Dequeue a waiting thread from the head of a mutex queue in descending 1274 * priority order. This funtion will return with the thread locked. 1275 */ 1276 static inline pthread_t 1277 mutex_queue_deq(pthread_mutex_t mutex) 1278 { 1279 pthread_t pthread; 1280 1281 while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) { 1282 _thread_critical_enter(pthread); 1283 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); 1284 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; 1285 1286 /* 1287 * Only exit the loop if the thread hasn't been 1288 * cancelled. 1289 */ 1290 if ((pthread->cancelflags & PTHREAD_CANCELLING) == 0 && 1291 pthread->state == PS_MUTEX_WAIT) 1292 break; 1293 else 1294 _thread_critical_exit(pthread); 1295 } 1296 1297 return (pthread); 1298 } 1299 1300 /* 1301 * Remove a waiting thread from a mutex queue in descending priority order. 1302 */ 1303 static inline void 1304 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread) 1305 { 1306 if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) { 1307 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe); 1308 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; 1309 } 1310 } 1311 1312 /* 1313 * Enqueue a waiting thread to a queue in descending priority order. 1314 */ 1315 static inline void 1316 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread) 1317 { 1318 pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head); 1319 char *name; 1320 1321 name = pthread->name ? pthread->name : "unknown"; 1322 if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0) 1323 _thread_printf(2, "Thread (%s:%u) already on condq\n", 1324 pthread->name, pthread->uniqueid); 1325 if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) 1326 _thread_printf(2, "Thread (%s:%u) already on mutexq\n", 1327 pthread->name, pthread->uniqueid); 1328 PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread); 1329 /* 1330 * For the common case of all threads having equal priority, 1331 * we perform a quick check against the priority of the thread 1332 * at the tail of the queue. 1333 */ 1334 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) 1335 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe); 1336 else { 1337 tid = TAILQ_FIRST(&mutex->m_queue); 1338 while (pthread->active_priority <= tid->active_priority) 1339 tid = TAILQ_NEXT(tid, sqe); 1340 TAILQ_INSERT_BEFORE(tid, pthread, sqe); 1341 } 1342 pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ; 1343 } 1344 1345 /* 1346 * Returns with the lock owned and on the threads mutexq if 1347 * it is currently unowned. Returns 1, otherwise. 1348 */ 1349 static int 1350 get_muncontested(pthread_mutex_t mutexp, int nonblock) 1351 { 1352 if (mutexp->m_owner != NULL && mutexp->m_owner != curthread) { 1353 return (-1); 1354 } else if (mutexp->m_owner == curthread) { 1355 if (nonblock) 1356 return (mutex_self_trylock(mutexp)); 1357 else 1358 return (mutex_self_lock(mutexp)); 1359 } 1360 1361 /* 1362 * The mutex belongs to this thread now. Mark it as 1363 * such. Add it to the list of mutexes owned by this 1364 * thread. 1365 */ 1366 mutexp->m_owner = curthread; 1367 _MUTEX_ASSERT_NOT_OWNED(mutexp); 1368 TAILQ_INSERT_TAIL(&curthread->mutexq, mutexp, m_qe); 1369 return (0); 1370 } 1371 1372 /* 1373 * Returns with the lock owned and on the thread's mutexq. If 1374 * the mutex is currently owned by another thread it will sleep 1375 * until it is available. 1376 */ 1377 static int 1378 get_mcontested(pthread_mutex_t mutexp, const struct timespec *abstime) 1379 { 1380 int error; 1381 1382 /* 1383 * If the timeout is invalid this thread is not allowed 1384 * to block; 1385 */ 1386 if (abstime != NULL) { 1387 if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) 1388 return (EINVAL); 1389 } 1390 1391 /* 1392 * Put this thread on the mutex's list of waiting threads. 1393 * The lock on the thread ensures atomic (as far as other 1394 * threads are concerned) setting of the thread state with 1395 * it's status on the mutex queue. 1396 */ 1397 _thread_critical_enter(curthread); 1398 mutex_queue_enq(mutexp, curthread); 1399 do { 1400 PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT); 1401 curthread->data.mutex = mutexp; 1402 _thread_critical_exit(curthread); 1403 _SPINUNLOCK(&mutexp->lock); 1404 error = _thread_suspend(curthread, abstime); 1405 if (error != 0 && error != EAGAIN && error != EINTR) 1406 PANIC("Cannot suspend on mutex."); 1407 _SPINLOCK(&mutexp->lock); 1408 _thread_critical_enter(curthread); 1409 if (error == EAGAIN) { 1410 /* 1411 * Between the timeout and when the mutex was 1412 * locked the previous owner may have released 1413 * the mutex to this thread. Or not. 1414 */ 1415 if (mutexp->m_owner == curthread) { 1416 error = 0; 1417 } else { 1418 _mutex_lock_backout(curthread); 1419 curthread->state = PS_RUNNING; 1420 error = ETIMEDOUT; 1421 } 1422 } 1423 } while ((curthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0); 1424 _thread_critical_exit(curthread); 1425 return (error); 1426 } 1427