/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <time.h>
#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define _MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define _MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define _MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * Prototypes
 */
static void		acquire_mutex(struct pthread_mutex *,
			    struct pthread *);
static int		get_mcontested(pthread_mutex_t,
			    const struct timespec *);
static void		mutex_attach_to_next_pthread(struct pthread_mutex *);
static int		mutex_init(pthread_mutex_t *, int);
static int		mutex_lock_common(pthread_mutex_t *, int,
			    const struct timespec *);
static inline int	mutex_self_lock(pthread_mutex_t, int);
static inline int	mutex_unlock_common(pthread_mutex_t *, int);
static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
static void		restore_prio_inheritance(struct pthread *);
static void		restore_prio_protection(struct pthread *);

static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;

static struct pthread_mutex_attr static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t static_mattr = &static_mutex_attr;

/* Single underscore versions provided for libc internal usage: */
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_unlock, pthread_mutex_unlock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_timedlock, pthread_mutex_timedlock);

/*
 * Reinitialize a private mutex; this is only used for internal mutexes.
 */
int
_mutex_reinit(pthread_mutex_t *mutex)
{
	int ret = 0;

	if (mutex == NULL)
		ret = EINVAL;
	else if (*mutex == PTHREAD_MUTEX_INITIALIZER)
		ret = _pthread_mutex_init(mutex, NULL);
	else {
		/*
		 * Initialize the mutex structure:
		 */
		(*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
		(*mutex)->m_protocol = PTHREAD_PRIO_NONE;
		TAILQ_INIT(&(*mutex)->m_queue);
		(*mutex)->m_owner = NULL;
		(*mutex)->m_data.m_count = 0;
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE;
		(*mutex)->m_refcount = 0;
		(*mutex)->m_prio = 0;
		(*mutex)->m_saved_prio = 0;
		_MUTEX_INIT_LINK(*mutex);
		memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
	}
	return (ret);
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	struct pthread_mutex_attr default_attr = {PTHREAD_MUTEX_ERRORCHECK,
	    PTHREAD_PRIO_NONE, PTHREAD_MAX_PRIORITY, 0 };
	struct pthread_mutex_attr *attr;

	if (mutex_attr == NULL) {
		attr = &default_attr;
	} else {
		/*
		 * Check that the given mutex attribute is valid.
		 */
		if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
		    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
			return (EINVAL);
		else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
		    ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
			return (EINVAL);
		attr = *mutex_attr;
	}
	if ((*mutex =
	    (pthread_mutex_t)malloc(sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);
	memset((void *)(*mutex), 0, sizeof(struct pthread_mutex));

	/* Initialise the rest of the mutex: */
	TAILQ_INIT(&(*mutex)->m_queue);
	_MUTEX_INIT_LINK(*mutex);
	(*mutex)->m_protocol = attr->m_protocol;
	(*mutex)->m_flags = (attr->m_flags | MUTEX_FLAGS_INITED);
	(*mutex)->m_type = attr->m_type;
	if ((*mutex)->m_protocol == PTHREAD_PRIO_PROTECT)
		(*mutex)->m_prio = attr->m_ceiling;
	return (0);
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	if (mutex == NULL)
		return (EINVAL);

	/*
	 * If this mutex was statically initialized, don't bother
	 * initializing it in order to destroy it immediately.
	 */
	if (*mutex == PTHREAD_MUTEX_INITIALIZER)
		return (0);

	/* Lock the mutex structure: */
	_SPINLOCK(&(*mutex)->lock);

	/*
	 * Check to see if this mutex is in use:
	 */
	if (((*mutex)->m_owner != NULL) ||
	    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
	    ((*mutex)->m_refcount != 0)) {
		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);
		return (EBUSY);
	}

	/*
	 * Free the memory allocated for the mutex
	 * structure:
	 */
	_MUTEX_ASSERT_NOT_OWNED(*mutex);
	_SPINUNLOCK(&(*mutex)->lock);
	free(*mutex);

	/*
	 * Leave the caller's pointer NULL now that
	 * the mutex has been destroyed:
	 */
	*mutex = NULL;

	return (0);
}

static int
mutex_init(pthread_mutex_t *mutex, int private)
{
	pthread_mutexattr_t *pma;
	int error;

	error = 0;
	pma = private ? &static_mattr : NULL;
	_SPINLOCK(&static_init_lock);
	if (*mutex == PTHREAD_MUTEX_INITIALIZER)
		error = _pthread_mutex_init(mutex, pma);
	_SPINUNLOCK(&static_init_lock);
	return (error);
}
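
/*
 * Usage sketch (illustrative only, not part of this file's build): a
 * mutex is created either with the static initializer, which defers the
 * real initialization to the first lock attempt via mutex_init() above,
 * or explicitly with pthread_mutex_init().  Error handling is elided.
 *
 *	#include <pthread.h>
 *
 *	static pthread_mutex_t static_mtx = PTHREAD_MUTEX_INITIALIZER;
 *
 *	void
 *	lifecycle(void)
 *	{
 *		pthread_mutex_t dynamic_mtx;
 *
 *		pthread_mutex_init(&dynamic_mtx, NULL);
 *		pthread_mutex_lock(&static_mtx);    // triggers mutex_init()
 *		pthread_mutex_unlock(&static_mtx);
 *		pthread_mutex_destroy(&dynamic_mtx);
 *	}
 */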

/*
 * Acquires a mutex for the current thread.  The caller must
 * lock the mutex before calling this function.
 */
static void
acquire_mutex(struct pthread_mutex *mtx, struct pthread *ptd)
{
	mtx->m_owner = ptd;
	_MUTEX_ASSERT_NOT_OWNED(mtx);
	_thread_critical_enter(ptd);
	TAILQ_INSERT_TAIL(&ptd->mutexq, mtx, m_qe);
	_thread_critical_exit(ptd);
}

/*
 * Releases a mutex from the current thread.  The owner must
 * lock the mutex.  The next thread on the queue will be returned
 * locked by the current thread.  The caller must take care to
 * unlock it.
 */
static void
mutex_attach_to_next_pthread(struct pthread_mutex *mtx)
{
	struct pthread *ptd;

	_MUTEX_ASSERT_IS_OWNED(mtx);
	TAILQ_REMOVE(&mtx->m_owner->mutexq, (mtx), m_qe);
	_MUTEX_INIT_LINK(mtx);

	/*
	 * Dequeue the next thread waiting for this mutex and attach
	 * the mutex to it.  The thread will already be locked.
	 */
	if ((ptd = mutex_queue_deq(mtx)) != NULL) {
		TAILQ_INSERT_TAIL(&ptd->mutexq, mtx, m_qe);
		ptd->data.mutex = NULL;
		PTHREAD_NEW_STATE(ptd, PS_RUNNING);
	}
	mtx->m_owner = ptd;
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	int ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
	    (ret = mutex_init(mutex, 0)) == 0)
		ret = mutex_lock_common(mutex, 1, NULL);

	return (ret);
}

/*
 * Libc internal.
 */
int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	int ret = 0;

	_thread_sigblock();

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
	    (ret = mutex_init(mutex, 1)) == 0)
		ret = mutex_lock_common(mutex, 1, NULL);

	if (ret != 0)
		_thread_sigunblock();

	return (ret);
}
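
/*
 * Usage sketch (illustrative only): pthread_mutex_trylock() maps onto
 * mutex_lock_common() with nonblock set, so EBUSY means the lock could
 * not be taken without sleeping and the caller should do something
 * else instead of blocking.
 *
 *	if (pthread_mutex_trylock(&mtx) == 0) {
 *		// got the lock without blocking
 *		pthread_mutex_unlock(&mtx);
 *	} else {
 *		// EBUSY: fall back to other work
 *	}
 */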

static int
mutex_lock_common(pthread_mutex_t *mutex, int nonblock,
    const struct timespec *abstime)
{
	int error;

	error = 0;
	PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_lock_common");
	PTHREAD_ASSERT(((*mutex)->m_protocol >= PTHREAD_PRIO_NONE &&
	    (*mutex)->m_protocol <= PTHREAD_PRIO_PROTECT),
	    "Invalid mutex protocol");
	_SPINLOCK(&(*mutex)->lock);

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
		_MUTEX_INIT_LINK(*mutex);
	}

retry:
	/*
	 * If the mutex is a priority protected mutex, the thread's
	 * priority may not be higher than the mutex's priority ceiling.
	 */
	if ((*mutex)->m_protocol == PTHREAD_PRIO_PROTECT &&
	    curthread->active_priority > (*mutex)->m_prio) {
		_SPINUNLOCK(&(*mutex)->lock);
		return (EINVAL);
	}
	if ((*mutex)->m_owner == NULL) {
		/*
		 * Mutex is currently unowned.
		 */
		acquire_mutex(*mutex, curthread);
	} else if ((*mutex)->m_owner == curthread) {
		/*
		 * Mutex is owned by curthread.  We must test against
		 * certain conditions in such a case.
		 */
		if ((error = mutex_self_lock((*mutex), nonblock)) != 0) {
			_SPINUNLOCK(&(*mutex)->lock);
			return (error);
		}
	} else {
		if (nonblock) {
			error = EBUSY;
			goto out;
		}

		/*
		 * Another thread owns the mutex.  This thread must
		 * wait for that thread to unlock the mutex.  This
		 * thread must not return to the caller if it was
		 * interrupted by a signal.
		 */
		error = get_mcontested(*mutex, abstime);
		if (error == EINTR)
			goto retry;
		else if (error == ETIMEDOUT)
			goto out;
	}

	/*
	 * The mutex is now owned by curthread.
	 */
	_thread_critical_enter(curthread);

	/*
	 * The mutex's priority may have changed while waiting for it.
	 */
	if ((*mutex)->m_protocol == PTHREAD_PRIO_PROTECT &&
	    curthread->active_priority > (*mutex)->m_prio) {
		mutex_attach_to_next_pthread(*mutex);
		if ((*mutex)->m_owner != NULL)
			_thread_critical_exit((*mutex)->m_owner);
		_thread_critical_exit(curthread);
		_SPINUNLOCK(&(*mutex)->lock);
		return (EINVAL);
	}

	switch ((*mutex)->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		curthread->prio_inherit_count++;
		break;
	case PTHREAD_PRIO_PROTECT:
		PTHREAD_ASSERT((curthread->active_priority <=
		    (*mutex)->m_prio), "priority protection violation");
		curthread->prio_protect_count++;
		if ((*mutex)->m_prio > curthread->active_priority) {
			curthread->inherited_priority = (*mutex)->m_prio;
			curthread->active_priority = (*mutex)->m_prio;
		}
		break;
	default:
		/* Nothing */
		break;
	}
	_thread_critical_exit(curthread);
out:
	_SPINUNLOCK(&(*mutex)->lock);
	return (error);
}

/*
 * Caller must lock thread.
 */
void
adjust_prio_inheritance(struct pthread *ptd)
{
	struct pthread_mutex *tempMtx;
	struct pthread *tempTd;

	/*
	 * Scan the wait queues of the mutexes the thread owns and
	 * execute at the higher of the thread's current priority or
	 * the priority of the highest priority thread waiting on any
	 * of those mutexes.  Note: the highest priority thread on a
	 * queue is always at the head of the queue.
	 */
	TAILQ_FOREACH(tempMtx, &ptd->mutexq, m_qe) {
		if (tempMtx->m_protocol != PTHREAD_PRIO_INHERIT)
			continue;

		/*
		 * XXX LOR with respect to tempMtx and ptd.
		 * Order should be: 1. mutex
		 *		    2. pthread
		 */
		_SPINLOCK(&tempMtx->lock);

		tempTd = TAILQ_FIRST(&tempMtx->m_queue);
		if (tempTd != NULL) {
			UMTX_LOCK(&tempTd->lock);
			if (tempTd->active_priority > ptd->active_priority) {
				ptd->inherited_priority =
				    tempTd->active_priority;
				ptd->active_priority =
				    tempTd->active_priority;
			}
			UMTX_UNLOCK(&tempTd->lock);
		}
		_SPINUNLOCK(&tempMtx->lock);
	}
}

/*
 * Caller must lock thread.
 */
static void
restore_prio_inheritance(struct pthread *ptd)
{
	ptd->inherited_priority = PTHREAD_MIN_PRIORITY;
	ptd->active_priority = ptd->base_priority;
	adjust_prio_inheritance(ptd);
}
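
/*
 * Usage sketch (illustrative only): a mutex participates in priority
 * inheritance when created with PTHREAD_PRIO_INHERIT, at which point a
 * lower-priority owner is boosted to the priority of its highest-
 * priority waiter, as implemented by adjust_prio_inheritance() above.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t mtx;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
 *	pthread_mutex_init(&mtx, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */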

/*
 * Caller must lock thread.
 */
void
adjust_prio_protection(struct pthread *ptd)
{
	struct pthread_mutex *tempMtx;

	/*
	 * The thread shall execute at the higher of its priority or
	 * the highest priority ceiling of all the priority protection
	 * mutexes it owns.
	 */
	TAILQ_FOREACH(tempMtx, &ptd->mutexq, m_qe) {
		if (tempMtx->m_protocol != PTHREAD_PRIO_PROTECT)
			continue;
		if (ptd->active_priority < tempMtx->m_prio) {
			ptd->inherited_priority = tempMtx->m_prio;
			ptd->active_priority = tempMtx->m_prio;
		}
	}
}

/*
 * Caller must lock thread.
 */
static void
restore_prio_protection(struct pthread *ptd)
{
	ptd->inherited_priority = PTHREAD_MIN_PRIORITY;
	ptd->active_priority = ptd->base_priority;
	adjust_prio_protection(ptd);
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	int ret = 0;

	if (_thread_initial == NULL)
		_thread_init();

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
	    ((ret = mutex_init(mutex, 0)) == 0))
		ret = mutex_lock_common(mutex, 0, NULL);

	return (ret);
}

/*
 * Libc internal.
 */
int
_pthread_mutex_lock(pthread_mutex_t *mutex)
{
	int ret = 0;

	if (_thread_initial == NULL)
		_thread_init();

	_thread_sigblock();

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
	    ((ret = mutex_init(mutex, 1)) == 0))
		ret = mutex_lock_common(mutex, 0, NULL);

	if (ret != 0)
		_thread_sigunblock();

	return (ret);
}

int
_pthread_mutex_timedlock(pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	int error;

	error = 0;
	if (_thread_initial == NULL)
		_thread_init();

	/*
	 * Initialize the mutex if it is a valid statically
	 * initialized mutex.
	 */
	if (mutex == NULL)
		error = EINVAL;
	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
	    ((error = mutex_init(mutex, 0)) == 0))
		error = mutex_lock_common(mutex, 0, abstime);

	PTHREAD_ASSERT(error != EINTR,
	    "According to SUSv3 this function shall not return EINTR");
	return (error);
}

int
__pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	return (mutex_unlock_common(mutex, /* add reference */ 0));
}

/*
 * Libc internal.
 */
int
_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	int error;

	if ((error = mutex_unlock_common(mutex, /* add reference */ 0)) == 0)
		_thread_sigunblock();
	return (error);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex)
{
	return (mutex_unlock_common(mutex, /* add reference */ 1));
}

int
_mutex_cv_lock(pthread_mutex_t *mutex)
{
	int ret;

	if ((ret = _pthread_mutex_lock(mutex)) == 0)
		(*mutex)->m_refcount--;
	return (ret);
}
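
/*
 * Usage sketch (illustrative only): pthread_mutex_timedlock() takes an
 * absolute CLOCK_REALTIME deadline; get_mcontested() below rejects a
 * timespec whose tv_nsec is out of range with EINVAL.
 *
 *	struct timespec abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 5;		// give up after 5 seconds
 *	switch (pthread_mutex_timedlock(&mtx, &abstime)) {
 *	case 0:			// locked
 *		break;
 *	case ETIMEDOUT:		// deadline passed while still contested
 *		break;
 *	}
 */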

/*
 * Caller must lock mutex and then disable signals and lock curthread.
 */
static inline int
mutex_self_lock(pthread_mutex_t mutex, int noblock)
{
	switch (mutex->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if a
		 * recursive lock is detected.
		 */
		if (noblock)
			return (EBUSY);
		return (EDEADLK);
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		if (noblock)
			return (EBUSY);
		PTHREAD_SET_STATE(curthread, PS_DEADLOCK);
		_SPINUNLOCK(&(mutex)->lock);
		_thread_suspend(curthread, NULL);
		PANIC("Shouldn't resume here?\n");
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		mutex->m_data.m_count++;
		break;

	default:
		/* Trap invalid mutex types: */
		return (EINVAL);
	}
	return (0);
}

static inline int
mutex_unlock_common(pthread_mutex_t *mutex, int add_reference)
{
	/*
	 * Error checking.
	 */
	if (*mutex == NULL)
		return (EINVAL);
	if ((*mutex)->m_owner != curthread)
		return (EPERM);
	PTHREAD_ASSERT(((*mutex)->m_protocol >= PTHREAD_PRIO_NONE &&
	    (*mutex)->m_protocol <= PTHREAD_PRIO_PROTECT),
	    "Invalid mutex protocol");

	_SPINLOCK(&(*mutex)->lock);
	if ((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) {
		(*mutex)->m_data.m_count--;
		PTHREAD_ASSERT((*mutex)->m_data.m_count >= 0,
		    "The mutex recursion count cannot be less than zero");
		if ((*mutex)->m_data.m_count > 0) {
			_SPINUNLOCK(&(*mutex)->lock);
			return (0);
		}
	}

	/*
	 * Release the mutex from this thread and attach it to
	 * the next thread in the queue, if there is one waiting.
	 */
	_thread_critical_enter(curthread);
	mutex_attach_to_next_pthread(*mutex);
	if ((*mutex)->m_owner != NULL)
		_thread_critical_exit((*mutex)->m_owner);
	if (add_reference != 0) {
		/* Increment the reference count: */
		(*mutex)->m_refcount++;
	}
	_SPINUNLOCK(&(*mutex)->lock);

	/*
	 * Fix the priority of the thread that just released the mutex.
	 */
	switch ((*mutex)->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		curthread->prio_inherit_count--;
		PTHREAD_ASSERT(curthread->prio_inherit_count >= 0,
		    "priority inheritance counter cannot be less than zero");
		restore_prio_inheritance(curthread);
		if (curthread->prio_protect_count > 0)
			restore_prio_protection(curthread);
		break;
	case PTHREAD_PRIO_PROTECT:
		curthread->prio_protect_count--;
		PTHREAD_ASSERT(curthread->prio_protect_count >= 0,
		    "priority protection counter cannot be less than zero");
		restore_prio_protection(curthread);
		if (curthread->prio_inherit_count > 0)
			restore_prio_inheritance(curthread);
		break;
	default:
		/* Nothing */
		break;
	}
	_thread_critical_exit(curthread);
	return (0);
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex *m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

void
_mutex_lock_backout(pthread_t pthread)
{
	struct pthread_mutex *mutex;

	mutex = pthread->data.mutex;
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
		mutex_queue_remove(mutex, pthread);

		/* This thread is no longer waiting for the mutex: */
		pthread->data.mutex = NULL;
	}
}
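
/*
 * Usage sketch (illustrative only): the three mutex types handled by
 * mutex_self_lock() above behave differently when the owner relocks:
 * PTHREAD_MUTEX_ERRORCHECK fails with EDEADLK, PTHREAD_MUTEX_NORMAL
 * deadlocks, and PTHREAD_MUTEX_RECURSIVE counts the extra lock.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t mtx;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&mtx, &attr);
 *	pthread_mutex_lock(&mtx);
 *	pthread_mutex_lock(&mtx);	// ok: m_count goes to 2
 *	pthread_mutex_unlock(&mtx);
 *	pthread_mutex_unlock(&mtx);	// last unlock releases the mutex
 */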

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.  This function will return with the thread locked.
 */
static inline pthread_t
mutex_queue_deq(pthread_mutex_t mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		_thread_critical_enter(pthread);
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		if (((pthread->cancelflags & PTHREAD_CANCELLING) == 0 ||
		    (pthread->cancelflags & PTHREAD_CANCEL_DISABLE) != 0 ||
		    ((pthread->cancelflags & PTHREAD_CANCELLING) != 0 &&
		    (pthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0)) &&
		    pthread->state == PS_MUTEX_WAIT)
			break;
		else
			_thread_critical_exit(pthread);
	}

	return (pthread);
}

/*
 * Remove a waiting thread from a mutex queue in descending priority order.
 */
static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
	}
}

/*
 * Enqueue a waiting thread to a queue in descending priority order.
 */
static inline void
mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
	char *name;

	name = pthread->name ? pthread->name : "unknown";
	if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0)
		_thread_printf(2, "Thread (%s:%u) already on condq\n",
		    name, pthread->uniqueid);
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0)
		_thread_printf(2, "Thread (%s:%u) already on mutexq\n",
		    name, pthread->uniqueid);
	PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);

	/*
	 * For the common case of all threads having equal priority,
	 * we perform a quick check against the priority of the thread
	 * at the tail of the queue.
	 */
	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
	else {
		tid = TAILQ_FIRST(&mutex->m_queue);
		while (pthread->active_priority <= tid->active_priority)
			tid = TAILQ_NEXT(tid, sqe);
		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
	}
	if (mutex->m_protocol == PTHREAD_PRIO_INHERIT &&
	    pthread == TAILQ_FIRST(&mutex->m_queue)) {
		UMTX_LOCK(&mutex->m_owner->lock);
		if (pthread->active_priority >
		    mutex->m_owner->active_priority) {
			mutex->m_owner->inherited_priority =
			    pthread->active_priority;
			mutex->m_owner->active_priority =
			    pthread->active_priority;
		}
		UMTX_UNLOCK(&mutex->m_owner->lock);
	}
	pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
}

/*
 * Caller must lock mutex and pthread.
 */
void
readjust_priorities(struct pthread *pthread, struct pthread_mutex *mtx)
{
	if (pthread->state == PS_MUTEX_WAIT) {
		mutex_queue_remove(mtx, pthread);
		mutex_queue_enq(mtx, pthread);
		UMTX_LOCK(&mtx->m_owner->lock);
		adjust_prio_inheritance(mtx->m_owner);
		if (mtx->m_owner->prio_protect_count > 0)
			adjust_prio_protection(mtx->m_owner);
		UMTX_UNLOCK(&mtx->m_owner->lock);
	}
	if (pthread->prio_inherit_count > 0)
		adjust_prio_inheritance(pthread);
	if (pthread->prio_protect_count > 0)
		adjust_prio_protection(pthread);
}
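
/*
 * Worked example (illustrative only) of the descending-priority order
 * maintained by mutex_queue_enq() above: with waiters of priority
 * 20, 15, 15 already queued, a new priority-15 waiter passes the tail
 * fast-path check (15 <= 15) and is appended, giving FIFO order among
 * equal priorities:
 *
 *	head -> 20 -> 15 -> 15 -> 15(new) -> tail
 *
 * A new priority-18 waiter fails the fast path (18 > 15), so the queue
 * is scanned from the head and the thread is inserted before the first
 * waiter of lower priority:
 *
 *	head -> 20 -> 18(new) -> 15 -> 15 -> tail
 */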

/*
 * Returns with the lock owned and on the thread's mutexq.  If
 * the mutex is currently owned by another thread it will sleep
 * until it is available.
 */
static int
get_mcontested(pthread_mutex_t mutexp, const struct timespec *abstime)
{
	int error;

	/*
	 * If the timeout is invalid this thread is not allowed
	 * to block:
	 */
	if (abstime != NULL) {
		if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
			return (EINVAL);
	}

	/*
	 * Put this thread on the mutex's list of waiting threads.
	 * The lock on the thread ensures atomic (as far as other
	 * threads are concerned) setting of the thread state with
	 * its status on the mutex queue.
	 */
	_thread_critical_enter(curthread);
	mutex_queue_enq(mutexp, curthread);
	do {
		if ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0 &&
		    (curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0 &&
		    (curthread->cancelflags & PTHREAD_CANCELLING) != 0) {
			mutex_queue_remove(mutexp, curthread);
			_thread_critical_exit(curthread);
			_SPINUNLOCK(&mutexp->lock);
			pthread_testcancel();
		}
		PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT);
		curthread->data.mutex = mutexp;
		_thread_critical_exit(curthread);
		_SPINUNLOCK(&mutexp->lock);
		error = _thread_suspend(curthread, abstime);
		if (error != 0 && error != ETIMEDOUT && error != EINTR)
			PANIC("Cannot suspend on mutex.");
		_SPINLOCK(&mutexp->lock);
		_thread_critical_enter(curthread);
		if (error == ETIMEDOUT) {
			/*
			 * Between the timeout and when the mutex was
			 * locked, the previous owner may have released
			 * the mutex to this thread.  Or not.
			 */
			if (mutexp->m_owner == curthread) {
				error = 0;
			} else {
				_mutex_lock_backout(curthread);
				curthread->state = PS_RUNNING;
				error = ETIMEDOUT;
			}
		}
	} while ((curthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0);
	_thread_critical_exit(curthread);
	return (error);
}