/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
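
/*
 * POSIX mutex implementation, built on the kernel umtx primitive.
 * Plain (PTHREAD_PRIO_NONE) mutexes take a short-cut path; priority
 * inheritance and priority protection mutexes go through the slower,
 * queue-based code below.
 */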

#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define MUTEX_DESTROY(m)	do {	\
	free(m);			\
} while (0)

/*
 * Prototypes
 */
static long	mutex_handoff(struct pthread *, struct pthread_mutex *);
static int	mutex_self_trylock(struct pthread *, pthread_mutex_t);
static int	mutex_self_lock(struct pthread *, pthread_mutex_t,
		    const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *, int);
static void	mutex_priority_adjust(struct pthread *, pthread_mutex_t);
static void	mutex_rescan_owned(struct pthread *, struct pthread *,
		    struct pthread_mutex *);
#if 0
static pthread_t mutex_queue_deq(pthread_mutex_t);
#endif
static void	mutex_queue_remove(pthread_mutex_t, pthread_t);
static void	mutex_queue_enq(pthread_mutex_t, pthread_t);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	struct pthread_mutex *pmutex;
	enum pthread_mutextype type;
	int protocol;
	int ceiling;
	int flags;
	int ret = 0;

	/* Check if default mutex attributes: */
	if (mutex_attr == NULL || *mutex_attr == NULL) {
		/* Default to an (error checking) POSIX mutex: */
		type = PTHREAD_MUTEX_ERRORCHECK;
		protocol = PTHREAD_PRIO_NONE;
		ceiling = THR_MAX_PRIORITY;
		flags = 0;
	}

	/* Check mutex type: */
	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
	    ((*mutex_attr)->m_type >= PTHREAD_MUTEX_TYPE_MAX))
		/* Return an invalid argument error: */
		ret = EINVAL;

	/* Check mutex protocol: */
	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
	    ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
		/* Return an invalid argument error: */
		ret = EINVAL;

	else {
		/* Use the requested mutex type and protocol: */
		type = (*mutex_attr)->m_type;
		protocol = (*mutex_attr)->m_protocol;
		ceiling = (*mutex_attr)->m_ceiling;
		flags = (*mutex_attr)->m_flags;
	}

	/* Check no errors so far: */
	if (ret == 0) {
		if ((pmutex = (pthread_mutex_t)
		    malloc(sizeof(struct pthread_mutex))) == NULL) {
			ret = ENOMEM;
		} else {
			_thr_umtx_init(&pmutex->m_lock);
			/* Set the mutex flags: */
			pmutex->m_flags = flags;

			/* Process according to mutex type: */
			switch (type) {
			/* case PTHREAD_MUTEX_DEFAULT: */
			case PTHREAD_MUTEX_ERRORCHECK:
			case PTHREAD_MUTEX_NORMAL:
				/* Nothing to do here. */
				break;

			/* Single UNIX Spec 2 recursive mutex: */
			case PTHREAD_MUTEX_RECURSIVE:
				/* Reset the mutex count: */
				pmutex->m_count = 0;
				break;

			/* Trap invalid mutex types: */
			default:
				/* Return an invalid argument error: */
				ret = EINVAL;
				break;
			}
			if (ret == 0) {
				/* Initialise the rest of the mutex: */
				TAILQ_INIT(&pmutex->m_queue);
				pmutex->m_flags |= MUTEX_FLAGS_INITED;
				if (private)
					pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
				pmutex->m_owner = NULL;
				pmutex->m_type = type;
				pmutex->m_protocol = protocol;
				pmutex->m_refcount = 0;
				if (protocol == PTHREAD_PRIO_PROTECT)
					pmutex->m_prio = ceiling;
				else
					pmutex->m_prio = -1;
				pmutex->m_saved_prio = 0;
				MUTEX_INIT_LINK(pmutex);
				*mutex = pmutex;
			} else {
				/* Free the mutex lock structure: */
				MUTEX_DESTROY(pmutex);
				*mutex = NULL;
			}
		}
	}
	/* Return the completion status: */
	return (ret);
}
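
/*
 * Back-end for lazy initialization of statically allocated mutexes
 * (a statically initialized mutex starts out as a NULL pointer).
 * The global _mutex_static_lock serializes racing initializers so
 * that only one thread performs the real mutex_init().
 */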
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0);
}

int
_mutex_reinit(pthread_mutex_t *mutex)
{
	_thr_umtx_init(&(*mutex)->m_lock);
	TAILQ_INIT(&(*mutex)->m_queue);
	MUTEX_INIT_LINK(*mutex);
	(*mutex)->m_owner = NULL;
	(*mutex)->m_count = 0;
	(*mutex)->m_refcount = 0;
	(*mutex)->m_prio = 0;
	(*mutex)->m_saved_prio = 0;
	return (0);
}

void
_mutex_fork(struct pthread *curthread)
{
	TAILQ_INIT(&curthread->mutexq);
	TAILQ_INIT(&curthread->pri_mutexq);
	curthread->priority_mutex_count = 0;
#if 0
	struct pthread_mutex *m;

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe) {
		m->m_lock = (umtx_t)curthread->tid;
	}

	/* Clear contender for priority mutexes */
	TAILQ_FOREACH(m, &curthread->pri_mutexq, m_qe) {
		/* clear another thread locked us */
		_thr_umtx_init(&m->m_lock);
		TAILQ_INIT(&m->m_queue);
	}
#endif
}
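
/*
 * Destroy a mutex.  Fails with EBUSY if the mutex is currently
 * locked, has waiters, or is still referenced by a condition
 * variable (m_refcount != 0).
 */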
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If the attempt fails, the mutex is in use.
		 */
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret)
			return (ret);

		/*
		 * Check the mutex's other fields to see if it is in use.
		 * This matters mostly for priority mutex types, or when
		 * condition variables are still referencing the mutex.
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			_thr_umtx_unlock(&m->m_lock, curthread->tid);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	int ret = 0;

	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_trylock_common");

	/* Short cut for simple mutex. */
	if ((*mutex)->m_protocol == PTHREAD_PRIO_NONE) {
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret == 0) {
			(*mutex)->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread) {
			ret = mutex_self_trylock(curthread, *mutex);
		} /* else {} */

		return (ret);
	}

	/* Code for priority mutex */

	/* Lock the mutex structure: */
	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex type: */
	switch ((*mutex)->m_protocol) {
	/* POSIX priority inheritance mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_LOCK(curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority = (*mutex)->m_prio;
			THR_UNLOCK(curthread);

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_LOCK(curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;
			THR_UNLOCK(curthread);
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex types: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	/* Unlock the mutex structure: */
	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

	/* Return the completion status: */
	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*mutex != NULL) ||
	    ((ret = init_static(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if ((*mutex != NULL) ||
	    ((ret = init_static_private(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}
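
/*
 * Common back-end for the lock and timedlock operations.  A NULL
 * abstime requests an indefinite wait; otherwise abstime is an
 * absolute CLOCK_REALTIME deadline and ETIMEDOUT is returned once
 * it has passed.
 */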
static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
    const struct timespec *abstime)
{
	struct timespec ts, ts2;
	long cycle;
	int ret = 0;

	THR_ASSERT((m != NULL) && (*m != NULL),
	    "Uninitialized mutex in mutex_lock_common");

	if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000))
		return (EINVAL);

	/* Short cut for simple mutex. */
	if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
		/* Default POSIX mutex: */
		ret = THR_UMTX_TRYLOCK(curthread, &(*m)->m_lock);
		if (ret == 0) {
			(*m)->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*m);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*m), m_qe);
		} else if ((*m)->m_owner == curthread) {
			ret = mutex_self_lock(curthread, *m, abstime);
		} else {
			if (abstime == NULL) {
				THR_UMTX_LOCK(curthread, &(*m)->m_lock);
				ret = 0;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts);
				TIMESPEC_SUB(&ts2, abstime, &ts);
				ret = THR_UMTX_TIMEDLOCK(curthread,
				    &(*m)->m_lock, &ts2);
				/*
				 * A timed-out wait is not restarted if
				 * it was interrupted; it is not worth
				 * doing.
				 */
				if (ret == EINTR)
					ret = ETIMEDOUT;
			}
			if (ret == 0) {
				(*m)->m_owner = curthread;
				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);
			}
		}
		return (ret);
	}

	/* Code for priority mutex */

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*m)->m_queue);
			(*m)->m_flags |= MUTEX_FLAGS_INITED;
			MUTEX_INIT_LINK(*m);
		}

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*m)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*m)->m_owner = curthread;

				THR_LOCK(curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 * Make sure the thread's scheduling lock is
				 * held while priorities are adjusted.
				 */
				(*m)->m_prio = curthread->active_priority;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_UNLOCK(curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m, abstime);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
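				/*
				 * Sleep protocol: enqueue on the mutex,
				 * snapshot the thread's wakeup counter
				 * ("cycle") and drop the mutex lock, then
				 * wait for the counter to change.
				 * mutex_handoff() advances the counter
				 * when it makes this thread the owner,
				 * and the enclosing loop re-checks
				 * ownership after every wakeup.
				 */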
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;

				if (curthread->active_priority > (*m)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, *m);

				THR_LOCK(curthread);
				cycle = curthread->cycle;
				THR_UNLOCK(curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				if (abstime != NULL) {
					clock_gettime(CLOCK_REALTIME, &ts);
					TIMESPEC_SUB(&ts2, abstime, &ts);
					ret = _thr_umtx_wait(&curthread->cycle,
					    cycle, &ts2);
				} else {
					/* No deadline; wait indefinitely. */
					ret = _thr_umtx_wait(&curthread->cycle,
					    cycle, NULL);
				}
				if (ret == EINTR)
					ret = 0;

				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}
				/*
				 * Only clear these after assuring the
				 * thread is dequeued.
				 */
				curthread->data.mutex = NULL;
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*m)->m_prio) {
				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				ret = EINVAL;
			}
			/* Check if this mutex is not locked: */
			else if ((*m)->m_owner == NULL) {
				/*
				 * Lock the mutex for the running
				 * thread:
				 */
				(*m)->m_owner = curthread;

				THR_LOCK(curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority.  Make sure the thread's
				 * scheduling lock is held while priorities
				 * are adjusted.
				 */
				curthread->active_priority = (*m)->m_prio;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_UNLOCK(curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m, abstime);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;

				/* Clear any previous error: */
				curthread->error = 0;

				THR_LOCK(curthread);
				cycle = curthread->cycle;
				THR_UNLOCK(curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				if (abstime != NULL) {
					clock_gettime(CLOCK_REALTIME, &ts);
					TIMESPEC_SUB(&ts2, abstime, &ts);
					ret = _thr_umtx_wait(&curthread->cycle,
					    cycle, &ts2);
				} else {
					/* No deadline; wait indefinitely. */
					ret = _thr_umtx_wait(&curthread->cycle,
					    cycle, NULL);
				}
				if (ret == EINTR)
					ret = 0;

				curthread->data.mutex = NULL;
				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}
				/*
				 * Only clear these after assuring the
				 * thread is dequeued.
				 */
				curthread->data.mutex = NULL;

				/*
				 * The thread's priority may have changed
				 * while waiting for the mutex, causing a
				 * ceiling violation.
				 */
				ret = curthread->error;
				curthread->error = 0;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

	} while (((*m)->m_owner != curthread) && (ret == 0));

	/* Return the completion status: */
	return (ret);
}
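
/*
 * Typical caller usage of the lock functions above (illustrative
 * only; this is the standard POSIX API, not something specific to
 * this implementation):
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 1;	// wait at most one second
 *	if (pthread_mutex_timedlock(&lock, &deadline) == 0) {
 *		// ... critical section ...
 *		pthread_mutex_unlock(&lock);
 *	}
 */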
int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret = 0;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, NULL);

	return (ret);
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret = 0;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, NULL);

	return (ret);
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m,
    const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int ret = 0;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, abs_timeout);

	return (ret);
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m,
    const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int ret = 0;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, abs_timeout);

	return (ret);
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 0));
}

int
_mutex_cv_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 1));
}

int
_mutex_cv_lock(pthread_mutex_t *m)
{
	int ret;

	if ((ret = _pthread_mutex_lock(m)) == 0)
		(*m)->m_refcount--;
	return (ret);
}

static int
mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
{
	int ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}
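
/*
 * Handle a blocking lock request for a mutex the calling thread
 * already owns.  Error-check mutexes fail with EDEADLK (or sleep out
 * the timeout and return ETIMEDOUT), normal mutexes deliberately
 * deadlock, and recursive mutexes bump the lock count.
 */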
static int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m,
    const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (m->m_protocol != PTHREAD_PRIO_NONE) {
			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread *curthread = _get_curthread();
	long tid = -1;
	int ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Short cut for simple mutex. */
		if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if (__predict_false((*m)->m_owner != curthread)) {
				ret = EPERM;
			} else if (__predict_false(
			    (*m)->m_type == PTHREAD_MUTEX_RECURSIVE &&
			    (*m)->m_count > 0)) {
				/* Decrement the count: */
				(*m)->m_count--;
				if (add_reference)
					(*m)->m_refcount++;
			} else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;
				(*m)->m_owner = NULL;
				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&curthread->mutexq, (*m), m_qe);
				MUTEX_INIT_LINK(*m);
				if (add_reference)
					(*m)->m_refcount++;
				/*
				 * Hand off the mutex to the next waiting
				 * thread.
				 */
				_thr_umtx_unlock(&(*m)->m_lock, curthread->tid);
			}
			return (ret);
		}

		/* Code for priority mutex */

		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				THR_LOCK(curthread);
				curthread->inherited_priority =
				    (*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority
				 * mutex.
				 */
				curthread->priority_mutex_count--;
				THR_UNLOCK(curthread);

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				tid = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				THR_LOCK(curthread);
				curthread->inherited_priority =
				    (*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority
				 * mutex.
				 */
				curthread->priority_mutex_count--;
				THR_UNLOCK(curthread);

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				tid = mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
	}

	/* Return the completion status: */
	return (ret);
}

/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called without the target thread's scheduling lock held.
 */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
	struct pthread_mutex *m;

	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 *
		 * Be sure to lock the first mutex in the list of owned
		 * mutexes.  This acts as a barrier against another
		 * simultaneous call to change the thread's priority
		 * and against the owning thread releasing the mutex.
		 */
		m = TAILQ_FIRST(&pthread->pri_mutexq);
		if (m != NULL) {
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
			/*
			 * Make sure the thread still owns the lock.
			 */
			if (m == TAILQ_FIRST(&pthread->pri_mutexq))
				mutex_rescan_owned(curthread, pthread,
				    /* rescan all owned */ NULL);
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (propagate_prio != 0) {
		/*
		 * Lock the thread's scheduling queue.  This is a bit
		 * convoluted; the "in synchronization queue flag" can
		 * only be cleared with both the thread's scheduling and
		 * mutex locks held.  The thread's pointer to the wanted
		 * mutex is guaranteed to be valid during this time.
		 */
		THR_THREAD_LOCK(curthread, pthread);

		if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
		    ((m = pthread->data.mutex) == NULL))
			THR_THREAD_UNLOCK(curthread, pthread);
		else {
			/*
			 * This thread is currently waiting on a mutex; unlock
			 * the scheduling queue lock and lock the mutex.  We
			 * can't hold both at the same time because the locking
			 * order could cause a deadlock.
			 */
			THR_THREAD_UNLOCK(curthread, pthread);
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Check to make sure this thread is still in the
			 * same state (the lock above can yield the CPU to
			 * another thread or the thread may be running on
			 * another CPU).
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (pthread->data.mutex == m)) {
				/*
				 * Remove and reinsert this thread into
				 * the list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, m);
			}

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}

/*
 * Called when a new thread is added to the mutex waiting queue or
 * when a thread that is already in the mutex waiting queue changes
 * priority.
 *
 * This must be called with the mutex locked by the current thread.
 */
static void
mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
	pthread_mutex_t m = mutex;
	struct pthread *pthread_next, *pthread = mutex->m_owner;
	int done, temp_prio;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed), we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);	/* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Don't unlock the mutex passed in as an argument.  It is
	 * expected to be locked and unlocked by the caller.
	 */
	done = 1;
	do {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all mutexes held by the owning
		 * thread since taking this mutex.  This also has a
		 * potential side effect of changing the thread's priority.
		 *
		 * At this point the mutex is locked by the current thread.
		 * The owning thread can't release the mutex until it is
		 * unlocked, so we should be able to safely walk its list
		 * of owned mutexes.
		 */
		mutex_rescan_owned(curthread, pthread, m);

		/*
		 * If this isn't the first time through the loop,
		 * the current mutex needs to be unlocked.
		 */
		if (done == 0)
			THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Assume we're done unless told otherwise: */
		done = 1;

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
		    ((m = pthread->data.mutex) != NULL) &&
		    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Lock the mutex structure: */
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Make sure the thread is still waiting on the
			 * mutex:
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (m == pthread->data.mutex)) {
				/*
				 * The priority for this thread has changed.
				 * Remove and reinsert this thread into the
				 * list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				/*
				 * Grab the waiting thread with highest
				 * priority:
				 */
				pthread_next = TAILQ_FIRST(&m->m_queue);

				/*
				 * Calculate the mutex priority as the maximum
				 * of the highest active priority of any
				 * waiting threads and the owning thread's
				 * active priority.
				 */
				temp_prio = MAX(pthread_next->active_priority,
				    MAX(m->m_saved_prio,
				    m->m_owner->base_priority));

				if (temp_prio != m->m_prio) {
					/*
					 * The priority needs to be propagated
					 * to the mutex this thread is waiting
					 * on and up to the owner of that mutex.
					 */
					m->m_prio = temp_prio;
					pthread = m->m_owner;

					/* We're not done yet: */
					done = 0;
				}
			}
			/* Only release the mutex if we're done: */
			if (done != 0)
				THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	} while (done == 0);
}
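
/*
 * Walk a thread's list of owned priority mutexes, starting after
 * "mutex" (or from the head of the list when mutex is NULL), and
 * recompute each mutex's priority along with the thread's inherited
 * and active priorities.
 */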
static void
mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
    struct pthread_mutex *mutex)
{
	struct pthread_mutex *m;
	struct pthread *pthread_next;
	int active_prio, inherited_prio;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->pri_mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	} else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				    pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/* Lock the thread's scheduling queue: */
		THR_THREAD_LOCK(curthread, pthread);

		/* if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) */
		if (1) {
			/*
			 * This thread is not in a run queue.  Just set
			 * its active priority.
			 */
			pthread->active_priority = active_prio;
		}
		else {
			/*
			 * This thread is in a run queue.  Remove it from
			 * the queue before changing its priority:
			 */
			/* THR_RUNQ_REMOVE(pthread);*/
			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;
				/* THR_RUNQ_INSERT_HEAD(pthread); */
			} else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;
				/* THR_RUNQ_INSERT_TAIL(pthread);*/
			}
		}
		THR_THREAD_UNLOCK(curthread, pthread);
	}
}
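
/*
 * Unlock every private (delete-safe, libc-internal) mutex still held
 * by the thread; mutexes without MUTEX_FLAGS_PRIVATE are skipped.
 */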
void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex *m, *m_next;

	for (m = TAILQ_FIRST(&pthread->pri_mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			pthread_mutex_unlock(&m);
	}
}

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 *
 * In order to properly dequeue a thread from the mutex queue and
 * make it runnable without the possibility of errant wakeups, it
 * is necessary to lock the thread's scheduling queue while also
 * holding the mutex lock.
 */
static long
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
	struct pthread *pthread;
	long tid = -1;

	/* Keep dequeueing until we find a valid thread: */
	mutex->m_owner = NULL;
	pthread = TAILQ_FIRST(&mutex->m_queue);
	while (pthread != NULL) {
		/* Take the thread's scheduling lock: */
		THR_THREAD_LOCK(curthread, pthread);

		/* Remove the thread from the mutex queue: */
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		switch (mutex->m_protocol) {
		case PTHREAD_PRIO_NONE:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);
			break;

		case PTHREAD_PRIO_INHERIT:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);

			/* Track number of priority mutexes owned: */
			pthread->priority_mutex_count++;

			/*
			 * Set the priority of the mutex.  Since our waiting
			 * threads are in descending priority order, the
			 * priority of the mutex becomes the active priority
			 * of the thread we just dequeued.
			 */
			mutex->m_prio = pthread->active_priority;

			/* Save the owning thread's inherited priority: */
			mutex->m_saved_prio = pthread->inherited_priority;

			/*
			 * The owning thread's inherited priority now becomes
			 * its active priority (the priority of the mutex).
			 */
			pthread->inherited_priority = mutex->m_prio;
			break;

		case PTHREAD_PRIO_PROTECT:
			if (pthread->active_priority > mutex->m_prio) {
				/*
				 * Either the mutex ceiling priority has
				 * been lowered and/or this thread's priority
				 * has been raised subsequent to the thread
				 * being queued on the waiting list.
				 */
				pthread->error = EINVAL;
			}
			else {
				/*
				 * Assign the new owner and add the mutex
				 * to the thread's list of owned mutexes.
				 */
				mutex->m_owner = pthread;
				TAILQ_INSERT_TAIL(&pthread->pri_mutexq,
				    mutex, m_qe);

				/* Track number of priority mutexes owned: */
				pthread->priority_mutex_count++;

				/*
				 * Save the owning thread's inherited
				 * priority:
				 */
				mutex->m_saved_prio =
				    pthread->inherited_priority;

				/*
				 * The owning thread inherits the ceiling
				 * priority of the mutex and executes at
				 * that priority:
				 */
				pthread->inherited_priority = mutex->m_prio;
				pthread->active_priority = mutex->m_prio;

			}
			break;
		}

		/* Make the thread runnable and unlock the scheduling queue: */
		pthread->cycle++;
		_thr_umtx_wake(&pthread->cycle, 1);

		THR_THREAD_UNLOCK(curthread, pthread);
		if (mutex->m_owner == pthread)
			/* We're done; a valid owner was found. */
			break;
		else
			/* Get the next thread from the waiting queue: */
			pthread = TAILQ_NEXT(pthread, sqe);
	}

	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
		/* This mutex has no priority: */
		mutex->m_prio = 0;
	return (tid);
}

#if 0
/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 */
static pthread_t
mutex_queue_deq(struct pthread_mutex *mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
	}

	return (pthread);
}
#endif

/*
 * Remove a waiting thread from a mutex queue in descending priority order.
 */
static void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
	if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
	}
}

/*
 * Enqueue a waiting thread to a queue in descending priority order.
 */
static void
mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);

	THR_ASSERT_NOT_IN_SYNCQ(pthread);
	/*
	 * For the common case of all threads having equal priority,
	 * we perform a quick check against the priority of the thread
	 * at the tail of the queue.
	 */
	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
	else {
		tid = TAILQ_FIRST(&mutex->m_queue);
		while (pthread->active_priority <= tid->active_priority)
			tid = TAILQ_NEXT(tid, sqe);
		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
	}
	pthread->sflags |= THR_FLAGS_IN_SYNCQ;
}