/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define MUTEX_DESTROY(m)	do {	\
	free(m);			\
} while (0)

/*
 * Prototypes
 */
static long	mutex_handoff(struct pthread *, struct pthread_mutex *);
static int	mutex_self_trylock(struct pthread *, pthread_mutex_t);
static int	mutex_self_lock(struct pthread *, pthread_mutex_t,
		    const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *, int);
static void	mutex_priority_adjust(struct pthread *, pthread_mutex_t);
static void	mutex_rescan_owned(struct pthread *, struct pthread *,
		    struct pthread_mutex *);
#if 0
static pthread_t mutex_queue_deq(pthread_mutex_t);
#endif
static void	mutex_queue_remove(pthread_mutex_t, pthread_t);
static void	mutex_queue_enq(pthread_mutex_t, pthread_t);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	struct pthread_mutex *pmutex;
	enum pthread_mutextype type;
	int protocol;
	int ceiling;
	int flags;
	int ret = 0;

	/* Check for default mutex attributes: */
	if (mutex_attr == NULL || *mutex_attr == NULL) {
		/* Default to an (error checking) POSIX mutex: */
		type = PTHREAD_MUTEX_ERRORCHECK;
		protocol = PTHREAD_PRIO_NONE;
		ceiling = THR_MAX_PRIORITY;
		flags = 0;
	}

	/* Check mutex type: */
	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
	    ((*mutex_attr)->m_type >= PTHREAD_MUTEX_TYPE_MAX))
		/* Return an invalid argument error: */
		ret = EINVAL;

	/* Check mutex protocol: */
	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
	    ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
		/* Return an invalid argument error: */
		ret = EINVAL;

	else {
		/* Use the requested mutex type and protocol: */
		type = (*mutex_attr)->m_type;
		protocol = (*mutex_attr)->m_protocol;
		ceiling = (*mutex_attr)->m_ceiling;
		flags = (*mutex_attr)->m_flags;
	}

	/* Check no errors so far: */
	if (ret == 0) {
		if ((pmutex = (pthread_mutex_t)
		    malloc(sizeof(struct pthread_mutex))) == NULL) {
			ret = ENOMEM;
		} else {
			_thr_umtx_init(&pmutex->m_lock);
			/* Set the mutex flags: */
			pmutex->m_flags = flags;

			/* Process according to mutex type: */
			switch (type) {
			/* case PTHREAD_MUTEX_DEFAULT: */
			case PTHREAD_MUTEX_ERRORCHECK:
			case PTHREAD_MUTEX_NORMAL:
				/* Nothing to do here. */
				break;

			/* Single UNIX Spec 2 recursive mutex: */
			case PTHREAD_MUTEX_RECURSIVE:
				/* Reset the mutex count: */
				pmutex->m_count = 0;
				break;

			/* Trap invalid mutex types: */
			default:
				/* Return an invalid argument error: */
				ret = EINVAL;
				break;
			}
			if (ret == 0) {
				/* Initialise the rest of the mutex: */
				TAILQ_INIT(&pmutex->m_queue);
				pmutex->m_flags |= MUTEX_FLAGS_INITED;
				if (private)
					pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
				pmutex->m_owner = NULL;
				pmutex->m_type = type;
				pmutex->m_protocol = protocol;
				pmutex->m_refcount = 0;
				if (protocol == PTHREAD_PRIO_PROTECT)
					pmutex->m_prio = ceiling;
				else
					pmutex->m_prio = -1;
				pmutex->m_saved_prio = 0;
				MUTEX_INIT_LINK(pmutex);
				*mutex = pmutex;
			} else {
				/* Free the mutex lock structure: */
				MUTEX_DESTROY(pmutex);
				*mutex = NULL;
			}
		}
	}
	/* Return the completion status: */
	return (ret);
}
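
/*
 * Caller-side sketch of how mutex_init() above is reached, assuming
 * nothing beyond the standard pthread API (illustrative only):
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&m, &attr);	(ends up in mutex_init(&m, &attr, 0))
 *	pthread_mutexattr_destroy(&attr);
 *
 * A NULL attribute pointer selects the error-checking default above.
 */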

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 1));
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 0));
}

int
_mutex_reinit(pthread_mutex_t *mutex)
{
	_thr_umtx_init(&(*mutex)->m_lock);
	TAILQ_INIT(&(*mutex)->m_queue);
	MUTEX_INIT_LINK(*mutex);
	(*mutex)->m_owner = NULL;
	(*mutex)->m_count = 0;
	(*mutex)->m_refcount = 0;
	(*mutex)->m_prio = 0;
	(*mutex)->m_saved_prio = 0;
	return (0);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited, because the
	 * owner is the forking thread, which lives in the parent
	 * process; such mutexes would have to be removed from the
	 * owned mutex list.  Process-shared mutexes are currently not
	 * supported, so this is not yet a concern.
	 */
	TAILQ_FOREACH(m, &curthread->mutexq, m_qe) {
		m->m_lock = (umtx_t)curthread->tid;
	}

	/* Clear contenders for priority mutexes: */
	TAILQ_FOREACH(m, &curthread->pri_mutexq, m_qe) {
		/* Clear the record of another thread locking us: */
		_thr_umtx_init(&m->m_lock);
		TAILQ_INIT(&m->m_queue);
	}
}
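
/*
 * A sketch of the fixup above: if thread T1 in the parent holds a
 * simple mutex when it calls fork(), the child starts with a single
 * thread whose tid differs from T1's.  Rewriting m_lock with the
 * child's tid keeps pthread_mutex_unlock() working in the child,
 * while priority mutexes are reset outright because their waiter
 * queues refer to parent threads that do not exist in the child.
 */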

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/*
		 * Try to lock the mutex structure.  We only need to
		 * try once; if it fails, the mutex is in use.
		 */
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret)
			return (ret);

		/*
		 * Check the mutex's other fields to see if it is in
		 * use, mostly for priority mutex types or condition
		 * variables referencing it.
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be
			 * freed and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			_thr_umtx_unlock(&m->m_lock, curthread->tid);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}
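
/*
 * Illustration of the busy path above (hypothetical caller code; the
 * exact errno for a self-held simple mutex comes from the underlying
 * umtx trylock):
 *
 *	pthread_mutex_lock(&m);
 *	pthread_mutex_destroy(&m);	(fails; m is still valid)
 *	pthread_mutex_unlock(&m);
 *	pthread_mutex_destroy(&m);	(succeeds; m is set to NULL)
 */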

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	int ret = 0;

	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_trylock_common");

	/* Shortcut for the simple mutex case. */
	if ((*mutex)->m_protocol == PTHREAD_PRIO_NONE) {
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret == 0) {
			(*mutex)->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread) {
			ret = mutex_self_trylock(curthread, *mutex);
		} /* else {} */

		return (ret);
	}

	/* Code for priority mutexes. */

	/* Lock the mutex structure: */
	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex protocol: */
	switch ((*mutex)->m_protocol) {
	/* POSIX priority inheritance mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_LOCK(curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority = (*mutex)->m_prio;
			THR_UNLOCK(curthread);

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_LOCK(curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;
			THR_UNLOCK(curthread);
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex protocols: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	/* Unlock the mutex structure: */
	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

	/* Return the completion status: */
	return (ret);
}
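
/*
 * Ceiling-check example for PTHREAD_PRIO_PROTECT above (priorities are
 * illustrative): if the mutex was created with a ceiling of 10 and the
 * calling thread's active priority is 15, the trylock fails with
 * EINVAL rather than EBUSY, since granting the lock would break the
 * protocol's invariant that the owner runs at no less than the ceiling
 * priority.
 */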

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*mutex != NULL) ||
	    ((ret = init_static(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization, marking the mutex private (delete safe):
	 */
	if ((*mutex != NULL) ||
	    ((ret = init_static_private(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
    const struct timespec *abstime)
{
	struct timespec ts, ts2;
	long cycle;
	int ret = 0;

	THR_ASSERT((m != NULL) && (*m != NULL),
	    "Uninitialized mutex in mutex_lock_common");

	if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000))
		return (EINVAL);

	/* Shortcut for the simple mutex case. */
	if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
		/* Default POSIX mutex: */
		ret = THR_UMTX_TRYLOCK(curthread, &(*m)->m_lock);
		if (ret == 0) {
			(*m)->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*m);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*m), m_qe);
		} else if ((*m)->m_owner == curthread) {
			ret = mutex_self_lock(curthread, *m, abstime);
		} else {
			if (abstime == NULL) {
				THR_UMTX_LOCK(curthread, &(*m)->m_lock);
				ret = 0;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts);
				TIMESPEC_SUB(&ts2, abstime, &ts);
				ret = THR_UMTX_TIMEDLOCK(curthread,
				    &(*m)->m_lock, &ts2);
				/*
				 * A timed wait is not restarted if it
				 * was interrupted; it is not worth the
				 * effort.
				 */
				if (ret == EINTR)
					ret = ETIMEDOUT;
			}
			if (ret == 0) {
				(*m)->m_owner = curthread;
				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);
			}
		}
		return (ret);
	}

	/* Code for priority mutexes. */

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*m)->m_queue);
			(*m)->m_flags |= MUTEX_FLAGS_INITED;
			MUTEX_INIT_LINK(*m);
		}

		/* Process according to mutex protocol: */
		switch ((*m)->m_protocol) {
		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*m)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*m)->m_owner = curthread;

				THR_LOCK(curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 * Make sure the thread's scheduling lock is
				 * held while priorities are adjusted.
				 */
				(*m)->m_prio = curthread->active_priority;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_UNLOCK(curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m, abstime);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;

				if (curthread->active_priority > (*m)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, *m);

				THR_LOCK(curthread);
				cycle = curthread->cycle;
				THR_UNLOCK(curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/*
				 * A NULL timeout is assumed to let
				 * _thr_umtx_wait() block without bound.
				 */
				if (abstime == NULL) {
					ret = _thr_umtx_wait(&curthread->cycle,
					    cycle, NULL);
				} else {
					clock_gettime(CLOCK_REALTIME, &ts);
					TIMESPEC_SUB(&ts2, abstime, &ts);
					ret = _thr_umtx_wait(&curthread->cycle,
					    cycle, &ts2);
				}
				if (ret == EINTR)
					ret = 0;

				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread,
					    &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread,
					    &(*m)->m_lock);
				}
				/*
				 * Only clear this after assuring the
				 * thread is dequeued.
				 */
				curthread->data.mutex = NULL;
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*m)->m_prio) {
				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				ret = EINVAL;
			}
			/* Check if this mutex is not locked: */
			else if ((*m)->m_owner == NULL) {
				/*
				 * Lock the mutex for the running
				 * thread:
				 */
				(*m)->m_owner = curthread;

				THR_LOCK(curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority.  Make sure the thread's
				 * scheduling lock is held while priorities
				 * are adjusted.
				 */
				curthread->active_priority = (*m)->m_prio;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_UNLOCK(curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m, abstime);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;

				/* Clear any previous error: */
				curthread->error = 0;

				THR_LOCK(curthread);
				cycle = curthread->cycle;
				THR_UNLOCK(curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				if (abstime == NULL) {
					ret = _thr_umtx_wait(&curthread->cycle,
					    cycle, NULL);
				} else {
					clock_gettime(CLOCK_REALTIME, &ts);
					TIMESPEC_SUB(&ts2, abstime, &ts);
					ret = _thr_umtx_wait(&curthread->cycle,
					    cycle, &ts2);
				}
				if (ret == EINTR)
					ret = 0;

				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread,
					    &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread,
					    &(*m)->m_lock);
				}
				/*
				 * Only clear this after assuring the
				 * thread is dequeued.
				 */
				curthread->data.mutex = NULL;

				/*
				 * The thread's priority may have changed
				 * while waiting for the mutex, causing a
				 * ceiling violation.
				 */
				ret = curthread->error;
				curthread->error = 0;
			}
			break;

		/* Trap invalid mutex protocols: */
		default:
			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}
	} while (((*m)->m_owner != curthread) && (ret == 0));

	/* Return the completion status: */
	return (ret);
}
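
/*
 * A sketch of the cycle-based wait used above: a waiter snapshots
 * curthread->cycle while holding the mutex's low-level lock, drops
 * that lock, and _thr_umtx_wait() sleeps only if the cycle value is
 * still unchanged.  mutex_handoff() increments the cycle before
 * waking, so a wakeup that races with the waiter going to sleep is
 * not lost:
 *
 *	waiter				owner (unlock path)
 *	------				-------------------
 *	cycle = curthread->cycle;
 *	release m_lock
 *					pthread->cycle++;
 *					_thr_umtx_wake(&pthread->cycle, 1);
 *	_thr_umtx_wait(..., cycle)	returns at once: value changed
 */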

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret = 0;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, NULL);

	return (ret);
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret = 0;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization, marking it private (delete safe):
	 */
	if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, NULL);

	return (ret);
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m,
    const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int ret = 0;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, abs_timeout);

	return (ret);
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m,
    const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int ret = 0;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization, marking it private (delete safe):
	 */
	if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, abs_timeout);

	return (ret);
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 0));
}

int
_mutex_cv_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 1));
}

int
_mutex_cv_lock(pthread_mutex_t *m)
{
	int ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0)
		(*m)->m_refcount--;
	return (ret);
}

static int
mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
{
	int ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}
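
/*
 * Example for the overflow guard above: m_count is a signed int, so
 * the "m_count + 1 > 0" test is intended to refuse a relock once the
 * count reaches INT_MAX, returning EAGAIN instead of wrapping the
 * counter around.
 */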

static int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m,
    const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SUSv2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (m->m_protocol != PTHREAD_PRIO_NONE) {
			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}
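
/*
 * Summary of the self-lock semantics implemented above, by type:
 *
 *	ERRORCHECK	EDEADLK (a timed request sleeps out the
 *			remaining time, then returns ETIMEDOUT)
 *	NORMAL		deadlock by design; an untimed request sleeps
 *			forever in 30-second intervals
 *	RECURSIVE	the count is incremented, and unlock must be
 *			called an equal number of times
 */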

static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread *curthread = _get_curthread();
	long tid = -1;
	int ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Shortcut for the simple mutex case. */
		if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
			/*
			 * Check if the running thread is not the owner of
			 * the mutex:
			 */
			if (__predict_false((*m)->m_owner != curthread)) {
				ret = EPERM;
			} else if (__predict_false(
			    (*m)->m_type == PTHREAD_MUTEX_RECURSIVE &&
			    (*m)->m_count > 0)) {
				/* Decrement the count: */
				(*m)->m_count--;
				if (add_reference)
					(*m)->m_refcount++;
			} else {
				/*
				 * Clear the count in case this is a
				 * recursive mutex.
				 */
				(*m)->m_count = 0;
				(*m)->m_owner = NULL;
				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&curthread->mutexq, (*m), m_qe);
				MUTEX_INIT_LINK(*m);
				if (add_reference)
					(*m)->m_refcount++;
				/*
				 * Hand off the mutex to the next waiting
				 * thread.
				 */
				_thr_umtx_unlock(&(*m)->m_lock,
				    curthread->tid);
			}
			return (ret);
		}

		/* Code for priority mutexes. */

		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex protocol: */
		switch ((*m)->m_protocol) {
		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of
			 * the mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a
				 * recursive mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the thread's inherited priority
				 * and recompute the active priority (being
				 * careful not to override changes in the
				 * thread's base priority subsequent to
				 * locking the mutex).
				 */
				THR_LOCK(curthread);
				curthread->inherited_priority =
				    (*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority
				 * mutex.
				 */
				curthread->priority_mutex_count--;
				THR_UNLOCK(curthread);

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				tid = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of
			 * the mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a
				 * recursive mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the thread's inherited priority
				 * and recompute the active priority (being
				 * careful not to override changes in the
				 * thread's base priority subsequent to
				 * locking the mutex).
				 */
				THR_LOCK(curthread);
				curthread->inherited_priority =
				    (*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority
				 * mutex.
				 */
				curthread->priority_mutex_count--;
				THR_UNLOCK(curthread);

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				tid = mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex protocols: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
	}

	/* Return the completion status: */
	return (ret);
}
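
/*
 * Priority-restore example for the unlock paths above (illustrative
 * numbers): a thread with base priority 5 that locked a ceiling-10
 * PRIO_PROTECT mutex runs at active priority 10 while holding it.
 * On unlock, inherited_priority reverts to the value saved in
 * m_saved_prio and the active priority is recomputed as
 * MAX(inherited, base), i.e. back to 5 unless another owned mutex
 * still boosts it.
 */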

/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to the active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called without the target thread's scheduling lock held.
 */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
	struct pthread_mutex *m;

	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 *
		 * Be sure to lock the first mutex in the list of owned
		 * mutexes.  This acts as a barrier against another
		 * simultaneous call to change the thread's priority
		 * and against the owning thread releasing the mutex.
		 */
		m = TAILQ_FIRST(&pthread->pri_mutexq);
		if (m != NULL) {
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
			/*
			 * Make sure the thread still owns the lock.
			 */
			if (m == TAILQ_FIRST(&pthread->pri_mutexq))
				mutex_rescan_owned(curthread, pthread,
				    /* rescan all owned */ NULL);
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (propagate_prio != 0) {
		/*
		 * Lock the thread's scheduling queue.  This is a bit
		 * convoluted; the "in synchronization queue" flag can
		 * only be cleared with both the thread's scheduling and
		 * mutex locks held.  The thread's pointer to the wanted
		 * mutex is guaranteed to be valid during this time.
		 */
		THR_THREAD_LOCK(curthread, pthread);

		if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
		    ((m = pthread->data.mutex) == NULL))
			THR_THREAD_UNLOCK(curthread, pthread);
		else {
			/*
			 * This thread is currently waiting on a mutex;
			 * unlock the scheduling queue lock and lock the
			 * mutex.  We can't hold both at the same time
			 * because the locking order could cause a
			 * deadlock.
			 */
			THR_THREAD_UNLOCK(curthread, pthread);
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Check to make sure this thread is still in the
			 * same state (the lock above can yield the CPU to
			 * another thread or the thread may be running on
			 * another CPU).
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (pthread->data.mutex == m)) {
				/*
				 * Remove and reinsert this thread into
				 * the list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, m);
			}

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}

/*
 * Called when a new thread is added to the mutex waiting queue or
 * when the priority of a thread that is already in the mutex waiting
 * queue changes.
 *
 * This must be called with the mutex locked by the current thread.
 */
static void
mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
	pthread_mutex_t m = mutex;
	struct pthread *pthread_next, *pthread = mutex->m_owner;
	int done, temp_prio;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed), we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);	/* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Don't unlock the mutex passed in as an argument.  It is
	 * expected to be locked and unlocked by the caller.
	 */
	done = 1;
	do {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all mutexes held by the owning
		 * thread since taking this mutex.  This also has a
		 * potential side effect of changing the thread's priority.
		 *
		 * At this point the mutex is locked by the current thread.
		 * The owning thread can't release the mutex until it is
		 * unlocked, so we should be able to safely walk its list
		 * of owned mutexes.
		 */
		mutex_rescan_owned(curthread, pthread, m);

		/*
		 * If this isn't the first time through the loop,
		 * the current mutex needs to be unlocked.
		 */
		if (done == 0)
			THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Assume we're done unless told otherwise: */
		done = 1;

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
		    ((m = pthread->data.mutex) != NULL) &&
		    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Lock the mutex structure: */
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Make sure the thread is still waiting on the
			 * mutex:
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (m == pthread->data.mutex)) {
				/*
				 * The priority for this thread has changed.
				 * Remove and reinsert this thread into the
				 * list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				/*
				 * Grab the waiting thread with the highest
				 * priority:
				 */
				pthread_next = TAILQ_FIRST(&m->m_queue);

				/*
				 * Calculate the mutex priority as the
				 * maximum of the highest active priority of
				 * any waiting threads and the owning
				 * thread's active priority.
				 */
				temp_prio = MAX(pthread_next->active_priority,
				    MAX(m->m_saved_prio,
				    m->m_owner->base_priority));

				if (temp_prio != m->m_prio) {
					/*
					 * The priority needs to be propagated
					 * to the mutex this thread is waiting
					 * on and up to the owner of that
					 * mutex.
					 */
					m->m_prio = temp_prio;
					pthread = m->m_owner;

					/* We're not done yet: */
					done = 0;
				}
			}
			/* Only release the mutex if we're done: */
			if (done != 0)
				THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	} while (done == 0);
}
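
/*
 * Propagation example for the loop above (illustrative priorities):
 * thread A (priority 5) owns PI mutex M1; thread B (priority 7) owns
 * PI mutex M2 and is blocked on M1; thread C (priority 9) now blocks
 * on M2.  M2's priority rises to 9, mutex_rescan_owned() raises B's
 * active priority to 9, and because B is itself queued on M1 the loop
 * continues: M1 rises to 9 and A inherits 9.  The walk stops at the
 * first mutex whose priority is already correct.
 */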

static void
mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
    struct pthread_mutex *mutex)
{
	struct pthread_mutex *m;
	struct pthread *pthread_next;
	int active_prio, inherited_prio;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->pri_mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	} else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				    pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/* Lock the thread's scheduling queue: */
		THR_THREAD_LOCK(curthread, pthread);

		/* if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) */
		if (1) {
			/*
			 * This thread is not in a run queue.  Just set
			 * its active priority.
			 */
			pthread->active_priority = active_prio;
		} else {
			/*
			 * This thread is in a run queue.  Remove it from
			 * the queue before changing its priority:
			 */
			/* THR_RUNQ_REMOVE(pthread); */
			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;
				/* THR_RUNQ_INSERT_HEAD(pthread); */
			} else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;
				/* THR_RUNQ_INSERT_TAIL(pthread); */
			}
		}
		THR_THREAD_UNLOCK(curthread, pthread);
	}
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex *m, *m_next;

	for (m = TAILQ_FIRST(&pthread->pri_mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			pthread_mutex_unlock(&m);
	}
}

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 *
 * In order to properly dequeue a thread from the mutex queue and
 * make it runnable without the possibility of errant wakeups, it
 * is necessary to lock the thread's scheduling queue while also
 * holding the mutex lock.
 */
static long
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
	struct pthread *pthread;
	long tid = -1;

	/* Keep dequeueing until we find a valid thread: */
	mutex->m_owner = NULL;
	pthread = TAILQ_FIRST(&mutex->m_queue);
	while (pthread != NULL) {
		/* Take the thread's scheduling lock: */
		THR_THREAD_LOCK(curthread, pthread);

		/* Remove the thread from the mutex queue: */
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		switch (mutex->m_protocol) {
		case PTHREAD_PRIO_NONE:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);
			break;

		case PTHREAD_PRIO_INHERIT:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);

			/* Track number of priority mutexes owned: */
			pthread->priority_mutex_count++;

			/*
			 * Set the priority of the mutex.  Since our waiting
			 * threads are in descending priority order, the
			 * priority of the mutex becomes the active priority
			 * of the thread we just dequeued.
			 */
			mutex->m_prio = pthread->active_priority;

			/* Save the owning thread's inherited priority: */
			mutex->m_saved_prio = pthread->inherited_priority;

			/*
			 * The owning thread's inherited priority now becomes
			 * its active priority (the priority of the mutex).
			 */
			pthread->inherited_priority = mutex->m_prio;
			break;

		case PTHREAD_PRIO_PROTECT:
			if (pthread->active_priority > mutex->m_prio) {
				/*
				 * Either the mutex ceiling priority has
				 * been lowered and/or this thread's priority
				 * has been raised subsequent to the thread
				 * being queued on the waiting list.
				 */
				pthread->error = EINVAL;
			} else {
				/*
				 * Assign the new owner and add the mutex
				 * to the thread's list of owned mutexes.
				 */
				mutex->m_owner = pthread;
				TAILQ_INSERT_TAIL(&pthread->pri_mutexq,
				    mutex, m_qe);

				/* Track number of priority mutexes owned: */
				pthread->priority_mutex_count++;

				/*
				 * Save the owning thread's inherited
				 * priority:
				 */
				mutex->m_saved_prio =
				    pthread->inherited_priority;

				/*
				 * The owning thread inherits the ceiling
				 * priority of the mutex and executes at
				 * that priority:
				 */
				pthread->inherited_priority = mutex->m_prio;
				pthread->active_priority = mutex->m_prio;
			}
			break;
		}

		/* Make the thread runnable and unlock the scheduling queue: */
		pthread->cycle++;
		_thr_umtx_wake(&pthread->cycle, 1);

		THR_THREAD_UNLOCK(curthread, pthread);
		if (mutex->m_owner == pthread)
			/* We're done; a valid owner was found. */
			break;
		else
			/* Get the next thread from the waiting queue: */
			pthread = TAILQ_NEXT(pthread, sqe);
	}

	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
		/* This mutex has no priority: */
		mutex->m_prio = 0;
	return (tid);
}
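
/*
 * A handoff detail worth an example: with PTHREAD_PRIO_PROTECT the
 * ceiling is rechecked at handoff time.  If a waiter's priority was
 * raised to 12 while it slept on a ceiling-10 mutex, the loop above
 * stores EINVAL in pthread->error, wakes the thread without granting
 * ownership, and tries the next waiter; the loser picks the error up
 * in mutex_lock_common() via curthread->error.
 */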

#if 0
/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 */
static pthread_t
mutex_queue_deq(struct pthread_mutex *mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
	}

	return (pthread);
}
#endif

/*
 * Remove a waiting thread from a mutex queue in descending priority order.
 */
static void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
	if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
	}
}

/*
 * Enqueue a waiting thread to a queue in descending priority order.
 */
static void
mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);

	THR_ASSERT_NOT_IN_SYNCQ(pthread);
	/*
	 * For the common case of all threads having equal priority,
	 * we perform a quick check against the priority of the thread
	 * at the tail of the queue.
	 */
	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
	else {
		tid = TAILQ_FIRST(&mutex->m_queue);
		while (pthread->active_priority <= tid->active_priority)
			tid = TAILQ_NEXT(tid, sqe);
		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
	}
	pthread->sflags |= THR_FLAGS_IN_SYNCQ;
}
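
/*
 * Queue-order example for mutex_queue_enq() above: with waiters of
 * priority 9, 7, 7 already queued, a new priority-7 thread is
 * appended after the existing 7s (the "<=" comparisons keep insertion
 * FIFO within a priority level), while a new priority-8 thread is
 * inserted between the 9 and the first 7.
 */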