/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	200

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
		calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	MUTEX_INIT_LINK(pmutex);
	switch(attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
	}
	*mutex = pmutex;
	return (0);
}
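
/*
 * A statically initialized mutex (PTHREAD_MUTEX_INITIALIZER) starts out
 * as a NULL pointer; init_static() and init_static_private() below
 * perform the deferred mutex_init() on first use, serialized by
 * _mutex_static_lock so that concurrent first lockers do not both
 * allocate the mutex.
 */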

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1, calloc);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0, calloc);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0,
		.m_flags = 0
	};
	static const struct pthread_mutex_attr *pattr = &attr;

	return mutex_init(mutex, (pthread_mutexattr_t *)&pattr, 0,
	    calloc_cb);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited, because their
	 * owner is the forking thread in the parent process and they
	 * would have to be removed from the owned mutex list; since
	 * process-shared mutexes are not currently supported, this is
	 * not a concern yet.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}
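
/*
 * Destroying a mutex that is still locked, still owned, or still
 * referenced by a condition variable fails with EBUSY and leaves the
 * mutex intact; only an idle mutex is freed.
 */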

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If that fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m = *mutex;
		/*
		 * Check the mutex's other fields to see if it is
		 * still in use, either by an owner or by condition
		 * variables referencing it (this mostly matters for
		 * priority mutex types).
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be
			 * free'd and set the caller's pointer to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}

#define ENQUEUE_MUTEX(curthread, m)					\
	m->m_owner = curthread;						\
	/* Add to the list of owned mutexes: */				\
	MUTEX_ASSERT_NOT_OWNED(m);					\
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)		\
		TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);		\
	else								\
		TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe)

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
	const struct timespec * abstime)
{
	struct timespec	ts, ts2;
	struct pthread_mutex *m;
	uint32_t	id;
	int	ret;
	int	count;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock2(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		/*
		 * For adaptive mutexes, spin for a bit in the
		 * expectation that if the application requests this
		 * mutex type then the lock is likely to be released
		 * quickly and it is faster than entering the kernel.
		 */
		if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
			goto sleep_in_kernel;

		if (!_thr_is_smp)
			goto yield_loop;

		if (m->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
			count = MUTEX_ADAPTIVE_SPINS;

			while (count--) {
				ret = _thr_umutex_trylock2(&m->m_lock, id);
				if (ret == 0)
					break;
				CPU_SPINWAIT;
			}
			if (ret == 0)
				goto done;
		} else {
			if (_thr_spinloops != 0) {
				count = _thr_spinloops;
				while (count) {
					if (m->m_lock.m_owner == UMUTEX_UNOWNED) {
						ret = _thr_umutex_trylock2(&m->m_lock, id);
						if (ret == 0)
							goto done;
					}
					CPU_SPINWAIT;
					count--;
				}
			}
		}

yield_loop:
		if (_thr_yieldloops != 0) {
			count = _thr_yieldloops;
			while (count--) {
				_sched_yield();
				ret = _thr_umutex_trylock2(&m->m_lock, id);
				if (ret == 0)
					goto done;
			}
		}

sleep_in_kernel:
		if (abstime == NULL) {
			ret = __thr_umutex_lock(&m->m_lock);
		} else if (__predict_false(
			abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = __thr_umutex_timedlock(&m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth doing.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
done:
		if (ret == 0)
			ENQUEUE_MUTEX(curthread, m);
	}
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int	ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}
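
/*
 * The mutex_self_*() routines handle the case where the calling thread
 * already owns the mutex: for trylock, error-checking and normal
 * mutexes return EBUSY; for lock, error-checking mutexes return EDEADLK
 * while normal mutexes deliberately deadlock; recursive mutexes just
 * bump the lock count.
 */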

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec	ts1, ts2;
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}
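
/*
 * _mutex_cv_unlock() and _mutex_cv_lock() are used by the condition
 * variable code: the former releases the mutex for a wait while saving
 * the recursion count and bumping m_refcount, and the latter reacquires
 * the mutex and restores that count when the wait ends.
 */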

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
			      int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
			      int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}