/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	200

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
	    const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
	    const struct timespec *abstime);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
		    const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
	    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	MUTEX_INIT_LINK(pmutex);
	switch(attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
	}
	*mutex = pmutex;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}
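
/*
 * Recompute the inherited ceiling stored in a priority-protect mutex that
 * is about to be released: it becomes the ceiling of the last
 * priority-protect mutex the thread still owns, or -1 if it owns none.
 */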
static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1, calloc);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0, calloc);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0,
		.m_flags = 0
	};
	static const struct pthread_mutex_attr *pattr = &attr;

	return mutex_init(mutex, (pthread_mutexattr_t *)&pattr, 0, calloc_cb);
}
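
/*
 * Note: malloc supplies its own calloc-compatible allocator as the
 * calloc_cb argument above, presumably so that the mutexes protecting its
 * internal state can be created without calling back into malloc itself.
 */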

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited, because their
	 * owner is the forking thread in the parent process; they would
	 * have to be removed from the owned mutex list.  Process-shared
	 * mutexes are not currently supported, so this is not yet a
	 * concern.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; we only need to try
		 * once.  If that fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m = *mutex;
		/*
		 * Check the mutex's other fields to see if it is in use.
		 * This mostly matters for priority mutex types, or when
		 * condition variables are referencing it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed,
			 * and set the caller's pointer to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}


#define ENQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = curthread;				\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED((m));				\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}
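
/*
 * Common lock path: try the userland fast path first.  If that fails,
 * handle a self-lock; otherwise spin briefly (adaptive mutexes on SMP
 * systems, or as tuned by _thr_spinloops), optionally yield a few times
 * (_thr_yieldloops), and finally sleep in the kernel, with or without a
 * timeout.  Priority-protect mutexes go straight to the kernel.
 */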
static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
	const struct timespec *abstime)
{
	struct timespec ts, ts2;
	struct pthread_mutex *m;
	uint32_t id;
	int ret;
	int count;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock2(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		/*
		 * For adaptive mutexes, spin for a bit in the expectation
		 * that if the application requests this mutex type then
		 * the lock is likely to be released quickly and it is
		 * faster than entering the kernel.
		 */
		if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
			goto sleep_in_kernel;

		if (!_thr_is_smp)
			goto yield_loop;

		if (m->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
			count = MUTEX_ADAPTIVE_SPINS;

			while (count--) {
				ret = _thr_umutex_trylock2(&m->m_lock, id);
				if (ret == 0)
					break;
				CPU_SPINWAIT;
			}
			if (ret == 0)
				goto done;
		} else {
			if (_thr_spinloops != 0) {
				count = _thr_spinloops;
				while (count) {
					if (m->m_lock.m_owner == UMUTEX_UNOWNED) {
						ret = _thr_umutex_trylock2(&m->m_lock, id);
						if (ret == 0)
							goto done;
					}
					CPU_SPINWAIT;
					count--;
				}
			}
		}

yield_loop:
		if (_thr_yieldloops != 0) {
			count = _thr_yieldloops;
			while (count--) {
				_sched_yield();
				ret = _thr_umutex_trylock2(&m->m_lock, id);
				if (ret == 0)
					goto done;
			}
		}

sleep_in_kernel:
		if (abstime == NULL) {
			ret = __thr_umutex_lock(&m->m_lock);
		} else if (__predict_false(
		    abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
		    abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = __thr_umutex_timedlock(&m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth doing.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
done:
		if (ret == 0)
			ENQUEUE_MUTEX(curthread, m);
	}
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}
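
/*
 * Re-acquire a mutex on behalf of a condition variable wait: the
 * reference taken by _mutex_cv_unlock() is dropped and the saved
 * recursion count is restored.
 */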
int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SUSv2 defines as a 'normal' mutex: intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
	    m->m_type == PTHREAD_MUTEX_RECURSIVE &&
	    m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}
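
/*
 * Release a mutex on behalf of a condition variable wait: the recursion
 * count is saved for _mutex_cv_lock() to restore, and m_refcount is
 * bumped so the mutex cannot be destroyed while waiters still reference
 * it.
 */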
int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex *m, *m_next;

	TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}