/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	200

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
		const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
	    calloc(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
	}
	*mutex = pmutex;
	return (0);
}
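
/*
 * Illustrative sketch (editor's example, excluded from the build): how an
 * application would reach the PTHREAD_PRIO_PROTECT case handled by the
 * switch in mutex_init() above, using the standard attribute calls.  The
 * example_create_pp_mutex() helper and the ceiling value of 10 are
 * hypothetical.
 */
#if 0
#include <pthread.h>

static int
example_create_pp_mutex(pthread_mutex_t *mp)
{
	pthread_mutexattr_t attr;
	int error;

	if ((error = pthread_mutexattr_init(&attr)) != 0)
		return (error);
	/* Request the priority ceiling protocol... */
	error = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
	if (error == 0)
		/* ...and the ceiling stored in m_lock.m_ceilings[0]. */
		error = pthread_mutexattr_setprioceiling(&attr, 10);
	if (error == 0)
		error = pthread_mutex_init(mp, &attr);
	(void)pthread_mutexattr_destroy(&attr);
	return (error);
}
#endif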

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited: their owner
	 * is the forking thread, which lives in the parent process, so
	 * they would have to be removed from the owned mutex list.
	 * Process-shared mutexes are not currently supported, so this
	 * is not a concern here.
	 */
	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If that fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m = *mutex;
		/*
		 * Check the mutex's other fields to see if it is still
		 * in use, mostly for priority-protocol mutex types or
		 * when condition variables reference it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be
			 * freed and set the caller's pointer to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}
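
/*
 * Illustrative sketch (editor's example, excluded from the build): the
 * EBUSY path of _pthread_mutex_destroy() above.  In this implementation
 * destroying a mutex that is still locked (or referenced by a condition
 * variable) fails, while destroying it after unlocking succeeds and NULLs
 * the caller's pointer.  The example_destroy_busy() helper is hypothetical.
 */
#if 0
#include <assert.h>
#include <errno.h>
#include <pthread.h>

static void
example_destroy_busy(void)
{
	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

	(void)pthread_mutex_lock(&m);
	assert(pthread_mutex_destroy(&m) == EBUSY);	/* still owned */
	(void)pthread_mutex_unlock(&m);
	assert(pthread_mutex_destroy(&m) == 0);		/* now destroyable */
}
#endif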

#define ENQUEUE_MUTEX(curthread, m)					\
	m->m_owner = curthread;						\
	/* Add to the list of owned mutexes: */				\
	MUTEX_ASSERT_NOT_OWNED(m);					\
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)		\
		TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);		\
	else								\
		TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe)

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}
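
/*
 * Illustrative sketch (editor's example, excluded from the build): in this
 * implementation a statically initialized mutex is a null pointer, so the
 * first lock or trylock call runs init_static() (or init_static_private()
 * for the libc-internal entry points) before taking the lock.  The
 * example_* names are hypothetical.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;
static int example_counter;

static void
example_bump(void)
{
	/* The first call allocates and initializes the mutex lazily. */
	(void)pthread_mutex_lock(&example_lock);
	example_counter++;
	(void)pthread_mutex_unlock(&example_lock);
}
#endif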

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct timespec ts, ts2;
	struct pthread_mutex *m;
	uint32_t id;
	int ret;
	int count;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock2(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		/*
		 * For adaptive mutexes, spin for a bit in the
		 * expectation that if the application requests this
		 * mutex type then the lock is likely to be released
		 * quickly, which is faster than entering the kernel.
		 */
		if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
			goto sleep_in_kernel;

		if (!_thr_is_smp)
			goto yield_loop;

		if (m->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
			count = MUTEX_ADAPTIVE_SPINS;

			while (count--) {
				ret = _thr_umutex_trylock2(&m->m_lock, id);
				if (ret == 0)
					break;
				CPU_SPINWAIT;
			}
			if (ret == 0)
				goto done;
		} else {
			if (_thr_spinloops != 0) {
				count = _thr_spinloops;
				while (count) {
					if (m->m_lock.m_owner == UMUTEX_UNOWNED) {
						ret = _thr_umutex_trylock2(&m->m_lock, id);
						if (ret == 0)
							goto done;
					}
					CPU_SPINWAIT;
					count--;
				}
			}
		}

yield_loop:
		if (_thr_yieldloops != 0) {
			count = _thr_yieldloops;
			while (count--) {
				_sched_yield();
				ret = _thr_umutex_trylock2(&m->m_lock, id);
				if (ret == 0)
					goto done;
			}
		}

sleep_in_kernel:
		if (abstime == NULL) {
			ret = __thr_umutex_lock(&m->m_lock);
		} else if (__predict_false(
		    abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
		    abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = __thr_umutex_timedlock(&m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth doing.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
done:
		if (ret == 0)
			ENQUEUE_MUTEX(curthread, m);
	}
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}
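
/*
 * Illustrative sketch (editor's example, excluded from the build): callers
 * pass an absolute CLOCK_REALTIME deadline, which mutex_lock_common()
 * above converts to a relative timeout with TIMESPEC_SUB() before sleeping
 * in the kernel.  Waiting up to two seconds therefore looks like this; the
 * example_timedlock() helper is hypothetical.
 */
#if 0
#include <errno.h>
#include <pthread.h>
#include <time.h>

static int
example_timedlock(pthread_mutex_t *mp)
{
	struct timespec abstime;
	int error;

	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += 2;		/* absolute deadline, 2s from now */
	error = pthread_mutex_timedlock(mp, &abstime);
	/* ETIMEDOUT means the lock could not be acquired in time. */
	return (error);
}
#endif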

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SUSv2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
	    m->m_type == PTHREAD_MUTEX_RECURSIVE &&
	    m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex *m, *m_next;

	TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}
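
/*
 * Illustrative sketch (editor's example, excluded from the build): the
 * error reporting implemented by mutex_self_lock() and
 * mutex_unlock_common() above for error-checking mutexes: relocking a
 * mutex you already own returns EDEADLK, and unlocking a mutex you do not
 * own returns EPERM.  The example_errorcheck() helper is hypothetical.
 */
#if 0
#include <assert.h>
#include <errno.h>
#include <pthread.h>

static void
example_errorcheck(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;

	(void)pthread_mutexattr_init(&attr);
	(void)pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	(void)pthread_mutex_init(&m, &attr);
	(void)pthread_mutexattr_destroy(&attr);

	assert(pthread_mutex_lock(&m) == 0);
	assert(pthread_mutex_lock(&m) == EDEADLK);	/* self-deadlock trapped */
	assert(pthread_mutex_unlock(&m) == 0);
	assert(pthread_mutex_unlock(&m) == EPERM);	/* not the owner */
	(void)pthread_mutex_destroy(&m);
}
#endif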

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}
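
/*
 * Illustrative sketch (editor's example, excluded from the build):
 * querying and raising the ceiling of a PTHREAD_PRIO_PROTECT mutex, which
 * the two functions above implement; on any other mutex type both calls
 * return EINVAL.  The example_raise_ceiling() helper is hypothetical.
 */
#if 0
#include <pthread.h>

static int
example_raise_ceiling(pthread_mutex_t *mp)
{
	int old, error;

	error = pthread_mutex_getprioceiling(mp, &old);
	if (error == 0)
		/* Raise the ceiling by one; the previous value comes
		 * back through the last argument. */
		error = pthread_mutex_setprioceiling(mp, old + 1, &old);
	return (error);
}
#endif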