/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {			\
	(m)->m_qe.tqe_prev = NULL;				\
	(m)->m_qe.tqe_next = NULL;				\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {			\
	if ((m)->m_qe.tqe_prev == NULL)				\
		PANIC("mutex is not on list");			\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {			\
	if (((m)->m_qe.tqe_prev != NULL) ||			\
	    ((m)->m_qe.tqe_next != NULL))			\
		PANIC("mutex is on list");			\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	200

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
	    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
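	/*
	 * Private mutexes are created for libc-internal use; they can be
	 * released in bulk via _mutex_unlock_private() below.
	 */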
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
	}

	if (pmutex->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops : MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}

	*mutex = pmutex;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1, calloc);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0, calloc);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0,
		.m_flags = 0
	};
	static const struct pthread_mutex_attr *pattr = &attr;

	return mutex_init(mutex, (pthread_mutexattr_t *)&pattr, 0, calloc_cb);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited: their owner is
	 * the forking thread, which lives in the parent process, so they
	 * would have to be removed from the owned mutex list.  Since
	 * process-shared mutexes are not currently supported, this is not
	 * a concern here.
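	 *
	 * Below we simply rewrite the lock owner words so that the mutexes
	 * held across fork() are owned by the child's thread ID.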
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; we only need to try
		 * once.  If that fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m = *mutex;
		/*
		 * Check the mutex's other fields to see if it is in use.
		 * This mostly matters for priority mutex types, or when
		 * condition variables still reference it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}


#define ENQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = curthread;				\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED((m));				\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct timespec ts, ts2;
	struct pthread_mutex *m;
	uint32_t id;
	int ret;
	int count;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock2(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		/*
		 * For adaptive mutexes, spin for a bit in the expectation
		 * that if the application requests this mutex type then
		 * the lock is likely to be released quickly, which makes
		 * spinning cheaper than entering the kernel.
		 */
		if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
			goto sleep_in_kernel;

		if (!_thr_is_smp)
			goto yield_loop;

		count = m->m_spinloops;
		while (count--) {
			if (m->m_lock.m_owner == UMUTEX_UNOWNED) {
				ret = _thr_umutex_trylock2(&m->m_lock, id);
				if (ret == 0)
					goto done;
			}
			CPU_SPINWAIT;
		}

yield_loop:
		count = m->m_yieldloops;
		while (count--) {
			_sched_yield();
			ret = _thr_umutex_trylock2(&m->m_lock, id);
			if (ret == 0)
				goto done;
		}

sleep_in_kernel:
		if (abstime == NULL) {
			ret = __thr_umutex_lock(&m->m_lock);
		} else if (__predict_false(
		    abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
		    abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = __thr_umutex_timedlock(&m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth the effort.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
done:
		if (ret == 0)
			ENQUEUE_MUTEX(curthread, m);
	}
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes of this type should
			 * return EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
	    m->m_type == PTHREAD_MUTEX_RECURSIVE &&
	    m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
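	 * The saved count is handed back to the condition variable code,
	 * which restores it through _mutex_cv_lock() when the wait ends.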
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex *m, *m_next;

	TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}

int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	if (*mutex == NULL)
		return (EINVAL);
	*count = (*mutex)->m_spinloops;
	return (0);
}

int
_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_spinloops = count;
	return (0);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_spinloops = count;
	return (0);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	if (*mutex == NULL)
		return (EINVAL);
	*count = (*mutex)->m_yieldloops;
	return (0);
}

int
_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_yieldloops = count;
	return (0);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_yieldloops = count;
	return (0);
}