/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block
 */
#define MUTEX_ADAPTIVE_SPINS	200

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
		void *(calloc_cb)(size_t, size_t));
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
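
/*
 * Common initialization used by the pthread_mutex_init() variants below:
 * validate the attributes (or fall back to the defaults), allocate the
 * mutex structure through the supplied calloc-compatible callback, and
 * set up the kernel umutex word according to the requested protocol.
 * The 'private' flag marks the mutex private (delete safe) for
 * libc-internal callers.
 */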
static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
		calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	MUTEX_INIT_LINK(pmutex);
	switch(attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
	}

	if (pmutex->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}

	*mutex = pmutex;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1, calloc);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0, calloc);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0,
		.m_flags = 0
	};
	static const struct pthread_mutex_attr *pattr = &attr;

	return mutex_init(mutex, (pthread_mutexattr_t *)&pattr, 0, calloc_cb);
}
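
/*
 * Usage sketch: a malloc implementation can use
 * _pthread_mutex_init_calloc_cb() to create its internal locks before the
 * regular allocator is usable, by passing its own allocator:
 *
 *	pthread_mutex_t lock;
 *	_pthread_mutex_init_calloc_cb(&lock, bootstrap_calloc);
 *
 * where bootstrap_calloc is a hypothetical calloc(3)-compatible routine.
 * Creating the lock this way does not recurse into malloc itself.
 */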

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited, because their
	 * owner is the forking thread, which lives in the parent process;
	 * they would have to be removed from the owned mutex lists.
	 * Process-shared mutexes are not currently supported, so this is
	 * not a concern here.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If that fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m = *mutex;
		/*
		 * Check the mutex's other fields to see if it is still in
		 * use: it may be owned (mostly for priority mutex types),
		 * or condition variables may be referencing it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}


#define ENQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = curthread;				\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED((m));				\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
	const struct timespec * abstime)
{
	struct timespec ts, ts2;
	struct pthread_mutex *m;
	uint32_t id;
	int ret;
	int count;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock2(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		/*
		 * For adaptive mutexes, spin for a bit in the expectation
		 * that if the application requests this mutex type then
		 * the lock is likely to be released quickly, which is
		 * faster than entering the kernel.
		 */
		if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
			goto sleep_in_kernel;

		if (!_thr_is_smp)
			goto yield_loop;

		count = m->m_spinloops;
		while (count--) {
			if (m->m_lock.m_owner == UMUTEX_UNOWNED) {
				ret = _thr_umutex_trylock2(&m->m_lock, id);
				if (ret == 0)
					goto done;
			}
			CPU_SPINWAIT;
		}

yield_loop:
		count = m->m_yieldloops;
		while (count--) {
			_sched_yield();
			ret = _thr_umutex_trylock2(&m->m_lock, id);
			if (ret == 0)
				goto done;
		}

sleep_in_kernel:
		if (abstime == NULL) {
			ret = __thr_umutex_lock(&m->m_lock);
		} else if (__predict_false(
			abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = __thr_umutex_timedlock(&m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth doing.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
done:
		if (ret == 0)
			ENQUEUE_MUTEX(curthread, m);
	}
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}
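
/*
 * Usage sketch: the abstime passed to pthread_mutex_timedlock() is an
 * absolute CLOCK_REALTIME deadline; mutex_lock_common() converts it to a
 * relative timeout before sleeping in the kernel.  A caller might do:
 *
 *	struct timespec deadline;
 *	clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 2;
 *	error = pthread_mutex_timedlock(&m, &deadline);
 */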

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}
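
/*
 * Condition variable interplay: while a thread waits on a condition
 * variable, _mutex_cv_unlock() releases the mutex but bumps m_refcount so
 * that pthread_mutex_destroy() will return EBUSY, and it hands the saved
 * recursion count back to the caller; _mutex_cv_lock() reacquires the
 * mutex and restores that count when the wait ends.
 */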

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex *m, *m_next;

	TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}

int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	if (*mutex == NULL)
		return (EINVAL);
	*count = (*mutex)->m_spinloops;
	return (0);
}

int
_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_spinloops = count;
	return (0);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_spinloops = count;
	return (0);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	if (*mutex == NULL)
		return (EINVAL);
	*count = (*mutex)->m_yieldloops;
	return (0);
}

int
_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_yieldloops = count;
	return (0);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_yieldloops = count;
	return (0);
}
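
/*
 * Usage sketch for the non-portable tuning knobs above: an application can
 * adjust how many times a lock attempt spins and yields before blocking
 * in the kernel, e.g.:
 *
 *	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
 *	pthread_mutex_setspinloops_np(&m, 1000);
 *	pthread_mutex_setyieldloops_np(&m, 10);
 *
 * The values shown are arbitrary; a statically initialized mutex is set up
 * on first use by the calls above.
 */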