/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block
 */
#define MUTEX_ADAPTIVE_SPINS	200

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
		void *(calloc_cb)(size_t, size_t));
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

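/*
 * Common initialization path.  The mutex structure is allocated through
 * the supplied calloc-compatible callback; the type, flags and protocol
 * are taken from the (possibly default) attribute object and translated
 * into the kernel umutex flags.  When "private" is nonzero the mutex is
 * flagged MUTEX_FLAGS_PRIVATE for libc-internal (delete safe) use, and
 * adaptive mutexes receive their default spin/yield counts.
 */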
static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
	    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
	}

	if (pmutex->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops : MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}

	*mutex = pmutex;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1, calloc);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0, calloc);
}

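/*
 * The calloc_cb variant below lets the caller supply the allocator used
 * for the mutex structure itself; presumably this is what allows malloc
 * to create its internal locks without calling back into the regular
 * calloc() while those locks are still being bootstrapped.
 */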
/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0,
		.m_flags = 0
	};
	static const struct pthread_mutex_attr *pattr = &attr;

	return mutex_init(mutex, (pthread_mutexattr_t *)&pattr, 0, calloc_cb);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that a
	 * process-shared mutex should not be inherited this way, because
	 * its owner is the forking thread in the parent process; such
	 * mutexes would have to be removed from the owned mutex list
	 * instead.  Process-shared mutexes are currently not supported,
	 * so this is not a concern yet.
	 */
	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; we only need to try
		 * once.  If that fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m = *mutex;
		/*
		 * Check the other mutex fields to see if this mutex is
		 * in use, mostly for priority mutex types or when
		 * condition variables still reference it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}


#define ENQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = curthread;				\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED((m));				\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

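/*
 * Common slow-path acquisition used by the lock and timedlock entry
 * points.  The kernel lock word is tried once; if the calling thread
 * already owns the mutex, the outcome depends on the mutex type.
 * Priority-protected mutexes go straight to the kernel.  Otherwise the
 * thread spins up to m_spinloops times (nonzero by default only for
 * adaptive mutexes, and only on SMP systems), then yields up to
 * m_yieldloops times, and finally sleeps in the kernel, optionally
 * bounded by the caller's absolute timeout.
 */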
static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct timespec ts, ts2;
	struct pthread_mutex *m;
	uint32_t id;
	int ret;
	int count;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock2(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		/*
		 * For adaptive mutexes, spin for a while in the
		 * expectation that if the application requests this
		 * mutex type then the lock is likely to be released
		 * quickly, and spinning is faster than entering the
		 * kernel.
		 */
		if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
			goto sleep_in_kernel;

		if (!_thr_is_smp)
			goto yield_loop;

		count = m->m_spinloops;
		while (count--) {
			if (m->m_lock.m_owner == UMUTEX_UNOWNED) {
				ret = _thr_umutex_trylock2(&m->m_lock, id);
				if (ret == 0)
					goto done;
			}
			CPU_SPINWAIT;
		}

yield_loop:
		count = m->m_yieldloops;
		while (count--) {
			_sched_yield();
			ret = _thr_umutex_trylock2(&m->m_lock, id);
			if (ret == 0)
				goto done;
		}

sleep_in_kernel:
		if (abstime == NULL) {
			ret = __thr_umutex_lock(&m->m_lock);
		} else if (__predict_false(
		    abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
		    abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = __thr_umutex_timedlock(&m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth doing.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
done:
		if (ret == 0)
			ENQUEUE_MUTEX(curthread, m);
	}
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

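/*
 * Illustrative caller-side sketch (not part of this file): the abstime
 * argument to the timedlock entry points is an absolute CLOCK_REALTIME
 * deadline, which mutex_lock_common() converts to a relative timeout
 * before sleeping.  A caller wanting roughly a five second bound on an
 * already initialized mutex "m" might do:
 *
 *	struct timespec abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 5;
 *	error = pthread_mutex_timedlock(&m, &abstime);
 */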
int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

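/*
 * Common unlock path: a recursive mutex with a nonzero count is simply
 * decremented; otherwise the mutex is removed from the owning thread's
 * queue (recomputing the inherited ceiling for priority-protected
 * mutexes) and the kernel lock word is released.
 */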
static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
	    m->m_type == PTHREAD_MUTEX_RECURSIVE &&
	    m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex *m, *m_next;

	TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}

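/*
 * Non-portable tuning knobs: the spinloop and yieldloop counts below
 * control how long mutex_lock_common() busy-waits and yields before
 * sleeping in the kernel.  The setters first perform the dynamic
 * initialization when called on a statically initialized mutex.
 */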
int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	if (*mutex == NULL)
		return (EINVAL);
	*count = (*mutex)->m_spinloops;
	return (0);
}

int
_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_spinloops = count;
	return (0);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_spinloops = count;
	return (0);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	if (*mutex == NULL)
		return (EINVAL);
	*count = (*mutex)->m_yieldloops;
	return (0);
}

int
_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_yieldloops = count;
	return (0);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_yieldloops = count;
	return (0);
}

int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return ((*mutex)->m_owner == curthread);
}