/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block
 */
#define MUTEX_ADAPTIVE_SPINS	200

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
		void *(calloc_cb)(size_t, size_t));
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
		calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
	}

	if (pmutex->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops : MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}

	*mutex = pmutex;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1, calloc);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0, calloc);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0,
		.m_flags = 0
	};
	static const struct pthread_mutex_attr *pattr = &attr;

	return mutex_init(mutex, (pthread_mutexattr_t *)&pattr, 0, calloc_cb);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for child process.
	 * Note that process-shared mutexes should not be inherited,
	 * because their owner is the forking thread, which lives in the
	 * parent process; they would have to be removed from the owned
	 * mutex list.  Process-shared mutexes are not currently
	 * supported, so this is not a concern here.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If that fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m = *mutex;
		/*
		 * Check the mutex's other fields to see if it is still
		 * in use; this mostly matters for priority mutex types,
		 * or when condition variables still reference it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}


#define ENQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = curthread;				\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED((m));				\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct timespec ts, ts2;
	struct pthread_mutex *m;
	uint32_t id;
	int ret;
	int count;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock2(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		/*
		 * For adaptive mutexes, spin for a bit in the expectation
		 * that if the application requests this mutex type then
		 * the lock is likely to be released quickly, and spinning
		 * is faster than entering the kernel.
		 */
		if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
			goto sleep_in_kernel;

		if (!_thr_is_smp)
			goto yield_loop;

		count = m->m_spinloops;
		while (count--) {
			if (m->m_lock.m_owner == UMUTEX_UNOWNED) {
				ret = _thr_umutex_trylock2(&m->m_lock, id);
				if (ret == 0)
					goto done;
			}
			CPU_SPINWAIT;
		}

yield_loop:
		count = m->m_yieldloops;
		while (count--) {
			_sched_yield();
			ret = _thr_umutex_trylock2(&m->m_lock, id);
			if (ret == 0)
				goto done;
		}

sleep_in_kernel:
		if (abstime == NULL) {
			ret = __thr_umutex_lock(&m->m_lock);
		} else if (__predict_false(
			   abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			   abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = __thr_umutex_timedlock(&m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth doing.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
done:
		if (ret == 0)
			ENQUEUE_MUTEX(curthread, m);
	}
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
	    m->m_type == PTHREAD_MUTEX_RECURSIVE &&
	    m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex *m, *m_next;

	TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}

int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	if (*mutex == NULL)
		return (EINVAL);
	*count = (*mutex)->m_spinloops;
	return (0);
}

int
_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_spinloops = count;
	return (0);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_spinloops = count;
	return (0);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	if (*mutex == NULL)
		return (EINVAL);
	*count = (*mutex)->m_yieldloops;
	return (0);
}

int
_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_yieldloops = count;
	return (0);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_yieldloops = count;
	return (0);
}

int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return ((*mutex)->m_owner == curthread);
}