/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {			\
	(m)->m_qe.tqe_prev = NULL;				\
	(m)->m_qe.tqe_next = NULL;				\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {			\
	if (__predict_false((m)->m_qe.tqe_prev == NULL))	\
		PANIC("mutex is not on list");			\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {			\
	if (__predict_false((m)->m_qe.tqe_prev != NULL ||	\
	    (m)->m_qe.tqe_next != NULL))			\
		PANIC("mutex is on list");			\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	2000

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
	    const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
	    const struct timespec *abstime);
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
	    void *(calloc_cb)(size_t, size_t));
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
		    const struct timespec *abstime);
static int	mutex_unlock_common(struct pthread_mutex *, int);
static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
		    const struct timespec *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np,
    pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

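/*
 * mutex_init() is the common initialization path: it validates the
 * requested type and protocol, allocates the pthread_mutex through the
 * supplied calloc-compatible callback, and programs the kernel umutex
 * word for PTHREAD_PRIO_NONE, PTHREAD_PRIO_INHERIT or
 * PTHREAD_PRIO_PROTECT.  Adaptive mutexes also pick up their spin and
 * yield loop counts here.
 */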
static int
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
	    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_flags = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_count = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
		break;
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	}

	if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops : MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}

	*mutex = pmutex;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == THR_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
		    calloc);
	else
		ret = 0;
	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0
	};
	int ret;

	ret = mutex_init(mutex, &attr, calloc_cb);
	if (ret == 0)
		(*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
	return (ret);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited, because their
	 * owner is the forking thread in the parent process and they
	 * would have to be removed from the owned-mutex list; however,
	 * process-shared mutexes are currently not supported, so this is
	 * not a concern.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	pthread_mutex_t m;
	int ret;

	m = *mutex;
	if (m < THR_MUTEX_DESTROYED) {
		ret = 0;
	} else if (m == THR_MUTEX_DESTROYED) {
		ret = EINVAL;
	} else {
		if (m->m_owner != NULL) {
			ret = EBUSY;
		} else {
			*mutex = THR_MUTEX_DESTROYED;
			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
			ret = 0;
		}
	}

	return (ret);
}

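/*
 * Bookkeeping for the calling thread's lists of owned mutexes:
 * priority-protected mutexes are kept on pp_mutexq, all other mutexes
 * on mutexq.  DEQUEUE_MUTEX also recomputes the inherited priority
 * ceiling via set_inherited_priority() when a priority-protected mutex
 * is released.
 */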
#define ENQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = curthread;				\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED((m));				\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)

#define DEQUEUE_MUTEX(curthread, m)					\
	(m)->m_owner = NULL;						\
	MUTEX_ASSERT_IS_OWNED(m);					\
	if (__predict_true(((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) \
		TAILQ_REMOVE(&curthread->mutexq, (m), m_qe);		\
	else {								\
		TAILQ_REMOVE(&curthread->pp_mutexq, (m), m_qe);		\
		set_inherited_priority(curthread, m);			\
	}								\
	MUTEX_INIT_LINK(m);

#define CHECK_AND_INIT_MUTEX						\
	if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) {	\
		if (m == THR_MUTEX_DESTROYED)				\
			return (EINVAL);				\
		int ret;						\
		ret = init_static(_get_curthread(), mutex);		\
		if (ret)						\
			return (ret);					\
		m = *mutex;						\
	}

static int
mutex_trylock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m = *mutex;
	uint32_t id;
	int ret;

	id = TID(curthread);
	if (m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (__predict_true(ret == 0)) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */
	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE))
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	return (mutex_trylock_common(mutex));
}

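/*
 * Slow path for lock acquisition, entered once the fast trylock has
 * failed.  For simple (non-PI/PP) mutexes on SMP machines the lock word
 * is retried with a CAS for up to m_spinloops iterations, then the CPU
 * is yielded for up to m_yieldloops iterations, and only after that
 * does the thread block in the kernel.  Priority-inherit and
 * priority-protect mutexes skip the spin and yield loops and go
 * straight to the kernel.
 */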
static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
    const struct timespec *abstime)
{
	uint32_t id, owner;
	int count;
	int ret;

	if (m->m_owner == curthread)
		return mutex_self_lock(m, abstime);

	id = TID(curthread);
	/*
	 * For adaptive mutexes, spin for a bit in the expectation
	 * that if the application requests this mutex type then
	 * the lock is likely to be released quickly and it is
	 * faster than entering the kernel.
	 */
	if (__predict_false(
	    (m->m_lock.m_flags &
	    (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
		goto sleep_in_kernel;

	if (!_thr_is_smp)
		goto yield_loop;

	count = m->m_spinloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,
			    id | owner)) {
				ret = 0;
				goto done;
			}
		}
		CPU_SPINWAIT;
	}

yield_loop:
	count = m->m_yieldloops;
	while (count--) {
		_sched_yield();
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,
			    id | owner)) {
				ret = 0;
				goto done;
			}
		}
	}

sleep_in_kernel:
	if (abstime == NULL) {
		ret = __thr_umutex_lock(&m->m_lock, id);
	} else if (__predict_false(
	    abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)) {
		ret = EINVAL;
	} else {
		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
	}
done:
	if (ret == 0)
		ENQUEUE_MUTEX(curthread, m);

	return (ret);
}

static inline int
mutex_lock_common(struct pthread_mutex *m,
    const struct timespec *abstime, int cvattach)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
		ENQUEUE_MUTEX(curthread, m);
		ret = 0;
	} else {
		ret = mutex_lock_sleep(curthread, m, abstime);
	}
	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	_thr_check_init();

	CHECK_AND_INIT_MUTEX

	return (mutex_lock_common(m, NULL, 0));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct pthread_mutex *m;

	_thr_check_init();

	CHECK_AND_INIT_MUTEX

	return (mutex_lock_common(m, abstime, 0));
}

int
_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *mp;

	mp = *mutex;
	return (mutex_unlock_common(mp, 0));
}

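/*
 * The _mutex_cv_* functions below are internal helpers used by the
 * condition variable code to drop and reacquire a mutex around a wait,
 * preserving the recursion count of recursive mutexes across the wait.
 */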
int
_mutex_cv_lock(struct pthread_mutex *m, int count)
{
	int error;

	error = mutex_lock_common(m, NULL, 1);
	if (error == 0)
		m->m_count = count;
	return (error);
}

int
_mutex_cv_unlock(struct pthread_mutex *m, int *count)
{

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_count = 0;
	(void)mutex_unlock_common(m, 1);
	return (0);
}

int
_mutex_cv_attach(struct pthread_mutex *m, int count)
{
	struct pthread *curthread = _get_curthread();

	ENQUEUE_MUTEX(curthread, m);
	m->m_count = count;
	return (0);
}

int
_mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
{
	struct pthread *curthread = _get_curthread();
	int defered;
	int error;

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*recurse = mp->m_count;
	mp->m_count = 0;
	DEQUEUE_MUTEX(curthread, mp);

	/* Will this happen in the real world? */
	if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
		defered = 1;
		mp->m_flags &= ~PMUTEX_FLAG_DEFERED;
	} else
		defered = 0;

	if (defered) {
		_thr_wake_all(curthread->defer_waiters,
		    curthread->nwaiter_defer);
		curthread->nwaiter_defer = 0;
	}
	return (0);
}

static int
mutex_self_trylock(struct pthread_mutex *m)
{
	int ret;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

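/*
 * Common unlock path.  A recursive mutex with a positive recursion
 * count only has the count decremented; otherwise the mutex is removed
 * from the owned-mutex list, the kernel lock word is released, and any
 * wakeups that were deferred with PMUTEX_FLAG_DEFERED are issued,
 * unless the thread is about to go to sleep anyway.
 */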
static int
mutex_unlock_common(struct pthread_mutex *m, int cv)
{
	struct pthread *curthread = _get_curthread();
	uint32_t id;
	int defered;

	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
		if (m == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
	    PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE &&
	    m->m_count > 0)) {
		m->m_count--;
	} else {
		if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
			defered = 1;
			m->m_flags &= ~PMUTEX_FLAG_DEFERED;
		} else
			defered = 0;

		DEQUEUE_MUTEX(curthread, m);
		_thr_umutex_unlock(&m->m_lock, id);

		if (curthread->will_sleep == 0 && defered) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
	}
	if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_LEAVE(curthread);
	return (0);
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = m->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}

int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	*count = m->m_spinloops;
	return (0);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	m->m_spinloops = count;
	return (0);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	*count = m->m_yieldloops;
	return (0);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	m->m_yieldloops = count;
	return (0);
}

int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	m = *mutex;
	if (m <= THR_MUTEX_DESTROYED)
		return (0);
	return (m->m_owner == _get_curthread());
}

int
_mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
{
	if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
		if (mp == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}
	if (mp->m_owner != curthread)
		return (EPERM);
	return (0);
}

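/*
 * Usage note (illustrative sketch): the non-portable tuning interfaces
 * implemented above, declared in <pthread_np.h>, let an application
 * adjust the adaptive spinning behaviour of an individual mutex, e.g.:
 *
 *	pthread_mutex_t m;
 *
 *	pthread_mutex_init(&m, NULL);
 *	pthread_mutex_setspinloops_np(&m, 1000);
 *	pthread_mutex_setyieldloops_np(&m, 10);
 */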