1 /* 2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>. 3 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by John Birrell. 17 * 4. Neither the name of the author nor the names of any co-contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 * 33 * $FreeBSD$ 34 */ 35 36 #include "namespace.h" 37 #include <stdlib.h> 38 #include <errno.h> 39 #include <string.h> 40 #include <sys/param.h> 41 #include <sys/queue.h> 42 #include <pthread.h> 43 #include <pthread_np.h> 44 #include "un-namespace.h" 45 46 #include "thr_private.h" 47 48 #if defined(_PTHREADS_INVARIANTS) 49 #define MUTEX_INIT_LINK(m) do { \ 50 (m)->m_qe.tqe_prev = NULL; \ 51 (m)->m_qe.tqe_next = NULL; \ 52 } while (0) 53 #define MUTEX_ASSERT_IS_OWNED(m) do { \ 54 if (__predict_false((m)->m_qe.tqe_prev == NULL))\ 55 PANIC("mutex is not on list"); \ 56 } while (0) 57 #define MUTEX_ASSERT_NOT_OWNED(m) do { \ 58 if (__predict_false((m)->m_qe.tqe_prev != NULL || \ 59 (m)->m_qe.tqe_next != NULL)) \ 60 PANIC("mutex is on list"); \ 61 } while (0) 62 #else 63 #define MUTEX_INIT_LINK(m) 64 #define MUTEX_ASSERT_IS_OWNED(m) 65 #define MUTEX_ASSERT_NOT_OWNED(m) 66 #endif 67 68 /* 69 * For adaptive mutexes, how many times to spin doing trylock2 70 * before entering the kernel to block 71 */ 72 #define MUTEX_ADAPTIVE_SPINS 2000 73 74 /* 75 * Prototypes 76 */ 77 int __pthread_mutex_init(pthread_mutex_t *mutex, 78 const pthread_mutexattr_t *mutex_attr); 79 int __pthread_mutex_trylock(pthread_mutex_t *mutex); 80 int __pthread_mutex_lock(pthread_mutex_t *mutex); 81 int __pthread_mutex_timedlock(pthread_mutex_t *mutex, 82 const struct timespec *abstime); 83 int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, 84 void *(calloc_cb)(size_t, size_t)); 85 int _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count); 86 int _pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count); 87 int __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count); 88 int _pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count); 89 int _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count); 90 int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count); 91 92 static int mutex_self_trylock(pthread_mutex_t); 93 static int mutex_self_lock(pthread_mutex_t, 94 const struct timespec *abstime); 95 static int mutex_unlock_common(struct pthread_mutex *, int, int *); 96 static int mutex_lock_sleep(struct pthread *, pthread_mutex_t, 97 const struct timespec *); 98 99 __weak_reference(__pthread_mutex_init, pthread_mutex_init); 100 __strong_reference(__pthread_mutex_init, _pthread_mutex_init); 101 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock); 102 __strong_reference(__pthread_mutex_lock, _pthread_mutex_lock); 103 __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock); 104 __strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock); 105 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock); 106 __strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock); 107 108 /* Single underscore versions provided for libc internal usage: */ 109 /* No difference between libc and application usage of these: */ 110 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy); 111 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock); 112 113 __weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling); 114 __weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling); 115 116 __weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np); 117 __strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np); 118 __weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np); 119 120 __weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np); 121 __strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np); 122 __weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np); 123 __weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np); 124 125 static int 126 mutex_init(pthread_mutex_t *mutex, 127 const struct pthread_mutex_attr *mutex_attr, 128 void *(calloc_cb)(size_t, size_t)) 129 { 130 const struct pthread_mutex_attr *attr; 131 struct pthread_mutex *pmutex; 132 133 if (mutex_attr == NULL) { 134 attr = &_pthread_mutexattr_default; 135 } else { 136 attr = mutex_attr; 137 if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK || 138 attr->m_type >= PTHREAD_MUTEX_TYPE_MAX) 139 return (EINVAL); 140 if (attr->m_protocol < PTHREAD_PRIO_NONE || 141 attr->m_protocol > PTHREAD_PRIO_PROTECT) 142 return (EINVAL); 143 } 144 if ((pmutex = (pthread_mutex_t) 145 calloc_cb(1, sizeof(struct pthread_mutex))) == NULL) 146 return (ENOMEM); 147 148 pmutex->m_flags = attr->m_type; 149 pmutex->m_owner = NULL; 150 pmutex->m_count = 0; 151 pmutex->m_spinloops = 0; 152 pmutex->m_yieldloops = 0; 153 MUTEX_INIT_LINK(pmutex); 154 switch(attr->m_protocol) { 155 case PTHREAD_PRIO_NONE: 156 pmutex->m_lock.m_owner = UMUTEX_UNOWNED; 157 pmutex->m_lock.m_flags = 0; 158 break; 159 case PTHREAD_PRIO_INHERIT: 160 pmutex->m_lock.m_owner = UMUTEX_UNOWNED; 161 pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT; 162 break; 163 case PTHREAD_PRIO_PROTECT: 164 pmutex->m_lock.m_owner = UMUTEX_CONTESTED; 165 pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT; 166 pmutex->m_lock.m_ceilings[0] = attr->m_ceiling; 167 break; 168 } 169 170 if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) { 171 pmutex->m_spinloops = 172 _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS; 173 pmutex->m_yieldloops = _thr_yieldloops; 174 } 175 176 *mutex = pmutex; 177 return (0); 178 } 179 180 static int 181 init_static(struct pthread *thread, pthread_mutex_t *mutex) 182 { 183 int ret; 184 185 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock); 186 187 if (*mutex == THR_MUTEX_INITIALIZER) 188 ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc); 189 else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER) 190 ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, calloc); 191 else 192 ret = 0; 193 THR_LOCK_RELEASE(thread, &_mutex_static_lock); 194 195 return (ret); 196 } 197 198 static void 199 set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m) 200 { 201 struct pthread_mutex *m2; 202 203 m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue); 204 if (m2 != NULL) 205 m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0]; 206 else 207 m->m_lock.m_ceilings[1] = -1; 208 } 209 210 int 211 __pthread_mutex_init(pthread_mutex_t *mutex, 212 const pthread_mutexattr_t *mutex_attr) 213 { 214 return mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc); 215 } 216 217 /* This function is used internally by malloc. */ 218 int 219 _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, 220 void *(calloc_cb)(size_t, size_t)) 221 { 222 static const struct pthread_mutex_attr attr = { 223 .m_type = PTHREAD_MUTEX_NORMAL, 224 .m_protocol = PTHREAD_PRIO_NONE, 225 .m_ceiling = 0 226 }; 227 int ret; 228 229 ret = mutex_init(mutex, &attr, calloc_cb); 230 if (ret == 0) 231 (*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE; 232 return (ret); 233 } 234 235 void 236 _mutex_fork(struct pthread *curthread) 237 { 238 struct pthread_mutex *m; 239 240 /* 241 * Fix mutex ownership for child process. 242 * note that process shared mutex should not 243 * be inherited because owner is forking thread 244 * which is in parent process, they should be 245 * removed from the owned mutex list, current, 246 * process shared mutex is not supported, so I 247 * am not worried. 248 */ 249 250 TAILQ_FOREACH(m, &curthread->mutexq, m_qe) 251 m->m_lock.m_owner = TID(curthread); 252 TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe) 253 m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED; 254 } 255 256 int 257 _pthread_mutex_destroy(pthread_mutex_t *mutex) 258 { 259 pthread_mutex_t m; 260 int ret; 261 262 m = *mutex; 263 if (m < THR_MUTEX_DESTROYED) { 264 ret = 0; 265 } else if (m == THR_MUTEX_DESTROYED) { 266 ret = EINVAL; 267 } else { 268 if (m->m_owner != NULL) { 269 ret = EBUSY; 270 } else { 271 *mutex = THR_MUTEX_DESTROYED; 272 MUTEX_ASSERT_NOT_OWNED(m); 273 free(m); 274 ret = 0; 275 } 276 } 277 278 return (ret); 279 } 280 281 #define ENQUEUE_MUTEX(curthread, m) \ 282 do { \ 283 (m)->m_owner = curthread; \ 284 /* Add to the list of owned mutexes: */ \ 285 MUTEX_ASSERT_NOT_OWNED((m)); \ 286 if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) \ 287 TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\ 288 else \ 289 TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\ 290 } while (0) 291 292 #define DEQUEUE_MUTEX(curthread, m) \ 293 (m)->m_owner = NULL; \ 294 MUTEX_ASSERT_IS_OWNED(m); \ 295 if (__predict_true(((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) \ 296 TAILQ_REMOVE(&curthread->mutexq, (m), m_qe); \ 297 else { \ 298 TAILQ_REMOVE(&curthread->pp_mutexq, (m), m_qe); \ 299 set_inherited_priority(curthread, m); \ 300 } \ 301 MUTEX_INIT_LINK(m); 302 303 #define CHECK_AND_INIT_MUTEX \ 304 if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) { \ 305 if (m == THR_MUTEX_DESTROYED) \ 306 return (EINVAL); \ 307 int ret; \ 308 ret = init_static(_get_curthread(), mutex); \ 309 if (ret) \ 310 return (ret); \ 311 m = *mutex; \ 312 } 313 314 static int 315 mutex_trylock_common(pthread_mutex_t *mutex) 316 { 317 struct pthread *curthread = _get_curthread(); 318 struct pthread_mutex *m = *mutex; 319 uint32_t id; 320 int ret; 321 322 id = TID(curthread); 323 if (m->m_flags & PMUTEX_FLAG_PRIVATE) 324 THR_CRITICAL_ENTER(curthread); 325 ret = _thr_umutex_trylock(&m->m_lock, id); 326 if (__predict_true(ret == 0)) { 327 ENQUEUE_MUTEX(curthread, m); 328 } else if (m->m_owner == curthread) { 329 ret = mutex_self_trylock(m); 330 } /* else {} */ 331 if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE)) 332 THR_CRITICAL_LEAVE(curthread); 333 return (ret); 334 } 335 336 int 337 __pthread_mutex_trylock(pthread_mutex_t *mutex) 338 { 339 struct pthread_mutex *m; 340 341 CHECK_AND_INIT_MUTEX 342 343 return (mutex_trylock_common(mutex)); 344 } 345 346 static int 347 mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m, 348 const struct timespec *abstime) 349 { 350 uint32_t id, owner; 351 int count; 352 int ret; 353 354 if (m->m_owner == curthread) 355 return mutex_self_lock(m, abstime); 356 357 id = TID(curthread); 358 /* 359 * For adaptive mutexes, spin for a bit in the expectation 360 * that if the application requests this mutex type then 361 * the lock is likely to be released quickly and it is 362 * faster than entering the kernel 363 */ 364 if (__predict_false( 365 (m->m_lock.m_flags & 366 (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)) 367 goto sleep_in_kernel; 368 369 if (!_thr_is_smp) 370 goto yield_loop; 371 372 count = m->m_spinloops; 373 while (count--) { 374 owner = m->m_lock.m_owner; 375 if ((owner & ~UMUTEX_CONTESTED) == 0) { 376 if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { 377 ret = 0; 378 goto done; 379 } 380 } 381 CPU_SPINWAIT; 382 } 383 384 yield_loop: 385 count = m->m_yieldloops; 386 while (count--) { 387 _sched_yield(); 388 owner = m->m_lock.m_owner; 389 if ((owner & ~UMUTEX_CONTESTED) == 0) { 390 if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { 391 ret = 0; 392 goto done; 393 } 394 } 395 } 396 397 sleep_in_kernel: 398 if (abstime == NULL) { 399 ret = __thr_umutex_lock(&m->m_lock, id); 400 } else if (__predict_false( 401 abstime->tv_nsec < 0 || 402 abstime->tv_nsec >= 1000000000)) { 403 ret = EINVAL; 404 } else { 405 ret = __thr_umutex_timedlock(&m->m_lock, id, abstime); 406 } 407 done: 408 if (ret == 0) 409 ENQUEUE_MUTEX(curthread, m); 410 411 return (ret); 412 } 413 414 static inline int 415 mutex_lock_common(struct pthread_mutex *m, 416 const struct timespec *abstime, int cvattach) 417 { 418 struct pthread *curthread = _get_curthread(); 419 int ret; 420 421 if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE) 422 THR_CRITICAL_ENTER(curthread); 423 if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) { 424 ENQUEUE_MUTEX(curthread, m); 425 ret = 0; 426 } else { 427 ret = mutex_lock_sleep(curthread, m, abstime); 428 } 429 if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach) 430 THR_CRITICAL_LEAVE(curthread); 431 return (ret); 432 } 433 434 int 435 __pthread_mutex_lock(pthread_mutex_t *mutex) 436 { 437 struct pthread_mutex *m; 438 439 _thr_check_init(); 440 441 CHECK_AND_INIT_MUTEX 442 443 return (mutex_lock_common(m, NULL, 0)); 444 } 445 446 int 447 __pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime) 448 { 449 struct pthread_mutex *m; 450 451 _thr_check_init(); 452 453 CHECK_AND_INIT_MUTEX 454 455 return (mutex_lock_common(m, abstime, 0)); 456 } 457 458 int 459 _pthread_mutex_unlock(pthread_mutex_t *mutex) 460 { 461 struct pthread_mutex *mp; 462 463 mp = *mutex; 464 return (mutex_unlock_common(mp, 0, NULL)); 465 } 466 467 int 468 _mutex_cv_lock(struct pthread_mutex *m, int count) 469 { 470 int error; 471 472 error = mutex_lock_common(m, NULL, 1); 473 if (error == 0) 474 m->m_count = count; 475 return (error); 476 } 477 478 int 479 _mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer) 480 { 481 482 /* 483 * Clear the count in case this is a recursive mutex. 484 */ 485 *count = m->m_count; 486 m->m_count = 0; 487 (void)mutex_unlock_common(m, 1, defer); 488 return (0); 489 } 490 491 int 492 _mutex_cv_attach(struct pthread_mutex *m, int count) 493 { 494 struct pthread *curthread = _get_curthread(); 495 496 ENQUEUE_MUTEX(curthread, m); 497 m->m_count = count; 498 return (0); 499 } 500 501 int 502 _mutex_cv_detach(struct pthread_mutex *mp, int *recurse) 503 { 504 struct pthread *curthread = _get_curthread(); 505 int defered; 506 int error; 507 508 if ((error = _mutex_owned(curthread, mp)) != 0) 509 return (error); 510 511 /* 512 * Clear the count in case this is a recursive mutex. 513 */ 514 *recurse = mp->m_count; 515 mp->m_count = 0; 516 DEQUEUE_MUTEX(curthread, mp); 517 518 /* Will this happen in real-world ? */ 519 if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) { 520 defered = 1; 521 mp->m_flags &= ~PMUTEX_FLAG_DEFERED; 522 } else 523 defered = 0; 524 525 if (defered) { 526 _thr_wake_all(curthread->defer_waiters, 527 curthread->nwaiter_defer); 528 curthread->nwaiter_defer = 0; 529 } 530 return (0); 531 } 532 533 static int 534 mutex_self_trylock(struct pthread_mutex *m) 535 { 536 int ret; 537 538 switch (PMUTEX_TYPE(m->m_flags)) { 539 case PTHREAD_MUTEX_ERRORCHECK: 540 case PTHREAD_MUTEX_NORMAL: 541 case PTHREAD_MUTEX_ADAPTIVE_NP: 542 ret = EBUSY; 543 break; 544 545 case PTHREAD_MUTEX_RECURSIVE: 546 /* Increment the lock count: */ 547 if (m->m_count + 1 > 0) { 548 m->m_count++; 549 ret = 0; 550 } else 551 ret = EAGAIN; 552 break; 553 554 default: 555 /* Trap invalid mutex types; */ 556 ret = EINVAL; 557 } 558 559 return (ret); 560 } 561 562 static int 563 mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime) 564 { 565 struct timespec ts1, ts2; 566 int ret; 567 568 switch (PMUTEX_TYPE(m->m_flags)) { 569 case PTHREAD_MUTEX_ERRORCHECK: 570 case PTHREAD_MUTEX_ADAPTIVE_NP: 571 if (abstime) { 572 if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || 573 abstime->tv_nsec >= 1000000000) { 574 ret = EINVAL; 575 } else { 576 clock_gettime(CLOCK_REALTIME, &ts1); 577 TIMESPEC_SUB(&ts2, abstime, &ts1); 578 __sys_nanosleep(&ts2, NULL); 579 ret = ETIMEDOUT; 580 } 581 } else { 582 /* 583 * POSIX specifies that mutexes should return 584 * EDEADLK if a recursive lock is detected. 585 */ 586 ret = EDEADLK; 587 } 588 break; 589 590 case PTHREAD_MUTEX_NORMAL: 591 /* 592 * What SS2 define as a 'normal' mutex. Intentionally 593 * deadlock on attempts to get a lock you already own. 594 */ 595 ret = 0; 596 if (abstime) { 597 if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || 598 abstime->tv_nsec >= 1000000000) { 599 ret = EINVAL; 600 } else { 601 clock_gettime(CLOCK_REALTIME, &ts1); 602 TIMESPEC_SUB(&ts2, abstime, &ts1); 603 __sys_nanosleep(&ts2, NULL); 604 ret = ETIMEDOUT; 605 } 606 } else { 607 ts1.tv_sec = 30; 608 ts1.tv_nsec = 0; 609 for (;;) 610 __sys_nanosleep(&ts1, NULL); 611 } 612 break; 613 614 case PTHREAD_MUTEX_RECURSIVE: 615 /* Increment the lock count: */ 616 if (m->m_count + 1 > 0) { 617 m->m_count++; 618 ret = 0; 619 } else 620 ret = EAGAIN; 621 break; 622 623 default: 624 /* Trap invalid mutex types; */ 625 ret = EINVAL; 626 } 627 628 return (ret); 629 } 630 631 static int 632 mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer) 633 { 634 struct pthread *curthread = _get_curthread(); 635 uint32_t id; 636 int defered; 637 638 if (__predict_false(m <= THR_MUTEX_DESTROYED)) { 639 if (m == THR_MUTEX_DESTROYED) 640 return (EINVAL); 641 return (EPERM); 642 } 643 644 /* 645 * Check if the running thread is not the owner of the mutex. 646 */ 647 if (__predict_false(m->m_owner != curthread)) 648 return (EPERM); 649 650 id = TID(curthread); 651 if (__predict_false( 652 PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE && 653 m->m_count > 0)) { 654 m->m_count--; 655 } else { 656 if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) { 657 defered = 1; 658 m->m_flags &= ~PMUTEX_FLAG_DEFERED; 659 } else 660 defered = 0; 661 662 DEQUEUE_MUTEX(curthread, m); 663 _thr_umutex_unlock2(&m->m_lock, id, mtx_defer); 664 665 if (mtx_defer == NULL && defered) { 666 _thr_wake_all(curthread->defer_waiters, 667 curthread->nwaiter_defer); 668 curthread->nwaiter_defer = 0; 669 } 670 } 671 if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE) 672 THR_CRITICAL_LEAVE(curthread); 673 return (0); 674 } 675 676 int 677 _pthread_mutex_getprioceiling(pthread_mutex_t *mutex, 678 int *prioceiling) 679 { 680 struct pthread_mutex *m; 681 int ret; 682 683 m = *mutex; 684 if ((m <= THR_MUTEX_DESTROYED) || 685 (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) 686 ret = EINVAL; 687 else { 688 *prioceiling = m->m_lock.m_ceilings[0]; 689 ret = 0; 690 } 691 692 return (ret); 693 } 694 695 int 696 _pthread_mutex_setprioceiling(pthread_mutex_t *mutex, 697 int ceiling, int *old_ceiling) 698 { 699 struct pthread *curthread = _get_curthread(); 700 struct pthread_mutex *m, *m1, *m2; 701 int ret; 702 703 m = *mutex; 704 if ((m <= THR_MUTEX_DESTROYED) || 705 (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) 706 return (EINVAL); 707 708 ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling); 709 if (ret != 0) 710 return (ret); 711 712 if (m->m_owner == curthread) { 713 MUTEX_ASSERT_IS_OWNED(m); 714 m1 = TAILQ_PREV(m, mutex_queue, m_qe); 715 m2 = TAILQ_NEXT(m, m_qe); 716 if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) || 717 (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) { 718 TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe); 719 TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) { 720 if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) { 721 TAILQ_INSERT_BEFORE(m2, m, m_qe); 722 return (0); 723 } 724 } 725 TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe); 726 } 727 } 728 return (0); 729 } 730 731 int 732 _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count) 733 { 734 struct pthread_mutex *m; 735 736 CHECK_AND_INIT_MUTEX 737 738 *count = m->m_spinloops; 739 return (0); 740 } 741 742 int 743 __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count) 744 { 745 struct pthread_mutex *m; 746 747 CHECK_AND_INIT_MUTEX 748 749 m->m_spinloops = count; 750 return (0); 751 } 752 753 int 754 _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count) 755 { 756 struct pthread_mutex *m; 757 758 CHECK_AND_INIT_MUTEX 759 760 *count = m->m_yieldloops; 761 return (0); 762 } 763 764 int 765 __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count) 766 { 767 struct pthread_mutex *m; 768 769 CHECK_AND_INIT_MUTEX 770 771 m->m_yieldloops = count; 772 return (0); 773 } 774 775 int 776 _pthread_mutex_isowned_np(pthread_mutex_t *mutex) 777 { 778 struct pthread_mutex *m; 779 780 m = *mutex; 781 if (m <= THR_MUTEX_DESTROYED) 782 return (0); 783 return (m->m_owner == _get_curthread()); 784 } 785 786 int 787 _mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp) 788 { 789 if (__predict_false(mp <= THR_MUTEX_DESTROYED)) { 790 if (mp == THR_MUTEX_DESTROYED) 791 return (EINVAL); 792 return (EPERM); 793 } 794 if (mp->m_owner != curthread) 795 return (EPERM); 796 return (0); 797 } 798