1 /* 2 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>. 3 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by John Birrell. 17 * 4. Neither the name of the author nor the names of any co-contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 * 33 * $FreeBSD$ 34 */ 35 36 #include "namespace.h" 37 #include <stdlib.h> 38 #include <errno.h> 39 #include <string.h> 40 #include <sys/param.h> 41 #include <sys/queue.h> 42 #include <pthread.h> 43 #include <pthread_np.h> 44 #include "un-namespace.h" 45 46 #include "thr_private.h" 47 48 #if defined(_PTHREADS_INVARIANTS) 49 #define MUTEX_INIT_LINK(m) do { \ 50 (m)->m_qe.tqe_prev = NULL; \ 51 (m)->m_qe.tqe_next = NULL; \ 52 } while (0) 53 #define MUTEX_ASSERT_IS_OWNED(m) do { \ 54 if (__predict_false((m)->m_qe.tqe_prev == NULL))\ 55 PANIC("mutex is not on list"); \ 56 } while (0) 57 #define MUTEX_ASSERT_NOT_OWNED(m) do { \ 58 if (__predict_false((m)->m_qe.tqe_prev != NULL || \ 59 (m)->m_qe.tqe_next != NULL)) \ 60 PANIC("mutex is on list"); \ 61 } while (0) 62 #else 63 #define MUTEX_INIT_LINK(m) 64 #define MUTEX_ASSERT_IS_OWNED(m) 65 #define MUTEX_ASSERT_NOT_OWNED(m) 66 #endif 67 68 /* 69 * For adaptive mutexes, how many times to spin doing trylock2 70 * before entering the kernel to block 71 */ 72 #define MUTEX_ADAPTIVE_SPINS 2000 73 74 /* 75 * Prototypes 76 */ 77 int __pthread_mutex_init(pthread_mutex_t *mutex, 78 const pthread_mutexattr_t *mutex_attr); 79 int __pthread_mutex_trylock(pthread_mutex_t *mutex); 80 int __pthread_mutex_lock(pthread_mutex_t *mutex); 81 int __pthread_mutex_timedlock(pthread_mutex_t *mutex, 82 const struct timespec *abstime); 83 int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, 84 void *(calloc_cb)(size_t, size_t)); 85 int _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count); 86 int _pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count); 87 int __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count); 88 int _pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count); 89 int _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count); 90 int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count); 91 92 static int mutex_self_trylock(pthread_mutex_t); 93 static int mutex_self_lock(pthread_mutex_t, 94 const struct timespec *abstime); 95 static int mutex_unlock_common(pthread_mutex_t *); 96 static int mutex_lock_sleep(struct pthread *, pthread_mutex_t, 97 const struct timespec *); 98 99 __weak_reference(__pthread_mutex_init, pthread_mutex_init); 100 __strong_reference(__pthread_mutex_init, _pthread_mutex_init); 101 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock); 102 __strong_reference(__pthread_mutex_lock, _pthread_mutex_lock); 103 __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock); 104 __strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock); 105 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock); 106 __strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock); 107 108 /* Single underscore versions provided for libc internal usage: */ 109 /* No difference between libc and application usage of these: */ 110 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy); 111 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock); 112 113 __weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling); 114 __weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling); 115 116 __weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np); 117 __strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np); 118 __weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np); 119 120 __weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np); 121 __strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np); 122 __weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np); 123 __weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np); 124 125 static int 126 mutex_init(pthread_mutex_t *mutex, 127 const struct pthread_mutex_attr *mutex_attr, 128 void *(calloc_cb)(size_t, size_t)) 129 { 130 const struct pthread_mutex_attr *attr; 131 struct pthread_mutex *pmutex; 132 133 if (mutex_attr == NULL) { 134 attr = &_pthread_mutexattr_default; 135 } else { 136 attr = mutex_attr; 137 if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK || 138 attr->m_type >= PTHREAD_MUTEX_TYPE_MAX) 139 return (EINVAL); 140 if (attr->m_protocol < PTHREAD_PRIO_NONE || 141 attr->m_protocol > PTHREAD_PRIO_PROTECT) 142 return (EINVAL); 143 } 144 if ((pmutex = (pthread_mutex_t) 145 calloc_cb(1, sizeof(struct pthread_mutex))) == NULL) 146 return (ENOMEM); 147 148 pmutex->m_type = attr->m_type; 149 pmutex->m_owner = NULL; 150 pmutex->m_count = 0; 151 pmutex->m_refcount = 0; 152 pmutex->m_spinloops = 0; 153 pmutex->m_yieldloops = 0; 154 MUTEX_INIT_LINK(pmutex); 155 switch(attr->m_protocol) { 156 case PTHREAD_PRIO_NONE: 157 pmutex->m_lock.m_owner = UMUTEX_UNOWNED; 158 pmutex->m_lock.m_flags = 0; 159 break; 160 case PTHREAD_PRIO_INHERIT: 161 pmutex->m_lock.m_owner = UMUTEX_UNOWNED; 162 pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT; 163 break; 164 case PTHREAD_PRIO_PROTECT: 165 pmutex->m_lock.m_owner = UMUTEX_CONTESTED; 166 pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT; 167 pmutex->m_lock.m_ceilings[0] = attr->m_ceiling; 168 break; 169 } 170 171 if (pmutex->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) { 172 pmutex->m_spinloops = 173 _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS; 174 pmutex->m_yieldloops = _thr_yieldloops; 175 } 176 177 *mutex = pmutex; 178 return (0); 179 } 180 181 static int 182 init_static(struct pthread *thread, pthread_mutex_t *mutex) 183 { 184 int ret; 185 186 THR_LOCK_ACQUIRE(thread, &_mutex_static_lock); 187 188 if (*mutex == THR_MUTEX_INITIALIZER) 189 ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc); 190 else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER) 191 ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, calloc); 192 else 193 ret = 0; 194 THR_LOCK_RELEASE(thread, &_mutex_static_lock); 195 196 return (ret); 197 } 198 199 static void 200 set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m) 201 { 202 struct pthread_mutex *m2; 203 204 m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue); 205 if (m2 != NULL) 206 m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0]; 207 else 208 m->m_lock.m_ceilings[1] = -1; 209 } 210 211 int 212 __pthread_mutex_init(pthread_mutex_t *mutex, 213 const pthread_mutexattr_t *mutex_attr) 214 { 215 return mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc); 216 } 217 218 /* This function is used internally by malloc. */ 219 int 220 _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, 221 void *(calloc_cb)(size_t, size_t)) 222 { 223 static const struct pthread_mutex_attr attr = { 224 .m_type = PTHREAD_MUTEX_NORMAL, 225 .m_protocol = PTHREAD_PRIO_NONE, 226 .m_ceiling = 0 227 }; 228 int ret; 229 230 ret = mutex_init(mutex, &attr, calloc_cb); 231 if (ret == 0) 232 (*mutex)->m_private = 1; 233 return (ret); 234 } 235 236 void 237 _mutex_fork(struct pthread *curthread) 238 { 239 struct pthread_mutex *m; 240 241 /* 242 * Fix mutex ownership for child process. 243 * note that process shared mutex should not 244 * be inherited because owner is forking thread 245 * which is in parent process, they should be 246 * removed from the owned mutex list, current, 247 * process shared mutex is not supported, so I 248 * am not worried. 249 */ 250 251 TAILQ_FOREACH(m, &curthread->mutexq, m_qe) 252 m->m_lock.m_owner = TID(curthread); 253 TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe) 254 m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED; 255 } 256 257 int 258 _pthread_mutex_destroy(pthread_mutex_t *mutex) 259 { 260 struct pthread *curthread = _get_curthread(); 261 pthread_mutex_t m; 262 uint32_t id; 263 int ret = 0; 264 265 m = *mutex; 266 if (m < THR_MUTEX_DESTROYED) { 267 ret = 0; 268 } else if (m == THR_MUTEX_DESTROYED) { 269 ret = EINVAL; 270 } else { 271 id = TID(curthread); 272 273 /* 274 * Try to lock the mutex structure, we only need to 275 * try once, if failed, the mutex is in used. 276 */ 277 ret = _thr_umutex_trylock(&m->m_lock, id); 278 if (ret) 279 return (ret); 280 /* 281 * Check mutex other fields to see if this mutex is 282 * in use. Mostly for prority mutex types, or there 283 * are condition variables referencing it. 284 */ 285 if (m->m_owner != NULL || m->m_refcount != 0) { 286 if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) 287 set_inherited_priority(curthread, m); 288 _thr_umutex_unlock(&m->m_lock, id); 289 ret = EBUSY; 290 } else { 291 *mutex = THR_MUTEX_DESTROYED; 292 293 if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) 294 set_inherited_priority(curthread, m); 295 _thr_umutex_unlock(&m->m_lock, id); 296 297 MUTEX_ASSERT_NOT_OWNED(m); 298 free(m); 299 } 300 } 301 302 return (ret); 303 } 304 305 #define ENQUEUE_MUTEX(curthread, m) \ 306 do { \ 307 (m)->m_owner = curthread; \ 308 /* Add to the list of owned mutexes: */ \ 309 MUTEX_ASSERT_NOT_OWNED((m)); \ 310 if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) \ 311 TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\ 312 else \ 313 TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\ 314 } while (0) 315 316 #define CHECK_AND_INIT_MUTEX \ 317 if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) { \ 318 if (m == THR_MUTEX_DESTROYED) \ 319 return (EINVAL); \ 320 int ret; \ 321 ret = init_static(_get_curthread(), mutex); \ 322 if (ret) \ 323 return (ret); \ 324 m = *mutex; \ 325 } 326 327 static int 328 mutex_trylock_common(pthread_mutex_t *mutex) 329 { 330 struct pthread *curthread = _get_curthread(); 331 struct pthread_mutex *m = *mutex; 332 uint32_t id; 333 int ret; 334 335 id = TID(curthread); 336 if (m->m_private) 337 THR_CRITICAL_ENTER(curthread); 338 ret = _thr_umutex_trylock(&m->m_lock, id); 339 if (__predict_true(ret == 0)) { 340 ENQUEUE_MUTEX(curthread, m); 341 } else if (m->m_owner == curthread) { 342 ret = mutex_self_trylock(m); 343 } /* else {} */ 344 if (ret && m->m_private) 345 THR_CRITICAL_LEAVE(curthread); 346 return (ret); 347 } 348 349 int 350 __pthread_mutex_trylock(pthread_mutex_t *mutex) 351 { 352 struct pthread_mutex *m; 353 354 CHECK_AND_INIT_MUTEX 355 356 return (mutex_trylock_common(mutex)); 357 } 358 359 static int 360 mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m, 361 const struct timespec *abstime) 362 { 363 uint32_t id, owner; 364 int count; 365 int ret; 366 367 if (m->m_owner == curthread) 368 return mutex_self_lock(m, abstime); 369 370 id = TID(curthread); 371 /* 372 * For adaptive mutexes, spin for a bit in the expectation 373 * that if the application requests this mutex type then 374 * the lock is likely to be released quickly and it is 375 * faster than entering the kernel 376 */ 377 if (__predict_false( 378 (m->m_lock.m_flags & 379 (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)) 380 goto sleep_in_kernel; 381 382 if (!_thr_is_smp) 383 goto yield_loop; 384 385 count = m->m_spinloops; 386 while (count--) { 387 owner = m->m_lock.m_owner; 388 if ((owner & ~UMUTEX_CONTESTED) == 0) { 389 if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { 390 ret = 0; 391 goto done; 392 } 393 } 394 CPU_SPINWAIT; 395 } 396 397 yield_loop: 398 count = m->m_yieldloops; 399 while (count--) { 400 _sched_yield(); 401 owner = m->m_lock.m_owner; 402 if ((owner & ~UMUTEX_CONTESTED) == 0) { 403 if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) { 404 ret = 0; 405 goto done; 406 } 407 } 408 } 409 410 sleep_in_kernel: 411 if (abstime == NULL) { 412 ret = __thr_umutex_lock(&m->m_lock, id); 413 } else if (__predict_false( 414 abstime->tv_nsec < 0 || 415 abstime->tv_nsec >= 1000000000)) { 416 ret = EINVAL; 417 } else { 418 ret = __thr_umutex_timedlock(&m->m_lock, id, abstime); 419 } 420 done: 421 if (ret == 0) 422 ENQUEUE_MUTEX(curthread, m); 423 424 return (ret); 425 } 426 427 static inline int 428 mutex_lock_common(struct pthread_mutex *m, 429 const struct timespec *abstime) 430 { 431 struct pthread *curthread = _get_curthread(); 432 int ret; 433 434 if (m->m_private) 435 THR_CRITICAL_ENTER(curthread); 436 if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) { 437 ENQUEUE_MUTEX(curthread, m); 438 ret = 0; 439 } else { 440 ret = mutex_lock_sleep(curthread, m, abstime); 441 } 442 if (ret && m->m_private) 443 THR_CRITICAL_LEAVE(curthread); 444 return (ret); 445 } 446 447 int 448 __pthread_mutex_lock(pthread_mutex_t *mutex) 449 { 450 struct pthread_mutex *m; 451 452 _thr_check_init(); 453 454 CHECK_AND_INIT_MUTEX 455 456 return (mutex_lock_common(m, NULL)); 457 } 458 459 int 460 __pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime) 461 { 462 struct pthread_mutex *m; 463 464 _thr_check_init(); 465 466 CHECK_AND_INIT_MUTEX 467 468 return (mutex_lock_common(m, abstime)); 469 } 470 471 int 472 _pthread_mutex_unlock(pthread_mutex_t *m) 473 { 474 return (mutex_unlock_common(m)); 475 } 476 477 int 478 _mutex_cv_lock(pthread_mutex_t *mutex, int count) 479 { 480 struct pthread_mutex *m; 481 int ret; 482 483 m = *mutex; 484 ret = mutex_lock_common(m, NULL); 485 if (ret == 0) { 486 m->m_refcount--; 487 m->m_count += count; 488 } 489 return (ret); 490 } 491 492 static int 493 mutex_self_trylock(struct pthread_mutex *m) 494 { 495 int ret; 496 497 switch (m->m_type) { 498 case PTHREAD_MUTEX_ERRORCHECK: 499 case PTHREAD_MUTEX_NORMAL: 500 ret = EBUSY; 501 break; 502 503 case PTHREAD_MUTEX_RECURSIVE: 504 /* Increment the lock count: */ 505 if (m->m_count + 1 > 0) { 506 m->m_count++; 507 ret = 0; 508 } else 509 ret = EAGAIN; 510 break; 511 512 default: 513 /* Trap invalid mutex types; */ 514 ret = EINVAL; 515 } 516 517 return (ret); 518 } 519 520 static int 521 mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime) 522 { 523 struct timespec ts1, ts2; 524 int ret; 525 526 switch (m->m_type) { 527 case PTHREAD_MUTEX_ERRORCHECK: 528 case PTHREAD_MUTEX_ADAPTIVE_NP: 529 if (abstime) { 530 if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || 531 abstime->tv_nsec >= 1000000000) { 532 ret = EINVAL; 533 } else { 534 clock_gettime(CLOCK_REALTIME, &ts1); 535 TIMESPEC_SUB(&ts2, abstime, &ts1); 536 __sys_nanosleep(&ts2, NULL); 537 ret = ETIMEDOUT; 538 } 539 } else { 540 /* 541 * POSIX specifies that mutexes should return 542 * EDEADLK if a recursive lock is detected. 543 */ 544 ret = EDEADLK; 545 } 546 break; 547 548 case PTHREAD_MUTEX_NORMAL: 549 /* 550 * What SS2 define as a 'normal' mutex. Intentionally 551 * deadlock on attempts to get a lock you already own. 552 */ 553 ret = 0; 554 if (abstime) { 555 if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 || 556 abstime->tv_nsec >= 1000000000) { 557 ret = EINVAL; 558 } else { 559 clock_gettime(CLOCK_REALTIME, &ts1); 560 TIMESPEC_SUB(&ts2, abstime, &ts1); 561 __sys_nanosleep(&ts2, NULL); 562 ret = ETIMEDOUT; 563 } 564 } else { 565 ts1.tv_sec = 30; 566 ts1.tv_nsec = 0; 567 for (;;) 568 __sys_nanosleep(&ts1, NULL); 569 } 570 break; 571 572 case PTHREAD_MUTEX_RECURSIVE: 573 /* Increment the lock count: */ 574 if (m->m_count + 1 > 0) { 575 m->m_count++; 576 ret = 0; 577 } else 578 ret = EAGAIN; 579 break; 580 581 default: 582 /* Trap invalid mutex types; */ 583 ret = EINVAL; 584 } 585 586 return (ret); 587 } 588 589 static int 590 mutex_unlock_common(pthread_mutex_t *mutex) 591 { 592 struct pthread *curthread = _get_curthread(); 593 struct pthread_mutex *m; 594 uint32_t id; 595 596 m = *mutex; 597 if (__predict_false(m <= THR_MUTEX_DESTROYED)) { 598 if (m == THR_MUTEX_DESTROYED) 599 return (EINVAL); 600 return (EPERM); 601 } 602 603 /* 604 * Check if the running thread is not the owner of the mutex. 605 */ 606 if (__predict_false(m->m_owner != curthread)) 607 return (EPERM); 608 609 id = TID(curthread); 610 if (__predict_false( 611 m->m_type == PTHREAD_MUTEX_RECURSIVE && 612 m->m_count > 0)) { 613 m->m_count--; 614 } else { 615 m->m_owner = NULL; 616 /* Remove the mutex from the threads queue. */ 617 MUTEX_ASSERT_IS_OWNED(m); 618 if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) 619 TAILQ_REMOVE(&curthread->mutexq, m, m_qe); 620 else { 621 TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe); 622 set_inherited_priority(curthread, m); 623 } 624 MUTEX_INIT_LINK(m); 625 _thr_umutex_unlock(&m->m_lock, id); 626 } 627 if (m->m_private) 628 THR_CRITICAL_LEAVE(curthread); 629 return (0); 630 } 631 632 int 633 _mutex_cv_unlock(pthread_mutex_t *mutex, int *count) 634 { 635 struct pthread *curthread = _get_curthread(); 636 struct pthread_mutex *m; 637 638 m = *mutex; 639 if (__predict_false(m <= THR_MUTEX_DESTROYED)) { 640 if (m == THR_MUTEX_DESTROYED) 641 return (EINVAL); 642 return (EPERM); 643 } 644 645 /* 646 * Check if the running thread is not the owner of the mutex. 647 */ 648 if (__predict_false(m->m_owner != curthread)) 649 return (EPERM); 650 651 /* 652 * Clear the count in case this is a recursive mutex. 653 */ 654 *count = m->m_count; 655 m->m_refcount++; 656 m->m_count = 0; 657 m->m_owner = NULL; 658 /* Remove the mutex from the threads queue. */ 659 MUTEX_ASSERT_IS_OWNED(m); 660 if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) 661 TAILQ_REMOVE(&curthread->mutexq, m, m_qe); 662 else { 663 TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe); 664 set_inherited_priority(curthread, m); 665 } 666 MUTEX_INIT_LINK(m); 667 _thr_umutex_unlock(&m->m_lock, TID(curthread)); 668 669 if (m->m_private) 670 THR_CRITICAL_LEAVE(curthread); 671 return (0); 672 } 673 674 int 675 _pthread_mutex_getprioceiling(pthread_mutex_t *mutex, 676 int *prioceiling) 677 { 678 struct pthread_mutex *m; 679 int ret; 680 681 m = *mutex; 682 if ((m <= THR_MUTEX_DESTROYED) || 683 (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) 684 ret = EINVAL; 685 else { 686 *prioceiling = m->m_lock.m_ceilings[0]; 687 ret = 0; 688 } 689 690 return (ret); 691 } 692 693 int 694 _pthread_mutex_setprioceiling(pthread_mutex_t *mutex, 695 int ceiling, int *old_ceiling) 696 { 697 struct pthread *curthread = _get_curthread(); 698 struct pthread_mutex *m, *m1, *m2; 699 int ret; 700 701 m = *mutex; 702 if ((m <= THR_MUTEX_DESTROYED) || 703 (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) 704 return (EINVAL); 705 706 ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling); 707 if (ret != 0) 708 return (ret); 709 710 if (m->m_owner == curthread) { 711 MUTEX_ASSERT_IS_OWNED(m); 712 m1 = TAILQ_PREV(m, mutex_queue, m_qe); 713 m2 = TAILQ_NEXT(m, m_qe); 714 if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) || 715 (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) { 716 TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe); 717 TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) { 718 if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) { 719 TAILQ_INSERT_BEFORE(m2, m, m_qe); 720 return (0); 721 } 722 } 723 TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe); 724 } 725 } 726 return (0); 727 } 728 729 int 730 _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count) 731 { 732 struct pthread_mutex *m; 733 734 CHECK_AND_INIT_MUTEX 735 736 *count = m->m_spinloops; 737 return (0); 738 } 739 740 int 741 __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count) 742 { 743 struct pthread_mutex *m; 744 745 CHECK_AND_INIT_MUTEX 746 747 m->m_spinloops = count; 748 return (0); 749 } 750 751 int 752 _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count) 753 { 754 struct pthread_mutex *m; 755 756 CHECK_AND_INIT_MUTEX 757 758 *count = m->m_yieldloops; 759 return (0); 760 } 761 762 int 763 __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count) 764 { 765 struct pthread_mutex *m; 766 767 CHECK_AND_INIT_MUTEX 768 769 m->m_yieldloops = count; 770 return (0); 771 } 772 773 int 774 _pthread_mutex_isowned_np(pthread_mutex_t *mutex) 775 { 776 struct pthread_mutex *m; 777 778 m = *mutex; 779 if (m <= THR_MUTEX_DESTROYED) 780 return (0); 781 return (m->m_owner == _get_curthread()); 782 } 783