/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
	    const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
	    const struct timespec *abstime);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
		    const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
	    calloc(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
	}
	pmutex->m_lock.m_spincount = _thr_adaptive_spin;
	*mutex = pmutex;
	return (0);
}
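
/*
 * Illustrative sketch (not part of this file): how an application reaches
 * the attribute validation in mutex_init() above through the public API.
 * All names here are standard POSIX; error handling is elided.
 *
 *	#include <pthread.h>
 *
 *	pthread_mutex_t m;
 *	pthread_mutexattr_t attr;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
 *	pthread_mutex_init(&m, &attr);	// lands in mutex_init() above
 *	pthread_mutexattr_destroy(&attr);
 */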

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that a
	 * process-shared mutex should not be inherited, because its
	 * owner is the forking thread, which lives in the parent
	 * process; such mutexes would have to be removed from the
	 * owned mutex list.  Process-shared mutexes are not currently
	 * supported, so this is not yet a concern.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If that fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m = *mutex;
		/*
		 * Check the mutex's other fields to see if it is
		 * still in use; mostly for priority-protocol mutex
		 * types, or when condition variables reference it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be
			 * freed, and set the caller's pointer to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}
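
/*
 * Illustrative sketch (not part of this file): what the busy paths in
 * _pthread_mutex_destroy() above look like from application code,
 * assuming _thr_umutex_trylock() fails with EBUSY on a held lock.
 *
 *	pthread_mutex_t m;
 *
 *	pthread_mutex_init(&m, NULL);
 *	pthread_mutex_lock(&m);
 *	assert(pthread_mutex_destroy(&m) == EBUSY);	// still locked
 *	pthread_mutex_unlock(&m);
 *	assert(pthread_mutex_destroy(&m) == 0);		// now freed
 */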

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
		else
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization, marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct timespec ts, ts2;
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock2(&m->m_lock, id);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
		else
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		if (abstime == NULL) {
			ret = __thr_umutex_lock(&m->m_lock);
		} else if (__predict_false(
		    abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
		    abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = __thr_umutex_timedlock(&m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth doing.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
		if (ret == 0) {
			m->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(m);
			if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
				TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
			else
				TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m,
				    m_qe);
		}
	}
	return (ret);
}
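
/*
 * Illustrative sketch (not part of this file): constructing the absolute
 * deadline that mutex_lock_common() converts to a relative timeout above.
 * The 100 ms offset is an arbitrary example value.
 *
 *	struct timespec abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_nsec += 100 * 1000 * 1000;	// +100 ms
 *	if (abstime.tv_nsec >= 1000000000) {	// keep tv_nsec normalized
 *		abstime.tv_sec++;
 *		abstime.tv_nsec -= 1000000000;
 *	}
 *	// ETIMEDOUT if the lock is not acquired before the deadline:
 *	int error = pthread_mutex_timedlock(&m, &abstime);
 */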

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization, marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization, marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}
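
/*
 * Illustrative sketch (not part of this file): the recursive case in
 * mutex_self_trylock() above lets the owner re-acquire the lock, and
 * every lock needs a matching unlock.  Assumes m was created with type
 * PTHREAD_MUTEX_RECURSIVE.
 *
 *	pthread_mutex_lock(&m);		// acquires the lock, m_count == 0
 *	pthread_mutex_lock(&m);		// self-lock, m_count -> 1
 *	pthread_mutex_unlock(&m);	// m_count -> 0, still owned
 *	pthread_mutex_unlock(&m);	// releases the lock
 */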

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 (SUSv2) defines as a 'normal' mutex:
		 * intentionally deadlock on attempts to get a lock
		 * you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
	    m->m_type == PTHREAD_MUTEX_RECURSIVE &&
	    m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}
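
/*
 * Illustrative sketch (not part of this file): the checks above as seen
 * from application code, assuming m was created with type
 * PTHREAD_MUTEX_ERRORCHECK and is currently locked by thread A.
 *
 *	// in thread A, which already owns m:
 *	assert(pthread_mutex_lock(&m) == EDEADLK);	// mutex_self_lock()
 *	// in thread B, which does not own m:
 *	assert(pthread_mutex_unlock(&m) == EPERM);	// mutex_unlock_common()
 */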

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex *m, *m_next;

	TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}
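
/*
 * Illustrative sketch (not part of this file): exercising the priority
 * ceiling accessors above.  Both return EINVAL unless the mutex uses the
 * PTHREAD_PRIO_PROTECT protocol; the ceiling values are arbitrary examples.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *	int old;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
 *	pthread_mutexattr_setprioceiling(&attr, 20);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutex_setprioceiling(&m, 25, &old);	// old == 20
 */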