xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision 438441203054e5eeb67f8cd8ca4eb0ef1603bc81)
1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by John Birrell.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 #include <stdlib.h>
35 #include <errno.h>
36 #include <string.h>
37 #include <sys/param.h>
38 #include <sys/queue.h>
39 #include <pthread.h>
40 #include "thr_private.h"
41 
42 #if defined(_PTHREADS_INVARIANTS)
43 #define _MUTEX_INIT_LINK(m) 		do {		\
44 	(m)->m_qe.tqe_prev = NULL;			\
45 	(m)->m_qe.tqe_next = NULL;			\
46 } while (0)
47 #define _MUTEX_ASSERT_IS_OWNED(m)	do {		\
48 	if ((m)->m_qe.tqe_prev == NULL)			\
49 		PANIC("mutex is not on list");		\
50 } while (0)
51 #define _MUTEX_ASSERT_NOT_OWNED(m)	do {		\
52 	if (((m)->m_qe.tqe_prev != NULL) ||		\
53 	    ((m)->m_qe.tqe_next != NULL))		\
54 		PANIC("mutex is on list");		\
55 } while (0)
56 #else
57 #define _MUTEX_INIT_LINK(m)
58 #define _MUTEX_ASSERT_IS_OWNED(m)
59 #define _MUTEX_ASSERT_NOT_OWNED(m)
60 #endif
61 
62 /*
63  * Prototypes
64  */
65 static int		get_muncontested(pthread_mutex_t, int);
66 static void		get_mcontested(pthread_mutex_t);
67 static int		mutex_init(pthread_mutex_t *, int);
68 static int		mutex_lock_common(pthread_mutex_t *, int);
69 static inline int	mutex_self_trylock(pthread_mutex_t);
70 static inline int	mutex_self_lock(pthread_mutex_t);
71 static inline int	mutex_unlock_common(pthread_mutex_t *, int);
72 static void		mutex_priority_adjust(pthread_mutex_t);
73 static void		mutex_rescan_owned (pthread_t, pthread_mutex_t);
74 static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
75 static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
76 static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
77 
78 
79 static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
80 
81 static struct pthread_mutex_attr	static_mutex_attr =
82     PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
83 static pthread_mutexattr_t		static_mattr = &static_mutex_attr;
84 
85 /* Single underscore versions provided for libc internal usage: */
86 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
87 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
88 
89 /* No difference between libc and application usage of these: */
90 __weak_reference(_pthread_mutex_init, pthread_mutex_init);
91 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
92 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
93 
94 
95 /*
96  * Reinitialize a private mutex; this is only used for internal mutexes.
97  */
98 int
99 _mutex_reinit(pthread_mutex_t * mutex)
100 {
101 	int	ret = 0;
102 
103 	if (mutex == NULL)
104 		ret = EINVAL;
105 	else if (*mutex == PTHREAD_MUTEX_INITIALIZER)
106 		ret = _pthread_mutex_init(mutex, NULL);
107 	else {
108 		/*
109 		 * Initialize the mutex structure:
110 		 */
111 		(*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
112 		(*mutex)->m_protocol = PTHREAD_PRIO_NONE;
113 		TAILQ_INIT(&(*mutex)->m_queue);
114 		(*mutex)->m_owner = NULL;
115 		(*mutex)->m_data.m_count = 0;
116 		(*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE;
117 		(*mutex)->m_refcount = 0;
118 		(*mutex)->m_prio = 0;
119 		(*mutex)->m_saved_prio = 0;
120 		_MUTEX_INIT_LINK(*mutex);
121 		memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
122 	}
123 	return (ret);
124 }
125 
126 int
127 _pthread_mutex_init(pthread_mutex_t * mutex,
128 		   const pthread_mutexattr_t * mutex_attr)
129 {
130 	enum pthread_mutextype	type;
131 	int		protocol;
132 	int		ceiling;
133 	int		flags;
134 	pthread_mutex_t	pmutex;
135 	int		ret = 0;
136 
137 	if (mutex == NULL)
138 		ret = EINVAL;
139 
140 	/* Check if default mutex attributes: */
141 	if (mutex_attr == NULL || *mutex_attr == NULL) {
142 		/* Default to a (error checking) POSIX mutex: */
143 		type = PTHREAD_MUTEX_ERRORCHECK;
144 		protocol = PTHREAD_PRIO_NONE;
145 		ceiling = PTHREAD_MAX_PRIORITY;
146 		flags = 0;
147 	}
148 
149 	/* Check mutex type: */
150 	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
151 	    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
152 		/* Return an invalid argument error: */
153 		ret = EINVAL;
154 
155 	/* Check mutex protocol: */
156 	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
157 	    ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
158 		/* Return an invalid argument error: */
159 		ret = EINVAL;
160 
161 	else {
162 		/* Use the requested mutex type and protocol: */
163 		type = (*mutex_attr)->m_type;
164 		protocol = (*mutex_attr)->m_protocol;
165 		ceiling = (*mutex_attr)->m_ceiling;
166 		flags = (*mutex_attr)->m_flags;
167 	}
168 
169 	/* Check no errors so far: */
170 	if (ret == 0) {
171 		if ((pmutex = (pthread_mutex_t)
172 		    malloc(sizeof(struct pthread_mutex))) == NULL)
173 			ret = ENOMEM;
174 		else {
175 			/* Set the mutex flags: */
176 			pmutex->m_flags = flags;
177 
178 			/* Process according to mutex type: */
179 			switch (type) {
180 			/* case PTHREAD_MUTEX_DEFAULT: */
181 			case PTHREAD_MUTEX_ERRORCHECK:
182 			case PTHREAD_MUTEX_NORMAL:
183 				/* Nothing to do here. */
184 				break;
185 
186 			/* Single UNIX Spec 2 recursive mutex: */
187 			case PTHREAD_MUTEX_RECURSIVE:
188 				/* Reset the mutex count: */
189 				pmutex->m_data.m_count = 0;
190 				break;
191 
192 			/* Trap invalid mutex types: */
193 			default:
194 				/* Return an invalid argument error: */
195 				ret = EINVAL;
196 				break;
197 			}
198 			if (ret == 0) {
199 				/* Initialise the rest of the mutex: */
200 				TAILQ_INIT(&pmutex->m_queue);
201 				pmutex->m_flags |= MUTEX_FLAGS_INITED;
202 				pmutex->m_owner = NULL;
203 				pmutex->m_type = type;
204 				pmutex->m_protocol = protocol;
205 				pmutex->m_refcount = 0;
206 				if (protocol == PTHREAD_PRIO_PROTECT)
207 					pmutex->m_prio = ceiling;
208 				else
209 					pmutex->m_prio = 0;
210 				pmutex->m_saved_prio = 0;
211 				_MUTEX_INIT_LINK(pmutex);
212 				memset(&pmutex->lock, 0, sizeof(pmutex->lock));
213 				*mutex = pmutex;
214 			} else {
215 				free(pmutex);
216 				*mutex = NULL;
217 			}
218 		}
219 	}
220 	/* Return the completion status: */
221 	return (ret);
222 }
223 
224 int
225 _pthread_mutex_destroy(pthread_mutex_t * mutex)
226 {
227 	int	ret = 0;
228 
229 	if (mutex == NULL || *mutex == NULL)
230 		ret = EINVAL;
231 	else {
232 		/* Lock the mutex structure: */
233 		_SPINLOCK(&(*mutex)->lock);
234 
235 		/*
236 		 * Check to see if this mutex is in use:
237 		 */
238 		if (((*mutex)->m_owner != NULL) ||
239 		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
240 		    ((*mutex)->m_refcount != 0)) {
241 			ret = EBUSY;
242 
243 			/* Unlock the mutex structure: */
244 			_SPINUNLOCK(&(*mutex)->lock);
245 		}
246 		else {
247 			/*
248 			 * Free the memory allocated for the mutex
249 			 * structure:
250 			 */
251 			_MUTEX_ASSERT_NOT_OWNED(*mutex);
252 
253 			/* Unlock the mutex structure: */
254 			_SPINUNLOCK(&(*mutex)->lock);
255 
256 			free(*mutex);
257 
258 			/*
259 			 * Leave the caller's pointer NULL now that
260 			 * the mutex has been destroyed:
261 			 */
262 			*mutex = NULL;
263 		}
264 	}
265 
266 	/* Return the completion status: */
267 	return (ret);
268 }
269 
270 static int
271 mutex_init(pthread_mutex_t *mutex, int private)
272 {
273 	pthread_mutexattr_t *pma;
274 	int error;
275 
276 	error = 0;
277 	pma = private ? &static_mattr : NULL;
278 	_SPINLOCK(&static_init_lock);
279 	if (*mutex == PTHREAD_MUTEX_INITIALIZER)
280 		error = _pthread_mutex_init(mutex, pma);
281 	_SPINUNLOCK(&static_init_lock);
282 	return (error);
283 }
284 
285 int
286 __pthread_mutex_trylock(pthread_mutex_t *mutex)
287 {
288 	int	ret = 0;
289 
290 	if (mutex == NULL)
291 		ret = EINVAL;
292 
293 	/*
294 	 * If the mutex is statically initialized, perform the dynamic
295 	 * initialization:
296 	 */
297 	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
298 	    (ret = mutex_init(mutex, 0)) == 0)
299 		ret = mutex_lock_common(mutex, 1);
300 
301 	return (ret);
302 }
303 
304 int
305 _pthread_mutex_trylock(pthread_mutex_t *mutex)
306 {
307 	int	ret = 0;
308 
309 	if (mutex == NULL)
310 		ret = EINVAL;
311 
312 	/*
313 	 * If the mutex is statically initialized, perform the dynamic
314 	 * initialization marking the mutex private (delete safe):
315 	 */
316 	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
317 	    (ret = mutex_init(mutex, 1)) == 0)
318 		ret = mutex_lock_common(mutex, 1);
319 
320 	return (ret);
321 }
322 
/*
 * Acquire a mutex for the calling thread.  With nonblock non-zero this
 * implements trylock semantics (returns EBUSY rather than sleeping);
 * otherwise the thread is queued on the mutex and suspended until it
 * becomes the owner.  Handles all three priority protocols.
 */
static int
mutex_lock_common(pthread_mutex_t * mutex, int nonblock)
{
	int ret, error, inCancel;

	ret = error = inCancel = 0;

	PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_lock_common");

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		/* _thread_kern_sig_defer(); */

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*mutex)->m_queue);
			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
			_MUTEX_INIT_LINK(*mutex);
		}

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * get_muncontested() returns 0 when the lock was
			 * acquired, -1 when another thread owns it, and
			 * another errno value (e.g. self-lock) otherwise.
			 */
			if ((error = get_muncontested(*mutex, nonblock)) == -1)
				if (nonblock) {
					ret = EBUSY;
					break;
				} else {
					/* Block until we become the owner: */
					get_mcontested(*mutex);
				}
			else
				ret = error;
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			if ((error = get_muncontested(*mutex, nonblock)) == 0) {
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 */
				(*mutex)->m_prio = curthread->active_priority;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;
			} else if (error == -1) {
				if (nonblock) {
					ret = EBUSY;
					break;
				} else {
					get_mcontested(*mutex);
				}

				/*
				 * If our priority exceeds the mutex priority,
				 * propagate it to the owner chain:
				 */
				if (curthread->active_priority >
				    (*mutex)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(*mutex);
			} else {
				ret = error;
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*mutex)->m_prio)
				ret = EINVAL;

			if ((error = get_muncontested(*mutex, nonblock)) == 0) {
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority:
				 */
				curthread->active_priority = (*mutex)->m_prio;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;
			} else if (error == -1) {
				if (nonblock) {
					ret = EBUSY;
					break;
				}

				/* Clear any previous error: */
				curthread->error = 0;

				get_mcontested(*mutex);

				/*
				 * The threads priority may have changed while
				 * waiting for the mutex causing a ceiling
				 * violation; the unlock path records it in
				 * curthread->error.
				 */
				ret = curthread->error;
				curthread->error = 0;
			} else {
				ret = error;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/*
		 * Check to see if this thread was interrupted and
		 * is still in the mutex queue of waiting threads:
		 */
		if (curthread->cancelflags & PTHREAD_CANCELLING) {
			if (!nonblock)
				mutex_queue_remove(*mutex, curthread);
			inCancel=1;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		/* _thread_kern_sig_undefer(); */
		if (inCancel) {
			/* Act on the pending cancellation; must not return. */
			pthread_testcancel();
			PANIC("Canceled thread came back.\n");
		}
	} while ((*mutex)->m_owner != curthread && ret == 0);

	/* Return the completion status: */
	return (ret);
}
485 
486 int
487 __pthread_mutex_lock(pthread_mutex_t *mutex)
488 {
489 	int	ret = 0;
490 
491 	if (_thread_initial == NULL)
492 		_thread_init();
493 
494 	if (mutex == NULL)
495 		ret = EINVAL;
496 
497 	/*
498 	 * If the mutex is statically initialized, perform the dynamic
499 	 * initialization:
500 	 */
501 	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
502 	    ((ret = mutex_init(mutex, 0)) == 0))
503 		ret = mutex_lock_common(mutex, 0);
504 
505 	return (ret);
506 }
507 
508 int
509 _pthread_mutex_lock(pthread_mutex_t *mutex)
510 {
511 	int	ret = 0;
512 
513 	if (_thread_initial == NULL)
514 		_thread_init();
515 
516 	if (mutex == NULL)
517 		ret = EINVAL;
518 
519 	/*
520 	 * If the mutex is statically initialized, perform the dynamic
521 	 * initialization marking it private (delete safe):
522 	 */
523 	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
524 	    ((ret = mutex_init(mutex, 1)) == 0))
525 		ret = mutex_lock_common(mutex, 0);
526 
527 	return (ret);
528 }
529 
/*
 * Public unlock entry point; releases the mutex without taking a
 * condition variable reference on it.
 */
int
_pthread_mutex_unlock(pthread_mutex_t * mutex)
{
	int ret;

	ret = mutex_unlock_common(mutex, /* add reference */ 0);
	return (ret);
}
535 
/*
 * Condition variable helper: release the mutex while taking a
 * reference on it so it cannot be destroyed during the wait.
 */
int
_mutex_cv_unlock(pthread_mutex_t * mutex)
{
	int ret;

	ret = mutex_unlock_common(mutex, /* add reference */ 1);
	return (ret);
}
541 
542 int
543 _mutex_cv_lock(pthread_mutex_t * mutex)
544 {
545 	int	ret;
546 	if ((ret = _pthread_mutex_lock(mutex)) == 0)
547 		(*mutex)->m_refcount--;
548 	return (ret);
549 }
550 
551 static inline int
552 mutex_self_trylock(pthread_mutex_t mutex)
553 {
554 	int	ret = 0;
555 
556 	switch (mutex->m_type) {
557 
558 	/* case PTHREAD_MUTEX_DEFAULT: */
559 	case PTHREAD_MUTEX_ERRORCHECK:
560 	case PTHREAD_MUTEX_NORMAL:
561 		/*
562 		 * POSIX specifies that mutexes should return EDEADLK if a
563 		 * recursive lock is detected.
564 		 */
565 		ret = EBUSY;
566 		break;
567 
568 	case PTHREAD_MUTEX_RECURSIVE:
569 		/* Increment the lock count: */
570 		mutex->m_data.m_count++;
571 		break;
572 
573 	default:
574 		/* Trap invalid mutex types; */
575 		ret = EINVAL;
576 	}
577 
578 	return (ret);
579 }
580 
/*
 * Handle a blocking lock request on a mutex the calling thread
 * already owns.  The mutex structure lock is held on entry and, for
 * the PTHREAD_MUTEX_NORMAL case, released here before the thread is
 * suspended forever.
 */
static inline int
mutex_self_lock(pthread_mutex_t mutex)
{
	int ret = 0;

	switch (mutex->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that error-checking mutexes return
		 * EDEADLK when a recursive lock is detected.
		 */
		ret = EDEADLK;
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 define as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		/* XXX Sched lock. */
		PTHREAD_SET_STATE(curthread, PS_DEADLOCK);
		/* Drop the structure lock before suspending forever: */
		_SPINUNLOCK(&(mutex)->lock);
		_thread_suspend(curthread, NULL);
		/* Nothing should ever wake a deadlocked thread: */
		PANIC("Shouldn't resume here?\n");
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		mutex->m_data.m_count++;
		break;

	default:
		/* Trap invalid mutex types; */
		ret = EINVAL;
	}

	return (ret);
}
620 
/*
 * Release a mutex owned by the calling thread.  When add_reference is
 * non-zero a reference count is taken on the mutex (used by the
 * condition variable code so the mutex survives the wait).  For the
 * priority protocols this also restores the caller's inherited and
 * active priorities and hands the mutex priority state to the new
 * owner, if any.
 */
static inline int
mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
{
	int	ret = 0;

	if (mutex == NULL || *mutex == NULL) {
		ret = EINVAL;
	} else {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		/* _thread_kern_sig_defer(); */

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of
				 * threads waiting on the mutex. The deq
				 * function will have already locked it
				 * for us.
				 */
				if (((*mutex)->m_owner =
			  	    mutex_queue_deq(*mutex)) != NULL) {
					/* Make the new owner runnable: */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					/*
					 * Add the mutex to the threads list of
					 * owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
					(*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of threads
				 * waiting on the mutex. It will already be
				 * locked for us.
				 */
				if (((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) == NULL)
					/* This mutex has no priority. */
					(*mutex)->m_prio = 0;
				else {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the threads list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Set the priority of the mutex.  Since
					 * our waiting threads are in descending
					 * priority order, the priority of the
					 * mutex becomes the active priority of
					 * the thread we just dequeued.
					 */
					(*mutex)->m_prio =
					    (*mutex)->m_owner->active_priority;

					/*
					 * Save the owning threads inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning threads inherited priority
					 * now becomes his active priority (the
					 * priority of the mutex).
					 */
					(*mutex)->m_owner->inherited_priority =
						(*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
					(*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Enter a loop to find a waiting thread whose
				 * active priority will not cause a ceiling
				 * violation. It will already be locked for us.
				 */
				while ((((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) &&
				    ((*mutex)->m_owner->active_priority >
				     (*mutex)->m_prio)) {
					/*
					 * Either the mutex ceiling priority
					 * been lowered and/or this threads
					 * priority has been raised subsequent
					 * to this thread being queued on the
					 * waiting list.
					 */
					(*mutex)->m_owner->error = EINVAL;
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
					/*
					 * The thread is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					_thread_critical_exit((*mutex)->m_owner);
				}

				/* Check for a new owner: */
				if ((*mutex)->m_owner != NULL) {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the threads list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Save the owning threads inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread inherits the
					 * ceiling priority of the mutex and
					 * executes at that priority:
					 */
					(*mutex)->m_owner->inherited_priority =
					    (*mutex)->m_prio;
					(*mutex)->m_owner->active_priority =
					    (*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0)) {
			/* Increment the reference count: */
			(*mutex)->m_refcount++;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		/* _thread_kern_sig_undefer(); */
	}

	/* Return the completion status: */
	return (ret);
}
961 
962 
/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a threads base priority can effect
 * changes to active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called while thread scheduling is deferred.
 */
void
_mutex_notify_priochange(pthread_t pthread)
{
	/* Adjust the priorites of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this threads change
		 * in priority.  This has the side effect of changing
		 * the threads active priority.
		 */
		mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also effect a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (pthread->state == PS_MUTEX_WAIT) {
		/* Lock the mutex structure: */
		_SPINLOCK(&pthread->data.mutex->lock);

		/*
		 * Check to make sure this thread is still in the same state
		 * (the spinlock above can yield the CPU to another thread):
		 */
		if (pthread->state == PS_MUTEX_WAIT) {
			/*
			 * Remove and reinsert this thread into the list of
			 * waiting threads to preserve decreasing priority
			 * order.
			 */
			mutex_queue_remove(pthread->data.mutex, pthread);
			mutex_queue_enq(pthread->data.mutex, pthread);

			if (pthread->data.mutex->m_protocol ==
			     PTHREAD_PRIO_INHERIT) {
				/* Adjust priorities: */
				mutex_priority_adjust(pthread->data.mutex);
			}
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&pthread->data.mutex->lock);
	}
}
1026 
/*
 * Called when a new thread is added to the mutex waiting queue or
 * when a threads priority changes that is already in the mutex
 * waiting queue.
 *
 * Propagates the highest waiter priority along the chain of mutex
 * owners (priority inheritance), stopping when an owner's priority
 * is unaffected or it is not itself waiting on an inheritance mutex.
 */
static void
mutex_priority_adjust(pthread_mutex_t mutex)
{
	pthread_t	pthread_next, pthread = mutex->m_owner;
	int		temp_prio;
	pthread_mutex_t	m = mutex;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning threads
	 * active priority(*).
	 *
	 * (*) Because the owning threads current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the threads saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	while (m != NULL) {
		/*
		 * Save the threads priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all the mutexes this thread has
		 * locked since taking this mutex.  This also has a
		 * potential side-effect of changing the threads priority.
		 */
		mutex_rescan_owned(pthread, m);

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the threads new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    (pthread->state == PS_MUTEX_WAIT) &&
		    (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Grab the mutex this thread is waiting on: */
			m = pthread->data.mutex;

			/*
			 * The priority for this thread has changed.  Remove
			 * and reinsert this thread into the list of waiting
			 * threads to preserve decreasing priority order.
			 */
			mutex_queue_remove(m, pthread);
			mutex_queue_enq(m, pthread);

			/* Grab the waiting thread with highest priority: */
			pthread_next = TAILQ_FIRST(&m->m_queue);

			/*
			 * Calculate the mutex priority as the maximum of the
			 * highest active priority of any waiting threads and
			 * the owning threads active priority.
			 */
			temp_prio = MAX(pthread_next->active_priority,
			    MAX(m->m_saved_prio, m->m_owner->base_priority));

			if (temp_prio != m->m_prio) {
				/*
				 * The priority needs to be propagated to the
				 * mutex this thread is waiting on and up to
				 * the owner of that mutex.
				 */
				m->m_prio = temp_prio;
				pthread = m->m_owner;
			}
			else
				/* We're done: */
				m = NULL;

		}
		else
			/* We're done: */
			m = NULL;
	}
}
1125 
/*
 * Recompute the priorities of the priority inheritence mutexes owned
 * by "pthread", starting with the mutex after "mutex" in the thread's
 * owned-mutex list (or at the head of the list when "mutex" is NULL),
 * then recompute the thread's inherited and active priorities from
 * the result.  When "mutex" is non-NULL its own priority is assumed
 * to be already correct and is used as the starting inherited
 * priority.
 */
static void
mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
{
	int		active_prio, inherited_prio;
	pthread_mutex_t	m;
	pthread_t	pthread_next;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	}
	else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritence
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	while (m != NULL) {
		/*
		 * We only want to deal with priority inheritence
		 * mutexes.  This might be optimized by only placing
		 * priority inheritence mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owners saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				/* No waiters; inherited priority only. */
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}

		/* Advance to the next mutex owned by this thread: */
		m = TAILQ_NEXT(m, m_qe);
	}

	/*
	 * Fix the threads inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
#if 0
		/*
		 * If this thread is in the priority queue, it must be
		 * removed and reinserted for its new priority.
	 	 */
		if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
			/*
			 * Remove the thread from the priority queue
			 * before changing its priority:
			 */
			PTHREAD_PRIOQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritence mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_HEAD(pthread);
			}
			else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_TAIL(pthread);
			}
		}
		else {
			/* Set the new active priority. */
			pthread->active_priority = active_prio;
		}
#endif
		/* Priority queue requeueing above is disabled (#if 0). */
		pthread->active_priority = active_prio;
	}
}
1241 
1242 void
1243 _mutex_unlock_private(pthread_t pthread)
1244 {
1245 	struct pthread_mutex	*m, *m_next;
1246 
1247 	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1248 		m_next = TAILQ_NEXT(m, m_qe);
1249 		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1250 			_pthread_mutex_unlock(&m);
1251 	}
1252 }
1253 
/*
 * Back a thread out of a mutex lock attempt: if "pthread" is still on
 * the waiting queue of the mutex it is blocked on, remove it and clear
 * its data.mutex reference.
 *
 * NOTE(review): data.mutex is read under the thread lock, but the
 * mutex lock is only taken after the thread lock is dropped —
 * presumably callers guarantee the mutex pointer stays valid across
 * that window; confirm.
 */
void
_mutex_lock_backout(pthread_t pthread)
{
	struct pthread_mutex	*mutex;

	/*
	 * Defer signals to protect the scheduling queues from
	 * access by the signal handler:
	 */
	/* _thread_kern_sig_defer();*/

	/* XXX - Necessary to obey lock order */
	_SPINLOCK(&pthread->lock);
	mutex = pthread->data.mutex;
	_SPINUNLOCK(&pthread->lock);

	_SPINLOCK(&mutex->lock);

	_thread_critical_enter(pthread);
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {

		mutex_queue_remove(mutex, pthread);

		/* This thread is no longer waiting for the mutex: */
		pthread->data.mutex = NULL;

	}
	/*
	 * Undefer and handle pending signals, yielding if
	 * necessary:
	 */
	/* _thread_kern_sig_undefer(); */

	_thread_critical_exit(pthread);
	_SPINUNLOCK(&mutex->lock);
}
1290 
1291 /*
1292  * Dequeue a waiting thread from the head of a mutex queue in descending
1293  * priority order. This funtion will return with the thread locked.
1294  */
static inline pthread_t
mutex_queue_deq(pthread_mutex_t mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		/* Lock the thread while it is taken off the queue. */
		_thread_critical_enter(pthread);
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		if ((pthread->cancelflags & PTHREAD_CANCELLING) == 0 &&
		    pthread->state == PS_MUTEX_WAIT)
			break;
		else
			/* Skip this waiter; release it and try the next. */
			_thread_critical_exit(pthread);
	}

	/* NULL if the queue was empty or every waiter was skipped. */
	return (pthread);
}
1318 
1319 /*
1320  * Remove a waiting thread from a mutex queue in descending priority order.
1321  */
1322 static inline void
1323 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1324 {
1325 	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1326 		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1327 		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1328 	}
1329 }
1330 
1331 /*
1332  * Enqueue a waiting thread to a queue in descending priority order.
1333  */
1334 static inline void
1335 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1336 {
1337 	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1338 	char *name;
1339 
1340 	name = pthread->name ? pthread->name : "unknown";
1341 	if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0)
1342 		_thread_printf(2, "Thread (%s:%u) already on condq\n",
1343 		    pthread->name, pthread->uniqueid);
1344 	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0)
1345 		_thread_printf(2, "Thread (%s:%u) already on mutexq\n",
1346 		    pthread->name, pthread->uniqueid);
1347 	PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
1348 	/*
1349 	 * For the common case of all threads having equal priority,
1350 	 * we perform a quick check against the priority of the thread
1351 	 * at the tail of the queue.
1352 	 */
1353 	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1354 		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1355 	else {
1356 		tid = TAILQ_FIRST(&mutex->m_queue);
1357 		while (pthread->active_priority <= tid->active_priority)
1358 			tid = TAILQ_NEXT(tid, sqe);
1359 		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1360 	}
1361 	pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
1362 }
1363 
1364 /*
1365  * Returns with the lock owned and on the threads mutexq if
1366  * it is currently unowned. Returns 1, otherwise.
1367  */
1368 static int
1369 get_muncontested(pthread_mutex_t mutexp, int nonblock)
1370 {
1371 	if (mutexp->m_owner != NULL && mutexp->m_owner != curthread) {
1372 		return (-1);
1373 	} else if (mutexp->m_owner == curthread) {
1374 		if (nonblock)
1375 			return (mutex_self_trylock(mutexp));
1376 		else
1377 			return (mutex_self_lock(mutexp));
1378 	}
1379 
1380 	/*
1381 	 * The mutex belongs to this thread now. Mark it as
1382 	 * such. Add it to the list of mutexes owned by this
1383 	 * thread.
1384 	 */
1385 	mutexp->m_owner = curthread;
1386 	_MUTEX_ASSERT_NOT_OWNED(mutexp);
1387 	TAILQ_INSERT_TAIL(&curthread->mutexq, mutexp, m_qe);
1388 	return (0);
1389 }
1390 
1391 /*
1392  * Returns with the lock owned and on the thread's mutexq. If
1393  * the mutex is currently owned by another thread it will sleep
1394  * until it is available.
1395  */
static void
get_mcontested(pthread_mutex_t mutexp)
{
	_thread_critical_enter(curthread);

	/*
	 * Put this thread on the mutex's list of waiting threads.
	 * The lock on the thread ensures atomic (as far as other
	 * threads are concerned) setting of the thread state with
	 * it's status on the mutex queue.
	 */
	do {
		mutex_queue_enq(mutexp, curthread);
		PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT);
		curthread->data.mutex = mutexp;
		_thread_critical_exit(curthread);
		/* The mutex lock cannot be held while suspended. */
		_SPINUNLOCK(&mutexp->lock);
		_thread_suspend(curthread, NULL);

		_SPINLOCK(&mutexp->lock);
		_thread_critical_enter(curthread);
		/*
		 * Retry while still flagged as queued, i.e. the wakeup
		 * did not come from a thread granting us the mutex.
		 */
	} while ((curthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0);

	_thread_critical_exit(curthread);
}
1421