xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision 6e1aa51e9e0951277be41e890df0725099ae38ae)
1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by John Birrell.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 #include <stdlib.h>
35 #include <errno.h>
36 #include <string.h>
37 #include <sys/param.h>
38 #include <sys/queue.h>
39 #include <pthread.h>
40 #include "thr_private.h"
41 
42 #if defined(_PTHREADS_INVARIANTS)
43 #define _MUTEX_INIT_LINK(m) 		do {		\
44 	(m)->m_qe.tqe_prev = NULL;			\
45 	(m)->m_qe.tqe_next = NULL;			\
46 } while (0)
47 #define _MUTEX_ASSERT_IS_OWNED(m)	do {		\
48 	if ((m)->m_qe.tqe_prev == NULL)			\
49 		PANIC("mutex is not on list");		\
50 } while (0)
51 #define _MUTEX_ASSERT_NOT_OWNED(m)	do {		\
52 	if (((m)->m_qe.tqe_prev != NULL) ||		\
53 	    ((m)->m_qe.tqe_next != NULL))		\
54 		PANIC("mutex is on list");		\
55 } while (0)
56 #else
57 #define _MUTEX_INIT_LINK(m)
58 #define _MUTEX_ASSERT_IS_OWNED(m)
59 #define _MUTEX_ASSERT_NOT_OWNED(m)
60 #endif
61 
62 /*
63  * Prototypes
64  */
65 static int		get_muncontested(pthread_mutex_t, int);
66 static void		get_mcontested(pthread_mutex_t);
67 static int		mutex_lock_common(pthread_mutex_t *, int);
68 static inline int	mutex_self_trylock(pthread_mutex_t);
69 static inline int	mutex_self_lock(pthread_mutex_t);
70 static inline int	mutex_unlock_common(pthread_mutex_t *, int);
71 static void		mutex_priority_adjust(pthread_mutex_t);
72 static void		mutex_rescan_owned (pthread_t, pthread_mutex_t);
73 static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
74 static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
75 static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
76 
77 
78 static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
79 
80 static struct pthread_mutex_attr	static_mutex_attr =
81     PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
82 static pthread_mutexattr_t		static_mattr = &static_mutex_attr;
83 
84 /* Single underscore versions provided for libc internal usage: */
85 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
86 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
87 
88 /* No difference between libc and application usage of these: */
89 __weak_reference(_pthread_mutex_init, pthread_mutex_init);
90 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
91 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
92 
93 
94 /*
95  * Reinitialize a private mutex; this is only used for internal mutexes.
96  */
97 int
98 _mutex_reinit(pthread_mutex_t * mutex)
99 {
100 	int	ret = 0;
101 
102 	if (mutex == NULL)
103 		ret = EINVAL;
104 	else if (*mutex == PTHREAD_MUTEX_INITIALIZER)
105 		ret = _pthread_mutex_init(mutex, NULL);
106 	else {
107 		/*
108 		 * Initialize the mutex structure:
109 		 */
110 		(*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
111 		(*mutex)->m_protocol = PTHREAD_PRIO_NONE;
112 		TAILQ_INIT(&(*mutex)->m_queue);
113 		(*mutex)->m_owner = NULL;
114 		(*mutex)->m_data.m_count = 0;
115 		(*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE;
116 		(*mutex)->m_refcount = 0;
117 		(*mutex)->m_prio = 0;
118 		(*mutex)->m_saved_prio = 0;
119 		_MUTEX_INIT_LINK(*mutex);
120 		memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
121 	}
122 	return (ret);
123 }
124 
125 int
126 _pthread_mutex_init(pthread_mutex_t * mutex,
127 		   const pthread_mutexattr_t * mutex_attr)
128 {
129 	enum pthread_mutextype	type;
130 	int		protocol;
131 	int		ceiling;
132 	int		flags;
133 	pthread_mutex_t	pmutex;
134 	int		ret = 0;
135 
136 	if (mutex == NULL)
137 		ret = EINVAL;
138 
139 	/* Check if default mutex attributes: */
140 	if (mutex_attr == NULL || *mutex_attr == NULL) {
141 		/* Default to a (error checking) POSIX mutex: */
142 		type = PTHREAD_MUTEX_ERRORCHECK;
143 		protocol = PTHREAD_PRIO_NONE;
144 		ceiling = PTHREAD_MAX_PRIORITY;
145 		flags = 0;
146 	}
147 
148 	/* Check mutex type: */
149 	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
150 	    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
151 		/* Return an invalid argument error: */
152 		ret = EINVAL;
153 
154 	/* Check mutex protocol: */
155 	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
156 	    ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
157 		/* Return an invalid argument error: */
158 		ret = EINVAL;
159 
160 	else {
161 		/* Use the requested mutex type and protocol: */
162 		type = (*mutex_attr)->m_type;
163 		protocol = (*mutex_attr)->m_protocol;
164 		ceiling = (*mutex_attr)->m_ceiling;
165 		flags = (*mutex_attr)->m_flags;
166 	}
167 
168 	/* Check no errors so far: */
169 	if (ret == 0) {
170 		if ((pmutex = (pthread_mutex_t)
171 		    malloc(sizeof(struct pthread_mutex))) == NULL)
172 			ret = ENOMEM;
173 		else {
174 			/* Set the mutex flags: */
175 			pmutex->m_flags = flags;
176 
177 			/* Process according to mutex type: */
178 			switch (type) {
179 			/* case PTHREAD_MUTEX_DEFAULT: */
180 			case PTHREAD_MUTEX_ERRORCHECK:
181 			case PTHREAD_MUTEX_NORMAL:
182 				/* Nothing to do here. */
183 				break;
184 
185 			/* Single UNIX Spec 2 recursive mutex: */
186 			case PTHREAD_MUTEX_RECURSIVE:
187 				/* Reset the mutex count: */
188 				pmutex->m_data.m_count = 0;
189 				break;
190 
191 			/* Trap invalid mutex types: */
192 			default:
193 				/* Return an invalid argument error: */
194 				ret = EINVAL;
195 				break;
196 			}
197 			if (ret == 0) {
198 				/* Initialise the rest of the mutex: */
199 				TAILQ_INIT(&pmutex->m_queue);
200 				pmutex->m_flags |= MUTEX_FLAGS_INITED;
201 				pmutex->m_owner = NULL;
202 				pmutex->m_type = type;
203 				pmutex->m_protocol = protocol;
204 				pmutex->m_refcount = 0;
205 				if (protocol == PTHREAD_PRIO_PROTECT)
206 					pmutex->m_prio = ceiling;
207 				else
208 					pmutex->m_prio = 0;
209 				pmutex->m_saved_prio = 0;
210 				_MUTEX_INIT_LINK(pmutex);
211 				memset(&pmutex->lock, 0, sizeof(pmutex->lock));
212 				*mutex = pmutex;
213 			} else {
214 				free(pmutex);
215 				*mutex = NULL;
216 			}
217 		}
218 	}
219 	/* Return the completion status: */
220 	return (ret);
221 }
222 
223 int
224 _pthread_mutex_destroy(pthread_mutex_t * mutex)
225 {
226 	int	ret = 0;
227 
228 	if (mutex == NULL || *mutex == NULL)
229 		ret = EINVAL;
230 	else {
231 		/* Lock the mutex structure: */
232 		_SPINLOCK(&(*mutex)->lock);
233 
234 		/*
235 		 * Check to see if this mutex is in use:
236 		 */
237 		if (((*mutex)->m_owner != NULL) ||
238 		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
239 		    ((*mutex)->m_refcount != 0)) {
240 			ret = EBUSY;
241 
242 			/* Unlock the mutex structure: */
243 			_SPINUNLOCK(&(*mutex)->lock);
244 		}
245 		else {
246 			/*
247 			 * Free the memory allocated for the mutex
248 			 * structure:
249 			 */
250 			_MUTEX_ASSERT_NOT_OWNED(*mutex);
251 
252 			/* Unlock the mutex structure: */
253 			_SPINUNLOCK(&(*mutex)->lock);
254 
255 			free(*mutex);
256 
257 			/*
258 			 * Leave the caller's pointer NULL now that
259 			 * the mutex has been destroyed:
260 			 */
261 			*mutex = NULL;
262 		}
263 	}
264 
265 	/* Return the completion status: */
266 	return (ret);
267 }
268 
269 static int
270 init_static(pthread_mutex_t *mutex)
271 {
272 	int error = 0;
273 	_SPINLOCK(&static_init_lock);
274 	if (*mutex == PTHREAD_MUTEX_INITIALIZER)
275 		error = _pthread_mutex_init(mutex, NULL);
276 	_SPINUNLOCK(&static_init_lock);
277 	return (error);
278 }
279 
280 static int
281 init_static_private(pthread_mutex_t *mutex)
282 {
283 	int error = 0;
284 	_SPINLOCK(&static_init_lock);
285 	if (*mutex == PTHREAD_MUTEX_INITIALIZER)
286 		error = _pthread_mutex_init(mutex, &static_mattr);
287 	_SPINUNLOCK(&static_init_lock);
288 	return (error);
289 }
290 
291 int
292 __pthread_mutex_trylock(pthread_mutex_t *mutex)
293 {
294 	int	ret = 0;
295 
296 	if (mutex == NULL)
297 		ret = EINVAL;
298 
299 	/*
300 	 * If the mutex is statically initialized, perform the dynamic
301 	 * initialization:
302 	 */
303 	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
304 	    (ret = init_static(mutex)) == 0)
305 		ret = mutex_lock_common(mutex, 1);
306 
307 	return (ret);
308 }
309 
310 int
311 _pthread_mutex_trylock(pthread_mutex_t *mutex)
312 {
313 	int	ret = 0;
314 
315 	if (mutex == NULL)
316 		ret = EINVAL;
317 
318 	/*
319 	 * If the mutex is statically initialized, perform the dynamic
320 	 * initialization marking the mutex private (delete safe):
321 	 */
322 	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
323 	    (ret = init_static_private(mutex)) == 0)
324 		ret = mutex_lock_common(mutex, 1);
325 
326 	return (ret);
327 }
328 
/*
 * Common lock path for all mutex protocols.  With nonblock != 0 this
 * implements trylock semantics (EBUSY instead of queuing); otherwise
 * the calling thread may be queued and suspended until it becomes the
 * owner.  Returns 0 on success or an errno value.
 */
static int
mutex_lock_common(pthread_mutex_t * mutex, int nonblock)
{
	int ret, error, inCancel;

	ret = error = inCancel = 0;

	PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_lock_common");

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		/* _thread_kern_sig_defer(); */

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*mutex)->m_queue);
			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
			_MUTEX_INIT_LINK(*mutex);
		}

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * get_muncontested() returns 0 on acquisition,
			 * -1 when the mutex is held by someone else, or
			 * an errno value (e.g. self-lock result).
			 */
			if ((error = get_muncontested(*mutex, nonblock)) == -1)
				if (nonblock) {
					ret = EBUSY;
					break;
				} else {
					get_mcontested(*mutex);
				}
			else
				ret = error;
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			if ((error = get_muncontested(*mutex, nonblock)) == 0) {
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 */
				(*mutex)->m_prio = curthread->active_priority;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;
			} else if (error == -1) {
				if (nonblock) {
					ret = EBUSY;
					break;
				} else {
					get_mcontested(*mutex);
				}

				/*
				 * Boost the owner if we out-rank the
				 * mutex's current priority:
				 */
				if (curthread->active_priority >
				    (*mutex)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(*mutex);
			} else {
				ret = error;
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check for a priority ceiling violation.
			 * NOTE(review): ret is set to EINVAL here but the
			 * code below still attempts the acquisition —
			 * confirm this ordering is intentional.
			 */
			if (curthread->active_priority > (*mutex)->m_prio)
				ret = EINVAL;

			if ((error = get_muncontested(*mutex, nonblock)) == 0) {
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority:
				 */
				curthread->active_priority = (*mutex)->m_prio;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;
			} else if (error == -1) {
				if (nonblock) {
					ret = EBUSY;
					break;
				}

				/* Clear any previous error: */
				curthread->error = 0;

				get_mcontested(*mutex);

				/*
				 * The threads priority may have changed while
				 * waiting for the mutex causing a ceiling
				 * violation.
				 */
				ret = curthread->error;
				curthread->error = 0;
			} else {
				ret = error;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/*
		 * Check to see if this thread was interrupted and
		 * is still in the mutex queue of waiting threads:
		 */
		if (curthread->cancelflags & PTHREAD_CANCELLING) {
			if (!nonblock)
				mutex_queue_remove(*mutex, curthread);
			inCancel=1;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		/* _thread_kern_sig_undefer(); */
		if (inCancel) {
			pthread_testcancel();
			PANIC("Canceled thread came back.\n");
		}
		/*
		 * NOTE(review): m_owner is read here without the mutex
		 * structure lock held — presumably safe because a stale
		 * read only causes another loop iteration; confirm.
		 */
	} while ((*mutex)->m_owner != curthread && ret == 0);

	/* Return the completion status: */
	return (ret);
}
491 
492 int
493 __pthread_mutex_lock(pthread_mutex_t *mutex)
494 {
495 	int	ret = 0;
496 
497 	if (_thread_initial == NULL)
498 		_thread_init();
499 
500 	if (mutex == NULL)
501 		ret = EINVAL;
502 
503 	/*
504 	 * If the mutex is statically initialized, perform the dynamic
505 	 * initialization:
506 	 */
507 	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
508 	    ((ret = init_static(mutex)) == 0))
509 		ret = mutex_lock_common(mutex, 0);
510 
511 	return (ret);
512 }
513 
514 int
515 _pthread_mutex_lock(pthread_mutex_t *mutex)
516 {
517 	int	ret = 0;
518 
519 	if (_thread_initial == NULL)
520 		_thread_init();
521 
522 	if (mutex == NULL)
523 		ret = EINVAL;
524 
525 	/*
526 	 * If the mutex is statically initialized, perform the dynamic
527 	 * initialization marking it private (delete safe):
528 	 */
529 	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
530 	    ((ret = init_static_private(mutex)) == 0))
531 		ret = mutex_lock_common(mutex, 0);
532 
533 	return (ret);
534 }
535 
/*
 * Ordinary unlock: release the mutex without taking a condition
 * variable reference on it.
 */
int
_pthread_mutex_unlock(pthread_mutex_t * mutex)
{
	int	ret;

	ret = mutex_unlock_common(mutex, /* add reference */ 0);
	return (ret);
}
541 
/*
 * Unlock on behalf of a condition variable wait: the mutex gains a
 * reference so it cannot be destroyed while the CV still needs it.
 */
int
_mutex_cv_unlock(pthread_mutex_t * mutex)
{
	int	ret;

	ret = mutex_unlock_common(mutex, /* add reference */ 1);
	return (ret);
}
547 
548 int
549 _mutex_cv_lock(pthread_mutex_t * mutex)
550 {
551 	int	ret;
552 	if ((ret = _pthread_mutex_lock(mutex)) == 0)
553 		(*mutex)->m_refcount--;
554 	return (ret);
555 }
556 
557 static inline int
558 mutex_self_trylock(pthread_mutex_t mutex)
559 {
560 	int	ret = 0;
561 
562 	switch (mutex->m_type) {
563 
564 	/* case PTHREAD_MUTEX_DEFAULT: */
565 	case PTHREAD_MUTEX_ERRORCHECK:
566 	case PTHREAD_MUTEX_NORMAL:
567 		/*
568 		 * POSIX specifies that mutexes should return EDEADLK if a
569 		 * recursive lock is detected.
570 		 */
571 		ret = EBUSY;
572 		break;
573 
574 	case PTHREAD_MUTEX_RECURSIVE:
575 		/* Increment the lock count: */
576 		mutex->m_data.m_count++;
577 		break;
578 
579 	default:
580 		/* Trap invalid mutex types; */
581 		ret = EINVAL;
582 	}
583 
584 	return (ret);
585 }
586 
587 static inline int
588 mutex_self_lock(pthread_mutex_t mutex)
589 {
590 	int ret = 0;
591 
592 	switch (mutex->m_type) {
593 	/* case PTHREAD_MUTEX_DEFAULT: */
594 	case PTHREAD_MUTEX_ERRORCHECK:
595 		/*
596 		 * POSIX specifies that mutexes should return EDEADLK if a
597 		 * recursive lock is detected.
598 		 */
599 		ret = EDEADLK;
600 		break;
601 
602 	case PTHREAD_MUTEX_NORMAL:
603 		/*
604 		 * What SS2 define as a 'normal' mutex.  Intentionally
605 		 * deadlock on attempts to get a lock you already own.
606 		 */
607 		/* XXX Sched lock. */
608 		PTHREAD_SET_STATE(curthread, PS_DEADLOCK);
609 		_SPINUNLOCK(&(mutex)->lock);
610 		_thread_suspend(curthread, NULL);
611 		PANIC("Shouldn't resume here?\n");
612 		break;
613 
614 	case PTHREAD_MUTEX_RECURSIVE:
615 		/* Increment the lock count: */
616 		mutex->m_data.m_count++;
617 		break;
618 
619 	default:
620 		/* Trap invalid mutex types; */
621 		ret = EINVAL;
622 	}
623 
624 	return (ret);
625 }
626 
/*
 * Common unlock path for all mutex protocols.  Decrements the recursion
 * count if applicable; otherwise removes the mutex from the owner's
 * queue and hands it directly to the highest-priority waiter (if any),
 * making that thread runnable.  With add_reference != 0, a successful
 * unlock also bumps m_refcount (condition-variable hand-off).
 * Returns 0 on success, EINVAL/EPERM on ownership errors.
 */
static inline int
mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
{
	int	ret = 0;

	if (mutex == NULL || *mutex == NULL) {
		ret = EINVAL;
	} else {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		/* _thread_kern_sig_defer(); */

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of
				 * threads waiting on the mutex. The deq
				 * function will have already locked it
				 * for us.
				 */
				if (((*mutex)->m_owner =
			  	    mutex_queue_deq(*mutex)) != NULL) {
					/* Make the new owner runnable: */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					/*
					 * Add the mutex to the threads list of
					 * owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
					(*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of threads
				 * waiting on the mutex. It will already be
				 * locked for us.
				 */
				if (((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) == NULL)
					/* This mutex has no priority. */
					(*mutex)->m_prio = 0;
				else {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the threads list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Set the priority of the mutex.  Since
					 * our waiting threads are in descending
					 * priority order, the priority of the
					 * mutex becomes the active priority of
					 * the thread we just dequeued.
					 */
					(*mutex)->m_prio =
					    (*mutex)->m_owner->active_priority;

					/*
					 * Save the owning threads inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning threads inherited priority
					 * now becomes his active priority (the
					 * priority of the mutex).
					 */
					(*mutex)->m_owner->inherited_priority =
						(*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
					(*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Enter a loop to find a waiting thread whose
				 * active priority will not cause a ceiling
				 * violation. It will already be locked for us.
				 */
				while ((((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) &&
				    ((*mutex)->m_owner->active_priority >
				     (*mutex)->m_prio)) {
					/*
					 * Either the mutex ceiling priority
					 * been lowered and/or this threads
					 * priority has been raised subsequent
					 * to this thread being queued on the
					 * waiting list.
					 */
					(*mutex)->m_owner->error = EINVAL;
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
					/*
					 * The thread is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					_thread_critical_exit((*mutex)->m_owner);
				}

				/* Check for a new owner: */
				if ((*mutex)->m_owner != NULL) {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the threads list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Save the owning threads inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread inherits the
					 * ceiling priority of the mutex and
					 * executes at that priority:
					 */
					(*mutex)->m_owner->inherited_priority =
					    (*mutex)->m_prio;
					(*mutex)->m_owner->active_priority =
					    (*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0)) {
			/* Increment the reference count: */
			(*mutex)->m_refcount++;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		/* _thread_kern_sig_undefer(); */
	}

	/* Return the completion status: */
	return (ret);
}
967 
968 
/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritence mutex.  A change in a threads base priority can effect
 * changes to active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called while thread scheduling is deferred.
 */
void
_mutex_notify_priochange(pthread_t pthread)
{
	/* Adjust the priorites of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this threads change
		 * in priority.  This has the side effect of changing
		 * the threads active priority.
		 */
		mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
	}

	/*
	 * If this thread is waiting on a priority inheritence mutex,
	 * check for priority adjustments.  A change in priority can
	 * also effect a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (pthread->state == PS_MUTEX_WAIT) {
		/* Lock the mutex structure: */
		_SPINLOCK(&pthread->data.mutex->lock);

		/*
		 * Check to make sure this thread is still in the same state
		 * (the spinlock above can yield the CPU to another thread):
		 */
		if (pthread->state == PS_MUTEX_WAIT) {
			/*
			 * Remove and reinsert this thread into the list of
			 * waiting threads to preserve decreasing priority
			 * order.
			 */
			mutex_queue_remove(pthread->data.mutex, pthread);
			mutex_queue_enq(pthread->data.mutex, pthread);

			/* Inheritance mutexes propagate the new priority: */
			if (pthread->data.mutex->m_protocol ==
			     PTHREAD_PRIO_INHERIT) {
				/* Adjust priorities: */
				mutex_priority_adjust(pthread->data.mutex);
			}
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&pthread->data.mutex->lock);
	}
}
1032 
/*
 * Called when a new thread is added to the mutex waiting queue or
 * when a threads priority changes that is already in the mutex
 * waiting queue.
 *
 * Walks the chain owner -> mutex-the-owner-waits-on -> its owner ...,
 * propagating the raised priority up through PRIO_INHERIT mutexes
 * until a mutex's priority no longer changes.
 */
static void
mutex_priority_adjust(pthread_mutex_t mutex)
{
	pthread_t	pthread_next, pthread = mutex->m_owner;
	int		temp_prio;
	pthread_mutex_t	m = mutex;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning threads
	 * active priority(*).
	 *
	 * (*) Because the owning threads current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the threads saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/* m is set to NULL below once no further propagation is needed. */
	while (m != NULL) {
		/*
		 * Save the threads priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all the mutexes this thread has
		 * locked since taking this mutex.  This also has a
		 * potential side-effect of changing the threads priority.
		 */
		mutex_rescan_owned(pthread, m);

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the threads new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    (pthread->state == PS_MUTEX_WAIT) &&
		    (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Grab the mutex this thread is waiting on: */
			m = pthread->data.mutex;

			/*
			 * The priority for this thread has changed.  Remove
			 * and reinsert this thread into the list of waiting
			 * threads to preserve decreasing priority order.
			 */
			mutex_queue_remove(m, pthread);
			mutex_queue_enq(m, pthread);

			/* Grab the waiting thread with highest priority: */
			pthread_next = TAILQ_FIRST(&m->m_queue);

			/*
			 * Calculate the mutex priority as the maximum of the
			 * highest active priority of any waiting threads and
			 * the owning threads active priority.
			 */
			temp_prio = MAX(pthread_next->active_priority,
			    MAX(m->m_saved_prio, m->m_owner->base_priority));

			if (temp_prio != m->m_prio) {
				/*
				 * The priority needs to be propagated to the
				 * mutex this thread is waiting on and up to
				 * the owner of that mutex.
				 */
				m->m_prio = temp_prio;
				pthread = m->m_owner;
			}
			else
				/* We're done: */
				m = NULL;

		}
		else
			/* We're done: */
			m = NULL;
	}
}
1131 
/*
 * Rescan the priority inheritance mutexes owned by "pthread" that were
 * acquired after "mutex" (all owned mutexes when "mutex" is NULL),
 * recomputing each mutex's priority in list order, and then update the
 * thread's inherited and active priorities to match the result.
 */
static void
mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
{
	int		active_prio, inherited_prio;
	pthread_mutex_t	m;
	pthread_t	pthread_next;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	}
	else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritence
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	while (m != NULL) {
		/*
		 * We only want to deal with priority inheritence
		 * mutexes.  This might be optimized by only placing
		 * priority inheritence mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owners saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				/* No waiters; only the inherited/base chain. */
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}

		/* Advance to the next mutex owned by this thread: */
		m = TAILQ_NEXT(m, m_qe);
	}

	/*
	 * Fix the threads inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
#if 0
		/*
		 * If this thread is in the priority queue, it must be
		 * removed and reinserted for its new priority.
	 	 */
		if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
			/*
			 * Remove the thread from the priority queue
			 * before changing its priority:
			 */
			PTHREAD_PRIOQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritence mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_HEAD(pthread);
			}
			else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_TAIL(pthread);
			}
		}
		else {
			/* Set the new active priority. */
			pthread->active_priority = active_prio;
		}
#endif
		/* Priority-queue requeueing above is disabled; just record it. */
		pthread->active_priority = active_prio;
	}
}
1247 
1248 void
1249 _mutex_unlock_private(pthread_t pthread)
1250 {
1251 	struct pthread_mutex	*m, *m_next;
1252 
1253 	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1254 		m_next = TAILQ_NEXT(m, m_qe);
1255 		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1256 			_pthread_mutex_unlock(&m);
1257 	}
1258 }
1259 
/*
 * Back a thread out of a pending mutex lock: take it off the wait
 * queue of the mutex it is blocked on and clear its wait reference.
 * NOTE(review): assumes pthread->data.mutex is non-NULL on entry --
 * confirm against callers before relying on this path.
 */
void
_mutex_lock_backout(pthread_t pthread)
{
	struct pthread_mutex	*mutex;

	/*
	 * Defer signals to protect the scheduling queues from
	 * access by the signal handler:
	 */
	/* _thread_kern_sig_defer();*/

	/* XXX - Necessary to obey lock order */
	/*
	 * The mutex pointer is sampled under the thread lock alone,
	 * then the locks are retaken in mutex-then-thread order below.
	 */
	_SPINLOCK(&pthread->lock);
	mutex = pthread->data.mutex;
	_SPINUNLOCK(&pthread->lock);

	_SPINLOCK(&mutex->lock);

	_thread_critical_enter(pthread);
	/* Only dequeue if the thread is still on a mutex wait queue. */
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {

		mutex_queue_remove(mutex, pthread);

		/* This thread is no longer waiting for the mutex: */
		pthread->data.mutex = NULL;

	}
	/*
	 * Undefer and handle pending signals, yielding if
	 * necessary:
	 */
	/* _thread_kern_sig_undefer(); */

	_thread_critical_exit(pthread);
	_SPINUNLOCK(&mutex->lock);
}
1296 
1297 /*
1298  * Dequeue a waiting thread from the head of a mutex queue in descending
1299  * priority order. This funtion will return with the thread locked.
1300  */
static inline pthread_t
mutex_queue_deq(pthread_mutex_t mutex)
{
	pthread_t pthread;

	/*
	 * Pop waiters from the head until one is found that is neither
	 * being cancelled nor out of the PS_MUTEX_WAIT state; rejected
	 * threads are unlinked from the queue and unlocked.  Returns
	 * NULL (with no thread locked) if the queue drains.
	 */
	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		_thread_critical_enter(pthread);
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		if ((pthread->cancelflags & PTHREAD_CANCELLING) == 0 &&
		    pthread->state == PS_MUTEX_WAIT)
			/* Selected thread is returned still locked. */
			break;
		else
			_thread_critical_exit(pthread);
	}

	return (pthread);
}
1324 
1325 /*
1326  * Remove a waiting thread from a mutex queue in descending priority order.
1327  */
1328 static inline void
1329 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1330 {
1331 	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1332 		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1333 		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1334 	}
1335 }
1336 
1337 /*
1338  * Enqueue a waiting thread to a queue in descending priority order.
1339  */
1340 static inline void
1341 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1342 {
1343 	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1344 	char *name;
1345 
1346 	name = pthread->name ? pthread->name : "unknown";
1347 	if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0)
1348 		_thread_printf(2, "Thread (%s:%u) already on condq\n",
1349 		    pthread->name, pthread->uniqueid);
1350 	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0)
1351 		_thread_printf(2, "Thread (%s:%u) already on mutexq\n",
1352 		    pthread->name, pthread->uniqueid);
1353 	PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
1354 	/*
1355 	 * For the common case of all threads having equal priority,
1356 	 * we perform a quick check against the priority of the thread
1357 	 * at the tail of the queue.
1358 	 */
1359 	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1360 		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1361 	else {
1362 		tid = TAILQ_FIRST(&mutex->m_queue);
1363 		while (pthread->active_priority <= tid->active_priority)
1364 			tid = TAILQ_NEXT(tid, sqe);
1365 		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1366 	}
1367 	pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
1368 }
1369 
1370 /*
1371  * Returns with the lock owned and on the threads mutexq if
1372  * it is currently unowned. Returns 1, otherwise.
1373  */
1374 static int
1375 get_muncontested(pthread_mutex_t mutexp, int nonblock)
1376 {
1377 	if (mutexp->m_owner != NULL && mutexp->m_owner != curthread) {
1378 		return (-1);
1379 	} else if (mutexp->m_owner == curthread) {
1380 		if (nonblock)
1381 			return (mutex_self_trylock(mutexp));
1382 		else
1383 			return (mutex_self_lock(mutexp));
1384 	}
1385 
1386 	/*
1387 	 * The mutex belongs to this thread now. Mark it as
1388 	 * such. Add it to the list of mutexes owned by this
1389 	 * thread.
1390 	 */
1391 	mutexp->m_owner = curthread;
1392 	_MUTEX_ASSERT_NOT_OWNED(mutexp);
1393 	TAILQ_INSERT_TAIL(&curthread->mutexq, mutexp, m_qe);
1394 	return (0);
1395 }
1396 
1397 /*
1398  * Returns with the lock owned and on the thread's mutexq. If
1399  * the mutex is currently owned by another thread it will sleep
1400  * until it is available.
1401  */
1402 static void
1403 get_mcontested(pthread_mutex_t mutexp)
1404 {
1405 	_thread_critical_enter(curthread);
1406 
1407 	/*
1408 	 * Put this thread on the mutex's list of waiting threads.
1409 	 * The lock on the thread ensures atomic (as far as other
1410 	 * threads are concerned) setting of the thread state with
1411 	 * it's status on the mutex queue.
1412 	 */
1413 	do {
1414 		mutex_queue_enq(mutexp, curthread);
1415 		PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT);
1416 		curthread->data.mutex = mutexp;
1417 		_thread_critical_exit(curthread);
1418 		_SPINUNLOCK(&mutexp->lock);
1419 		_thread_suspend(curthread, NULL);
1420 
1421 		_SPINLOCK(&mutexp->lock);
1422 		_thread_critical_enter(curthread);
1423 	} while ((curthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0);
1424 
1425 	_thread_critical_exit(curthread);
1426 }
1427