xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision 6439d4c2866f4d9822ddb43789af19976ebce620)
1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by John Birrell.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 #include <stdlib.h>
35 #include <errno.h>
36 #include <string.h>
37 #include <sys/param.h>
38 #include <sys/queue.h>
39 #include <pthread.h>
40 #include "thr_private.h"
41 
/*
 * Queue-linkage invariant checking: when _PTHREADS_INVARIANTS is
 * defined these macros maintain and validate a mutex's TAILQ entry
 * (m_qe) on the owning thread's mutexq, PANICing on inconsistency.
 * In normal builds all three expand to nothing.
 */
#if defined(_PTHREADS_INVARIANTS)
/* Clear the queue linkage so the ownership assertions below work. */
#define _MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
/* A mutex linked onto an owner's mutexq has a non-NULL tqe_prev. */
#define _MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
/* An unowned mutex must have both linkage pointers cleared. */
#define _MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif
61 
62 /*
63  * Prototypes
64  */
65 static int		get_muncontested(pthread_mutex_t, int);
66 static void		get_mcontested(pthread_mutex_t);
67 static inline int	mutex_self_trylock(pthread_mutex_t);
68 static inline int	mutex_self_lock(pthread_mutex_t);
69 static inline int	mutex_unlock_common(pthread_mutex_t *, int);
70 static void		mutex_priority_adjust(pthread_mutex_t);
71 static void		mutex_rescan_owned (pthread_t, pthread_mutex_t);
72 static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
73 static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
74 static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
75 
76 
/* Serializes lazy initialization of statically-allocated mutexes. */
static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;

/* Attribute used when a static mutex is initialized as private. */
static struct pthread_mutex_attr	static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t		static_mattr = &static_mutex_attr;

/* Single underscore versions provided for libc internal usage: */
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
92 
93 /*
94  * Reinitialize a private mutex; this is only used for internal mutexes.
95  */
96 int
97 _mutex_reinit(pthread_mutex_t * mutex)
98 {
99 	int	ret = 0;
100 
101 	if (mutex == NULL)
102 		ret = EINVAL;
103 	else if (*mutex == NULL)
104 		ret = _pthread_mutex_init(mutex, NULL);
105 	else {
106 		/*
107 		 * Initialize the mutex structure:
108 		 */
109 		(*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
110 		(*mutex)->m_protocol = PTHREAD_PRIO_NONE;
111 		TAILQ_INIT(&(*mutex)->m_queue);
112 		(*mutex)->m_owner = NULL;
113 		(*mutex)->m_data.m_count = 0;
114 		(*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE;
115 		(*mutex)->m_refcount = 0;
116 		(*mutex)->m_prio = 0;
117 		(*mutex)->m_saved_prio = 0;
118 		_MUTEX_INIT_LINK(*mutex);
119 		memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
120 	}
121 	return (ret);
122 }
123 
124 int
125 _pthread_mutex_init(pthread_mutex_t * mutex,
126 		   const pthread_mutexattr_t * mutex_attr)
127 {
128 	enum pthread_mutextype	type;
129 	int		protocol;
130 	int		ceiling;
131 	int		flags;
132 	pthread_mutex_t	pmutex;
133 	int		ret = 0;
134 
135 	if (mutex == NULL)
136 		ret = EINVAL;
137 
138 	/* Check if default mutex attributes: */
139 	if (mutex_attr == NULL || *mutex_attr == NULL) {
140 		/* Default to a (error checking) POSIX mutex: */
141 		type = PTHREAD_MUTEX_ERRORCHECK;
142 		protocol = PTHREAD_PRIO_NONE;
143 		ceiling = PTHREAD_MAX_PRIORITY;
144 		flags = 0;
145 	}
146 
147 	/* Check mutex type: */
148 	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
149 	    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
150 		/* Return an invalid argument error: */
151 		ret = EINVAL;
152 
153 	/* Check mutex protocol: */
154 	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
155 	    ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
156 		/* Return an invalid argument error: */
157 		ret = EINVAL;
158 
159 	else {
160 		/* Use the requested mutex type and protocol: */
161 		type = (*mutex_attr)->m_type;
162 		protocol = (*mutex_attr)->m_protocol;
163 		ceiling = (*mutex_attr)->m_ceiling;
164 		flags = (*mutex_attr)->m_flags;
165 	}
166 
167 	/* Check no errors so far: */
168 	if (ret == 0) {
169 		if ((pmutex = (pthread_mutex_t)
170 		    malloc(sizeof(struct pthread_mutex))) == NULL)
171 			ret = ENOMEM;
172 		else {
173 			/* Set the mutex flags: */
174 			pmutex->m_flags = flags;
175 
176 			/* Process according to mutex type: */
177 			switch (type) {
178 			/* case PTHREAD_MUTEX_DEFAULT: */
179 			case PTHREAD_MUTEX_ERRORCHECK:
180 			case PTHREAD_MUTEX_NORMAL:
181 				/* Nothing to do here. */
182 				break;
183 
184 			/* Single UNIX Spec 2 recursive mutex: */
185 			case PTHREAD_MUTEX_RECURSIVE:
186 				/* Reset the mutex count: */
187 				pmutex->m_data.m_count = 0;
188 				break;
189 
190 			/* Trap invalid mutex types: */
191 			default:
192 				/* Return an invalid argument error: */
193 				ret = EINVAL;
194 				break;
195 			}
196 			if (ret == 0) {
197 				/* Initialise the rest of the mutex: */
198 				TAILQ_INIT(&pmutex->m_queue);
199 				pmutex->m_flags |= MUTEX_FLAGS_INITED;
200 				pmutex->m_owner = NULL;
201 				pmutex->m_type = type;
202 				pmutex->m_protocol = protocol;
203 				pmutex->m_refcount = 0;
204 				if (protocol == PTHREAD_PRIO_PROTECT)
205 					pmutex->m_prio = ceiling;
206 				else
207 					pmutex->m_prio = 0;
208 				pmutex->m_saved_prio = 0;
209 				_MUTEX_INIT_LINK(pmutex);
210 				memset(&pmutex->lock, 0, sizeof(pmutex->lock));
211 				*mutex = pmutex;
212 			} else {
213 				free(pmutex);
214 				*mutex = NULL;
215 			}
216 		}
217 	}
218 	/* Return the completion status: */
219 	return (ret);
220 }
221 
222 int
223 _pthread_mutex_destroy(pthread_mutex_t * mutex)
224 {
225 	int	ret = 0;
226 
227 	if (mutex == NULL || *mutex == NULL)
228 		ret = EINVAL;
229 	else {
230 		/* Lock the mutex structure: */
231 		_SPINLOCK(&(*mutex)->lock);
232 
233 		/*
234 		 * Check to see if this mutex is in use:
235 		 */
236 		if (((*mutex)->m_owner != NULL) ||
237 		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
238 		    ((*mutex)->m_refcount != 0)) {
239 			ret = EBUSY;
240 
241 			/* Unlock the mutex structure: */
242 			_SPINUNLOCK(&(*mutex)->lock);
243 		}
244 		else {
245 			/*
246 			 * Free the memory allocated for the mutex
247 			 * structure:
248 			 */
249 			_MUTEX_ASSERT_NOT_OWNED(*mutex);
250 
251 			/* Unlock the mutex structure: */
252 			_SPINUNLOCK(&(*mutex)->lock);
253 
254 			free(*mutex);
255 
256 			/*
257 			 * Leave the caller's pointer NULL now that
258 			 * the mutex has been destroyed:
259 			 */
260 			*mutex = NULL;
261 		}
262 	}
263 
264 	/* Return the completion status: */
265 	return (ret);
266 }
267 
268 static int
269 init_static(pthread_mutex_t *mutex)
270 {
271 	int	ret;
272 
273 	_SPINLOCK(&static_init_lock);
274 
275 	if (*mutex == NULL)
276 		ret = _pthread_mutex_init(mutex, NULL);
277 	else
278 		ret = 0;
279 
280 	_SPINUNLOCK(&static_init_lock);
281 
282 	return (ret);
283 }
284 
285 static int
286 init_static_private(pthread_mutex_t *mutex)
287 {
288 	int	ret;
289 
290 	_SPINLOCK(&static_init_lock);
291 
292 	if (*mutex == NULL)
293 		ret = _pthread_mutex_init(mutex, &static_mattr);
294 	else
295 		ret = 0;
296 
297 	_SPINUNLOCK(&static_init_lock);
298 
299 	return (ret);
300 }
301 
302 int
303 __pthread_mutex_trylock(pthread_mutex_t *mutex)
304 {
305 	int	ret = 0;
306 
307 	if (mutex == NULL)
308 		ret = EINVAL;
309 
310 	/*
311 	 * If the mutex is statically initialized, perform the dynamic
312 	 * initialization:
313 	 */
314 	else if ((*mutex != NULL) || (ret = init_static(mutex)) == 0)
315 		ret = mutex_lock_common(mutex, 1);
316 
317 	return (ret);
318 }
319 
320 int
321 _pthread_mutex_trylock(pthread_mutex_t *mutex)
322 {
323 	int	ret = 0;
324 
325 	if (mutex == NULL)
326 		ret = EINVAL;
327 
328 	/*
329 	 * If the mutex is statically initialized, perform the dynamic
330 	 * initialization marking the mutex private (delete safe):
331 	 */
332 	else if ((*mutex != NULL) || (ret = init_static_private(mutex)) == 0)
333 		ret = mutex_lock_common(mutex, 1);
334 
335 	return (ret);
336 }
337 
/*
 * Common path for pthread_mutex_lock() and pthread_mutex_trylock().
 * With nonblock != 0 this implements trylock semantics: a contested
 * mutex yields EBUSY instead of queueing the caller.  Handles all
 * three protocols (none, priority inheritance, priority protection).
 * Returns 0 on acquisition or an errno value.
 */
static int
mutex_lock_common(pthread_mutex_t * mutex, int nonblock)
{
	int ret, error, inCancel;

	ret = error = inCancel = 0;

	PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_lock_common");

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		/* _thread_kern_sig_defer(); */

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*mutex)->m_queue);
			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
			_MUTEX_INIT_LINK(*mutex);
		}

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * get_muncontested() returns -1 when the mutex is
			 * held by another thread; otherwise its result is
			 * the final status (0 or a self-lock error).
			 */
			if ((error = get_muncontested(*mutex, nonblock)) == -1)
				if (nonblock) {
					ret = EBUSY;
					break;
				} else {
					get_mcontested(*mutex);
				}
			else
				ret = error;
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			if ((error = get_muncontested(*mutex, nonblock)) == 0) {
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 */
				(*mutex)->m_prio = curthread->active_priority;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;
			} else if (error == -1) {
				if (nonblock) {
					ret = EBUSY;
					break;
				} else {
					get_mcontested(*mutex);
				}

				/* Boost the owner if we outrank the mutex. */
				if (curthread->active_priority >
				    (*mutex)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(*mutex);
			} else {
				ret = error;
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*mutex)->m_prio)
				ret = EINVAL;

			if ((error = get_muncontested(*mutex, nonblock)) == 0) {
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority:
				 */
				curthread->active_priority = (*mutex)->m_prio;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;
			} else if (error == -1) {
				if (nonblock) {
					ret = EBUSY;
					break;
				}

				/* Clear any previous error: */
				curthread->error = 0;

				get_mcontested(*mutex);

				/*
				 * The threads priority may have changed while
				 * waiting for the mutex causing a ceiling
				 * violation.
				 */
				ret = curthread->error;
				curthread->error = 0;
			} else {
				ret = error;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/*
		 * Check to see if this thread was interrupted and
		 * is still in the mutex queue of waiting threads:
		 */
		if (curthread->cancelflags & PTHREAD_CANCELLING) {
			if (!nonblock)
				mutex_queue_remove(*mutex, curthread);
			inCancel=1;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		/* _thread_kern_sig_undefer(); */
		if (inCancel) {
			pthread_testcancel();
			PANIC("Canceled thread came back.\n");
		}
		/*
		 * NOTE(review): m_owner is read below without the mutex's
		 * spinlock held — presumably safe here because we only loop
		 * until we observe ourselves as owner; confirm.
		 */
	} while ((*mutex)->m_owner != curthread && ret == 0);

	/* Return the completion status: */
	return (ret);
}
500 
501 int
502 __pthread_mutex_lock(pthread_mutex_t *mutex)
503 {
504 	int	ret = 0;
505 
506 	if (_thread_initial == NULL)
507 		_thread_init();
508 
509 	if (mutex == NULL)
510 		ret = EINVAL;
511 
512 	/*
513 	 * If the mutex is statically initialized, perform the dynamic
514 	 * initialization:
515 	 */
516 	else if ((*mutex != NULL) || ((ret = init_static(mutex)) == 0))
517 		ret = mutex_lock_common(mutex, 0);
518 
519 	return (ret);
520 }
521 
522 int
523 _pthread_mutex_lock(pthread_mutex_t *mutex)
524 {
525 	int	ret = 0;
526 
527 	if (_thread_initial == NULL)
528 		_thread_init();
529 
530 	if (mutex == NULL)
531 		ret = EINVAL;
532 
533 	/*
534 	 * If the mutex is statically initialized, perform the dynamic
535 	 * initialization marking it private (delete safe):
536 	 */
537 	else if ((*mutex != NULL) || ((ret = init_static_private(mutex)) == 0))
538 		ret = mutex_lock_common(mutex, 0);
539 
540 	return (ret);
541 }
542 
543 int
544 _pthread_mutex_unlock(pthread_mutex_t * mutex)
545 {
546 	return (mutex_unlock_common(mutex, /* add reference */ 0));
547 }
548 
549 int
550 _mutex_cv_unlock(pthread_mutex_t * mutex)
551 {
552 	return (mutex_unlock_common(mutex, /* add reference */ 1));
553 }
554 
555 int
556 _mutex_cv_lock(pthread_mutex_t * mutex)
557 {
558 	int	ret;
559 	if ((ret = _pthread_mutex_lock(mutex)) == 0)
560 		(*mutex)->m_refcount--;
561 	return (ret);
562 }
563 
564 static inline int
565 mutex_self_trylock(pthread_mutex_t mutex)
566 {
567 	int	ret = 0;
568 
569 	switch (mutex->m_type) {
570 
571 	/* case PTHREAD_MUTEX_DEFAULT: */
572 	case PTHREAD_MUTEX_ERRORCHECK:
573 	case PTHREAD_MUTEX_NORMAL:
574 		/*
575 		 * POSIX specifies that mutexes should return EDEADLK if a
576 		 * recursive lock is detected.
577 		 */
578 		ret = EBUSY;
579 		break;
580 
581 	case PTHREAD_MUTEX_RECURSIVE:
582 		/* Increment the lock count: */
583 		mutex->m_data.m_count++;
584 		break;
585 
586 	default:
587 		/* Trap invalid mutex types; */
588 		ret = EINVAL;
589 	}
590 
591 	return (ret);
592 }
593 
/*
 * Handle a blocking lock by the thread that already owns the mutex.
 * Called with the mutex's spinlock held; in the NORMAL case the
 * spinlock is released before the thread suspends forever.
 */
static inline int
mutex_self_lock(pthread_mutex_t mutex)
{
	int ret = 0;

	switch (mutex->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if a
		 * recursive lock is detected.
		 */
		ret = EDEADLK;
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 define as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		/* XXX Sched lock. */
		/* Order matters: mark deadlocked, drop the spinlock,
		 * then suspend — the thread never resumes. */
		PTHREAD_SET_STATE(curthread, PS_DEADLOCK);
		_SPINUNLOCK(&(mutex)->lock);
		_thread_suspend(curthread, NULL);
		PANIC("Shouldn't resume here?\n");
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		mutex->m_data.m_count++;
		break;

	default:
		/* Trap invalid mutex types; */
		ret = EINVAL;
	}

	return (ret);
}
633 
/*
 * Common unlock path for pthread_mutex_unlock() and the condition
 * variable code.  Verifies ownership, handles recursive counts, and
 * hands the mutex (and, for priority protocols, the associated
 * priority bookkeeping) to the highest-priority waiter.  When
 * add_reference != 0 (the _mutex_cv_unlock() path) m_refcount is
 * incremented before the spinlock is dropped.
 */
static inline int
mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
{
	int	ret = 0;

	if (mutex == NULL || *mutex == NULL) {
		ret = EINVAL;
	} else {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		/* _thread_kern_sig_defer(); */

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of
				 * threads waiting on the mutex. The deq
				 * function will have already locked it
				 * for us.
				 */
				if (((*mutex)->m_owner =
			  	    mutex_queue_deq(*mutex)) != NULL) {
					/* Make the new owner runnable: */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					/*
					 * Add the mutex to the threads list of
					 * owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
					(*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of threads
				 * waiting on the mutex. It will already be
				 * locked for us.
				 */
				if (((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) == NULL)
					/* This mutex has no priority. */
					(*mutex)->m_prio = 0;
				else {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the threads list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Set the priority of the mutex.  Since
					 * our waiting threads are in descending
					 * priority order, the priority of the
					 * mutex becomes the active priority of
					 * the thread we just dequeued.
					 */
					(*mutex)->m_prio =
					    (*mutex)->m_owner->active_priority;

					/*
					 * Save the owning threads inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning threads inherited priority
					 * now becomes his active priority (the
					 * priority of the mutex).
					 */
					(*mutex)->m_owner->inherited_priority =
						(*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
					(*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Enter a loop to find a waiting thread whose
				 * active priority will not cause a ceiling
				 * violation. It will already be locked for us.
				 */
				while ((((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) &&
				    ((*mutex)->m_owner->active_priority >
				     (*mutex)->m_prio)) {
					/*
					 * Either the mutex ceiling priority
					 * been lowered and/or this threads
					 * priority has been raised subsequent
					 * to this thread being queued on the
					 * waiting list.
					 */
					(*mutex)->m_owner->error = EINVAL;
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
					/*
					 * The thread is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					_thread_critical_exit((*mutex)->m_owner);
				}

				/* Check for a new owner: */
				if ((*mutex)->m_owner != NULL) {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the threads list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Save the owning threads inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread inherits the
					 * ceiling priority of the mutex and
					 * executes at that priority:
					 */
					(*mutex)->m_owner->inherited_priority =
					    (*mutex)->m_prio;
					(*mutex)->m_owner->active_priority =
					    (*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					/* XXXTHR sched lock. */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					_thread_critical_exit((*mutex)->m_owner);
				}
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0)) {
			/* Increment the reference count: */
			(*mutex)->m_refcount++;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		/* _thread_kern_sig_undefer(); */
	}

	/* Return the completion status: */
	return (ret);
}
974 
975 
976 /*
977  * This function is called when a change in base priority occurs for
978  * a thread that is holding or waiting for a priority protection or
979  * inheritence mutex.  A change in a threads base priority can effect
980  * changes to active priorities of other threads and to the ordering
981  * of mutex locking by waiting threads.
982  *
983  * This must be called while thread scheduling is deferred.
984  */
void
_mutex_notify_priochange(pthread_t pthread)
{
	/* Adjust the priorites of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this threads change
		 * in priority.  This has the side effect of changing
		 * the threads active priority.
		 */
		mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
	}

	/*
	 * If this thread is waiting on a priority inheritence mutex,
	 * check for priority adjustments.  A change in priority can
	 * also effect a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (pthread->state == PS_MUTEX_WAIT) {
		/* Lock the mutex structure: */
		_SPINLOCK(&pthread->data.mutex->lock);

		/*
		 * Check to make sure this thread is still in the same state
		 * (the spinlock above can yield the CPU to another thread):
		 */
		if (pthread->state == PS_MUTEX_WAIT) {
			/*
			 * Remove and reinsert this thread into the list of
			 * waiting threads to preserve decreasing priority
			 * order.
			 */
			mutex_queue_remove(pthread->data.mutex, pthread);
			mutex_queue_enq(pthread->data.mutex, pthread);

			/* Only inheritance mutexes propagate priority. */
			if (pthread->data.mutex->m_protocol ==
			     PTHREAD_PRIO_INHERIT) {
				/* Adjust priorities: */
				mutex_priority_adjust(pthread->data.mutex);
			}
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&pthread->data.mutex->lock);
	}
}
1039 
1040 /*
1041  * Called when a new thread is added to the mutex waiting queue or
1042  * when a threads priority changes that is already in the mutex
1043  * waiting queue.
1044  */
/*
 * Propagate priority from the waiters of `mutex` to its owner, and
 * transitively along any chain of priority-inheritance mutexes the
 * successive owners are themselves blocked on.  Called with the
 * mutex's spinlock held and a non-empty wait queue.
 */
static void
mutex_priority_adjust(pthread_mutex_t mutex)
{
	pthread_t	pthread_next, pthread = mutex->m_owner;
	int		temp_prio;
	pthread_mutex_t	m = mutex;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning threads
	 * active priority(*).
	 *
	 * (*) Because the owning threads current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the threads saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/* Walk the chain of blocked owners until priority stabilizes. */
	while (m != NULL) {
		/*
		 * Save the threads priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all the mutexes this thread has
		 * locked since taking this mutex.  This also has a
		 * potential side-effect of changing the threads priority.
		 */
		mutex_rescan_owned(pthread, m);

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the threads new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    (pthread->state == PS_MUTEX_WAIT) &&
		    (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Grab the mutex this thread is waiting on: */
			m = pthread->data.mutex;

			/*
			 * The priority for this thread has changed.  Remove
			 * and reinsert this thread into the list of waiting
			 * threads to preserve decreasing priority order.
			 */
			mutex_queue_remove(m, pthread);
			mutex_queue_enq(m, pthread);

			/* Grab the waiting thread with highest priority: */
			pthread_next = TAILQ_FIRST(&m->m_queue);

			/*
			 * Calculate the mutex priority as the maximum of the
			 * highest active priority of any waiting threads and
			 * the owning threads active priority.
			 */
			temp_prio = MAX(pthread_next->active_priority,
			    MAX(m->m_saved_prio, m->m_owner->base_priority));

			if (temp_prio != m->m_prio) {
				/*
				 * The priority needs to be propagated to the
				 * mutex this thread is waiting on and up to
				 * the owner of that mutex.
				 */
				m->m_prio = temp_prio;
				pthread = m->m_owner;
			}
			else
				/* We're done: */
				m = NULL;

		}
		else
			/* We're done: */
			m = NULL;
	}
}
1138 
/*
 * Recalculate the inherited and active priorities of 'pthread' by
 * walking the priority inheritance mutexes it owns.  When 'mutex' is
 * NULL the walk starts at the head of the thread's owned-mutex list
 * with no inherited priority; otherwise it starts at the mutex
 * following 'mutex', inheriting that mutex's (already correct)
 * priority.  The thread's inherited and active priorities are
 * updated as a side effect.
 */
static void
mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
{
	int		active_prio, inherited_prio;
	pthread_mutex_t	m;
	pthread_t	pthread_next;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	}
	else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritence
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	while (m != NULL) {
		/*
		 * We only want to deal with priority inheritence
		 * mutexes.  This might be optimized by only placing
		 * priority inheritence mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owners saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				/* No waiters; priority from inheritance only: */
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}

		/* Advance to the next mutex owned by this thread: */
		m = TAILQ_NEXT(m, m_qe);
	}

	/*
	 * Fix the threads inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
#if 0
		/*
		 * If this thread is in the priority queue, it must be
		 * removed and reinserted for its new priority.
	 	 */
		if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
			/*
			 * Remove the thread from the priority queue
			 * before changing its priority:
			 */
			PTHREAD_PRIOQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritence mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_HEAD(pthread);
			}
			else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_TAIL(pthread);
			}
		}
		else {
			/* Set the new active priority. */
			pthread->active_priority = active_prio;
		}
#endif
		/*
		 * Priority-queue requeueing above is disabled (#if 0);
		 * only the active priority is updated here.
		 */
		pthread->active_priority = active_prio;
	}
}
1254 
1255 void
1256 _mutex_unlock_private(pthread_t pthread)
1257 {
1258 	struct pthread_mutex	*m, *m_next;
1259 
1260 	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1261 		m_next = TAILQ_NEXT(m, m_qe);
1262 		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1263 			_pthread_mutex_unlock(&m);
1264 	}
1265 }
1266 
/*
 * Back a thread out of a mutex wait: remove it from the waiting
 * queue of the mutex it is blocked on so it no longer contends for
 * the lock (presumably used when the wait must be aborted, e.g. on
 * cancellation — confirm against callers).
 */
void
_mutex_lock_backout(pthread_t pthread)
{
	struct pthread_mutex	*mutex;

	/*
	 * Defer signals to protect the scheduling queues from
	 * access by the signal handler:
	 */
	/* _thread_kern_sig_defer();*/

	/* XXX - Necessary to obey lock order */
	_SPINLOCK(&pthread->lock);
	mutex = pthread->data.mutex;
	_SPINUNLOCK(&pthread->lock);

	/*
	 * NOTE(review): assumes pthread->data.mutex was non-NULL —
	 * verify callers only invoke this while the thread is blocked
	 * on a mutex, otherwise the dereference below faults.
	 */
	_SPINLOCK(&mutex->lock);

	_thread_critical_enter(pthread);
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {

		mutex_queue_remove(mutex, pthread);

		/* This thread is no longer waiting for the mutex: */
		pthread->data.mutex = NULL;

	}
	/*
	 * Undefer and handle pending signals, yielding if
	 * necessary:
	 */
	/* _thread_kern_sig_undefer(); */

	_thread_critical_exit(pthread);
	_SPINUNLOCK(&mutex->lock);
}
1303 
1304 /*
1305  * Dequeue a waiting thread from the head of a mutex queue in descending
1306  * priority order. This funtion will return with the thread locked.
1307  */
static inline pthread_t
mutex_queue_deq(pthread_mutex_t mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		/* Lock the candidate before inspecting/removing it: */
		_thread_critical_enter(pthread);
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		if ((pthread->cancelflags & PTHREAD_CANCELLING) == 0 &&
		    pthread->state == PS_MUTEX_WAIT)
			/* Return with this thread still locked: */
			break;
		else
			/* Skip it; drop its lock and try the next waiter: */
			_thread_critical_exit(pthread);
	}

	/* NULL if the queue was empty or contained only skipped threads. */
	return (pthread);
}
1331 
1332 /*
1333  * Remove a waiting thread from a mutex queue in descending priority order.
1334  */
1335 static inline void
1336 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1337 {
1338 	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1339 		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1340 		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1341 	}
1342 }
1343 
1344 /*
1345  * Enqueue a waiting thread to a queue in descending priority order.
1346  */
1347 static inline void
1348 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1349 {
1350 	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1351 	char *name;
1352 
1353 	name = pthread->name ? pthread->name : "unknown";
1354 	if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0)
1355 		_thread_printf(2, "Thread (%s:%u) already on condq\n",
1356 		    pthread->name, pthread->uniqueid);
1357 	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0)
1358 		_thread_printf(2, "Thread (%s:%u) already on mutexq\n",
1359 		    pthread->name, pthread->uniqueid);
1360 	PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
1361 	/*
1362 	 * For the common case of all threads having equal priority,
1363 	 * we perform a quick check against the priority of the thread
1364 	 * at the tail of the queue.
1365 	 */
1366 	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1367 		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1368 	else {
1369 		tid = TAILQ_FIRST(&mutex->m_queue);
1370 		while (pthread->active_priority <= tid->active_priority)
1371 			tid = TAILQ_NEXT(tid, sqe);
1372 		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1373 	}
1374 	pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
1375 }
1376 
1377 /*
1378  * Returns with the lock owned and on the threads mutexq if
1379  * it is currently unowned. Returns 1, otherwise.
1380  */
1381 static int
1382 get_muncontested(pthread_mutex_t mutexp, int nonblock)
1383 {
1384 	if (mutexp->m_owner != NULL && mutexp->m_owner != curthread)
1385 		return (-1);
1386 	else if (mutexp->m_owner == curthread)
1387 		if (nonblock)
1388 			return (mutex_self_trylock(mutexp));
1389 		else
1390 			return (mutex_self_lock(mutexp));
1391 
1392 	/*
1393 	 * The mutex belongs to this thread now. Mark it as
1394 	 * such. Add it to the list of mutexes owned by this
1395 	 * thread.
1396 	 */
1397 	mutexp->m_owner = curthread;
1398 	_MUTEX_ASSERT_NOT_OWNED(mutexp);
1399 	TAILQ_INSERT_TAIL(&curthread->mutexq, mutexp, m_qe);
1400 	return (0);
1401 }
1402 
1403 /*
1404  * Returns with the lock owned and on the thread's mutexq. If
1405  * the mutex is currently owned by another thread it will sleep
1406  * until it is available.
1407  */
static void
get_mcontested(pthread_mutex_t mutexp)
{
	_thread_critical_enter(curthread);

	/*
	 * Put this thread on the mutex's list of waiting threads.
	 * The lock on the thread ensures atomic (as far as other
	 * threads are concerned) setting of the thread state with
	 * it's status on the mutex queue.
	 */
	do {
		mutex_queue_enq(mutexp, curthread);
		PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT);
		curthread->data.mutex = mutexp;
		_thread_critical_exit(curthread);
		/*
		 * Drop the mutex structure lock before sleeping so the
		 * owner can make progress and eventually wake us:
		 */
		_SPINUNLOCK(&mutexp->lock);
		_thread_suspend(curthread, NULL);

		/* Reacquire locks in mutex-then-thread order: */
		_SPINLOCK(&mutexp->lock);
		_thread_critical_enter(curthread);
		/*
		 * Loop until a waker has removed us from the mutex
		 * queue.  NOTE(review): if the flag is still set, the
		 * loop re-enqueues a thread that is already queued and
		 * mutex_queue_enq prints a diagnostic — verify wakers
		 * always dequeue before resuming the thread.
		 */
	} while ((curthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0);

	_thread_critical_exit(curthread);
}
1433