xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision 77b7cdf1999ee965ad494fddd184b18f532ac91a)
1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by John Birrell.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 #include <stdlib.h>
35 #include <errno.h>
36 #include <string.h>
37 #include <sys/param.h>
38 #include <sys/queue.h>
39 #include <pthread.h>
40 #include "thr_private.h"
41 
#if defined(_PTHREADS_INVARIANTS)
/*
 * Debug-only invariant checks: a mutex's TAILQ entry pointers record
 * whether it is currently linked on some thread's owned-mutex queue.
 * PANIC aborts the process when an invariant is violated.
 */
/* Mark a mutex as not linked on any thread's owned-mutex queue. */
#define _MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
/* Panic unless the mutex is linked on an owner's queue. */
#define _MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
/* Panic if the mutex is still linked on an owner's queue. */
#define _MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
/* Invariant checking compiled out in normal builds. */
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif
61 
62 /*
63  * Prototypes
64  */
65 static inline int	mutex_self_trylock(pthread_mutex_t);
66 static inline int	mutex_self_lock(pthread_mutex_t);
67 static inline int	mutex_unlock_common(pthread_mutex_t *, int);
68 static void		mutex_priority_adjust(pthread_mutex_t);
69 static void		mutex_rescan_owned (pthread_t, pthread_mutex_t);
70 static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
71 static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
72 static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
73 
74 
75 static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
76 
77 static struct pthread_mutex_attr	static_mutex_attr =
78     PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
79 static pthread_mutexattr_t		static_mattr = &static_mutex_attr;
80 
81 /* Single underscore versions provided for libc internal usage: */
82 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
83 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
84 
85 /* No difference between libc and application usage of these: */
86 __weak_reference(_pthread_mutex_init, pthread_mutex_init);
87 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
88 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
89 
90 
91 /*
92  * Reinitialize a private mutex; this is only used for internal mutexes.
93  */
94 int
95 _mutex_reinit(pthread_mutex_t * mutex)
96 {
97 	int	ret = 0;
98 
99 	if (mutex == NULL)
100 		ret = EINVAL;
101 	else if (*mutex == NULL)
102 		ret = _pthread_mutex_init(mutex, NULL);
103 	else {
104 		/*
105 		 * Initialize the mutex structure:
106 		 */
107 		(*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
108 		(*mutex)->m_protocol = PTHREAD_PRIO_NONE;
109 		TAILQ_INIT(&(*mutex)->m_queue);
110 		(*mutex)->m_owner = NULL;
111 		(*mutex)->m_data.m_count = 0;
112 		(*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE;
113 		(*mutex)->m_refcount = 0;
114 		(*mutex)->m_prio = 0;
115 		(*mutex)->m_saved_prio = 0;
116 		_MUTEX_INIT_LINK(*mutex);
117 		memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
118 	}
119 	return (ret);
120 }
121 
122 int
123 _pthread_mutex_init(pthread_mutex_t * mutex,
124 		   const pthread_mutexattr_t * mutex_attr)
125 {
126 	enum pthread_mutextype	type;
127 	int		protocol;
128 	int		ceiling;
129 	int		flags;
130 	pthread_mutex_t	pmutex;
131 	int		ret = 0;
132 
133 	if (mutex == NULL)
134 		ret = EINVAL;
135 
136 	/* Check if default mutex attributes: */
137 	if (mutex_attr == NULL || *mutex_attr == NULL) {
138 		/* Default to a (error checking) POSIX mutex: */
139 		type = PTHREAD_MUTEX_ERRORCHECK;
140 		protocol = PTHREAD_PRIO_NONE;
141 		ceiling = PTHREAD_MAX_PRIORITY;
142 		flags = 0;
143 	}
144 
145 	/* Check mutex type: */
146 	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
147 	    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
148 		/* Return an invalid argument error: */
149 		ret = EINVAL;
150 
151 	/* Check mutex protocol: */
152 	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
153 	    ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
154 		/* Return an invalid argument error: */
155 		ret = EINVAL;
156 
157 	else {
158 		/* Use the requested mutex type and protocol: */
159 		type = (*mutex_attr)->m_type;
160 		protocol = (*mutex_attr)->m_protocol;
161 		ceiling = (*mutex_attr)->m_ceiling;
162 		flags = (*mutex_attr)->m_flags;
163 	}
164 
165 	/* Check no errors so far: */
166 	if (ret == 0) {
167 		if ((pmutex = (pthread_mutex_t)
168 		    malloc(sizeof(struct pthread_mutex))) == NULL)
169 			ret = ENOMEM;
170 		else {
171 			/* Set the mutex flags: */
172 			pmutex->m_flags = flags;
173 
174 			/* Process according to mutex type: */
175 			switch (type) {
176 			/* case PTHREAD_MUTEX_DEFAULT: */
177 			case PTHREAD_MUTEX_ERRORCHECK:
178 			case PTHREAD_MUTEX_NORMAL:
179 				/* Nothing to do here. */
180 				break;
181 
182 			/* Single UNIX Spec 2 recursive mutex: */
183 			case PTHREAD_MUTEX_RECURSIVE:
184 				/* Reset the mutex count: */
185 				pmutex->m_data.m_count = 0;
186 				break;
187 
188 			/* Trap invalid mutex types: */
189 			default:
190 				/* Return an invalid argument error: */
191 				ret = EINVAL;
192 				break;
193 			}
194 			if (ret == 0) {
195 				/* Initialise the rest of the mutex: */
196 				TAILQ_INIT(&pmutex->m_queue);
197 				pmutex->m_flags |= MUTEX_FLAGS_INITED;
198 				pmutex->m_owner = NULL;
199 				pmutex->m_type = type;
200 				pmutex->m_protocol = protocol;
201 				pmutex->m_refcount = 0;
202 				if (protocol == PTHREAD_PRIO_PROTECT)
203 					pmutex->m_prio = ceiling;
204 				else
205 					pmutex->m_prio = 0;
206 				pmutex->m_saved_prio = 0;
207 				_MUTEX_INIT_LINK(pmutex);
208 				memset(&pmutex->lock, 0, sizeof(pmutex->lock));
209 				*mutex = pmutex;
210 			} else {
211 				free(pmutex);
212 				*mutex = NULL;
213 			}
214 		}
215 	}
216 	/* Return the completion status: */
217 	return (ret);
218 }
219 
220 int
221 _pthread_mutex_destroy(pthread_mutex_t * mutex)
222 {
223 	int	ret = 0;
224 
225 	if (mutex == NULL || *mutex == NULL)
226 		ret = EINVAL;
227 	else {
228 		/* Lock the mutex structure: */
229 		_SPINLOCK(&(*mutex)->lock);
230 
231 		/*
232 		 * Check to see if this mutex is in use:
233 		 */
234 		if (((*mutex)->m_owner != NULL) ||
235 		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
236 		    ((*mutex)->m_refcount != 0)) {
237 			ret = EBUSY;
238 
239 			/* Unlock the mutex structure: */
240 			_SPINUNLOCK(&(*mutex)->lock);
241 		}
242 		else {
243 			/*
244 			 * Free the memory allocated for the mutex
245 			 * structure:
246 			 */
247 			_MUTEX_ASSERT_NOT_OWNED(*mutex);
248 
249 			/* Unlock the mutex structure: */
250 			_SPINUNLOCK(&(*mutex)->lock);
251 
252 			free(*mutex);
253 
254 			/*
255 			 * Leave the caller's pointer NULL now that
256 			 * the mutex has been destroyed:
257 			 */
258 			*mutex = NULL;
259 		}
260 	}
261 
262 	/* Return the completion status: */
263 	return (ret);
264 }
265 
266 static int
267 init_static(pthread_mutex_t *mutex)
268 {
269 	int	ret;
270 
271 	_SPINLOCK(&static_init_lock);
272 
273 	if (*mutex == NULL)
274 		ret = _pthread_mutex_init(mutex, NULL);
275 	else
276 		ret = 0;
277 
278 	_SPINUNLOCK(&static_init_lock);
279 
280 	return (ret);
281 }
282 
283 static int
284 init_static_private(pthread_mutex_t *mutex)
285 {
286 	int	ret;
287 
288 	_SPINLOCK(&static_init_lock);
289 
290 	if (*mutex == NULL)
291 		ret = _pthread_mutex_init(mutex, &static_mattr);
292 	else
293 		ret = 0;
294 
295 	_SPINUNLOCK(&static_init_lock);
296 
297 	return (ret);
298 }
299 
/*
 * Common non-blocking lock path for all three mutex protocols.  Never
 * blocks: returns 0 when the mutex was acquired (or recursively
 * re-acquired), EBUSY when another thread owns it, EDEADLK/EBUSY/EINVAL
 * from mutex_self_trylock() for a re-lock by the owner, and EINVAL for
 * a ceiling violation or unknown protocol.  All state changes happen
 * under the mutex's internal spinlock.
 */
static int
mutex_trylock_common(pthread_mutex_t *mutex)
{
	int	ret = 0;

	PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in pthread_mutex_trylock_basic");

	/*
	 * Defer signals to protect the scheduling queues from
	 * access by the signal handler:
	 */
	/* _thread_kern_sig_defer(); XXXThr */

	/* Lock the mutex structure: */
	_SPINLOCK(&(*mutex)->lock);

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		_MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex type: */
	switch ((*mutex)->m_protocol) {
	/* Default POSIX mutex: */
	case PTHREAD_PRIO_NONE:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			/* Add to the list of owned mutexes: */
			_MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(*mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority inheritence mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;

			/*
			 * NOTE(review): unlike mutex_lock_common(), this
			 * path does not set curthread->inherited_priority
			 * to the mutex priority -- confirm intentional.
			 */

			/* Add to the list of owned mutexes: */
			_MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(*mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;

			/* Add to the list of owned mutexes: */
			_MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(*mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex types: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	/* Unlock the mutex structure: */
	_SPINUNLOCK(&(*mutex)->lock);

	/*
	 * Undefer and handle pending signals, yielding if
	 * necessary:
	 */
	/* _thread_kern_sig_undefer(); */

	/* Return the completion status: */
	return (ret);
}
431 
432 int
433 __pthread_mutex_trylock(pthread_mutex_t *mutex)
434 {
435 	int	ret = 0;
436 
437 	if (mutex == NULL)
438 		ret = EINVAL;
439 
440 	/*
441 	 * If the mutex is statically initialized, perform the dynamic
442 	 * initialization:
443 	 */
444 	else if ((*mutex != NULL) || (ret = init_static(mutex)) == 0)
445 		ret = mutex_trylock_common(mutex);
446 
447 	return (ret);
448 }
449 
450 int
451 _pthread_mutex_trylock(pthread_mutex_t *mutex)
452 {
453 	int	ret = 0;
454 
455 	if (mutex == NULL)
456 		ret = EINVAL;
457 
458 	/*
459 	 * If the mutex is statically initialized, perform the dynamic
460 	 * initialization marking the mutex private (delete safe):
461 	 */
462 	else if ((*mutex != NULL) || (ret = init_static_private(mutex)) == 0)
463 		ret = mutex_trylock_common(mutex);
464 
465 	return (ret);
466 }
467 
468 static int
469 mutex_lock_common(pthread_mutex_t * mutex)
470 {
471 	int	ret = 0;
472 
473 	PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
474 	    "Uninitialized mutex in pthread_mutex_trylock_basic");
475 
476 	/*
477 	 * Enter a loop waiting to become the mutex owner.  We need a
478 	 * loop in case the waiting thread is interrupted by a signal
479 	 * to execute a signal handler.  It is not (currently) possible
480 	 * to remain in the waiting queue while running a handler.
481 	 * Instead, the thread is interrupted and backed out of the
482 	 * waiting queue prior to executing the signal handler.
483 	 */
484 	do {
485 		/*
486 		 * Defer signals to protect the scheduling queues from
487 		 * access by the signal handler:
488 		 */
489 		/* _thread_kern_sig_defer(); */
490 
491 		/* Lock the mutex structure: */
492 		_SPINLOCK(&(*mutex)->lock);
493 
494 		/*
495 		 * If the mutex was statically allocated, properly
496 		 * initialize the tail queue.
497 		 */
498 		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
499 			TAILQ_INIT(&(*mutex)->m_queue);
500 			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
501 			_MUTEX_INIT_LINK(*mutex);
502 		}
503 
504 		/* Process according to mutex type: */
505 		switch ((*mutex)->m_protocol) {
506 		/* Default POSIX mutex: */
507 		case PTHREAD_PRIO_NONE:
508 			if ((*mutex)->m_owner == NULL) {
509 				/* Lock the mutex for this thread: */
510 				(*mutex)->m_owner = curthread;
511 
512 				/* Add to the list of owned mutexes: */
513 				_MUTEX_ASSERT_NOT_OWNED(*mutex);
514 				TAILQ_INSERT_TAIL(&curthread->mutexq,
515 				    (*mutex), m_qe);
516 
517 			} else if ((*mutex)->m_owner == curthread)
518 				ret = mutex_self_lock(*mutex);
519 			else {
520 				/*
521 				 * Join the queue of threads waiting to lock
522 				 * the mutex:
523 				 */
524 				mutex_queue_enq(*mutex, curthread);
525 
526 				/*
527 				 * Keep a pointer to the mutex this thread
528 				 * is waiting on:
529 				 */
530 				curthread->data.mutex = *mutex;
531 
532 				/*
533 				 * Unlock the mutex structure and schedule the
534 				 * next thread:
535 				 */
536 				/* XXX Sched lock. */
537 				PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT);
538 				_SPINUNLOCK(&(*mutex)->lock);
539 				_thread_suspend(curthread, NULL);
540 
541 				/* Lock the mutex structure again: */
542 				_SPINLOCK(&(*mutex)->lock);
543 			}
544 			break;
545 
546 		/* POSIX priority inheritence mutex: */
547 		case PTHREAD_PRIO_INHERIT:
548 			/* Check if this mutex is not locked: */
549 			if ((*mutex)->m_owner == NULL) {
550 				/* Lock the mutex for this thread: */
551 				(*mutex)->m_owner = curthread;
552 
553 				/* Track number of priority mutexes owned: */
554 				curthread->priority_mutex_count++;
555 
556 				/*
557 				 * The mutex takes on attributes of the
558 				 * running thread when there are no waiters.
559 				 */
560 				(*mutex)->m_prio = curthread->active_priority;
561 				(*mutex)->m_saved_prio =
562 				    curthread->inherited_priority;
563 				curthread->inherited_priority =
564 				    (*mutex)->m_prio;
565 
566 				/* Add to the list of owned mutexes: */
567 				_MUTEX_ASSERT_NOT_OWNED(*mutex);
568 				TAILQ_INSERT_TAIL(&curthread->mutexq,
569 				    (*mutex), m_qe);
570 
571 			} else if ((*mutex)->m_owner == curthread)
572 				ret = mutex_self_lock(*mutex);
573 			else {
574 				/*
575 				 * Join the queue of threads waiting to lock
576 				 * the mutex:
577 				 */
578 				mutex_queue_enq(*mutex, curthread);
579 
580 				/*
581 				 * Keep a pointer to the mutex this thread
582 				 * is waiting on:
583 				 */
584 				curthread->data.mutex = *mutex;
585 
586 				if (curthread->active_priority >
587 				    (*mutex)->m_prio)
588 					/* Adjust priorities: */
589 					mutex_priority_adjust(*mutex);
590 
591 				/*
592 				 * Unlock the mutex structure and schedule the
593 				 * next thread:
594 				 */
595 				/* XXX Sched lock. */
596 				PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT);
597 				_SPINUNLOCK(&(*mutex)->lock);
598 				_thread_suspend(curthread, NULL);
599 
600 				/* Lock the mutex structure again: */
601 				_SPINLOCK(&(*mutex)->lock);
602 			}
603 			break;
604 
605 		/* POSIX priority protection mutex: */
606 		case PTHREAD_PRIO_PROTECT:
607 			/* Check for a priority ceiling violation: */
608 			if (curthread->active_priority > (*mutex)->m_prio)
609 				ret = EINVAL;
610 
611 			/* Check if this mutex is not locked: */
612 			else if ((*mutex)->m_owner == NULL) {
613 				/*
614 				 * Lock the mutex for the running
615 				 * thread:
616 				 */
617 				(*mutex)->m_owner = curthread;
618 
619 				/* Track number of priority mutexes owned: */
620 				curthread->priority_mutex_count++;
621 
622 				/*
623 				 * The running thread inherits the ceiling
624 				 * priority of the mutex and executes at that
625 				 * priority:
626 				 */
627 				curthread->active_priority = (*mutex)->m_prio;
628 				(*mutex)->m_saved_prio =
629 				    curthread->inherited_priority;
630 				curthread->inherited_priority =
631 				    (*mutex)->m_prio;
632 
633 				/* Add to the list of owned mutexes: */
634 				_MUTEX_ASSERT_NOT_OWNED(*mutex);
635 				TAILQ_INSERT_TAIL(&curthread->mutexq,
636 				    (*mutex), m_qe);
637 			} else if ((*mutex)->m_owner == curthread)
638 				ret = mutex_self_lock(*mutex);
639 			else {
640 				/*
641 				 * Join the queue of threads waiting to lock
642 				 * the mutex:
643 				 */
644 				mutex_queue_enq(*mutex, curthread);
645 
646 				/*
647 				 * Keep a pointer to the mutex this thread
648 				 * is waiting on:
649 				 */
650 				curthread->data.mutex = *mutex;
651 
652 				/* Clear any previous error: */
653 				curthread->error = 0;
654 
655 				/*
656 				 * Unlock the mutex structure and schedule the
657 				 * next thread:
658 				 */
659 				/* XXX Sched lock. */
660 				PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT);
661 				_SPINUNLOCK(&(*mutex)->lock);
662 				_thread_suspend(curthread, NULL);
663 
664 				/* Lock the mutex structure again: */
665 				_SPINLOCK(&(*mutex)->lock);
666 
667 				/*
668 				 * The threads priority may have changed while
669 				 * waiting for the mutex causing a ceiling
670 				 * violation.
671 				 */
672 				ret = curthread->error;
673 				curthread->error = 0;
674 			}
675 			break;
676 
677 		/* Trap invalid mutex types: */
678 		default:
679 			/* Return an invalid argument error: */
680 			ret = EINVAL;
681 			break;
682 		}
683 
684 		/*
685 		 * Check to see if this thread was interrupted and
686 		 * is still in the mutex queue of waiting threads:
687 		 */
688 		if (curthread->cancelflags & PTHREAD_CANCELLING)
689 			mutex_queue_remove(*mutex, curthread);
690 
691 		/* Unlock the mutex structure: */
692 		_SPINUNLOCK(&(*mutex)->lock);
693 
694 		/*
695 		 * Undefer and handle pending signals, yielding if
696 		 * necessary:
697 		 */
698 		/* _thread_kern_sig_undefer(); */
699 		if (curthread->cancelflags & PTHREAD_CANCELLING) {
700 			pthread_testcancel();
701 			PANIC("Canceled thread came back.\n");
702 		}
703 	} while ((*mutex)->m_owner != curthread && ret == 0);
704 
705 	/* Return the completion status: */
706 	return (ret);
707 }
708 
709 int
710 __pthread_mutex_lock(pthread_mutex_t *mutex)
711 {
712 	int	ret = 0;
713 
714 	if (_thread_initial == NULL)
715 		_thread_init();
716 
717 	if (mutex == NULL)
718 		ret = EINVAL;
719 
720 	/*
721 	 * If the mutex is statically initialized, perform the dynamic
722 	 * initialization:
723 	 */
724 	else if ((*mutex != NULL) || ((ret = init_static(mutex)) == 0))
725 		ret = mutex_lock_common(mutex);
726 
727 	return (ret);
728 }
729 
730 int
731 _pthread_mutex_lock(pthread_mutex_t *mutex)
732 {
733 	int	ret = 0;
734 
735 	if (_thread_initial == NULL)
736 		_thread_init();
737 
738 	if (mutex == NULL)
739 		ret = EINVAL;
740 
741 	/*
742 	 * If the mutex is statically initialized, perform the dynamic
743 	 * initialization marking it private (delete safe):
744 	 */
745 	else if ((*mutex != NULL) || ((ret = init_static_private(mutex)) == 0))
746 		ret = mutex_lock_common(mutex);
747 
748 	return (ret);
749 }
750 
751 int
752 _pthread_mutex_unlock(pthread_mutex_t * mutex)
753 {
754 	return (mutex_unlock_common(mutex, /* add reference */ 0));
755 }
756 
757 int
758 _mutex_cv_unlock(pthread_mutex_t * mutex)
759 {
760 	return (mutex_unlock_common(mutex, /* add reference */ 1));
761 }
762 
763 int
764 _mutex_cv_lock(pthread_mutex_t * mutex)
765 {
766 	int	ret;
767 	if ((ret = _pthread_mutex_lock(mutex)) == 0)
768 		(*mutex)->m_refcount--;
769 	return (ret);
770 }
771 
772 static inline int
773 mutex_self_trylock(pthread_mutex_t mutex)
774 {
775 	int	ret = 0;
776 
777 	switch (mutex->m_type) {
778 
779 	/* case PTHREAD_MUTEX_DEFAULT: */
780 	case PTHREAD_MUTEX_ERRORCHECK:
781 	case PTHREAD_MUTEX_NORMAL:
782 		/*
783 		 * POSIX specifies that mutexes should return EDEADLK if a
784 		 * recursive lock is detected.
785 		 */
786 		ret = EBUSY;
787 		break;
788 
789 	case PTHREAD_MUTEX_RECURSIVE:
790 		/* Increment the lock count: */
791 		mutex->m_data.m_count++;
792 		break;
793 
794 	default:
795 		/* Trap invalid mutex types; */
796 		ret = EINVAL;
797 	}
798 
799 	return (ret);
800 }
801 
802 static inline int
803 mutex_self_lock(pthread_mutex_t mutex)
804 {
805 	int ret = 0;
806 
807 	switch (mutex->m_type) {
808 	/* case PTHREAD_MUTEX_DEFAULT: */
809 	case PTHREAD_MUTEX_ERRORCHECK:
810 		/*
811 		 * POSIX specifies that mutexes should return EDEADLK if a
812 		 * recursive lock is detected.
813 		 */
814 		ret = EDEADLK;
815 		break;
816 
817 	case PTHREAD_MUTEX_NORMAL:
818 		/*
819 		 * What SS2 define as a 'normal' mutex.  Intentionally
820 		 * deadlock on attempts to get a lock you already own.
821 		 */
822 		/* XXX Sched lock. */
823 		PTHREAD_SET_STATE(curthread, PS_DEADLOCK);
824 		_SPINUNLOCK(&(mutex)->lock);
825 		_thread_suspend(curthread, NULL);
826 		PANIC("Shouldn't resume here?\n");
827 		break;
828 
829 	case PTHREAD_MUTEX_RECURSIVE:
830 		/* Increment the lock count: */
831 		mutex->m_data.m_count++;
832 		break;
833 
834 	default:
835 		/* Trap invalid mutex types; */
836 		ret = EINVAL;
837 	}
838 
839 	return (ret);
840 }
841 
842 static inline int
843 mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
844 {
845 	int	ret = 0;
846 
847 	if (mutex == NULL || *mutex == NULL) {
848 		ret = EINVAL;
849 	} else {
850 		/*
851 		 * Defer signals to protect the scheduling queues from
852 		 * access by the signal handler:
853 		 */
854 		/* _thread_kern_sig_defer(); */
855 
856 		/* Lock the mutex structure: */
857 		_SPINLOCK(&(*mutex)->lock);
858 
859 		/* Process according to mutex type: */
860 		switch ((*mutex)->m_protocol) {
861 		/* Default POSIX mutex: */
862 		case PTHREAD_PRIO_NONE:
863 			/*
864 			 * Check if the running thread is not the owner of the
865 			 * mutex:
866 			 */
867 			if ((*mutex)->m_owner != curthread) {
868 				/*
869 				 * Return an invalid argument error for no
870 				 * owner and a permission error otherwise:
871 				 */
872 				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
873 			}
874 			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
875 			    ((*mutex)->m_data.m_count > 0)) {
876 				/* Decrement the count: */
877 				(*mutex)->m_data.m_count--;
878 			} else {
879 				/*
880 				 * Clear the count in case this is recursive
881 				 * mutex.
882 				 */
883 				(*mutex)->m_data.m_count = 0;
884 
885 				/* Remove the mutex from the threads queue. */
886 				_MUTEX_ASSERT_IS_OWNED(*mutex);
887 				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
888 				    (*mutex), m_qe);
889 				_MUTEX_INIT_LINK(*mutex);
890 
891 				/*
892 				 * Get the next thread from the queue of
893 				 * threads waiting on the mutex:
894 				 */
895 				if (((*mutex)->m_owner =
896 			  	    mutex_queue_deq(*mutex)) != NULL) {
897 					/* Make the new owner runnable: */
898 					/* XXXTHR sched lock. */
899 					PTHREAD_NEW_STATE((*mutex)->m_owner,
900 					    PS_RUNNING);
901 
902 					/*
903 					 * Add the mutex to the threads list of
904 					 * owned mutexes:
905 					 */
906 					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
907 					    (*mutex), m_qe);
908 
909 					/*
910 					 * The owner is no longer waiting for
911 					 * this mutex:
912 					 */
913 					(*mutex)->m_owner->data.mutex = NULL;
914 				}
915 			}
916 			break;
917 
918 		/* POSIX priority inheritence mutex: */
919 		case PTHREAD_PRIO_INHERIT:
920 			/*
921 			 * Check if the running thread is not the owner of the
922 			 * mutex:
923 			 */
924 			if ((*mutex)->m_owner != curthread) {
925 				/*
926 				 * Return an invalid argument error for no
927 				 * owner and a permission error otherwise:
928 				 */
929 				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
930 			}
931 			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
932 			    ((*mutex)->m_data.m_count > 0)) {
933 				/* Decrement the count: */
934 				(*mutex)->m_data.m_count--;
935 			} else {
936 				/*
937 				 * Clear the count in case this is recursive
938 				 * mutex.
939 				 */
940 				(*mutex)->m_data.m_count = 0;
941 
942 				/*
943 				 * Restore the threads inherited priority and
944 				 * recompute the active priority (being careful
945 				 * not to override changes in the threads base
946 				 * priority subsequent to locking the mutex).
947 				 */
948 				curthread->inherited_priority =
949 					(*mutex)->m_saved_prio;
950 				curthread->active_priority =
951 				    MAX(curthread->inherited_priority,
952 				    curthread->base_priority);
953 
954 				/*
955 				 * This thread now owns one less priority mutex.
956 				 */
957 				curthread->priority_mutex_count--;
958 
959 				/* Remove the mutex from the threads queue. */
960 				_MUTEX_ASSERT_IS_OWNED(*mutex);
961 				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
962 				    (*mutex), m_qe);
963 				_MUTEX_INIT_LINK(*mutex);
964 
965 				/*
966 				 * Get the next thread from the queue of threads
967 				 * waiting on the mutex:
968 				 */
969 				if (((*mutex)->m_owner =
970 				    mutex_queue_deq(*mutex)) == NULL)
971 					/* This mutex has no priority. */
972 					(*mutex)->m_prio = 0;
973 				else {
974 					/*
975 					 * Track number of priority mutexes owned:
976 					 */
977 					(*mutex)->m_owner->priority_mutex_count++;
978 
979 					/*
980 					 * Add the mutex to the threads list
981 					 * of owned mutexes:
982 					 */
983 					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
984 					    (*mutex), m_qe);
985 
986 					/*
987 					 * The owner is no longer waiting for
988 					 * this mutex:
989 					 */
990 					(*mutex)->m_owner->data.mutex = NULL;
991 
992 					/*
993 					 * Set the priority of the mutex.  Since
994 					 * our waiting threads are in descending
995 					 * priority order, the priority of the
996 					 * mutex becomes the active priority of
997 					 * the thread we just dequeued.
998 					 */
999 					(*mutex)->m_prio =
1000 					    (*mutex)->m_owner->active_priority;
1001 
1002 					/*
1003 					 * Save the owning threads inherited
1004 					 * priority:
1005 					 */
1006 					(*mutex)->m_saved_prio =
1007 						(*mutex)->m_owner->inherited_priority;
1008 
1009 					/*
1010 					 * The owning threads inherited priority
1011 					 * now becomes his active priority (the
1012 					 * priority of the mutex).
1013 					 */
1014 					(*mutex)->m_owner->inherited_priority =
1015 						(*mutex)->m_prio;
1016 
1017 					/*
1018 					 * Make the new owner runnable:
1019 					 */
1020 					/* XXXTHR sched lock. */
1021 					PTHREAD_NEW_STATE((*mutex)->m_owner,
1022 					    PS_RUNNING);
1023 				}
1024 			}
1025 			break;
1026 
1027 		/* POSIX priority ceiling mutex: */
1028 		case PTHREAD_PRIO_PROTECT:
1029 			/*
1030 			 * Check if the running thread is not the owner of the
1031 			 * mutex:
1032 			 */
1033 			if ((*mutex)->m_owner != curthread) {
1034 				/*
1035 				 * Return an invalid argument error for no
1036 				 * owner and a permission error otherwise:
1037 				 */
1038 				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
1039 			}
1040 			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1041 			    ((*mutex)->m_data.m_count > 0)) {
1042 				/* Decrement the count: */
1043 				(*mutex)->m_data.m_count--;
1044 			} else {
1045 				/*
1046 				 * Clear the count in case this is recursive
1047 				 * mutex.
1048 				 */
1049 				(*mutex)->m_data.m_count = 0;
1050 
1051 				/*
1052 				 * Restore the threads inherited priority and
1053 				 * recompute the active priority (being careful
1054 				 * not to override changes in the threads base
1055 				 * priority subsequent to locking the mutex).
1056 				 */
1057 				curthread->inherited_priority =
1058 					(*mutex)->m_saved_prio;
1059 				curthread->active_priority =
1060 				    MAX(curthread->inherited_priority,
1061 				    curthread->base_priority);
1062 
1063 				/*
1064 				 * This thread now owns one less priority mutex.
1065 				 */
1066 				curthread->priority_mutex_count--;
1067 
1068 				/* Remove the mutex from the threads queue. */
1069 				_MUTEX_ASSERT_IS_OWNED(*mutex);
1070 				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
1071 				    (*mutex), m_qe);
1072 				_MUTEX_INIT_LINK(*mutex);
1073 
1074 				/*
1075 				 * Enter a loop to find a waiting thread whose
1076 				 * active priority will not cause a ceiling
1077 				 * violation:
1078 				 */
1079 				while ((((*mutex)->m_owner =
1080 				    mutex_queue_deq(*mutex)) != NULL) &&
1081 				    ((*mutex)->m_owner->active_priority >
1082 				     (*mutex)->m_prio)) {
1083 					/*
1084 					 * Either the mutex ceiling priority
1085 					 * been lowered and/or this threads
1086 					 * priority has been raised subsequent
1087 					 * to this thread being queued on the
1088 					 * waiting list.
1089 					 */
1090 					(*mutex)->m_owner->error = EINVAL;
1091 					PTHREAD_NEW_STATE((*mutex)->m_owner,
1092 					    PS_RUNNING);
1093 					/*
1094 					 * The thread is no longer waiting for
1095 					 * this mutex:
1096 					 */
1097 					(*mutex)->m_owner->data.mutex = NULL;
1098 				}
1099 
1100 				/* Check for a new owner: */
1101 				if ((*mutex)->m_owner != NULL) {
1102 					/*
1103 					 * Track number of priority mutexes owned:
1104 					 */
1105 					(*mutex)->m_owner->priority_mutex_count++;
1106 
1107 					/*
1108 					 * Add the mutex to the threads list
1109 					 * of owned mutexes:
1110 					 */
1111 					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
1112 					    (*mutex), m_qe);
1113 
1114 					/*
1115 					 * The owner is no longer waiting for
1116 					 * this mutex:
1117 					 */
1118 					(*mutex)->m_owner->data.mutex = NULL;
1119 
1120 					/*
1121 					 * Save the owning threads inherited
1122 					 * priority:
1123 					 */
1124 					(*mutex)->m_saved_prio =
1125 						(*mutex)->m_owner->inherited_priority;
1126 
1127 					/*
1128 					 * The owning thread inherits the
1129 					 * ceiling priority of the mutex and
1130 					 * executes at that priority:
1131 					 */
1132 					(*mutex)->m_owner->inherited_priority =
1133 					    (*mutex)->m_prio;
1134 					(*mutex)->m_owner->active_priority =
1135 					    (*mutex)->m_prio;
1136 
1137 					/*
1138 					 * Make the new owner runnable:
1139 					 */
1140 					/* XXXTHR sched lock. */
1141 					PTHREAD_NEW_STATE((*mutex)->m_owner,
1142 					    PS_RUNNING);
1143 				}
1144 			}
1145 			break;
1146 
1147 		/* Trap invalid mutex types: */
1148 		default:
1149 			/* Return an invalid argument error: */
1150 			ret = EINVAL;
1151 			break;
1152 		}
1153 
1154 		if ((ret == 0) && (add_reference != 0)) {
1155 			/* Increment the reference count: */
1156 			(*mutex)->m_refcount++;
1157 		}
1158 
1159 		/* Unlock the mutex structure: */
1160 		_SPINUNLOCK(&(*mutex)->lock);
1161 
1162 		/*
1163 		 * Undefer and handle pending signals, yielding if
1164 		 * necessary:
1165 		 */
1166 		/* _thread_kern_sig_undefer(); */
1167 	}
1168 
1169 	/* Return the completion status: */
1170 	return (ret);
1171 }
1172 
1173 
1174 /*
1175  * This function is called when a change in base priority occurs for
1176  * a thread that is holding or waiting for a priority protection or
1177  * inheritence mutex.  A change in a threads base priority can effect
1178  * changes to active priorities of other threads and to the ordering
1179  * of mutex locking by waiting threads.
1180  *
1181  * This must be called while thread scheduling is deferred.
1182  */
void
_mutex_notify_priochange(pthread_t pthread)
{
	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 */
		mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also effect a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (pthread->state == PS_MUTEX_WAIT) {
		/* Lock the mutex structure: */
		_SPINLOCK(&pthread->data.mutex->lock);

		/*
		 * Check to make sure this thread is still in the same state
		 * (the spinlock above can yield the CPU to another thread):
		 */
		if (pthread->state == PS_MUTEX_WAIT) {
			/*
			 * Remove and reinsert this thread into the list of
			 * waiting threads to preserve decreasing priority
			 * order.
			 */
			mutex_queue_remove(pthread->data.mutex, pthread);
			mutex_queue_enq(pthread->data.mutex, pthread);

			if (pthread->data.mutex->m_protocol ==
			     PTHREAD_PRIO_INHERIT) {
				/* Adjust priorities: */
				mutex_priority_adjust(pthread->data.mutex);
			}
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&pthread->data.mutex->lock);
	}
}
1237 
1238 /*
1239  * Called when a new thread is added to the mutex waiting queue or
1240  * when a threads priority changes that is already in the mutex
1241  * waiting queue.
1242  */
static void
mutex_priority_adjust(pthread_mutex_t mutex)
{
	pthread_t	pthread_next, pthread = mutex->m_owner;
	int		temp_prio;
	pthread_mutex_t	m = mutex;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Walk the chain of blocked owners: each iteration rescans the
	 * mutexes owned by "pthread" after "m", and if that changed the
	 * thread's active priority while it is itself blocked on another
	 * priority inheritance mutex, the new priority is propagated to
	 * that mutex and its owner, until the chain ends.
	 */
	while (m != NULL) {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all the mutexes this thread has
		 * locked since taking this mutex.  This also has a
		 * potential side-effect of changing the thread's priority.
		 */
		mutex_rescan_owned(pthread, m);

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    (pthread->state == PS_MUTEX_WAIT) &&
		    (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Grab the mutex this thread is waiting on: */
			m = pthread->data.mutex;

			/*
			 * The priority for this thread has changed.  Remove
			 * and reinsert this thread into the list of waiting
			 * threads to preserve decreasing priority order.
			 */
			mutex_queue_remove(m, pthread);
			mutex_queue_enq(m, pthread);

			/* Grab the waiting thread with highest priority: */
			pthread_next = TAILQ_FIRST(&m->m_queue);

			/*
			 * Calculate the mutex priority as the maximum of the
			 * highest active priority of any waiting threads and
			 * the owning thread's active priority.
			 */
			temp_prio = MAX(pthread_next->active_priority,
			    MAX(m->m_saved_prio, m->m_owner->base_priority));

			if (temp_prio != m->m_prio) {
				/*
				 * The priority needs to be propagated to the
				 * mutex this thread is waiting on and up to
				 * the owner of that mutex.
				 */
				m->m_prio = temp_prio;
				pthread = m->m_owner;
			}
			else
				/* We're done: */
				m = NULL;

		}
		else
			/* We're done: */
			m = NULL;
	}
}
1336 
/*
 * Recompute the priorities of the priority inheritance mutexes owned by
 * "pthread", walking the thread's owned-mutex list starting after "mutex"
 * (or from the head of the list when mutex is NULL), then recompute the
 * thread's inherited and active priorities from the result.
 */
static void
mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
{
	int		active_prio, inherited_prio;
	pthread_mutex_t	m;
	pthread_t	pthread_next;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	}
	else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	while (m != NULL) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Update the priority saved in this mutex to
			 * reflect the priority inherited from the mutexes
			 * taken before it.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}

		/* Advance to the next mutex owned by this thread: */
		m = TAILQ_NEXT(m, m_qe);
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
#if 0
		/*
		 * If this thread is in the priority queue, it must be
		 * removed and reinserted for its new priority.
	 	 */
		if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
			/*
			 * Remove the thread from the priority queue
			 * before changing its priority:
			 */
			PTHREAD_PRIOQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_HEAD(pthread);
			}
			else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_TAIL(pthread);
			}
		}
		else {
			/* Set the new active priority. */
			pthread->active_priority = active_prio;
		}
#endif
		pthread->active_priority = active_prio;
	}
}
1452 
1453 void
1454 _mutex_unlock_private(pthread_t pthread)
1455 {
1456 	struct pthread_mutex	*m, *m_next;
1457 
1458 	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1459 		m_next = TAILQ_NEXT(m, m_qe);
1460 		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1461 			_pthread_mutex_unlock(&m);
1462 	}
1463 }
1464 
1465 void
1466 _mutex_lock_backout(pthread_t pthread)
1467 {
1468 	struct pthread_mutex	*mutex;
1469 
1470 	/*
1471 	 * Defer signals to protect the scheduling queues from
1472 	 * access by the signal handler:
1473 	 */
1474 	/* _thread_kern_sig_defer();*/
1475 	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1476 		mutex = pthread->data.mutex;
1477 
1478 		/* Lock the mutex structure: */
1479 		_SPINLOCK(&mutex->lock);
1480 
1481 		mutex_queue_remove(mutex, pthread);
1482 
1483 		/* This thread is no longer waiting for the mutex: */
1484 		pthread->data.mutex = NULL;
1485 
1486 		/* Unlock the mutex structure: */
1487 		_SPINUNLOCK(&mutex->lock);
1488 
1489 	}
1490 	/*
1491 	 * Undefer and handle pending signals, yielding if
1492 	 * necessary:
1493 	 */
1494 	/* _thread_kern_sig_undefer(); */
1495 }
1496 
1497 /*
1498  * Dequeue a waiting thread from the head of a mutex queue in descending
1499  * priority order.
1500  */
1501 static inline pthread_t
1502 mutex_queue_deq(pthread_mutex_t mutex)
1503 {
1504 	pthread_t pthread;
1505 
1506 	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1507 		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1508 		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1509 
1510 		/*
1511 		 * Only exit the loop if the thread hasn't been
1512 		 * cancelled.
1513 		 */
1514 		if ((pthread->cancelflags & PTHREAD_CANCELLING) == 0 &&
1515 		    pthread->state == PS_MUTEX_WAIT)
1516 			break;
1517 	}
1518 
1519 	return (pthread);
1520 }
1521 
1522 /*
1523  * Remove a waiting thread from a mutex queue in descending priority order.
1524  */
1525 static inline void
1526 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1527 {
1528 	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1529 		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1530 		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1531 	}
1532 }
1533 
1534 /*
1535  * Enqueue a waiting thread to a queue in descending priority order.
1536  */
1537 static inline void
1538 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1539 {
1540 	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1541 
1542 	PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
1543 	/*
1544 	 * For the common case of all threads having equal priority,
1545 	 * we perform a quick check against the priority of the thread
1546 	 * at the tail of the queue.
1547 	 */
1548 	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1549 		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1550 	else {
1551 		tid = TAILQ_FIRST(&mutex->m_queue);
1552 		while (pthread->active_priority <= tid->active_priority)
1553 			tid = TAILQ_NEXT(tid, sqe);
1554 		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1555 	}
1556 	pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
1557 }
1558 
1559