xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision 262e143bd46171a6415a5b28af260a5efa2a3db8)
1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by John Birrell.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 
#include <sys/param.h>
#include <sys/queue.h>

#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#include "thr_private.h"
42 
#if defined(_PTHREADS_INVARIANTS)
/*
 * Debugging macros, enabled when built with _PTHREADS_INVARIANTS:
 * sanity-check a mutex's linkage on the owning thread's mutex queue.
 */
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
/*
 * No trailing semicolon after "while (0)": the caller supplies it, and
 * an extra one here breaks expansion inside an unbraced if/else.
 */
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

/* Is the thread currently queued on a synchronization queue? */
#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
/* Release the storage of a mutex allocated by mutex_init(). */
#define	MUTEX_DESTROY(m) do {		\
	free(m);			\
} while (0)
72 
73 
74 /*
75  * Prototypes
76  */
77 static long		mutex_handoff(struct pthread *, struct pthread_mutex *);
78 static int		mutex_self_trylock(struct pthread *, pthread_mutex_t);
79 static int		mutex_self_lock(struct pthread *, pthread_mutex_t,
80 				const struct timespec *abstime);
81 static int		mutex_unlock_common(pthread_mutex_t *, int);
82 static void		mutex_priority_adjust(struct pthread *, pthread_mutex_t);
83 static void		mutex_rescan_owned (struct pthread *, struct pthread *,
84 			    struct pthread_mutex *);
85 #if 0
86 static pthread_t	mutex_queue_deq(pthread_mutex_t);
87 #endif
88 static void		mutex_queue_remove(pthread_mutex_t, pthread_t);
89 static void		mutex_queue_enq(pthread_mutex_t, pthread_t);
90 
91 __weak_reference(__pthread_mutex_init, pthread_mutex_init);
92 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
93 __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
94 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
95 
96 /* Single underscore versions provided for libc internal usage: */
97 /* No difference between libc and application usage of these: */
98 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
99 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
100 
101 static int
102 mutex_init(pthread_mutex_t *mutex,
103     const pthread_mutexattr_t *mutex_attr, int private)
104 {
105 	struct pthread_mutex *pmutex;
106 	enum pthread_mutextype type;
107 	int		protocol;
108 	int		ceiling;
109 	int		flags;
110 	int		ret = 0;
111 
112 	/* Check if default mutex attributes: */
113 	if (mutex_attr == NULL || *mutex_attr == NULL) {
114 		/* Default to a (error checking) POSIX mutex: */
115 		type = PTHREAD_MUTEX_ERRORCHECK;
116 		protocol = PTHREAD_PRIO_NONE;
117 		ceiling = THR_MAX_PRIORITY;
118 		flags = 0;
119 	}
120 
121 	/* Check mutex type: */
122 	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
123 	    ((*mutex_attr)->m_type >= PTHREAD_MUTEX_TYPE_MAX))
124 		/* Return an invalid argument error: */
125 		ret = EINVAL;
126 
127 	/* Check mutex protocol: */
128 	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
129 	    ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
130 		/* Return an invalid argument error: */
131 		ret = EINVAL;
132 
133 	else {
134 		/* Use the requested mutex type and protocol: */
135 		type = (*mutex_attr)->m_type;
136 		protocol = (*mutex_attr)->m_protocol;
137 		ceiling = (*mutex_attr)->m_ceiling;
138 		flags = (*mutex_attr)->m_flags;
139 	}
140 
141 	/* Check no errors so far: */
142 	if (ret == 0) {
143 		if ((pmutex = (pthread_mutex_t)
144 		    malloc(sizeof(struct pthread_mutex))) == NULL) {
145 			ret = ENOMEM;
146 		} else {
147 			_thr_umtx_init(&pmutex->m_lock);
148 			/* Set the mutex flags: */
149 			pmutex->m_flags = flags;
150 
151 			/* Process according to mutex type: */
152 			switch (type) {
153 			/* case PTHREAD_MUTEX_DEFAULT: */
154 			case PTHREAD_MUTEX_ERRORCHECK:
155 			case PTHREAD_MUTEX_NORMAL:
156 				/* Nothing to do here. */
157 				break;
158 
159 			/* Single UNIX Spec 2 recursive mutex: */
160 			case PTHREAD_MUTEX_RECURSIVE:
161 				/* Reset the mutex count: */
162 				pmutex->m_count = 0;
163 				break;
164 
165 			/* Trap invalid mutex types: */
166 			default:
167 				/* Return an invalid argument error: */
168 				ret = EINVAL;
169 				break;
170 			}
171 			if (ret == 0) {
172 				/* Initialise the rest of the mutex: */
173 				TAILQ_INIT(&pmutex->m_queue);
174 				pmutex->m_flags |= MUTEX_FLAGS_INITED;
175 				if (private)
176 					pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
177 				pmutex->m_owner = NULL;
178 				pmutex->m_type = type;
179 				pmutex->m_protocol = protocol;
180 				pmutex->m_refcount = 0;
181 				if (protocol == PTHREAD_PRIO_PROTECT)
182 					pmutex->m_prio = ceiling;
183 				else
184 					pmutex->m_prio = -1;
185 				pmutex->m_saved_prio = 0;
186 				MUTEX_INIT_LINK(pmutex);
187 				*mutex = pmutex;
188 			} else {
189 				/* Free the mutex lock structure: */
190 				MUTEX_DESTROY(pmutex);
191 				*mutex = NULL;
192 			}
193 		}
194 	}
195 	/* Return the completion status: */
196 	return (ret);
197 }
198 
199 static int
200 init_static(struct pthread *thread, pthread_mutex_t *mutex)
201 {
202 	int ret;
203 
204 	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
205 
206 	if (*mutex == NULL)
207 		ret = mutex_init(mutex, NULL, 0);
208 	else
209 		ret = 0;
210 
211 	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
212 
213 	return (ret);
214 }
215 
216 static int
217 init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
218 {
219 	int ret;
220 
221 	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
222 
223 	if (*mutex == NULL)
224 		ret = mutex_init(mutex, NULL, 1);
225 	else
226 		ret = 0;
227 
228 	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
229 
230 	return (ret);
231 }
232 
233 int
234 _pthread_mutex_init(pthread_mutex_t *mutex,
235     const pthread_mutexattr_t *mutex_attr)
236 {
237 	return mutex_init(mutex, mutex_attr, 1);
238 }
239 
240 int
241 __pthread_mutex_init(pthread_mutex_t *mutex,
242     const pthread_mutexattr_t *mutex_attr)
243 {
244 	return mutex_init(mutex, mutex_attr, 0);
245 }
246 
247 int
248 _mutex_reinit(pthread_mutex_t *mutex)
249 {
250 	_thr_umtx_init(&(*mutex)->m_lock);
251 	TAILQ_INIT(&(*mutex)->m_queue);
252 	MUTEX_INIT_LINK(*mutex);
253 	(*mutex)->m_owner = NULL;
254 	(*mutex)->m_count = 0;
255 	(*mutex)->m_refcount = 0;
256 	(*mutex)->m_prio = 0;
257 	(*mutex)->m_saved_prio = 0;
258 	return (0);
259 }
260 
/*
 * Reset the calling thread's mutex bookkeeping: both owned-mutex
 * queues are reinitialized and the priority-mutex count is cleared.
 * NOTE(review): presumably invoked in the child after fork(), where
 * curthread is the only surviving thread -- confirm against caller.
 */
void
_mutex_fork(struct pthread *curthread)
{
	TAILQ_INIT(&curthread->mutexq);
	TAILQ_INIT(&curthread->pri_mutexq);
	curthread->priority_mutex_count = 0;
#if 0
	/*
	 * Disabled alternative: re-own simple mutexes with this thread's
	 * tid and scrub waiter state from priority mutexes, instead of
	 * dropping the queues wholesale.
	 */
	struct pthread_mutex *m;

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe) {
		m->m_lock = (umtx_t)curthread->tid;
	}

	/* Clear contender for priority mutexes */
	TAILQ_FOREACH(m, &curthread->pri_mutexq, m_qe) {
		/* clear another thread locked us */
		_thr_umtx_init(&m->m_lock);
		TAILQ_INIT(&m->m_queue);
	}
#endif
}
282 
/*
 * Destroy an initialized, unlocked, unreferenced mutex and set the
 * caller's pointer to NULL.  Returns EINVAL for a NULL mutex, EBUSY
 * (or the trylock error) if the mutex is locked, has queued waiters,
 * or is referenced by a condition variable.
 */
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/*
		 * Try to lock the mutex structure; we only need to
		 * try once -- if that fails, the mutex is in use.
		 */
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret)
			return (ret);

		/*
		 * Check the mutex's other fields to see if it is in
		 * use: an owner or queued waiters (priority mutex
		 * types), or condition variables referencing it.
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be free'd
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			_thr_umtx_unlock(&m->m_lock, curthread->tid);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}
334 
/*
 * Common trylock path.  For PTHREAD_PRIO_NONE mutexes a single umtx
 * trylock suffices.  Priority-inheritance and priority-protection
 * mutexes are serialized on the mutex structure lock and update the
 * owning thread's priority bookkeeping under its scheduling lock.
 * Returns 0 on acquisition, EBUSY, EINVAL, or a self-trylock result.
 */
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	int ret = 0;

	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_trylock_common");

	/* Short cut for simple mutex. */
	if ((*mutex)->m_protocol == PTHREAD_PRIO_NONE) {
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret == 0) {
			(*mutex)->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread) {
			/* Relock by the owner: type-specific result. */
			ret = mutex_self_trylock(curthread, *mutex);
		} /* else {} */

		return (ret);
	}

	/* Code for priority mutex */

	/* Lock the mutex structure: */
	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex type: */
	switch ((*mutex)->m_protocol) {
	/* POSIX priority inheritence mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_LOCK(curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority = (*mutex)->m_prio;
			THR_UNLOCK(curthread);

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_LOCK(curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;
			THR_UNLOCK(curthread);
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex types: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	/* Unlock the mutex structure: */
	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

	/* Return the completion status: */
	return (ret);
}
458 
459 int
460 __pthread_mutex_trylock(pthread_mutex_t *mutex)
461 {
462 	struct pthread *curthread = _get_curthread();
463 	int ret = 0;
464 
465 	/*
466 	 * If the mutex is statically initialized, perform the dynamic
467 	 * initialization:
468 	 */
469 	if ((*mutex != NULL) ||
470 	    ((ret = init_static(curthread, mutex)) == 0))
471 		ret = mutex_trylock_common(curthread, mutex);
472 
473 	return (ret);
474 }
475 
476 int
477 _pthread_mutex_trylock(pthread_mutex_t *mutex)
478 {
479 	struct pthread	*curthread = _get_curthread();
480 	int	ret = 0;
481 
482 	/*
483 	 * If the mutex is statically initialized, perform the dynamic
484 	 * initialization marking the mutex private (delete safe):
485 	 */
486 	if ((*mutex != NULL) ||
487 	    ((ret = init_static_private(curthread, mutex)) == 0))
488 		ret = mutex_trylock_common(curthread, mutex);
489 
490 	return (ret);
491 }
492 
493 static int
494 mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
495 	const struct timespec * abstime)
496 {
497 	struct  timespec ts, ts2;
498 	long	cycle;
499 	int	ret = 0;
500 
501 	THR_ASSERT((m != NULL) && (*m != NULL),
502 	    "Uninitialized mutex in mutex_lock_common");
503 
504 	if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
505 	    abstime->tv_nsec >= 1000000000))
506 		return (EINVAL);
507 
508 	/* Short cut for simple mutex. */
509 
510 	if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
511 		/* Default POSIX mutex: */
512 		ret = THR_UMTX_TRYLOCK(curthread, &(*m)->m_lock);
513 		if (ret == 0) {
514 			(*m)->m_owner = curthread;
515 			/* Add to the list of owned mutexes: */
516 			MUTEX_ASSERT_NOT_OWNED(*m);
517 			TAILQ_INSERT_TAIL(&curthread->mutexq,
518 			    (*m), m_qe);
519 		} else if ((*m)->m_owner == curthread) {
520 			ret = mutex_self_lock(curthread, *m, abstime);
521 		} else {
522 			if (abstime == NULL) {
523 				THR_UMTX_LOCK(curthread, &(*m)->m_lock);
524 				ret = 0;
525 			} else {
526 				clock_gettime(CLOCK_REALTIME, &ts);
527 				TIMESPEC_SUB(&ts2, abstime, &ts);
528 				ret = THR_UMTX_TIMEDLOCK(curthread,
529 					&(*m)->m_lock, &ts2);
530 				/*
531 				 * Timed out wait is not restarted if
532 				 * it was interrupted, not worth to do it.
533 				 */
534 				if (ret == EINTR)
535 					ret = ETIMEDOUT;
536 			}
537 			if (ret == 0) {
538 				(*m)->m_owner = curthread;
539 				/* Add to the list of owned mutexes: */
540 				MUTEX_ASSERT_NOT_OWNED(*m);
541 				TAILQ_INSERT_TAIL(&curthread->mutexq,
542 				    (*m), m_qe);
543 			}
544 		}
545 		return (ret);
546 	}
547 
548 	/* Code for priority mutex */
549 
550 	/*
551 	 * Enter a loop waiting to become the mutex owner.  We need a
552 	 * loop in case the waiting thread is interrupted by a signal
553 	 * to execute a signal handler.  It is not (currently) possible
554 	 * to remain in the waiting queue while running a handler.
555 	 * Instead, the thread is interrupted and backed out of the
556 	 * waiting queue prior to executing the signal handler.
557 	 */
558 	do {
559 		/* Lock the mutex structure: */
560 		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
561 
562 		/*
563 		 * If the mutex was statically allocated, properly
564 		 * initialize the tail queue.
565 		 */
566 		if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
567 			TAILQ_INIT(&(*m)->m_queue);
568 			(*m)->m_flags |= MUTEX_FLAGS_INITED;
569 			MUTEX_INIT_LINK(*m);
570 		}
571 
572 		/* Process according to mutex type: */
573 		switch ((*m)->m_protocol) {
574 		/* POSIX priority inheritence mutex: */
575 		case PTHREAD_PRIO_INHERIT:
576 			/* Check if this mutex is not locked: */
577 			if ((*m)->m_owner == NULL) {
578 				/* Lock the mutex for this thread: */
579 				(*m)->m_owner = curthread;
580 
581 				THR_LOCK(curthread);
582 				/* Track number of priority mutexes owned: */
583 				curthread->priority_mutex_count++;
584 
585 				/*
586 				 * The mutex takes on attributes of the
587 				 * running thread when there are no waiters.
588 				 * Make sure the thread's scheduling lock is
589 				 * held while priorities are adjusted.
590 				 */
591 				(*m)->m_prio = curthread->active_priority;
592 				(*m)->m_saved_prio =
593 				    curthread->inherited_priority;
594 				curthread->inherited_priority = (*m)->m_prio;
595 				THR_UNLOCK(curthread);
596 
597 				/* Add to the list of owned mutexes: */
598 				MUTEX_ASSERT_NOT_OWNED(*m);
599 				TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
600 				    (*m), m_qe);
601 
602 				/* Unlock the mutex structure: */
603 				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
604 			} else if ((*m)->m_owner == curthread) {
605 				ret = mutex_self_lock(curthread, *m, abstime);
606 
607 				/* Unlock the mutex structure: */
608 				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
609 			} else {
610 				/*
611 				 * Join the queue of threads waiting to lock
612 				 * the mutex and save a pointer to the mutex.
613 				 */
614 				mutex_queue_enq(*m, curthread);
615 				curthread->data.mutex = *m;
616 
617 				if (curthread->active_priority > (*m)->m_prio)
618 					/* Adjust priorities: */
619 					mutex_priority_adjust(curthread, *m);
620 
621 				THR_LOCK(curthread);
622 				cycle = curthread->cycle;
623 				THR_UNLOCK(curthread);
624 
625 				/* Unlock the mutex structure: */
626 				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
627 
628 				clock_gettime(CLOCK_REALTIME, &ts);
629 				TIMESPEC_SUB(&ts2, abstime, &ts);
630 				ret = _thr_umtx_wait(&curthread->cycle, cycle,
631 					 &ts2);
632 				if (ret == EINTR)
633 					ret = 0;
634 
635 				if (THR_IN_MUTEXQ(curthread)) {
636 					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
637 					mutex_queue_remove(*m, curthread);
638 					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
639 				}
640 				/*
641 				 * Only clear these after assuring the
642 				 * thread is dequeued.
643 				 */
644 				curthread->data.mutex = NULL;
645 			}
646 			break;
647 
648 		/* POSIX priority protection mutex: */
649 		case PTHREAD_PRIO_PROTECT:
650 			/* Check for a priority ceiling violation: */
651 			if (curthread->active_priority > (*m)->m_prio) {
652 				/* Unlock the mutex structure: */
653 				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
654 				ret = EINVAL;
655 			}
656 			/* Check if this mutex is not locked: */
657 			else if ((*m)->m_owner == NULL) {
658 				/*
659 				 * Lock the mutex for the running
660 				 * thread:
661 				 */
662 				(*m)->m_owner = curthread;
663 
664 				THR_LOCK(curthread);
665 				/* Track number of priority mutexes owned: */
666 				curthread->priority_mutex_count++;
667 
668 				/*
669 				 * The running thread inherits the ceiling
670 				 * priority of the mutex and executes at that
671 				 * priority.  Make sure the thread's
672 				 * scheduling lock is held while priorities
673 				 * are adjusted.
674 				 */
675 				curthread->active_priority = (*m)->m_prio;
676 				(*m)->m_saved_prio =
677 				    curthread->inherited_priority;
678 				curthread->inherited_priority = (*m)->m_prio;
679 				THR_UNLOCK(curthread);
680 
681 				/* Add to the list of owned mutexes: */
682 				MUTEX_ASSERT_NOT_OWNED(*m);
683 				TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
684 				    (*m), m_qe);
685 
686 				/* Unlock the mutex structure: */
687 				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
688 			} else if ((*m)->m_owner == curthread) {
689 				ret = mutex_self_lock(curthread, *m, abstime);
690 
691 				/* Unlock the mutex structure: */
692 				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
693 			} else {
694 				/*
695 				 * Join the queue of threads waiting to lock
696 				 * the mutex and save a pointer to the mutex.
697 				 */
698 				mutex_queue_enq(*m, curthread);
699 				curthread->data.mutex = *m;
700 
701 				/* Clear any previous error: */
702 				curthread->error = 0;
703 
704 				THR_LOCK(curthread);
705 				cycle = curthread->cycle;
706 				THR_UNLOCK(curthread);
707 
708 				/* Unlock the mutex structure: */
709 				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
710 
711 				clock_gettime(CLOCK_REALTIME, &ts);
712 				TIMESPEC_SUB(&ts2, abstime, &ts);
713 				ret = _thr_umtx_wait(&curthread->cycle, cycle,
714 					&ts2);
715 				if (ret == EINTR)
716 					ret = 0;
717 
718 				curthread->data.mutex = NULL;
719 				if (THR_IN_MUTEXQ(curthread)) {
720 					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
721 					mutex_queue_remove(*m, curthread);
722 					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
723 				}
724 				/*
725 				 * Only clear these after assuring the
726 				 * thread is dequeued.
727 				 */
728 				curthread->data.mutex = NULL;
729 
730 				/*
731 				 * The threads priority may have changed while
732 				 * waiting for the mutex causing a ceiling
733 				 * violation.
734 				 */
735 				ret = curthread->error;
736 				curthread->error = 0;
737 			}
738 			break;
739 
740 		/* Trap invalid mutex types: */
741 		default:
742 			/* Unlock the mutex structure: */
743 			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
744 
745 			/* Return an invalid argument error: */
746 			ret = EINVAL;
747 			break;
748 		}
749 
750 	} while (((*m)->m_owner != curthread) && (ret == 0));
751 
752 	/* Return the completion status: */
753 	return (ret);
754 }
755 
756 int
757 __pthread_mutex_lock(pthread_mutex_t *m)
758 {
759 	struct pthread *curthread;
760 	int	ret = 0;
761 
762 	_thr_check_init();
763 
764 	curthread = _get_curthread();
765 
766 	/*
767 	 * If the mutex is statically initialized, perform the dynamic
768 	 * initialization:
769 	 */
770 	if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
771 		ret = mutex_lock_common(curthread, m, NULL);
772 
773 	return (ret);
774 }
775 
776 int
777 _pthread_mutex_lock(pthread_mutex_t *m)
778 {
779 	struct pthread *curthread;
780 	int	ret = 0;
781 
782 	_thr_check_init();
783 
784 	curthread = _get_curthread();
785 
786 	/*
787 	 * If the mutex is statically initialized, perform the dynamic
788 	 * initialization marking it private (delete safe):
789 	 */
790 	if ((*m != NULL) ||
791 	    ((ret = init_static_private(curthread, m)) == 0))
792 		ret = mutex_lock_common(curthread, m, NULL);
793 
794 	return (ret);
795 }
796 
797 int
798 __pthread_mutex_timedlock(pthread_mutex_t *m,
799 	const struct timespec *abs_timeout)
800 {
801 	struct pthread *curthread;
802 	int	ret = 0;
803 
804 	_thr_check_init();
805 
806 	curthread = _get_curthread();
807 
808 	/*
809 	 * If the mutex is statically initialized, perform the dynamic
810 	 * initialization:
811 	 */
812 	if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
813 		ret = mutex_lock_common(curthread, m, abs_timeout);
814 
815 	return (ret);
816 }
817 
818 int
819 _pthread_mutex_timedlock(pthread_mutex_t *m,
820 	const struct timespec *abs_timeout)
821 {
822 	struct pthread *curthread;
823 	int	ret = 0;
824 
825 	_thr_check_init();
826 
827 	curthread = _get_curthread();
828 
829 	/*
830 	 * If the mutex is statically initialized, perform the dynamic
831 	 * initialization marking it private (delete safe):
832 	 */
833 	if ((*m != NULL) ||
834 	    ((ret = init_static_private(curthread, m)) == 0))
835 		ret = mutex_lock_common(curthread, m, abs_timeout);
836 
837 	return (ret);
838 }
839 
840 int
841 _pthread_mutex_unlock(pthread_mutex_t *m)
842 {
843 	return (mutex_unlock_common(m, /* add reference */ 0));
844 }
845 
846 int
847 _mutex_cv_unlock(pthread_mutex_t *m)
848 {
849 	return (mutex_unlock_common(m, /* add reference */ 1));
850 }
851 
852 int
853 _mutex_cv_lock(pthread_mutex_t *m)
854 {
855 	struct  pthread *curthread;
856 	int	ret;
857 
858 	curthread = _get_curthread();
859 	if ((ret = _pthread_mutex_lock(m)) == 0)
860 		(*m)->m_refcount--;
861 	return (ret);
862 }
863 
864 static int
865 mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
866 {
867 	int	ret;
868 
869 	switch (m->m_type) {
870 	/* case PTHREAD_MUTEX_DEFAULT: */
871 	case PTHREAD_MUTEX_ERRORCHECK:
872 	case PTHREAD_MUTEX_NORMAL:
873 		ret = EBUSY;
874 		break;
875 
876 	case PTHREAD_MUTEX_RECURSIVE:
877 		/* Increment the lock count: */
878 		if (m->m_count + 1 > 0) {
879 			m->m_count++;
880 			ret = 0;
881 		} else
882 			ret = EAGAIN;
883 		break;
884 
885 	default:
886 		/* Trap invalid mutex types; */
887 		ret = EINVAL;
888 	}
889 
890 	return (ret);
891 }
892 
893 static int
894 mutex_self_lock(struct pthread *curthread, pthread_mutex_t m,
895 	const struct timespec *abstime)
896 {
897 	struct timespec ts1, ts2;
898 	int ret;
899 
900 	switch (m->m_type) {
901 	/* case PTHREAD_MUTEX_DEFAULT: */
902 	case PTHREAD_MUTEX_ERRORCHECK:
903 		if (abstime) {
904 			clock_gettime(CLOCK_REALTIME, &ts1);
905 			TIMESPEC_SUB(&ts2, abstime, &ts1);
906 			__sys_nanosleep(&ts2, NULL);
907 			ret = ETIMEDOUT;
908 		} else {
909 			/*
910 			 * POSIX specifies that mutexes should return
911 			 * EDEADLK if a recursive lock is detected.
912 			 */
913 			ret = EDEADLK;
914 		}
915 		break;
916 
917 	case PTHREAD_MUTEX_NORMAL:
918 		/*
919 		 * What SS2 define as a 'normal' mutex.  Intentionally
920 		 * deadlock on attempts to get a lock you already own.
921 		 */
922 		ret = 0;
923 		if (m->m_protocol != PTHREAD_PRIO_NONE) {
924 			/* Unlock the mutex structure: */
925 			THR_LOCK_RELEASE(curthread, &m->m_lock);
926 		}
927 		if (abstime) {
928 			clock_gettime(CLOCK_REALTIME, &ts1);
929 			TIMESPEC_SUB(&ts2, abstime, &ts1);
930 			__sys_nanosleep(&ts2, NULL);
931 			ret = ETIMEDOUT;
932 		} else {
933 			ts1.tv_sec = 30;
934 			ts1.tv_nsec = 0;
935 			for (;;)
936 				__sys_nanosleep(&ts1, NULL);
937 		}
938 		break;
939 
940 	case PTHREAD_MUTEX_RECURSIVE:
941 		/* Increment the lock count: */
942 		if (m->m_count + 1 > 0) {
943 			m->m_count++;
944 			ret = 0;
945 		} else
946 			ret = EAGAIN;
947 		break;
948 
949 	default:
950 		/* Trap invalid mutex types; */
951 		ret = EINVAL;
952 	}
953 
954 	return (ret);
955 }
956 
/*
 * Common unlock path.  add_reference != 0 bumps m_refcount after a
 * successful unlock (used by _mutex_cv_unlock so the mutex survives
 * while a condition variable references it).  Returns 0, EINVAL for a
 * NULL mutex, or EPERM when the caller is not the owner.
 */
static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread *curthread = _get_curthread();
	/* NOTE(review): tid receives mutex_handoff()'s result below but
	 * is never read afterwards in this function. */
	long tid = -1;
	int ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Short cut for simple mutex. */

		if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if (__predict_false((*m)->m_owner != curthread)) {
				ret = EPERM;
			} else if (__predict_false(
				  (*m)->m_type == PTHREAD_MUTEX_RECURSIVE &&
			          (*m)->m_count > 0)) {
				/* Decrement the count: */
				(*m)->m_count--;
				if (add_reference)
					(*m)->m_refcount++;
			} else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;
				(*m)->m_owner = NULL;
				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&curthread->mutexq, (*m), m_qe);
				MUTEX_INIT_LINK(*m);
				if (add_reference)
					(*m)->m_refcount++;
				/*
				 * Hand off the mutex to the next waiting
				 * thread.
				 */
				_thr_umtx_unlock(&(*m)->m_lock, curthread->tid);
			}
			return (ret);
		}

		/* Code for priority mutex */

		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_LOCK(curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_UNLOCK(curthread);

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				tid = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_LOCK(curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_UNLOCK(curthread);

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				tid = mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
	}

	/* Return the completion status: */
	return (ret);
}
1134 
1135 
/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called without the target thread's scheduling lock held.
 *
 *   curthread      - the calling thread (used only for lock bookkeeping)
 *   pthread        - the thread whose base priority has changed
 *   propagate_prio - non-zero to also propagate the change through any
 *                    mutex "pthread" is currently blocked on
 */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
	struct pthread_mutex *m;

	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 *
		 * Be sure to lock the first mutex in the list of owned
		 * mutexes.  This acts as a barrier against another
		 * simultaneous call to change the thread's priority
		 * and from the owning thread releasing the mutex.
		 */
		m = TAILQ_FIRST(&pthread->pri_mutexq);
		if (m != NULL) {
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
			/*
			 * Make sure the thread still owns the lock.
			 * (The list head may have changed while we were
			 * blocked acquiring m's lock above.)
			 */
			if (m == TAILQ_FIRST(&pthread->pri_mutexq))
				mutex_rescan_owned(curthread, pthread,
				    /* rescan all owned */ NULL);
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (propagate_prio != 0) {
		/*
		 * Lock the thread's scheduling queue.  This is a bit
		 * convoluted; the "in synchronization queue flag" can
		 * only be cleared with both the thread's scheduling and
		 * mutex locks held.  The thread's pointer to the wanted
		 * mutex is guaranteed to be valid during this time.
		 */
		THR_THREAD_LOCK(curthread, pthread);

		if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
		    ((m = pthread->data.mutex) == NULL))
			THR_THREAD_UNLOCK(curthread, pthread);
		else {
			/*
			 * This thread is currently waiting on a mutex; unlock
			 * the scheduling queue lock and lock the mutex.  We
			 * can't hold both at the same time because the locking
			 * order could cause a deadlock.
			 */
			THR_THREAD_UNLOCK(curthread, pthread);
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Check to make sure this thread is still in the
			 * same state (the lock above can yield the CPU to
			 * another thread or the thread may be running on
			 * another CPU).
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (pthread->data.mutex == m)) {
				/*
				 * Remove and reinsert this thread into
				 * the list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, m);
			}

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}
1238 
/*
 * Called when a new thread is added to the mutex waiting queue or
 * when a thread's priority changes that is already in the mutex
 * waiting queue.
 *
 * This must be called with the mutex locked by the current thread.
 */
static void
mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
	pthread_mutex_t	m = mutex;
	struct pthread	*pthread_next, *pthread = mutex->m_owner;
	int		done, temp_prio;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Don't unlock the mutex passed in as an argument.  It is
	 * expected to be locked and unlocked by the caller.
	 *
	 * The loop below walks up the chain of ownership: if raising
	 * this mutex's priority raises its owner's priority, and that
	 * owner is itself blocked on another PRIO_INHERIT mutex, the
	 * new priority must be propagated to that mutex and its owner
	 * in turn.  "done" also records whether the current "m" was
	 * locked by this loop (0) or by the caller (1, first pass),
	 * so only locks acquired here are released here.
	 */
	done = 1;
	do {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all mutexes held by the owning
		 * thread since taking this mutex.  This also has a
		 * potential side-effect of changing the thread's priority.
		 *
		 * At this point the mutex is locked by the current thread.
		 * The owning thread can't release the mutex until it is
		 * unlocked, so we should be able to safely walk its list
		 * of owned mutexes.
		 */
		mutex_rescan_owned(curthread, pthread, m);

		/*
		 * If this isn't the first time through the loop,
		 * the current mutex needs to be unlocked.
		 */
		if (done == 0)
			THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Assume we're done unless told otherwise: */
		done = 1;

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
		    ((m = pthread->data.mutex) != NULL) &&
		    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Lock the mutex structure: */
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Make sure the thread is still waiting on the
			 * mutex (the acquire above may have yielded):
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (m == pthread->data.mutex)) {
				/*
				 * The priority for this thread has changed.
				 * Remove and reinsert this thread into the
				 * list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				/*
				 * Grab the waiting thread with highest
				 * priority:
				 */
				pthread_next = TAILQ_FIRST(&m->m_queue);

				/*
				 * Calculate the mutex priority as the maximum
				 * of the highest active priority of any
				 * waiting threads and the owning thread's
				 * active priority.
				 */
				temp_prio = MAX(pthread_next->active_priority,
				    MAX(m->m_saved_prio,
				    m->m_owner->base_priority));

				if (temp_prio != m->m_prio) {
					/*
					 * The priority needs to be propagated
					 * to the mutex this thread is waiting
					 * on and up to the owner of that mutex.
					 */
					m->m_prio = temp_prio;
					pthread = m->m_owner;

					/* We're not done yet: */
					done = 0;
				}
			}
			/* Only release the mutex if we're done: */
			if (done != 0)
				THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	} while (done == 0);
}
1372 
1373 static void
1374 mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
1375     struct pthread_mutex *mutex)
1376 {
1377 	struct pthread_mutex	*m;
1378 	struct pthread		*pthread_next;
1379 	int			active_prio, inherited_prio;
1380 
1381 	/*
1382 	 * Start walking the mutexes the thread has taken since
1383 	 * taking this mutex.
1384 	 */
1385 	if (mutex == NULL) {
1386 		/*
1387 		 * A null mutex means start at the beginning of the owned
1388 		 * mutex list.
1389 		 */
1390 		m = TAILQ_FIRST(&pthread->pri_mutexq);
1391 
1392 		/* There is no inherited priority yet. */
1393 		inherited_prio = 0;
1394 	} else {
1395 		/*
1396 		 * The caller wants to start after a specific mutex.  It
1397 		 * is assumed that this mutex is a priority inheritence
1398 		 * mutex and that its priority has been correctly
1399 		 * calculated.
1400 		 */
1401 		m = TAILQ_NEXT(mutex, m_qe);
1402 
1403 		/* Start inheriting priority from the specified mutex. */
1404 		inherited_prio = mutex->m_prio;
1405 	}
1406 	active_prio = MAX(inherited_prio, pthread->base_priority);
1407 
1408 	for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
1409 		/*
1410 		 * We only want to deal with priority inheritence
1411 		 * mutexes.  This might be optimized by only placing
1412 		 * priority inheritence mutexes into the owned mutex
1413 		 * list, but it may prove to be useful having all
1414 		 * owned mutexes in this list.  Consider a thread
1415 		 * exiting while holding mutexes...
1416 		 */
1417 		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1418 			/*
1419 			 * Fix the owners saved (inherited) priority to
1420 			 * reflect the priority of the previous mutex.
1421 			 */
1422 			m->m_saved_prio = inherited_prio;
1423 
1424 			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1425 				/* Recalculate the priority of the mutex: */
1426 				m->m_prio = MAX(active_prio,
1427 				     pthread_next->active_priority);
1428 			else
1429 				m->m_prio = active_prio;
1430 
1431 			/* Recalculate new inherited and active priorities: */
1432 			inherited_prio = m->m_prio;
1433 			active_prio = MAX(m->m_prio, pthread->base_priority);
1434 		}
1435 	}
1436 
1437 	/*
1438 	 * Fix the threads inherited priority and recalculate its
1439 	 * active priority.
1440 	 */
1441 	pthread->inherited_priority = inherited_prio;
1442 	active_prio = MAX(inherited_prio, pthread->base_priority);
1443 
1444 	if (active_prio != pthread->active_priority) {
1445 		/* Lock the thread's scheduling queue: */
1446 		THR_THREAD_LOCK(curthread, pthread);
1447 
1448 		/* if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) */
1449 		if (1) {
1450 			/*
1451 			 * This thread is not in a run queue.  Just set
1452 			 * its active priority.
1453 			 */
1454 			pthread->active_priority = active_prio;
1455 		}
1456 		else {
1457 			/*
1458 			 * This thread is in a run queue.  Remove it from
1459 			 * the queue before changing its priority:
1460 			 */
1461 			/* THR_RUNQ_REMOVE(pthread);*/
1462 			/*
1463 			 * POSIX states that if the priority is being
1464 			 * lowered, the thread must be inserted at the
1465 			 * head of the queue for its priority if it owns
1466 			 * any priority protection or inheritence mutexes.
1467 			 */
1468 			if ((active_prio < pthread->active_priority) &&
1469 			    (pthread->priority_mutex_count > 0)) {
1470 				/* Set the new active priority. */
1471 				pthread->active_priority = active_prio;
1472 				/* THR_RUNQ_INSERT_HEAD(pthread); */
1473 			} else {
1474 				/* Set the new active priority. */
1475 				pthread->active_priority = active_prio;
1476 				/* THR_RUNQ_INSERT_TAIL(pthread);*/
1477 			}
1478 		}
1479 		THR_THREAD_UNLOCK(curthread, pthread);
1480 	}
1481 }
1482 
1483 void
1484 _mutex_unlock_private(pthread_t pthread)
1485 {
1486 	struct pthread_mutex	*m, *m_next;
1487 
1488 	for (m = TAILQ_FIRST(&pthread->pri_mutexq); m != NULL; m = m_next) {
1489 		m_next = TAILQ_NEXT(m, m_qe);
1490 		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1491 			pthread_mutex_unlock(&m);
1492 	}
1493 }
1494 
/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 *
 * In order to properly dequeue a thread from the mutex queue and
 * make it runnable without the possibility of errant wakeups, it
 * is necessary to lock the thread's scheduling queue while also
 * holding the mutex lock.
 *
 * Called with the mutex lock held.
 *
 * NOTE(review): "tid" is initialized to -1 and never reassigned in
 * this revision, so the function always returns -1 -- confirm whether
 * any caller still consumes the return value.
 */
static long
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
	struct pthread *pthread;
	long tid = -1;

	/* Keep dequeueing until we find a valid thread: */
	mutex->m_owner = NULL;
	pthread = TAILQ_FIRST(&mutex->m_queue);
	while (pthread != NULL) {
		/* Take the thread's scheduling lock: */
		THR_THREAD_LOCK(curthread, pthread);

		/* Remove the thread from the mutex queue: */
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 * NOTE(review): no explicit cancellation check appears
		 * below; the loop actually retries whenever ownership
		 * was not assigned (the PRIO_PROTECT ceiling-violation
		 * path) -- see the m_owner test after the switch.
		 */
		switch (mutex->m_protocol) {
		case PTHREAD_PRIO_NONE:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);
			break;

		case PTHREAD_PRIO_INHERIT:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);

			/* Track number of priority mutexes owned: */
			pthread->priority_mutex_count++;

			/*
			 * Set the priority of the mutex.  Since our waiting
			 * threads are in descending priority order, the
			 * priority of the mutex becomes the active priority
			 * of the thread we just dequeued.
			 */
			mutex->m_prio = pthread->active_priority;

			/* Save the owning thread's inherited priority: */
			mutex->m_saved_prio = pthread->inherited_priority;

			/*
			 * The owning thread's inherited priority now becomes
			 * its active priority (the priority of the mutex).
			 */
			pthread->inherited_priority = mutex->m_prio;
			break;

		case PTHREAD_PRIO_PROTECT:
			if (pthread->active_priority > mutex->m_prio) {
				/*
				 * Either the mutex ceiling priority has
				 * been lowered and/or this thread's priority
				 * has been raised subsequent to the thread
				 * being queued on the waiting list.
				 */
				pthread->error = EINVAL;
			}
			else {
				/*
				 * Assign the new owner and add the mutex
				 * to the thread's list of owned mutexes.
				 */
				mutex->m_owner = pthread;
				TAILQ_INSERT_TAIL(&pthread->pri_mutexq,
				    mutex, m_qe);

				/* Track number of priority mutexes owned: */
				pthread->priority_mutex_count++;

				/*
				 * Save the owning thread's inherited
				 * priority:
				 */
				mutex->m_saved_prio =
				    pthread->inherited_priority;

				/*
				 * The owning thread inherits the ceiling
				 * priority of the mutex and executes at
				 * that priority:
				 */
				pthread->inherited_priority = mutex->m_prio;
				pthread->active_priority = mutex->m_prio;

			}
			break;
		}

		/* Make the thread runnable and unlock the scheduling queue: */
		pthread->cycle++;
		_thr_umtx_wake(&pthread->cycle, 1);

		THR_THREAD_UNLOCK(curthread, pthread);
		if (mutex->m_owner == pthread)
			/* We're done; a valid owner was found. */
			break;
		else
			/* Get the next thread from the waiting queue: */
			pthread = TAILQ_NEXT(pthread, sqe);
	}

	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
		/* This mutex has no priority: */
		mutex->m_prio = 0;
	return (tid);
}
1623 
#if 0
/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 *
 * NOTE(review): this routine is compiled out.  As written, the loop
 * removes *every* waiter and always returns NULL (TAILQ_FIRST only
 * yields NULL once the queue is drained), so it does not implement a
 * single dequeue; it would need to return inside the loop before
 * being re-enabled.
 */
static pthread_t
mutex_queue_deq(struct pthread_mutex *mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
	}

	return (pthread);
}
#endif
1642 
1643 /*
1644  * Remove a waiting thread from a mutex queue in descending priority order.
1645  */
1646 static void
1647 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1648 {
1649 	if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1650 		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1651 		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1652 	}
1653 }
1654 
1655 /*
1656  * Enqueue a waiting thread to a queue in descending priority order.
1657  */
1658 static void
1659 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1660 {
1661 	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1662 
1663 	THR_ASSERT_NOT_IN_SYNCQ(pthread);
1664 	/*
1665 	 * For the common case of all threads having equal priority,
1666 	 * we perform a quick check against the priority of the thread
1667 	 * at the tail of the queue.
1668 	 */
1669 	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1670 		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1671 	else {
1672 		tid = TAILQ_FIRST(&mutex->m_queue);
1673 		while (pthread->active_priority <= tid->active_priority)
1674 			tid = TAILQ_NEXT(tid, sqe);
1675 		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1676 	}
1677 	pthread->sflags |= THR_FLAGS_IN_SYNCQ;
1678 }
1679