xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision 87569f75a91f298c52a71823c04d41cf53c88889)
/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif
67 
68 #define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
69 #define	MUTEX_DESTROY(m) do {		\
70 	free(m);			\
71 } while (0)
72 
73 
74 /*
75  * Prototypes
76  */
77 static long		mutex_handoff(struct pthread *, struct pthread_mutex *);
78 static int		mutex_self_trylock(struct pthread *, pthread_mutex_t);
79 static int		mutex_self_lock(struct pthread *, pthread_mutex_t,
80 				const struct timespec *abstime);
81 static int		mutex_unlock_common(pthread_mutex_t *, int);
82 static void		mutex_priority_adjust(struct pthread *, pthread_mutex_t);
83 static void		mutex_rescan_owned (struct pthread *, struct pthread *,
84 			    struct pthread_mutex *);
85 #if 0
86 static pthread_t	mutex_queue_deq(pthread_mutex_t);
87 #endif
88 static void		mutex_queue_remove(pthread_mutex_t, pthread_t);
89 static void		mutex_queue_enq(pthread_mutex_t, pthread_t);
90 
91 __weak_reference(__pthread_mutex_init, pthread_mutex_init);
92 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
93 __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
94 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
95 
96 /* Single underscore versions provided for libc internal usage: */
97 /* No difference between libc and application usage of these: */
98 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
99 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
100 
static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	static const struct pthread_mutex_attr default_attr = {
		.m_type         = PTHREAD_MUTEX_DEFAULT,
		.m_protocol     = PTHREAD_PRIO_NONE,
		.m_ceiling      = THR_MAX_PRIORITY,
		.m_flags        = 0
	};
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &default_attr;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}

	if ((pmutex = (pthread_mutex_t)
		malloc(sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	_thr_umtx_init(&pmutex->m_lock);
	pmutex->m_type = attr->m_type;
	pmutex->m_protocol = attr->m_protocol;
	TAILQ_INIT(&pmutex->m_queue);
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	if (attr->m_protocol == PTHREAD_PRIO_PROTECT)
		pmutex->m_prio = attr->m_ceiling;
	else
		pmutex->m_prio = -1;
	pmutex->m_saved_prio = 0;
	MUTEX_INIT_LINK(pmutex);
	*mutex = pmutex;
	return (0);
}
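
/*
 * Example (illustrative only, not part of libthr): typical application
 * code that reaches mutex_init() above through pthread_mutex_init().
 * The names below are hypothetical; the attribute calls are standard
 * POSIX.  A minimal sketch, assuming a recursive priority-inheritance
 * mutex is wanted.
 */
#if 0
static pthread_mutex_t app_lock;

static int
app_lock_setup(void)
{
	pthread_mutexattr_t attr;
	int error;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	error = pthread_mutex_init(&app_lock, &attr);
	pthread_mutexattr_destroy(&attr);
	return (error);
}
#endif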

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}
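
/*
 * Example (illustrative): in this implementation a statically
 * initialized mutex starts out as a NULL pointer (see the
 * *mutex == NULL checks above), so the first lock attempt takes the
 * init_static() path before the lock itself is acquired.  A minimal
 * sketch:
 */
#if 0
static pthread_mutex_t static_lock = PTHREAD_MUTEX_INITIALIZER;

static void
use_static_lock(void)
{
	pthread_mutex_lock(&static_lock);	/* allocates on first use */
	/* ... critical section ... */
	pthread_mutex_unlock(&static_lock);
}
#endif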

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 1));
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 0));
}

int
_mutex_reinit(pthread_mutex_t *mutex)
{
	_thr_umtx_init(&(*mutex)->m_lock);
	TAILQ_INIT(&(*mutex)->m_queue);
	MUTEX_INIT_LINK(*mutex);
	(*mutex)->m_owner = NULL;
	(*mutex)->m_count = 0;
	(*mutex)->m_refcount = 0;
	(*mutex)->m_prio = 0;
	(*mutex)->m_saved_prio = 0;
	return (0);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited: their owner
	 * is the forking thread, which lives in the parent process, so
	 * they would have to be removed from the owned mutex list.
	 * Process-shared mutexes are currently unsupported, so this is
	 * not yet a concern.
	 */
	TAILQ_FOREACH(m, &curthread->mutexq, m_qe) {
		m->m_lock = (umtx_t)curthread->tid;
	}

	/* Clear contended state for priority mutexes: */
	TAILQ_FOREACH(m, &curthread->pri_mutexq, m_qe) {
		/* Clear any lock state left by another thread: */
		_thr_umtx_init(&m->m_lock);
		TAILQ_INIT(&m->m_queue);
	}
}
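
/*
 * Example (illustrative): _mutex_fork() above is what makes the pattern
 * below safe for simple mutexes -- after fork() only the forking thread
 * exists in the child, and its owned locks are re-stamped with the
 * child's tid.  A minimal sketch; the function name is hypothetical.
 */
#if 0
#include <unistd.h>

static void
fork_while_locked(pthread_mutex_t *m)
{
	pthread_mutex_lock(m);
	if (fork() == 0) {
		/* Child: ownership was fixed up; unlocking is legal. */
		pthread_mutex_unlock(m);
		_exit(0);
	}
	pthread_mutex_unlock(m);	/* parent */
}
#endif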

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/*
		 * Try to lock the mutex structure.  A single attempt is
		 * enough: if it fails, the mutex is in use.
		 */
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret)
			return (ret);

		/*
		 * Check the mutex's other fields to see if it is in use;
		 * this matters mostly for the priority mutex types, or
		 * when condition variables still reference the mutex.
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			_thr_umtx_unlock(&m->m_lock, curthread->tid);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}
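
/*
 * Example (illustrative): destroying a locked mutex fails with EBUSY,
 * since the trylock of m_lock above cannot succeed while the lock is
 * held.  A minimal sketch; assumes <assert.h>.
 */
#if 0
#include <assert.h>

static void
destroy_busy(void)
{
	pthread_mutex_t m;

	pthread_mutex_init(&m, NULL);
	pthread_mutex_lock(&m);
	assert(pthread_mutex_destroy(&m) == EBUSY);
	pthread_mutex_unlock(&m);
	assert(pthread_mutex_destroy(&m) == 0);
}
#endif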

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	int ret = 0;

	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_trylock_common");

	/* Shortcut for simple mutexes. */
	if ((*mutex)->m_protocol == PTHREAD_PRIO_NONE) {
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret == 0) {
			(*mutex)->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread) {
			ret = mutex_self_trylock(curthread, *mutex);
		}

		return (ret);
	}

	/* Code for priority mutexes */

	/* Lock the mutex structure: */
	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex type: */
	switch ((*mutex)->m_protocol) {
	/* POSIX priority inheritance mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_LOCK(curthread);
			/* Track the number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority = (*mutex)->m_prio;
			THR_UNLOCK(curthread);

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_LOCK(curthread);
			/* Track the number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;
			THR_UNLOCK(curthread);
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex types: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	/* Unlock the mutex structure: */
	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

	/* Return the completion status: */
	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*mutex != NULL) ||
	    ((ret = init_static(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization, marking the mutex private (delete safe):
	 */
	if ((*mutex != NULL) ||
	    ((ret = init_static_private(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}
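
/*
 * Example (illustrative): trylock never blocks; contention surfaces as
 * EBUSY from mutex_trylock_common() above.  A minimal sketch of the
 * usual polling pattern; the function name is hypothetical.
 */
#if 0
static int
poll_lock(pthread_mutex_t *m)
{
	if (pthread_mutex_trylock(m) == 0) {
		/* ... fast path: lock acquired ... */
		pthread_mutex_unlock(m);
		return (1);
	}
	/* EBUSY: lock held elsewhere; caller does other work. */
	return (0);
}
#endif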

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
	const struct timespec *abstime)
{
	struct timespec ts, ts2;
	long	cycle;
	int	ret = 0;

	THR_ASSERT((m != NULL) && (*m != NULL),
	    "Uninitialized mutex in mutex_lock_common");

	if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000))
		return (EINVAL);

	/* Shortcut for simple mutexes. */
	if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
		/* Default POSIX mutex: */
		ret = THR_UMTX_TRYLOCK(curthread, &(*m)->m_lock);
		if (ret == 0) {
			(*m)->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*m);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*m), m_qe);
		} else if ((*m)->m_owner == curthread) {
			ret = mutex_self_lock(curthread, *m, abstime);
		} else {
			if (abstime == NULL) {
				THR_UMTX_LOCK(curthread, &(*m)->m_lock);
				ret = 0;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts);
				TIMESPEC_SUB(&ts2, abstime, &ts);
				ret = THR_UMTX_TIMEDLOCK(curthread,
					&(*m)->m_lock, &ts2);
				/*
				 * A timed-out wait is not restarted if
				 * it was interrupted; it is not worth
				 * the effort.
				 */
				if (ret == EINTR)
					ret = ETIMEDOUT;
			}
			if (ret == 0) {
				(*m)->m_owner = curthread;
				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);
			}
		}
		return (ret);
	}

	/* Code for priority mutexes */

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*m)->m_queue);
			(*m)->m_flags |= MUTEX_FLAGS_INITED;
			MUTEX_INIT_LINK(*m);
		}

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*m)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*m)->m_owner = curthread;

				THR_LOCK(curthread);
				/* Track the number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 * Make sure the thread's scheduling lock is
				 * held while priorities are adjusted.
				 */
				(*m)->m_prio = curthread->active_priority;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_UNLOCK(curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m, abstime);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;

				if (curthread->active_priority > (*m)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, *m);

				THR_LOCK(curthread);
				cycle = curthread->cycle;
				THR_UNLOCK(curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/*
				 * An infinite wait (abstime == NULL) must
				 * not try to compute a relative timeout.
				 */
				if (abstime != NULL) {
					clock_gettime(CLOCK_REALTIME, &ts);
					TIMESPEC_SUB(&ts2, abstime, &ts);
					ret = _thr_umtx_wait(
					    &curthread->cycle, cycle, &ts2);
				} else
					ret = _thr_umtx_wait(
					    &curthread->cycle, cycle, NULL);
				if (ret == EINTR)
					ret = 0;

				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}
				/*
				 * Only clear this after ensuring the
				 * thread has been dequeued.
				 */
				curthread->data.mutex = NULL;
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*m)->m_prio) {
				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				ret = EINVAL;
			}
			/* Check if this mutex is not locked: */
			else if ((*m)->m_owner == NULL) {
				/*
				 * Lock the mutex for the running
				 * thread:
				 */
				(*m)->m_owner = curthread;

				THR_LOCK(curthread);
				/* Track the number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority.  Make sure the thread's
				 * scheduling lock is held while priorities
				 * are adjusted.
				 */
				curthread->active_priority = (*m)->m_prio;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_UNLOCK(curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
				    (*m), m_qe);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m, abstime);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;

				/* Clear any previous error: */
				curthread->error = 0;

				THR_LOCK(curthread);
				cycle = curthread->cycle;
				THR_UNLOCK(curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/*
				 * An infinite wait (abstime == NULL) must
				 * not try to compute a relative timeout.
				 */
				if (abstime != NULL) {
					clock_gettime(CLOCK_REALTIME, &ts);
					TIMESPEC_SUB(&ts2, abstime, &ts);
					ret = _thr_umtx_wait(
					    &curthread->cycle, cycle, &ts2);
				} else
					ret = _thr_umtx_wait(
					    &curthread->cycle, cycle, NULL);
				if (ret == EINTR)
					ret = 0;

				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}
				/*
				 * Only clear this after ensuring the
				 * thread has been dequeued.
				 */
				curthread->data.mutex = NULL;

				/*
				 * The thread's priority may have changed
				 * while it waited for the mutex, causing
				 * a ceiling violation.
				 */
				ret = curthread->error;
				curthread->error = 0;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

	} while (((*m)->m_owner != curthread) && (ret == 0));

	/* Return the completion status: */
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret = 0;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, NULL);

	return (ret);
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret = 0;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization, marking it private (delete safe):
	 */
	if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, NULL);

	return (ret);
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m,
	const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int	ret = 0;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, abs_timeout);

	return (ret);
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m,
	const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int	ret = 0;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization, marking it private (delete safe):
	 */
	if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, abs_timeout);

	return (ret);
}
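
/*
 * Example (illustrative): pthread_mutex_timedlock() takes an absolute
 * CLOCK_REALTIME deadline; mutex_lock_common() above converts it to the
 * relative interval the umtx layer expects.  A minimal sketch; the
 * function name is hypothetical.
 */
#if 0
#include <time.h>

static int
lock_with_deadline(pthread_mutex_t *m, time_t max_wait_sec)
{
	struct timespec abstime;

	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += max_wait_sec;
	/* Returns 0 on success or ETIMEDOUT if the deadline passes. */
	return (pthread_mutex_timedlock(m, &abstime));
}
#endif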

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 0));
}

int
_mutex_cv_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 1));
}

int
_mutex_cv_lock(pthread_mutex_t *m)
{
	int	ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0)
		(*m)->m_refcount--;
	return (ret);
}

static int
mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
{
	int	ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m,
	const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (m->m_protocol != PTHREAD_PRIO_NONE) {
			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}
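
/*
 * Example (illustrative): the self-lock handling above gives each mutex
 * type its POSIX behavior -- EDEADLK for error-checking mutexes,
 * deadlock for normal ones, and a count bump for recursive ones.  A
 * minimal sketch; assumes <assert.h>.
 */
#if 0
#include <assert.h>

static void
relock_errorcheck(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);

	pthread_mutex_lock(&m);
	assert(pthread_mutex_lock(&m) == EDEADLK);	/* self-lock detected */
	pthread_mutex_unlock(&m);
	pthread_mutex_destroy(&m);
}
#endif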

static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread *curthread = _get_curthread();
	long tid = -1;
	int ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Shortcut for simple mutexes. */
		if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
			/*
			 * Check if the running thread is not the owner of
			 * the mutex:
			 */
			if (__predict_false((*m)->m_owner != curthread)) {
				ret = EPERM;
			} else if (__predict_false(
			    (*m)->m_type == PTHREAD_MUTEX_RECURSIVE &&
			    (*m)->m_count > 0)) {
				/* Decrement the count: */
				(*m)->m_count--;
				if (add_reference)
					(*m)->m_refcount++;
			} else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;
				(*m)->m_owner = NULL;
				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&curthread->mutexq, (*m), m_qe);
				MUTEX_INIT_LINK(*m);
				if (add_reference)
					(*m)->m_refcount++;
				/*
				 * Hand off the mutex to the next waiting
				 * thread.
				 */
				_thr_umtx_unlock(&(*m)->m_lock, curthread->tid);
			}
			return (ret);
		}

		/* Code for priority mutexes */

		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of
			 * the mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				THR_LOCK(curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority
				 * mutex.
				 */
				curthread->priority_mutex_count--;
				THR_UNLOCK(curthread);

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				tid = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of
			 * the mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				THR_LOCK(curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority
				 * mutex.
				 */
				curthread->priority_mutex_count--;
				THR_UNLOCK(curthread);

				/* Remove the mutex from the thread's queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				tid = mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
	}

	/* Return the completion status: */
	return (ret);
}
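
/*
 * Example (illustrative): the owner checks above make unlocking a mutex
 * the caller does not hold fail with EPERM instead of corrupting lock
 * state.  A minimal sketch; assumes <assert.h>.
 */
#if 0
#include <assert.h>

static void *
bad_unlock(void *arg)
{
	pthread_mutex_t *m = arg;	/* locked by some other thread */

	assert(pthread_mutex_unlock(m) == EPERM);
	return (NULL);
}
#endif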

/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to the active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called without the target thread's scheduling lock held.
 */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
	struct pthread_mutex *m;

	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 *
		 * Be sure to lock the first mutex in the list of owned
		 * mutexes.  This acts as a barrier against another
		 * simultaneous call to change the thread's priority
		 * and against the owning thread releasing the mutex.
		 */
		m = TAILQ_FIRST(&pthread->pri_mutexq);
		if (m != NULL) {
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
			/*
			 * Make sure the thread still owns the lock.
			 */
			if (m == TAILQ_FIRST(&pthread->pri_mutexq))
				mutex_rescan_owned(curthread, pthread,
				    /* rescan all owned */ NULL);
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (propagate_prio != 0) {
		/*
		 * Lock the thread's scheduling queue.  This is a bit
		 * convoluted; the "in synchronization queue flag" can
		 * only be cleared with both the thread's scheduling and
		 * mutex locks held.  The thread's pointer to the wanted
		 * mutex is guaranteed to be valid during this time.
		 */
		THR_THREAD_LOCK(curthread, pthread);

		if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
		    ((m = pthread->data.mutex) == NULL))
			THR_THREAD_UNLOCK(curthread, pthread);
		else {
			/*
			 * This thread is currently waiting on a mutex; unlock
			 * the scheduling queue lock and lock the mutex.  We
			 * can't hold both at the same time because the locking
			 * order could cause a deadlock.
			 */
			THR_THREAD_UNLOCK(curthread, pthread);
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Check to make sure this thread is still in the
			 * same state (the lock above can yield the CPU to
			 * another thread or the thread may be running on
			 * another CPU).
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (pthread->data.mutex == m)) {
				/*
				 * Remove and reinsert this thread into
				 * the list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, m);
			}

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}
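
/*
 * Example (illustrative): a scheduling-parameter change such as
 * pthread_setschedparam() on a thread that holds or waits on a priority
 * mutex is the kind of event that ultimately drives
 * _mutex_notify_priochange() above.  A minimal sketch; the function
 * name and policy choice are hypothetical.
 */
#if 0
#include <sched.h>

static int
boost_thread(pthread_t tid, int prio)
{
	struct sched_param param;

	param.sched_priority = prio;
	/* A rescan of tid's owned/waited-on priority mutexes follows. */
	return (pthread_setschedparam(tid, SCHED_RR, &param));
}
#endif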

/*
 * Called when a new thread is added to the mutex waiting queue or
 * when the priority of a thread already in the mutex waiting queue
 * changes.
 *
 * This must be called with the mutex locked by the current thread.
 */
static void
mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
	pthread_mutex_t	m = mutex;
	struct pthread	*pthread_next, *pthread = mutex->m_owner;
	int		done, temp_prio;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed), we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set the new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Don't unlock the mutex passed in as an argument.  It is
	 * expected to be locked and unlocked by the caller.
	 */
	done = 1;
	do {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all mutexes held by the owning
		 * thread since taking this mutex.  This also has a
		 * potential side effect of changing the thread's priority.
		 *
		 * At this point the mutex is locked by the current thread.
		 * The owning thread can't release the mutex until it is
		 * unlocked, so we should be able to safely walk its list
		 * of owned mutexes.
		 */
		mutex_rescan_owned(curthread, pthread, m);

		/*
		 * If this isn't the first time through the loop,
		 * the current mutex needs to be unlocked.
		 */
		if (done == 0)
			THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Assume we're done unless told otherwise: */
		done = 1;

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
		    ((m = pthread->data.mutex) != NULL) &&
		    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Lock the mutex structure: */
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Make sure the thread is still waiting on the
			 * mutex:
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (m == pthread->data.mutex)) {
				/*
				 * The priority for this thread has changed.
				 * Remove and reinsert this thread into the
				 * list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				/*
				 * Grab the waiting thread with the highest
				 * priority:
				 */
				pthread_next = TAILQ_FIRST(&m->m_queue);

				/*
				 * Calculate the mutex priority as the maximum
				 * of the highest active priority of any
				 * waiting threads and the owning thread's
				 * active priority.
				 */
				temp_prio = MAX(pthread_next->active_priority,
				    MAX(m->m_saved_prio,
				    m->m_owner->base_priority));

				if (temp_prio != m->m_prio) {
					/*
					 * The priority needs to be propagated
					 * to the mutex this thread is waiting
					 * on and up to the owner of that mutex.
					 */
					m->m_prio = temp_prio;
					pthread = m->m_owner;

					/* We're not done yet: */
					done = 0;
				}
			}
			/* Only release the mutex if we're done: */
			if (done != 0)
				THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	} while (done == 0);
}

static void
mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
    struct pthread_mutex *mutex)
{
	struct pthread_mutex	*m;
	struct pthread		*pthread_next;
	int			active_prio, inherited_prio;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->pri_mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	} else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/* Lock the thread's scheduling queue: */
		THR_THREAD_LOCK(curthread, pthread);

		/* if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) */
		if (1) {
			/*
			 * This thread is not in a run queue.  Just set
			 * its active priority.
			 */
			pthread->active_priority = active_prio;
		}
		else {
			/*
			 * This thread is in a run queue.  Remove it from
			 * the queue before changing its priority:
			 */
			/* THR_RUNQ_REMOVE(pthread);*/
			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;
				/* THR_RUNQ_INSERT_HEAD(pthread); */
			} else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;
				/* THR_RUNQ_INSERT_TAIL(pthread);*/
			}
		}
		THR_THREAD_UNLOCK(curthread, pthread);
	}
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	for (m = TAILQ_FIRST(&pthread->pri_mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			pthread_mutex_unlock(&m);
	}
}

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 *
 * In order to properly dequeue a thread from the mutex queue and
 * make it runnable without the possibility of errant wakeups, it
 * is necessary to lock the thread's scheduling queue while also
 * holding the mutex lock.
 */
static long
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
	struct pthread *pthread;
	long tid = -1;

	/* Keep dequeueing until we find a valid thread: */
	mutex->m_owner = NULL;
	pthread = TAILQ_FIRST(&mutex->m_queue);
	while (pthread != NULL) {
		/* Take the thread's scheduling lock: */
		THR_THREAD_LOCK(curthread, pthread);

		/* Remove the thread from the mutex queue: */
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/*
		 * Hand the mutex to this thread if its protocol allows
		 * it; a ceiling violation means trying the next waiter.
		 */
		switch (mutex->m_protocol) {
		case PTHREAD_PRIO_NONE:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);
			break;

		case PTHREAD_PRIO_INHERIT:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);

			/* Track the number of priority mutexes owned: */
			pthread->priority_mutex_count++;

			/*
			 * Set the priority of the mutex.  Since our waiting
			 * threads are in descending priority order, the
			 * priority of the mutex becomes the active priority
			 * of the thread we just dequeued.
			 */
			mutex->m_prio = pthread->active_priority;

			/* Save the owning thread's inherited priority: */
			mutex->m_saved_prio = pthread->inherited_priority;

			/*
			 * The owning thread's inherited priority now becomes
			 * its active priority (the priority of the mutex).
			 */
			pthread->inherited_priority = mutex->m_prio;
			break;

		case PTHREAD_PRIO_PROTECT:
			if (pthread->active_priority > mutex->m_prio) {
				/*
				 * Either the mutex ceiling priority has
				 * been lowered and/or this thread's priority
				 * has been raised subsequent to the thread
				 * being queued on the waiting list.
				 */
				pthread->error = EINVAL;
			}
			else {
				/*
				 * Assign the new owner and add the mutex
				 * to the thread's list of owned mutexes.
				 */
				mutex->m_owner = pthread;
				TAILQ_INSERT_TAIL(&pthread->pri_mutexq,
				    mutex, m_qe);

				/* Track the number of priority mutexes owned: */
				pthread->priority_mutex_count++;

				/*
				 * Save the owning thread's inherited
				 * priority:
				 */
				mutex->m_saved_prio =
				    pthread->inherited_priority;

				/*
				 * The owning thread inherits the ceiling
				 * priority of the mutex and executes at
				 * that priority:
				 */
				pthread->inherited_priority = mutex->m_prio;
				pthread->active_priority = mutex->m_prio;
			}
			break;
		}

		/* Make the thread runnable and unlock the scheduling queue: */
		pthread->cycle++;
		_thr_umtx_wake(&pthread->cycle, 1);

		THR_THREAD_UNLOCK(curthread, pthread);
		if (mutex->m_owner == pthread)
			/* We're done; a valid owner was found. */
			break;
		else
			/* Get the next thread from the waiting queue: */
			pthread = TAILQ_NEXT(pthread, sqe);
	}

	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
		/* This mutex has no priority: */
		mutex->m_prio = 0;
	return (tid);
}

#if 0
/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 */
static pthread_t
mutex_queue_deq(struct pthread_mutex *mutex)
{
	pthread_t pthread;

	/*
	 * Remove only the head of the queue; the original loop here
	 * stripped every waiter and always returned NULL.
	 */
	if ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
	}

	return (pthread);
}
#endif

/*
 * Remove a waiting thread from a mutex queue in descending priority order.
 */
static void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
	if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
	}
}

/*
 * Enqueue a waiting thread to a queue in descending priority order.
 */
static void
mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);

	THR_ASSERT_NOT_IN_SYNCQ(pthread);
	/*
	 * For the common case of all threads having equal priority,
	 * we perform a quick check against the priority of the thread
	 * at the tail of the queue.
	 */
	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
	else {
		tid = TAILQ_FIRST(&mutex->m_queue);
		while (pthread->active_priority <= tid->active_priority)
			tid = TAILQ_NEXT(tid, sqe);
		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
	}
	pthread->sflags |= THR_FLAGS_IN_SYNCQ;
}
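
/*
 * Example (illustrative): mutex_queue_enq() above keeps waiters sorted
 * by decreasing active priority, FIFO among equal priorities (the "<="
 * comparison puts new waiters behind existing ones of the same
 * priority).  A self-contained toy version of the same insertion, using
 * plain integers; all names here are hypothetical.
 */
#if 0
struct waiter {
	int prio;
	TAILQ_ENTRY(waiter) sqe;
};
TAILQ_HEAD(waiter_head, waiter);

static void
waiter_enq(struct waiter_head *q, struct waiter *w)
{
	struct waiter *last = TAILQ_LAST(q, waiter_head), *it;

	if (last == NULL || w->prio <= last->prio)
		TAILQ_INSERT_TAIL(q, w, sqe);		/* common case */
	else {
		it = TAILQ_FIRST(q);
		while (w->prio <= it->prio)
			it = TAILQ_NEXT(it, sqe);
		TAILQ_INSERT_BEFORE(it, w, sqe);	/* keep descending */
	}
}
#endif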
1633