xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision 6b3455a7665208c366849f0b2b3bc916fb97516e)
1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by John Birrell.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 #include <stdlib.h>
35 #include <errno.h>
36 #include <string.h>
37 #include <sys/param.h>
38 #include <sys/queue.h>
39 #include <pthread.h>
40 #include <time.h>
41 #include "thr_private.h"
42 
/*
 * Queue-linkage invariant macros.  With _PTHREADS_INVARIANTS defined,
 * the mutex's tqe pointers are nulled on init and checked before list
 * insertion/removal so that a mutex found on (or off) a thread's
 * held-mutex list at the wrong time panics immediately.  Without it,
 * the macros compile away to nothing.
 */
#if defined(_PTHREADS_INVARIANTS)
#define _MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define _MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define _MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif
62 
63 
64 /*
65  * Prototypes
66  */
67 static void		acquire_mutex(struct pthread_mutex *, struct pthread *);
68 static int		get_mcontested(pthread_mutex_t,
69 			    const struct timespec *);
70 static void		mutex_attach_to_next_pthread(struct pthread_mutex *);
71 static int		mutex_init(pthread_mutex_t *, int);
72 static int		mutex_lock_common(pthread_mutex_t *, int,
73 			    const struct timespec *);
74 static inline int	mutex_self_lock(pthread_mutex_t, int);
75 static inline int	mutex_unlock_common(pthread_mutex_t *, int);
76 static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
77 static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
78 static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
79 static void		restore_prio_inheritance(struct pthread *);
80 static void		restore_prio_protection(struct pthread *);
81 
82 
83 static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
84 
85 static struct pthread_mutex_attr	static_mutex_attr =
86     PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
87 static pthread_mutexattr_t		static_mattr = &static_mutex_attr;
88 
89 /* Single underscore versions provided for libc internal usage: */
90 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
91 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
92 __weak_reference(__pthread_mutex_unlock, pthread_mutex_unlock);
93 
94 /* No difference between libc and application usage of these: */
95 __weak_reference(_pthread_mutex_init, pthread_mutex_init);
96 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
97 __weak_reference(_pthread_mutex_timedlock, pthread_mutex_timedlock);
98 
99 
100 /*
101  * Reinitialize a private mutex; this is only used for internal mutexes.
102  */
103 int
104 _mutex_reinit(pthread_mutex_t * mutex)
105 {
106 	int	ret = 0;
107 
108 	if (mutex == NULL)
109 		ret = EINVAL;
110 	else if (*mutex == PTHREAD_MUTEX_INITIALIZER)
111 		ret = _pthread_mutex_init(mutex, NULL);
112 	else {
113 		/*
114 		 * Initialize the mutex structure:
115 		 */
116 		(*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
117 		(*mutex)->m_protocol = PTHREAD_PRIO_NONE;
118 		TAILQ_INIT(&(*mutex)->m_queue);
119 		(*mutex)->m_owner = NULL;
120 		(*mutex)->m_data.m_count = 0;
121 		(*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE;
122 		(*mutex)->m_refcount = 0;
123 		(*mutex)->m_prio = 0;
124 		(*mutex)->m_saved_prio = 0;
125 		_MUTEX_INIT_LINK(*mutex);
126 		memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
127 	}
128 	return (ret);
129 }
130 
131 int
132 _pthread_mutex_init(pthread_mutex_t * mutex,
133 		   const pthread_mutexattr_t * mutex_attr)
134 {
135 	struct pthread_mutex_attr default_attr = {PTHREAD_MUTEX_ERRORCHECK,
136 	    PTHREAD_PRIO_NONE, PTHREAD_MAX_PRIORITY, 0 };
137 	struct pthread_mutex_attr *attr;
138 
139 	if (mutex_attr == NULL) {
140 		attr = &default_attr;
141 	} else {
142 		/*
143 		 * Check that the given mutex attribute is valid.
144 		 */
145 		if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
146 		    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
147 			return (EINVAL);
148 		else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
149 		    ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
150 			return (EINVAL);
151 		attr = *mutex_attr;
152 	}
153 	if ((*mutex =
154 	    (pthread_mutex_t)malloc(sizeof(struct pthread_mutex))) == NULL)
155 		return (ENOMEM);
156 	memset((void *)(*mutex), 0, sizeof(struct pthread_mutex));
157 
158 	/* Initialise the rest of the mutex: */
159 	TAILQ_INIT(&(*mutex)->m_queue);
160 	_MUTEX_INIT_LINK(*mutex);
161 	(*mutex)->m_protocol = attr->m_protocol;
162 	(*mutex)->m_flags = (attr->m_flags | MUTEX_FLAGS_INITED);
163 	(*mutex)->m_type = attr->m_type;
164 	if ((*mutex)->m_protocol == PTHREAD_PRIO_PROTECT)
165 		(*mutex)->m_prio = attr->m_ceiling;
166 	return (0);
167 }
168 
169 int
170 _pthread_mutex_destroy(pthread_mutex_t * mutex)
171 {
172 	if (mutex == NULL)
173 		return (EINVAL);
174 
175 	/*
176 	 * If this mutex was statically initialized, don't bother
177 	 * initializing it in order to destroy it immediately.
178 	 */
179 	if (*mutex == PTHREAD_MUTEX_INITIALIZER)
180 		return (0);
181 
182 	/* Lock the mutex structure: */
183 	_SPINLOCK(&(*mutex)->lock);
184 
185 	/*
186 	 * Check to see if this mutex is in use:
187 	 */
188 	if (((*mutex)->m_owner != NULL) ||
189 	    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
190 	    ((*mutex)->m_refcount != 0)) {
191 		/* Unlock the mutex structure: */
192 		_SPINUNLOCK(&(*mutex)->lock);
193 		return (EBUSY);
194 	}
195 
196 	/*
197 	 * Free the memory allocated for the mutex
198 	 * structure:
199 	 */
200 	_MUTEX_ASSERT_NOT_OWNED(*mutex);
201 	_SPINUNLOCK(&(*mutex)->lock);
202 	free(*mutex);
203 
204 	/*
205 	 * Leave the caller's pointer NULL now that
206 	 * the mutex has been destroyed:
207 	 */
208 	*mutex = NULL;
209 
210 	return (0);
211 }
212 
213 static int
214 mutex_init(pthread_mutex_t *mutex, int private)
215 {
216 	pthread_mutexattr_t *pma;
217 	int error;
218 
219 	error = 0;
220 	pma = private ? &static_mattr : NULL;
221 	_SPINLOCK(&static_init_lock);
222 	if (*mutex == PTHREAD_MUTEX_INITIALIZER)
223 		error = _pthread_mutex_init(mutex, pma);
224 	_SPINUNLOCK(&static_init_lock);
225 	return (error);
226 }
227 
228 /*
229  * Acquires a mutex for the current thread. The caller must
230  * lock the mutex before calling this function.
231  */
static void
acquire_mutex(struct pthread_mutex *mtx, struct pthread *ptd)
{
	/* Record ownership; the mutex must not be linked on any list yet. */
	mtx->m_owner = ptd;
	_MUTEX_ASSERT_NOT_OWNED(mtx);
	/* Append the mutex to the owning thread's held-mutex list. */
	PTHREAD_LOCK(ptd);
	TAILQ_INSERT_TAIL(&ptd->mutexq, mtx, m_qe);
	PTHREAD_UNLOCK(ptd);
}
241 
242 /*
243  * Releases a mutex from the current thread. The owner must
244  * lock the mutex. The next thread on the queue will be returned
245  * locked by the current thread. The caller must take care to
246  * unlock it.
247  */
static void
mutex_attach_to_next_pthread(struct pthread_mutex *mtx)
{
	struct pthread *ptd;

	/* Unlink the mutex from the current owner's held-mutex list. */
	_MUTEX_ASSERT_IS_OWNED(mtx);
	TAILQ_REMOVE(&mtx->m_owner->mutexq, (mtx), m_qe);
	_MUTEX_INIT_LINK(mtx);

	/*
	 * Deque next thread waiting for this mutex and attach
	 * the mutex to it. The thread will already be locked.
	 */
	if ((ptd = mutex_queue_deq(mtx)) != NULL) {
		TAILQ_INSERT_TAIL(&ptd->mutexq, mtx, m_qe);
		ptd->data.mutex = NULL;
		/* Wake the new owner; the caller must unlock it. */
		PTHREAD_WAKE(ptd);
	}
	/* ptd may be NULL, leaving the mutex unowned. */
	mtx->m_owner = ptd;
}
268 
269 int
270 __pthread_mutex_trylock(pthread_mutex_t *mutex)
271 {
272 	int	ret = 0;
273 
274 	if (mutex == NULL)
275 		ret = EINVAL;
276 
277 	/*
278 	 * If the mutex is statically initialized, perform the dynamic
279 	 * initialization:
280 	 */
281 	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
282 	    (ret = mutex_init(mutex, 0)) == 0)
283 		ret = mutex_lock_common(mutex, 1, NULL);
284 
285 	return (ret);
286 }
287 
288 /*
289  * Libc internal.
290  */
291 int
292 _pthread_mutex_trylock(pthread_mutex_t *mutex)
293 {
294 	int	ret = 0;
295 
296 	_thread_sigblock();
297 
298 	if (mutex == NULL)
299 		ret = EINVAL;
300 
301 	/*
302 	 * If the mutex is statically initialized, perform the dynamic
303 	 * initialization marking the mutex private (delete safe):
304 	 */
305 	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
306 	    (ret = mutex_init(mutex, 1)) == 0)
307 		ret = mutex_lock_common(mutex, 1, NULL);
308 
309 	if (ret != 0)
310 		_thread_sigunblock();
311 
312 	return (ret);
313 }
314 
/*
 * Common path for lock, trylock and timedlock.  'nonblock' selects
 * trylock semantics (fail with EBUSY instead of sleeping); 'abstime',
 * when non-NULL, bounds the sleep.  Returns 0 or an errno value.
 */
static int
mutex_lock_common(pthread_mutex_t * mutex, int nonblock,
    const struct timespec *abstime)
{
	int error;

	error = 0;
	PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_lock_common");
	PTHREAD_ASSERT(((*mutex)->m_protocol >= PTHREAD_PRIO_NONE &&
	    (*mutex)->m_protocol <= PTHREAD_PRIO_PROTECT),
	    "Invalid mutex protocol");
	_SPINLOCK(&(*mutex)->lock);

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
		_MUTEX_INIT_LINK(*mutex);
	}

retry:
	/*
	 * If the mutex is a priority protected mutex the thread's
	 * priority may not be higher than that of the mutex.
	 */
	if ((*mutex)->m_protocol == PTHREAD_PRIO_PROTECT &&
	    curthread->active_priority > (*mutex)->m_prio) {
		_SPINUNLOCK(&(*mutex)->lock);
		return (EINVAL);
	}
	if ((*mutex)->m_owner == NULL) {
		/*
		 * Mutex is currently unowned: take it directly.
		 */
		acquire_mutex(*mutex, curthread);
	} else if ((*mutex)->m_owner == curthread) {
		/*
		 * Mutex is owned by curthread. We must test against
		 * certain conditions in such a case.
		 */
		if ((error = mutex_self_lock((*mutex), nonblock)) != 0) {
			_SPINUNLOCK(&(*mutex)->lock);
			return (error);
		}
	} else {
		if (nonblock) {
			/* Trylock semantics: do not sleep. */
			error = EBUSY;
			goto out;
		}

		/*
		 * Another thread owns the mutex. This thread must
		 * wait for that thread to unlock the mutex. This
		 * thread must not return to the caller if it was
		 * interrupted by a signal (hence the retry on EINTR).
		 */
		error = get_mcontested(*mutex, abstime);
		if (error == EINTR)
			goto retry;
		else if (error == ETIMEDOUT)
			goto out;
	}

	/* Count this acquisition for recursive mutexes. */
	if ((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE)
		(*mutex)->m_data.m_count++;

	/*
	 * The mutex is now owned by curthread.
	 */
	PTHREAD_LOCK(curthread);

	/*
	 * The mutex's priority may have changed while waiting for it.
	 * If the ceiling is now below our priority, hand the mutex to
	 * the next waiter and fail with EINVAL.
	 */
	if ((*mutex)->m_protocol == PTHREAD_PRIO_PROTECT &&
	    curthread->active_priority > (*mutex)->m_prio) {
		mutex_attach_to_next_pthread(*mutex);
		if ((*mutex)->m_owner != NULL)
			PTHREAD_UNLOCK((*mutex)->m_owner);
		PTHREAD_UNLOCK(curthread);
		_SPINUNLOCK(&(*mutex)->lock);
		return (EINVAL);
	}

	/* Protocol-specific priority bookkeeping for the new owner. */
	switch ((*mutex)->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		curthread->prio_inherit_count++;
		break;
	case PTHREAD_PRIO_PROTECT:
		PTHREAD_ASSERT((curthread->active_priority <=
		    (*mutex)->m_prio), "priority protection violation");
		curthread->prio_protect_count++;
		if ((*mutex)->m_prio > curthread->active_priority) {
			/* Run at the mutex's priority ceiling. */
			curthread->inherited_priority = (*mutex)->m_prio;
			curthread->active_priority = (*mutex)->m_prio;
		}
		break;
	default:
		/* Nothing */
		break;
	}
	PTHREAD_UNLOCK(curthread);
out:
	_SPINUNLOCK(&(*mutex)->lock);
	return (error);
}
425 
426 /*
427  * Caller must lock thread.
428  */
void
adjust_prio_inheritance(struct pthread *ptd)
{
	struct pthread_mutex *tempMtx;
	struct pthread	     *tempTd;

	/*
	 * Scan owned mutexes's wait queue and execute at the
	 * higher of thread's current priority or the priority of
	 * the highest priority thread waiting on any of the the
	 * mutexes the thread owns. Note: the highest priority thread
	 * on a queue is always at the head of the queue.
	 */
	TAILQ_FOREACH(tempMtx, &ptd->mutexq, m_qe) {
		if (tempMtx->m_protocol != PTHREAD_PRIO_INHERIT)
			continue;

		/*
		 * XXX LOR with respect to tempMtx and ptd.
		 * Order should be: 1. mutex
		 *		    2. pthread
		 */
		_SPINLOCK(&tempMtx->lock);

		/* Head waiter is the highest-priority waiter (see above). */
		tempTd = TAILQ_FIRST(&tempMtx->m_queue);
		if (tempTd != NULL) {
			PTHREAD_LOCK(tempTd);
			if (tempTd->active_priority > ptd->active_priority) {
				/* Inherit the waiter's higher priority. */
				ptd->inherited_priority =
				    tempTd->active_priority;
				ptd->active_priority =
				    tempTd->active_priority;
			}
			PTHREAD_UNLOCK(tempTd);
		}
		_SPINUNLOCK(&tempMtx->lock);
	}
}
467 
468 /*
469  * Caller must lock thread.
470  */
471 static void
472 restore_prio_inheritance(struct pthread *ptd)
473 {
474 	ptd->inherited_priority = PTHREAD_MIN_PRIORITY;
475 	ptd->active_priority = ptd->base_priority;
476 	adjust_prio_inheritance(ptd);
477 }
478 
479 /*
480  * Caller must lock thread.
481  */
482 void
483 adjust_prio_protection(struct pthread *ptd)
484 {
485 	struct pthread_mutex *tempMtx;
486 
487 	/*
488 	 * The thread shall execute at the higher of its priority or
489 	 * the highest priority ceiling of all the priority protection
490 	 * mutexes it owns.
491 	 */
492 	TAILQ_FOREACH(tempMtx, &ptd->mutexq, m_qe) {
493 		if (tempMtx->m_protocol != PTHREAD_PRIO_PROTECT)
494 			continue;
495 		if (ptd->active_priority < tempMtx->m_prio) {
496 			ptd->inherited_priority = tempMtx->m_prio;
497 			ptd->active_priority = tempMtx->m_prio;
498 		}
499 	}
500 }
501 
502 /*
503  * Caller must lock thread.
504  */
505 static void
506 restore_prio_protection(struct pthread *ptd)
507 {
508 	ptd->inherited_priority = PTHREAD_MIN_PRIORITY;
509 	ptd->active_priority = ptd->base_priority;
510 	adjust_prio_protection(ptd);
511 }
512 
513 int
514 __pthread_mutex_lock(pthread_mutex_t *mutex)
515 {
516 	int	ret = 0;
517 
518 	if (_thread_initial == NULL)
519 		_thread_init();
520 
521 	if (mutex == NULL)
522 		ret = EINVAL;
523 
524 	/*
525 	 * If the mutex is statically initialized, perform the dynamic
526 	 * initialization:
527 	 */
528 	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
529 	    ((ret = mutex_init(mutex, 0)) == 0))
530 		ret = mutex_lock_common(mutex, 0, NULL);
531 
532 	return (ret);
533 }
534 
535 /*
536  * Libc internal.
537  */
538 int
539 _pthread_mutex_lock(pthread_mutex_t *mutex)
540 {
541 	int	ret = 0;
542 
543 	if (_thread_initial == NULL)
544 		_thread_init();
545 
546 	_thread_sigblock();
547 
548 	if (mutex == NULL)
549 		ret = EINVAL;
550 
551 	/*
552 	 * If the mutex is statically initialized, perform the dynamic
553 	 * initialization marking it private (delete safe):
554 	 */
555 	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
556 	    ((ret = mutex_init(mutex, 1)) == 0))
557 		ret = mutex_lock_common(mutex, 0, NULL);
558 
559 	if (ret != 0)
560 		_thread_sigunblock();
561 
562 	return (ret);
563 }
564 
565 int
566 _pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
567 {
568 	int error;
569 
570 	error = 0;
571 	if (_thread_initial == NULL)
572 		_thread_init();
573 
574 	/*
575 	 * Initialize it if it's a valid statically inited mutex.
576 	 */
577 	if (mutex == NULL)
578 		error = EINVAL;
579 	else if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
580 	    ((error = mutex_init(mutex, 0)) == 0))
581 		error = mutex_lock_common(mutex, 0, abstime);
582 
583 	PTHREAD_ASSERT(error != EINTR, "According to SUSv3 this function shall not return an error code of EINTR");
584 	return (error);
585 }
586 
int
__pthread_mutex_unlock(pthread_mutex_t * mutex)
{
	/* Application entry point: plain unlock, no reference kept. */
	return (mutex_unlock_common(mutex, /* add reference */ 0));
}
592 
593 /*
594  * Libc internal
595  */
596 int
597 _pthread_mutex_unlock(pthread_mutex_t * mutex)
598 {
599 	int error;
600 	if ((error = mutex_unlock_common(mutex, /* add reference */ 0)) == 0)
601 		_thread_sigunblock();
602 	return (error);
603 }
604 
int
_mutex_cv_unlock(pthread_mutex_t * mutex)
{
	/*
	 * Unlock for a condition-variable wait: bump m_refcount so the
	 * mutex cannot be destroyed while the waiter still needs it.
	 */
	return (mutex_unlock_common(mutex, /* add reference */ 1));
}
610 
611 int
612 _mutex_cv_lock(pthread_mutex_t * mutex)
613 {
614 	int	ret;
615 	if ((ret = _pthread_mutex_lock(mutex)) == 0)
616 		(*mutex)->m_refcount--;
617 	return (ret);
618 }
619 
620 /*
621  * Caller must lock mutex and then disable signals and lock curthread.
622  */
623 static inline int
624 mutex_self_lock(pthread_mutex_t mutex, int noblock)
625 {
626 	switch (mutex->m_type) {
627 	case PTHREAD_MUTEX_ERRORCHECK:
628 		/*
629 		 * POSIX specifies that mutexes should return EDEADLK if a
630 		 * recursive lock is detected.
631 		 */
632 		if (noblock)
633 			return (EBUSY);
634 		return (EDEADLK);
635 		break;
636 
637 	case PTHREAD_MUTEX_NORMAL:
638 		/*
639 		 * What SS2 define as a 'normal' mutex.  Intentionally
640 		 * deadlock on attempts to get a lock you already own.
641 		 */
642 		if (noblock)
643 			return (EBUSY);
644 		curthread->isdeadlocked = 1;
645 		_SPINUNLOCK(&(mutex)->lock);
646 		_thread_suspend(curthread, NULL);
647 		PANIC("Shouldn't resume here?\n");
648 		break;
649 
650 	default:
651 		/* Do Nothing */
652 		break;
653 	}
654 	return (0);
655 }
656 
657 static inline int
658 mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
659 {
660 	/*
661 	 * Error checking.
662 	 */
663 	if (*mutex == NULL)
664 		return (EINVAL);
665 	if ((*mutex)->m_owner != curthread)
666 		return (EPERM);
667 	PTHREAD_ASSERT(((*mutex)->m_protocol >= PTHREAD_PRIO_NONE &&
668 	    (*mutex)->m_protocol <= PTHREAD_PRIO_PROTECT),
669 	    "Invalid mutex protocol");
670 
671 	_SPINLOCK(&(*mutex)->lock);
672 	if ((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) {
673 		(*mutex)->m_data.m_count--;
674 		PTHREAD_ASSERT((*mutex)->m_data.m_count >= 0,
675 		    "The mutex recurse count cannot be less than zero");
676 		if ((*mutex)->m_data.m_count > 0) {
677 			_SPINUNLOCK(&(*mutex)->lock);
678 			return (0);
679 		}
680 	}
681 
682 	/*
683 	 * Release the mutex from this thread and attach it to
684 	 * the next thread in the queue, if there is one waiting.
685 	 */
686 	PTHREAD_LOCK(curthread);
687 	mutex_attach_to_next_pthread(*mutex);
688 	if ((*mutex)->m_owner != NULL)
689 		PTHREAD_UNLOCK((*mutex)->m_owner);
690 	if (add_reference != 0) {
691 		/* Increment the reference count: */
692 		(*mutex)->m_refcount++;
693 	}
694 	_SPINUNLOCK(&(*mutex)->lock);
695 
696 	/*
697 	 * Fix priority of the thread that just released the mutex.
698 	 */
699 	switch ((*mutex)->m_protocol) {
700 	case PTHREAD_PRIO_INHERIT:
701 		curthread->prio_inherit_count--;
702 		PTHREAD_ASSERT(curthread->prio_inherit_count >= 0,
703 		    "priority inheritance counter cannot be less than zero");
704 		restore_prio_inheritance(curthread);
705 		if (curthread->prio_protect_count > 0)
706 			restore_prio_protection(curthread);
707 		break;
708 	case PTHREAD_PRIO_PROTECT:
709 		curthread->prio_protect_count--;
710 		PTHREAD_ASSERT(curthread->prio_protect_count >= 0,
711 		    "priority protection counter cannot be less than zero");
712 		restore_prio_protection(curthread);
713 		if (curthread->prio_inherit_count > 0)
714 			restore_prio_inheritance(curthread);
715 		break;
716 	default:
717 		/* Nothing */
718 		break;
719 	}
720 	PTHREAD_UNLOCK(curthread);
721 	return (0);
722 }
723 
724 void
725 _mutex_unlock_private(pthread_t pthread)
726 {
727 	struct pthread_mutex	*m, *m_next;
728 
729 	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
730 		m_next = TAILQ_NEXT(m, m_qe);
731 		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
732 			_pthread_mutex_unlock(&m);
733 	}
734 }
735 
736 void
737 _mutex_lock_backout(pthread_t pthread)
738 {
739 	struct pthread_mutex	*mutex;
740 
741 	mutex = pthread->data.mutex;
742 	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
743 
744 		mutex_queue_remove(mutex, pthread);
745 
746 		/* This thread is no longer waiting for the mutex: */
747 		pthread->data.mutex = NULL;
748 
749 	}
750 }
751 
752 /*
753  * Dequeue a waiting thread from the head of a mutex queue in descending
754  * priority order. This funtion will return with the thread locked.
755  */
static inline pthread_t
mutex_queue_deq(pthread_mutex_t mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		PTHREAD_LOCK(pthread);
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * asynchronously cancelled.
		 *
		 * NOTE(review): a skipped (cancelled) thread is left
		 * locked here with no matching PTHREAD_UNLOCK in this
		 * function - confirm the cancellation path releases it.
		 */
		if (pthread->cancelmode == M_ASYNC &&
		    pthread->cancellation != CS_NULL)
			continue;
		else
			break;
	}
	return (pthread);
}
778 
779 /*
780  * Remove a waiting thread from a mutex queue in descending priority order.
781  */
782 static inline void
783 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
784 {
785 	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
786 		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
787 		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
788 	}
789 }
790 
791 /*
792  * Enqueue a waiting thread to a queue in descending priority order.
793  */
794 static inline void
795 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
796 {
797 	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
798 	char *name;
799 
800 	name = pthread->name ? pthread->name : "unknown";
801 	if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0)
802 		_thread_printf(2, "Thread (%s:%ld) already on condq\n",
803 		    pthread->name, pthread->thr_id);
804 	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0)
805 		_thread_printf(2, "Thread (%s:%ld) already on mutexq\n",
806 		    pthread->name, pthread->thr_id);
807 	PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
808 	/*
809 	 * For the common case of all threads having equal priority,
810 	 * we perform a quick check against the priority of the thread
811 	 * at the tail of the queue.
812 	 */
813 	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
814 		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
815 	else {
816 		tid = TAILQ_FIRST(&mutex->m_queue);
817 		while (pthread->active_priority <= tid->active_priority)
818 			tid = TAILQ_NEXT(tid, sqe);
819 		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
820 	}
821 	if (mutex->m_protocol == PTHREAD_PRIO_INHERIT &&
822 	    pthread == TAILQ_FIRST(&mutex->m_queue)) {
823 		PTHREAD_LOCK(mutex->m_owner);
824 		if (pthread->active_priority >
825 		    mutex->m_owner->active_priority) {
826 			mutex->m_owner->inherited_priority =
827 			    pthread->active_priority;
828 			mutex->m_owner->active_priority =
829 			    pthread->active_priority;
830 		}
831 		PTHREAD_UNLOCK(mutex->m_owner);
832 	}
833 	pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
834 }
835 
836 /*
837  * Caller must lock mutex and pthread.
838  */
void
readjust_priorities(struct pthread *pthread, struct pthread_mutex *mtx)
{
	/*
	 * If the thread is queued on a mutex, re-sort it into the wait
	 * queue at its (possibly changed) priority and let the owner
	 * pick up any resulting priority boost.
	 */
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
		PTHREAD_ASSERT(mtx != NULL,
		    "mutex is NULL when it should not be");
		mutex_queue_remove(mtx, pthread);
		mutex_queue_enq(mtx, pthread);
		PTHREAD_LOCK(mtx->m_owner);
		adjust_prio_inheritance(mtx->m_owner);
		if (mtx->m_owner->prio_protect_count > 0)
			adjust_prio_protection(mtx->m_owner);
		PTHREAD_UNLOCK(mtx->m_owner);
	}
	/* Recompute this thread's own inherited/protected priority. */
	if (pthread->prio_inherit_count > 0)
		adjust_prio_inheritance(pthread);
	if (pthread->prio_protect_count > 0)
		adjust_prio_protection(pthread);
}
858 
859 /*
860  * Returns with the lock owned and on the thread's mutexq. If
861  * the mutex is currently owned by another thread it will sleep
862  * until it is available.
863  */
static int
get_mcontested(pthread_mutex_t mutexp, const struct timespec *abstime)
{
	int error;

	/*
	 * If the timeout is invalid this thread is not allowed
	 * to block;
	 */
	if (abstime != NULL) {
		if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
			return (EINVAL);
	}

	/*
	 * Put this thread on the mutex's list of waiting threads.
	 * The lock on the thread ensures atomic (as far as other
	 * threads are concerned) setting of the thread state with
	 * it's status on the mutex queue.
	 */
	PTHREAD_LOCK(curthread);
	mutex_queue_enq(mutexp, curthread);
	do {
		/*
		 * An asynchronously cancelled thread must not sleep:
		 * back out of the queue and let the cancellation fire.
		 * NOTE(review): pthread_testcancel() is expected not to
		 * return here - confirm, since the code falls through
		 * with both locks already released.
		 */
		if (curthread->cancelmode == M_ASYNC &&
		    curthread->cancellation != CS_NULL) {
			mutex_queue_remove(mutexp, curthread);
			PTHREAD_UNLOCK(curthread);
			_SPINUNLOCK(&mutexp->lock);
			pthread_testcancel();
		}
		/* Record what we are waiting on, then sleep unlocked. */
		curthread->data.mutex = mutexp;
		PTHREAD_UNLOCK(curthread);
		_SPINUNLOCK(&mutexp->lock);
		error = _thread_suspend(curthread, abstime);
		if (error != 0 && error != ETIMEDOUT && error != EINTR)
			PANIC("Cannot suspend on mutex.");
		_SPINLOCK(&mutexp->lock);
		PTHREAD_LOCK(curthread);
		if (error == ETIMEDOUT) {
			/*
			 * Between the timeout and when the mutex was
			 * locked the previous owner may have released
			 * the mutex to this thread. Or not.
			 */
			if (mutexp->m_owner == curthread)
				error = 0;
			else
				_mutex_lock_backout(curthread);
		}
	} while ((curthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0);
	PTHREAD_UNLOCK(curthread);
	return (error);
}
917