xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision 6af83ee0d2941d18880b6aaa2b4facd1d30c6106)
1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by John Birrell.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 #include <stdlib.h>
35 #include <errno.h>
36 #include <string.h>
37 #include <sys/param.h>
38 #include <sys/queue.h>
39 #include <pthread.h>
40 #include <time.h>
41 #include "thr_private.h"
42 
#if defined(_PTHREADS_INVARIANTS)
/*
 * Debug-only invariant helpers.  A mutex's m_qe linkage is NULL while
 * the mutex is unowned and non-NULL while it sits on its owner's
 * mutexq; these macros PANIC when that invariant is violated.
 */
#define _MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define _MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define _MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
/* Invariant checking disabled: the macros compile away to nothing. */
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif
62 
63 
64 /*
65  * Prototypes
66  */
67 static void		acquire_mutex(struct pthread_mutex *, struct pthread *);
68 static int		get_mcontested(pthread_mutex_t,
69 			    const struct timespec *);
70 static void		mutex_attach_to_next_pthread(struct pthread_mutex *);
71 static int		mutex_init(pthread_mutex_t *, int);
72 static int		mutex_lock_common(pthread_mutex_t *, int,
73 			    const struct timespec *);
74 static inline int	mutex_self_lock(pthread_mutex_t, int);
75 static inline int	mutex_unlock_common(pthread_mutex_t *, int);
76 static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
77 static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
78 static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
79 static void		restore_prio_inheritance(struct pthread *);
80 static void		restore_prio_protection(struct pthread *);
81 
82 
83 static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
84 
85 static struct pthread_mutex_attr	static_mutex_attr =
86     PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
87 static pthread_mutexattr_t		static_mattr = &static_mutex_attr;
88 
89 /* Single underscore versions provided for libc internal usage: */
90 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
91 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
92 __weak_reference(__pthread_mutex_unlock, pthread_mutex_unlock);
93 
94 /* No difference between libc and application usage of these: */
95 __weak_reference(_pthread_mutex_init, pthread_mutex_init);
96 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
97 __weak_reference(_pthread_mutex_timedlock, pthread_mutex_timedlock);
98 
99 
/*
 * Reinitialize a private mutex; this is only used for internal mutexes.
 * Returns 0 on success or an error from _pthread_mutex_init().
 */
int
_mutex_reinit(pthread_mutex_t * mutex)
{
	int	ret = 0;

	if (*mutex == PTHREAD_MUTEX_INITIALIZER)
		/* Still statically initialized: do a full dynamic init. */
		ret = _pthread_mutex_init(mutex, NULL);
	else {
		/*
		 * Initialize the mutex structure in place, resetting it
		 * to the default, unowned state and marking it private
		 * (delete safe) for libc-internal use:
		 */
		(*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
		(*mutex)->m_protocol = PTHREAD_PRIO_NONE;
		TAILQ_INIT(&(*mutex)->m_queue);
		(*mutex)->m_owner = NULL;
		(*mutex)->m_data.m_count = 0;
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE;
		(*mutex)->m_refcount = 0;
		(*mutex)->m_prio = 0;
		(*mutex)->m_saved_prio = 0;
		_MUTEX_INIT_LINK(*mutex);
		/* Reset the structure spinlock as well. */
		memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
	}
	return (ret);
}
128 
129 int
130 _pthread_mutex_init(pthread_mutex_t * mutex,
131 		   const pthread_mutexattr_t * mutex_attr)
132 {
133 	struct pthread_mutex_attr default_attr = {PTHREAD_MUTEX_ERRORCHECK,
134 	    PTHREAD_PRIO_NONE, PTHREAD_MAX_PRIORITY, 0 };
135 	struct pthread_mutex_attr *attr;
136 
137 	if (mutex_attr == NULL) {
138 		attr = &default_attr;
139 	} else {
140 		/*
141 		 * Check that the given mutex attribute is valid.
142 		 */
143 		if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
144 		    ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
145 			return (EINVAL);
146 		else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
147 		    ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
148 			return (EINVAL);
149 		attr = *mutex_attr;
150 	}
151 	if ((*mutex =
152 	    (pthread_mutex_t)malloc(sizeof(struct pthread_mutex))) == NULL)
153 		return (ENOMEM);
154 	memset((void *)(*mutex), 0, sizeof(struct pthread_mutex));
155 
156 	/* Initialise the rest of the mutex: */
157 	TAILQ_INIT(&(*mutex)->m_queue);
158 	_MUTEX_INIT_LINK(*mutex);
159 	(*mutex)->m_protocol = attr->m_protocol;
160 	(*mutex)->m_flags = (attr->m_flags | MUTEX_FLAGS_INITED);
161 	(*mutex)->m_type = attr->m_type;
162 	if ((*mutex)->m_protocol == PTHREAD_PRIO_PROTECT)
163 		(*mutex)->m_prio = attr->m_ceiling;
164 	return (0);
165 }
166 
/*
 * Destroy a mutex, releasing its storage.  Returns 0 on success or
 * EBUSY if the mutex is still in use; *mutex is set to NULL on
 * success.
 */
int
_pthread_mutex_destroy(pthread_mutex_t * mutex)
{
	/*
	 * If this mutex was statically initialized, don't bother
	 * initializing it in order to destroy it immediately.
	 */
	if (*mutex == PTHREAD_MUTEX_INITIALIZER)
		return (0);

	/* Lock the mutex structure: */
	_SPINLOCK(&(*mutex)->lock);

	/*
	 * Check to see if this mutex is in use: it is busy if it is
	 * owned, if threads are queued waiting on it, or if a
	 * condition variable still holds a reference to it.
	 */
	if (((*mutex)->m_owner != NULL) ||
	    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
	    ((*mutex)->m_refcount != 0)) {
		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);
		return (EBUSY);
	}

	/*
	 * Free the memory allocated for the mutex
	 * structure:
	 */
	_MUTEX_ASSERT_NOT_OWNED(*mutex);
	_SPINUNLOCK(&(*mutex)->lock);
	free(*mutex);

	/*
	 * Leave the caller's pointer NULL now that
	 * the mutex has been destroyed:
	 */
	*mutex = NULL;

	return (0);
}
207 
208 static int
209 mutex_init(pthread_mutex_t *mutex, int private)
210 {
211 	pthread_mutexattr_t *pma;
212 	int error;
213 
214 	error = 0;
215 	pma = private ? &static_mattr : NULL;
216 	_SPINLOCK(&static_init_lock);
217 	if (*mutex == PTHREAD_MUTEX_INITIALIZER)
218 		error = _pthread_mutex_init(mutex, pma);
219 	_SPINUNLOCK(&static_init_lock);
220 	return (error);
221 }
222 
/*
 * Acquires a mutex for the current thread. The caller must
 * lock the mutex before calling this function.
 */
static void
acquire_mutex(struct pthread_mutex *mtx, struct pthread *ptd)
{
	/* Record ownership; the link fields must still be clear. */
	mtx->m_owner = ptd;
	_MUTEX_ASSERT_NOT_OWNED(mtx);
	/* Put the mutex on the owning thread's list of held mutexes. */
	PTHREAD_LOCK(ptd);
	TAILQ_INSERT_TAIL(&ptd->mutexq, mtx, m_qe);
	PTHREAD_UNLOCK(ptd);
}
236 
/*
 * Releases a mutex from the current thread. The owner must
 * lock the mutex. The next thread on the queue will be returned
 * locked by the current thread. The caller must take care to
 * unlock it.
 */
static void
mutex_attach_to_next_pthread(struct pthread_mutex *mtx)
{
	struct pthread *ptd;

	/* Detach the mutex from its current owner's held-mutex list. */
	_MUTEX_ASSERT_IS_OWNED(mtx);
	TAILQ_REMOVE(&mtx->m_owner->mutexq, (mtx), m_qe);
	_MUTEX_INIT_LINK(mtx);

	/*
	 * Deque next thread waiting for this mutex and attach
	 * the mutex to it. The thread will already be locked.
	 */
	if ((ptd = mutex_queue_deq(mtx)) != NULL) {
		TAILQ_INSERT_TAIL(&ptd->mutexq, mtx, m_qe);
		ptd->data.mutex = NULL;
		PTHREAD_WAKE(ptd);
	}
	/* New owner, or NULL if no thread was waiting. */
	mtx->m_owner = ptd;
}
263 
264 int
265 __pthread_mutex_trylock(pthread_mutex_t *mutex)
266 {
267 	int	ret = 0;
268 
269 	/*
270 	 * If the mutex is statically initialized, perform the dynamic
271 	 * initialization:
272 	 */
273 	if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
274 	    (ret = mutex_init(mutex, 0)) == 0)
275 		ret = mutex_lock_common(mutex, 1, NULL);
276 
277 	return (ret);
278 }
279 
280 /*
281  * Libc internal.
282  */
283 int
284 _pthread_mutex_trylock(pthread_mutex_t *mutex)
285 {
286 	int	ret = 0;
287 
288 	/*
289 	 * If the mutex is statically initialized, perform the dynamic
290 	 * initialization marking the mutex private (delete safe):
291 	 */
292 	if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
293 	    (ret = mutex_init(mutex, 1)) == 0)
294 		ret = mutex_lock_common(mutex, 1, NULL);
295 
296 	return (ret);
297 }
298 
/*
 * Common lock path for trylock/lock/timedlock.  nonblock != 0 means
 * fail with EBUSY rather than sleeping; abstime, if non-NULL, bounds
 * the sleep (ETIMEDOUT on expiry).  Returns 0 with the mutex owned
 * by curthread, or an errno value.
 */
static int
mutex_lock_common(pthread_mutex_t * mutex, int nonblock,
    const struct timespec *abstime)
{
	int error;

	error = 0;
	PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in mutex_lock_common");
	PTHREAD_ASSERT(((*mutex)->m_protocol >= PTHREAD_PRIO_NONE &&
	    (*mutex)->m_protocol <= PTHREAD_PRIO_PROTECT),
	    "Invalid mutex protocol");
	_SPINLOCK(&(*mutex)->lock);

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
		_MUTEX_INIT_LINK(*mutex);
	}

retry:
	/*
	 * If the mutex is a priority protected mutex the thread's
	 * priority may not be higher than that of the mutex.
	 */
	if ((*mutex)->m_protocol == PTHREAD_PRIO_PROTECT &&
	    curthread->active_priority > (*mutex)->m_prio) {
		_SPINUNLOCK(&(*mutex)->lock);
		return (EINVAL);
	}
	if ((*mutex)->m_owner == NULL) {
		/*
		 * Mutex is currently unowned.
		 */
		acquire_mutex(*mutex, curthread);
	} else if ((*mutex)->m_owner == curthread) {
		/*
		 * Mutex is owned by curthread. We must test against
		 * certain conditions in such a case.
		 */
		if ((error = mutex_self_lock((*mutex), nonblock)) != 0) {
			_SPINUNLOCK(&(*mutex)->lock);
			return (error);
		}
	} else {
		if (nonblock) {
			error = EBUSY;
			goto out;
		}

		/*
		 * Another thread owns the mutex. This thread must
		 * wait for that thread to unlock the mutex. This
		 * thread must not return to the caller if it was
		 * interrupted by a signal.
		 */
		error = get_mcontested(*mutex, abstime);
		if (error == EINTR)
			goto retry;
		else if (error == ETIMEDOUT)
			goto out;
	}

	/* A recursive mutex counts each acquisition. */
	if ((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE)
		(*mutex)->m_data.m_count++;

	/*
	 * The mutex is now owned by curthread.
	 */
	PTHREAD_LOCK(curthread);

	/*
	 * The mutex's priority may have changed while waiting for it.
	 * If the ceiling check now fails, hand the mutex to the next
	 * waiter and fail with EINVAL.
	 */
	if ((*mutex)->m_protocol == PTHREAD_PRIO_PROTECT &&
	    curthread->active_priority > (*mutex)->m_prio) {
		mutex_attach_to_next_pthread(*mutex);
		if ((*mutex)->m_owner != NULL)
			PTHREAD_UNLOCK((*mutex)->m_owner);
		PTHREAD_UNLOCK(curthread);
		_SPINUNLOCK(&(*mutex)->lock);
		return (EINVAL);
	}

	/* Account for, and apply, any priority protocol effects. */
	switch ((*mutex)->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		curthread->prio_inherit_count++;
		break;
	case PTHREAD_PRIO_PROTECT:
		PTHREAD_ASSERT((curthread->active_priority <=
		    (*mutex)->m_prio), "priority protection violation");
		curthread->prio_protect_count++;
		/* Boost to the mutex's priority ceiling if higher. */
		if ((*mutex)->m_prio > curthread->active_priority) {
			curthread->inherited_priority = (*mutex)->m_prio;
			curthread->active_priority = (*mutex)->m_prio;
		}
		break;
	default:
		/* Nothing */
		break;
	}
	PTHREAD_UNLOCK(curthread);
out:
	_SPINUNLOCK(&(*mutex)->lock);
	return (error);
}
409 
/*
 * Caller must lock thread.
 */
void
adjust_prio_inheritance(struct pthread *ptd)
{
	struct pthread_mutex *tempMtx;
	struct pthread	     *tempTd;

	/*
	 * Scan owned mutexes's wait queue and execute at the
	 * higher of thread's current priority or the priority of
	 * the highest priority thread waiting on any of the
	 * mutexes the thread owns. Note: the highest priority thread
	 * on a queue is always at the head of the queue.
	 */
	TAILQ_FOREACH(tempMtx, &ptd->mutexq, m_qe) {
		if (tempMtx->m_protocol != PTHREAD_PRIO_INHERIT)
			continue;

		/*
		 * XXX LOR with respect to tempMtx and ptd.
		 * Order should be: 1. mutex
		 *		    2. pthread
		 */
		_SPINLOCK(&tempMtx->lock);

		/* Head waiter has the highest priority, if any. */
		tempTd = TAILQ_FIRST(&tempMtx->m_queue);
		if (tempTd != NULL) {
			PTHREAD_LOCK(tempTd);
			if (tempTd->active_priority > ptd->active_priority) {
				ptd->inherited_priority =
				    tempTd->active_priority;
				ptd->active_priority =
				    tempTd->active_priority;
			}
			PTHREAD_UNLOCK(tempTd);
		}
		_SPINUNLOCK(&tempMtx->lock);
	}
}
451 
/*
 * Caller must lock thread.
 *
 * Drop any inherited boost back to the base priority, then re-apply
 * inheritance from the waiters on mutexes the thread still owns.
 */
static void
restore_prio_inheritance(struct pthread *ptd)
{
	ptd->inherited_priority = PTHREAD_MIN_PRIORITY;
	ptd->active_priority = ptd->base_priority;
	adjust_prio_inheritance(ptd);
}
462 
/*
 * Caller must lock thread.
 */
void
adjust_prio_protection(struct pthread *ptd)
{
	struct pthread_mutex *tempMtx;

	/*
	 * The thread shall execute at the higher of its priority or
	 * the highest priority ceiling of all the priority protection
	 * mutexes it owns.
	 */
	TAILQ_FOREACH(tempMtx, &ptd->mutexq, m_qe) {
		if (tempMtx->m_protocol != PTHREAD_PRIO_PROTECT)
			continue;
		/* Boost up to this mutex's priority ceiling. */
		if (ptd->active_priority < tempMtx->m_prio) {
			ptd->inherited_priority = tempMtx->m_prio;
			ptd->active_priority = tempMtx->m_prio;
		}
	}
}
485 
/*
 * Caller must lock thread.
 *
 * Drop any ceiling boost back to the base priority, then re-apply
 * protection from the priority-protected mutexes still owned.
 */
static void
restore_prio_protection(struct pthread *ptd)
{
	ptd->inherited_priority = PTHREAD_MIN_PRIORITY;
	ptd->active_priority = ptd->base_priority;
	adjust_prio_protection(ptd);
}
496 
497 int
498 __pthread_mutex_lock(pthread_mutex_t *mutex)
499 {
500 	int	ret = 0;
501 
502 	if (_thread_initial == NULL)
503 		_thread_init();
504 
505 	/*
506 	 * If the mutex is statically initialized, perform the dynamic
507 	 * initialization:
508 	 */
509 	if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
510 	    ((ret = mutex_init(mutex, 0)) == 0))
511 		ret = mutex_lock_common(mutex, 0, NULL);
512 
513 	return (ret);
514 }
515 
516 /*
517  * Libc internal.
518  */
519 int
520 _pthread_mutex_lock(pthread_mutex_t *mutex)
521 {
522 	int	ret = 0;
523 
524 	if (_thread_initial == NULL)
525 		_thread_init();
526 
527 	/*
528 	 * If the mutex is statically initialized, perform the dynamic
529 	 * initialization marking it private (delete safe):
530 	 */
531 	if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
532 	    ((ret = mutex_init(mutex, 1)) == 0))
533 		ret = mutex_lock_common(mutex, 0, NULL);
534 
535 	return (ret);
536 }
537 
538 int
539 _pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
540 {
541 	int error;
542 
543 	error = 0;
544 	if (_thread_initial == NULL)
545 		_thread_init();
546 
547 	/*
548 	 * Initialize it if it's a valid statically inited mutex.
549 	 */
550 	if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
551 	    ((error = mutex_init(mutex, 0)) == 0))
552 		error = mutex_lock_common(mutex, 0, abstime);
553 
554 	PTHREAD_ASSERT(error != EINTR, "According to SUSv3 this function shall not return an error code of EINTR");
555 	return (error);
556 }
557 
/* Public (application) unlock entry point. */
int
__pthread_mutex_unlock(pthread_mutex_t * mutex)
{
	return (mutex_unlock_common(mutex, /* add reference */ 0));
}
563 
/*
 * Libc internal unlock entry point; same behavior as the public one.
 */
int
_pthread_mutex_unlock(pthread_mutex_t * mutex)
{
	return (mutex_unlock_common(mutex, /* add reference */ 0));
}
572 
/*
 * Unlock on behalf of a condition-variable wait: the refcount keeps
 * the mutex from being destroyed while the waiter is blocked.
 */
int
_mutex_cv_unlock(pthread_mutex_t * mutex)
{
	return (mutex_unlock_common(mutex, /* add reference */ 1));
}
578 
579 int
580 _mutex_cv_lock(pthread_mutex_t * mutex)
581 {
582 	int	ret;
583 	if ((ret = _pthread_mutex_lock(mutex)) == 0)
584 		(*mutex)->m_refcount--;
585 	return (ret);
586 }
587 
588 /*
589  * Caller must lock mutex and then disable signals and lock curthread.
590  */
591 static inline int
592 mutex_self_lock(pthread_mutex_t mutex, int noblock)
593 {
594 	switch (mutex->m_type) {
595 	case PTHREAD_MUTEX_ERRORCHECK:
596 		/*
597 		 * POSIX specifies that mutexes should return EDEADLK if a
598 		 * recursive lock is detected.
599 		 */
600 		if (noblock)
601 			return (EBUSY);
602 		return (EDEADLK);
603 		break;
604 
605 	case PTHREAD_MUTEX_NORMAL:
606 		/*
607 		 * What SS2 define as a 'normal' mutex.  Intentionally
608 		 * deadlock on attempts to get a lock you already own.
609 		 */
610 		if (noblock)
611 			return (EBUSY);
612 		curthread->isdeadlocked = 1;
613 		_SPINUNLOCK(&(mutex)->lock);
614 		_thread_suspend(curthread, NULL);
615 		PANIC("Shouldn't resume here?\n");
616 		break;
617 
618 	default:
619 		/* Do Nothing */
620 		break;
621 	}
622 	return (0);
623 }
624 
/*
 * Common unlock path.  If add_reference is set the mutex refcount is
 * bumped (condition-variable use).  Returns 0 on success or EPERM if
 * the caller does not own the mutex.
 */
static inline int
mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
{
	/*
	 * Error checking.
	 * NOTE(review): the owner test is performed before the mutex
	 * spinlock is taken — presumably safe because only the owner
	 * can change m_owner away from curthread; confirm.
	 */
	if ((*mutex)->m_owner != curthread)
		return (EPERM);
	PTHREAD_ASSERT(((*mutex)->m_protocol >= PTHREAD_PRIO_NONE &&
	    (*mutex)->m_protocol <= PTHREAD_PRIO_PROTECT),
	    "Invalid mutex protocol");

	_SPINLOCK(&(*mutex)->lock);
	if ((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) {
		(*mutex)->m_data.m_count--;
		PTHREAD_ASSERT((*mutex)->m_data.m_count >= 0,
		    "The mutex recurse count cannot be less than zero");
		/* Still recursively held: nothing more to do. */
		if ((*mutex)->m_data.m_count > 0) {
			_SPINUNLOCK(&(*mutex)->lock);
			return (0);
		}
	}

	/*
	 * Release the mutex from this thread and attach it to
	 * the next thread in the queue, if there is one waiting.
	 */
	PTHREAD_LOCK(curthread);
	mutex_attach_to_next_pthread(*mutex);
	if ((*mutex)->m_owner != NULL)
		PTHREAD_UNLOCK((*mutex)->m_owner);
	if (add_reference != 0) {
		/* Increment the reference count: */
		(*mutex)->m_refcount++;
	}
	_SPINUNLOCK(&(*mutex)->lock);

	/*
	 * Fix priority of the thread that just released the mutex.
	 * NOTE(review): m_protocol is read after the mutex spinlock
	 * was dropped; looks safe only because the protocol never
	 * changes after init — confirm.
	 */
	switch ((*mutex)->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		curthread->prio_inherit_count--;
		PTHREAD_ASSERT(curthread->prio_inherit_count >= 0,
		    "priority inheritance counter cannot be less than zero");
		restore_prio_inheritance(curthread);
		if (curthread->prio_protect_count > 0)
			restore_prio_protection(curthread);
		break;
	case PTHREAD_PRIO_PROTECT:
		curthread->prio_protect_count--;
		PTHREAD_ASSERT(curthread->prio_protect_count >= 0,
		    "priority protection counter cannot be less than zero");
		restore_prio_protection(curthread);
		if (curthread->prio_inherit_count > 0)
			restore_prio_inheritance(curthread);
		break;
	default:
		/* Nothing */
		break;
	}
	PTHREAD_UNLOCK(curthread);
	return (0);
}
689 
/*
 * Unlock every libc-private mutex held by the given thread.
 * The next pointer is captured before unlocking because the unlock
 * removes the mutex from the thread's mutexq.
 * NOTE(review): the unlock path requires m_owner == curthread, so
 * this presumably must be called with pthread == curthread — confirm.
 */
void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}
701 
/*
 * Back a thread out of a contested lock attempt: remove it from the
 * wait queue of the mutex recorded in pthread->data.mutex and clear
 * that record.  No-op if the thread is not on a mutex queue.
 */
void
_mutex_lock_backout(pthread_t pthread)
{
	struct pthread_mutex	*mutex;

	mutex = pthread->data.mutex;
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {

		mutex_queue_remove(mutex, pthread);

		/* This thread is no longer waiting for the mutex: */
		pthread->data.mutex = NULL;

	}
}
717 
/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order. This funtion will return with the thread locked.
 * Returns NULL if no eligible waiter remains.
 */
static inline pthread_t
mutex_queue_deq(pthread_mutex_t mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		PTHREAD_LOCK(pthread);
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * asynchronously cancelled.
		 * NOTE(review): a skipped (cancelled) thread is left
		 * PTHREAD_LOCKed here — presumably released by the
		 * cancellation path; confirm.
		 */
		if (pthread->cancelmode == M_ASYNC &&
		    pthread->cancellation != CS_NULL)
			continue;
		else
			break;
	}
	return (pthread);
}
744 
745 /*
746  * Remove a waiting thread from a mutex queue in descending priority order.
747  */
748 static inline void
749 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
750 {
751 	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
752 		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
753 		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
754 	}
755 }
756 
757 /*
758  * Enqueue a waiting thread to a queue in descending priority order.
759  */
760 static inline void
761 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
762 {
763 	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
764 	char *name;
765 
766 	name = pthread->name ? pthread->name : "unknown";
767 	if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0)
768 		_thread_printf(2, "Thread (%s:%ld) already on condq\n",
769 		    pthread->name, pthread->thr_id);
770 	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0)
771 		_thread_printf(2, "Thread (%s:%ld) already on mutexq\n",
772 		    pthread->name, pthread->thr_id);
773 	PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
774 	/*
775 	 * For the common case of all threads having equal priority,
776 	 * we perform a quick check against the priority of the thread
777 	 * at the tail of the queue.
778 	 */
779 	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
780 		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
781 	else {
782 		tid = TAILQ_FIRST(&mutex->m_queue);
783 		while (pthread->active_priority <= tid->active_priority)
784 			tid = TAILQ_NEXT(tid, sqe);
785 		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
786 	}
787 	if (mutex->m_protocol == PTHREAD_PRIO_INHERIT &&
788 	    pthread == TAILQ_FIRST(&mutex->m_queue)) {
789 		PTHREAD_LOCK(mutex->m_owner);
790 		if (pthread->active_priority >
791 		    mutex->m_owner->active_priority) {
792 			mutex->m_owner->inherited_priority =
793 			    pthread->active_priority;
794 			mutex->m_owner->active_priority =
795 			    pthread->active_priority;
796 		}
797 		PTHREAD_UNLOCK(mutex->m_owner);
798 	}
799 	pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
800 }
801 
/*
 * Caller must lock mutex and pthread.
 *
 * Re-sort the thread within the mutex wait queue after a priority
 * change, then recompute the priorities its position may affect.
 * NOTE(review): mtx->m_owner is locked unconditionally here —
 * presumably a queued waiter implies a non-NULL owner; confirm.
 */
void
readjust_priorities(struct pthread *pthread, struct pthread_mutex *mtx)
{
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
		PTHREAD_ASSERT(mtx != NULL,
		    "mutex is NULL when it should not be");
		/* Remove and re-insert to restore priority order. */
		mutex_queue_remove(mtx, pthread);
		mutex_queue_enq(mtx, pthread);
		PTHREAD_LOCK(mtx->m_owner);
		adjust_prio_inheritance(mtx->m_owner);
		if (mtx->m_owner->prio_protect_count > 0)
			adjust_prio_protection(mtx->m_owner);
		PTHREAD_UNLOCK(mtx->m_owner);
	}
	/* The thread's own boosts may also need recomputing. */
	if (pthread->prio_inherit_count > 0)
		adjust_prio_inheritance(pthread);
	if (pthread->prio_protect_count > 0)
		adjust_prio_protection(pthread);
}
824 
/*
 * Returns with the lock owned and on the thread's mutexq. If
 * the mutex is currently owned by another thread it will sleep
 * until it is available.  Caller holds the mutex spinlock, which is
 * dropped while sleeping and re-held on return.  Returns 0, EINVAL
 * for a malformed timeout, EINTR, or ETIMEDOUT.
 */
static int
get_mcontested(pthread_mutex_t mutexp, const struct timespec *abstime)
{
	int error;

	/*
	 * If the timeout is invalid this thread is not allowed
	 * to block;
	 */
	if (abstime != NULL) {
		if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
			return (EINVAL);
	}

	/*
	 * Put this thread on the mutex's list of waiting threads.
	 * The lock on the thread ensures atomic (as far as other
	 * threads are concerned) setting of the thread state with
	 * it's status on the mutex queue.
	 */
	PTHREAD_LOCK(curthread);
	mutex_queue_enq(mutexp, curthread);
	do {
		/*
		 * Honor a pending asynchronous cancellation before
		 * sleeping.  NOTE(review): pthread_testcancel() is
		 * presumably expected not to return here (async mode
		 * with a pending cancel); if it did, the unlock calls
		 * below would be unbalanced — confirm.
		 */
		if (curthread->cancelmode == M_ASYNC &&
		    curthread->cancellation != CS_NULL) {
			mutex_queue_remove(mutexp, curthread);
			PTHREAD_UNLOCK(curthread);
			_SPINUNLOCK(&mutexp->lock);
			pthread_testcancel();
		}
		/* Record what we are waiting on, then sleep. */
		curthread->data.mutex = mutexp;
		PTHREAD_UNLOCK(curthread);
		_SPINUNLOCK(&mutexp->lock);
		error = _thread_suspend(curthread, abstime);
		if (error != 0 && error != ETIMEDOUT && error != EINTR)
			PANIC("Cannot suspend on mutex.");
		_SPINLOCK(&mutexp->lock);
		PTHREAD_LOCK(curthread);
		if (error == ETIMEDOUT) {
			/*
			 * Between the timeout and when the mutex was
			 * locked the previous owner may have released
			 * the mutex to this thread. Or not.
			 */
			if (mutexp->m_owner == curthread)
				error = 0;
			else
				_mutex_lock_backout(curthread);
		}
	} while ((curthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0);
	PTHREAD_UNLOCK(curthread);
	return (error);
}
883