/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, the number of times to spin calling trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	200

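/*
 * Illustrative sketch (not part of this file): an application opts in
 * to the adaptive spinning above by creating a mutex of type
 * PTHREAD_MUTEX_ADAPTIVE_NP, e.g.:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutexattr_destroy(&attr);
 *
 * Contended lock attempts on `m' will then spin up to
 * MUTEX_ADAPTIVE_SPINS times before blocking in the kernel.
 */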
/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
				const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
		calloc(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
		break;
	}
	*mutex = pmutex;
	return (0);
}
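/*
 * Illustrative sketch (not part of this file): the PTHREAD_PRIO_PROTECT
 * branch above is reached when the caller configures a priority ceiling
 * through the attribute object, e.g.:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
 *	pthread_mutexattr_setprioceiling(&attr, 20);
 *	pthread_mutex_init(&m, &attr);
 *
 * The ceiling value (20 here is arbitrary) ends up in
 * m_lock.m_ceilings[0] via attr->m_ceiling.
 */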

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}
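/*
 * Illustrative sketch (not part of this file): these helpers back
 * statically initialized mutexes.  As the `*mutex == NULL' checks
 * above suggest, PTHREAD_MUTEX_INITIALIZER leaves the pointer NULL,
 * so the first lock attempt performs the allocation:
 *
 *	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
 *
 *	pthread_mutex_lock(&m);		(sees *mutex == NULL, calls
 *					 init_static() under
 *					 _mutex_static_lock, then locks)
 *	pthread_mutex_unlock(&m);
 */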

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 1));
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 0));
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited, because the
	 * owner is the forking thread, which lives in the parent
	 * process; they would have to be removed from the owned mutex
	 * list.  Process-shared mutexes are currently not supported,
	 * so this is not a concern yet.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}
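/*
 * Illustrative sketch (not part of this file): _mutex_fork() runs in
 * the child after fork() so that locks held by the forking thread
 * remain usable there.  The child's sole thread keeps the owned-mutex
 * lists but has a new kernel TID, hence the rewriting above:
 *
 *	pthread_mutex_lock(&m);
 *	if (fork() == 0) {
 *		... child: m is still owned by this thread ...
 *		pthread_mutex_unlock(&m);	(works; owner TID fixed)
 *	}
 */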

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure.  We only need to
		 * try once; if that fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m = *mutex;
		/*
		 * Check the mutex's other fields to see if it is
		 * still in use, either because it is a priority
		 * mutex type or because condition variables are
		 * referencing it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be
			 * freed, and set the caller's pointer to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}
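/*
 * Illustrative sketch (not part of this file): the trylock-based probe
 * above makes destroying a mutex that is locked, or referenced by a
 * condition variable, fail instead of corrupting state:
 *
 *	pthread_mutex_t m;
 *
 *	pthread_mutex_init(&m, NULL);
 *	pthread_mutex_lock(&m);
 *	assert(pthread_mutex_destroy(&m) == EBUSY);	(still locked)
 *	pthread_mutex_unlock(&m);
 *	assert(pthread_mutex_destroy(&m) == 0);		(m set to NULL)
 */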


#define ENQUEUE_MUTEX(curthread, m)  					\
		m->m_owner = curthread;					\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED(m);				\
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);	\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe)

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization, marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}
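/*
 * Illustrative sketch (not part of this file): trylock never blocks;
 * a contended lock is reported as EBUSY so the caller can do other
 * work and retry later:
 *
 *	if (pthread_mutex_trylock(&m) == 0) {
 *		... critical section ...
 *		pthread_mutex_unlock(&m);
 *	} else {
 *		... lock busy (EBUSY), fall back to other work ...
 *	}
 */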

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
	const struct timespec * abstime)
{
	struct	timespec ts, ts2;
	struct	pthread_mutex *m;
	uint32_t	id;
	int	ret;
	int	count;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock2(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		/*
		 * For adaptive mutexes, spin for a while in the
		 * expectation that if the application requests this
		 * mutex type then the lock is likely to be released
		 * quickly, making spinning faster than entering the
		 * kernel.
		 */
		if (m->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
			count = MUTEX_ADAPTIVE_SPINS;

			while (count--) {
				ret = _thr_umutex_trylock2(&m->m_lock, id);
				if (ret == 0)
					break;
				CPU_SPINWAIT;
			}
			if (ret == 0)
				goto done;
		} else {
			if (_thr_spinloops != 0 && _thr_is_smp &&
			    !(m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)) {
				count = _thr_spinloops;
				while (count) {
					if (m->m_lock.m_owner == UMUTEX_UNOWNED) {
						ret = _thr_umutex_trylock2(&m->m_lock, id);
						if (ret == 0)
							goto done;
					}
					CPU_SPINWAIT;
					count--;
				}
			}
		}

		if (_thr_yieldloops != 0) {
			count = _thr_yieldloops;
			while (count--) {
				_sched_yield();
				ret = _thr_umutex_trylock2(&m->m_lock, id);
				if (ret == 0)
					goto done;
			}
		}

		if (abstime == NULL) {
			ret = __thr_umutex_lock(&m->m_lock);
		} else if (__predict_false(
			   abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			   abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = __thr_umutex_timedlock(&m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth doing.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
done:
		if (ret == 0)
			ENQUEUE_MUTEX(curthread, m);
	}
	return (ret);
}
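/*
 * Illustrative sketch (not part of this file): callers pass an
 * absolute CLOCK_REALTIME deadline, which the code above converts to
 * a relative timeout with TIMESPEC_SUB before sleeping:
 *
 *	struct timespec abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 1;		(give up after ~1 second)
 *	if (pthread_mutex_timedlock(&m, &abstime) == ETIMEDOUT)
 *		... lock not acquired within the deadline ...
 */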

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization, marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread	*curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization, marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

/*
 * Reacquire a mutex after a condition variable wait and restore the
 * recursion count that was saved by _mutex_cv_unlock().
 */
int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int	ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec	ts1, ts2;
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}
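/*
 * Illustrative sketch (not part of this file): the per-type behavior
 * above is visible to applications that relock a mutex they own:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
 *	pthread_mutex_init(&m, &attr);
 *
 *	pthread_mutex_lock(&m);
 *	assert(pthread_mutex_lock(&m) == EDEADLK);	(detected)
 *
 * A PTHREAD_MUTEX_NORMAL mutex would instead deadlock by design, and
 * a PTHREAD_MUTEX_RECURSIVE one would succeed with m_count bumped.
 */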

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}
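/*
 * Illustrative sketch (not part of this file): for a recursive mutex
 * the count check above means only the final unlock releases the
 * underlying umutex:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&m, &attr);
 *
 *	pthread_mutex_lock(&m);		(lock word taken, m_count 0)
 *	pthread_mutex_lock(&m);		(m_count 1)
 *	pthread_mutex_unlock(&m);	(m_count back to 0, still held)
 *	pthread_mutex_unlock(&m);	(now actually released)
 */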

/*
 * Release a mutex before a condition variable wait, saving the
 * recursion count so _mutex_cv_lock() can restore it afterwards.
 */
int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}
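/*
 * Illustrative sketch (not part of this file): _mutex_cv_unlock() and
 * _mutex_cv_lock() implement the mutex half of a condition wait; the
 * application-visible behavior is the usual one:
 *
 *	pthread_mutex_lock(&m);
 *	while (!predicate)
 *		pthread_cond_wait(&cv, &m);	(drops m via
 *						 _mutex_cv_unlock(), then
 *						 reacquires it via
 *						 _mutex_cv_lock())
 *	pthread_mutex_unlock(&m);
 */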

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
			      int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
			      int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}
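/*
 * Illustrative sketch (not part of this file): the ceiling of a
 * PTHREAD_PRIO_PROTECT mutex can be inspected and changed at run time;
 * on any other mutex type both calls fail with EINVAL, as above:
 *
 *	int old, cur;
 *
 *	pthread_mutex_setprioceiling(&m, 15, &old);
 *	pthread_mutex_getprioceiling(&m, &cur);	(cur is now 15)
 */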