xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision 9f0c02d4255b2036f652c924d3df4fa88c7c721a)
/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	200

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
				const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

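/*
 * Common initialization path: validate the supplied attributes (or fall
 * back to _pthread_mutexattr_default), allocate the mutex through the
 * given calloc-compatible callback, and program the kernel umutex word
 * for the requested priority protocol.
 */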
static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
		calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
	}
	*mutex = pmutex;
	return (0);
}

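/*
 * Lazily initialize a statically allocated (PTHREAD_MUTEX_INITIALIZER)
 * mutex.  The check and the allocation are serialized by the global
 * _mutex_static_lock so that racing threads initialize it exactly once.
 */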
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

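/*
 * Recompute the priority ceiling the kernel should restore when this
 * mutex is released: the ceiling of the last priority-protect mutex
 * the thread still owns, or -1 if pp_mutexq is empty.
 */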
static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1, calloc);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0, calloc);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
/* XXX Enable adaptive locking if similar code is removed from malloc. */
#if 0
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_ADAPTIVE_NP,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0,
		.m_flags = 0
	};

	return mutex_init(mutex, (pthread_mutexattr_t *)&attr, 0, calloc_cb);
#else
	return mutex_init(mutex, NULL, 0, calloc_cb);
#endif
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited here: their
	 * owner is the forking thread in the parent process, so they
	 * would have to be removed from the owned-mutex lists.  Since
	 * process-shared mutexes are currently not supported, this is
	 * not a concern yet.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

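/*
 * Destroy a mutex.  A single trylock attempt probes for use: if the
 * lock cannot be taken, or other fields show an owner or condition
 * variables still referencing it, the mutex is busy and is not freed.
 */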
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; one attempt is
		 * enough, and if it fails the mutex is still in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m = *mutex;
		/*
		 * Check the mutex's other fields to see if it is still
		 * in use, e.g. by priority-protocol mutex types or by
		 * condition variables referencing it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}

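/*
 * Record ownership of a freshly acquired mutex: mark the current thread
 * as owner and append the mutex to its list of ordinary or
 * priority-protect mutexes.
 */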
#define ENQUEUE_MUTEX(curthread, m)  					\
		m->m_owner = curthread;					\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED(m);				\
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);	\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe)

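/*
 * Fast-path trylock: make a single atomic attempt on the umutex word.
 * On success the mutex is enqueued on the owner's list; if the calling
 * thread already owns it, the type-specific self-lock rules apply.
 */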
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

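/*
 * Slow-path lock: one atomic attempt, then (on SMP) optional adaptive
 * or tunable spin loops, then optional sched_yield() loops, and finally
 * a sleep in the kernel, with or without an absolute timeout.
 */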
static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
	const struct timespec * abstime)
{
	struct  timespec ts, ts2;
	struct	pthread_mutex *m;
	uint32_t	id;
	int	ret;
	int	count;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock2(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		/*
		 * For adaptive mutexes, spin for a while in the
		 * expectation that if the application requested this
		 * mutex type, the lock is likely to be released quickly,
		 * which is cheaper than entering the kernel.
		 */
		if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
			goto sleep_in_kernel;

		if (!_thr_is_smp)
			goto yield_loop;

		if (m->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
			count = MUTEX_ADAPTIVE_SPINS;

			while (count--) {
				ret = _thr_umutex_trylock2(&m->m_lock, id);
				if (ret == 0)
					break;
				CPU_SPINWAIT;
			}
			if (ret == 0)
				goto done;
		} else {
			if (_thr_spinloops != 0) {
				count = _thr_spinloops;
				while (count) {
					if (m->m_lock.m_owner == UMUTEX_UNOWNED) {
						ret = _thr_umutex_trylock2(&m->m_lock, id);
						if (ret == 0)
							goto done;
					}
					CPU_SPINWAIT;
					count--;
				}
			}
		}

yield_loop:
		if (_thr_yieldloops != 0) {
			count = _thr_yieldloops;
			while (count--) {
				_sched_yield();
				ret = _thr_umutex_trylock2(&m->m_lock, id);
				if (ret == 0)
					goto done;
			}
		}

sleep_in_kernel:
		if (abstime == NULL) {
			ret = __thr_umutex_lock(&m->m_lock);
		} else if (__predict_false(
			   abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			   abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = __thr_umutex_timedlock(&m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth the effort.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
done:
		if (ret == 0)
			ENQUEUE_MUTEX(curthread, m);
	}
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread	*curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

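/*
 * Re-acquire a mutex after a condition-variable wait: restore the saved
 * recursion count and drop the reference taken by _mutex_cv_unlock().
 */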
int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int	ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

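/*
 * Handle a trylock by the thread that already owns the mutex: only a
 * recursive mutex can be re-acquired; error-check and normal types
 * report EBUSY.
 */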
static int
mutex_self_trylock(pthread_mutex_t m)
{
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

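/*
 * Handle a blocking lock by the thread that already owns the mutex:
 * error-check and adaptive types fail with EDEADLK, normal types
 * deadlock on purpose (or sleep until an absolute timeout expires), and
 * recursive types bump the lock count.
 */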
static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec	ts1, ts2;
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

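/*
 * Release a mutex owned by the calling thread: either just decrement
 * the recursion count, or dequeue the mutex, restore any inherited
 * priority ceiling and hand the lock back through the kernel.
 */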
static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check that the running thread is the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}

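/*
 * Release a mutex on behalf of a condition-variable wait: save and
 * clear the recursion count, and take a reference so the mutex cannot
 * be destroyed while threads are blocked on the condition variable.
 */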
int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check that the running thread is the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}

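/*
 * Unlock every mutex marked MUTEX_FLAGS_PRIVATE that the thread still
 * owns; the _SAFE iterator is required because unlocking removes the
 * mutex from the queue being walked.
 */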
void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
			      int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

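/*
 * Change the ceiling of a priority-protect mutex.  If the calling
 * thread owns the mutex, re-insert it so that pp_mutexq stays sorted
 * by ceiling, which set_inherited_priority() relies on.
 */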
int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
			      int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}
787