xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision 7416cdabcd0c1dd626ff5b7edfcedf11967ff39f)
/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif
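
/*
 * Note: under _PTHREADS_INVARIANTS the queue linkage of a mutex doubles
 * as an ownership check; a mutex is on an owner's queue exactly when its
 * tqe_prev pointer is non-NULL, and the assertions above panic if that
 * invariant is violated.
 */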

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
				const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
		calloc(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
		break;
	}
	*mutex = pmutex;
	return (0);
}
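
/*
 * Usage sketch (hypothetical application code, not part of libthr):
 * requesting the attribute combinations handled by mutex_init() above.
 * The names below are illustrative only; the block is compiled out.
 */
#if 0
static pthread_mutex_t example_lock;

static int
example_setup(void)
{
	pthread_mutexattr_t attr;
	int error;

	if ((error = pthread_mutexattr_init(&attr)) != 0)
		return (error);
	/* Recursive type, priority-protect protocol, ceiling of 100. */
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
	pthread_mutexattr_setprioceiling(&attr, 100);
	error = pthread_mutex_init(&example_lock, &attr);
	pthread_mutexattr_destroy(&attr);
	return (error);
}
#endif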

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

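/*
 * When a priority-protected mutex is released, the thread's priority may
 * have to drop to the ceiling of another PP mutex it still owns.  The
 * helper below records that ceiling in m_ceilings[1] for the kernel to
 * apply; -1 means no other PP mutex is held.
 */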
static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0);
}

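/*
 * Fork handler, run in the child process: the forking thread is the only
 * thread left, so re-tag each owned lock word with its thread id.
 */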
void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Process-shared
	 * mutexes should not be inherited: their owner is the forking
	 * thread, which lives in the parent process, so they would have
	 * to be removed from the owned-mutex lists.  Process-shared
	 * mutexes are not supported yet, however, so this is not a
	 * concern for now.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; one attempt is enough,
		 * since failure means the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m = *mutex;
		/*
		 * Check the mutex's other fields to see if it is still in
		 * use: mostly relevant for priority mutex types, or when
		 * condition variables are referencing it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed,
			 * and set the caller's pointer to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}


#define ENQUEUE_MUTEX(curthread, m)  					\
		m->m_owner = curthread;					\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED(m);				\
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);	\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe)
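
/*
 * Priority-protected mutexes are kept on a separate queue (pp_mutexq)
 * because their ceilings drive the priority restoration performed by
 * set_inherited_priority() on unlock.
 */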

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}
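
/*
 * Illustration (hypothetical application code): on FreeBSD a statically
 * initialized mutex starts out as a NULL pointer, so the first lock or
 * trylock call above performs the dynamic initialization via
 * init_static*() before acquiring the lock.
 */
#if 0
static pthread_mutex_t static_lock = PTHREAD_MUTEX_INITIALIZER;

static void
example_trylock(void)
{
	if (pthread_mutex_trylock(&static_lock) == 0) {
		/* ... critical section ... */
		pthread_mutex_unlock(&static_lock);
	}
}
#endif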

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
	const struct timespec *abstime)
{
	struct timespec ts, ts2;
	struct pthread_mutex *m;
	uint32_t	id;
	int	ret;
	int	count;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock2(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		if (_thr_spinloops != 0 && _thr_is_smp &&
		    !(m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)) {
			count = _thr_spinloops;
			while (count && m->m_lock.m_owner != UMUTEX_UNOWNED) {
				count--;
				CPU_SPINWAIT;
			}
			if (count) {
				ret = _thr_umutex_trylock2(&m->m_lock, id);
				if (ret == 0) {
					ENQUEUE_MUTEX(curthread, m);
					return (ret);
				}
			}
		}

		if (_thr_yieldloops != 0) {
			count = _thr_yieldloops;
			while (count--) {
				_sched_yield();
				ret = _thr_umutex_trylock2(&m->m_lock, id);
				if (ret == 0) {
					ENQUEUE_MUTEX(curthread, m);
					return (ret);
				}
			}
		}

		if (abstime == NULL) {
			ret = __thr_umutex_lock(&m->m_lock);
		} else if (__predict_false(
			   abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			   abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = __thr_umutex_timedlock(&m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth the effort.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
		if (ret == 0)
			ENQUEUE_MUTEX(curthread, m);
	}
	return (ret);
}
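
/*
 * Usage sketch (hypothetical application code): pthread_mutex_timedlock()
 * takes an absolute CLOCK_REALTIME deadline, which mutex_lock_common()
 * converts into the relative timeout the kernel expects.
 */
#if 0
static int
lock_with_timeout(pthread_mutex_t *lock, time_t seconds)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += seconds;
	/* Returns 0 on success, ETIMEDOUT if the deadline passes. */
	return (pthread_mutex_timedlock(lock, &deadline));
}
#endif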

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread	*curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int	ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}
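
/*
 * _mutex_cv_lock() above pairs with _mutex_cv_unlock() below: the unlock
 * side saves the recursion count and bumps m_refcount while the thread
 * waits on a condition variable; re-acquiring restores the count and
 * drops the reference again.
 */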

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec	ts1, ts2;
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check that the running thread is the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check that the running thread is the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Save the recursion count and clear it, in case this is a
	 * recursive mutex; it is restored by _mutex_cv_lock().
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
			      int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}
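
/*
 * pp_mutexq is kept sorted by ceiling, so TAILQ_LAST() in
 * set_inherited_priority() yields the highest remaining ceiling.  When a
 * ceiling changes, the entry is re-inserted at its proper position.
 */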

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
			      int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}