/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

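/*
 * When libthr is built with _PTHREADS_INVARIANTS, these macros verify
 * the queue linkage of each mutex and panic on an inconsistency;
 * otherwise they compile away to nothing.
 */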
#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
				const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

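/*
 * Common back end for both flavors of pthread_mutex_init(): validate
 * the attributes, allocate and initialize the mutex structure, and map
 * the requested protocol onto the kernel umutex flags.
 */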
static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
		calloc(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
	}
	*mutex = pmutex;
	return (0);
}

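/*
 * A statically initialized mutex (PTHREAD_MUTEX_INITIALIZER) is a NULL
 * pointer, so the real initialization is deferred to first use; the
 * global _mutex_static_lock serializes racing first-use initializers.
 */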
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

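/*
 * Recompute the inherited ceiling for a priority-protected mutex being
 * released: take the ceiling of the last mutex remaining on the
 * thread's priority-protect queue, or -1 if the queue is empty.
 */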
static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 1));
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 0));
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that a
	 * process-shared mutex should not be inherited, because its
	 * owner is the forking thread in the parent process; such
	 * mutexes should be removed from the owned mutex list.
	 * Process-shared mutexes are not currently supported, so this
	 * is not yet a concern.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If that fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m = *mutex;
		/*
		 * Check the mutex's other fields to see if it is
		 * still in use, mostly for priority mutex types or
		 * when condition variables reference it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}

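/*
 * Back end for the trylock entry points: a single user-mode atomic
 * attempt.  On success the mutex is recorded on the owning thread's
 * queue; a trylock of a mutex this thread already owns is resolved by
 * mutex_self_trylock().
 */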
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes. */
		MUTEX_ASSERT_NOT_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
		else
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

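/*
 * Back end for the lock and timedlock entry points.  The fast path is
 * a single user-mode atomic trylock; only under contention does the
 * thread enter the kernel, converting any absolute timeout into the
 * relative form the umutex call expects.
 */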
static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
	const struct timespec *abstime)
{
	struct  timespec ts, ts2;
	struct	pthread_mutex *m;
	uint32_t	id;
	int	ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock2(&m->m_lock, id);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
		else
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		if (abstime == NULL) {
			ret = __thr_umutex_lock(&m->m_lock);
		} else if (__predict_false(
			   abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			   abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = __thr_umutex_timedlock(&m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth the effort.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
		if (ret == 0) {
			m->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(m);
			if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
				TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
			else
				TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m,
					m_qe);
		}
	}
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread	*curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

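/*
 * Used by the condition variable code to reacquire a mutex after a
 * wait: the recursion count saved by _mutex_cv_unlock() is restored
 * and the reference taken there is dropped.
 */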
484 _mutex_cv_lock(pthread_mutex_t *m, int count)
485 {
486 	int	ret;
487 
488 	ret = mutex_lock_common(_get_curthread(), m, NULL);
489 	if (ret == 0) {
490 		(*m)->m_refcount--;
491 		(*m)->m_count += count;
492 	}
493 	return (ret);
494 }
495 
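/*
 * Handle a trylock of a mutex the calling thread already owns; the
 * result depends on the mutex type.
 */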
static int
mutex_self_trylock(pthread_mutex_t m)
{
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

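/*
 * Handle a lock of a mutex the calling thread already owns.  An
 * error-checking mutex reports the error (or times out); a normal
 * mutex deadlocks on purpose; a recursive mutex bumps its count.
 */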
static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec	ts1, ts2;
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

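/*
 * Common unlock path: a recursive mutex with a nonzero count just has
 * its count decremented; otherwise ownership is released, the mutex is
 * unlinked from the owner's queue, and any kernel waiter is woken.
 */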
static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}

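/*
 * Used by the condition variable code to release a mutex before a
 * wait: the recursion count is saved for _mutex_cv_lock() to restore,
 * and a reference is taken so the mutex cannot be destroyed while the
 * condition variable still refers to it.
 */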
int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}

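/*
 * Release every mutex marked MUTEX_FLAGS_PRIVATE (libc-internal) that
 * the thread still holds.
 */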
void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
			      int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

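/*
 * Change the priority ceiling of a PTHREAD_PRIO_PROTECT mutex.  If the
 * calling thread owns the mutex, its entry on the ceiling-ordered
 * pp_mutexq is repositioned to keep that queue sorted.
 */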
int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
			      int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}