xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision f3cec688772252a00e244ae8e7e116f0a3c2473f)
/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif
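
/*
 * With _PTHREADS_INVARIANTS enabled, the queue linkage of a mutex doubles
 * as an ownership marker: a mutex that is on an owner's queue has a
 * non-NULL tqe_prev pointer, which lets the assertions above catch a
 * mutex being unlocked while off the queue, or freed while still on one.
 */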

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
				const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

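/*
 * Common initialization path: validate the attributes, allocate and zero
 * the mutex structure, and translate the requested protocol into the
 * corresponding umutex flags.  A non-zero "private" argument sets
 * MUTEX_FLAGS_PRIVATE, marking the mutex as internal to the library so
 * that _mutex_unlock_private() can release it in bulk.
 */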
static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
		calloc(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
	}
	*mutex = pmutex;
	return (0);
}

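/*
 * init_static() and init_static_private() finish the initialization of a
 * statically allocated (PTHREAD_MUTEX_INITIALIZER) mutex on first use.
 * The check and the initialization are both done under
 * _mutex_static_lock so that two racing threads cannot both allocate
 * the mutex.
 */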
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 1));
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 0));
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited: their owner is
	 * the forking thread, which lives in the parent process, so they
	 * would have to be removed from the owned-mutex lists.  Since
	 * process-shared mutexes are not currently supported, this is
	 * not yet a concern.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

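/*
 * Destroying a mutex first takes its low-level lock with a single
 * trylock; failure, or a remaining owner or condition-variable
 * reference, yields EBUSY.  For priority-protected mutexes the saved
 * ceiling is refreshed from the last entry on the thread's pp_mutexq
 * before the low-level lock is dropped.
 */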
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m, m2;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; a single attempt is
		 * enough, because failure means the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m = *mutex;
		m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
		/*
		 * Check the mutex's other fields to see if it is still
		 * in use: mostly relevant for priority mutex types, or
		 * when condition variables are referencing it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) {
				if (m2 != NULL)
					m->m_lock.m_ceilings[1] =
						m2->m_lock.m_ceilings[0];
				else
					m->m_lock.m_ceilings[1] = -1;
			}
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * The pointer saved in m above lets the mutex be
			 * freed after the caller's pointer is set to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) {
				if (m2 != NULL)
					m->m_lock.m_ceilings[1] =
						m2->m_lock.m_ceilings[0];
				else
					m->m_lock.m_ceilings[1] = -1;
			}
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}

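/*
 * Common trylock path: attempt the low-level lock once; on success,
 * record the owner and append the mutex to the thread's owned-mutex
 * queue (pp_mutexq for priority-protected mutexes, mutexq otherwise).
 * If the lock is already held by the caller, defer to the type-specific
 * self-trylock handling.
 */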
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes. */
		MUTEX_ASSERT_NOT_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
		else
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	}

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

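/*
 * Common lock path: a fast trylock first, then type-specific handling
 * if the caller already owns the mutex, and otherwise a blocking or
 * timed wait.  The absolute timeout is converted to an interval
 * relative to CLOCK_REALTIME before being passed to
 * _thr_umutex_timedlock().
 */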
static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
	const struct timespec *abstime)
{
	struct timespec ts, ts2;
	struct pthread_mutex *m;
	uint32_t id;
	int	ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
		else
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		if (abstime == NULL) {
			_thr_umutex_lock(&m->m_lock, id);
			ret = 0;
		} else if (__predict_false(
			   abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			   abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = _thr_umutex_timedlock(&m->m_lock, id, &ts2);
			/*
			 * A timed wait is not restarted if it was
			 * interrupted; restarting it is not worthwhile.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
		if (ret == 0) {
			m->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(m);
			if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
				TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
			else
				TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m,
					m_qe);
		}
	}
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread	*curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}
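
/*
 * A sketch of how a caller might use the timed lock, assuming a mutex
 * "m" initialized elsewhere (hypothetical example, not part of this
 * file).  The deadline is absolute, measured against CLOCK_REALTIME:
 *
 *	struct timespec abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 1;
 *	if (pthread_mutex_timedlock(&m, &abstime) == ETIMEDOUT)
 *		warnx("lock not acquired within one second");
 */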

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

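/*
 * Relock a mutex on behalf of a condition-variable wakeup: drop the
 * reference taken by _mutex_cv_unlock() and restore the recursion
 * count that was saved when the wait began.
 */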
int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int	ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

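/*
 * Handle a trylock on a mutex the calling thread already owns; the
 * result depends on the mutex type (EBUSY for normal and error-checking
 * mutexes, a recursion-count bump for recursive ones).
 */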
static int
mutex_self_trylock(pthread_mutex_t m)
{
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}

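/*
 * Handle a blocking lock on a mutex the calling thread already owns.
 * Error-checking mutexes fail with EDEADLK, or sleep out the timeout
 * and return ETIMEDOUT; normal mutexes deadlock by design, sleeping
 * forever in 30-second intervals; recursive mutexes bump the
 * recursion count.
 */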
static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec	ts1, ts2;
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlocks on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}

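/*
 * Common unlock path: a recursive mutex with a positive count only has
 * its count decremented; otherwise the mutex is removed from the
 * owner's queue, the priority ceiling is refreshed for
 * priority-protected mutexes, and the low-level lock is released.
 */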
static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m2;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check that the running thread is the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
			if (m2 != NULL)
				m->m_lock.m_ceilings[1] =
					m2->m_lock.m_ceilings[0];
			else
				m->m_lock.m_ceilings[1] = -1;
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}

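/*
 * Release a mutex on behalf of a condition-variable wait: save and
 * clear the recursion count, take a reference so the mutex cannot be
 * destroyed while the wait is in progress, and fully unlock it.
 */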
int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m2;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check that the running thread is the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Save and clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);

		m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
		if (m2 != NULL)
			m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
		else
			m->m_lock.m_ceilings[1] = -1;
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}

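/*
 * Unlock every private (library-internal) mutex still owned by the
 * given thread; the safe traversal allows unlocking to remove entries
 * from the queue as it walks.
 */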
void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

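/*
 * The priority-ceiling accessors are only meaningful for mutexes
 * created with the PTHREAD_PRIO_PROTECT protocol; both return EINVAL
 * otherwise.
 */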
int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
			      int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
			      int ceiling, int *old_ceiling)
{
	int ret = 0;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else
		ret = __thr_umutex_set_ceiling(&(*mutex)->m_lock,
			ceiling, old_ceiling);
	return (ret);
}
708