xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision 91c878a6935c5c2e99866eb267e5bc3028bf6d2f)
/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif
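
/*
 * Note on the invariants above: an owned mutex is linked on the owning
 * thread's mutexq (or on pp_mutexq for priority-protected mutexes)
 * through its m_qe TAILQ entry, so a NULL tqe_prev pointer serves as
 * the "not on any list" marker that MUTEX_INIT_LINK establishes and
 * the assertions check.
 */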

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
				const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
		calloc(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
	}
	*mutex = pmutex;
	return (0);
}
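
/*
 * Example (illustrative only, not compiled into the library): how an
 * application would exercise the protocol cases handled above.  The
 * attribute calls are the standard pthread(3) ones; "lock" and the
 * ceiling value 100 are hypothetical.
 *
 *	#include <pthread.h>
 *
 *	pthread_mutexattr_t ma;
 *	pthread_mutex_t lock;
 *
 *	pthread_mutexattr_init(&ma);
 *	pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_PROTECT);
 *	pthread_mutexattr_setprioceiling(&ma, 100);
 *	pthread_mutex_init(&lock, &ma);		// reaches mutex_init() above
 *	pthread_mutexattr_destroy(&ma);
 */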

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 1));
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 0));
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited, because
	 * their owner is the forking thread, which lives in the
	 * parent process; they would have to be removed from the
	 * owned-mutex list.  Process-shared mutexes are not yet
	 * supported, so this is not a concern for now.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}
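
/*
 * Illustrative scenario (an assumption about the caller, not code in
 * this file): after fork(2) only the forking thread survives in the
 * child, and its thread ID changes, so the kernel-visible owner word
 * of every owned umutex must be restamped with the new TID as done
 * above; otherwise a later unlock in the child would fail the kernel's
 * owner check.
 */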

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m, m2;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If that fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m  = *mutex;
		m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
		/*
		 * Check the mutex's other fields to see if it is
		 * still in use.  This mostly matters for priority
		 * mutex types, or when condition variables are
		 * referencing it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) {
				if (m2 != NULL)
					m->m_lock.m_ceilings[1] =
						 m2->m_lock.m_ceilings[0];
				else
					m->m_lock.m_ceilings[1] = -1;
			}
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be
			 * freed, and set the caller's pointer to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) {
				if (m2 != NULL)
					m->m_lock.m_ceilings[1] =
						m2->m_lock.m_ceilings[0];
				else
					m->m_lock.m_ceilings[1] = -1;
			}
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}
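
/*
 * Example (illustrative only): destroy returns EBUSY rather than
 * freeing a mutex that is still locked or referenced, so a caller can
 * rely on this pattern ("lock" is a hypothetical variable):
 *
 *	pthread_mutex_lock(&lock);
 *	assert(pthread_mutex_destroy(&lock) == EBUSY);	// still held
 *	pthread_mutex_unlock(&lock);
 *	assert(pthread_mutex_destroy(&lock) == 0);	// freed, set to NULL
 */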

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes. */
		MUTEX_ASSERT_NOT_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
		else
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}
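
/*
 * Example (illustrative only): a statically initialized mutex leaves
 * the pthread_mutex_t pointer NULL, which is why the entry points
 * above check for NULL and fall into init_static*() on first use.
 * A hypothetical caller:
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	if (pthread_mutex_trylock(&lock) == 0) {	// first use triggers
 *		...critical section...			// dynamic init here
 *		pthread_mutex_unlock(&lock);
 *	}
 */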

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
	const struct timespec * abstime)
{
	struct  timespec ts, ts2;
	struct	pthread_mutex *m;
	uint32_t	id;
	int	ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
		else
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		if (abstime == NULL) {
			ret = _thr_umutex_lock(&m->m_lock, id);
		} else if (__predict_false(
			   abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			   abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = _thr_umutex_timedlock(&m->m_lock, id, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth the effort.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
		if (ret == 0) {
			m->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(m);
			if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
				TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
			else
				TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m,
					m_qe);
		}
	}
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread	*curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}
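
/*
 * Example (illustrative only): pthread_mutex_timedlock() takes an
 * absolute CLOCK_REALTIME deadline, which mutex_lock_common() converts
 * to a relative timeout for the kernel.  A hypothetical caller waiting
 * at most two seconds:
 *
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 2;
 *	if (pthread_mutex_timedlock(&lock, &deadline) == ETIMEDOUT)
 *		...give up...
 */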

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int	ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec	ts1, ts2;
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex: intentionally
		 * deadlock on attempts to acquire a lock you already
		 * own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}
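
/*
 * Example (illustrative only): the per-type self-lock behavior above,
 * as seen from a hypothetical application thread that already owns
 * "lock":
 *
 *	pthread_mutex_lock(&lock);
 *	pthread_mutex_lock(&lock);	// ERRORCHECK: returns EDEADLK
 *					// RECURSIVE: returns 0, count = 2
 *					// NORMAL: never returns (deadlock)
 */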

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m2;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Return EPERM if the running thread is not the owner of
	 * the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
			if (m2 != NULL)
				m->m_lock.m_ceilings[1] =
					m2->m_lock.m_ceilings[0];
			else
				m->m_lock.m_ceilings[1] = -1;
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m2;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Return EPERM if the running thread is not the owner of
	 * the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);

		m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
		if (m2 != NULL)
			m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
		else
			m->m_lock.m_ceilings[1] = -1;
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
			      int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
			      int ceiling, int *old_ceiling)
{
	int ret = 0;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else
		ret = __thr_umutex_set_ceiling(&(*mutex)->m_lock,
			ceiling, old_ceiling);
	return (ret);
}
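
/*
 * Example (illustrative only): the ceiling accessors above work only
 * on PTHREAD_PRIO_PROTECT mutexes; on any other kind they return
 * EINVAL.  A hypothetical caller ("pp_lock" created with the
 * PTHREAD_PRIO_PROTECT attribute shown earlier):
 *
 *	int old, cur;
 *
 *	pthread_mutex_setprioceiling(&pp_lock, 50, &old);
 *	pthread_mutex_getprioceiling(&pp_lock, &cur);	// cur == 50
 */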
707