/* xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision e96b4de80ecb646a467081b65d54a0d12510f5c0) */
/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if (__predict_false((m)->m_qe.tqe_prev == NULL))\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (__predict_false((m)->m_qe.tqe_prev != NULL ||	\
	    (m)->m_qe.tqe_next != NULL))	\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, the number of times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	2000

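/*
 * Example (a sketch, compiled out; not part of libthr): how an
 * application might request the adaptive type so the spin loop in
 * mutex_lock_sleep() is used.  The helper name is hypothetical;
 * PTHREAD_MUTEX_ADAPTIVE_NP and the attribute calls are standard.
 */
#if 0
#include <pthread.h>
#include <pthread_np.h>

static pthread_mutex_t adaptive_lock;

static void
adaptive_lock_init(void)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
	pthread_mutex_init(&adaptive_lock, &attr);
	pthread_mutexattr_destroy(&attr);
}
#endif
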
/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
		void *(calloc_cb)(size_t, size_t));
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
				const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);
static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
				const struct timespec *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

static int
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
	    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
		break;
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	}

	if (pmutex->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops : MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}

	*mutex = pmutex;
	return (0);
}

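/*
 * Example (a sketch, compiled out; not part of libthr): constructing
 * a priority-protected mutex, which takes the PTHREAD_PRIO_PROTECT
 * branch in mutex_init() above and seeds m_ceilings[0].  The helper
 * name is hypothetical; the attribute calls are standard POSIX.
 */
#if 0
#include <pthread.h>

static int
make_pp_mutex(pthread_mutex_t *mp, int ceiling)
{
	pthread_mutexattr_t attr;
	int ret;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
	pthread_mutexattr_setprioceiling(&attr, ceiling);
	ret = pthread_mutex_init(mp, &attr);
	pthread_mutexattr_destroy(&attr);
	return (ret);
}
#endif
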
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == THR_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
		    calloc);
	else
		ret = 0;
	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

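/*
 * Example (a sketch, compiled out; not part of libthr): a statically
 * initialized mutex is only a sentinel pointer until its first use;
 * the first lock/trylock call routes through init_static() above to
 * allocate the real structure under _mutex_static_lock.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t lazy_lock = PTHREAD_MUTEX_INITIALIZER;

static void
first_use(void)
{
	pthread_mutex_lock(&lazy_lock);		/* allocates on first use */
	pthread_mutex_unlock(&lazy_lock);
}
#endif
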
static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc));
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0
	};
	int ret;

	ret = mutex_init(mutex, &attr, calloc_cb);
	if (ret == 0)
		(*mutex)->m_private = 1;
	return (ret);
}

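/*
 * Annotation: the calloc callback exists because malloc(3) itself
 * uses mutexes internally.  If malloc initialized its locks through
 * the regular pthread_mutex_init() (which allocates with calloc()),
 * initialization could recurse back into the allocator; supplying a
 * bootstrap calloc-like routine breaks that cycle.
 */
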
void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited: their owner
	 * is the forking thread, which lives in the parent process, so
	 * they would have to be removed from the owned mutex lists.
	 * Process-shared mutexes are currently unsupported, so this is
	 * not yet a concern.
	 */
	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

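/*
 * Example (a sketch, compiled out; not part of libthr): the
 * pthread_atfork() pattern this fixup supports.  The parent locks
 * around fork(); both sides then unlock, and _mutex_fork() has
 * re-stamped the child's TID into the lock words so the child-side
 * unlock is legitimate.  Handler names are hypothetical.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void hold(void)    { pthread_mutex_lock(&state_lock); }
static void release(void) { pthread_mutex_unlock(&state_lock); }

static void
setup_fork_safety(void)
{
	pthread_atfork(hold, release, release);
}
#endif
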
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	pthread_mutex_t m;
	int ret;

	m = *mutex;
	if (m < THR_MUTEX_DESTROYED) {
		ret = 0;
	} else if (m == THR_MUTEX_DESTROYED) {
		ret = EINVAL;
	} else {
		if (m->m_owner != NULL || m->m_refcount != 0) {
			ret = EBUSY;
		} else {
			*mutex = THR_MUTEX_DESTROYED;
			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
			ret = 0;
		}
	}

	return (ret);
}

#define ENQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = curthread;				\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED((m));				\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)

/*
 * Lazily initialize a statically-allocated mutex on first use and
 * reject one that has already been destroyed.
 */
#define CHECK_AND_INIT_MUTEX						\
	if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) {	\
		if (m == THR_MUTEX_DESTROYED)				\
			return (EINVAL);				\
		int ret;						\
		ret = init_static(_get_curthread(), mutex);		\
		if (ret)						\
			return (ret);					\
		m = *mutex;						\
	}

static int
mutex_trylock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m = *mutex;
	uint32_t id;
	int ret;

	id = TID(curthread);
	if (m->m_private)
		THR_CRITICAL_ENTER(curthread);
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (__predict_true(ret == 0)) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */
	if (ret && m->m_private)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	return (mutex_trylock_common(mutex));
}

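/*
 * Example (a sketch, compiled out; not part of libthr): typical
 * trylock usage.  EBUSY is the expected "someone else holds it"
 * result from the path above; a self-trylock instead goes through
 * mutex_self_trylock().  The helper name is hypothetical.
 */
#if 0
#include <errno.h>
#include <pthread.h>

static int
try_do_work(pthread_mutex_t *mp)
{
	int ret;

	ret = pthread_mutex_trylock(mp);
	if (ret == EBUSY)
		return (0);	/* contended; caller can retry later */
	if (ret != 0)
		return (-1);	/* EINVAL, EDEADLK, ... */
	/* ... critical section ... */
	pthread_mutex_unlock(mp);
	return (1);
}
#endif
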
static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
	const struct timespec *abstime)
{
	uint32_t	id, owner;
	int	count;
	int	ret;

	if (m->m_owner == curthread)
		return (mutex_self_lock(m, abstime));

	id = TID(curthread);
	/*
	 * For adaptive mutexes, spin for a bit in the expectation
	 * that if the application requests this mutex type then
	 * the lock is likely to be released quickly and it is
	 * faster than entering the kernel.
	 */
	if (__predict_false(
		(m->m_lock.m_flags &
		 (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
			goto sleep_in_kernel;

	if (!_thr_is_smp)
		goto yield_loop;

	count = m->m_spinloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
				ret = 0;
				goto done;
			}
		}
		CPU_SPINWAIT;
	}

yield_loop:
	count = m->m_yieldloops;
	while (count--) {
		_sched_yield();
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
				ret = 0;
				goto done;
			}
		}
	}

sleep_in_kernel:
	if (abstime == NULL) {
		ret = __thr_umutex_lock(&m->m_lock, id);
	} else if (__predict_false(
		   abstime->tv_nsec < 0 ||
		   abstime->tv_nsec >= 1000000000)) {
		ret = EINVAL;
	} else {
		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
	}
done:
	if (ret == 0)
		ENQUEUE_MUTEX(curthread, m);

	return (ret);
}

static inline int
mutex_lock_common(struct pthread_mutex *m,
	const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (m->m_private)
		THR_CRITICAL_ENTER(curthread);
	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
		ENQUEUE_MUTEX(curthread, m);
		ret = 0;
	} else {
		ret = mutex_lock_sleep(curthread, m, abstime);
	}
	if (ret && m->m_private)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	struct pthread_mutex	*m;

	_thr_check_init();

	CHECK_AND_INIT_MUTEX

	return (mutex_lock_common(m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct pthread_mutex	*m;

	_thr_check_init();

	CHECK_AND_INIT_MUTEX

	return (mutex_lock_common(m, abstime));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

int
_mutex_cv_lock(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex	*m;
	int	ret;

	m = *mutex;
	ret = mutex_lock_common(m, NULL);
	if (ret == 0) {
		m->m_refcount--;
		m->m_count += count;
	}
	return (ret);
}

static int
mutex_self_trylock(struct pthread_mutex *m)
{
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
{
	struct timespec	ts1, ts2;
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}

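/*
 * Example (a sketch, compiled out; not part of libthr): observable
 * self-lock behavior per type, as implemented above.  ERRORCHECK
 * reports EDEADLK, RECURSIVE just bumps m_count, and NORMAL
 * deliberately deadlocks (the nanosleep loop).  The function name is
 * hypothetical.
 */
#if 0
#include <assert.h>
#include <errno.h>
#include <pthread.h>

static void
self_lock_demo(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);

	pthread_mutex_lock(&m);
	assert(pthread_mutex_lock(&m) == EDEADLK);	/* detected */
	pthread_mutex_unlock(&m);
	pthread_mutex_destroy(&m);
}
#endif
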
static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	m = *mutex;
	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
		if (m == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}

	/*
	 * Error out if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0))
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	if (m->m_private)
		THR_CRITICAL_LEAVE(curthread);
	return (0);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	m = *mutex;
	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
		if (m == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}

	/*
	 * Error out if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Save and clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0))
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));

	if (m->m_private)
		THR_CRITICAL_LEAVE(curthread);
	return (0);
}

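/*
 * Annotation: _mutex_cv_unlock() and _mutex_cv_lock() are the
 * condition-variable entry points.  pthread_cond_wait() hands its
 * mutex to _mutex_cv_unlock() before sleeping and reacquires it via
 * _mutex_cv_lock() on wakeup; m_refcount keeps the mutex busy
 * (pthread_mutex_destroy() returns EBUSY) while waiters still
 * reference it, and the saved count restores a recursive mutex to
 * its previous recursion depth.
 */
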
int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
			      int *prioceiling)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = m->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
			      int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}

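/*
 * Example (a sketch, compiled out; not part of libthr): querying and
 * changing a ceiling at run time.  Both calls fail with EINVAL on a
 * mutex that is not PTHREAD_PRIO_PROTECT, per the checks above.  The
 * helper name is hypothetical.
 */
#if 0
#include <pthread.h>

static int
bump_ceiling(pthread_mutex_t *mp, int new_ceiling)
{
	int old;

	if (pthread_mutex_getprioceiling(mp, &old) != 0)
		return (-1);		/* not a PRIO_PROTECT mutex */
	if (new_ceiling <= old)
		return (0);		/* nothing to do */
	return (pthread_mutex_setprioceiling(mp, new_ceiling, &old));
}
#endif
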
int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex	*m;

	CHECK_AND_INIT_MUTEX

	*count = m->m_spinloops;
	return (0);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex	*m;

	CHECK_AND_INIT_MUTEX

	m->m_spinloops = count;
	return (0);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex	*m;

	CHECK_AND_INIT_MUTEX

	*count = m->m_yieldloops;
	return (0);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex	*m;

	CHECK_AND_INIT_MUTEX

	m->m_yieldloops = count;
	return (0);
}

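/*
 * Example (a sketch, compiled out; not part of libthr): tuning the
 * spin/yield knobs above.  They may be set on any mutex type;
 * mutex_lock_sleep() consults them for every non-PI/PP mutex, and the
 * adaptive type merely installs non-zero defaults.  The helper name
 * and the counts are hypothetical.
 */
#if 0
#include <pthread.h>
#include <pthread_np.h>

static void
tune_lock(pthread_mutex_t *mp)
{
	pthread_mutex_setspinloops_np(mp, 4000);	/* busy-wait tries */
	pthread_mutex_setyieldloops_np(mp, 2);		/* sched_yield rounds */
}
#endif
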
int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
	struct pthread_mutex	*m;

	m = *mutex;
	if (m <= THR_MUTEX_DESTROYED)
		return (0);
	return (m->m_owner == _get_curthread());
}

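/*
 * Example (a sketch, compiled out; not part of libthr): using
 * pthread_mutex_isowned_np() as a cheap "lock held by me" assertion
 * in functions that require the caller to hold the lock.  The
 * function name is hypothetical.
 */
#if 0
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>

static void
update_state_locked(pthread_mutex_t *mp)
{
	assert(pthread_mutex_isowned_np(mp));
	/* ... mutate the shared state protected by *mp ... */
}
#endif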