xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision 7aa383846770374466b1dcb2cefd71bde9acf463)
/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if (__predict_false((m)->m_qe.tqe_prev == NULL))\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (__predict_false((m)->m_qe.tqe_prev != NULL ||	\
	    (m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes: the number of times to spin on trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	2000
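
/*
 * Illustrative example (not part of the original source): an
 * application opts into adaptive spinning by requesting the
 * non-portable mutex type:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */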

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
		void *(calloc_cb)(size_t, size_t));
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
				const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);
static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
				const struct timespec *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
		calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	MUTEX_INIT_LINK(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
	}

	if (pmutex->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops : MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}

	*mutex = pmutex;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, calloc);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

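/*
 * Illustrative example (not part of the original source): a statically
 * initialized mutex starts life as a NULL pointer, so the first lock,
 * trylock or timedlock call passes through init_static() above:
 *
 *	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
 *
 *	pthread_mutex_lock(&m);		(allocates the mutex on first use)
 *	pthread_mutex_unlock(&m);
 */
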
static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, calloc));
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0
	};
	static const struct pthread_mutex_attr *pattr = &attr;

	return (mutex_init(mutex, (pthread_mutexattr_t *)&pattr, calloc_cb));
}

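/*
 * Illustrative example (not part of the original source): malloc(3)
 * cannot use the regular calloc() while bootstrapping its own locks,
 * so it supplies a private allocation callback instead.  The helper
 * name below is hypothetical:
 *
 *	static void *
 *	bootstrap_calloc(size_t number, size_t size)
 *	{
 *		(hand out zeroed memory from a static bootstrap region)
 *	}
 *
 *	pthread_mutex_t lock;
 *	_pthread_mutex_init_calloc_cb(&lock, bootstrap_calloc);
 */
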
void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that a
	 * process-shared mutex should not be inherited: its owner is
	 * the forking thread in the parent process, so it would have
	 * to be removed from the owned-mutex list.  Process-shared
	 * mutexes are currently unsupported, so this is not yet a
	 * concern.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure.  We only need to
		 * try once: if the attempt fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&(*mutex)->m_lock, id);
		if (ret)
			return (ret);
		m = *mutex;
		/*
		 * Check the mutex's other fields to see if it is still
		 * in use.  This mostly matters for priority mutex
		 * types, or when condition variables reference it.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be
			 * freed, and set the caller's pointer to NULL.
			 */
			*mutex = NULL;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}

#define ENQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = curthread;				\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED((m));				\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	id = TID(curthread);
	m = *mutex;
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (ret == 0) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	}

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

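/*
 * Illustrative example (not part of the original source): the
 * non-blocking interface returns EBUSY instead of sleeping when the
 * lock is already held:
 *
 *	int error;
 *
 *	if ((error = pthread_mutex_trylock(&m)) == 0) {
 *		(got the lock; do the work)
 *		pthread_mutex_unlock(&m);
 *	} else if (error == EBUSY) {
 *		(another thread holds the lock; do something else)
 *	}
 */
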
static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
	const struct timespec *abstime)
{
	uint32_t	id, owner;
	int	count;
	int	ret;

	if (m->m_owner == curthread)
		return (mutex_self_lock(m, abstime));

	id = TID(curthread);
	/*
	 * For adaptive mutexes, spin for a while in the expectation
	 * that if the application requests this mutex type then the
	 * lock is likely to be released quickly, making spinning
	 * cheaper than entering the kernel.
	 */
	if (m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT))
		goto sleep_in_kernel;

	if (!_thr_is_smp)
		goto yield_loop;

	count = m->m_spinloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,
			    id | owner)) {
				ret = 0;
				goto done;
			}
		}
		CPU_SPINWAIT;
	}

yield_loop:
	count = m->m_yieldloops;
	while (count--) {
		_sched_yield();
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,
			    id | owner)) {
				ret = 0;
				goto done;
			}
		}
	}

sleep_in_kernel:
	if (abstime == NULL) {
		ret = __thr_umutex_lock(&m->m_lock, id);
	} else if (__predict_false(
		   abstime->tv_nsec < 0 ||
		   abstime->tv_nsec >= 1000000000)) {
		ret = EINVAL;
	} else {
		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
	}
done:
	if (ret == 0)
		ENQUEUE_MUTEX(curthread, m);

	return (ret);
}

static inline int
mutex_lock_common(struct pthread *curthread, struct pthread_mutex *m,
	const struct timespec *abstime)
{

	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
		ENQUEUE_MUTEX(curthread, m);
		return (0);
	}

	return (mutex_lock_sleep(curthread, m, abstime));
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	struct pthread *curthread;
	struct pthread_mutex *m;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false((m = *mutex) == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
		m = *mutex;
	}

	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct pthread *curthread;
	struct pthread_mutex *m;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false((m = *mutex) == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
		m = *mutex;
	}
	return (mutex_lock_common(curthread, m, abstime));
}

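/*
 * Illustrative example (not part of the original source): the timeout
 * is an absolute CLOCK_REALTIME timestamp, not a relative interval:
 *
 *	struct timespec abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 5;
 *	if (pthread_mutex_timedlock(&m, &abstime) == ETIMEDOUT)
 *		(gave up after roughly five seconds)
 */
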
int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int	ret;

	ret = mutex_lock_common(_get_curthread(), *m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec	ts1, ts2;
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SUSv2 defines as a 'normal' mutex: intentionally
		 * deadlock on attempts to acquire a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

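/*
 * Illustrative example (not part of the original source): a recursive
 * mutex counts nested acquisitions and must be unlocked the same
 * number of times, whereas the other types error out or deadlock as
 * handled above:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&m, &attr);
 *
 *	pthread_mutex_lock(&m);
 *	pthread_mutex_lock(&m);		(m_count becomes 1)
 *	pthread_mutex_unlock(&m);	(m_count back to 0)
 *	pthread_mutex_unlock(&m);	(fully released)
 */
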
static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/* Error out if the calling thread does not own the mutex. */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	return (0);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/* Error out if the calling thread does not own the mutex. */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));
	return (0);
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
			      int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if (((*mutex)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
			      int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if (m == NULL || (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			/* Re-insert to keep pp_mutexq sorted by ceiling. */
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}

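/*
 * Illustrative example (not part of the original source): the ceiling
 * calls only apply to PTHREAD_PRIO_PROTECT mutexes, which are
 * requested through the mutex attributes; the ceiling values here are
 * arbitrary:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *	int old_ceiling;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
 *	pthread_mutexattr_setprioceiling(&attr, 20);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutex_setprioceiling(&m, 25, &old_ceiling);
 */
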
int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	if (*mutex == NULL)
		return (EINVAL);
	*count = (*mutex)->m_spinloops;
	return (0);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_spinloops = count;
	return (0);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	if (*mutex == NULL)
		return (EINVAL);
	*count = (*mutex)->m_yieldloops;
	return (0);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	(*mutex)->m_yieldloops = count;
	return (0);
}

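/*
 * Illustrative example (not part of the original source): the *_np
 * tuning knobs above let an application adjust how long an adaptive
 * mutex spins and yields before blocking in the kernel; the counts
 * here are arbitrary:
 *
 *	pthread_mutex_setspinloops_np(&m, 1000);
 *	pthread_mutex_setyieldloops_np(&m, 10);
 */
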
int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return ((*mutex)->m_owner == curthread);
}