xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision faf139cc5dd3396181c11922bc6685c0c59b7b24)
/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * Copyright (c) 2015 The FreeBSD Foundation
 *
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"

/*
 * For adaptive mutexes, the number of times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	2000

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
		void *(calloc_cb)(size_t, size_t));
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
				const struct timespec *abstime);
static int	mutex_unlock_common(struct pthread_mutex *, int, int *);
static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
				const struct timespec *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

static void
mutex_init_link(struct pthread_mutex *m)
{

#if defined(_PTHREADS_INVARIANTS)
	m->m_qe.tqe_prev = NULL;
	m->m_qe.tqe_next = NULL;
	m->m_pqe.tqe_prev = NULL;
	m->m_pqe.tqe_next = NULL;
#endif
}
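
/*
 * Like mutex_init_link() above, the two assertion helpers below compile
 * to empty functions unless libthr is built with _PTHREADS_INVARIANTS;
 * only then are the queue linkage fields initialized and checked.
 */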

static void
mutex_assert_is_owned(struct pthread_mutex *m)
{

#if defined(_PTHREADS_INVARIANTS)
	if (__predict_false(m->m_qe.tqe_prev == NULL))
		PANIC("mutex is not on list");
#endif
}

static void
mutex_assert_not_owned(struct pthread_mutex *m)
{

#if defined(_PTHREADS_INVARIANTS)
	if (__predict_false(m->m_qe.tqe_prev != NULL ||
	    m->m_qe.tqe_next != NULL))
		PANIC("mutex is on list");
#endif
}

static int
is_pshared_mutex(struct pthread_mutex *m)
{

	return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0);
}

static int
mutex_check_attr(const struct pthread_mutex_attr *attr)
{

	if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
	    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
		return (EINVAL);
	if (attr->m_protocol < PTHREAD_PRIO_NONE ||
	    attr->m_protocol > PTHREAD_PRIO_PROTECT)
		return (EINVAL);
	return (0);
}

static void
mutex_init_body(struct pthread_mutex *pmutex,
    const struct pthread_mutex_attr *attr)
{

	pmutex->m_flags = attr->m_type;
	pmutex->m_owner = 0;
	pmutex->m_count = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	mutex_init_link(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
		break;
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	}
	if (attr->m_pshared == PTHREAD_PROCESS_SHARED)
		pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED;

	if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops : MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}
}

static int
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;
	int error;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = mutex_attr;
		error = mutex_check_attr(attr);
		if (error != 0)
			return (error);
	}
	if ((pmutex = (pthread_mutex_t)
	    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);
	mutex_init_body(pmutex, attr);
	*mutex = pmutex;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == THR_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
		    calloc);
	else
		ret = 0;
	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}
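
/*
 * Usage sketch: a mutex created with PTHREAD_MUTEX_INITIALIZER stores a
 * sentinel pointer rather than a real struct; the first lock or trylock
 * call funnels through init_static() above, which allocates the real
 * object under _mutex_static_lock:
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *	pthread_mutex_lock(&lock);	(allocates the mutex on first use)
 */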

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->mq[TMQ_NORM_PP], mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	struct pthread_mutex *pmtx;
	int ret;

	if (mutex_attr != NULL) {
		ret = mutex_check_attr(*mutex_attr);
		if (ret != 0)
			return (ret);
	}
	if (mutex_attr == NULL ||
	    (*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) {
		return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL,
		    calloc));
	}
	pmtx = __thr_pshared_offpage(mutex, 1);
	if (pmtx == NULL)
		return (EFAULT);
	*mutex = THR_PSHARED_PTR;
	mutex_init_body(pmtx, *mutex_attr);
	return (0);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0,
		.m_pshared = PTHREAD_PROCESS_PRIVATE,
	};
	int ret;

	ret = mutex_init(mutex, &attr, calloc_cb);
	if (ret == 0)
		(*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
	return (ret);
}
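
/*
 * Usage sketch (hypothetical caller, not part of this file): a malloc
 * implementation can pass its own bootstrap allocator so that creating
 * the mutex does not recurse into the public malloc/calloc while the
 * allocator itself is still initializing:
 *
 *	static void *bootstrap_calloc(size_t number, size_t size);
 *
 *	pthread_mutex_t malloc_lock;
 *	_pthread_mutex_init_calloc_cb(&malloc_lock, bootstrap_calloc);
 */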

/*
 * Fix mutex ownership for the child process after fork.
 *
 * Ownership of process-private mutexes is transmitted from the forking
 * thread to the child process.
 *
 * Process-shared mutexes are not inherited: their owner is the forking
 * thread in the parent process, so they are removed from the owned
 * mutex list.
 */
static void
queue_fork(struct pthread *curthread, struct mutex_queue *q,
    struct mutex_queue *qp, uint bit)
{
	struct pthread_mutex *m;

	TAILQ_INIT(q);
	TAILQ_FOREACH(m, qp, m_pqe) {
		TAILQ_INSERT_TAIL(q, m, m_qe);
		m->m_lock.m_owner = TID(curthread) | bit;
		m->m_owner = TID(curthread);
	}
}

void
_mutex_fork(struct pthread *curthread)
{

	queue_fork(curthread, &curthread->mq[TMQ_NORM],
	    &curthread->mq[TMQ_NORM_PRIV], 0);
	queue_fork(curthread, &curthread->mq[TMQ_NORM_PP],
	    &curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED);
}

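/*
 * Note on the sentinel checks below: THR_MUTEX_INITIALIZER,
 * THR_ADAPTIVE_MUTEX_INITIALIZER and THR_MUTEX_DESTROYED (defined in
 * thr_private.h) are small fake pointer values that compare below any
 * real mutex address, so a value below THR_MUTEX_DESTROYED is a static
 * initializer that was never locked and has nothing to free.
 */
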
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	pthread_mutex_t m, m1;
	int ret;

	m = *mutex;
	if (m < THR_MUTEX_DESTROYED) {
		ret = 0;
	} else if (m == THR_MUTEX_DESTROYED) {
		ret = EINVAL;
	} else {
		if (m == THR_PSHARED_PTR) {
			m1 = __thr_pshared_offpage(mutex, 0);
			if (m1 != NULL) {
				mutex_assert_not_owned(m1);
				__thr_pshared_destroy(mutex);
			}
			*mutex = THR_MUTEX_DESTROYED;
			return (0);
		}
		if (m->m_owner != 0) {
			ret = EBUSY;
		} else {
			*mutex = THR_MUTEX_DESTROYED;
			mutex_assert_not_owned(m);
			free(m);
			ret = 0;
		}
	}

	return (ret);
}

static int
mutex_qidx(struct pthread_mutex *m)
{

	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (TMQ_NORM);
	return (TMQ_NORM_PP);
}
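
/*
 * Both enqueue_mutex() and dequeue_mutex() below rely on the queue
 * layout in struct pthread: the process-private variant of each
 * owned-mutex queue sits at the next array index (qidx + 1) after its
 * general counterpart.
 */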

static void
enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m)
{
	int qidx;

	m->m_owner = TID(curthread);
	/* Add to the list of owned mutexes: */
	mutex_assert_not_owned(m);
	qidx = mutex_qidx(m);
	TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe);
	if (!is_pshared_mutex(m))
		TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe);
}

static void
dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)
{
	int qidx;

	m->m_owner = 0;
	mutex_assert_is_owned(m);
	qidx = mutex_qidx(m);
	TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe);
	if (!is_pshared_mutex(m))
		TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0)
		set_inherited_priority(curthread, m);
	mutex_init_link(m);
}

static int
check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m)
{
	int ret;

	*m = *mutex;
	ret = 0;
	if (*m == THR_PSHARED_PTR) {
		*m = __thr_pshared_offpage(mutex, 0);
		if (*m == NULL)
			ret = EINVAL;
	} else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) {
		if (*m == THR_MUTEX_DESTROYED) {
			ret = EINVAL;
		} else {
			ret = init_static(_get_curthread(), mutex);
			if (ret == 0)
				*m = *mutex;
		}
	}
	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread;
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	ret = check_and_init_mutex(mutex, &m);
	if (ret != 0)
		return (ret);
	curthread = _get_curthread();
	id = TID(curthread);
	if (m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (__predict_true(ret == 0)) {
		enqueue_mutex(curthread, m);
	} else if (m->m_owner == id) {
		ret = mutex_self_trylock(m);
	} /* else {} */
	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE))
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
    const struct timespec *abstime)
{
	uint32_t	id, owner;
	int	count;
	int	ret;

	id = TID(curthread);
	if (m->m_owner == id)
		return (mutex_self_lock(m, abstime));

	/*
	 * For adaptive mutexes, spin for a while in the expectation
	 * that if the application requests this mutex type then the
	 * lock is likely to be released quickly, so spinning is
	 * cheaper than entering the kernel.
	 */
	if (__predict_false((m->m_lock.m_flags &
	    (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
		goto sleep_in_kernel;

	if (!_thr_is_smp)
		goto yield_loop;

	count = m->m_spinloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
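			/*
			 * The lock word is free (possibly with the
			 * contested bit set); try to acquire it while
			 * preserving that bit.
			 */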
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,
			    id | owner)) {
				ret = 0;
				goto done;
			}
		}
		CPU_SPINWAIT;
	}

yield_loop:
	count = m->m_yieldloops;
	while (count--) {
		_sched_yield();
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,
			    id | owner)) {
				ret = 0;
				goto done;
			}
		}
	}

sleep_in_kernel:
	if (abstime == NULL) {
		ret = __thr_umutex_lock(&m->m_lock, id);
	} else if (__predict_false(abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)) {
		ret = EINVAL;
	} else {
		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
	}
done:
	if (ret == 0)
		enqueue_mutex(curthread, m);

	return (ret);
}
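
/*
 * Sketch: the spin and yield budgets consumed above are per-mutex and
 * can be tuned by applications through the non-portable accessors
 * defined later in this file, e.g.:
 *
 *	pthread_mutex_setspinloops_np(&m, 1000);
 *	pthread_mutex_setyieldloops_np(&m, 10);
 */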

static inline int
mutex_lock_common(struct pthread_mutex *m,
    const struct timespec *abstime, int cvattach)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
		enqueue_mutex(curthread, m);
		ret = 0;
	} else {
		ret = mutex_lock_sleep(curthread, m, abstime);
	}
	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	int ret;

	_thr_check_init();
	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		ret = mutex_lock_common(m, NULL, 0);
	return (ret);
}

int
__pthread_mutex_timedlock(pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct pthread_mutex *m;
	int ret;

	_thr_check_init();
	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		ret = mutex_lock_common(m, abstime, 0);
	return (ret);
}

int
_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *mp;

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
	} else {
		mp = *mutex;
	}
	return (mutex_unlock_common(mp, 0, NULL));
}

int
_mutex_cv_lock(struct pthread_mutex *m, int count)
{
	int	error;

	error = mutex_lock_common(m, NULL, 1);
	if (error == 0)
		m->m_count = count;
	return (error);
}

int
_mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer)
{

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_count = 0;
	(void)mutex_unlock_common(m, 1, defer);
	return (0);
}

int
_mutex_cv_attach(struct pthread_mutex *m, int count)
{
	struct pthread *curthread = _get_curthread();

	enqueue_mutex(curthread, m);
	m->m_count = count;
	return (0);
}

int
_mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
{
	struct pthread *curthread = _get_curthread();
	int	defered;
	int	error;

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*recurse = mp->m_count;
	mp->m_count = 0;
	dequeue_mutex(curthread, mp);

	/* Will this happen in the real world? */
	if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
		defered = 1;
		mp->m_flags &= ~PMUTEX_FLAG_DEFERED;
	} else
		defered = 0;

	if (defered) {
		_thr_wake_all(curthread->defer_waiters,
		    curthread->nwaiter_defer);
		curthread->nwaiter_defer = 0;
	}
	return (0);
}

static int
mutex_self_trylock(struct pthread_mutex *m)
{
	int	ret;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
{
	struct timespec	ts1, ts2;
	int	ret;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
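			/*
			 * Simulate the deadlock by sleeping forever in
			 * 30-second chunks rather than spinning.
			 */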
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
{
	struct pthread *curthread = _get_curthread();
	uint32_t id;
	int defered, error;

	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
		if (m == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}

	id = TID(curthread);

	/*
	 * Check that the running thread is the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != id))
		return (EPERM);

	error = 0;
	if (__predict_false(PMUTEX_TYPE(m->m_flags) ==
	    PTHREAD_MUTEX_RECURSIVE && m->m_count > 0)) {
		m->m_count--;
	} else {
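		/*
		 * The condition variable code may have deferred waiter
		 * wakeups while this mutex was held (PMUTEX_FLAG_DEFERED);
		 * issue them once the lock word has been released, unless
		 * the caller takes over via mtx_defer.
		 */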
		if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
			defered = 1;
			m->m_flags &= ~PMUTEX_FLAG_DEFERED;
		} else
			defered = 0;

		dequeue_mutex(curthread, m);
		error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);

		if (mtx_defer == NULL && defered) {
			_thr_wake_all(curthread->defer_waiters,
			    curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
	}
	if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_LEAVE(curthread);
	return (error);
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	struct pthread_mutex *m;

	if (*mutex == THR_PSHARED_PTR) {
		m = __thr_pshared_offpage(mutex, 0);
		if (m == NULL)
			return (EINVAL);
	} else {
		m = *mutex;
		if (m <= THR_MUTEX_DESTROYED)
			return (EINVAL);
	}
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);
	*prioceiling = m->m_lock.m_ceilings[0];
	return (0);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
	struct pthread *curthread;
	struct pthread_mutex *m, *m1, *m2;
	struct mutex_queue *q, *qp;
	int ret;

	if (*mutex == THR_PSHARED_PTR) {
		m = __thr_pshared_offpage(mutex, 0);
		if (m == NULL)
			return (EINVAL);
	} else {
		m = *mutex;
		if (m <= THR_MUTEX_DESTROYED)
			return (EINVAL);
	}
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	curthread = _get_curthread();
	if (m->m_owner == TID(curthread)) {
		mutex_assert_is_owned(m);
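		/*
		 * Keep the owned PRIO_PROTECT queue sorted by ceiling:
		 * if the new ceiling breaks the ordering relative to the
		 * neighbours, remove the mutex and reinsert it before the
		 * first entry with a larger ceiling.
		 */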
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			q = &curthread->mq[TMQ_NORM_PP];
			qp = &curthread->mq[TMQ_NORM_PP_PRIV];
			TAILQ_REMOVE(q, m, m_qe);
			if (!is_pshared_mutex(m))
				TAILQ_REMOVE(qp, m, m_pqe);
			TAILQ_FOREACH(m2, q, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					if (!is_pshared_mutex(m)) {
						while (m2 != NULL &&
						    is_pshared_mutex(m2)) {
							m2 = TAILQ_PREV(m2,
							    mutex_queue, m_qe);
						}
						if (m2 == NULL) {
							TAILQ_INSERT_HEAD(qp,
							    m, m_pqe);
						} else {
							TAILQ_INSERT_BEFORE(m2,
							    m, m_pqe);
						}
					}
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(q, m, m_qe);
			if (!is_pshared_mutex(m))
				TAILQ_INSERT_TAIL(qp, m, m_pqe);
		}
	}
	return (0);
}

int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;
	int ret;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		*count = m->m_spinloops;
	return (ret);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;
	int ret;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		m->m_spinloops = count;
	return (ret);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;
	int ret;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		*count = m->m_yieldloops;
	return (ret);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;
	int ret;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		m->m_yieldloops = count;
	return (ret);
}

int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
	struct pthread_mutex	*m;

	if (*mutex == THR_PSHARED_PTR) {
		m = __thr_pshared_offpage(mutex, 0);
		if (m == NULL)
			return (0);
	} else {
		m = *mutex;
		if (m <= THR_MUTEX_DESTROYED)
			return (0);
	}
	return (m->m_owner == TID(_get_curthread()));
}

int
_mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
{

	if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
		if (mp == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}
	if (mp->m_owner != TID(curthread))
		return (EPERM);
	return (0);
}
959