xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision 07f22a288d60694ac7305c64323e1d53f0f2cc0d)
1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
4  * Copyright (c) 2015 The FreeBSD Foundation
5  *
6  * All rights reserved.
7  *
8  * Portions of this software were developed by Konstantin Belousov
9  * under sponsorship from the FreeBSD Foundation.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by John Birrell.
22  * 4. Neither the name of the author nor the names of any co-contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * $FreeBSD$
39  */
40 
41 #include "namespace.h"
42 #include <stdlib.h>
43 #include <errno.h>
44 #include <string.h>
45 #include <sys/param.h>
46 #include <sys/queue.h>
47 #include <pthread.h>
48 #include <pthread_np.h>
49 #include "un-namespace.h"
50 
51 #include "thr_private.h"
52 
53 /*
54  * For adaptive mutexes, the number of times to spin doing trylock2
55  * before entering the kernel to block.
56  */
57 #define MUTEX_ADAPTIVE_SPINS	2000
58 
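/*
 * Illustrative caller-side sketch (not part of libthr): the adaptive spin
 * count above only matters for mutexes created with the non-portable
 * PTHREAD_MUTEX_ADAPTIVE_NP type; "attr" and "mtx" are hypothetical
 * caller variables.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t mtx;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
 *	pthread_mutex_init(&mtx, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */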
59 /*
60  * Prototypes
61  */
62 int	__pthread_mutex_init(pthread_mutex_t *mutex,
63 		const pthread_mutexattr_t *mutex_attr);
64 int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
65 int	__pthread_mutex_lock(pthread_mutex_t *mutex);
66 int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
67 		const struct timespec *abstime);
68 int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
69     		void *(calloc_cb)(size_t, size_t));
70 int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
71 int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
72 int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
73 int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
74 int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
75 int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
76 
77 static int	mutex_self_trylock(pthread_mutex_t);
78 static int	mutex_self_lock(pthread_mutex_t,
79 				const struct timespec *abstime);
80 static int	mutex_unlock_common(struct pthread_mutex *, int, int *);
81 static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
82 				const struct timespec *);
83 
84 __weak_reference(__pthread_mutex_init, pthread_mutex_init);
85 __strong_reference(__pthread_mutex_init, _pthread_mutex_init);
86 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
87 __strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
88 __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
89 __strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
90 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
91 __strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);
92 
93 /* Single underscore versions provided for libc internal usage: */
94 /* No difference between libc and application usage of these: */
95 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
96 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
97 
98 __weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
99 __weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);
100 
101 __weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
102 __strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
103 __weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);
104 
105 __weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
106 __strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
107 __weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
108 __weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);
109 
110 static void
111 mutex_init_link(struct pthread_mutex *m)
112 {
113 
114 #if defined(_PTHREADS_INVARIANTS)
115 	m->m_qe.tqe_prev = NULL;
116 	m->m_qe.tqe_next = NULL;
117 	m->m_pqe.tqe_prev = NULL;
118 	m->m_pqe.tqe_next = NULL;
119 #endif
120 }
121 
122 static void
123 mutex_assert_is_owned(struct pthread_mutex *m)
124 {
125 
126 #if defined(_PTHREADS_INVARIANTS)
127 	if (__predict_false(m->m_qe.tqe_prev == NULL)) {
128 		char msg[128];
129 		snprintf(msg, sizeof(msg),
130 		    "mutex %p own %#x %#x is not on list %p %p",
131 		    m, m->m_lock.m_owner, m->m_owner, m->m_qe.tqe_prev,
132 		    m->m_qe.tqe_next);
133 		PANIC(msg);
134 	}
135 #endif
136 }
137 
138 static void
139 mutex_assert_not_owned(struct pthread_mutex *m)
140 {
141 
142 #if defined(_PTHREADS_INVARIANTS)
143 	if (__predict_false(m->m_qe.tqe_prev != NULL ||
144 	    m->m_qe.tqe_next != NULL)) {
145 		char msg[128];
146 		snprintf(msg, sizeof(msg),
147 		    "mutex %p own %#x %#x is on list %p %p",
148 		    m, m->m_lock.m_owner, m->m_owner, m->m_qe.tqe_prev,
149 		    m->m_qe.tqe_next);
150 		PANIC(msg);
151 	}
152 #endif
153 }
154 
155 static int
156 is_pshared_mutex(struct pthread_mutex *m)
157 {
158 
159 	return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0);
160 }
161 
162 static int
163 mutex_check_attr(const struct pthread_mutex_attr *attr)
164 {
165 
166 	if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
167 	    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
168 		return (EINVAL);
169 	if (attr->m_protocol < PTHREAD_PRIO_NONE ||
170 	    attr->m_protocol > PTHREAD_PRIO_PROTECT)
171 		return (EINVAL);
172 	return (0);
173 }
174 
175 static void
176 mutex_init_body(struct pthread_mutex *pmutex,
177     const struct pthread_mutex_attr *attr)
178 {
179 
180 	pmutex->m_flags = attr->m_type;
181 	pmutex->m_owner = 0;
182 	pmutex->m_count = 0;
183 	pmutex->m_spinloops = 0;
184 	pmutex->m_yieldloops = 0;
185 	mutex_init_link(pmutex);
186 	switch (attr->m_protocol) {
187 	case PTHREAD_PRIO_NONE:
188 		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
189 		pmutex->m_lock.m_flags = 0;
190 		break;
191 	case PTHREAD_PRIO_INHERIT:
192 		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
193 		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
194 		break;
195 	case PTHREAD_PRIO_PROTECT:
196 		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
197 		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
198 		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
199 		break;
200 	}
201 	if (attr->m_pshared == PTHREAD_PROCESS_SHARED)
202 		pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED;
203 
204 	if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
205 		pmutex->m_spinloops =
206 		    _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
207 		pmutex->m_yieldloops = _thr_yieldloops;
208 	}
209 }
210 
211 static int
212 mutex_init(pthread_mutex_t *mutex,
213     const struct pthread_mutex_attr *mutex_attr,
214     void *(calloc_cb)(size_t, size_t))
215 {
216 	const struct pthread_mutex_attr *attr;
217 	struct pthread_mutex *pmutex;
218 	int error;
219 
220 	if (mutex_attr == NULL) {
221 		attr = &_pthread_mutexattr_default;
222 	} else {
223 		attr = mutex_attr;
224 		error = mutex_check_attr(attr);
225 		if (error != 0)
226 			return (error);
227 	}
228 	if ((pmutex = (pthread_mutex_t)
229 		calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
230 		return (ENOMEM);
231 	mutex_init_body(pmutex, attr);
232 	*mutex = pmutex;
233 	return (0);
234 }
235 
236 static int
237 init_static(struct pthread *thread, pthread_mutex_t *mutex)
238 {
239 	int ret;
240 
241 	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
242 
243 	if (*mutex == THR_MUTEX_INITIALIZER)
244 		ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
245 	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
246 		ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
247 		    calloc);
248 	else
249 		ret = 0;
250 	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
251 
252 	return (ret);
253 }
254 
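/*
 * Illustrative sketch (not part of libthr): statically initialized
 * mutexes are allocated lazily; the first lock operation routes them
 * through check_and_init_mutex() and init_static() above.  "mtx" is a
 * hypothetical caller variable.
 *
 *	static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
 *
 *	pthread_mutex_lock(&mtx);	// allocates the real mutex here
 *	pthread_mutex_unlock(&mtx);
 */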
255 static void
256 set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
257 {
258 	struct pthread_mutex *m2;
259 
260 	m2 = TAILQ_LAST(&curthread->mq[TMQ_NORM_PP], mutex_queue);
261 	if (m2 != NULL)
262 		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
263 	else
264 		m->m_lock.m_ceilings[1] = -1;
265 }
266 
267 int
268 __pthread_mutex_init(pthread_mutex_t *mutex,
269     const pthread_mutexattr_t *mutex_attr)
270 {
271 	struct pthread_mutex *pmtx;
272 	int ret;
273 
274 	if (mutex_attr != NULL) {
275 		ret = mutex_check_attr(*mutex_attr);
276 		if (ret != 0)
277 			return (ret);
278 	}
279 	if (mutex_attr == NULL ||
280 	    (*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) {
281 		return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL,
282 		   calloc));
283 	}
284 	pmtx = __thr_pshared_offpage(mutex, 1);
285 	if (pmtx == NULL)
286 		return (EFAULT);
287 	*mutex = THR_PSHARED_PTR;
288 	mutex_init_body(pmtx, *mutex_attr);
289 	return (0);
290 }
291 
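/*
 * Illustrative sketch (not part of libthr): with PTHREAD_PROCESS_SHARED
 * the function above stores THR_PSHARED_PTR in *mutex and keeps the real
 * struct pthread_mutex in the off-page object returned by
 * __thr_pshared_offpage().  "attr" is hypothetical, and "mp" is assumed
 * to point into memory mapped by all cooperating processes.
 *
 *	pthread_mutexattr_t attr;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	pthread_mutex_init(mp, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */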
292 /* This function is used internally by malloc. */
293 int
294 _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
295     void *(calloc_cb)(size_t, size_t))
296 {
297 	static const struct pthread_mutex_attr attr = {
298 		.m_type = PTHREAD_MUTEX_NORMAL,
299 		.m_protocol = PTHREAD_PRIO_NONE,
300 		.m_ceiling = 0,
301 		.m_pshared = PTHREAD_PROCESS_PRIVATE,
302 	};
303 	int ret;
304 
305 	ret = mutex_init(mutex, &attr, calloc_cb);
306 	if (ret == 0)
307 		(*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
308 	return (ret);
309 }
310 
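/*
 * Illustrative sketch (not part of libthr): the calloc callback lets an
 * allocator create its internal lock before the public calloc() is
 * usable.  "bootstrap_calloc" and "malloc_lock" are hypothetical names.
 *
 *	static void *bootstrap_calloc(size_t number, size_t size);
 *	static pthread_mutex_t malloc_lock;
 *
 *	_pthread_mutex_init_calloc_cb(&malloc_lock, bootstrap_calloc);
 */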
311 /*
312  * Fix mutex ownership for child process.
313  *
314  * Process-private mutex ownership is transferred from the forking
315  * thread to the child process.
316  *
317  * Process-shared mutexes are not inherited because their owner is
318  * the forking thread in the parent process; they are removed from
319  * the child's list of owned mutexes.
320  */
321 static void
322 queue_fork(struct pthread *curthread, struct mutex_queue *q,
323     struct mutex_queue *qp, uint bit)
324 {
325 	struct pthread_mutex *m;
326 
327 	TAILQ_INIT(q);
328 	TAILQ_FOREACH(m, qp, m_pqe) {
329 		TAILQ_INSERT_TAIL(q, m, m_qe);
330 		m->m_lock.m_owner = TID(curthread) | bit;
331 		m->m_owner = TID(curthread);
332 	}
333 }
334 
335 void
336 _mutex_fork(struct pthread *curthread)
337 {
338 
339 	queue_fork(curthread, &curthread->mq[TMQ_NORM],
340 	    &curthread->mq[TMQ_NORM_PRIV], 0);
341 	queue_fork(curthread, &curthread->mq[TMQ_NORM_PP],
342 	    &curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED);
343 }
344 
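/*
 * Illustrative sketch (not part of libthr, assumes <unistd.h>): after
 * fork(), the child's sole thread keeps ownership of process-private
 * mutexes that the forking thread held, so it can release them normally.
 * "priv_mtx" is a hypothetical process-private mutex.
 *
 *	pthread_mutex_lock(&priv_mtx);
 *	if (fork() == 0) {
 *		pthread_mutex_unlock(&priv_mtx);	// child still owns it
 *		_exit(0);
 *	}
 *	pthread_mutex_unlock(&priv_mtx);		// parent
 */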
345 int
346 _pthread_mutex_destroy(pthread_mutex_t *mutex)
347 {
348 	pthread_mutex_t m, m1;
349 	int ret;
350 
351 	m = *mutex;
352 	if (m < THR_MUTEX_DESTROYED) {
353 		ret = 0;
354 	} else if (m == THR_MUTEX_DESTROYED) {
355 		ret = EINVAL;
356 	} else {
357 		if (m == THR_PSHARED_PTR) {
358 			m1 = __thr_pshared_offpage(mutex, 0);
359 			if (m1 != NULL) {
360 				mutex_assert_not_owned(m1);
361 				__thr_pshared_destroy(mutex);
362 			}
363 			*mutex = THR_MUTEX_DESTROYED;
364 			return (0);
365 		}
366 		if (m->m_owner != 0) {
367 			ret = EBUSY;
368 		} else {
369 			*mutex = THR_MUTEX_DESTROYED;
370 			mutex_assert_not_owned(m);
371 			free(m);
372 			ret = 0;
373 		}
374 	}
375 
376 	return (ret);
377 }
378 
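/*
 * Illustrative sketch (not part of libthr, assumes <assert.h>):
 * destroying a locked mutex fails with EBUSY, and later use of a
 * destroyed mutex is caught through the THR_MUTEX_DESTROYED sentinel.
 * "mtx" is a hypothetical, already initialized mutex.
 *
 *	pthread_mutex_lock(&mtx);
 *	assert(pthread_mutex_destroy(&mtx) == EBUSY);
 *	pthread_mutex_unlock(&mtx);
 *	assert(pthread_mutex_destroy(&mtx) == 0);
 *	assert(pthread_mutex_lock(&mtx) == EINVAL);	// destroyed
 */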
379 static int
380 mutex_qidx(struct pthread_mutex *m)
381 {
382 
383 	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
384 		return (TMQ_NORM);
385 	return (TMQ_NORM_PP);
386 }
387 
388 static void
389 enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m)
390 {
391 	int qidx;
392 
393 	m->m_owner = TID(curthread);
394 	/* Add to the list of owned mutexes: */
395 	mutex_assert_not_owned(m);
396 	qidx = mutex_qidx(m);
397 	TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe);
398 	if (!is_pshared_mutex(m))
399 		TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe);
400 }
401 
402 static void
403 dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)
404 {
405 	int qidx;
406 
407 	m->m_owner = 0;
408 	mutex_assert_is_owned(m);
409 	qidx = mutex_qidx(m);
410 	TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe);
411 	if (!is_pshared_mutex(m))
412 		TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe);
413 	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0)
414 		set_inherited_priority(curthread, m);
415 	mutex_init_link(m);
416 }
417 
418 static int
419 check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m)
420 {
421 	int ret;
422 
423 	*m = *mutex;
424 	ret = 0;
425 	if (*m == THR_PSHARED_PTR) {
426 		*m = __thr_pshared_offpage(mutex, 0);
427 		if (*m == NULL)
428 			ret = EINVAL;
429 	} else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) {
430 		if (*m == THR_MUTEX_DESTROYED) {
431 			ret = EINVAL;
432 		} else {
433 			ret = init_static(_get_curthread(), mutex);
434 			if (ret == 0)
435 				*m = *mutex;
436 		}
437 	}
438 	return (ret);
439 }
440 
441 int
442 __pthread_mutex_trylock(pthread_mutex_t *mutex)
443 {
444 	struct pthread *curthread;
445 	struct pthread_mutex *m;
446 	uint32_t id;
447 	int ret;
448 
449 	ret = check_and_init_mutex(mutex, &m);
450 	if (ret != 0)
451 		return (ret);
452 	curthread = _get_curthread();
453 	id = TID(curthread);
454 	if (m->m_flags & PMUTEX_FLAG_PRIVATE)
455 		THR_CRITICAL_ENTER(curthread);
456 	ret = _thr_umutex_trylock(&m->m_lock, id);
457 	if (__predict_true(ret == 0)) {
458 		enqueue_mutex(curthread, m);
459 	} else if (m->m_owner == id) {
460 		ret = mutex_self_trylock(m);
461 	} /* else {} */
462 	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE))
463 		THR_CRITICAL_LEAVE(curthread);
464 	return (ret);
465 }
466 
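/*
 * Illustrative sketch (not part of libthr): trylock never sleeps; if the
 * mutex is held by another thread (or by the caller, for non-recursive
 * types) it returns EBUSY.  "mtx" is a hypothetical mutex.
 *
 *	if (pthread_mutex_trylock(&mtx) == 0) {
 *		// got the lock without blocking
 *		pthread_mutex_unlock(&mtx);
 *	} else {
 *		// EBUSY: fall back to other work or a blocking lock
 *	}
 */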
467 static int
468 mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
469 	const struct timespec *abstime)
470 {
471 	uint32_t	id, owner;
472 	int	count;
473 	int	ret;
474 
475 	id = TID(curthread);
476 	if (m->m_owner == id)
477 		return (mutex_self_lock(m, abstime));
478 
479 	/*
480 	 * For adaptive mutexes, spin for a while in the expectation
481 	 * that if the application requested this mutex type, the
482 	 * lock is likely to be released quickly; spinning is then
483 	 * faster than entering the kernel.
484 	 */
485 	if (__predict_false(
486 		(m->m_lock.m_flags &
487 		 (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
488 			goto sleep_in_kernel;
489 
490 	if (!_thr_is_smp)
491 		goto yield_loop;
492 
493 	count = m->m_spinloops;
494 	while (count--) {
495 		owner = m->m_lock.m_owner;
496 		if ((owner & ~UMUTEX_CONTESTED) == 0) {
497 			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
498 				ret = 0;
499 				goto done;
500 			}
501 		}
502 		CPU_SPINWAIT;
503 	}
504 
505 yield_loop:
506 	count = m->m_yieldloops;
507 	while (count--) {
508 		_sched_yield();
509 		owner = m->m_lock.m_owner;
510 		if ((owner & ~UMUTEX_CONTESTED) == 0) {
511 			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
512 				ret = 0;
513 				goto done;
514 			}
515 		}
516 	}
517 
518 sleep_in_kernel:
519 	if (abstime == NULL) {
520 		ret = __thr_umutex_lock(&m->m_lock, id);
521 	} else if (__predict_false(
522 		   abstime->tv_nsec < 0 ||
523 		   abstime->tv_nsec >= 1000000000)) {
524 		ret = EINVAL;
525 	} else {
526 		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
527 	}
528 done:
529 	if (ret == 0)
530 		enqueue_mutex(curthread, m);
531 
532 	return (ret);
533 }
534 
535 static inline int
536 mutex_lock_common(struct pthread_mutex *m,
537 	const struct timespec *abstime, int cvattach)
538 {
539 	struct pthread *curthread = _get_curthread();
540 	int ret;
541 
542 	if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
543 		THR_CRITICAL_ENTER(curthread);
544 	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
545 		enqueue_mutex(curthread, m);
546 		ret = 0;
547 	} else {
548 		ret = mutex_lock_sleep(curthread, m, abstime);
549 	}
550 	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach)
551 		THR_CRITICAL_LEAVE(curthread);
552 	return (ret);
553 }
554 
555 int
556 __pthread_mutex_lock(pthread_mutex_t *mutex)
557 {
558 	struct pthread_mutex *m;
559 	int ret;
560 
561 	_thr_check_init();
562 	ret = check_and_init_mutex(mutex, &m);
563 	if (ret == 0)
564 		ret = mutex_lock_common(m, NULL, 0);
565 	return (ret);
566 }
567 
568 int
569 __pthread_mutex_timedlock(pthread_mutex_t *mutex,
570     const struct timespec *abstime)
571 {
572 	struct pthread_mutex *m;
573 	int ret;
574 
575 	_thr_check_init();
576 	ret = check_and_init_mutex(mutex, &m);
577 	if (ret == 0)
578 		ret = mutex_lock_common(m, abstime, 0);
579 	return (ret);
580 }
581 
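/*
 * Illustrative sketch (not part of libthr, assumes <time.h>): the timeout
 * is an absolute CLOCK_REALTIME deadline, so callers add a relative delay
 * to the current time; a tv_nsec outside [0, 1000000000) is rejected with
 * EINVAL in mutex_lock_sleep().  "mtx" and "handle_timeout" are
 * hypothetical.
 *
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 5;	// wait at most about five seconds
 *	if (pthread_mutex_timedlock(&mtx, &deadline) == ETIMEDOUT)
 *		handle_timeout();
 */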
582 int
583 _pthread_mutex_unlock(pthread_mutex_t *mutex)
584 {
585 	struct pthread_mutex *mp;
586 
587 	if (*mutex == THR_PSHARED_PTR) {
588 		mp = __thr_pshared_offpage(mutex, 0);
589 		if (mp == NULL)
590 			return (EINVAL);
591 	} else {
592 		mp = *mutex;
593 	}
594 	return (mutex_unlock_common(mp, 0, NULL));
595 }
596 
597 int
598 _mutex_cv_lock(struct pthread_mutex *m, int count)
599 {
600 	int	error;
601 
602 	error = mutex_lock_common(m, NULL, 1);
603 	if (error == 0)
604 		m->m_count = count;
605 	return (error);
606 }
607 
608 int
609 _mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer)
610 {
611 
612 	/*
613 	 * Clear the count in case this is a recursive mutex.
614 	 */
615 	*count = m->m_count;
616 	m->m_count = 0;
617 	(void)mutex_unlock_common(m, 1, defer);
618 	return (0);
619 }
620 
621 int
622 _mutex_cv_attach(struct pthread_mutex *m, int count)
623 {
624 	struct pthread *curthread = _get_curthread();
625 
626 	enqueue_mutex(curthread, m);
627 	m->m_count = count;
628 	return (0);
629 }
630 
631 int
632 _mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
633 {
634 	struct pthread *curthread = _get_curthread();
635 	int     defered;
636 	int     error;
637 
638 	if ((error = _mutex_owned(curthread, mp)) != 0)
639 		return (error);
640 
641 	/*
642 	 * Clear the count in case this is a recursive mutex.
643 	 */
644 	*recurse = mp->m_count;
645 	mp->m_count = 0;
646 	dequeue_mutex(curthread, mp);
647 
648 	/* Will this happen in the real world? */
649 	if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
650 		defered = 1;
651 		mp->m_flags &= ~PMUTEX_FLAG_DEFERED;
652 	} else
653 		defered = 0;
654 
655 	if (defered) {
656 		_thr_wake_all(curthread->defer_waiters,
657 				curthread->nwaiter_defer);
658 		curthread->nwaiter_defer = 0;
659 	}
660 	return (0);
661 }
662 
663 static int
664 mutex_self_trylock(struct pthread_mutex *m)
665 {
666 	int	ret;
667 
668 	switch (PMUTEX_TYPE(m->m_flags)) {
669 	case PTHREAD_MUTEX_ERRORCHECK:
670 	case PTHREAD_MUTEX_NORMAL:
671 	case PTHREAD_MUTEX_ADAPTIVE_NP:
672 		ret = EBUSY;
673 		break;
674 
675 	case PTHREAD_MUTEX_RECURSIVE:
676 		/* Increment the lock count: */
677 		if (m->m_count + 1 > 0) {
678 			m->m_count++;
679 			ret = 0;
680 		} else
681 			ret = EAGAIN;
682 		break;
683 
684 	default:
685 		/* Trap invalid mutex types. */
686 		ret = EINVAL;
687 	}
688 
689 	return (ret);
690 }
691 
692 static int
693 mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
694 {
695 	struct timespec	ts1, ts2;
696 	int	ret;
697 
698 	switch (PMUTEX_TYPE(m->m_flags)) {
699 	case PTHREAD_MUTEX_ERRORCHECK:
700 	case PTHREAD_MUTEX_ADAPTIVE_NP:
701 		if (abstime) {
702 			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
703 			    abstime->tv_nsec >= 1000000000) {
704 				ret = EINVAL;
705 			} else {
706 				clock_gettime(CLOCK_REALTIME, &ts1);
707 				TIMESPEC_SUB(&ts2, abstime, &ts1);
708 				__sys_nanosleep(&ts2, NULL);
709 				ret = ETIMEDOUT;
710 			}
711 		} else {
712 			/*
713 			 * POSIX specifies that error-checking mutexes
714 			 * return EDEADLK if a recursive lock is detected.
715 			 */
716 			ret = EDEADLK;
717 		}
718 		break;
719 
720 	case PTHREAD_MUTEX_NORMAL:
721 		/*
722 		 * What SS2 defines as a 'normal' mutex.  Intentionally
723 		 * deadlock on attempts to get a lock you already own.
724 		 */
725 		ret = 0;
726 		if (abstime) {
727 			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
728 			    abstime->tv_nsec >= 1000000000) {
729 				ret = EINVAL;
730 			} else {
731 				clock_gettime(CLOCK_REALTIME, &ts1);
732 				TIMESPEC_SUB(&ts2, abstime, &ts1);
733 				__sys_nanosleep(&ts2, NULL);
734 				ret = ETIMEDOUT;
735 			}
736 		} else {
737 			ts1.tv_sec = 30;
738 			ts1.tv_nsec = 0;
739 			for (;;)
740 				__sys_nanosleep(&ts1, NULL);
741 		}
742 		break;
743 
744 	case PTHREAD_MUTEX_RECURSIVE:
745 		/* Increment the lock count: */
746 		if (m->m_count + 1 > 0) {
747 			m->m_count++;
748 			ret = 0;
749 		} else
750 			ret = EAGAIN;
751 		break;
752 
753 	default:
754 		/* Trap invalid mutex types. */
755 		ret = EINVAL;
756 	}
757 
758 	return (ret);
759 }
760 
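/*
 * Illustrative sketch (not part of libthr): relocking an owned mutex
 * depends on its type, as implemented above: recursive mutexes count
 * nested locks, error-checking mutexes return EDEADLK, and normal
 * mutexes deadlock by design.  "attr" and "mtx" are hypothetical.
 *
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&mtx, &attr);
 *	pthread_mutex_lock(&mtx);
 *	pthread_mutex_lock(&mtx);	// ok, m_count is incremented
 *	pthread_mutex_unlock(&mtx);
 *	pthread_mutex_unlock(&mtx);	// lock actually released here
 */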
761 static int
762 mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
763 {
764 	struct pthread *curthread = _get_curthread();
765 	uint32_t id;
766 	int defered, error;
767 
768 	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
769 		if (m == THR_MUTEX_DESTROYED)
770 			return (EINVAL);
771 		return (EPERM);
772 	}
773 
774 	id = TID(curthread);
775 
776 	/*
777 	 * Only the owner of the mutex may unlock it.
778 	 */
779 	if (__predict_false(m->m_owner != id))
780 		return (EPERM);
781 
782 	error = 0;
783 	if (__predict_false(
784 		PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE &&
785 		m->m_count > 0)) {
786 		m->m_count--;
787 	} else {
788 		if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
789 			defered = 1;
790 			m->m_flags &= ~PMUTEX_FLAG_DEFERED;
791 		} else
792 			defered = 0;
793 
794 		dequeue_mutex(curthread, m);
795 		error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);
796 
797 		if (mtx_defer == NULL && defered)  {
798 			_thr_wake_all(curthread->defer_waiters,
799 				curthread->nwaiter_defer);
800 			curthread->nwaiter_defer = 0;
801 		}
802 	}
803 	if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE)
804 		THR_CRITICAL_LEAVE(curthread);
805 	return (error);
806 }
807 
808 int
809 _pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
810     int *prioceiling)
811 {
812 	struct pthread_mutex *m;
813 
814 	if (*mutex == THR_PSHARED_PTR) {
815 		m = __thr_pshared_offpage(mutex, 0);
816 		if (m == NULL)
817 			return (EINVAL);
818 	} else {
819 		m = *mutex;
820 		if (m <= THR_MUTEX_DESTROYED)
821 			return (EINVAL);
822 	}
823 	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
824 		return (EINVAL);
825 	*prioceiling = m->m_lock.m_ceilings[0];
826 	return (0);
827 }
828 
829 int
830 _pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
831     int ceiling, int *old_ceiling)
832 {
833 	struct pthread *curthread;
834 	struct pthread_mutex *m, *m1, *m2;
835 	struct mutex_queue *q, *qp;
836 	int ret;
837 
838 	if (*mutex == THR_PSHARED_PTR) {
839 		m = __thr_pshared_offpage(mutex, 0);
840 		if (m == NULL)
841 			return (EINVAL);
842 	} else {
843 		m = *mutex;
844 		if (m <= THR_MUTEX_DESTROYED)
845 			return (EINVAL);
846 	}
847 	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
848 		return (EINVAL);
849 
850 	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
851 	if (ret != 0)
852 		return (ret);
853 
854 	curthread = _get_curthread();
855 	if (m->m_owner == TID(curthread)) {
856 		mutex_assert_is_owned(m);
857 		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
858 		m2 = TAILQ_NEXT(m, m_qe);
859 		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
860 		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
861 			q = &curthread->mq[TMQ_NORM_PP];
862 			qp = &curthread->mq[TMQ_NORM_PP_PRIV];
863 			TAILQ_REMOVE(q, m, m_qe);
864 			if (!is_pshared_mutex(m))
865 				TAILQ_REMOVE(qp, m, m_pqe);
866 			TAILQ_FOREACH(m2, q, m_qe) {
867 				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
868 					TAILQ_INSERT_BEFORE(m2, m, m_qe);
869 					if (!is_pshared_mutex(m)) {
870 						while (m2 != NULL &&
871 						    is_pshared_mutex(m2)) {
872 							m2 = TAILQ_PREV(m2,
873 							    mutex_queue, m_qe);
874 						}
875 						if (m2 == NULL) {
876 							TAILQ_INSERT_HEAD(qp,
877 							    m, m_pqe);
878 						} else {
879 							TAILQ_INSERT_BEFORE(m2,
880 							    m, m_pqe);
881 						}
882 					}
883 					return (0);
884 				}
885 			}
886 			TAILQ_INSERT_TAIL(q, m, m_qe);
887 			if (!is_pshared_mutex(m))
888 				TAILQ_INSERT_TAIL(qp, m, m_pqe);
889 		}
890 	}
891 	return (0);
892 }
893 
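/*
 * Illustrative sketch (not part of libthr): the ceiling calls above only
 * apply to PTHREAD_PRIO_PROTECT mutexes, whose current ceiling is kept
 * in m_lock.m_ceilings[0].  "attr", "mtx", and the priority values are
 * hypothetical.
 *
 *	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
 *	pthread_mutexattr_setprioceiling(&attr, 20);
 *	pthread_mutex_init(&mtx, &attr);
 *
 *	int old_ceiling;
 *	pthread_mutex_setprioceiling(&mtx, 25, &old_ceiling);
 */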
894 int
895 _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
896 {
897 	struct pthread_mutex *m;
898 	int ret;
899 
900 	ret = check_and_init_mutex(mutex, &m);
901 	if (ret == 0)
902 		*count = m->m_spinloops;
903 	return (ret);
904 }
905 
906 int
907 __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
908 {
909 	struct pthread_mutex *m;
910 	int ret;
911 
912 	ret = check_and_init_mutex(mutex, &m);
913 	if (ret == 0)
914 		m->m_spinloops = count;
915 	return (ret);
916 }
917 
918 int
919 _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
920 {
921 	struct pthread_mutex *m;
922 	int ret;
923 
924 	ret = check_and_init_mutex(mutex, &m);
925 	if (ret == 0)
926 		*count = m->m_yieldloops;
927 	return (ret);
928 }
929 
930 int
931 __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
932 {
933 	struct pthread_mutex *m;
934 	int ret;
935 
936 	ret = check_and_init_mutex(mutex, &m);
937 	if (ret == 0)
938 		m->m_yieldloops = count;
939 	return (0);
940 }
941 
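/*
 * Illustrative sketch (not part of libthr): the spin and yield counts
 * consumed by mutex_lock_sleep() can be tuned per mutex through these
 * non-portable interfaces from <pthread_np.h>.  "mtx" and the counts
 * are hypothetical.
 *
 *	int spins;
 *
 *	pthread_mutex_setspinloops_np(&mtx, 4000);
 *	pthread_mutex_setyieldloops_np(&mtx, 2);
 *	pthread_mutex_getspinloops_np(&mtx, &spins);	// spins == 4000
 */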
942 int
943 _pthread_mutex_isowned_np(pthread_mutex_t *mutex)
944 {
945 	struct pthread_mutex	*m;
946 
947 	if (*mutex == THR_PSHARED_PTR) {
948 		m = __thr_pshared_offpage(mutex, 0);
949 		if (m == NULL)
950 			return (0);
951 	} else {
952 		m = *mutex;
953 		if (m <= THR_MUTEX_DESTROYED)
954 			return (0);
955 	}
956 	return (m->m_owner == TID(_get_curthread()));
957 }
958 
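/*
 * Illustrative sketch (not part of libthr, assumes <assert.h>):
 * pthread_mutex_isowned_np() is convenient for asserting that a caller
 * holds a required lock.  "state_mtx" and "update_state_locked" are
 * hypothetical.
 *
 *	static void
 *	update_state_locked(void)
 *	{
 *		assert(pthread_mutex_isowned_np(&state_mtx));
 *		// ... modify state protected by state_mtx ...
 *	}
 */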
959 int
960 _mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
961 {
962 	if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
963 		if (mp == THR_MUTEX_DESTROYED)
964 			return (EINVAL);
965 		return (EPERM);
966 	}
967 	if (mp->m_owner != TID(curthread))
968 		return (EPERM);
969 	return (0);
970 }
971