xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision 59c3cb81c1769fdb6c840c971df129b52f4a848d)
/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * Copyright (c) 2015 The FreeBSD Foundation
 *
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <stdbool.h>
#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"

_Static_assert(sizeof(struct pthread_mutex) <= PAGE_SIZE,
    "pthread_mutex is too large for off-page");

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block
 */
#define MUTEX_ADAPTIVE_SPINS	2000

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
		void *(calloc_cb)(size_t, size_t));
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
				const struct timespec *abstime);
static int	mutex_unlock_common(struct pthread_mutex *, int, int *);
static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
				const struct timespec *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

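/*
 * Reset the mutex queue linkage so that a freshly initialized (or
 * dequeued) mutex is recognizably not on any owned-mutex list when
 * _PTHREADS_INVARIANTS checking is enabled.
 */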
static void
mutex_init_link(struct pthread_mutex *m)
{

#if defined(_PTHREADS_INVARIANTS)
	m->m_qe.tqe_prev = NULL;
	m->m_qe.tqe_next = NULL;
	m->m_pqe.tqe_prev = NULL;
	m->m_pqe.tqe_next = NULL;
#endif
}

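/*
 * Invariant checks: panic if a mutex that must be on the owned-mutex
 * list is not linked, or if a mutex that must not be owned is still
 * linked.  Both checks compile to nothing unless _PTHREADS_INVARIANTS
 * is defined.
 */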
static void
mutex_assert_is_owned(struct pthread_mutex *m)
{

#if defined(_PTHREADS_INVARIANTS)
	if (__predict_false(m->m_qe.tqe_prev == NULL)) {
		char msg[128];
		snprintf(msg, sizeof(msg),
		    "mutex %p own %#x %#x is not on list %p %p",
		    m, m->m_lock.m_owner, m->m_owner, m->m_qe.tqe_prev,
		    m->m_qe.tqe_next);
		PANIC(msg);
	}
#endif
}

static void
mutex_assert_not_owned(struct pthread_mutex *m)
{

#if defined(_PTHREADS_INVARIANTS)
	if (__predict_false(m->m_qe.tqe_prev != NULL ||
	    m->m_qe.tqe_next != NULL)) {
		char msg[128];
		snprintf(msg, sizeof(msg),
		    "mutex %p own %#x %#x is on list %p %p",
		    m, m->m_lock.m_owner, m->m_owner, m->m_qe.tqe_prev,
		    m->m_qe.tqe_next);
		PANIC(msg);
	}
#endif
}

static int
is_pshared_mutex(struct pthread_mutex *m)
{

	return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0);
}

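/*
 * Validate a mutex attribute object: the type and protocol must fall
 * within their supported ranges.
 */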
static int
mutex_check_attr(const struct pthread_mutex_attr *attr)
{

	if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
	    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
		return (EINVAL);
	if (attr->m_protocol < PTHREAD_PRIO_NONE ||
	    attr->m_protocol > PTHREAD_PRIO_PROTECT)
		return (EINVAL);
	return (0);
}

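/*
 * Fill in a pthread_mutex structure according to the given attributes,
 * including the kernel umutex flags for the selected protocol and the
 * spin/yield loop counts for adaptive mutexes.
 */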
static void
mutex_init_body(struct pthread_mutex *pmutex,
    const struct pthread_mutex_attr *attr)
{

	pmutex->m_flags = attr->m_type;
	pmutex->m_owner = 0;
	pmutex->m_count = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	mutex_init_link(pmutex);
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
		break;
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	}
	if (attr->m_pshared == PTHREAD_PROCESS_SHARED)
		pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED;

	if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}
}

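/*
 * Allocate a mutex with the supplied allocator callback and initialize
 * it from the given attributes (or the defaults when none are given).
 */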
static int
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;
	int error;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = mutex_attr;
		error = mutex_check_attr(attr);
		if (error != 0)
			return (error);
	}
	if ((pmutex = (pthread_mutex_t)
		calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);
	mutex_init_body(pmutex, attr);
	*mutex = pmutex;
	return (0);
}

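/*
 * Perform the deferred initialization of a statically initialized
 * mutex.  The static lock serializes racing threads so that only one
 * of them allocates the mutex.
 */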
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == THR_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
		    calloc);
	else
		ret = 0;
	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

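/*
 * Recompute the inherited priority ceiling for a priority-protected
 * mutex from the last PP mutex remaining on the thread's owned list,
 * or reset it to -1 when no such mutex remains.
 */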
static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	m2 = TAILQ_LAST(&curthread->mq[TMQ_NORM_PP], mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

static void
shared_mutex_init(struct pthread_mutex *pmtx, const struct
    pthread_mutex_attr *mutex_attr)
{
	static const struct pthread_mutex_attr foobar_mutex_attr = {
		.m_type = PTHREAD_MUTEX_DEFAULT,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0,
		.m_pshared = PTHREAD_PROCESS_SHARED
	};
	bool done;

	/*
	 * Hack to allow multiple pthread_mutex_init() calls on the
	 * same process-shared mutex.  We rely on the kernel allocating
	 * a zeroed offpage for the mutex, i.e. the
	 * PMUTEX_INITSTAGE_ALLOC value must be zero.
	 */
	for (done = false; !done;) {
		switch (pmtx->m_ps) {
		case PMUTEX_INITSTAGE_DONE:
			atomic_thread_fence_acq();
			done = true;
			break;
		case PMUTEX_INITSTAGE_ALLOC:
			if (atomic_cmpset_int(&pmtx->m_ps,
			    PMUTEX_INITSTAGE_ALLOC, PMUTEX_INITSTAGE_BUSY)) {
				if (mutex_attr == NULL)
					mutex_attr = &foobar_mutex_attr;
				mutex_init_body(pmtx, mutex_attr);
				atomic_store_rel_int(&pmtx->m_ps,
				    PMUTEX_INITSTAGE_DONE);
				done = true;
			}
			break;
		case PMUTEX_INITSTAGE_BUSY:
			_pthread_yield();
			break;
		default:
			PANIC("corrupted offpage");
			break;
		}
	}
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	struct pthread_mutex *pmtx;
	int ret;

	if (mutex_attr != NULL) {
		ret = mutex_check_attr(*mutex_attr);
		if (ret != 0)
			return (ret);
	}
	if (mutex_attr == NULL ||
	    (*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) {
		return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL,
		   calloc));
	}
	pmtx = __thr_pshared_offpage(mutex, 1);
	if (pmtx == NULL)
		return (EFAULT);
	*mutex = THR_PSHARED_PTR;
	shared_mutex_init(pmtx, *mutex_attr);
	return (0);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0,
		.m_pshared = PTHREAD_PROCESS_PRIVATE,
	};
	int ret;

	ret = mutex_init(mutex, &attr, calloc_cb);
	if (ret == 0)
		(*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
	return (ret);
}

/*
 * Fix mutex ownership for the child process.
 *
 * Process-private mutex ownership is transmitted from the forking
 * thread to the child process.
 *
 * Process-shared mutexes should not be inherited because their owner
 * is the forking thread, which lives in the parent process; they are
 * removed from the owned mutex list.
 */
static void
queue_fork(struct pthread *curthread, struct mutex_queue *q,
    struct mutex_queue *qp, uint bit)
{
	struct pthread_mutex *m;

	TAILQ_INIT(q);
	TAILQ_FOREACH(m, qp, m_pqe) {
		TAILQ_INSERT_TAIL(q, m, m_qe);
		m->m_lock.m_owner = TID(curthread) | bit;
		m->m_owner = TID(curthread);
	}
}

void
_mutex_fork(struct pthread *curthread)
{

	queue_fork(curthread, &curthread->mq[TMQ_NORM],
	    &curthread->mq[TMQ_NORM_PRIV], 0);
	queue_fork(curthread, &curthread->mq[TMQ_NORM_PP],
	    &curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED);
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	pthread_mutex_t m, m1;
	int ret;

	m = *mutex;
	if (m < THR_MUTEX_DESTROYED) {
		ret = 0;
	} else if (m == THR_MUTEX_DESTROYED) {
		ret = EINVAL;
	} else {
		if (m == THR_PSHARED_PTR) {
			m1 = __thr_pshared_offpage(mutex, 0);
			if (m1 != NULL) {
				mutex_assert_not_owned(m1);
				__thr_pshared_destroy(mutex);
			}
			*mutex = THR_MUTEX_DESTROYED;
			return (0);
		}
		if (m->m_owner != 0) {
			ret = EBUSY;
		} else {
			*mutex = THR_MUTEX_DESTROYED;
			mutex_assert_not_owned(m);
			free(m);
			ret = 0;
		}
	}

	return (ret);
}

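/*
 * Select the owned-mutex queue for a mutex: priority-protected mutexes
 * are tracked separately from normal ones.
 */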
static int
mutex_qidx(struct pthread_mutex *m)
{

	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (TMQ_NORM);
	return (TMQ_NORM_PP);
}

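/*
 * Record the calling thread as owner and append the mutex to its
 * owned-mutex queue; private (non-pshared) mutexes are also linked on
 * the parallel private queue used across fork().
 */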
static void
enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m)
{
	int qidx;

	m->m_owner = TID(curthread);
	/* Add to the list of owned mutexes: */
	mutex_assert_not_owned(m);
	qidx = mutex_qidx(m);
	TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe);
	if (!is_pshared_mutex(m))
		TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe);
}

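/*
 * Clear the owner and unlink the mutex from the owned-mutex queues,
 * updating the inherited ceiling for priority-protected mutexes.
 */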
static void
dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)
{
	int qidx;

	m->m_owner = 0;
	mutex_assert_is_owned(m);
	qidx = mutex_qidx(m);
	TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe);
	if (!is_pshared_mutex(m))
		TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe);
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0)
		set_inherited_priority(curthread, m);
	mutex_init_link(m);
}

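/*
 * Resolve a pthread_mutex_t to its backing structure, mapping the
 * shared-mutex offpage and completing any pending static or
 * process-shared initialization on first use.
 */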
static int
check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m)
{
	int ret;

	*m = *mutex;
	ret = 0;
	if (*m == THR_PSHARED_PTR) {
		*m = __thr_pshared_offpage(mutex, 0);
		if (*m == NULL)
			ret = EINVAL;
		else
			shared_mutex_init(*m, NULL);
	} else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) {
		if (*m == THR_MUTEX_DESTROYED) {
			ret = EINVAL;
		} else {
			ret = init_static(_get_curthread(), mutex);
			if (ret == 0)
				*m = *mutex;
		}
	}
	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread;
	struct pthread_mutex *m;
	uint32_t id;
	int ret;

	ret = check_and_init_mutex(mutex, &m);
	if (ret != 0)
		return (ret);
	curthread = _get_curthread();
	id = TID(curthread);
	if (m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (__predict_true(ret == 0)) {
		enqueue_mutex(curthread, m);
	} else if (m->m_owner == id) {
		ret = mutex_self_trylock(m);
	} /* else {} */
	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE))
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

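/*
 * Slow path for lock acquisition: spin and then yield according to the
 * mutex's spin/yield loop counts (priority-protected and
 * priority-inheritance mutexes go straight to the kernel), finally
 * blocking in the kernel, with the absolute timeout honoured if one
 * was supplied.
 */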
static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
	const struct timespec *abstime)
{
	uint32_t	id, owner;
	int	count;
	int	ret;

	id = TID(curthread);
	if (m->m_owner == id)
		return (mutex_self_lock(m, abstime));

	/*
	 * For adaptive mutexes, spin for a bit in the expectation
	 * that if the application requests this mutex type then
	 * the lock is likely to be released quickly, which makes
	 * spinning faster than entering the kernel.
	 */
	if (__predict_false(
		(m->m_lock.m_flags &
		 (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
			goto sleep_in_kernel;

	if (!_thr_is_smp)
		goto yield_loop;

	count = m->m_spinloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
				ret = 0;
				goto done;
			}
		}
		CPU_SPINWAIT;
	}

yield_loop:
	count = m->m_yieldloops;
	while (count--) {
		_sched_yield();
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
				ret = 0;
				goto done;
			}
		}
	}

sleep_in_kernel:
	if (abstime == NULL) {
		ret = __thr_umutex_lock(&m->m_lock, id);
	} else if (__predict_false(
		   abstime->tv_nsec < 0 ||
		   abstime->tv_nsec >= 1000000000)) {
		ret = EINVAL;
	} else {
		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
	}
done:
	if (ret == 0)
		enqueue_mutex(curthread, m);

	return (ret);
}

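/*
 * Common lock entry point: try the fast userland path first and fall
 * back to mutex_lock_sleep() on contention.  For private mutexes the
 * thread enters a critical region for the duration of the lock unless
 * the mutex is being reacquired on behalf of a condition variable
 * (cvattach).
 */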
static inline int
mutex_lock_common(struct pthread_mutex *m,
	const struct timespec *abstime, int cvattach)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_ENTER(curthread);
	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
		enqueue_mutex(curthread, m);
		ret = 0;
	} else {
		ret = mutex_lock_sleep(curthread, m, abstime);
	}
	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	int ret;

	_thr_check_init();
	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		ret = mutex_lock_common(m, NULL, 0);
	return (ret);
}

int
__pthread_mutex_timedlock(pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct pthread_mutex *m;
	int ret;

	_thr_check_init();
	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		ret = mutex_lock_common(m, abstime, 0);
	return (ret);
}

int
_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *mp;

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
		shared_mutex_init(mp, NULL);
	} else {
		mp = *mutex;
	}
	return (mutex_unlock_common(mp, 0, NULL));
}

int
_mutex_cv_lock(struct pthread_mutex *m, int count)
{
	int	error;

	error = mutex_lock_common(m, NULL, 1);
	if (error == 0)
		m->m_count = count;
	return (error);
}

int
_mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer)
{

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_count = 0;
	(void)mutex_unlock_common(m, 1, defer);
	return (0);
}

int
_mutex_cv_attach(struct pthread_mutex *m, int count)
{
	struct pthread *curthread = _get_curthread();

	enqueue_mutex(curthread, m);
	m->m_count = count;
	return (0);
}

int
_mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
{
	struct pthread *curthread = _get_curthread();
	int     defered;
	int     error;

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*recurse = mp->m_count;
	mp->m_count = 0;
	dequeue_mutex(curthread, mp);

	/* Will this happen in the real world? */
	if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
		defered = 1;
		mp->m_flags &= ~PMUTEX_FLAG_DEFERED;
	} else
		defered = 0;

	if (defered)  {
		_thr_wake_all(curthread->defer_waiters,
				curthread->nwaiter_defer);
		curthread->nwaiter_defer = 0;
	}
	return (0);
}

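/*
 * Handle a trylock by a thread that already owns the mutex, according
 * to the mutex type.
 */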
static int
mutex_self_trylock(struct pthread_mutex *m)
{
	int	ret;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

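/*
 * Handle a lock request by a thread that already owns the mutex:
 * error-check and adaptive mutexes fail with EDEADLK (or time out for
 * a timed lock), normal mutexes deliberately deadlock, and recursive
 * mutexes bump the lock count.
 */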
static int
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
{
	struct timespec	ts1, ts2;
	int	ret;

	switch (PMUTEX_TYPE(m->m_flags)) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

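/*
 * Common unlock path: verify ownership, handle recursive locks, unlink
 * the mutex from the owned list and release the kernel lock, then wake
 * any waiters whose wakeup was deferred while the mutex was held.
 */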
static int
mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
{
	struct pthread *curthread = _get_curthread();
	uint32_t id;
	int defered, error;

	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
		if (m == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}

	id = TID(curthread);

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != id))
		return (EPERM);

	error = 0;
	if (__predict_false(
		PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
			defered = 1;
			m->m_flags &= ~PMUTEX_FLAG_DEFERED;
		} else
			defered = 0;

		dequeue_mutex(curthread, m);
		error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);

		if (mtx_defer == NULL && defered)  {
			_thr_wake_all(curthread->defer_waiters,
				curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
	}
	if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE)
		THR_CRITICAL_LEAVE(curthread);
	return (error);
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	struct pthread_mutex *m;

	if (*mutex == THR_PSHARED_PTR) {
		m = __thr_pshared_offpage(mutex, 0);
		if (m == NULL)
			return (EINVAL);
		shared_mutex_init(m, NULL);
	} else {
		m = *mutex;
		if (m <= THR_MUTEX_DESTROYED)
			return (EINVAL);
	}
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);
	*prioceiling = m->m_lock.m_ceilings[0];
	return (0);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
	struct pthread *curthread;
	struct pthread_mutex *m, *m1, *m2;
	struct mutex_queue *q, *qp;
	int ret;

	if (*mutex == THR_PSHARED_PTR) {
		m = __thr_pshared_offpage(mutex, 0);
		if (m == NULL)
			return (EINVAL);
		shared_mutex_init(m, NULL);
	} else {
		m = *mutex;
		if (m <= THR_MUTEX_DESTROYED)
			return (EINVAL);
	}
	if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	curthread = _get_curthread();
	if (m->m_owner == TID(curthread)) {
		mutex_assert_is_owned(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			q = &curthread->mq[TMQ_NORM_PP];
			qp = &curthread->mq[TMQ_NORM_PP_PRIV];
			TAILQ_REMOVE(q, m, m_qe);
			if (!is_pshared_mutex(m))
				TAILQ_REMOVE(qp, m, m_pqe);
			TAILQ_FOREACH(m2, q, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					if (!is_pshared_mutex(m)) {
						while (m2 != NULL &&
						    is_pshared_mutex(m2)) {
							m2 = TAILQ_PREV(m2,
							    mutex_queue, m_qe);
						}
						if (m2 == NULL) {
							TAILQ_INSERT_HEAD(qp,
							    m, m_pqe);
						} else {
							TAILQ_INSERT_BEFORE(m2,
							    m, m_pqe);
						}
					}
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(q, m, m_qe);
			if (!is_pshared_mutex(m))
				TAILQ_INSERT_TAIL(qp, m, m_pqe);
		}
	}
	return (0);
}

int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;
	int ret;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		*count = m->m_spinloops;
	return (ret);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;
	int ret;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		m->m_spinloops = count;
	return (ret);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;
	int ret;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		*count = m->m_yieldloops;
	return (ret);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;
	int ret;

	ret = check_and_init_mutex(mutex, &m);
	if (ret == 0)
		m->m_yieldloops = count;
	return (ret);
}

int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	if (*mutex == THR_PSHARED_PTR) {
		m = __thr_pshared_offpage(mutex, 0);
		if (m == NULL)
			return (0);
		shared_mutex_init(m, NULL);
	} else {
		m = *mutex;
		if (m <= THR_MUTEX_DESTROYED)
			return (0);
	}
	return (m->m_owner == TID(_get_curthread()));
}

int
_mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
{
	if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
		if (mp == THR_MUTEX_DESTROYED)
			return (EINVAL);
		return (EPERM);
	}
	if (mp->m_owner != TID(curthread))
		return (EPERM);
	return (0);
}