xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision ff4b8cb7bd488e9f1e00bf9ed08fa4b377834961)
/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif
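
/*
 * The checks above are compiled in only when libthr is built with
 * _PTHREADS_INVARIANTS defined (a debug build assumption); otherwise the
 * macros expand to nothing.  For example, inserting a mutex that is
 * already linked,
 *
 *	TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
 *	MUTEX_ASSERT_NOT_OWNED(m);	// PANIC: "mutex is on list"
 *
 * is caught at the assertion instead of silently corrupting the queue.
 */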

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
				const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}

	if ((pmutex = (pthread_mutex_t)
		malloc(sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	_thr_umtx_init(&pmutex->m_lock);
	pmutex->m_type = attr->m_type;
	pmutex->m_protocol = attr->m_protocol;
	TAILQ_INIT(&pmutex->m_queue);
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	if (attr->m_protocol == PTHREAD_PRIO_PROTECT)
		pmutex->m_prio = attr->m_ceiling;
	else
		pmutex->m_prio = -1;
	pmutex->m_saved_prio = 0;
	MUTEX_INIT_LINK(pmutex);
	*mutex = pmutex;
	return (0);
}
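
/*
 * Illustrative caller-side sketch (assumed usage, standard POSIX calls
 * only; not code from this library): initializing a mutex through the
 * attribute path validated above.  Passing a NULL attribute pointer
 * selects _pthread_mutexattr_default instead.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
 *	pthread_mutex_init(&m, &attr);	// reaches mutex_init() above
 *	pthread_mutexattr_destroy(&attr);
 */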

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}
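
/*
 * Usage sketch (assumed caller view): a statically initialized mutex is
 * just a NULL pointer until first use; the first lock or trylock call
 * routes through init_static()/init_static_private() to allocate the
 * real structure under _mutex_static_lock.
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	pthread_mutex_lock(&lock);	// allocated on first use
 *	...
 *	pthread_mutex_unlock(&lock);
 */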

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0);
}

int
_mutex_reinit(pthread_mutex_t *mutex)
{
	_thr_umtx_init(&(*mutex)->m_lock);
	TAILQ_INIT(&(*mutex)->m_queue);
	MUTEX_INIT_LINK(*mutex);
	(*mutex)->m_owner = NULL;
	(*mutex)->m_count = 0;
	(*mutex)->m_refcount = 0;
	(*mutex)->m_prio = 0;
	(*mutex)->m_saved_prio = 0;
	return (0);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that a
	 * process-shared mutex should not be inherited, because its
	 * owner is the forking thread, which lives in the parent
	 * process; such mutexes should be removed from the owned mutex
	 * list.  Since process-shared mutexes are currently not
	 * supported, this is not a concern yet.
	 */
	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock = (umtx_t)curthread->tid;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If that fails, the mutex is in use.
		 */
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret)
			return (ret);

		/*
		 * Check the mutex's other fields to see if it is in
		 * use: mostly whether it is a priority mutex type, or
		 * whether condition variables are referencing it.
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be
			 * freed, and set the caller's pointer to NULL.
			 */
			m = *mutex;
			*mutex = NULL;

			THR_UMTX_UNLOCK(curthread, &m->m_lock);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}
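
/*
 * Caller-side sketch (assumed behavior, standard POSIX calls only; m is
 * an already initialized pthread_mutex_t): a mutex that is locked, has
 * queued waiters, or is referenced by a condition variable cannot be
 * destroyed; storage is freed only once it is completely idle.
 *
 *	pthread_mutex_lock(&m);
 *	ret = pthread_mutex_destroy(&m);	// fails, mutex is in use
 *	pthread_mutex_unlock(&m);
 *	ret = pthread_mutex_destroy(&m);	// 0; pointer NULLed, freed
 */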

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes. */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
	const struct timespec * abstime)
{
	struct	timespec ts, ts2;
	struct	pthread_mutex *m;
	int	ret;

	m = *mutex;
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		if (abstime == NULL) {
			THR_UMTX_LOCK(curthread, &m->m_lock);
			ret = 0;
		} else if (__predict_false(
			   abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			   abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = THR_UMTX_TIMEDLOCK(curthread, &m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth doing.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
		if (ret == 0) {
			m->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(m);
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
		}
	}
	return (ret);
}
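
/*
 * Caller-side sketch for the timed path above (assumed usage, standard
 * POSIX calls only; m is an initialized pthread_mutex_t):
 * pthread_mutex_timedlock() takes an absolute CLOCK_REALTIME deadline,
 * which mutex_lock_common() converts into the relative interval the
 * umtx layer expects.
 *
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 5;		// about 5 seconds from now
 *	if (pthread_mutex_timedlock(&m, &deadline) == ETIMEDOUT)
 *		handle_timeout();	// hypothetical recovery helper
 */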

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread	*curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int	ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec	ts1, ts2;
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types. */
		ret = EINVAL;
	}

	return (ret);
}
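
/*
 * Behavior sketch of the self-lock cases above, as seen by a caller
 * holding a recursive mutex (assumed usage, standard POSIX calls only):
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutex_lock(&m);		// owner set, m_count == 0
 *	pthread_mutex_lock(&m);		// self-lock: m_count -> 1
 *	pthread_mutex_unlock(&m);	// m_count -> 0, still owned
 *	pthread_mutex_unlock(&m);	// lock actually released
 */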

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Fail if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		MUTEX_INIT_LINK(m);
		THR_UMTX_UNLOCK(curthread, &m->m_lock);
	}
	return (0);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Fail if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Save and clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	MUTEX_INIT_LINK(m);
	THR_UMTX_UNLOCK(curthread, &m->m_lock);
	return (0);
}
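
/*
 * Assumed outline of how the condition variable code pairs the two
 * helpers above (the real cond-wait internals live elsewhere; this is a
 * sketch, not a copy of that code):
 *
 *	int count;
 *
 *	_mutex_cv_unlock(mutex, &count);  // release, save recursion count
 *	... sleep on the condition variable ...
 *	_mutex_cv_lock(mutex, count);     // reacquire, restore m_count
 *
 * While the mutex is lent out, m_refcount stays raised so that
 * _pthread_mutex_destroy() reports EBUSY instead of freeing it.
 */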

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
			      int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_prio;
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
			      int prioceiling, int *old_ceiling)
{
	int ret = 0;
	int tmp;

	if (*mutex == NULL)
		ret = EINVAL;
	else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
		ret = EINVAL;
	else if ((ret = _pthread_mutex_lock(mutex)) == 0) {
		tmp = (*mutex)->m_prio;
		(*mutex)->m_prio = prioceiling;
		ret = _pthread_mutex_unlock(mutex);

		/* Return the old ceiling. */
		*old_ceiling = tmp;
	}
	return (ret);
}