xref: /freebsd/lib/libthr/thread/thr_mutex.c (revision 065dbdc130d0546419938441ae69f8feb7043553)
/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

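/*
 * When libthr is built with _PTHREADS_INVARIANTS, the macros below
 * maintain and check the queue linkage of each mutex, so corruption of
 * a thread's owned-mutex list is caught early with a PANIC instead of
 * causing silent misbehavior later.
 */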
#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
				const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

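/*
 * Common initialization path: validate the (optional) attributes,
 * allocate the mutex structure, and fill it in.  A non-zero "private"
 * marks the mutex for libc-internal use so that it is released
 * automatically when private mutexes are cleaned up.
 */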
static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}

	if ((pmutex = (pthread_mutex_t)
		malloc(sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	_thr_umtx_init(&pmutex->m_lock);
	pmutex->m_type = attr->m_type;
	pmutex->m_protocol = attr->m_protocol;
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	if (attr->m_protocol == PTHREAD_PRIO_PROTECT)
		pmutex->m_prio = attr->m_ceiling;
	else
		pmutex->m_prio = -1;
	MUTEX_INIT_LINK(pmutex);
	*mutex = pmutex;
	return (0);
}

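/*
 * Lazy initialization for statically initialized mutexes
 * (PTHREAD_MUTEX_INITIALIZER leaves the pointer NULL).  The global
 * _mutex_static_lock serializes racing initializers so the mutex is
 * created exactly once; init_static_private() differs only in marking
 * the resulting mutex private.
 */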
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 1));
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return (mutex_init(mutex, mutex_attr, 0));
}

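/*
 * Illustrative application-side usage of the init path above (a sketch,
 * not part of libthr; error handling omitted for brevity):
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&m, &attr);	- dispatches into mutex_init()
 *	pthread_mutexattr_destroy(&attr);
 *	...
 *	pthread_mutex_destroy(&m);
 */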
int
_mutex_reinit(pthread_mutex_t *mutex)
{
	_thr_umtx_init(&(*mutex)->m_lock);
	MUTEX_INIT_LINK(*mutex);
	(*mutex)->m_owner = NULL;
	(*mutex)->m_count = 0;
	(*mutex)->m_refcount = 0;
	(*mutex)->m_prio = 0;
	return (0);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited: their owner
	 * is the forking thread, which lives in the parent process, so
	 * they would have to be removed from the owned mutex list.
	 * Process-shared mutexes are currently not supported, so this
	 * is not yet a concern.
	 */
	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock = (umtx_t)curthread->tid;
}

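/*
 * Destroy a mutex.  The destroy fails with EBUSY if the mutex is
 * currently locked or if condition variables still reference it
 * (m_refcount != 0).
 */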
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (__predict_false(*mutex == NULL))
		ret = EINVAL;
	else {
		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If that fails, the mutex is in use.
		 */
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret)
			return (ret);

		/*
		 * Check the mutex's other fields to see if it is
		 * still in use, either because it is owned (mostly a
		 * concern for priority mutex types) or because
		 * condition variables are referencing it.
		 */
		if ((*mutex)->m_owner != NULL || (*mutex)->m_refcount != 0) {
			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be
			 * freed, and set the caller's pointer to NULL.
			 */
			m = *mutex;
			*mutex = NULL;

			THR_UMTX_UNLOCK(curthread, &m->m_lock);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}

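/*
 * Fast-path acquisition shared by the trylock entry points: a single
 * THR_UMTX_TRYLOCK attempt, recording ownership on success and
 * deferring to mutex_self_trylock() when the caller already owns the
 * mutex.
 */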
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes. */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	ret;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*mutex == NULL)) {
		ret = init_static_private(curthread, mutex);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_trylock_common(curthread, mutex));
}

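/*
 * Core of the blocking lock paths.  The trylock fast path is attempted
 * first; on contention the thread either blocks indefinitely
 * (abstime == NULL) or converts the caller's absolute CLOCK_REALTIME
 * deadline into a relative timeout for THR_UMTX_TIMEDLOCK.
 */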
static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
	const struct timespec * abstime)
{
	struct  timespec ts, ts2;
	struct	pthread_mutex *m;
	int	ret;

	m = *mutex;
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		if (abstime == NULL) {
			THR_UMTX_LOCK(curthread, &m->m_lock);
			ret = 0;
		} else if (__predict_false(
			   abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			   abstime->tv_nsec >= 1000000000)) {
			ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = THR_UMTX_TIMEDLOCK(curthread, &m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it was
			 * interrupted; it is not worth the effort.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
		if (ret == 0) {
			m->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(m);
			TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
		}
	}
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread *curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
	struct pthread	*curthread;
	int	ret;

	_thr_check_init();

	curthread = _get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abstime));
}

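/*
 * Illustrative caller-side sketch (not part of libthr): the abstime
 * argument is an absolute CLOCK_REALTIME deadline, which
 * mutex_lock_common() converts to a relative timeout.  For a two
 * second wait:
 *
 *	struct timespec abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 2;
 *	if (pthread_mutex_timedlock(&m, &abstime) == ETIMEDOUT)
 *		... the lock was not acquired within two seconds ...
 */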
int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

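/*
 * Relock a mutex on behalf of the condition variable code after a
 * wait, restoring the recursion count that _mutex_cv_unlock() saved
 * and dropping the reference it took.
 */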
int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int	ret;

	ret = mutex_lock_common(_get_curthread(), m, NULL);
	if (ret == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

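/*
 * Handle a trylock by a thread that already owns the mutex: fail with
 * EBUSY for error-check and normal mutexes, bump the recursion count
 * for recursive ones.
 */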
static int
mutex_self_trylock(pthread_mutex_t m)
{
	int	ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

496 mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
497 {
498 	struct timespec	ts1, ts2;
499 	int	ret;
500 
501 	switch (m->m_type) {
502 	case PTHREAD_MUTEX_ERRORCHECK:
503 		if (abstime) {
504 			clock_gettime(CLOCK_REALTIME, &ts1);
505 			TIMESPEC_SUB(&ts2, abstime, &ts1);
506 			__sys_nanosleep(&ts2, NULL);
507 			ret = ETIMEDOUT;
508 		} else {
509 			/*
510 			 * POSIX specifies that mutexes should return
511 			 * EDEADLK if a recursive lock is detected.
512 			 */
513 			ret = EDEADLK;
514 		}
515 		break;
516 
517 	case PTHREAD_MUTEX_NORMAL:
518 		/*
519 		 * What SS2 define as a 'normal' mutex.  Intentionally
520 		 * deadlock on attempts to get a lock you already own.
521 		 */
522 		ret = 0;
523 		if (abstime) {
524 			clock_gettime(CLOCK_REALTIME, &ts1);
525 			TIMESPEC_SUB(&ts2, abstime, &ts1);
526 			__sys_nanosleep(&ts2, NULL);
527 			ret = ETIMEDOUT;
528 		} else {
529 			ts1.tv_sec = 30;
530 			ts1.tv_nsec = 0;
531 			for (;;)
532 				__sys_nanosleep(&ts1, NULL);
533 		}
534 		break;
535 
536 	case PTHREAD_MUTEX_RECURSIVE:
537 		/* Increment the lock count: */
538 		if (m->m_count + 1 > 0) {
539 			m->m_count++;
540 			ret = 0;
541 		} else
542 			ret = EAGAIN;
543 		break;
544 
545 	default:
546 		/* Trap invalid mutex types; */
547 		ret = EINVAL;
548 	}
549 
550 	return (ret);
551 }
552 
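/*
 * Common unlock path: decrement the recursion count if one is held,
 * otherwise clear ownership, unlink the mutex from the owner's queue,
 * and release the underlying umtx lock.
 */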
static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check that the running thread is the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		MUTEX_INIT_LINK(m);
		THR_UMTX_UNLOCK(curthread, &m->m_lock);
	}
	return (0);
}

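/*
 * Release a mutex on behalf of the condition variable code before a
 * wait: the recursion count is handed back through *count (and later
 * restored by _mutex_cv_lock()), and m_refcount is raised so the
 * mutex cannot be destroyed while the wait is in progress.
 */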
int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);

	/*
	 * Check that the running thread is the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	MUTEX_INIT_LINK(m);
	THR_UMTX_UNLOCK(curthread, &m->m_lock);
	return (0);
}

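/*
 * Walk the thread's owned-mutex queue and unlock every mutex that was
 * marked private (libc-internal), so library-held locks are not left
 * dangling.  The _SAFE iterator is required because unlocking removes
 * entries from the queue being traversed.
 */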
void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

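/*
 * Priority-ceiling accessors.  Both succeed only for mutexes created
 * with the PTHREAD_PRIO_PROTECT protocol; the ceiling is the m_prio
 * value stored by mutex_init().
 */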
int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
			      int *prioceiling)
{
	int ret;

	if (*mutex == NULL)
		ret = EINVAL;
	else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
		ret = EINVAL;
	else {
		*prioceiling = (*mutex)->m_prio;
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
			      int prioceiling, int *old_ceiling)
{
	int ret = 0;
	int tmp;

	if (*mutex == NULL)
		ret = EINVAL;
	else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
		ret = EINVAL;
	else if ((ret = _pthread_mutex_lock(mutex)) == 0) {
		tmp = (*mutex)->m_prio;
		(*mutex)->m_prio = prioceiling;
		ret = _pthread_mutex_unlock(mutex);

		/* Return the old ceiling. */
		*old_ceiling = tmp;
	}
	return (ret);
}