/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <limits.h>
#include "un-namespace.h"

#include "thr_private.h"

/*
 * Prototypes
 */
int	__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int	__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		       const struct timespec *abstime);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
		    const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond);
static int cond_broadcast_common(pthread_cond_t *cond);

/*
 * Double underscore versions are cancellation points.  Single underscore
 * versions are not and are provided for libc internal usage (which
 * shouldn't introduce cancellation points).
 */
__weak_reference(__pthread_cond_wait, pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);

__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);

#define CV_PSHARED(cvp)	(((cvp)->__flags & USYNC_PROCESS_SHARED) != 0)

static void
cond_init_body(struct pthread_cond *cvp, const struct pthread_cond_attr *cattr)
{

	if (cattr == NULL) {
		cvp->__clock_id = CLOCK_REALTIME;
	} else {
		if (cattr->c_pshared)
			cvp->__flags |= USYNC_PROCESS_SHARED;
		cvp->__clock_id = cattr->c_clockid;
	}
}
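
/*
 * Illustrative sketch (not part of libthr): the fields written by
 * cond_init_body() come from the application's condattr, filled in by
 * the pthread_condattr_setclock()/pthread_condattr_setpshared() calls.
 * A caller such as
 *
 *	pthread_condattr_t attr;
 *	pthread_cond_t cv;
 *
 *	pthread_condattr_init(&attr);
 *	pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
 *	pthread_cond_init(&cv, &attr);
 *	pthread_condattr_destroy(&attr);
 *
 * ends up with a cv whose __clock_id is CLOCK_MONOTONIC, which
 * determines the clock against which abstime is measured in the wait
 * paths below.
 */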

static int
cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	struct pthread_cond *cvp;
	const struct pthread_cond_attr *cattr;
	int pshared;

	cattr = cond_attr != NULL ? *cond_attr : NULL;
	if (cattr == NULL || cattr->c_pshared == PTHREAD_PROCESS_PRIVATE) {
		pshared = 0;
		cvp = calloc(1, sizeof(struct pthread_cond));
		if (cvp == NULL)
			return (ENOMEM);
	} else {
		pshared = 1;
		cvp = __thr_pshared_offpage(cond, 1);
		if (cvp == NULL)
			return (EFAULT);
	}

	/*
	 * Initialise the condition variable structure:
	 */
	cond_init_body(cvp, cattr);
	*cond = pshared ? THR_PSHARED_PTR : cvp;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_cond_t *cond)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

	if (*cond == NULL)
		ret = cond_init(cond, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_cond_static_lock);

	return (ret);
}

#define CHECK_AND_INIT_COND							\
	if (*cond == THR_PSHARED_PTR) {						\
		cvp = __thr_pshared_offpage(cond, 0);				\
		if (cvp == NULL)						\
			return (EINVAL);					\
	} else if (__predict_false((cvp = (*cond)) <= THR_COND_DESTROYED)) {	\
		if (cvp == THR_COND_INITIALIZER) {				\
			int ret;						\
			ret = init_static(_get_curthread(), cond);		\
			if (ret)						\
				return (ret);					\
		} else if (cvp == THR_COND_DESTROYED) {				\
			return (EINVAL);					\
		}								\
		cvp = *cond;							\
	}
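
/*
 * Illustrative note (not part of libthr): CHECK_AND_INIT_COND is what
 * makes static initialization work.  A condition variable declared as
 *
 *	static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
 *
 * holds the THR_COND_INITIALIZER sentinel until its first use; the
 * first call into a wait, signal or broadcast function then runs
 * init_static(), which allocates the real struct pthread_cond under
 * _cond_static_lock so that concurrent first users cannot race.
 */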

int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{

	*cond = NULL;
	return (cond_init(cond, cond_attr));
}

int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond *cvp;
	int error;

	error = 0;
	if (*cond == THR_PSHARED_PTR) {
		cvp = __thr_pshared_offpage(cond, 0);
		if (cvp != NULL)
			__thr_pshared_destroy(cond);
		*cond = THR_COND_DESTROYED;
	} else if ((cvp = *cond) == THR_COND_INITIALIZER) {
		/* nothing */
	} else if (cvp == THR_COND_DESTROYED) {
		error = EINVAL;
	} else {
		cvp = *cond;
		*cond = THR_COND_DESTROYED;
		free(cvp);
	}
	return (error);
}

/*
 * Cancellation behavior:
 *   A thread may be canceled at the start of the wait.  If the thread
 *   is canceled, it did not consume a wakeup from pthread_cond_signal();
 *   otherwise, it is not canceled.
 *   Thread cancellation never causes a wakeup from pthread_cond_signal()
 *   to be lost.
 */
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
	const struct timespec *abstime, int cancel)
{
	struct pthread	*curthread = _get_curthread();
	int		recurse;
	int		error, error2 = 0;

	error = _mutex_cv_detach(mp, &recurse);
	if (error != 0)
		return (error);

	if (cancel) {
		_thr_cancel_enter2(curthread, 0);
		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
			(struct umutex *)&mp->m_lock, abstime,
			CVWAIT_ABSTIME|CVWAIT_CLOCKID);
		_thr_cancel_leave(curthread, 0);
	} else {
		error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
			(struct umutex *)&mp->m_lock, abstime,
			CVWAIT_ABSTIME|CVWAIT_CLOCKID);
	}

	/*
	 * Note that priority-protected (PP) and robust mutexes may
	 * return interesting error codes here.
	 */
	if (error == 0) {
		error2 = _mutex_cv_lock(mp, recurse);
	} else if (error == EINTR || error == ETIMEDOUT) {
		error2 = _mutex_cv_lock(mp, recurse);
		if (error2 == 0 && cancel)
			_thr_testcancel(curthread);
		if (error == EINTR)
			error = 0;
	} else {
		/* We know that it didn't unlock the mutex. */
		error2 = _mutex_cv_attach(mp, recurse);
		if (error2 == 0 && cancel)
			_thr_testcancel(curthread);
	}
	return (error2 != 0 ? error2 : error);
}

/*
 * The thread waits on a userland queue whenever possible.  When the
 * thread is signaled or broadcast, it is removed from the queue and
 * saved in the signaling thread's defer_waiters[] buffer, but is not
 * woken up until the mutex is unlocked.
 */

static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
	const struct timespec *abstime, int cancel)
{
	struct pthread	*curthread = _get_curthread();
	struct sleepqueue *sq;
	int	recurse;
	int	error;
	int	deferred;

	if (curthread->wchan != NULL)
		PANIC("thread was already on queue.");

	if (cancel)
		_thr_testcancel(curthread);

	_sleepq_lock(cvp);
	/*
	 * Set __has_user_waiters before unlocking the mutex; this allows
	 * us to check it without locking in pthread_cond_signal().
	 */
	cvp->__has_user_waiters = 1;
	deferred = 0;
	(void)_mutex_cv_unlock(mp, &recurse, &deferred);
	curthread->mutex_obj = mp;
	_sleepq_add(cvp, curthread);
	for (;;) {
		_thr_clear_wake(curthread);
		_sleepq_unlock(cvp);
		if (deferred) {
			deferred = 0;
			if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
				(void)_umtx_op_err(&mp->m_lock, UMTX_OP_MUTEX_WAKE2,
					 mp->m_lock.m_flags, 0, 0);
		}
		if (curthread->nwaiter_defer > 0) {
			_thr_wake_all(curthread->defer_waiters,
				curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}

		if (cancel) {
			_thr_cancel_enter2(curthread, 0);
			error = _thr_sleep(curthread, cvp->__clock_id, abstime);
			_thr_cancel_leave(curthread, 0);
		} else {
			error = _thr_sleep(curthread, cvp->__clock_id, abstime);
		}

		_sleepq_lock(cvp);
		if (curthread->wchan == NULL) {
			error = 0;
			break;
		} else if (cancel && SHOULD_CANCEL(curthread)) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
				_sleepq_remove(sq, curthread);
			_sleepq_unlock(cvp);
			curthread->mutex_obj = NULL;
			_mutex_cv_lock(mp, recurse);
			if (!THR_IN_CRITICAL(curthread))
				_pthread_exit(PTHREAD_CANCELED);
			else /* this should not happen */
				return (0);
		} else if (error == ETIMEDOUT) {
			sq = _sleepq_lookup(cvp);
			cvp->__has_user_waiters =
				_sleepq_remove(sq, curthread);
			break;
		}
	}
	_sleepq_unlock(cvp);
	curthread->mutex_obj = NULL;
	_mutex_cv_lock(mp, recurse);
	return (error);
}

static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	const struct timespec *abstime, int cancel)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	int	error;

	CHECK_AND_INIT_COND

	if (*mutex == THR_PSHARED_PTR) {
		mp = __thr_pshared_offpage(mutex, 0);
		if (mp == NULL)
			return (EINVAL);
	} else {
		mp = *mutex;
	}

	if ((error = _mutex_owned(curthread, mp)) != 0)
		return (error);

	if (curthread->attr.sched_policy != SCHED_OTHER ||
	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT|UMUTEX_PRIO_INHERIT|
		USYNC_PROCESS_SHARED)) != 0 ||
	    (cvp->__flags & USYNC_PROCESS_SHARED) != 0)
		return (cond_wait_kernel(cvp, mp, abstime, cancel));
	else
		return (cond_wait_user(cvp, mp, abstime, cancel));
}
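
/*
 * Illustrative sketch (not part of libthr): regardless of which path
 * cond_wait_common() takes, a waiter can return without the condition
 * the application cares about being true, so the canonical caller
 * re-checks its predicate in a loop:
 *
 *	pthread_mutex_lock(&m);
 *	while (!ready)
 *		pthread_cond_wait(&cv, &m);
 *	consume();
 *	pthread_mutex_unlock(&m);
 *
 * Here m, cv, ready and consume() are hypothetical application names.
 */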

int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 0));
}

int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{

	return (cond_wait_common(cond, mutex, NULL, 1));
}

int
_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		       const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}

int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		       const struct timespec *abstime)
{

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}
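
/*
 * Illustrative sketch (not part of libthr): abstime is an absolute
 * deadline measured against the clock selected at init time
 * (CLOCK_REALTIME by default), not a relative interval.  A typical
 * caller builds it from clock_gettime():
 *
 *	struct timespec ts;
 *	int error;
 *
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 5;
 *	error = pthread_cond_timedwait(&cv, &m, &ts);
 *	if (error == ETIMEDOUT)
 *		handle_timeout();
 *
 * where handle_timeout() is a hypothetical application routine.
 */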

static int
cond_signal_common(pthread_cond_t *cond)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread *td;
	struct pthread_cond *cvp;
	struct pthread_mutex *mp;
	struct sleepqueue *sq;
	int	*waddr;
	int	pshared;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_signal((struct ucond *)&cvp->__has_kern_waiters);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	waddr = NULL;
	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}

	td = _sleepq_first(sq);
	mp = td->mutex_obj;
	cvp->__has_user_waiters = _sleepq_remove(sq, td);
	if (mp->m_owner == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
					curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
			&td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERED;
	} else {
		waddr = &td->wake_addr->value;
	}
	_sleepq_unlock(cvp);
	if (waddr != NULL)
		_thr_set_wake(waddr);
	return (0);
}

struct broadcast_arg {
	struct pthread *curthread;
	unsigned int *waddrs[MAX_DEFER_WAITERS];
	int count;
};

static void
drop_cb(struct pthread *td, void *arg)
{
	struct broadcast_arg *ba = arg;
	struct pthread_mutex *mp;
	struct pthread *curthread = ba->curthread;

	mp = td->mutex_obj;
	if (mp->m_owner == TID(curthread)) {
		if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
			_thr_wake_all(curthread->defer_waiters,
				curthread->nwaiter_defer);
			curthread->nwaiter_defer = 0;
		}
		curthread->defer_waiters[curthread->nwaiter_defer++] =
			&td->wake_addr->value;
		mp->m_flags |= PMUTEX_FLAG_DEFERED;
	} else {
		if (ba->count >= MAX_DEFER_WAITERS) {
			_thr_wake_all(ba->waddrs, ba->count);
			ba->count = 0;
		}
		ba->waddrs[ba->count++] = &td->wake_addr->value;
	}
}

static int
cond_broadcast_common(pthread_cond_t *cond)
{
	int    pshared;
	struct pthread_cond *cvp;
	struct sleepqueue *sq;
	struct broadcast_arg ba;

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	CHECK_AND_INIT_COND

	pshared = CV_PSHARED(cvp);

	_thr_ucond_broadcast((struct ucond *)&cvp->__has_kern_waiters);

	if (pshared || cvp->__has_user_waiters == 0)
		return (0);

	ba.curthread = _get_curthread();
	ba.count = 0;

	_sleepq_lock(cvp);
	sq = _sleepq_lookup(cvp);
	if (sq == NULL) {
		_sleepq_unlock(cvp);
		return (0);
	}
	_sleepq_drop(sq, drop_cb, &ba);
	cvp->__has_user_waiters = 0;
	_sleepq_unlock(cvp);
	if (ba.count > 0)
		_thr_wake_all(ba.waddrs, ba.count);
	return (0);
}
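
/*
 * Illustrative sketch (not part of libthr): the deferred-wakeup logic
 * above (PMUTEX_FLAG_DEFERED, defer_waiters[]) exists for the common
 * pattern of signaling while the associated mutex is held:
 *
 *	pthread_mutex_lock(&m);
 *	ready = 1;
 *	pthread_cond_broadcast(&cv);	(waiters are queued, not woken)
 *	pthread_mutex_unlock(&m);	(deferred waiters are woken here)
 *
 * Waking the waiters only once the mutex is released spares them from
 * waking up just to block again on the still-held lock.
 */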

int
_pthread_cond_signal(pthread_cond_t *cond)
{

	return (cond_signal_common(cond));
}

int
_pthread_cond_broadcast(pthread_cond_t *cond)
{

	return (cond_broadcast_common(cond));
}