xref: /freebsd/sys/kern/kern_condvar.c (revision 7660b554bc59a07be0431c17e0e33815818baa69)
/*-
 * Copyright (c) 2000 Jake Burkholder <jake@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/condvar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

/*
 * Common sanity checks for cv_wait* functions.
 */
#define	CV_ASSERT(cvp, mp, td) do {					\
	KASSERT((td) != NULL, ("%s: curthread NULL", __func__));	\
	KASSERT(TD_IS_RUNNING(td), ("%s: not TDS_RUNNING", __func__));	\
	KASSERT((cvp) != NULL, ("%s: cvp NULL", __func__));		\
	KASSERT((mp) != NULL, ("%s: mp NULL", __func__));		\
	mtx_assert((mp), MA_OWNED | MA_NOTRECURSED);			\
} while (0)

#ifdef INVARIANTS
#define	CV_WAIT_VALIDATE(cvp, mp) do {					\
	if (TAILQ_EMPTY(&(cvp)->cv_waitq)) {				\
		/* Only waiter. */					\
		(cvp)->cv_mtx = (mp);					\
	} else {							\
		/*							\
		 * Other waiter; assert that we're using the		\
		 * same mutex.						\
		 */							\
		KASSERT((cvp)->cv_mtx == (mp),				\
		    ("%s: Multiple mutexes", __func__));		\
	}								\
} while (0)

#define	CV_SIGNAL_VALIDATE(cvp) do {					\
	if (!TAILQ_EMPTY(&(cvp)->cv_waitq)) {				\
		KASSERT(mtx_owned((cvp)->cv_mtx),			\
		    ("%s: Mutex not owned", __func__));			\
	}								\
} while (0)

#else
#define	CV_WAIT_VALIDATE(cvp, mp)
#define	CV_SIGNAL_VALIDATE(cvp)
#endif

static void cv_timedwait_end(void *arg);

/*
 * Initialize a condition variable.  Must be called before use.
 */
void
cv_init(struct cv *cvp, const char *desc)
{

	TAILQ_INIT(&cvp->cv_waitq);
	cvp->cv_mtx = NULL;
	cvp->cv_description = desc;
}

/*
 * Destroy a condition variable.  The condition variable must be re-initialized
 * in order to be re-used.
 */
void
cv_destroy(struct cv *cvp)
{

	KASSERT(cv_waitq_empty(cvp), ("%s: cv_waitq non-empty", __func__));
}

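/*
 * Example lifecycle (illustrative sketch; the "foo" softc and its field
 * names are hypothetical, not part of this file).  A condition variable
 * is typically embedded next to the mutex that protects its condition,
 * initialized once, and destroyed only after all waiters are gone:
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		struct cv	sc_cv;
 *		int		sc_ready;
 *	};
 *
 *	mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
 *	cv_init(&sc->sc_cv, "foocv");
 *	...
 *	cv_destroy(&sc->sc_cv);		(asserts the wait queue is empty)
 *	mtx_destroy(&sc->sc_mtx);
 */
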
/*
 * Common code for cv_wait* functions.  All require sched_lock.
 */

/*
 * Switch context.
 */
static __inline void
cv_switch(struct thread *td)
{
	TD_SET_SLEEPING(td);
	td->td_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch();
	CTR3(KTR_PROC, "cv_switch: resume thread %p (pid %d, %s)", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
}

/*
 * Switch context, catching signals.
 */
static __inline int
cv_switch_catch(struct thread *td)
{
	struct proc *p;
	int sig;

	/*
	 * We put ourselves on the sleep queue and start our timeout before
	 * calling cursig, as we could stop there, and a wakeup or a SIGCONT (or
	 * both) could occur while we were stopped.  A SIGCONT would cause us to
	 * be marked as TDS_SLP without resuming us, thus we must be ready for
	 * sleep when cursig is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from cursig,
	 * and TD_ON_SLEEPQ() will return false.
	 */
	td->td_flags |= TDF_SINTR;
	mtx_unlock_spin(&sched_lock);
	p = td->td_proc;
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	sig = cursig(td);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (thread_suspend_check(1))
		sig = SIGSTOP;
	mtx_lock_spin(&sched_lock);
	PROC_UNLOCK(p);
	if (sig != 0) {
		if (TD_ON_SLEEPQ(td))
			cv_waitq_remove(td);
		TD_SET_RUNNING(td);
	} else if (TD_ON_SLEEPQ(td)) {
		cv_switch(td);
	}
	td->td_flags &= ~TDF_SINTR;

	return sig;
}

/*
 * Add a thread to the wait queue of a condition variable.
 */
static __inline void
cv_waitq_add(struct cv *cvp, struct thread *td)
{

	td->td_flags |= TDF_CVWAITQ;
	TD_SET_ON_SLEEPQ(td);
	td->td_wchan = cvp;
	td->td_wmesg = cvp->cv_description;
	CTR3(KTR_PROC, "cv_waitq_add: thread %p (pid %d, %s)", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	TAILQ_INSERT_TAIL(&cvp->cv_waitq, td, td_slpq);
	sched_sleep(td, td->td_priority);
}

/*
 * Wait on a condition variable.  The current thread is placed on the condition
 * variable's wait queue and suspended.  A cv_signal or cv_broadcast on the same
 * condition variable will resume the thread.  The mutex is released before
 * sleeping and will be held on return.  It is recommended that the mutex be
 * held when cv_signal or cv_broadcast are called.
 */
void
cv_wait(struct cv *cvp, struct mtx *mp)
{
	struct thread *td;
	WITNESS_SAVE_DECL(mp);

	td = curthread;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	CV_ASSERT(cvp, mp, td);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->mtx_object,
	    "Waiting on \"%s\"", cvp->cv_description);
	WITNESS_SAVE(&mp->mtx_object, mp);
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration, just give
		 * interrupts a chance, then just return; don't run any other
		 * thread or panic below, in case this is the idle process and
		 * already asleep.
		 */
		return;
	}

	mtx_lock_spin(&sched_lock);

	CV_WAIT_VALIDATE(cvp, mp);

	DROP_GIANT();
	mtx_unlock(mp);

	cv_waitq_add(cvp, td);
	cv_switch(td);

	mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	mtx_lock(mp);
	WITNESS_RESTORE(&mp->mtx_object, mp);
}

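/*
 * Example usage (sketch; "sc" and its fields are illustrative, not part
 * of this file).  The condition must be re-tested in a loop: a broadcast
 * wakes every waiter, and another thread may have consumed the event by
 * the time this one reacquires the mutex:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	while (sc->sc_ready == 0)
 *		cv_wait(&sc->sc_cv, &sc->sc_mtx);
 *	sc->sc_ready = 0;
 *	mtx_unlock(&sc->sc_mtx);
 */
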
/*
 * Wait on a condition variable, allowing interruption by signals.  Return 0 if
 * the thread was resumed with cv_signal or cv_broadcast, EINTR or ERESTART if
 * a signal was caught.  If ERESTART is returned the system call should be
 * restarted if possible.
 */
int
cv_wait_sig(struct cv *cvp, struct mtx *mp)
{
	struct thread *td;
	struct proc *p;
	int rval;
	int sig;
	WITNESS_SAVE_DECL(mp);

	td = curthread;
	p = td->td_proc;
	rval = 0;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	CV_ASSERT(cvp, mp, td);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->mtx_object,
	    "Waiting on \"%s\"", cvp->cv_description);
	WITNESS_SAVE(&mp->mtx_object, mp);

	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration, just give
		 * interrupts a chance, then just return; don't run any other
		 * procs or panic below, in case this is the idle process and
		 * already asleep.
		 */
		return 0;
	}

	mtx_lock_spin(&sched_lock);

	CV_WAIT_VALIDATE(cvp, mp);

	DROP_GIANT();
	mtx_unlock(mp);

	cv_waitq_add(cvp, td);
	sig = cv_switch_catch(td);

	mtx_unlock_spin(&sched_lock);

	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	if (sig == 0) {
		sig = cursig(td);	/* XXXKSE */
		if (sig == 0 && td->td_flags & TDF_INTERRUPT)
			rval = td->td_intrval;
	}
	if (sig != 0) {
		if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
			rval = EINTR;
		else
			rval = ERESTART;
	}
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (p->p_flag & P_WEXIT)
		rval = EINTR;
	PROC_UNLOCK(p);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	mtx_lock(mp);
	WITNESS_RESTORE(&mp->mtx_object, mp);

	return (rval);
}

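/*
 * Example usage (sketch; illustrative names): an interruptible wait must
 * propagate EINTR/ERESTART so the enclosing system call can be aborted
 * or transparently restarted:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	while (sc->sc_ready == 0) {
 *		error = cv_wait_sig(&sc->sc_cv, &sc->sc_mtx);
 *		if (error) {
 *			mtx_unlock(&sc->sc_mtx);
 *			return (error);
 *		}
 *	}
 *	mtx_unlock(&sc->sc_mtx);
 */
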
/*
 * Wait on a condition variable for at most timo/hz seconds.  Returns 0 if the
 * process was resumed by cv_signal or cv_broadcast, EWOULDBLOCK if the timeout
 * expires.
 */
int
cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
{
	struct thread *td;
	int rval;
	WITNESS_SAVE_DECL(mp);

	td = curthread;
	rval = 0;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	CV_ASSERT(cvp, mp, td);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->mtx_object,
	    "Waiting on \"%s\"", cvp->cv_description);
	WITNESS_SAVE(&mp->mtx_object, mp);

	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration, just give
		 * interrupts a chance, then just return; don't run any other
		 * thread or panic below, in case this is the idle process and
		 * already asleep.
		 */
		return 0;
	}

	mtx_lock_spin(&sched_lock);

	CV_WAIT_VALIDATE(cvp, mp);

	DROP_GIANT();
	mtx_unlock(mp);

	cv_waitq_add(cvp, td);
	callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
	cv_switch(td);

	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		rval = EWOULDBLOCK;
	} else if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;
	else if (callout_stop(&td->td_slpcallout) == 0) {
		/*
		 * Work around race with cv_timedwait_end similar to that
		 * between msleep and endtsleep.
		 * Go back to sleep.
		 */
		TD_SET_SLEEPING(td);
		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		td->td_flags &= ~TDF_TIMOFAIL;
	}

	mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	mtx_lock(mp);
	WITNESS_RESTORE(&mp->mtx_object, mp);

	return (rval);
}

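/*
 * Example usage (sketch; illustrative names).  The timeout is given in
 * clock ticks, so timo = n * hz waits roughly n seconds; EWOULDBLOCK
 * means the timeout fired before any signal or broadcast arrived:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	while (sc->sc_ready == 0) {
 *		error = cv_timedwait(&sc->sc_cv, &sc->sc_mtx, 5 * hz);
 *		if (error == EWOULDBLOCK)
 *			break;
 *	}
 *	mtx_unlock(&sc->sc_mtx);
 */
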
/*
 * Wait on a condition variable for at most timo/hz seconds, allowing
 * interruption by signals.  Returns 0 if the thread was resumed by cv_signal
 * or cv_broadcast, EWOULDBLOCK if the timeout expires, and EINTR or ERESTART if
 * a signal was caught.
 */
int
cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
{
	struct thread *td;
	struct proc *p;
	int rval;
	int sig;
	WITNESS_SAVE_DECL(mp);

	td = curthread;
	p = td->td_proc;
	rval = 0;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	CV_ASSERT(cvp, mp, td);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mp->mtx_object,
	    "Waiting on \"%s\"", cvp->cv_description);
	WITNESS_SAVE(&mp->mtx_object, mp);

	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration, just give
		 * interrupts a chance, then just return; don't run any other
		 * thread or panic below, in case this is the idle process and
		 * already asleep.
		 */
		return 0;
	}

	mtx_lock_spin(&sched_lock);

	CV_WAIT_VALIDATE(cvp, mp);

	DROP_GIANT();
	mtx_unlock(mp);

	cv_waitq_add(cvp, td);
	callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
	sig = cv_switch_catch(td);

	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		rval = EWOULDBLOCK;
	} else if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;
	else if (callout_stop(&td->td_slpcallout) == 0) {
		/*
		 * Work around race with cv_timedwait_end similar to that
		 * between msleep and endtsleep.
		 * Go back to sleep.
		 */
		TD_SET_SLEEPING(td);
		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		td->td_flags &= ~TDF_TIMOFAIL;
	}
	mtx_unlock_spin(&sched_lock);

	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	if (sig == 0) {
		sig = cursig(td);
		if (sig == 0 && td->td_flags & TDF_INTERRUPT)
			rval = td->td_intrval;
	}
	if (sig != 0) {
		if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
			rval = EINTR;
		else
			rval = ERESTART;
	}
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (p->p_flag & P_WEXIT)
		rval = EINTR;
	PROC_UNLOCK(p);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	mtx_lock(mp);
	WITNESS_RESTORE(&mp->mtx_object, mp);

	return (rval);
}

/*
 * Common code for signal and broadcast.  Assumes waitq is not empty.  Must be
 * called with sched_lock held.
 */
static __inline void
cv_wakeup(struct cv *cvp)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	td = TAILQ_FIRST(&cvp->cv_waitq);
	KASSERT(td->td_wchan == cvp, ("%s: bogus wchan", __func__));
	KASSERT(td->td_flags & TDF_CVWAITQ, ("%s: not on waitq", __func__));
	cv_waitq_remove(td);
	TD_CLR_SLEEPING(td);
	setrunnable(td);
}

/*
 * Signal a condition variable; wakes up one waiting thread.  Will also wake up
 * the swapper if the process is not in memory, so that it can bring the
 * sleeping process in.  Note that this may also result in additional threads
 * being made runnable.  Should be called with the same mutex as was passed to
 * cv_wait held.
 */
void
cv_signal(struct cv *cvp)
{

	KASSERT(cvp != NULL, ("%s: cvp NULL", __func__));
	mtx_lock_spin(&sched_lock);
	if (!TAILQ_EMPTY(&cvp->cv_waitq)) {
		CV_SIGNAL_VALIDATE(cvp);
		cv_wakeup(cvp);
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Broadcast a signal to a condition variable.  Wakes up all waiting threads.
 * Should be called with the same mutex as was passed to cv_wait held.
 */
void
cv_broadcast(struct cv *cvp)
{

	KASSERT(cvp != NULL, ("%s: cvp NULL", __func__));
	mtx_lock_spin(&sched_lock);
	CV_SIGNAL_VALIDATE(cvp);
	while (!TAILQ_EMPTY(&cvp->cv_waitq))
		cv_wakeup(cvp);
	mtx_unlock_spin(&sched_lock);
}

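/*
 * Example usage (sketch; illustrative names): the producer updates the
 * condition while holding the same mutex the waiters pass to cv_wait(),
 * then wakes one waiter (cv_signal) or all of them (cv_broadcast):
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_ready = 1;
 *	cv_broadcast(&sc->sc_cv);
 *	mtx_unlock(&sc->sc_mtx);
 */
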
/*
 * Remove a thread from the wait queue of its condition variable.  This may be
 * called externally.
 */
void
cv_waitq_remove(struct thread *td)
{
	struct cv *cvp;

	mtx_assert(&sched_lock, MA_OWNED);
	if ((cvp = td->td_wchan) != NULL && td->td_flags & TDF_CVWAITQ) {
		TAILQ_REMOVE(&cvp->cv_waitq, td, td_slpq);
		td->td_flags &= ~TDF_CVWAITQ;
		td->td_wmesg = NULL;
		TD_CLR_ON_SLEEPQ(td);
	}
}

/*
 * Timeout function for cv_timedwait.  Put the thread on the runqueue and set
 * its timeout flag.
 */
static void
cv_timedwait_end(void *arg)
{
	struct thread *td;

	td = arg;
	CTR3(KTR_PROC, "cv_timedwait_end: thread %p (pid %d, %s)",
	    td, td->td_proc->p_pid, td->td_proc->p_comm);
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td)) {
		cv_waitq_remove(td);
		td->td_flags |= TDF_TIMEOUT;
	} else {
		td->td_flags |= TDF_TIMOFAIL;
	}
	TD_CLR_SLEEPING(td);
	setrunnable(td);
	mtx_unlock_spin(&sched_lock);
}

/*
 * For now only abort interruptible waits.
 * The others will have to either complete on their own or have a timeout.
 */
void
cv_abort(struct thread *td)
{

	CTR3(KTR_PROC, "cv_abort: thread %p (pid %d, %s)", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mtx_lock_spin(&sched_lock);
	if ((td->td_flags & (TDF_SINTR|TDF_TIMEOUT)) == TDF_SINTR) {
		if (TD_ON_SLEEPQ(td)) {
			cv_waitq_remove(td);
		}
		TD_CLR_SLEEPING(td);
		setrunnable(td);
	}
	mtx_unlock_spin(&sched_lock);
}

602