xref: /freebsd/sys/kern/kern_timeout.c (revision ff7ec58af8053e0778288eb615f1b4abf428361b)
1df8bae1dSRodney W. Grimes /*-
2df8bae1dSRodney W. Grimes  * Copyright (c) 1982, 1986, 1991, 1993
3df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
4df8bae1dSRodney W. Grimes  * (c) UNIX System Laboratories, Inc.
5df8bae1dSRodney W. Grimes  * All or some portions of this file are derived from material licensed
6df8bae1dSRodney W. Grimes  * to the University of California by American Telephone and Telegraph
7df8bae1dSRodney W. Grimes  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8df8bae1dSRodney W. Grimes  * the permission of UNIX System Laboratories, Inc.
9df8bae1dSRodney W. Grimes  *
10df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
11df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
12df8bae1dSRodney W. Grimes  * are met:
13df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
14df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
15df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
16df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
17df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
18df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
19df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
20df8bae1dSRodney W. Grimes  *    without specific prior written permission.
21df8bae1dSRodney W. Grimes  *
22df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
33df8bae1dSRodney W. Grimes  *
34acc8326dSGarrett Wollman  *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
35df8bae1dSRodney W. Grimes  */
36df8bae1dSRodney W. Grimes 
37677b542eSDavid E. O'Brien #include <sys/cdefs.h>
38677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
39677b542eSDavid E. O'Brien 
40df8bae1dSRodney W. Grimes #include <sys/param.h>
41df8bae1dSRodney W. Grimes #include <sys/systm.h>
4215b7a470SPoul-Henning Kamp #include <sys/callout.h>
432c1bb207SColin Percival #include <sys/condvar.h>
44df8bae1dSRodney W. Grimes #include <sys/kernel.h>
45ff7ec58aSRobert Watson #include <sys/ktr.h>
46f34fa851SJohn Baldwin #include <sys/lock.h>
47cb799bfeSJohn Baldwin #include <sys/mutex.h>
4822ee8c4fSPoul-Henning Kamp #include <sys/sysctl.h>
49df8bae1dSRodney W. Grimes 
/*
 * Running averages kept by softclock(), scaled by 1000 and smoothed with
 * a >>8 exponential decay; exported read-only under sysctl debug.*.
 */
static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
5915b7a470SPoul-Henning Kamp /*
6015b7a470SPoul-Henning Kamp  * TODO:
6115b7a470SPoul-Henning Kamp  *	allocate more timeout table slots when table overflows.
6215b7a470SPoul-Henning Kamp  */
6315b7a470SPoul-Henning Kamp 
6415b7a470SPoul-Henning Kamp /* Exported to machdep.c and/or kern_clock.c.  */
struct callout *callout;		/* Static pool, carved out of boot memory. */
struct callout_list callfree;		/* Free list of the pool entries above. */
int callwheelsize, callwheelbits, callwheelmask;	/* Wheel is a power of two. */
struct callout_tailq *callwheel;	/* Hash wheel of callwheelsize buckets. */
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;	/* Spin lock guarding the wheel and free list. */
#ifdef DIAGNOSTIC
struct mtx dont_sleep_in_callout;	/* Held across handlers to catch sleeps. */
#endif

static struct callout *nextsoftcheck;	/* Next callout to be checked. */
7649a74476SColin Percival 
7749a74476SColin Percival /*-
782c1bb207SColin Percival  * Locked by callout_lock:
792c1bb207SColin Percival  *   curr_callout    - If a callout is in progress, it is curr_callout.
802c1bb207SColin Percival  *                     If curr_callout is non-NULL, threads waiting on
812c1bb207SColin Percival  *                     callout_wait will be woken up as soon as the
822c1bb207SColin Percival  *                     relevant callout completes.
832c1bb207SColin Percival  *   wakeup_ctr      - Incremented every time a thread wants to wait
842c1bb207SColin Percival  *                     for a callout to complete.  Modified only when
852c1bb207SColin Percival  *                     curr_callout is non-NULL.
8649a74476SColin Percival  *   wakeup_needed   - If a thread is waiting on callout_wait, then
8749a74476SColin Percival  *                     wakeup_needed is nonzero.  Increased only when
 *                     curr_callout is non-NULL.
892c1bb207SColin Percival  */
static struct callout *curr_callout;	/* Callout currently executing, if any. */
static int wakeup_ctr;			/* Bumped per waiter; see comment above. */
static int wakeup_needed;		/* Nonzero while a drainer is waiting. */

/*-
 * Locked by callout_wait_lock:
 *   callout_wait    - If wakeup_needed is set, callout_wait will be
 *                     triggered after the current callout finishes.
 *   wakeup_done_ctr - Set to the current value of wakeup_ctr after
 *                     callout_wait is triggered.
 */
static struct mtx callout_wait_lock;
static struct cv callout_wait;
static int wakeup_done_ctr;
104df8bae1dSRodney W. Grimes 
105df8bae1dSRodney W. Grimes /*
106219d632cSMatthew Dillon  * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
107219d632cSMatthew Dillon  *
108219d632cSMatthew Dillon  *	This code is called very early in the kernel initialization sequence,
109219d632cSMatthew Dillon  *	and may be called more then once.
110219d632cSMatthew Dillon  */
111219d632cSMatthew Dillon caddr_t
112219d632cSMatthew Dillon kern_timeout_callwheel_alloc(caddr_t v)
113219d632cSMatthew Dillon {
114219d632cSMatthew Dillon 	/*
115219d632cSMatthew Dillon 	 * Calculate callout wheel size
116219d632cSMatthew Dillon 	 */
117219d632cSMatthew Dillon 	for (callwheelsize = 1, callwheelbits = 0;
118219d632cSMatthew Dillon 	     callwheelsize < ncallout;
119219d632cSMatthew Dillon 	     callwheelsize <<= 1, ++callwheelbits)
120219d632cSMatthew Dillon 		;
121219d632cSMatthew Dillon 	callwheelmask = callwheelsize - 1;
122219d632cSMatthew Dillon 
123219d632cSMatthew Dillon 	callout = (struct callout *)v;
124219d632cSMatthew Dillon 	v = (caddr_t)(callout + ncallout);
125219d632cSMatthew Dillon 	callwheel = (struct callout_tailq *)v;
126219d632cSMatthew Dillon 	v = (caddr_t)(callwheel + callwheelsize);
127219d632cSMatthew Dillon 	return(v);
128219d632cSMatthew Dillon }
129219d632cSMatthew Dillon 
130219d632cSMatthew Dillon /*
131219d632cSMatthew Dillon  * kern_timeout_callwheel_init() - initialize previously reserved callwheel
132219d632cSMatthew Dillon  *				   space.
133219d632cSMatthew Dillon  *
134219d632cSMatthew Dillon  *	This code is called just once, after the space reserved for the
135219d632cSMatthew Dillon  *	callout wheel has been finalized.
136219d632cSMatthew Dillon  */
137219d632cSMatthew Dillon void
138219d632cSMatthew Dillon kern_timeout_callwheel_init(void)
139219d632cSMatthew Dillon {
140219d632cSMatthew Dillon 	int i;
141219d632cSMatthew Dillon 
142219d632cSMatthew Dillon 	SLIST_INIT(&callfree);
143219d632cSMatthew Dillon 	for (i = 0; i < ncallout; i++) {
144219d632cSMatthew Dillon 		callout_init(&callout[i], 0);
145219d632cSMatthew Dillon 		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
146219d632cSMatthew Dillon 		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
147219d632cSMatthew Dillon 	}
148219d632cSMatthew Dillon 	for (i = 0; i < callwheelsize; i++) {
149219d632cSMatthew Dillon 		TAILQ_INIT(&callwheel[i]);
150219d632cSMatthew Dillon 	}
1516008862bSJohn Baldwin 	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
15248b0f4b6SKirk McKusick #ifdef DIAGNOSTIC
153d87526cfSPoul-Henning Kamp 	mtx_init(&dont_sleep_in_callout, "dont_sleep_in_callout", NULL, MTX_DEF);
15448b0f4b6SKirk McKusick #endif
1552c1bb207SColin Percival 	mtx_init(&callout_wait_lock, "callout_wait_lock", NULL, MTX_DEF);
1562c1bb207SColin Percival 	cv_init(&callout_wait, "callout_wait");
157219d632cSMatthew Dillon }
158219d632cSMatthew Dillon 
159219d632cSMatthew Dillon /*
160ab36c067SJustin T. Gibbs  * The callout mechanism is based on the work of Adam M. Costello and
161ab36c067SJustin T. Gibbs  * George Varghese, published in a technical report entitled "Redesigning
162ab36c067SJustin T. Gibbs  * the BSD Callout and Timer Facilities" and modified slightly for inclusion
163ab36c067SJustin T. Gibbs  * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
164024035e8SHiten Pandya  * used in this implementation was published by G. Varghese and T. Lauck in
165ab36c067SJustin T. Gibbs  * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
166ab36c067SJustin T. Gibbs  * the Efficient Implementation of a Timer Facility" in the Proceedings of
167ab36c067SJustin T. Gibbs  * the 11th ACM Annual Symposium on Operating Systems Principles,
168ab36c067SJustin T. Gibbs  * Austin, Texas Nov 1987.
169ab36c067SJustin T. Gibbs  */
170a50ec505SPoul-Henning Kamp 
171ab36c067SJustin T. Gibbs /*
172df8bae1dSRodney W. Grimes  * Software (low priority) clock interrupt.
173df8bae1dSRodney W. Grimes  * Run periodic events from timeout queue.
174df8bae1dSRodney W. Grimes  */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;	/* Total bucket entries examined this invocation. */
	int mpcalls;	/* Handlers run without Giant (CALLOUT_MPSAFE). */
	int gcalls;	/* Handlers run while holding Giant. */
	int wakeup_cookie;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	/* Catch softticks up to ticks, one wheel bucket per tick. */
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				/* Hash collision: due on a later wheel pass. */
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					/*
					 * Publish our position so others can
					 * fix it up if they unlink c while
					 * the lock is dropped.
					 */
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				/*
				 * This callout fires now.  Snapshot its
				 * fields before dropping the lock; c may be
				 * reused as soon as it is back on callfree.
				 */
				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					/* timeout()-style: recycle the slot. */
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				/*
				 * Mark the handler as running and drop the
				 * spin lock; non-MPSAFE handlers still need
				 * Giant.
				 */
				curr_callout = c;
				mtx_unlock_spin(&callout_lock);
				if (!(c_flags & CALLOUT_MPSAFE)) {
					mtx_lock(&Giant);
					gcalls++;
					CTR1(KTR_CALLOUT, "callout %p", c_func);
				} else {
					mpcalls++;
					CTR1(KTR_CALLOUT, "callout mpsafe %p",
					    c_func);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
				mtx_lock(&dont_sleep_in_callout);
#endif
				/* Run the handler with no callout lock held. */
				c_func(c_arg);
#ifdef DIAGNOSTIC
				mtx_unlock(&dont_sleep_in_callout);
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				/* Complain (rate-limited) about slow handlers. */
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_unlock(&Giant);
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (wakeup_needed) {
					/*
					 * There might be someone waiting
					 * for the callout to complete.
					 */
					wakeup_cookie = wakeup_ctr;
					mtx_unlock_spin(&callout_lock);
					mtx_lock(&callout_wait_lock);
					cv_broadcast(&callout_wait);
					wakeup_done_ctr = wakeup_cookie;
					mtx_unlock(&callout_wait_lock);
					mtx_lock_spin(&callout_lock);
					wakeup_needed = 0;
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	/* Fold this pass into the exponentially decayed sysctl averages. */
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}
307df8bae1dSRodney W. Grimes 
308df8bae1dSRodney W. Grimes /*
309df8bae1dSRodney W. Grimes  * timeout --
310df8bae1dSRodney W. Grimes  *	Execute a function after a specified length of time.
311df8bae1dSRodney W. Grimes  *
312df8bae1dSRodney W. Grimes  * untimeout --
313df8bae1dSRodney W. Grimes  *	Cancel previous timeout function call.
314df8bae1dSRodney W. Grimes  *
315ab36c067SJustin T. Gibbs  * callout_handle_init --
316ab36c067SJustin T. Gibbs  *	Initialize a handle so that using it with untimeout is benign.
317ab36c067SJustin T. Gibbs  *
318df8bae1dSRodney W. Grimes  *	See AT&T BCI Driver Reference Manual for specification.  This
319ab36c067SJustin T. Gibbs  *	implementation differs from that one in that although an
320ab36c067SJustin T. Gibbs  *	identification value is returned from timeout, the original
321ab36c067SJustin T. Gibbs  *	arguments to timeout as well as the identifier are used to
322ab36c067SJustin T. Gibbs  *	identify entries for untimeout.
323df8bae1dSRodney W. Grimes  */
324ab36c067SJustin T. Gibbs struct callout_handle
325ab36c067SJustin T. Gibbs timeout(ftn, arg, to_ticks)
3268f03c6f1SBruce Evans 	timeout_t *ftn;
327df8bae1dSRodney W. Grimes 	void *arg;
328e82ac18eSJonathan Lemon 	int to_ticks;
329df8bae1dSRodney W. Grimes {
330ab36c067SJustin T. Gibbs 	struct callout *new;
331ab36c067SJustin T. Gibbs 	struct callout_handle handle;
332df8bae1dSRodney W. Grimes 
3339ed346baSBosko Milekic 	mtx_lock_spin(&callout_lock);
334df8bae1dSRodney W. Grimes 
335df8bae1dSRodney W. Grimes 	/* Fill in the next free callout structure. */
336ab36c067SJustin T. Gibbs 	new = SLIST_FIRST(&callfree);
337ab36c067SJustin T. Gibbs 	if (new == NULL)
338ab36c067SJustin T. Gibbs 		/* XXX Attempt to malloc first */
339df8bae1dSRodney W. Grimes 		panic("timeout table full");
340ab36c067SJustin T. Gibbs 	SLIST_REMOVE_HEAD(&callfree, c_links.sle);
341df8bae1dSRodney W. Grimes 
342acc8326dSGarrett Wollman 	callout_reset(new, to_ticks, ftn, arg);
343acc8326dSGarrett Wollman 
344ab36c067SJustin T. Gibbs 	handle.callout = new;
3459ed346baSBosko Milekic 	mtx_unlock_spin(&callout_lock);
346ab36c067SJustin T. Gibbs 	return (handle);
347df8bae1dSRodney W. Grimes }
348df8bae1dSRodney W. Grimes 
349df8bae1dSRodney W. Grimes void
350ab36c067SJustin T. Gibbs untimeout(ftn, arg, handle)
3518f03c6f1SBruce Evans 	timeout_t *ftn;
352df8bae1dSRodney W. Grimes 	void *arg;
353ab36c067SJustin T. Gibbs 	struct callout_handle handle;
354df8bae1dSRodney W. Grimes {
355df8bae1dSRodney W. Grimes 
356ab36c067SJustin T. Gibbs 	/*
357ab36c067SJustin T. Gibbs 	 * Check for a handle that was initialized
358ab36c067SJustin T. Gibbs 	 * by callout_handle_init, but never used
359ab36c067SJustin T. Gibbs 	 * for a real timeout.
360ab36c067SJustin T. Gibbs 	 */
361ab36c067SJustin T. Gibbs 	if (handle.callout == NULL)
362ab36c067SJustin T. Gibbs 		return;
363df8bae1dSRodney W. Grimes 
3649ed346baSBosko Milekic 	mtx_lock_spin(&callout_lock);
365acc8326dSGarrett Wollman 	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
366acc8326dSGarrett Wollman 		callout_stop(handle.callout);
3679ed346baSBosko Milekic 	mtx_unlock_spin(&callout_lock);
368df8bae1dSRodney W. Grimes }
369df8bae1dSRodney W. Grimes 
void
callout_handle_init(struct callout_handle *handle)
{
	/* A NULL callout marks the handle as unused; see untimeout(). */
	handle->callout = NULL;
}
375ab36c067SJustin T. Gibbs 
376acc8326dSGarrett Wollman /*
377acc8326dSGarrett Wollman  * New interface; clients allocate their own callout structures.
378acc8326dSGarrett Wollman  *
379acc8326dSGarrett Wollman  * callout_reset() - establish or change a timeout
380acc8326dSGarrett Wollman  * callout_stop() - disestablish a timeout
381acc8326dSGarrett Wollman  * callout_init() - initialize a callout structure so that it can
382acc8326dSGarrett Wollman  *	safely be passed to callout_reset() and callout_stop()
383acc8326dSGarrett Wollman  *
3849b8b58e0SJonathan Lemon  * <sys/callout.h> defines three convenience macros:
385acc8326dSGarrett Wollman  *
3869b8b58e0SJonathan Lemon  * callout_active() - returns truth if callout has not been serviced
3879b8b58e0SJonathan Lemon  * callout_pending() - returns truth if callout is still waiting for timeout
3889b8b58e0SJonathan Lemon  * callout_deactivate() - marks the callout as having been serviced
389acc8326dSGarrett Wollman  */
void
callout_reset(c, to_ticks, ftn, arg)
	struct	callout *c;
	int	to_ticks;
	void	(*ftn)(void *);
	void	*arg;
{

	mtx_lock_spin(&callout_lock);
	if (c == curr_callout && wakeup_needed) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress, and someone has called
		 * callout_drain to kill that callout.  Don't reschedule.
		 */
		mtx_unlock_spin(&callout_lock);
		return;
	}
	if (c->c_flags & CALLOUT_PENDING) {
		/*
		 * Still on the wheel: unlink it before requeueing, and
		 * keep softclock()'s scan cursor valid if it points here.
		 */
		if (nextsoftcheck == c) {
			nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		/*
		 * Part of the normal "stop a pending callout" process
		 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
		 * flags.  We're not going to bother doing that here,
		 * because we're going to be setting those flags ten lines
		 * after this point, and we're holding callout_lock
		 * between now and then.
		 */
	}

	/*
	 * We could unlock callout_lock here and lock it again before the
	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
	 * doesn't take much time.
	 */
	/* A timeout in the past (or zero) fires on the very next tick. */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
			  c, c_links.tqe);
	mtx_unlock_spin(&callout_lock);
}
441acc8326dSGarrett Wollman 
/*
 * _callout_stop_safe() - backend shared by callout_stop() and
 * callout_drain().  With 'safe' nonzero, if the callout is currently
 * executing, sleep until its handler has finished before returning.
 * Returns 1 if the callout was pending and has been removed, 0 otherwise.
 */
int
_callout_stop_safe(c, safe)
	struct	callout *c;
	int	safe;
{
	int wakeup_cookie;

	mtx_lock_spin(&callout_lock);
	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		if (c == curr_callout && safe) {
			/* We need to wait until the callout is finished. */
			wakeup_needed = 1;
			wakeup_cookie = wakeup_ctr++;
			mtx_unlock_spin(&callout_lock);
			mtx_lock(&callout_wait_lock);

			/*
			 * Check to make sure that softclock() didn't
			 * do the wakeup in between our dropping
			 * callout_lock and picking up callout_wait_lock
			 */
			if (wakeup_cookie - wakeup_done_ctr > 0)
				cv_wait(&callout_wait, &callout_wait_lock);

			mtx_unlock(&callout_wait_lock);
		} else
			mtx_unlock_spin(&callout_lock);
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	/* Unlink from the wheel; keep softclock()'s cursor valid. */
	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
	c->c_func = NULL;

	/* timeout()-allocated entries go back on the free list. */
	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	return (1);
}
489acc8326dSGarrett Wollman 
490acc8326dSGarrett Wollman void
491e82ac18eSJonathan Lemon callout_init(c, mpsafe)
492acc8326dSGarrett Wollman 	struct	callout *c;
493e82ac18eSJonathan Lemon 	int mpsafe;
494acc8326dSGarrett Wollman {
4957347e1c6SGarrett Wollman 	bzero(c, sizeof *c);
496e82ac18eSJonathan Lemon 	if (mpsafe)
497e82ac18eSJonathan Lemon 		c->c_flags |= CALLOUT_MPSAFE;
498acc8326dSGarrett Wollman }
499acc8326dSGarrett Wollman 
500e1d6dc65SNate Williams #ifdef APM_FIXUP_CALLTODO
501e1d6dc65SNate Williams /*
502e1d6dc65SNate Williams  * Adjust the kernel calltodo timeout list.  This routine is used after
503e1d6dc65SNate Williams  * an APM resume to recalculate the calltodo timer list values with the
504e1d6dc65SNate Williams  * number of hz's we have been sleeping.  The next hardclock() will detect
505e1d6dc65SNate Williams  * that there are fired timers and run softclock() to execute them.
506e1d6dc65SNate Williams  *
507e1d6dc65SNate Williams  * Please note, I have not done an exhaustive analysis of what code this
508e1d6dc65SNate Williams  * might break.  I am motivated to have my select()'s and alarm()'s that
509e1d6dc65SNate Williams  * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
515e1d6dc65SNate Williams  */
/*
 * NOTE(review): 'calltodo' and the 'c_next' linkage used below are not
 * declared anywhere in this file; this #ifdef APM_FIXUP_CALLTODO code
 * appears to predate the callwheel and likely will not compile as-is —
 * verify before enabling the option.
 */
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	/* Clamp so the per-entry c_time subtraction stays in int range. */
	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	mtx_lock_spin(&callout_lock);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_unlock_spin(&callout_lock);

	return;
}
564e1d6dc65SNate Williams #endif /* APM_FIXUP_CALLTODO */
565