xref: /freebsd/sys/kern/kern_timeout.c (revision 6e8394b8baa7d5d9153ab90de6824bcd19b3b4e1)
1 /*-
2  * Copyright (c) 1982, 1986, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
39  *	$Id: kern_timeout.c,v 1.56 1999/03/06 04:46:19 wollman Exp $
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/callout.h>
45 #include <sys/kernel.h>
46 
47 /*
48  * TODO:
49  *	allocate more timeout table slots when table overflows.
50  */
51 
52 /* Exported to machdep.c and/or kern_clock.c.  */
53 struct callout *callout;
54 struct callout_list callfree;
55 int callwheelsize, callwheelbits, callwheelmask;
56 struct callout_tailq *callwheel;
57 int softticks;			/* Like ticks, but for softclock(). */
58 
59 static struct callout *nextsoftcheck;	/* Next callout to be checked. */
60 
61 /*
62  * The callout mechanism is based on the work of Adam M. Costello and
63  * George Varghese, published in a technical report entitled "Redesigning
64  * the BSD Callout and Timer Facilities" and modified slightly for inclusion
65  * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
66  * used in this implementation was published by G. Varghese and A. Lauck in
67  * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
68  * the Efficient Implementation of a Timer Facility" in the Proceedings of
69  * the 11th ACM Annual Symposium on Operating Systems Principles,
70  * Austin, Texas Nov 1987.
71  */
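
/*
 * A minimal sketch of the hashing the wheel relies on (the power-of-two
 * sizing of the wheel happens elsewhere and is assumed here): callwheelmask
 * is callwheelsize - 1, so a callout due at c_time lands in exactly one
 * bucket, and insertion, removal and the per-tick scan in softclock() each
 * touch only that bucket:
 *
 *	struct callout_tailq *bucket;
 *
 *	bucket = &callwheel[c->c_time & callwheelmask];
 *	TAILQ_INSERT_TAIL(bucket, c, c_links.tqe);
 */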
72 
73 /*
74  * Software (low priority) clock interrupt.
75  * Run periodic events from timeout queue.
76  */
77 void
78 softclock()
79 {
80 	register struct callout *c;
81 	register struct callout_tailq *bucket;
82 	register int s;
83 	register int curticks;
84 	register int steps;	/* #steps since we last allowed interrupts */
85 
86 #ifndef MAX_SOFTCLOCK_STEPS
87 #define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
88 #endif /* MAX_SOFTCLOCK_STEPS */
89 
90 	steps = 0;
91 	s = splhigh();
92 	while (softticks != ticks) {
93 		softticks++;
94 		/*
95 		 * softticks may be modified by hard clock, so cache
96 		 * it while we work on a given bucket.
97 		 */
98 		curticks = softticks;
99 		bucket = &callwheel[curticks & callwheelmask];
100 		c = TAILQ_FIRST(bucket);
101 		while (c) {
102 			if (c->c_time != curticks) {
103 				c = TAILQ_NEXT(c, c_links.tqe);
104 				++steps;
105 				if (steps >= MAX_SOFTCLOCK_STEPS) {
106 					nextsoftcheck = c;
107 					/* Give interrupts a chance. */
108 					splx(s);
109 					s = splhigh();
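					/*
					 * An interrupt handler may have run
					 * callout_stop() in that window and
					 * advanced nextsoftcheck past a
					 * removed entry, so reload our
					 * cursor from it rather than
					 * trusting c.
					 */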
110 					c = nextsoftcheck;
111 					steps = 0;
112 				}
113 			} else {
114 				void (*c_func)(void *);
115 				void *c_arg;
116 
117 				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
118 				TAILQ_REMOVE(bucket, c, c_links.tqe);
119 				c_func = c->c_func;
120 				c_arg = c->c_arg;
121 				c->c_func = NULL;
122 				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
123 					c->c_flags = CALLOUT_LOCAL_ALLOC;
124 					SLIST_INSERT_HEAD(&callfree, c,
125 							  c_links.sle);
126 				} else {
127 					c->c_flags =
128 						(c->c_flags & ~CALLOUT_PENDING)
129 						| CALLOUT_FIRED;
130 				}
131 				splx(s);
132 				c_func(c_arg);
133 				s = splhigh();
134 				steps = 0;
135 				c = nextsoftcheck;
136 			}
137 		}
138 	}
139 	nextsoftcheck = NULL;
140 	splx(s);
141 }
142 
143 /*
144  * timeout --
145  *	Execute a function after a specified length of time.
146  *
147  * untimeout --
148  *	Cancel previous timeout function call.
149  *
150  * callout_handle_init --
151  *	Initialize a handle so that using it with untimeout is benign.
152  *
153  *	See AT&T BCI Driver Reference Manual for specification.  This
154  *	implementation differs from that one in that although an
155  *	identification value is returned from timeout, the original
156  *	arguments to timeout as well as the identifier are used to
157  *	identify entries for untimeout.
158  */
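
/*
 * A minimal usage sketch (hypothetical driver code; mydev_watchdog(), the
 * softc pointer sc and the two-second period are assumptions, not taken
 * from this file):
 *
 *	static struct callout_handle mydev_ch;
 *
 *	callout_handle_init(&mydev_ch);
 *	mydev_ch = timeout(mydev_watchdog, sc, 2 * hz);
 *	...
 *	untimeout(mydev_watchdog, sc, mydev_ch);
 *
 * callout_handle_init() makes a later untimeout() benign even if the
 * timeout() call was never reached, and untimeout() only cancels the entry
 * when both the function and the argument match the handle.
 */
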
159 struct callout_handle
160 timeout(ftn, arg, to_ticks)
161 	timeout_t *ftn;
162 	void *arg;
163 	register int to_ticks;
164 {
165 	int s;
166 	struct callout *new;
167 	struct callout_handle handle;
168 
169 	s = splhigh();
170 
171 	/* Fill in the next free callout structure. */
172 	new = SLIST_FIRST(&callfree);
173 	if (new == NULL)
174 		/* XXX Attempt to malloc first */
175 		panic("timeout table full");
176 	SLIST_REMOVE_HEAD(&callfree, c_links.sle);
177 
178 	callout_reset(new, to_ticks, ftn, arg);
179 
180 	handle.callout = new;
181 	splx(s);
182 	return (handle);
183 }
184 
185 void
186 untimeout(ftn, arg, handle)
187 	timeout_t *ftn;
188 	void *arg;
189 	struct callout_handle handle;
190 {
191 	register int s;
192 
193 	/*
194 	 * Check for a handle that was initialized
195 	 * by callout_handle_init, but never used
196 	 * for a real timeout.
197 	 */
198 	if (handle.callout == NULL)
199 		return;
200 
201 	s = splhigh();
202 	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
203 		callout_stop(handle.callout);
204 	splx(s);
205 }
206 
207 void
208 callout_handle_init(struct callout_handle *handle)
209 {
210 	handle->callout = NULL;
211 }
212 
213 /*
214  * New interface; clients allocate their own callout structures.
215  *
216  * callout_reset() - establish or change a timeout
217  * callout_stop() - disestablish a timeout
218  * callout_init() - initialize a callout structure so that it can
219  *	safely be passed to callout_reset() and callout_stop()
220  *
221  * <sys/callout.h> defines two convenience macros:
222  *
223  * callout_pending() - returns number of ticks until callout fires, or 0
224  *	if not scheduled
225  * callout_fired() - returns truth if callout has already been fired
226  */
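
/*
 * A minimal usage sketch of this interface (hypothetical driver code;
 * mydev_tick() and the softc pointer sc are assumptions, not taken from
 * this file):
 *
 *	static struct callout mydev_callout;
 *
 *	callout_init(&mydev_callout);
 *	callout_reset(&mydev_callout, hz / 10, mydev_tick, sc);
 *	...
 *	callout_stop(&mydev_callout);
 *
 * callout_init() must run once before the structure is first passed to
 * callout_reset() or callout_stop(); callout_reset() on a still-pending
 * callout simply reschedules it, and callout_stop() on an idle callout is
 * a no-op.
 */
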
227 void
228 callout_reset(c, to_ticks, ftn, arg)
229 	struct	callout *c;
230 	int	to_ticks;
231 	void	(*ftn) __P((void *));
232 	void	*arg;
233 {
234 	int	s;
235 
236 	s = splhigh();
237 	if (c->c_flags & CALLOUT_PENDING)
238 		callout_stop(c);
239 
240 	/*
241 	 * We could spl down here and back up at the TAILQ_INSERT_TAIL,
242 	 * but there's no point since doing this setup doesn't take much
243 	 * time.
244 	 */
245 	if (to_ticks <= 0)
246 		to_ticks = 1;
247 
248 	c->c_arg = arg;
249 	c->c_flags = (c->c_flags & ~CALLOUT_FIRED) | CALLOUT_PENDING;
250 	c->c_func = ftn;
251 	c->c_time = ticks + to_ticks;
252 	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
253 			  c, c_links.tqe);
254 	splx(s);
255 
256 }
257 
258 void
259 callout_stop(c)
260 	struct	callout *c;
261 {
262 	int	s;
263 
264 	s = splhigh();
265 	/*
266 	 * Don't attempt to delete a callout that's not on the queue.
267 	 */
268 	if (!(c->c_flags & CALLOUT_PENDING)) {
269 		splx(s);
270 		return;
271 	}
272 	c->c_flags &= ~CALLOUT_PENDING;
273 
274 	if (nextsoftcheck == c) {
275 		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
276 	}
277 	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
278 	c->c_func = NULL;
279 
280 	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
281 		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
282 	}
283 	splx(s);
284 }
285 
286 void
287 callout_init(c)
288 	struct	callout *c;
289 {
290 	bzero(c, sizeof *c);
291 }
292 
293 #ifdef APM_FIXUP_CALLTODO
294 /*
295  * Adjust the kernel calltodo timeout list.  This routine is used after
296  * an APM resume to recalculate the calltodo timer list values with the
297  * number of ticks we have been asleep.  The next hardclock() will detect
298  * that there are fired timers and run softclock() to execute them.
299  *
300  * Please note, I have not done an exhaustive analysis of what code this
301  * might break.  I am motivated to have my select()'s and alarm()'s that
302  * have expired during suspend fire upon resume so that the applications
303  * which set the timer can do the maintenance the timer was for as close
304  * as possible to the originally intended time.  Testing this code for a
305  * week showed that resuming from a suspend resulted in 22 to 25 timers
306  * firing, which seemed independent of whether the suspend was 2 hours or
307  * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
308  */
309 void
310 adjust_timeout_calltodo(time_change)
311     struct timeval *time_change;
312 {
313 	register struct callout *p;
314 	unsigned long delta_ticks;
315 	int s;
316 
317 	/*
318 	 * How many ticks were we asleep?
319 	 * (stolen from tvtohz()).
320 	 */
321 
322 	/* Don't do anything */
323 	if (time_change->tv_sec < 0)
324 		return;
325 	else if (time_change->tv_sec <= LONG_MAX / 1000000)
326 		delta_ticks = (time_change->tv_sec * 1000000 +
327 			       time_change->tv_usec + (tick - 1)) / tick + 1;
328 	else if (time_change->tv_sec <= LONG_MAX / hz)
329 		delta_ticks = time_change->tv_sec * hz +
330 			      (time_change->tv_usec + (tick - 1)) / tick + 1;
331 	else
332 		delta_ticks = LONG_MAX;
333 
334 	if (delta_ticks > INT_MAX)
335 		delta_ticks = INT_MAX;
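
	/*
	 * Worked example (assuming hz = 100, so tick = 10000 usec): a suspend
	 * of 2.5 seconds arrives as tv_sec = 2, tv_usec = 500000, and the
	 * first branch above yields
	 * (2 * 1000000 + 500000 + 9999) / 10000 + 1 = 251 ticks, i.e. the
	 * conversion rounds up and adds one extra tick, as tvtohz() would.
	 */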
336 
337 	/*
338 	 * Now rip through the timer calltodo list looking for timers
339 	 * to expire.
340 	 */
341 
342 	/* don't collide with softclock() */
343 	s = splhigh();
344 	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
345 		p->c_time -= delta_ticks;
346 
347 		/* Break if the timer had more time on it than delta_ticks */
348 		if (p->c_time > 0)
349 			break;
350 
351 		/* take back the ticks the timer didn't use (p->c_time <= 0) */
352 		delta_ticks = -p->c_time;
353 	}
354 	splx(s);
355 
356 	return;
357 }
358 #endif /* APM_FIXUP_CALLTODO */
359