xref: /freebsd/sys/kern/kern_timeout.c (revision 80ff3b155fc42479ec6c512cae13e709f700d28b)
1 /*-
2  * Copyright (c) 1982, 1986, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
39  * $FreeBSD$
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/callout.h>
45 #include <sys/kernel.h>
46 #include <sys/mutex.h>
47 
48 /*
49  * TODO:
50  *	allocate more timeout table slots when table overflows.
51  */
52 
/* Exported to machdep.c and/or kern_clock.c.  */
struct callout *callout;	/* Backing array for the static timeout table;
				 * allocated elsewhere (machdep/kern_clock). */
struct callout_list callfree;	/* SLIST of unused entries from callout[];
				 * timeout() takes from it, softclock() and
				 * callout_stop() return LOCAL_ALLOC entries. */
int callwheelsize, callwheelbits, callwheelmask;	/* Wheel geometry; mask
				 * is applied as (time & callwheelmask) to
				 * pick a bucket.  Sized elsewhere. */
struct callout_tailq *callwheel;	/* The timing wheel: array of TAILQ
				 * buckets indexed by expiry tick & mask. */
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;	/* Spin mutex guarding the wheel, the free
				 * list, and nextsoftcheck. */

static struct callout *nextsoftcheck;	/* Next callout to be checked. */
62 
63 /*
64  * The callout mechanism is based on the work of Adam M. Costello and
65  * George Varghese, published in a technical report entitled "Redesigning
66  * the BSD Callout and Timer Facilities" and modified slightly for inclusion
67  * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
68  * used in this implementation was published by G.Varghese and A. Lauck in
69  * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
70  * the Efficient Implementation of a Timer Facility" in the Proceedings of
71  * the 11th ACM Annual Symposium on Operating Systems Principles,
72  * Austin, Texas Nov 1987.
73  */
74 
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 *
 * Walks the callout wheel one tick at a time until softticks catches up
 * with ticks, running every callout whose c_time matches the tick being
 * serviced.  The dummy argument is unused — presumably required by the
 * swi registration interface (confirm against the caller).
 */
void
softclock(void *dummy)
{
	register struct callout *c;
	register struct callout_tailq *bucket;
	register int s;
	register int curticks;
	register int steps;	/* #steps since we last allowed interrupts */

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	steps = 0;
	s = splhigh();
	mtx_enter(&callout_lock, MTX_SPIN);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			if (c->c_time != curticks) {
				/*
				 * Not due this tick (the bucket holds every
				 * callout whose expiry hashes here); skip it,
				 * but bound how long we keep interrupts
				 * locked out.
				 */
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_exit(&callout_lock, MTX_SPIN);
					splx(s);
					s = splhigh();
					mtx_enter(&callout_lock, MTX_SPIN);
					/*
					 * callout_stop() may have advanced
					 * nextsoftcheck past a removed entry
					 * while the lock was dropped.
					 */
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				/*
				 * Publish our traversal cursor so that
				 * callout_stop() can keep it valid if it
				 * removes the next entry while we run the
				 * handler with the lock dropped below.
				 */
				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				/*
				 * Snapshot the handler before releasing the
				 * lock: once freed/cleared, c may be reused.
				 */
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					/*
					 * timeout()-style entry: recycle it
					 * onto the free list (this also
					 * clears ACTIVE and PENDING).
					 */
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				mtx_exit(&callout_lock, MTX_SPIN);
				/* Non-MPSAFE handlers run under Giant. */
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_enter(&Giant, MTX_DEF);
				splx(s);
				c_func(c_arg);
				s = splhigh();
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_exit(&Giant, MTX_DEF);
				mtx_enter(&callout_lock, MTX_SPIN);
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	nextsoftcheck = NULL;
	mtx_exit(&callout_lock, MTX_SPIN);
	splx(s);
}
155 
156 /*
157  * timeout --
158  *	Execute a function after a specified length of time.
159  *
160  * untimeout --
161  *	Cancel previous timeout function call.
162  *
163  * callout_handle_init --
164  *	Initialize a handle so that using it with untimeout is benign.
165  *
166  *	See AT&T BCI Driver Reference Manual for specification.  This
167  *	implementation differs from that one in that although an
168  *	identification value is returned from timeout, the original
169  *	arguments to timeout as well as the identifier are used to
170  *	identify entries for untimeout.
171  */
172 struct callout_handle
173 timeout(ftn, arg, to_ticks)
174 	timeout_t *ftn;
175 	void *arg;
176 	int to_ticks;
177 {
178 	int s;
179 	struct callout *new;
180 	struct callout_handle handle;
181 
182 	s = splhigh();
183 	mtx_enter(&callout_lock, MTX_SPIN);
184 
185 	/* Fill in the next free callout structure. */
186 	new = SLIST_FIRST(&callfree);
187 	if (new == NULL)
188 		/* XXX Attempt to malloc first */
189 		panic("timeout table full");
190 	SLIST_REMOVE_HEAD(&callfree, c_links.sle);
191 
192 	callout_reset(new, to_ticks, ftn, arg);
193 
194 	handle.callout = new;
195 	mtx_exit(&callout_lock, MTX_SPIN);
196 	splx(s);
197 	return (handle);
198 }
199 
200 void
201 untimeout(ftn, arg, handle)
202 	timeout_t *ftn;
203 	void *arg;
204 	struct callout_handle handle;
205 {
206 	register int s;
207 
208 	/*
209 	 * Check for a handle that was initialized
210 	 * by callout_handle_init, but never used
211 	 * for a real timeout.
212 	 */
213 	if (handle.callout == NULL)
214 		return;
215 
216 	s = splhigh();
217 	mtx_enter(&callout_lock, MTX_SPIN);
218 	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
219 		callout_stop(handle.callout);
220 	mtx_exit(&callout_lock, MTX_SPIN);
221 	splx(s);
222 }
223 
224 void
225 callout_handle_init(struct callout_handle *handle)
226 {
227 	handle->callout = NULL;
228 }
229 
230 /*
231  * New interface; clients allocate their own callout structures.
232  *
233  * callout_reset() - establish or change a timeout
234  * callout_stop() - disestablish a timeout
235  * callout_init() - initialize a callout structure so that it can
236  *	safely be passed to callout_reset() and callout_stop()
237  *
238  * <sys/callout.h> defines three convenience macros:
239  *
240  * callout_active() - returns truth if callout has not been serviced
241  * callout_pending() - returns truth if callout is still waiting for timeout
242  * callout_deactivate() - marks the callout as having been serviced
243  */
244 void
245 callout_reset(c, to_ticks, ftn, arg)
246 	struct	callout *c;
247 	int	to_ticks;
248 	void	(*ftn) __P((void *));
249 	void	*arg;
250 {
251 	int	s;
252 
253 	s = splhigh();
254 	mtx_enter(&callout_lock, MTX_SPIN);
255 	if (c->c_flags & CALLOUT_PENDING)
256 		callout_stop(c);
257 
258 	/*
259 	 * We could spl down here and back up at the TAILQ_INSERT_TAIL,
260 	 * but there's no point since doing this setup doesn't take much
261 	 * time.
262 	 */
263 	if (to_ticks <= 0)
264 		to_ticks = 1;
265 
266 	c->c_arg = arg;
267 	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
268 	c->c_func = ftn;
269 	c->c_time = ticks + to_ticks;
270 	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
271 			  c, c_links.tqe);
272 	mtx_exit(&callout_lock, MTX_SPIN);
273 	splx(s);
274 }
275 
276 void
277 callout_stop(c)
278 	struct	callout *c;
279 {
280 	int	s;
281 
282 	s = splhigh();
283 	mtx_enter(&callout_lock, MTX_SPIN);
284 	/*
285 	 * Don't attempt to delete a callout that's not on the queue.
286 	 */
287 	if (!(c->c_flags & CALLOUT_PENDING)) {
288 		c->c_flags &= ~CALLOUT_ACTIVE;
289 		mtx_exit(&callout_lock, MTX_SPIN);
290 		splx(s);
291 		return;
292 	}
293 	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
294 
295 	if (nextsoftcheck == c) {
296 		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
297 	}
298 	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
299 	c->c_func = NULL;
300 
301 	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
302 		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
303 	}
304 	mtx_exit(&callout_lock, MTX_SPIN);
305 	splx(s);
306 }
307 
308 void
309 callout_init(c, mpsafe)
310 	struct	callout *c;
311 	int mpsafe;
312 {
313 	bzero(c, sizeof *c);
314 	if (mpsafe)
315 		c->c_flags |= CALLOUT_MPSAFE;
316 }
317 
318 #ifdef APM_FIXUP_CALLTODO
319 /*
320  * Adjust the kernel calltodo timeout list.  This routine is used after
321  * an APM resume to recalculate the calltodo timer list values with the
322  * number of hz's we have been sleeping.  The next hardclock() will detect
323  * that there are fired timers and run softclock() to execute them.
324  *
325  * Please note, I have not done an exhaustive analysis of what code this
326  * might break.  I am motivated to have my select()'s and alarm()'s that
327  * have expired during suspend fire upon resume so that the applications
328  * which set the timer can do the maintenance the timer was for as close
329  * as possible to the originally intended time.  Testing this code for a
330  * week showed that resuming from a suspend resulted in 22 to 25 timers
331  * firing, which seemed independent of whether the suspend was 2 hours or
332  * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
333  */
/*
 * NOTE(review): this routine walks a "calltodo" list via a c_next
 * member, neither of which is declared anywhere in this file; it is
 * compiled only under APM_FIXUP_CALLTODO and appears to predate the
 * timing-wheel representation used above.  Confirm it still builds
 * before enabling that option.
 */
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;
	int s;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		/* Small enough to convert via microseconds without overflow;
		 * round the usec remainder up a full tick, plus one. */
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		/* Larger interval: convert seconds by hz to avoid the
		 * usec multiplication overflowing. */
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	s = splhigh();
	mtx_enter(&callout_lock, MTX_SPIN);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		/* Entries hold deltas relative to their predecessor, so
		 * only the leading entries absorb the sleep time. */
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_exit(&callout_lock, MTX_SPIN);
	splx(s);

	return;
}
385 #endif /* APM_FIXUP_CALLTODO */
386