xref: /freebsd/sys/kern/kern_timeout.c (revision 6990ffd8a95caaba6858ad44ff1b3157d1efba8f)
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c.  */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;

static struct callout *nextsoftcheck;	/* Next callout to be checked. */
/*
 * kern_timeout_callwheel_alloc() - low-level kernel callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	/*
	 * Calculate the callout wheel size: round ncallout up to the
	 * next power of two so that (tick & callwheelmask) can serve
	 * as the bucket hash.
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	callout = (struct callout *)v;
	v = (caddr_t)(callout + ncallout);
	callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(callwheel + callwheelsize);
	return (v);
}
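
/*
 * Worked example of the sizing loop above (the numbers are purely
 * illustrative, not a statement about any particular configuration):
 * with ncallout = 1000 the loop stops at the first power of two
 * >= 1000, leaving callwheelsize = 1024, callwheelbits = 10 and
 * callwheelmask = 0x3ff.
 */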

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	int i;

	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}
	mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
}
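
/*
 * A minimal sketch of how machine-dependent startup code is expected to
 * drive the two routines above.  The two-pass sizing idiom shown here is
 * illustrative only; the real callers live in each platform's machdep.c
 * and differ in detail:
 *
 *	caddr_t v, firstaddr;
 *
 *	v = (caddr_t)0;
 *	v = kern_timeout_callwheel_alloc(v);		-- sizing pass
 *	firstaddr = <allocate (size_t)v bytes>;
 *	v = kern_timeout_callwheel_alloc(firstaddr);	-- placement pass
 *	...
 *	kern_timeout_callwheel_init();		-- once placement is final
 */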

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and A. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
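
/*
 * The wheel turns timer expiry into an O(1) hash lookup: a callout due
 * at absolute tick c_time lives in bucket
 * callwheel[c_time & callwheelmask], so softclock() only has to scan
 * one bucket per tick.  As a purely illustrative example, with
 * callwheelmask = 0x3ff a callout due at tick 5000 hashes to bucket
 * 5000 & 0x3ff = 904.  Entries in that bucket whose c_time is not the
 * current tick belong to a later revolution of the wheel, which is why
 * softclock() compares c_time against curticks before firing anything.
 */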

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	register struct callout *c;
	register struct callout_tailq *bucket;
	register int curticks;
	register int steps;	/* #steps since we last allowed interrupts */

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing; releasing the
						 * spin lock above lets
						 * pending interrupts run */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				mtx_unlock_spin(&callout_lock);
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_lock(&Giant);
				c_func(c_arg);
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_unlock(&Giant);
				mtx_lock_spin(&callout_lock);
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout *new;
	struct callout_handle handle;

	mtx_lock_spin(&callout_lock);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_unlock_spin(&callout_lock);
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	mtx_lock_spin(&callout_lock);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_unlock_spin(&callout_lock);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}
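
/*
 * A hedged usage sketch of the old-style interface above; struct
 * foo_softc and foo_tick() are hypothetical names used only for
 * illustration:
 *
 *	struct foo_softc {
 *		struct callout_handle sc_th;
 *	};
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		<periodic work>
 *		sc->sc_th = timeout(foo_tick, sc, hz);	-- rearm in 1 second
 *	}
 *
 *	attach:	callout_handle_init(&sc->sc_th);
 *		sc->sc_th = timeout(foo_tick, sc, hz);
 *	detach:	untimeout(foo_tick, sc, sc->sc_th);
 *
 * As the comment block above notes, untimeout() needs the original
 * (ftn, arg) pair as well as the handle to identify the entry.
 */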

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been serviced
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
void
callout_reset(c, to_ticks, ftn, arg)
	struct	callout *c;
	int	to_ticks;
	void	(*ftn) __P((void *));
	void	*arg;
{

	mtx_lock_spin(&callout_lock);
	if (c->c_flags & CALLOUT_PENDING)
		callout_stop(c);

	/*
	 * We could unlock callout_lock here and lock it again before the
	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
	 * doesn't take much time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
			  c, c_links.tqe);
	mtx_unlock_spin(&callout_lock);
}

int
callout_stop(c)
	struct	callout *c;
{

	mtx_lock_spin(&callout_lock);
	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		mtx_unlock_spin(&callout_lock);
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
	c->c_func = NULL;

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	return (1);
}

void
callout_init(c, mpsafe)
	struct	callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe)
		c->c_flags |= CALLOUT_MPSAFE;
}
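
/*
 * A minimal sketch of the new-style interface; struct bar_softc and
 * bar_timer() are hypothetical names used only for illustration:
 *
 *	struct bar_softc {
 *		struct callout sc_callout;
 *	};
 *
 *	attach:	callout_init(&sc->sc_callout, 0);
 *	arm:	callout_reset(&sc->sc_callout, hz / 10, bar_timer, sc);
 *	cancel:	callout_stop(&sc->sc_callout);
 *
 *	static void
 *	bar_timer(void *arg)
 *	{
 *		struct bar_softc *sc = arg;
 *
 *		callout_deactivate(&sc->sc_callout);	-- mark as serviced
 *		<timer work; rearm with callout_reset() if periodic>
 *	}
 *
 * Passing mpsafe = 0 to callout_init() makes softclock() enter Giant
 * around the handler; a nonzero value sets CALLOUT_MPSAFE and skips
 * that, so the handler must do its own locking.
 */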

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;
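
	/*
	 * Worked example (hypothetical configuration): with hz = 100,
	 * tick = 10000 us and a 30 minute suspend (tv_sec = 1800,
	 * tv_usec = 0), the first branch applies and
	 * delta_ticks = (1800 * 1000000 + 0 + 9999) / 10000 + 1 = 180001,
	 * i.e. the sleep time rounded up to whole ticks plus the usual
	 * tvtohz()-style extra tick.
	 */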

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	mtx_lock_spin(&callout_lock);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_unlock_spin(&callout_lock);

	return;
}
#endif /* APM_FIXUP_CALLTODO */
415