xref: /freebsd/sys/kern/kern_timeout.c (revision 7773002178c8dbc52b44e4d705f07706409af8e4)
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c.  */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;

static struct callout *nextsoftcheck;	/* Next callout to be checked. */

/*
 * kern_timeout_callwheel_alloc() - kernel low-level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	/*
	 * Calculate the callout wheel size, rounding ncallout up to the
	 * next power of two so a mask can be used in place of a modulo.
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	callout = (struct callout *)v;
	v = (caddr_t)(callout + ncallout);
	callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(callwheel + callwheelsize);
	return (v);
}
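
/*
 * Worked example of the sizing loop above (values are illustrative):
 * with ncallout = 1000 the loop exits at callwheelsize = 1024,
 * callwheelbits = 10 and callwheelmask = 0x3ff, so a bucket index can
 * be computed with the cheap "time & callwheelmask" instead of a
 * modulo by the wheel size.
 */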

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	int i;

	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}
	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
}
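
/*
 * A sketch of the expected boot-time calling sequence for the two
 * functions above.  This is hedged: the allocator shown is
 * hypothetical; the two-pass shape follows from the "may be called
 * more than once" note on kern_timeout_callwheel_alloc().
 *
 *	v = kern_timeout_callwheel_alloc(NULL);    first pass sizes the region
 *	v = early_alloc((size_t)v);                hypothetical allocator
 *	kern_timeout_callwheel_alloc(v);           second pass fixes pointers
 *	kern_timeout_callwheel_init();             once, after space is final
 */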

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and A. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
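
/*
 * Concretely, a callout due at absolute tick "t" hangs off bucket
 * "t & callwheelmask"; distinct times share buckets, which is why
 * softclock() below still compares c_time against the tick of the
 * bucket being swept before firing an entry.  A sketch of the
 * indexing (illustrative only, mirroring callout_reset()):
 *
 *	bucket = &callwheel[c->c_time & callwheelmask];
 *	TAILQ_INSERT_TAIL(bucket, c, c_links.tqe);
 */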

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int gcalls;

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				mtx_unlock_spin(&callout_lock);
				if (!(c_flags & CALLOUT_MPSAFE)) {
					mtx_lock(&Giant);
					gcalls++;
				} else {
					mpcalls++;
				}
				c_func(c_arg);
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_unlock(&Giant);
				mtx_lock_spin(&callout_lock);
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
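	/*
	 * Fold this pass's counters into the exported averages as
	 * exponentially weighted moving averages: each pass nudges the
	 * average 1/256 of the way toward the new sample, with samples
	 * scaled by 1000 (hence "Units = 1/1000" in the sysctl
	 * descriptions above).
	 */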
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout *new;
	struct callout_handle handle;

	mtx_lock_spin(&callout_lock);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_unlock_spin(&callout_lock);
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	mtx_lock_spin(&callout_lock);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_unlock_spin(&callout_lock);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}
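
/*
 * Minimal usage sketch for the legacy interface above (hypothetical
 * driver code; foo_tick() and sc are illustrative names):
 *
 *	struct callout_handle foo_h;
 *
 *	callout_handle_init(&foo_h);         safe to untimeout() before use
 *	foo_h = timeout(foo_tick, sc, hz);   run foo_tick(sc) ~1 second hence
 *	untimeout(foo_tick, sc, foo_h);      cancel; note that the original
 *	                                     (function, arg) pair is required
 *	                                     in addition to the handle
 */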

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been serviced
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
void
callout_reset(c, to_ticks, ftn, arg)
	struct	callout *c;
	int	to_ticks;
	void	(*ftn)(void *);
	void	*arg;
{

	mtx_lock_spin(&callout_lock);
	if (c->c_flags & CALLOUT_PENDING)
		callout_stop(c);

	/*
	 * We could unlock callout_lock here and lock it again before the
	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
	 * doesn't take much time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
			  c, c_links.tqe);
	mtx_unlock_spin(&callout_lock);
}

int
callout_stop(c)
	struct	callout *c;
{

	mtx_lock_spin(&callout_lock);
	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		mtx_unlock_spin(&callout_lock);
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
	c->c_func = NULL;

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	return (1);
}

void
callout_init(c, mpsafe)
	struct	callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe)
		c->c_flags |= CALLOUT_MPSAFE;
}
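
/*
 * Minimal usage sketch for the interface above (hypothetical driver
 * code; foo_timer() and sc are illustrative names):
 *
 *	struct callout foo_ch;
 *
 *	callout_init(&foo_ch, 1);                nonzero => CALLOUT_MPSAFE,
 *	                                         handler runs without Giant
 *	callout_reset(&foo_ch, hz / 10, foo_timer, sc);
 *	...
 *	callout_stop(&foo_ch);                   returns 1 if still pending
 *
 * A periodic handler would typically rearm itself by calling
 * callout_reset() again from inside foo_timer().
 */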

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;
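
	/*
	 * Worked example (illustrative): with hz = 100, tick is 10000
	 * microseconds, so a 2 second suspend takes the first branch:
	 * (2000000 + 9999) / 10000 + 1 = 201 ticks.
	 */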

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	mtx_lock_spin(&callout_lock);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_unlock_spin(&callout_lock);

	return;
}
#endif /* APM_FIXUP_CALLTODO */