/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c.  */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;
#ifdef DIAGNOSTIC
struct mtx callout_dont_sleep;
#endif

static struct callout *nextsoftcheck;	/* Next callout to be checked. */

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	/*
	 * Calculate the callout wheel size: the smallest power of two
	 * that is >= ncallout, so that "ticks & callwheelmask" can be
	 * used to index a bucket.
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	callout = (struct callout *)v;
	v = (caddr_t)(callout + ncallout);
	callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(callwheel + callwheelsize);
	return (v);
}
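
/*
 * Illustrative sketch (an assumption, not code from this file): a
 * platform's early startup code is expected to size the wheel by passing
 * a kernel-VA cursor through the allocator and, once the space has been
 * mapped, to run the one-time initializer.  "firstaddr" is hypothetical:
 *
 *	caddr_t v = (caddr_t)firstaddr;
 *	v = kern_timeout_callwheel_alloc(v);
 *	... reserve space for other subsystems, map and zero it ...
 *	kern_timeout_callwheel_init();
 */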

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	int i;

	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}
	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
#ifdef DIAGNOSTIC
	mtx_init(&callout_dont_sleep, "callout_dont_sleep", NULL, MTX_DEF);
#endif
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and A. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
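
/*
 * A sketch of the wheel hashing at work here (illustrative, with made-up
 * numbers): with ncallout = 600 the wheel is rounded up to 1024 buckets,
 * so callwheelmask = 1023.  A callout due at tick 5000 hashes to bucket
 * 5000 & 1023 = 904, and every 1024th later tick rehashes to the same
 * bucket, which is why softclock() below must still compare c_time
 * against the current tick before running an entry.
 */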

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 18446744073709551LL;	/* 1 msec */
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/*
					 * Give interrupts a chance.  While
					 * the lock is dropped, callout_stop()
					 * may remove entries; it keeps
					 * nextsoftcheck valid so the scan
					 * can resume from it.
					 */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				mtx_unlock_spin(&callout_lock);
				if (!(c_flags & CALLOUT_MPSAFE)) {
					mtx_lock(&Giant);
					gcalls++;
				} else {
					mpcalls++;
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
				mtx_lock(&callout_dont_sleep);
#endif
				c_func(c_arg);
#ifdef DIAGNOSTIC
				mtx_unlock(&callout_dont_sleep);
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					maxdt = bt2.frac;
					bintime2timespec(&bt2, &ts2);
					printf(
			"Expensive timeout(9) function: %p(%p) %ld.%09ld s\n",
					    c_func, c_arg,
					    (long)ts2.tv_sec, ts2.tv_nsec);
				}
#endif
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_unlock(&Giant);
				mtx_lock_spin(&callout_lock);
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
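
	/*
	 * The statistics below are exponentially weighted moving averages
	 * kept in fixed point: each pass mixes 1/256 of the new sample
	 * (scaled by 1000, matching the "Units = 1/1000" in the sysctl
	 * descriptions) into the running value, i.e.
	 * avg = avg + (sample * 1000 - avg) / 256.
	 */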
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout *new;
	struct callout_handle handle;

	mtx_lock_spin(&callout_lock);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_unlock_spin(&callout_lock);
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	mtx_lock_spin(&callout_lock);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_unlock_spin(&callout_lock);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}
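
/*
 * Usage sketch for the legacy interface (illustrative only; the driver
 * routine "mydriver_tick" and its softc "sc" are hypothetical):
 *
 *	struct callout_handle ch;
 *
 *	callout_handle_init(&ch);
 *	ch = timeout(mydriver_tick, sc, hz);	schedule in ~1 second
 *	...
 *	untimeout(mydriver_tick, sc, ch);	cancel if still pending
 *
 * Note that untimeout() matches on function, argument, and handle, so a
 * callout slot that has since been recycled for another user is left
 * alone.
 */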

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been serviced
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
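
/*
 * Usage sketch for this interface (illustrative; "mydriver_tick", "sc",
 * and the embedded sc_callout field are hypothetical):
 *
 *	callout_init(&sc->sc_callout, 1);	1 = handler is MPSAFE
 *	callout_reset(&sc->sc_callout, hz / 10, mydriver_tick, sc);
 *	...
 *	if (callout_stop(&sc->sc_callout) == 0)
 *		printf("too late, handler already ran or was not pending\n");
 */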
void
callout_reset(c, to_ticks, ftn, arg)
	struct	callout *c;
	int	to_ticks;
	void	(*ftn)(void *);
	void	*arg;
{

	mtx_lock_spin(&callout_lock);
	if (c->c_flags & CALLOUT_PENDING)
		callout_stop(c);

	/*
	 * We could unlock callout_lock here and lock it again before the
	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
	 * doesn't take much time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
			  c, c_links.tqe);
	mtx_unlock_spin(&callout_lock);
}

int
callout_stop(c)
	struct	callout *c;
{

	mtx_lock_spin(&callout_lock);
	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		mtx_unlock_spin(&callout_lock);
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
	c->c_func = NULL;

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	return (1);
}

void
callout_init(c, mpsafe)
	struct	callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe)
		c->c_flags |= CALLOUT_MPSAFE;
}

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything for a negative time change. */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;
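
	/*
	 * Worked example (assuming hz = 100, so tick = 10000 us): a two
	 * hour suspend gives tv_sec = 7200, tv_usec = 0, and the first
	 * branch above yields (7200000000 + 9999) / 10000 + 1 = 720001
	 * ticks to credit against the pending timers below.
	 */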

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	mtx_lock_spin(&callout_lock);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_unlock_spin(&callout_lock);

	return;
}
#endif /* APM_FIXUP_CALLTODO */
469