xref: /freebsd/sys/kern/kern_timeout.c (revision 81d1ffee089aab2652954909acbe6aadd8a1a72c)
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c.  */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;

static struct callout *nextsoftcheck;	/* Next callout to be checked. */

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	/*
	 * Calculate the callout wheel size: the smallest power of two
	 * that is >= ncallout, so that (time & callwheelmask) can be
	 * used as the bucket index.
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	callout = (struct callout *)v;
	v = (caddr_t)(callout + ncallout);
	callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(callwheel + callwheelsize);
	return (v);
}

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	int i;

	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}
	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
}
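
/*
 * Illustrative sketch (not part of the original file): the intended
 * calling pattern for the two routines above.  A machine-dependent
 * startup path is expected to carve the callout table and callwheel
 * out of a region it has already reserved, then initialize them once
 * the layout is final.  The names "reserved_region" and "v" below are
 * hypothetical.
 */
#if 0
	extern caddr_t reserved_region;		/* hypothetical reserved area */
	caddr_t v;

	v = reserved_region;
	v = kern_timeout_callwheel_alloc(v);	/* claim callout[] and callwheel[] space */
	/* ... other early tables may be carved from v here ... */
	kern_timeout_callwheel_init();		/* once the layout is final */
#endif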

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and A. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas, November 1987.
 */
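
/*
 * Illustrative example (added for clarity, not in the original): with a
 * wheel of 256 buckets, callwheelmask is 255 and a callout due at
 * absolute tick c_time hangs off callwheel[c_time & 255].  A callout
 * due at tick 1000 and one due at tick 1256 therefore share bucket 232
 * (1000 & 255 == 1256 & 255 == 232); softclock() resolves such
 * collisions by only running entries whose c_time matches the bucket's
 * current softticks value, leaving the rest for a later revolution of
 * the wheel.
 */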

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 18446744073709551LL;	/* 2^64 / 1000: 1 msec in bintime frac units */
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				mtx_unlock_spin(&callout_lock);
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_lock(&Giant);
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				c_func(c_arg);
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					maxdt = bt2.frac;
					bintime2timespec(&bt2, &ts2);
					printf(
			"Expensive timeout(9) function: %p(%p) %d.%09ld s\n",
					c_func, c_arg,
					ts2.tv_sec, ts2.tv_nsec);
				}
#endif
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_unlock(&Giant);
				mtx_lock_spin(&callout_lock);
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.  A usage sketch follows the
 *	three definitions below.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout *new;
	struct callout_handle handle;

	mtx_lock_spin(&callout_lock);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_unlock_spin(&callout_lock);
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	mtx_lock_spin(&callout_lock);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_unlock_spin(&callout_lock);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}
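
/*
 * Usage sketch for the legacy interface above (illustrative only; the
 * softc and handler names below are hypothetical).  Note that untimeout()
 * needs the original function and argument in addition to the handle.
 */
#if 0
static struct callout_handle my_handle;

static void
my_timer_expired(void *arg)
{
	struct my_softc *sc = arg;	/* hypothetical driver state */

	/* ... handle the expiry ... */
}

static void
my_attach(struct my_softc *sc)
{
	/* Make an early untimeout() harmless. */
	callout_handle_init(&my_handle);
}

static void
my_start(struct my_softc *sc)
{
	/* Fire my_timer_expired(sc) in roughly one second. */
	my_handle = timeout(my_timer_expired, sc, hz);
}

static void
my_stop(struct my_softc *sc)
{
	/* Cancelling needs the original function and argument too. */
	untimeout(my_timer_expired, sc, my_handle);
}
#endif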

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been serviced
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 *
 * A usage sketch follows callout_init() below.
 */
void
callout_reset(c, to_ticks, ftn, arg)
	struct	callout *c;
	int	to_ticks;
	void	(*ftn)(void *);
	void	*arg;
{

	mtx_lock_spin(&callout_lock);
	if (c->c_flags & CALLOUT_PENDING)
		callout_stop(c);

	/*
	 * We could unlock callout_lock here and lock it again before the
	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
	 * doesn't take much time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
			  c, c_links.tqe);
	mtx_unlock_spin(&callout_lock);
}

int
callout_stop(c)
	struct	callout *c;
{

	mtx_lock_spin(&callout_lock);
	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		mtx_unlock_spin(&callout_lock);
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
	c->c_func = NULL;

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	return (1);
}

void
callout_init(c, mpsafe)
	struct	callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe)
		c->c_flags |= CALLOUT_MPSAFE;
}
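
/*
 * Usage sketch for the callout_*() interface above (illustrative only;
 * the softc, handler, and tick count below are hypothetical).  The
 * callout structure is owned by the client and may be reused.
 */
#if 0
static struct callout my_callout;

static void
my_callout_expired(void *arg)
{
	struct my_softc *sc = arg;	/* hypothetical driver state */

	callout_deactivate(&my_callout);	/* mark as serviced */
	/* ... do the periodic work, possibly callout_reset() again ... */
}

static void
my_attach(struct my_softc *sc)
{
	callout_init(&my_callout, 0);		/* 0: handler runs under Giant */
	callout_reset(&my_callout, hz / 10, my_callout_expired, sc);
}

static void
my_detach(struct my_softc *sc)
{
	callout_stop(&my_callout);		/* harmless if already expired */
}
#endif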

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend fire upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything for a negative time change. */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;
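
	/*
	 * Worked example (added for clarity): with hz = 100, tick is 10000
	 * usec.  A suspend of 2.5 seconds (tv_sec = 2, tv_usec = 500000)
	 * takes the first branch above:
	 *	(2 * 1000000 + 500000 + 9999) / 10000 + 1
	 * which truncates to 250 + 1 = 251 ticks, i.e. the sleep rounded
	 * up to whole ticks plus one tick of slop, mirroring tvtohz().
	 */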

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */
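
	/*
	 * Illustrative example (added for clarity): calltodo entries keep
	 * their c_time as ticks relative to the preceding entry (the
	 * historical delta-encoded list).  With relative times {5, 3, 10}
	 * and delta_ticks = 7, the loop below leaves {-2, 1, 10}: the first
	 * timer has fired (non-positive c_time), the 2 leftover ticks are
	 * charged against the second timer, and later entries are untouched.
	 */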

	/* don't collide with softclock() */
	mtx_lock_spin(&callout_lock);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_unlock_spin(&callout_lock);

	return;
}
#endif /* APM_FIXUP_CALLTODO */