xref: /freebsd/sys/kern/kern_timeout.c (revision f0a75d274af375d15b97b830966b99a02b7db911)
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_mtxcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mtxcalls, CTLFLAG_RD, &avg_mtxcalls, 0,
    "Average number of mtx callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c.  */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;

static struct callout *nextsoftcheck;	/* Next callout to be checked. */

/**
 * Locked by callout_lock:
 *   curr_callout    - If a callout is in progress, it is curr_callout.
 *                     If curr_callout is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   curr_cancelled  - Changing to 1 with both callout_lock and c_mtx held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops callout_lock to acquire c_mtx, and it calls
 *                     the handler only if curr_cancelled is still 0 after
 *                     c_mtx is successfully acquired.
 *   callout_wait    - If a thread is waiting in callout_drain(), then
 *                     callout_wait is nonzero.  Set only when
 *                     curr_callout is non-NULL.
 */
static struct callout *curr_callout;
static int curr_cancelled;
static int callout_wait;
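
/*
 * Illustrative sketch (not part of this file; foo_softc, foo_detach and
 * the sc_mtx/sc_callout fields are hypothetical names): how the
 * curr_cancelled handshake documented above looks from a consumer.  If
 * softclock() has already dropped callout_lock and is blocked acquiring
 * the callout's mutex, a thread that holds that mutex can still cancel:
 * callout_stop() sets curr_cancelled to 1 under callout_lock, and
 * softclock() re-checks the flag once it finally gets the mutex and
 * skips the handler.
 *
 *	static void
 *	foo_detach(struct foo_softc *sc)
 *	{
 *		mtx_lock(&sc->sc_mtx);
 *		if (callout_stop(&sc->sc_callout) != 0) {
 *			... the handler was cancelled and will not run ...
 *		}
 *		mtx_unlock(&sc->sc_mtx);
 *	}
 */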

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	callout = (struct callout *)v;
	v = (caddr_t)(callout + ncallout);
	callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(callwheel + callwheelsize);
	return(v);
}
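
/*
 * Worked example (illustrative, not part of this file): with
 * ncallout = 1000 the sizing loop above leaves
 *
 *	callwheelsize = 1024	(next power of two >= ncallout)
 *	callwheelbits = 10
 *	callwheelmask = 0x3ff
 *
 * and the caller-supplied region v is carved into ncallout struct
 * callout entries followed by callwheelsize bucket heads.
 */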

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	int i;

	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}
	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
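
/*
 * Illustrative example (not part of this file): callouts are hashed into
 * the wheel by the low bits of their expiry time.  With
 * callwheelsize = 1024, a callout whose c_time is 5000 is linked into
 *
 *	callwheel[5000 & callwheelmask]  ==  callwheel[904]
 *
 * softclock() scans that bucket whenever the low bits of softticks equal
 * 904, and runs the entry only when softticks == c_time (5000); entries
 * in the same bucket with a different c_time are skipped.
 */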

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int mtxcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	mtxcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct mtx *c_mtx;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_mtx = c->c_mtx;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_func = NULL;
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
					curr_callout = NULL;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
					curr_callout = c;
				}
				curr_cancelled = 0;
				mtx_unlock_spin(&callout_lock);
				if (c_mtx != NULL) {
					mtx_lock(c_mtx);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (curr_cancelled) {
						mtx_unlock(c_mtx);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					curr_cancelled = 1;

					if (c_mtx == &Giant) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						mtxcalls++;
						CTR3(KTR_CALLOUT, "callout mtx"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				c_func(c_arg);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					mtx_unlock(c_mtx);
			skip:
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (callout_wait) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					wakeup(&callout_wait);
					callout_wait = 0;
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}
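
/*
 * Note on the averages above (illustrative): each avg_* statistic is an
 * exponential moving average kept in units of 1/1000 and updated as
 *
 *	avg += (sample * 1000 - avg) >> 8;
 *
 * so a steady per-call depth of, say, 20 items converges toward roughly
 * avg_depth == 20000, which debug.to_avg_depth reports as about 20.000
 * items examined per softclock() call once the 1/1000 units are applied.
 */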

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
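
/*
 * Usage sketch (illustrative, not part of this file): the legacy
 * interface returns a handle that is later paired with the original
 * function and argument to cancel the timeout.  foo_expire, foo_handle
 * and sc are hypothetical names.
 *
 *	static struct callout_handle foo_handle;
 *
 *	callout_handle_init(&foo_handle);
 *	foo_handle = timeout(foo_expire, sc, hz / 10);
 *	...
 *	untimeout(foo_expire, sc, foo_handle);
 */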
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout *new;
	struct callout_handle handle;

	mtx_lock_spin(&callout_lock);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_unlock_spin(&callout_lock);
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	mtx_lock_spin(&callout_lock);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_unlock_spin(&callout_lock);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
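
/*
 * Usage sketch (illustrative, not part of this file): a hypothetical
 * driver arming a periodic callout protected by its own mutex and
 * draining it on detach.  foo_softc, foo_tick, foo_attach, foo_detach
 * and the sc_mtx/sc_callout fields are made-up names.
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		... periodic work; sc->sc_mtx is held here by softclock() ...
 *		callout_reset(&sc->sc_callout, hz, foo_tick, sc);
 *	}
 *
 *	static void
 *	foo_attach(struct foo_softc *sc)
 *	{
 *		mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
 *		callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
 *		callout_reset(&sc->sc_callout, hz, foo_tick, sc);
 *	}
 *
 *	static void
 *	foo_detach(struct foo_softc *sc)
 *	{
 *		callout_drain(&sc->sc_callout);	(may sleep; call it with
 *						 no locks held)
 *		mtx_destroy(&sc->sc_mtx);
 *	}
 */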
int
callout_reset(c, to_ticks, ftn, arg)
	struct	callout *c;
	int	to_ticks;
	void	(*ftn)(void *);
	void	*arg;
{
	int cancelled = 0;

#ifdef notyet /* Some callers of timeout() do not hold Giant. */
	if (c->c_mtx != NULL)
		mtx_assert(c->c_mtx, MA_OWNED);
#endif

	mtx_lock_spin(&callout_lock);
	if (c == curr_callout) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a mutex then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_mtx != NULL && !curr_cancelled)
			cancelled = curr_cancelled = 1;
		if (callout_wait) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			mtx_unlock_spin(&callout_lock);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (nextsoftcheck == c) {
			nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;

		/*
		 * Part of the normal "stop a pending callout" process
		 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
		 * flags.  We're not going to bother doing that here,
		 * because we're going to be setting those flags ten lines
		 * after this point, and we're holding callout_lock
		 * between now and then.
		 */
	}

	/*
	 * We could unlock callout_lock here and lock it again before the
	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
	 * doesn't take much time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
			  c, c_links.tqe);
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	mtx_unlock_spin(&callout_lock);

	return (cancelled);
}

int
_callout_stop_safe(c, safe)
	struct	callout *c;
	int	safe;
{
	int use_mtx;

	if (!safe && c->c_mtx != NULL) {
#ifdef notyet /* Some callers do not hold Giant for Giant-locked callouts. */
		mtx_assert(c->c_mtx, MA_OWNED);
		use_mtx = 1;
#else
		use_mtx = mtx_owned(c->c_mtx);
#endif
	} else {
		use_mtx = 0;
	}

	mtx_lock_spin(&callout_lock);
	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (c != curr_callout) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			mtx_unlock_spin(&callout_lock);
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (c == curr_callout) {
				callout_wait = 1;
				msleep_spin(&callout_wait, &callout_lock,
				    "codrain", 0);
			}
		} else if (use_mtx && !curr_cancelled) {
			/*
			 * The current callout is waiting for its
			 * mutex which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * mutex, the callout will be skipped in
			 * softclock().
			 */
			curr_cancelled = 1;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			mtx_unlock_spin(&callout_lock);
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		mtx_unlock_spin(&callout_lock);
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	return (1);
}

void
callout_init(c, mpsafe)
	struct	callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_mtx = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_mtx = &Giant;
		c->c_flags = 0;
	}
}

void
callout_init_mtx(c, mtx, flags)
	struct	callout *c;
	struct	mtx *mtx;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_mtx = mtx;
	KASSERT((flags & ~CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_mtx: bad flags %d", flags));
	/* CALLOUT_RETURNUNLOCKED makes no sense without a mutex. */
	KASSERT(mtx != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_mtx: CALLOUT_RETURNUNLOCKED with no mutex"));
	c->c_flags = flags & CALLOUT_RETURNUNLOCKED;
}
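
/*
 * Illustrative sketch (hypothetical names): a callout initialized with
 * CALLOUT_RETURNUNLOCKED is expected to release its own mutex before
 * returning, because softclock() skips the mtx_unlock() in that case.
 *
 *	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, CALLOUT_RETURNUNLOCKED);
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		... short work while sc->sc_mtx is held ...
 *		mtx_unlock(&sc->sc_mtx);
 *		... longer work that must not hold the mutex ...
 *	}
 */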

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend fire upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	mtx_lock_spin(&callout_lock);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_unlock_spin(&callout_lock);

	return;
}
#endif /* APM_FIXUP_CALLTODO */