xref: /freebsd/sys/kern/kern_timeout.c (revision 1e413cf93298b5b97441a21d9a50fdcd0ee9945e)
1 /*-
2  * Copyright (c) 1982, 1986, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/callout.h>
43 #include <sys/condvar.h>
44 #include <sys/kernel.h>
45 #include <sys/ktr.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/proc.h>
49 #include <sys/sleepqueue.h>
50 #include <sys/sysctl.h>
51 
52 static int avg_depth;
53 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
54     "Average number of items examined per softclock call. Units = 1/1000");
55 static int avg_gcalls;
56 SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
57     "Average number of Giant callouts made per softclock call. Units = 1/1000");
58 static int avg_lockcalls;
59 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
60     "Average number of lock callouts made per softclock call. Units = 1/1000");
61 static int avg_mpcalls;
62 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
63     "Average number of MP callouts made per softclock call. Units = 1/1000");
64 /*
65  * TODO:
66  *	allocate more timeout table slots when table overflows.
67  */
68 
69 /* Exported to machdep.c and/or kern_clock.c.  */
70 struct callout *callout;
71 struct callout_list callfree;
72 int callwheelsize, callwheelbits, callwheelmask;
73 struct callout_tailq *callwheel;
74 int softticks;			/* Like ticks, but for softclock(). */
75 struct mtx callout_lock;
76 
77 static struct callout *nextsoftcheck;	/* Next callout to be checked. */
78 
79 /**
80  * Locked by callout_lock:
81  *   curr_callout    - If a callout is in progress, it is curr_callout.
82  *                     If curr_callout is non-NULL, threads waiting in
83  *                     callout_drain() will be woken up as soon as the
84  *                     relevant callout completes.
85  *   curr_cancelled  - Changing to 1 with both callout_lock and c_lock held
86  *                     guarantees that the current callout will not run.
87  *                     The softclock() function sets this to 0 before it
88  *                     drops callout_lock to acquire c_lock, and it calls
89  *                     the handler only if curr_cancelled is still 0 after
90  *                     c_lock is successfully acquired.
91  *   callout_wait    - If a thread is waiting in callout_drain(), then
92  *                     callout_wait is nonzero.  Set only when
93  *                     curr_callout is non-NULL.
94  */
95 static struct callout *curr_callout;
96 static int curr_cancelled;
97 static int callout_wait;
98 
99 /*
100  * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
101  *
102  *	This code is called very early in the kernel initialization sequence,
103  *	and may be called more than once.
104  */
105 caddr_t
106 kern_timeout_callwheel_alloc(caddr_t v)
107 {
108 	/*
109 	 * Calculate callout wheel size
110 	 */
111 	for (callwheelsize = 1, callwheelbits = 0;
112 	     callwheelsize < ncallout;
113 	     callwheelsize <<= 1, ++callwheelbits)
114 		;
115 	callwheelmask = callwheelsize - 1;
116 
117 	callout = (struct callout *)v;
118 	v = (caddr_t)(callout + ncallout);
119 	callwheel = (struct callout_tailq *)v;
120 	v = (caddr_t)(callwheel + callwheelsize);
121 	return(v);
122 }
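
/*
 * For example, if ncallout were 1000, the loop above would leave
 * callwheelsize = 1024 (the smallest power of two not less than ncallout),
 * callwheelbits = 10 and callwheelmask = 0x3ff.
 */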
123 
124 /*
125  * kern_timeout_callwheel_init() - initialize previously reserved callwheel
126  *				   space.
127  *
128  *	This code is called just once, after the space reserved for the
129  *	callout wheel has been finalized.
130  */
131 void
132 kern_timeout_callwheel_init(void)
133 {
134 	int i;
135 
136 	SLIST_INIT(&callfree);
137 	for (i = 0; i < ncallout; i++) {
138 		callout_init(&callout[i], 0);
139 		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
140 		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
141 	}
142 	for (i = 0; i < callwheelsize; i++) {
143 		TAILQ_INIT(&callwheel[i]);
144 	}
145 	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
146 }
147 
148 /*
149  * The callout mechanism is based on the work of Adam M. Costello and
150  * George Varghese, published in a technical report entitled "Redesigning
151  * the BSD Callout and Timer Facilities" and modified slightly for inclusion
152  * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
153  * used in this implementation was published by G. Varghese and T. Lauck in
154  * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
155  * the Efficient Implementation of a Timer Facility" in the Proceedings of
156  * the 11th ACM Annual Symposium on Operating Systems Principles,
157  * Austin, Texas Nov 1987.
158  */
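
/*
 * In this scheme a callout due to fire at tick value c_time lives on
 * callwheel[c_time & callwheelmask]; softclock() advances softticks one
 * tick at a time, scans that tick's bucket and runs only the entries whose
 * c_time matches, since many future expiry times hash into the same bucket.
 * A rough sketch (assuming hz = 1000 and callwheelmask = 0x3ff):
 *
 *	callout_reset(c, 5 * hz, fn, arg);
 *		c->c_time = ticks + 5000;
 *		bucket    = c->c_time & 0x3ff;
 */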
159 
160 /*
161  * Software (low priority) clock interrupt.
162  * Run periodic events from timeout queue.
163  */
164 void
165 softclock(void *dummy)
166 {
167 	struct callout *c;
168 	struct callout_tailq *bucket;
169 	int curticks;
170 	int steps;	/* #steps since we last allowed interrupts */
171 	int depth;
172 	int mpcalls;
173 	int lockcalls;
174 	int gcalls;
175 #ifdef DIAGNOSTIC
176 	struct bintime bt1, bt2;
177 	struct timespec ts2;
178 	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
179 	static timeout_t *lastfunc;
180 #endif
181 
182 #ifndef MAX_SOFTCLOCK_STEPS
183 #define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
184 #endif /* MAX_SOFTCLOCK_STEPS */
185 
186 	mpcalls = 0;
187 	lockcalls = 0;
188 	gcalls = 0;
189 	depth = 0;
190 	steps = 0;
191 	mtx_lock_spin(&callout_lock);
192 	while (softticks != ticks) {
193 		softticks++;
194 		/*
195 		 * softticks may be modified by hard clock, so cache
196 		 * it while we work on a given bucket.
197 		 */
198 		curticks = softticks;
199 		bucket = &callwheel[curticks & callwheelmask];
200 		c = TAILQ_FIRST(bucket);
201 		while (c) {
202 			depth++;
203 			if (c->c_time != curticks) {
204 				c = TAILQ_NEXT(c, c_links.tqe);
205 				++steps;
206 				if (steps >= MAX_SOFTCLOCK_STEPS) {
207 					nextsoftcheck = c;
208 					/* Give interrupts a chance. */
209 					mtx_unlock_spin(&callout_lock);
210 					;	/* nothing */
211 					mtx_lock_spin(&callout_lock);
212 					c = nextsoftcheck;
213 					steps = 0;
214 				}
215 			} else {
216 				void (*c_func)(void *);
217 				void *c_arg;
218 				struct lock_class *class;
219 				struct lock_object *c_lock;
220 				int c_flags, sharedlock;
221 
222 				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
223 				TAILQ_REMOVE(bucket, c, c_links.tqe);
224 				class = (c->c_lock != NULL) ?
225 				    LOCK_CLASS(c->c_lock) : NULL;
226 				sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
227 				    0 : 1;
228 				c_lock = c->c_lock;
229 				c_func = c->c_func;
230 				c_arg = c->c_arg;
231 				c_flags = c->c_flags;
232 				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
233 					c->c_func = NULL;
234 					c->c_flags = CALLOUT_LOCAL_ALLOC;
235 					SLIST_INSERT_HEAD(&callfree, c,
236 							  c_links.sle);
237 					curr_callout = NULL;
238 				} else {
239 					c->c_flags =
240 					    (c->c_flags & ~CALLOUT_PENDING);
241 					curr_callout = c;
242 				}
243 				curr_cancelled = 0;
244 				mtx_unlock_spin(&callout_lock);
245 				if (c_lock != NULL) {
246 					class->lc_lock(c_lock, sharedlock);
247 					/*
248 					 * The callout may have been cancelled
249 					 * while we switched locks.
250 					 */
251 					if (curr_cancelled) {
252 						class->lc_unlock(c_lock);
253 						goto skip;
254 					}
255 					/* The callout cannot be stopped now. */
256 					curr_cancelled = 1;
257 
258 					if (c_lock == &Giant.lock_object) {
259 						gcalls++;
260 						CTR3(KTR_CALLOUT,
261 						    "callout %p func %p arg %p",
262 						    c, c_func, c_arg);
263 					} else {
264 						lockcalls++;
265 						CTR3(KTR_CALLOUT, "callout lock"
266 						    " %p func %p arg %p",
267 						    c, c_func, c_arg);
268 					}
269 				} else {
270 					mpcalls++;
271 					CTR3(KTR_CALLOUT,
272 					    "callout mpsafe %p func %p arg %p",
273 					    c, c_func, c_arg);
274 				}
275 #ifdef DIAGNOSTIC
276 				binuptime(&bt1);
277 #endif
278 				THREAD_NO_SLEEPING();
279 				c_func(c_arg);
280 				THREAD_SLEEPING_OK();
281 #ifdef DIAGNOSTIC
282 				binuptime(&bt2);
283 				bintime_sub(&bt2, &bt1);
284 				if (bt2.frac > maxdt) {
285 					if (lastfunc != c_func ||
286 					    bt2.frac > maxdt * 2) {
287 						bintime2timespec(&bt2, &ts2);
288 						printf(
289 			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
290 						    c_func, c_arg,
291 						    (intmax_t)ts2.tv_sec,
292 						    ts2.tv_nsec);
293 					}
294 					maxdt = bt2.frac;
295 					lastfunc = c_func;
296 				}
297 #endif
298 				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
299 					class->lc_unlock(c_lock);
300 			skip:
301 				mtx_lock_spin(&callout_lock);
302 				curr_callout = NULL;
303 				if (callout_wait) {
304 					/*
305 					 * There is someone waiting
306 					 * for the callout to complete.
307 					 */
308 					callout_wait = 0;
309 					mtx_unlock_spin(&callout_lock);
310 					wakeup(&callout_wait);
311 					mtx_lock_spin(&callout_lock);
312 				}
313 				steps = 0;
314 				c = nextsoftcheck;
315 			}
316 		}
317 	}
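	/*
	 * Keep exponentially weighted moving averages of the per-call
	 * statistics: each pass folds roughly 1/256 of the new sample
	 * (scaled by 1000, matching the sysctl descriptions above) into
	 * the running average, i.e. avg += (sample * 1000 - avg) / 256.
	 */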
318 	avg_depth += (depth * 1000 - avg_depth) >> 8;
319 	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
320 	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
321 	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
322 	nextsoftcheck = NULL;
323 	mtx_unlock_spin(&callout_lock);
324 }
325 
326 /*
327  * timeout --
328  *	Execute a function after a specified length of time.
329  *
330  * untimeout --
331  *	Cancel previous timeout function call.
332  *
333  * callout_handle_init --
334  *	Initialize a handle so that using it with untimeout is benign.
335  *
336  *	See AT&T BCI Driver Reference Manual for specification.  This
337  *	implementation differs from that one in that although an
338  *	identification value is returned from timeout, the original
339  *	arguments to timeout as well as the identifier are used to
340  *	identify entries for untimeout.
341  */
342 struct callout_handle
343 timeout(ftn, arg, to_ticks)
344 	timeout_t *ftn;
345 	void *arg;
346 	int to_ticks;
347 {
348 	struct callout *new;
349 	struct callout_handle handle;
350 
351 	mtx_lock_spin(&callout_lock);
352 
353 	/* Fill in the next free callout structure. */
354 	new = SLIST_FIRST(&callfree);
355 	if (new == NULL)
356 		/* XXX Attempt to malloc first */
357 		panic("timeout table full");
358 	SLIST_REMOVE_HEAD(&callfree, c_links.sle);
359 
360 	callout_reset(new, to_ticks, ftn, arg);
361 
362 	handle.callout = new;
363 	mtx_unlock_spin(&callout_lock);
364 	return (handle);
365 }
366 
367 void
368 untimeout(ftn, arg, handle)
369 	timeout_t *ftn;
370 	void *arg;
371 	struct callout_handle handle;
372 {
373 
374 	/*
375 	 * Check for a handle that was initialized
376 	 * by callout_handle_init, but never used
377 	 * for a real timeout.
378 	 */
379 	if (handle.callout == NULL)
380 		return;
381 
382 	mtx_lock_spin(&callout_lock);
383 	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
384 		callout_stop(handle.callout);
385 	mtx_unlock_spin(&callout_lock);
386 }
387 
388 void
389 callout_handle_init(struct callout_handle *handle)
390 {
391 	handle->callout = NULL;
392 }
393 
394 /*
395  * New interface; clients allocate their own callout structures.
396  *
397  * callout_reset() - establish or change a timeout
398  * callout_stop() - disestablish a timeout
399  * callout_init() - initialize a callout structure so that it can
400  *	safely be passed to callout_reset() and callout_stop()
401  *
402  * <sys/callout.h> defines three convenience macros:
403  *
404  * callout_active() - returns truth if callout has not been stopped,
405  *	drained, or deactivated since the last time the callout was
406  *	reset.
407  * callout_pending() - returns truth if callout is still waiting for timeout
408  * callout_deactivate() - marks the callout as having been serviced
409  */
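
/*
 * A rough usage sketch of this interface, in the spirit of callout(9)
 * (the softc, its fields and the handler name are hypothetical):
 *
 *	callout_init(&sc->ch, CALLOUT_MPSAFE);		at attach
 *	callout_reset(&sc->ch, hz, foo_watchdog, sc);	(re)arm for ~1 second
 *	callout_stop(&sc->ch);				cancel if still pending
 *	callout_drain(&sc->ch);				at detach: cancel and wait
 *							for a running handler
 */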
410 int
411 callout_reset(c, to_ticks, ftn, arg)
412 	struct	callout *c;
413 	int	to_ticks;
414 	void	(*ftn)(void *);
415 	void	*arg;
416 {
417 	int cancelled = 0;
418 
419 	mtx_lock_spin(&callout_lock);
420 	if (c == curr_callout) {
421 		/*
422 		 * We're being asked to reschedule a callout which is
423 		 * currently in progress.  If there is a lock then we
424 		 * can cancel the callout if it has not really started.
425 		 */
426 		if (c->c_lock != NULL && !curr_cancelled)
427 			cancelled = curr_cancelled = 1;
428 		if (callout_wait) {
429 			/*
430 			 * Someone has called callout_drain to kill this
431 			 * callout.  Don't reschedule.
432 			 */
433 			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
434 			    cancelled ? "cancelled" : "failed to cancel",
435 			    c, c->c_func, c->c_arg);
436 			mtx_unlock_spin(&callout_lock);
437 			return (cancelled);
438 		}
439 	}
440 	if (c->c_flags & CALLOUT_PENDING) {
441 		if (nextsoftcheck == c) {
442 			nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
443 		}
444 		TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c,
445 		    c_links.tqe);
446 
447 		cancelled = 1;
448 
449 		/*
450 		 * Part of the normal "stop a pending callout" process
451 		 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
452 		 * flags.  We're not going to bother doing that here,
453 		 * because we're going to be setting those flags ten lines
454 		 * after this point, and we're holding callout_lock
455 		 * between now and then.
456 		 */
457 	}
458 
459 	/*
460 	 * We could unlock callout_lock here and lock it again before the
461 	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
462 	 * doesn't take much time.
463 	 */
464 	if (to_ticks <= 0)
465 		to_ticks = 1;
466 
467 	c->c_arg = arg;
468 	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
469 	c->c_func = ftn;
470 	c->c_time = ticks + to_ticks;
471 	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
472 			  c, c_links.tqe);
473 	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
474 	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
475 	mtx_unlock_spin(&callout_lock);
476 
477 	return (cancelled);
478 }
479 
480 int
481 _callout_stop_safe(c, safe)
482 	struct	callout *c;
483 	int	safe;
484 {
485 	struct lock_class *class;
486 	int use_lock, sq_locked;
487 
488 	/*
489 	 * Some old subsystems don't hold Giant while running a callout_stop(),
490 	 * so just discard this check for the moment.
491 	 */
492 	if (!safe && c->c_lock != NULL) {
493 		if (c->c_lock == &Giant.lock_object)
494 			use_lock = mtx_owned(&Giant);
495 		else {
496 			use_lock = 1;
497 			class = LOCK_CLASS(c->c_lock);
498 			class->lc_assert(c->c_lock, LA_XLOCKED);
499 		}
500 	} else
501 		use_lock = 0;
502 
503 	sq_locked = 0;
504 again:
505 	mtx_lock_spin(&callout_lock);
506 	/*
507 	 * If the callout isn't pending, it's not on the queue, so
508 	 * don't attempt to remove it from the queue.  We can try to
509 	 * stop it by other means however.
510 	 */
511 	if (!(c->c_flags & CALLOUT_PENDING)) {
512 		c->c_flags &= ~CALLOUT_ACTIVE;
513 
514 		/*
515 		 * If it wasn't on the queue and it isn't the current
516 		 * callout, then we can't stop it, so just bail.
517 		 */
518 		if (c != curr_callout) {
519 			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
520 			    c, c->c_func, c->c_arg);
521 			mtx_unlock_spin(&callout_lock);
522 			if (sq_locked)
523 				sleepq_release(&callout_wait);
524 			return (0);
525 		}
526 
527 		if (safe) {
528 			/*
529 			 * The current callout is running (or just
530 			 * about to run) and blocking is allowed, so
531 			 * just wait for the current invocation to
532 			 * finish.
533 			 */
534 			while (c == curr_callout) {
535 
536 				/*
537 				 * Use direct calls to sleepqueue interface
538 				 * instead of cv/msleep in order to avoid
539 				 * a LOR between callout_lock and sleepqueue
540 				 * chain spinlocks.  In effect, this code
541 				 * emulates an msleep_spin() call.
542 				 *
543 				 * If we already have the sleepqueue chain
544 				 * locked, then we can safely block.  If we
545 				 * don't already have it locked, however,
546 				 * we have to drop the callout_lock to lock
547 				 * it.  This opens several races, so we
548 				 * restart at the beginning once we have
549 				 * both locks.  If nothing has changed, then
550 				 * we will end up back here with sq_locked
551 				 * set.
552 				 */
553 				if (!sq_locked) {
554 					mtx_unlock_spin(&callout_lock);
555 					sleepq_lock(&callout_wait);
556 					sq_locked = 1;
557 					goto again;
558 				}
559 
560 				callout_wait = 1;
561 				DROP_GIANT();
562 				mtx_unlock_spin(&callout_lock);
563 				sleepq_add(&callout_wait,
564 				    &callout_lock.lock_object, "codrain",
565 				    SLEEPQ_SLEEP, 0);
566 				sleepq_wait(&callout_wait);
567 				sq_locked = 0;
568 
569 				/* Reacquire locks previously released. */
570 				PICKUP_GIANT();
571 				mtx_lock_spin(&callout_lock);
572 			}
573 		} else if (use_lock && !curr_cancelled) {
574 			/*
575 			 * The current callout is waiting for its
576 			 * lock which we hold.  Cancel the callout
577 			 * and return.  After our caller drops the
578 			 * lock, the callout will be skipped in
579 			 * softclock().
580 			 */
581 			curr_cancelled = 1;
582 			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
583 			    c, c->c_func, c->c_arg);
584 			mtx_unlock_spin(&callout_lock);
585 			KASSERT(!sq_locked, ("sleepqueue chain locked"));
586 			return (1);
587 		}
588 		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
589 		    c, c->c_func, c->c_arg);
590 		mtx_unlock_spin(&callout_lock);
591 		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
592 		return (0);
593 	}
594 	if (sq_locked)
595 		sleepq_release(&callout_wait);
596 
597 	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
598 
599 	if (nextsoftcheck == c) {
600 		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
601 	}
602 	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
603 
604 	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
605 	    c, c->c_func, c->c_arg);
606 
607 	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
608 		c->c_func = NULL;
609 		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
610 	}
611 	mtx_unlock_spin(&callout_lock);
612 	return (1);
613 }
614 
615 void
616 callout_init(c, mpsafe)
617 	struct	callout *c;
618 	int mpsafe;
619 {
620 	bzero(c, sizeof *c);
621 	if (mpsafe) {
622 		c->c_lock = NULL;
623 		c->c_flags = CALLOUT_RETURNUNLOCKED;
624 	} else {
625 		c->c_lock = &Giant.lock_object;
626 		c->c_flags = 0;
627 	}
628 }
629 
630 void
631 _callout_init_lock(c, lock, flags)
632 	struct	callout *c;
633 	struct	lock_object *lock;
634 	int flags;
635 {
636 	bzero(c, sizeof *c);
637 	c->c_lock = lock;
638 	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
639 	    ("callout_init_lock: bad flags %d", flags));
640 	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
641 	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
642 	KASSERT(lock == NULL || LOCK_CLASS(lock) == &lock_class_mtx_sleep ||
643 	    LOCK_CLASS(lock) == &lock_class_rw, ("%s: invalid lock class",
644 	    __func__));
645 	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
646 }
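
/*
 * A rough sketch of a lock-associated callout using the callout_init_mtx()
 * wrapper from <sys/callout.h> (the softc, its mutex and the handler are
 * hypothetical names).  softclock() acquires the mutex around the handler,
 * and the handler can mark itself serviced with callout_deactivate():
 *
 *	callout_init_mtx(&sc->ch, &sc->mtx, 0);
 *
 *	static void
 *	foo_timer(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		mtx_assert(&sc->mtx, MA_OWNED);
 *		callout_deactivate(&sc->ch);
 *		...
 *	}
 */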
647 
648 #ifdef APM_FIXUP_CALLTODO
649 /*
650  * Adjust the kernel calltodo timeout list.  This routine is used after
651  * an APM resume to recalculate the calltodo timer list values with the
652  * number of ticks we have been sleeping.  The next hardclock() will detect
653  * that there are fired timers and run softclock() to execute them.
654  *
655  * Please note, I have not done an exhaustive analysis of what code this
656  * might break.  I am motivated to have my select()'s and alarm()'s that
657  * have expired during suspend firing upon resume so that the applications
658  * which set the timer can do the maintenance the timer was for as close
659  * as possible to the originally intended time.  Testing this code for a
660  * week showed that resuming from a suspend resulted in 22 to 25 timers
661  * firing, which seemed independent of whether the suspend was 2 hours or
662  * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
663  */
664 void
665 adjust_timeout_calltodo(time_change)
666     struct timeval *time_change;
667 {
668 	register struct callout *p;
669 	unsigned long delta_ticks;
670 
671 	/*
672 	 * How many ticks were we asleep?
673 	 * (stolen from tvtohz()).
674 	 */
675 
676 	/* Don't do anything */
677 	if (time_change->tv_sec < 0)
678 		return;
679 	else if (time_change->tv_sec <= LONG_MAX / 1000000)
680 		delta_ticks = (time_change->tv_sec * 1000000 +
681 			       time_change->tv_usec + (tick - 1)) / tick + 1;
682 	else if (time_change->tv_sec <= LONG_MAX / hz)
683 		delta_ticks = time_change->tv_sec * hz +
684 			      (time_change->tv_usec + (tick - 1)) / tick + 1;
685 	else
686 		delta_ticks = LONG_MAX;
687 
688 	if (delta_ticks > INT_MAX)
689 		delta_ticks = INT_MAX;
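
	/*
	 * Rough example of the conversion above, assuming hz = 100 and
	 * therefore tick = 10000 usec: sleeping for 1.5 seconds yields
	 * (1500000 + 9999) / 10000 + 1 = 151 ticks to credit against the
	 * pending timers below.
	 */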
690 
691 	/*
692 	 * Now rip through the timer calltodo list looking for timers
693 	 * to expire.
694 	 */
695 
696 	/* don't collide with softclock() */
697 	mtx_lock_spin(&callout_lock);
698 	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
699 		p->c_time -= delta_ticks;
700 
701 		/* Break if the timer had more time on it than delta_ticks */
702 		if (p->c_time > 0)
703 			break;
704 
705 		/* take back the ticks the timer didn't use (p->c_time <= 0) */
706 		delta_ticks = -p->c_time;
707 	}
708 	mtx_unlock_spin(&callout_lock);
709 
710 	return;
711 }
712 #endif /* APM_FIXUP_CALLTODO */
713