xref: /freebsd/sys/kern/kern_timeout.c (revision 195ebc7e9e4b129de810833791a19dfb4349d6a9)
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
    "struct callout *");
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
    "struct callout *");

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
int callwheelsize, callwheelbits, callwheelmask;

struct callout_cpu {
	struct mtx		cc_lock;
	struct callout		*cc_callout;
	struct callout_tailq	*cc_callwheel;
	struct callout_list	cc_callfree;
	struct callout		*cc_next;
	struct callout		*cc_curr;
	void			*cc_cookie;
	int 			cc_softticks;
	int			cc_cancel;
	int			cc_waiting;
};

#ifdef SMP
struct callout_cpu cc_cpu[MAXCPU];
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)

static int timeout_cpu;

MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is cc_curr.
 *                     If cc_curr is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to 1 with both cc_lock and c_lock held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops cc_lock to acquire c_lock, and it calls
 *                     the handler only if cc_cancel is still 0 after
 *                     c_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     cc_waiting is nonzero.  Set only when
 *                     cc_curr is non-NULL.
 */
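
/*
 * Illustrative sketch of the cc_cancel handshake described above
 * (commentary only, not compiled).  softclock() publishes the running
 * callout and then races callout_stop()/callout_reset() for c_lock:
 *
 *	softclock()			stopper (holds c_lock)
 *	-----------			----------------------
 *	cc->cc_curr = c;
 *	cc->cc_cancel = 0;
 *	CC_UNLOCK(cc);
 *					cc->cc_cancel = 1;
 *	class->lc_lock(c_lock, ...);	return (1);
 *	if (cc->cc_cancel)
 *		goto skip;
 *
 * Whichever side touches cc_cancel first while holding c_lock decides
 * whether the handler runs.
 */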

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	struct callout_cpu *cc;

	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	cc->cc_callout = (struct callout *)v;
	v = (caddr_t)(cc->cc_callout + ncallout);
	cc->cc_callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(cc->cc_callwheel + callwheelsize);
	return (v);
}
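
/*
 * For example, with ncallout around 1000 (by default it is derived
 * from maxproc and maxfiles), the sizing loop above yields
 * callwheelsize = 1024, callwheelbits = 10 and callwheelmask = 0x3ff.
 */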

static void
callout_cpu_init(struct callout_cpu *cc)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&cc->cc_callwheel[i]);
	}
	if (cc->cc_callout == NULL)
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	callout_cpu_init(CC_CPU(timeout_cpu));
}

/*
 * Start standard softclock thread.
 */
void    *softclock_ih;

static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &softclock_ih))
		panic("died while creating standard software ithreads");
	cc->cc_cookie = softclock_ih;
#ifdef SMP
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (cpu == timeout_cpu)
			continue;
		if (CPU_ABSENT(cpu))
			continue;
		cc = CC_CPU(cpu);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(). */
		cc->cc_callwheel = malloc(
		    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
		    M_WAITOK);
		callout_cpu_init(cc);
	}
#endif
}

SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

void
callout_tick(void)
{
	struct callout_cpu *cc;
	int need_softclock;
	int bucket;

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	need_softclock = 0;
	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	for (; (cc->cc_softticks - ticks) < 0; cc->cc_softticks++) {
		bucket = cc->cc_softticks & callwheelmask;
		if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
			need_softclock = 1;
			break;
		}
	}
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (need_softclock)
		swi_sched(cc->cc_cookie, 0);
}

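/*
 * Lock the callout_cpu that a callout is currently bound to.  Since
 * callout_reset_on() can migrate a callout to another CPU, c_cpu may
 * change while we wait for the old CPU's lock; re-check the binding
 * after acquiring the lock and retry until it is stable.
 */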
static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while (cc->cc_softticks != ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct lock_class *class;
				struct lock_object *c_lock;
				int c_flags, sharedlock;

				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				class = (c->c_lock != NULL) ?
				    LOCK_CLASS(c->c_lock) : NULL;
				sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
				    0 : 1;
				c_lock = c->c_lock;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				cc->cc_curr = c;
				cc->cc_cancel = 0;
				CC_UNLOCK(cc);
				if (c_lock != NULL) {
					class->lc_lock(c_lock, sharedlock);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (cc->cc_cancel) {
						class->lc_unlock(c_lock);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					cc->cc_cancel = 1;

					if (c_lock == &Giant.lock_object) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						lockcalls++;
						CTR3(KTR_CALLOUT, "callout lock"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				SDT_PROBE(callout_execute, kernel, ,
				    callout_start, c, 0, 0, 0, 0);
				c_func(c_arg);
				SDT_PROBE(callout_execute, kernel, ,
				    callout_end, c, 0, 0, 0, 0);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				CTR1(KTR_CALLOUT, "callout %p finished", c);
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					class->lc_unlock(c_lock);
			skip:
				CC_LOCK(cc);
				/*
				 * If the current callout is locally
				 * allocated (from timeout(9))
				 * then put it on the freelist.
				 *
				 * Note: we need to check the cached
				 * copy of c_flags because if it was not
				 * local, then it's not safe to deref the
				 * callout pointer.
				 */
				if (c_flags & CALLOUT_LOCAL_ALLOC) {
					KASSERT(c->c_flags ==
					    CALLOUT_LOCAL_ALLOC,
					    ("corrupted callout"));
					c->c_func = NULL;
					SLIST_INSERT_HEAD(&cc->cc_callfree, c,
					    c_links.sle);
				}
				cc->cc_curr = NULL;
				if (cc->cc_waiting) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					cc->cc_waiting = 0;
					CC_UNLOCK(cc);
					wakeup(&cc->cc_waiting);
					CC_LOCK(cc);
				}
				steps = 0;
				c = cc->cc_next;
			}
		}
	}
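	/*
	 * Fold this invocation into the to_avg_* statistics: an
	 * exponential moving average with weight 1/256, scaled by 1000
	 * (hence "Units = 1/1000" in the sysctl descriptions above).
	 */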
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cc->cc_next = NULL;
	CC_UNLOCK(cc);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
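/*
 * Illustrative usage sketch (the handler my_expire() and its argument
 * sc are hypothetical, not part of this file):
 *
 *	struct callout_handle h;
 *
 *	callout_handle_init(&h);
 *	h = timeout(my_expire, sc, 5 * hz);	-- fire in ~5 seconds
 *	...
 *	untimeout(my_expire, sc, h);	-- harmless if it already fired
 */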
struct callout_handle
timeout(timeout_t *ftn, void *arg, int to_ticks)
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}

void
untimeout(timeout_t *ftn, void *arg, struct callout_handle handle)
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
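
/*
 * Illustrative usage sketch (struct my_softc, its tmr/mtx fields and
 * my_timer() are hypothetical).  A self-rescheduling, mutex-protected
 * callout, using the callout_init_mtx() wrapper from <sys/callout.h>:
 *
 *	callout_init_mtx(&sc->tmr, &sc->mtx, 0);
 *	callout_reset(&sc->tmr, hz, my_timer, sc);
 *
 *	static void
 *	my_timer(void *arg)
 *	{
 *		struct my_softc *sc = arg;	-- sc->mtx is held here
 *
 *		... periodic work ...
 *		callout_reset(&sc->tmr, hz, my_timer, sc);
 *	}
 *
 * Teardown should drop sc->mtx and then callout_drain(&sc->tmr) so a
 * handler that has already started is waited for, not just cancelled.
 */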
int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
	struct callout_cpu *cc;
	int cancelled = 0;

	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced.
	 */
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		cpu = c->c_cpu;
retry:
	cc = callout_lock(c);
	if (cc->cc_curr == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc->cc_cancel)
			cancelled = cc->cc_cancel = 1;
		if (cc->cc_waiting) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (cc->cc_next == c) {
			cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;
		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	}
	/*
	 * If the lock must migrate we have to check the state again as
	 * we can't hold both the new and old locks simultaneously.
	 */
	if (c->c_cpu != cpu) {
		c->c_cpu = cpu;
		CC_UNLOCK(cc);
		goto retry;
	}

	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
			  c, c_links.tqe);
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	CC_UNLOCK(cc);

	return (cancelled);
}

/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
	return (callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu));
}

int
callout_schedule(struct callout *c, int to_ticks)
{
	return (callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu));
}

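/*
 * <sys/callout.h> maps callout_stop(c) to _callout_stop_safe(c, 0) and
 * callout_drain(c) to _callout_stop_safe(c, 1): "safe" callers are
 * prepared to sleep until a currently running handler has finished.
 */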
int
_callout_stop_safe(struct callout *c, int safe)
{
	struct callout_cpu *cc;
	struct lock_class *class;
	int use_lock, sq_locked;

	/*
	 * Some old subsystems don't hold Giant while running a callout_stop(),
	 * so just discard this check for the moment.
	 */
	if (!safe && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
		else {
			use_lock = 1;
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
		}
	} else
		use_lock = 0;

	sq_locked = 0;
again:
	cc = callout_lock(c);
	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (cc->cc_curr != c) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			if (sq_locked)
				sleepq_release(&cc->cc_waiting);
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc->cc_curr == c) {
				/*
				 * Use direct calls to sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between cc_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * emulates a msleep_spin() call actually.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(&cc->cc_waiting);
					sq_locked = 1;
					goto again;
				}
				cc->cc_waiting = 1;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(&cc->cc_waiting,
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(&cc->cc_waiting, 0);
				sq_locked = 0;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
			}
		} else if (use_lock && !cc->cc_cancel) {
			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().
			 */
			cc->cc_cancel = 1;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		CC_UNLOCK(cc);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
	if (sq_locked)
		sleepq_release(&cc->cc_waiting);

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (cc->cc_next == c) {
		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
	    c_links.tqe);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
	CC_UNLOCK(cc);
	return (1);
}

void
callout_init(struct callout *c, int mpsafe)
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_flags = 0;
	}
	c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}
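
/*
 * <sys/callout.h> wraps _callout_init_lock(): callout_init_mtx() and
 * callout_init_rw() pass the lock_object embedded in the given mutex
 * or rwlock (or NULL) as the "lock" argument above.
 */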

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(struct timeval *time_change)
{
	struct callout *p;
	struct callout_cpu *cc;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.  Note that this legacy code predates the per-CPU
	 * callwheels: it assumes the old single "calltodo" list and
	 * touches only the timeout(9) state kept on timeout_cpu.
	 */

	/* don't collide with softclock() */
	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);

	return;
}
#endif /* APM_FIXUP_CALLTODO */