xref: /freebsd/sys/kern/kern_timeout.c (revision 7750ad47a9a7dbc83f87158464170c8640723293)
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef SMP
#include <machine/cpu.h>
#endif

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
    "struct callout *");
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
    "struct callout *");

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
int callwheelsize, callwheelbits, callwheelmask;

/*
 * The callout cpu migration entity represents the information necessary
 * to describe a migrating callout to its new callout cpu.
 * Caching this information is very important for deferring migration when
 * the migrating callout is already running.
 */
struct cc_mig_ent {
#ifdef SMP
	void	(*ce_migration_func)(void *);
	void	*ce_migration_arg;
	int	ce_migration_cpu;
	int	ce_migration_ticks;
#endif
};

/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 * In particular:
 *	cc_ticks is updated once per hardclock tick in callout_tick().
 *	It tracks the global 'ticks' in a way that spares the individual
 *	threads from worrying about races in the order in which
 *	hardclock() and hardclock_cpu() run on the various CPUs.
 *	cc_softticks is advanced in callout_tick() to point to the
 *	first entry in cc_callwheel that may need handling. In turn,
 *	a softclock() is scheduled so it can serve the various entries i
 *	such that cc_softticks <= i <= cc_ticks.
 *	XXX maybe cc_softticks and cc_ticks should be volatile ?
 *
 *	cc_ticks is also used in callout_cc_add() to determine
 *	when the callout should be served.
 */
struct callout_cpu {
	struct cc_mig_ent	cc_migrating_entity;
	struct mtx		cc_lock;
	struct callout		*cc_callout;
	struct callout_tailq	*cc_callwheel;
	struct callout_list	cc_callfree;
	struct callout		*cc_next;
	struct callout		*cc_curr;
	void			*cc_cookie;
	int 			cc_ticks;
	int 			cc_softticks;
	int			cc_cancel;
	int			cc_waiting;
	int 			cc_firsttick;
};

#ifdef SMP
#define	cc_migration_func	cc_migrating_entity.ce_migration_func
#define	cc_migration_arg	cc_migrating_entity.ce_migration_arg
#define	cc_migration_cpu	cc_migrating_entity.ce_migration_cpu
#define	cc_migration_ticks	cc_migrating_entity.ce_migration_ticks

struct callout_cpu cc_cpu[MAXCPU];
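/*
 * CPUBLOCK is a sentinel value for c_cpu: while a callout is migrating
 * between CPUs its c_cpu is set to CPUBLOCK, making callout_lock() spin
 * until the new binding is published (see callout_cpu_switch()).
 */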
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)

static int timeout_cpu;
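/*
 * Optional hook, invoked under cc_lock by callout_cc_add() when a new
 * callout becomes the earliest one on a wheel; the event timer code can
 * set this to reprogram the next hardware timer interrupt.  The second
 * argument is the number of ticks from now at which the callout fires.
 */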
void (*callout_new_inserted)(int cpu, int ticks) = NULL;

static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is cc_curr.
 *                     If cc_curr is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to 1 with both cc_lock and c_lock held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops cc_lock to acquire c_lock, and it calls
 *                     the handler only if cc_cancel is still 0 after
 *                     c_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     cc_waiting is nonzero.  Set only when
 *                     cc_curr is non-NULL.
 */

/*
 * Resets the migration entity tied to a specific callout cpu.
 */
static void
cc_cme_cleanup(struct callout_cpu *cc)
{

#ifdef SMP
	cc->cc_migration_cpu = CPUBLOCK;
	cc->cc_migration_ticks = 0;
	cc->cc_migration_func = NULL;
	cc->cc_migration_arg = NULL;
#endif
}

/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cme_migrating(struct callout_cpu *cc)
{

#ifdef SMP
	return (cc->cc_migration_cpu != CPUBLOCK);
#else
	return (0);
#endif
}

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	struct callout_cpu *cc;

	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;
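	/*
	 * For example (illustration, not from the original source):
	 * ncallout is sized elsewhere in the kernel as roughly
	 * 16 + maxproc + maxfiles; a value of 700 would give
	 * callwheelsize = 1024, callwheelbits = 10 and
	 * callwheelmask = 0x3ff, so a callout expiring at time t
	 * hashes to bucket (t & 0x3ff).
	 */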

	cc->cc_callout = (struct callout *)v;
	v = (caddr_t)(cc->cc_callout + ncallout);
	cc->cc_callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(cc->cc_callwheel + callwheelsize);
	return (v);
}

static void
callout_cpu_init(struct callout_cpu *cc)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&cc->cc_callwheel[i]);
	}
	cc_cme_cleanup(cc);
	if (cc->cc_callout == NULL)
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects the incoming callout cpu to be locked and
 * returns with the outgoing callout cpu locked.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	/*
	 * Block interrupts and preemption while the callout cpu is
	 * blocked, in order to avoid deadlocks with a preempting
	 * thread that may be waiting to acquire the callout cpu lock.
	 */
	c->c_cpu = CPUBLOCK;
	spinlock_enter();
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	spinlock_exit();
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	callout_cpu_init(CC_CPU(timeout_cpu));
}

/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &cc->cc_cookie))
		panic("died while creating standard software ithreads");
#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(). */
		cc->cc_callwheel = malloc(
		    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
		    M_WAITOK);
		callout_cpu_init(cc);
	}
#endif
}

SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

void
callout_tick(void)
{
	struct callout_cpu *cc;
	int need_softclock;
	int bucket;

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	need_softclock = 0;
	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	cc->cc_firsttick = cc->cc_ticks = ticks;
	for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
		bucket = cc->cc_softticks & callwheelmask;
		if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
			need_softclock = 1;
			break;
		}
	}
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (need_softclock)
		swi_sched(cc->cc_cookie, 0);
}

int
callout_tickstofirst(int limit)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *sc;
	int curticks;
	int skip = 1;

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	curticks = cc->cc_ticks;
	while (skip < ncallout && skip < limit) {
		sc = &cc->cc_callwheel[(curticks + skip) & callwheelmask];
		/* search scanning ticks */
		TAILQ_FOREACH(c, sc, c_links.tqe) {
			if (c->c_time - curticks <= ncallout)
				goto out;
		}
		skip++;
	}
out:
	cc->cc_firsttick = curticks + skip;
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	return (skip);
}

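/*
 * Return, locked, the callout cpu that a callout is bound to.  Loops
 * because the binding may change (and may read CPUBLOCK while a
 * migration is in flight) between fetching c_cpu and taking the lock.
 */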
static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

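/*
 * Insert a callout into the callwheel of the given callout cpu, which
 * must be locked.  A to_ticks value <= 0 is rounded up to 1 so the
 * callout always fires in the future.  If the new callout becomes the
 * earliest one, notify the callout_new_inserted hook (if set).
 */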
static void
callout_cc_add(struct callout *c, struct callout_cpu *cc, int to_ticks,
    void (*func)(void *), void *arg, int cpu)
{

	CC_LOCK_ASSERT(cc);

	if (to_ticks <= 0)
		to_ticks = 1;
	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = func;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
	if ((c->c_time - cc->cc_firsttick) < 0 &&
	    callout_new_inserted != NULL) {
		cc->cc_firsttick = c->c_time;
		(*callout_new_inserted)(cpu,
		    to_ticks + (ticks - cc->cc_ticks));
	}
}

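/*
 * Remove bookkeeping for a callout that is being discarded: advance
 * cc_next if it points at the callout, and return callouts allocated
 * by timeout(9) to the per-cpu free list.  The callout cpu lock is
 * expected to be held.
 */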
static void
callout_cc_del(struct callout *c, struct callout_cpu *cc)
{

	if (cc->cc_next == c)
		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

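/*
 * Run a single expired callout: cache its state, drop cc_lock, take the
 * callout's own lock (if any) and invoke the handler, re-checking
 * cc_cancel after the lock switch.  Once cc_lock is reacquired, handle
 * freelist recycling, callout_drain() wakeups and deferred migration.
 * Returns the next callout in the bucket via the cc_next cache.
 */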
static struct callout *
softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
    int *lockcalls, int *gcalls)
{
	void (*c_func)(void *);
	void *c_arg;
	struct lock_class *class;
	struct lock_object *c_lock;
	int c_flags, sharedlock;
#ifdef SMP
	struct callout_cpu *new_cc;
	void (*new_func)(void *);
	void *new_arg;
	int new_cpu, new_ticks;
#endif
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

	cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
	sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
	c_lock = c->c_lock;
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_flags = c->c_flags;
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		c->c_flags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_flags &= ~CALLOUT_PENDING;
	cc->cc_curr = c;
	cc->cc_cancel = 0;
	CC_UNLOCK(cc);
	if (c_lock != NULL) {
		class->lc_lock(c_lock, sharedlock);
		/*
		 * The callout may have been cancelled
		 * while we switched locks.
		 */
		if (cc->cc_cancel) {
			class->lc_unlock(c_lock);
			goto skip;
		}
		/* The callout cannot be stopped now. */
		cc->cc_cancel = 1;

		if (c_lock == &Giant.lock_object) {
			(*gcalls)++;
			CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
			    c, c_func, c_arg);
		} else {
			(*lockcalls)++;
			CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
			    c, c_func, c_arg);
		}
	} else {
		(*mpcalls)++;
		CTR3(KTR_CALLOUT, "callout mpsafe %p func %p arg %p",
		    c, c_func, c_arg);
	}
#ifdef DIAGNOSTIC
	binuptime(&bt1);
#endif
	THREAD_NO_SLEEPING();
	SDT_PROBE(callout_execute, kernel, , callout_start, c, 0, 0, 0, 0);
	c_func(c_arg);
	SDT_PROBE(callout_execute, kernel, , callout_end, c, 0, 0, 0, 0);
	THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
	binuptime(&bt2);
	bintime_sub(&bt2, &bt1);
	if (bt2.frac > maxdt) {
		if (lastfunc != c_func || bt2.frac > maxdt * 2) {
			bintime2timespec(&bt2, &ts2);
			printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = bt2.frac;
		lastfunc = c_func;
	}
#endif
	CTR1(KTR_CALLOUT, "callout %p finished", c);
	if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
		class->lc_unlock(c_lock);
skip:
	CC_LOCK(cc);
	/*
	 * If the current callout is locally allocated (from
	 * timeout(9)) then put it on the freelist.
	 *
	 * Note: we need to check the cached copy of c_flags because
	 * if it was not local, then it's not safe to deref the
	 * callout pointer.
	 */
	if (c_flags & CALLOUT_LOCAL_ALLOC) {
		KASSERT(c->c_flags == CALLOUT_LOCAL_ALLOC,
		    ("corrupted callout"));
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
	cc->cc_curr = NULL;
	if (cc->cc_waiting) {
		/*
		 * There is someone waiting for the
		 * callout to complete.
		 * If the callout was scheduled for
		 * migration just cancel it.
		 */
		if (cc_cme_migrating(cc))
			cc_cme_cleanup(cc);
		cc->cc_waiting = 0;
		CC_UNLOCK(cc);
		wakeup(&cc->cc_waiting);
		CC_LOCK(cc);
	} else if (cc_cme_migrating(cc)) {
#ifdef SMP
		/*
		 * If the callout was scheduled for
		 * migration just perform it now.
		 */
		new_cpu = cc->cc_migration_cpu;
		new_ticks = cc->cc_migration_ticks;
		new_func = cc->cc_migration_func;
		new_arg = cc->cc_migration_arg;
		cc_cme_cleanup(cc);

		/*
		 * Handle deferred callout stops
		 */
		if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
			CTR3(KTR_CALLOUT,
			     "deferred cancelled %p func %p arg %p",
			     c, new_func, new_arg);
			callout_cc_del(c, cc);
			goto nextc;
		}

		c->c_flags &= ~CALLOUT_DFRMIGRATION;

		/*
		 * We should assert here that the callout is not
		 * destroyed, but that is not easy to do.
		 */
		new_cc = callout_cpu_switch(c, cc, new_cpu);
		callout_cc_add(c, new_cc, new_ticks, new_func, new_arg,
		    new_cpu);
		CC_UNLOCK(new_cc);
		CC_LOCK(cc);
#else
		panic("migration should not happen");
#endif
	}
#ifdef SMP
nextc:
#endif
	return (cc->cc_next);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
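	/*
	 * Process buckets until cc_softticks is one ahead of cc_ticks.
	 * The test compares the raw counters for inequality, so tick
	 * counter wraparound is harmless.
	 */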
	while (cc->cc_softticks - 1 != cc->cc_ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c != NULL) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c = softclock_call_cc(c, cc, &mpcalls,
				    &lockcalls, &gcalls);
				steps = 0;
			}
		}
	}
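	/*
	 * Update the exported statistics as exponential moving averages:
	 * each sample is scaled by 1000 (hence "Units = 1/1000" in the
	 * sysctl descriptions) and blended in with a weight of 1/256.
	 */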
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cc->cc_next = NULL;
	CC_UNLOCK(cc);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
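/*
 * Illustrative sketch (not part of the original file): a typical driver
 * uses this interface roughly as follows, assuming a hypothetical softc
 * with a mutex sc_mtx protecting its state:
 *
 *	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
 *	...
 *	mtx_lock(&sc->sc_mtx);
 *	callout_reset(&sc->sc_callout, hz, foo_timeout, sc);
 *	mtx_unlock(&sc->sc_mtx);
 *	...
 *	callout_drain(&sc->sc_callout);		(on detach, sc_mtx not held)
 *
 * callout_reset() returns non-zero when it cancelled a still-pending
 * invocation while rescheduling; callout_drain() also waits for a
 * currently running handler to finish before returning.
 */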
int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
	struct callout_cpu *cc;
	int cancelled = 0;

	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced.
	 */
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		cpu = c->c_cpu;
	cc = callout_lock(c);
	if (cc->cc_curr == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc->cc_cancel)
			cancelled = cc->cc_cancel = 1;
		if (cc->cc_waiting) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (cc->cc_next == c) {
			cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;
		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	}

#ifdef SMP
	/*
	 * If the callout must migrate try to perform it immediately.
	 * If the callout is currently running, just defer the migration
	 * to a more appropriate moment.
	 */
	if (c->c_cpu != cpu) {
		if (cc->cc_curr == c) {
			cc->cc_migration_cpu = cpu;
			cc->cc_migration_ticks = to_ticks;
			cc->cc_migration_func = ftn;
			cc->cc_migration_arg = arg;
			c->c_flags |= CALLOUT_DFRMIGRATION;
			CTR5(KTR_CALLOUT,
		    "migration of %p func %p arg %p in %d to %u deferred",
			    c, c->c_func, c->c_arg, to_ticks, cpu);
			CC_UNLOCK(cc);
			return (cancelled);
		}
		cc = callout_cpu_switch(c, cc, cpu);
	}
#endif

	callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	CC_UNLOCK(cc);

	return (cancelled);
}

/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}

int
_callout_stop_safe(c, safe)
	struct	callout *c;
	int	safe;
{
	struct callout_cpu *cc, *old_cc;
	struct lock_class *class;
	int use_lock, sq_locked;

	/*
	 * Some old subsystems don't hold Giant while running a callout_stop(),
	 * so just discard this check for the moment.
	 */
	if (!safe && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
		else {
			use_lock = 1;
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
		}
	} else
		use_lock = 0;

	sq_locked = 0;
	old_cc = NULL;
again:
	cc = callout_lock(c);

	/*
	 * If the callout was migrating while the callout cpu lock was
	 * dropped, just drop the sleepqueue lock and check the states
	 * again.
	 */
	if (sq_locked != 0 && cc != old_cc) {
#ifdef SMP
		CC_UNLOCK(cc);
		sleepq_release(&old_cc->cc_waiting);
		sq_locked = 0;
		old_cc = NULL;
		goto again;
#else
		panic("migration should not happen");
#endif
	}

	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (cc->cc_curr != c) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			if (sq_locked)
				sleepq_release(&cc->cc_waiting);
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc->cc_curr == c) {

				/*
				 * Use direct calls to sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between cc_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * actually emulates a msleep_spin() call.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(&cc->cc_waiting);
					sq_locked = 1;
					old_cc = cc;
					goto again;
				}

				/*
				 * Migration could be cancelled here, but
				 * as it is not clear when that can safely
				 * be done, just let softclock() take
				 * care of it.
				 */
				cc->cc_waiting = 1;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(&cc->cc_waiting,
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(&cc->cc_waiting, 0);
				sq_locked = 0;
				old_cc = NULL;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
			}
		} else if (use_lock && !cc->cc_cancel) {
			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().
			 */
			cc->cc_cancel = 1;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			KASSERT(!cc_cme_migrating(cc),
			    ("callout wrongly scheduled for migration"));
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		} else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
			c->c_flags &= ~CALLOUT_DFRMIGRATION;
			CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		CC_UNLOCK(cc);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
	if (sq_locked)
		sleepq_release(&cc->cc_waiting);

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);
	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
	    c_links.tqe);
	callout_cc_del(c, cc);

	CC_UNLOCK(cc);
	return (1);
}

void
callout_init(c, mpsafe)
	struct	callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_flags = 0;
	}
	c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(c, lock, flags)
	struct	callout *c;
	struct	lock_object *lock;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}
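
/*
 * Note: <sys/callout.h> wraps _callout_init_lock() in the convenience
 * macros callout_init_mtx() and callout_init_rw() for the common mutex
 * and rwlock cases.  The KASSERTs above reject spin locks and sleepable
 * locks, so e.g. an sx lock cannot be associated with a callout.
 */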

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
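/*
 * XXX: this block predates the per-cpu callwheel code: it walks the
 * old "calltodo" list and takes CC_LOCK() on an undeclared "cc", so it
 * would not compile if APM_FIXUP_CALLTODO were defined.
 */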
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);

	return;
}
#endif /* APM_FIXUP_CALLTODO */