xref: /freebsd/sys/kern/kern_timeout.c (revision 15b1eb142c4c215f5e80926f070650bee57b3e59)
1df8bae1dSRodney W. Grimes /*-
2df8bae1dSRodney W. Grimes  * Copyright (c) 1982, 1986, 1991, 1993
3df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
4df8bae1dSRodney W. Grimes  * (c) UNIX System Laboratories, Inc.
5df8bae1dSRodney W. Grimes  * All or some portions of this file are derived from material licensed
6df8bae1dSRodney W. Grimes  * to the University of California by American Telephone and Telegraph
7df8bae1dSRodney W. Grimes  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8df8bae1dSRodney W. Grimes  * the permission of UNIX System Laboratories, Inc.
9df8bae1dSRodney W. Grimes  *
10df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
11df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
12df8bae1dSRodney W. Grimes  * are met:
13df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
14df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
15df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
16df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
17df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
18df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
19df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
20df8bae1dSRodney W. Grimes  *    without specific prior written permission.
21df8bae1dSRodney W. Grimes  *
22df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
33df8bae1dSRodney W. Grimes  *
34acc8326dSGarrett Wollman  *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
35df8bae1dSRodney W. Grimes  */
36df8bae1dSRodney W. Grimes 
37677b542eSDavid E. O'Brien #include <sys/cdefs.h>
38677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
39677b542eSDavid E. O'Brien 
405b999a6bSDavide Italiano #include "opt_callout_profiling.h"
415b999a6bSDavide Italiano #if defined(__arm__)
425b999a6bSDavide Italiano #include "opt_timer.h"
435b999a6bSDavide Italiano #endif
44c445c3c7SAdrian Chadd #include "opt_rss.h"
4591dd9aaeSRobert Watson 
46df8bae1dSRodney W. Grimes #include <sys/param.h>
47df8bae1dSRodney W. Grimes #include <sys/systm.h>
488d809d50SJeff Roberson #include <sys/bus.h>
4915b7a470SPoul-Henning Kamp #include <sys/callout.h>
50f8ccf82aSAndre Oppermann #include <sys/file.h>
518d809d50SJeff Roberson #include <sys/interrupt.h>
52df8bae1dSRodney W. Grimes #include <sys/kernel.h>
53ff7ec58aSRobert Watson #include <sys/ktr.h>
54f34fa851SJohn Baldwin #include <sys/lock.h>
558d809d50SJeff Roberson #include <sys/malloc.h>
56cb799bfeSJohn Baldwin #include <sys/mutex.h>
5721f9e816SJohn Baldwin #include <sys/proc.h>
5891dd9aaeSRobert Watson #include <sys/sdt.h>
596a0ce57dSAttilio Rao #include <sys/sleepqueue.h>
6022ee8c4fSPoul-Henning Kamp #include <sys/sysctl.h>
618d809d50SJeff Roberson #include <sys/smp.h>
62df8bae1dSRodney W. Grimes 
631283e9cdSAttilio Rao #ifdef SMP
641283e9cdSAttilio Rao #include <machine/cpu.h>
651283e9cdSAttilio Rao #endif
661283e9cdSAttilio Rao 
675b999a6bSDavide Italiano #ifndef NO_EVENTTIMERS
685b999a6bSDavide Italiano DPCPU_DECLARE(sbintime_t, hardclocktime);
695b999a6bSDavide Italiano #endif
705b999a6bSDavide Italiano 
7191dd9aaeSRobert Watson SDT_PROVIDER_DEFINE(callout_execute);
72d9fae5abSAndriy Gapon SDT_PROBE_DEFINE1(callout_execute, kernel, , callout__start,
7391dd9aaeSRobert Watson     "struct callout *");
74d9fae5abSAndriy Gapon SDT_PROBE_DEFINE1(callout_execute, kernel, , callout__end,
7591dd9aaeSRobert Watson     "struct callout *");
7691dd9aaeSRobert Watson 
775b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING
7822ee8c4fSPoul-Henning Kamp static int avg_depth;
7922ee8c4fSPoul-Henning Kamp SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
8022ee8c4fSPoul-Henning Kamp     "Average number of items examined per softclock call. Units = 1/1000");
8122ee8c4fSPoul-Henning Kamp static int avg_gcalls;
8222ee8c4fSPoul-Henning Kamp SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
8322ee8c4fSPoul-Henning Kamp     "Average number of Giant callouts made per softclock call. Units = 1/1000");
8464b9ee20SAttilio Rao static int avg_lockcalls;
8564b9ee20SAttilio Rao SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
8664b9ee20SAttilio Rao     "Average number of lock callouts made per softclock call. Units = 1/1000");
8722ee8c4fSPoul-Henning Kamp static int avg_mpcalls;
8822ee8c4fSPoul-Henning Kamp SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
8922ee8c4fSPoul-Henning Kamp     "Average number of MP callouts made per softclock call. Units = 1/1000");
905b999a6bSDavide Italiano static int avg_depth_dir;
915b999a6bSDavide Italiano SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
925b999a6bSDavide Italiano     "Average number of direct callouts examined per callout_process call. "
935b999a6bSDavide Italiano     "Units = 1/1000");
945b999a6bSDavide Italiano static int avg_lockcalls_dir;
955b999a6bSDavide Italiano SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
965b999a6bSDavide Italiano     &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
975b999a6bSDavide Italiano     "callout_process call. Units = 1/1000");
985b999a6bSDavide Italiano static int avg_mpcalls_dir;
995b999a6bSDavide Italiano SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
1005b999a6bSDavide Italiano     0, "Average number of MP direct callouts made per callout_process call. "
1015b999a6bSDavide Italiano     "Units = 1/1000");
1025b999a6bSDavide Italiano #endif
103f8ccf82aSAndre Oppermann 
104f8ccf82aSAndre Oppermann static int ncallout;
105af3b2549SHans Petter Selasky SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &ncallout, 0,
106f8ccf82aSAndre Oppermann     "Number of entries in callwheel and size of timeout() preallocation");
107f8ccf82aSAndre Oppermann 
108c445c3c7SAdrian Chadd #ifdef	RSS
109c445c3c7SAdrian Chadd static int pin_default_swi = 1;
110c445c3c7SAdrian Chadd static int pin_pcpu_swi = 1;
111c445c3c7SAdrian Chadd #else
112ac75ee9fSAdrian Chadd static int pin_default_swi = 0;
113ac75ee9fSAdrian Chadd static int pin_pcpu_swi = 0;
114c445c3c7SAdrian Chadd #endif
115ac75ee9fSAdrian Chadd 
116af3b2549SHans Petter Selasky SYSCTL_INT(_kern, OID_AUTO, pin_default_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_default_swi,
117ac75ee9fSAdrian Chadd     0, "Pin the default (non-per-cpu) swi (shared with PCPU 0 swi)");
118af3b2549SHans Petter Selasky SYSCTL_INT(_kern, OID_AUTO, pin_pcpu_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_pcpu_swi,
119ac75ee9fSAdrian Chadd     0, "Pin the per-CPU swis (except PCPU 0, which is also the default)");
120ac75ee9fSAdrian Chadd 
12115b7a470SPoul-Henning Kamp /*
12215b7a470SPoul-Henning Kamp  * TODO:
12315b7a470SPoul-Henning Kamp  *	allocate more timeout table slots when table overflows.
12415b7a470SPoul-Henning Kamp  */
1253f555c45SDavide Italiano u_int callwheelsize, callwheelmask;
126f23b4c91SGarrett Wollman 
12720c510f8SLuigi Rizzo /*
128a115fb62SHans Petter Selasky  * The callout cpu exec entities represent the information necessary to
129a115fb62SHans Petter Selasky  * describe the state of callouts currently running on the CPU and the
130a115fb62SHans Petter Selasky  * state necessary for migrating callouts to a new callout cpu. In particular,
131a115fb62SHans Petter Selasky  * the first entry of the array cc_exec_entity holds the information for a
132a115fb62SHans Petter Selasky  * callout running in SWI thread context, while the second one holds the
133a115fb62SHans Petter Selasky  * information for a callout running directly from hardware interrupt context.
134a115fb62SHans Petter Selasky  * The cached information is very important for deferring migration when
135a115fb62SHans Petter Selasky  * the migrating callout is already running.
1361283e9cdSAttilio Rao  */
1375b999a6bSDavide Italiano struct cc_exec {
1385b999a6bSDavide Italiano 	struct callout		*cc_curr;
139a115fb62SHans Petter Selasky #ifdef SMP
140a115fb62SHans Petter Selasky 	void			(*ce_migration_func)(void *);
141a115fb62SHans Petter Selasky 	void			*ce_migration_arg;
142a115fb62SHans Petter Selasky 	int			ce_migration_cpu;
143a115fb62SHans Petter Selasky 	sbintime_t		ce_migration_time;
144a115fb62SHans Petter Selasky 	sbintime_t		ce_migration_prec;
145a115fb62SHans Petter Selasky #endif
146a4a3ce99SDavide Italiano 	bool			cc_cancel;
147a115fb62SHans Petter Selasky 	bool			cc_waiting;
1481283e9cdSAttilio Rao };
1491283e9cdSAttilio Rao 
1501283e9cdSAttilio Rao /*
151a115fb62SHans Petter Selasky  * There is one struct callout_cpu per cpu, holding all relevant
15220c510f8SLuigi Rizzo  * state for the callout processing thread on the individual CPU.
15320c510f8SLuigi Rizzo  */
1548d809d50SJeff Roberson struct callout_cpu {
1554ceaf45dSAttilio Rao 	struct mtx_padalign	cc_lock;
1565b999a6bSDavide Italiano 	struct cc_exec 		cc_exec_entity[2];
15766525b2dSRandall Stewart 	struct callout		*cc_next;
1588d809d50SJeff Roberson 	struct callout		*cc_callout;
1595b999a6bSDavide Italiano 	struct callout_list	*cc_callwheel;
1605b999a6bSDavide Italiano 	struct callout_tailq	cc_expireq;
1615b999a6bSDavide Italiano 	struct callout_slist	cc_callfree;
1625b999a6bSDavide Italiano 	sbintime_t		cc_firstevent;
1635b999a6bSDavide Italiano 	sbintime_t		cc_lastscan;
1648d809d50SJeff Roberson 	void			*cc_cookie;
1655b999a6bSDavide Italiano 	u_int			cc_bucket;
166*15b1eb14SRandall Stewart 	u_int			cc_inited;
167232e8b52SJohn Baldwin 	char			cc_ktr_event_name[20];
1688d809d50SJeff Roberson };
1698d809d50SJeff Roberson 
170d2854fa4SRandall Stewart #define	cc_exec_curr(cc, dir)		cc->cc_exec_entity[dir].cc_curr
17166525b2dSRandall Stewart #define	cc_exec_next(cc)		cc->cc_next
172d2854fa4SRandall Stewart #define	cc_exec_cancel(cc, dir)		cc->cc_exec_entity[dir].cc_cancel
173d2854fa4SRandall Stewart #define	cc_exec_waiting(cc, dir)	cc->cc_exec_entity[dir].cc_waiting
1748d809d50SJeff Roberson #ifdef SMP
175d2854fa4SRandall Stewart #define	cc_migration_func(cc, dir)	cc->cc_exec_entity[dir].ce_migration_func
176d2854fa4SRandall Stewart #define	cc_migration_arg(cc, dir)	cc->cc_exec_entity[dir].ce_migration_arg
177d2854fa4SRandall Stewart #define	cc_migration_cpu(cc, dir)	cc->cc_exec_entity[dir].ce_migration_cpu
178d2854fa4SRandall Stewart #define	cc_migration_time(cc, dir)	cc->cc_exec_entity[dir].ce_migration_time
179d2854fa4SRandall Stewart #define	cc_migration_prec(cc, dir)	cc->cc_exec_entity[dir].ce_migration_prec
180a115fb62SHans Petter Selasky 
1818d809d50SJeff Roberson struct callout_cpu cc_cpu[MAXCPU];
1821283e9cdSAttilio Rao #define	CPUBLOCK	MAXCPU
1838d809d50SJeff Roberson #define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
1848d809d50SJeff Roberson #define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
1858d809d50SJeff Roberson #else
1868d809d50SJeff Roberson struct callout_cpu cc_cpu;
1878d809d50SJeff Roberson #define	CC_CPU(cpu)	&cc_cpu
1888d809d50SJeff Roberson #define	CC_SELF()	&cc_cpu
1898d809d50SJeff Roberson #endif
1908d809d50SJeff Roberson #define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
1918d809d50SJeff Roberson #define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
1921283e9cdSAttilio Rao #define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)
1938d809d50SJeff Roberson 
1948d809d50SJeff Roberson static int timeout_cpu;
1955b999a6bSDavide Italiano 
196232e8b52SJohn Baldwin static void	callout_cpu_init(struct callout_cpu *cc, int cpu);
1975b999a6bSDavide Italiano static void	softclock_call_cc(struct callout *c, struct callout_cpu *cc,
1985b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING
1995b999a6bSDavide Italiano 		    int *mpcalls, int *lockcalls, int *gcalls,
2005b999a6bSDavide Italiano #endif
2015b999a6bSDavide Italiano 		    int direct);
2028d809d50SJeff Roberson 
203d745c852SEd Schouten static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
20449a74476SColin Percival 
205a115fb62SHans Petter Selasky /**
206a115fb62SHans Petter Selasky  * Locked by cc_lock:
207a115fb62SHans Petter Selasky  *   cc_curr         - If a callout is in progress, it is cc_curr.
208a115fb62SHans Petter Selasky  *                     If cc_curr is non-NULL, threads waiting in
209a115fb62SHans Petter Selasky  *                     callout_drain() will be woken up as soon as the
210a115fb62SHans Petter Selasky  *                     relevant callout completes.
211a115fb62SHans Petter Selasky  *   cc_cancel       - Changing to true with both c_lock and cc_lock held
212a115fb62SHans Petter Selasky  *                     guarantees that the current callout will not run.
213a115fb62SHans Petter Selasky  *                     The softclock_call_cc() function sets this to false
214a115fb62SHans Petter Selasky  *                     before it drops cc_lock to acquire c_lock, and it
215a115fb62SHans Petter Selasky  *                     calls the handler only if cc_cancel is still false
216a115fb62SHans Petter Selasky  *                     after c_lock is successfully acquired.
217a115fb62SHans Petter Selasky  *   cc_waiting      - If a thread is waiting in callout_drain(), then
218a115fb62SHans Petter Selasky  *                     cc_waiting is true.  Set only when
219a115fb62SHans Petter Selasky  *                     cc_curr is non-NULL.
220a115fb62SHans Petter Selasky  */
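/*
 * Illustrative sketch (not from the original source) of the drain handshake
 * implied by the fields above: softclock_call_cc() publishes the running
 * callout in cc_curr, drops cc_lock and runs the handler; callout_drain(),
 * seeing cc_curr == c, sets cc_waiting and sleeps on that field; when the
 * handler returns, softclock_call_cc() clears cc_waiting and wakes up the
 * sleeping thread.
 */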
221a115fb62SHans Petter Selasky 
222df8bae1dSRodney W. Grimes /*
223a115fb62SHans Petter Selasky  * Resets the execution entity tied to a specific callout cpu.
224a115fb62SHans Petter Selasky  */
225a115fb62SHans Petter Selasky static void
226a115fb62SHans Petter Selasky cc_cce_cleanup(struct callout_cpu *cc, int direct)
227a115fb62SHans Petter Selasky {
228a115fb62SHans Petter Selasky 
229d2854fa4SRandall Stewart 	cc_exec_curr(cc, direct) = NULL;
230d2854fa4SRandall Stewart 	cc_exec_cancel(cc, direct) = false;
231d2854fa4SRandall Stewart 	cc_exec_waiting(cc, direct) = false;
232a115fb62SHans Petter Selasky #ifdef SMP
233d2854fa4SRandall Stewart 	cc_migration_cpu(cc, direct) = CPUBLOCK;
234d2854fa4SRandall Stewart 	cc_migration_time(cc, direct) = 0;
235d2854fa4SRandall Stewart 	cc_migration_prec(cc, direct) = 0;
236d2854fa4SRandall Stewart 	cc_migration_func(cc, direct) = NULL;
237d2854fa4SRandall Stewart 	cc_migration_arg(cc, direct) = NULL;
238a115fb62SHans Petter Selasky #endif
239a115fb62SHans Petter Selasky }
240a115fb62SHans Petter Selasky 
241a115fb62SHans Petter Selasky /*
242a115fb62SHans Petter Selasky  * Checks if migration is requested by a specific callout cpu.
243a115fb62SHans Petter Selasky  */
244a115fb62SHans Petter Selasky static int
245a115fb62SHans Petter Selasky cc_cce_migrating(struct callout_cpu *cc, int direct)
246a115fb62SHans Petter Selasky {
247a115fb62SHans Petter Selasky 
248a115fb62SHans Petter Selasky #ifdef SMP
249d2854fa4SRandall Stewart 	return (cc_migration_cpu(cc, direct) != CPUBLOCK);
250a115fb62SHans Petter Selasky #else
251a115fb62SHans Petter Selasky 	return (0);
252a115fb62SHans Petter Selasky #endif
253a115fb62SHans Petter Selasky }
254a115fb62SHans Petter Selasky 
255a115fb62SHans Petter Selasky /*
256a115fb62SHans Petter Selasky  * Kernel low level callwheel initialization
257a115fb62SHans Petter Selasky  * called on cpu0 during kernel startup.
258219d632cSMatthew Dillon  */
25915ae0c9aSAndre Oppermann static void
26015ae0c9aSAndre Oppermann callout_callwheel_init(void *dummy)
261219d632cSMatthew Dillon {
2628d809d50SJeff Roberson 	struct callout_cpu *cc;
2638d809d50SJeff Roberson 
264f8ccf82aSAndre Oppermann 	/*
265f8ccf82aSAndre Oppermann 	 * Calculate the size of the callout wheel and the preallocated
266f8ccf82aSAndre Oppermann 	 * timeout() structures.
267a7aea132SAndre Oppermann 	 * XXX: Clip ncallout to what the old maxusers-based formula gave
268a7aea132SAndre Oppermann 	 * at its cap of maxusers = 384.  This is still huge, but acceptable.
269f8ccf82aSAndre Oppermann 	 */
270*15b1eb14SRandall Stewart 	memset(cc_cpu, 0, sizeof(cc_cpu));
271f8ccf82aSAndre Oppermann 	ncallout = imin(16 + maxproc + maxfiles, 18508);
272f8ccf82aSAndre Oppermann 	TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
273f8ccf82aSAndre Oppermann 
274219d632cSMatthew Dillon 	/*
275922314f0SAlfred Perlstein 	 * Calculate the callout wheel size; it should be the next power of
276922314f0SAlfred Perlstein 	 * two higher than 'ncallout'.
277219d632cSMatthew Dillon 	 */
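	/*
	 * Worked example (illustrative, not from the original source): at
	 * the 18508 clamp above, fls(18508) == 15, so callwheelsize becomes
	 * 1 << 15 == 32768 buckets and callwheelmask == 32767.
	 */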
278922314f0SAlfred Perlstein 	callwheelsize = 1 << fls(ncallout);
279219d632cSMatthew Dillon 	callwheelmask = callwheelsize - 1;
280219d632cSMatthew Dillon 
28115ae0c9aSAndre Oppermann 	/*
282ac75ee9fSAdrian Chadd 	 * Fetch whether we're pinning the swi's or not.
283ac75ee9fSAdrian Chadd 	 */
284ac75ee9fSAdrian Chadd 	TUNABLE_INT_FETCH("kern.pin_default_swi", &pin_default_swi);
285ac75ee9fSAdrian Chadd 	TUNABLE_INT_FETCH("kern.pin_pcpu_swi", &pin_pcpu_swi);
286ac75ee9fSAdrian Chadd 
287ac75ee9fSAdrian Chadd 	/*
28815ae0c9aSAndre Oppermann 	 * Only cpu0 handles timeout(9) and receives a preallocation.
28915ae0c9aSAndre Oppermann 	 *
29015ae0c9aSAndre Oppermann 	 * XXX: Once all timeout(9) consumers are converted this can
29115ae0c9aSAndre Oppermann 	 * be removed.
29215ae0c9aSAndre Oppermann 	 */
29315ae0c9aSAndre Oppermann 	timeout_cpu = PCPU_GET(cpuid);
29415ae0c9aSAndre Oppermann 	cc = CC_CPU(timeout_cpu);
29515ae0c9aSAndre Oppermann 	cc->cc_callout = malloc(ncallout * sizeof(struct callout),
29615ae0c9aSAndre Oppermann 	    M_CALLOUT, M_WAITOK);
297232e8b52SJohn Baldwin 	callout_cpu_init(cc, timeout_cpu);
298219d632cSMatthew Dillon }
29915ae0c9aSAndre Oppermann SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
300219d632cSMatthew Dillon 
30115ae0c9aSAndre Oppermann /*
30215ae0c9aSAndre Oppermann  * Initialize the per-cpu callout structures.
30315ae0c9aSAndre Oppermann  */
3048d809d50SJeff Roberson static void
305232e8b52SJohn Baldwin callout_cpu_init(struct callout_cpu *cc, int cpu)
3068d809d50SJeff Roberson {
3078d809d50SJeff Roberson 	struct callout *c;
3088d809d50SJeff Roberson 	int i;
3098d809d50SJeff Roberson 
3108d809d50SJeff Roberson 	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
3118d809d50SJeff Roberson 	SLIST_INIT(&cc->cc_callfree);
312*15b1eb14SRandall Stewart 	cc->cc_inited = 1;
313c5904471SDavide Italiano 	cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize,
31415ae0c9aSAndre Oppermann 	    M_CALLOUT, M_WAITOK);
3155b999a6bSDavide Italiano 	for (i = 0; i < callwheelsize; i++)
3165b999a6bSDavide Italiano 		LIST_INIT(&cc->cc_callwheel[i]);
3175b999a6bSDavide Italiano 	TAILQ_INIT(&cc->cc_expireq);
3184bc38a5aSDavide Italiano 	cc->cc_firstevent = SBT_MAX;
319a115fb62SHans Petter Selasky 	for (i = 0; i < 2; i++)
320a115fb62SHans Petter Selasky 		cc_cce_cleanup(cc, i);
321232e8b52SJohn Baldwin 	snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
322232e8b52SJohn Baldwin 	    "callwheel cpu %d", cpu);
32315ae0c9aSAndre Oppermann 	if (cc->cc_callout == NULL)	/* Only cpu0 handles timeout(9) */
3248d809d50SJeff Roberson 		return;
3258d809d50SJeff Roberson 	for (i = 0; i < ncallout; i++) {
3268d809d50SJeff Roberson 		c = &cc->cc_callout[i];
3278d809d50SJeff Roberson 		callout_init(c, 0);
328*15b1eb14SRandall Stewart 		c->c_iflags = CALLOUT_LOCAL_ALLOC;
3298d809d50SJeff Roberson 		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
3308d809d50SJeff Roberson 	}
3318d809d50SJeff Roberson }
3328d809d50SJeff Roberson 
333a115fb62SHans Petter Selasky #ifdef SMP
334a115fb62SHans Petter Selasky /*
335a115fb62SHans Petter Selasky  * Switches the cpu tied to a specific callout.
336a115fb62SHans Petter Selasky  * The function expects the incoming callout cpu to be locked and returns
337a115fb62SHans Petter Selasky  * with the outgoing callout cpu locked.
338a115fb62SHans Petter Selasky  */
339a115fb62SHans Petter Selasky static struct callout_cpu *
340a115fb62SHans Petter Selasky callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
341a115fb62SHans Petter Selasky {
342a115fb62SHans Petter Selasky 	struct callout_cpu *new_cc;
343a115fb62SHans Petter Selasky 
344a115fb62SHans Petter Selasky 	MPASS(c != NULL && cc != NULL);
345a115fb62SHans Petter Selasky 	CC_LOCK_ASSERT(cc);
346a115fb62SHans Petter Selasky 
347a115fb62SHans Petter Selasky 	/*
348a115fb62SHans Petter Selasky 	 * Avoid interrupts and preemption firing after the callout cpu
349a115fb62SHans Petter Selasky 	 * is blocked in order to avoid deadlocks, as the new thread
350a115fb62SHans Petter Selasky 	 * may want to acquire the callout cpu lock.
351a115fb62SHans Petter Selasky 	 */
352a115fb62SHans Petter Selasky 	c->c_cpu = CPUBLOCK;
353a115fb62SHans Petter Selasky 	spinlock_enter();
354a115fb62SHans Petter Selasky 	CC_UNLOCK(cc);
355a115fb62SHans Petter Selasky 	new_cc = CC_CPU(new_cpu);
356a115fb62SHans Petter Selasky 	CC_LOCK(new_cc);
357a115fb62SHans Petter Selasky 	spinlock_exit();
358a115fb62SHans Petter Selasky 	c->c_cpu = new_cpu;
359a115fb62SHans Petter Selasky 	return (new_cc);
360a115fb62SHans Petter Selasky }
361a115fb62SHans Petter Selasky #endif
362a115fb62SHans Petter Selasky 
363219d632cSMatthew Dillon /*
3648d809d50SJeff Roberson  * Start standard softclock thread.
3658d809d50SJeff Roberson  */
3668d809d50SJeff Roberson static void
3678d809d50SJeff Roberson start_softclock(void *dummy)
3688d809d50SJeff Roberson {
3698d809d50SJeff Roberson 	struct callout_cpu *cc;
370f44e2a4cSAdrian Chadd 	char name[MAXCOMLEN];
3718d809d50SJeff Roberson #ifdef SMP
3728d809d50SJeff Roberson 	int cpu;
373ac75ee9fSAdrian Chadd 	struct intr_event *ie;
3748d809d50SJeff Roberson #endif
3758d809d50SJeff Roberson 
3768d809d50SJeff Roberson 	cc = CC_CPU(timeout_cpu);
377f44e2a4cSAdrian Chadd 	snprintf(name, sizeof(name), "clock (%d)", timeout_cpu);
378f44e2a4cSAdrian Chadd 	if (swi_add(&clk_intr_event, name, softclock, cc, SWI_CLOCK,
3793350df48SJohn Baldwin 	    INTR_MPSAFE, &cc->cc_cookie))
3808d809d50SJeff Roberson 		panic("died while creating standard software ithreads");
381ac75ee9fSAdrian Chadd 	if (pin_default_swi &&
382ac75ee9fSAdrian Chadd 	    (intr_event_bind(clk_intr_event, timeout_cpu) != 0)) {
383ac75ee9fSAdrian Chadd 		printf("%s: timeout clock couldn't be pinned to cpu %d\n",
384ac75ee9fSAdrian Chadd 		    __func__,
385ac75ee9fSAdrian Chadd 		    timeout_cpu);
386ac75ee9fSAdrian Chadd 	}
387ac75ee9fSAdrian Chadd 
3888d809d50SJeff Roberson #ifdef SMP
3893aa6d94eSJohn Baldwin 	CPU_FOREACH(cpu) {
3908d809d50SJeff Roberson 		if (cpu == timeout_cpu)
3918d809d50SJeff Roberson 			continue;
3928d809d50SJeff Roberson 		cc = CC_CPU(cpu);
39315ae0c9aSAndre Oppermann 		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(9). */
394232e8b52SJohn Baldwin 		callout_cpu_init(cc, cpu);
395f44e2a4cSAdrian Chadd 		snprintf(name, sizeof(name), "clock (%d)", cpu);
396ac75ee9fSAdrian Chadd 		ie = NULL;
397ac75ee9fSAdrian Chadd 		if (swi_add(&ie, name, softclock, cc, SWI_CLOCK,
3988d809d50SJeff Roberson 		    INTR_MPSAFE, &cc->cc_cookie))
3998d809d50SJeff Roberson 			panic("died while creating standard software ithreads");
400ac75ee9fSAdrian Chadd 		if (pin_pcpu_swi && (intr_event_bind(ie, cpu) != 0)) {
401ac75ee9fSAdrian Chadd 			printf("%s: per-cpu clock couldn't be pinned to "
402ac75ee9fSAdrian Chadd 			    "cpu %d\n",
403ac75ee9fSAdrian Chadd 			    __func__,
404ac75ee9fSAdrian Chadd 			    cpu);
405ac75ee9fSAdrian Chadd 		}
406219d632cSMatthew Dillon 	}
4078d809d50SJeff Roberson #endif
408219d632cSMatthew Dillon }
4098d809d50SJeff Roberson SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
4108d809d50SJeff Roberson 
4115b999a6bSDavide Italiano #define	CC_HASH_SHIFT	8
4128d809d50SJeff Roberson 
4135b999a6bSDavide Italiano static inline u_int
4145b999a6bSDavide Italiano callout_hash(sbintime_t sbt)
4155b999a6bSDavide Italiano {
4165b999a6bSDavide Italiano 
4175b999a6bSDavide Italiano 	return (sbt >> (32 - CC_HASH_SHIFT));
4185b999a6bSDavide Italiano }
4195b999a6bSDavide Italiano 
4205b999a6bSDavide Italiano static inline u_int
4215b999a6bSDavide Italiano callout_get_bucket(sbintime_t sbt)
4225b999a6bSDavide Italiano {
4235b999a6bSDavide Italiano 
4245b999a6bSDavide Italiano 	return (callout_hash(sbt) & callwheelmask);
4255b999a6bSDavide Italiano }
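/*
 * Note (illustrative, not from the original source): sbintime_t is a 32.32
 * fixed-point value, so with CC_HASH_SHIFT == 8 the shift by
 * (32 - CC_HASH_SHIFT) == 24 bits makes each bucket cover 1/256 of a second,
 * and the wheel as a whole spans callwheelsize / 256 seconds before the
 * hash wraps around.
 */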
4265b999a6bSDavide Italiano 
4275b999a6bSDavide Italiano void
4285b999a6bSDavide Italiano callout_process(sbintime_t now)
4295b999a6bSDavide Italiano {
4305b999a6bSDavide Italiano 	struct callout *tmp, *tmpn;
4315b999a6bSDavide Italiano 	struct callout_cpu *cc;
4325b999a6bSDavide Italiano 	struct callout_list *sc;
4335b999a6bSDavide Italiano 	sbintime_t first, last, max, tmp_max;
4345b999a6bSDavide Italiano 	uint32_t lookahead;
4355b999a6bSDavide Italiano 	u_int firstb, lastb, nowb;
4365b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING
4375b999a6bSDavide Italiano 	int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
4385b999a6bSDavide Italiano #endif
439a115fb62SHans Petter Selasky 
4408d809d50SJeff Roberson 	cc = CC_SELF();
441a115fb62SHans Petter Selasky 	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
4425b999a6bSDavide Italiano 
4435b999a6bSDavide Italiano 	/* Compute the buckets of the last scan and present times. */
4445b999a6bSDavide Italiano 	firstb = callout_hash(cc->cc_lastscan);
4455b999a6bSDavide Italiano 	cc->cc_lastscan = now;
4465b999a6bSDavide Italiano 	nowb = callout_hash(now);
4475b999a6bSDavide Italiano 
4485b999a6bSDavide Italiano 	/* Compute the last bucket and minimum time of the bucket after it. */
4495b999a6bSDavide Italiano 	if (nowb == firstb)
4505b999a6bSDavide Italiano 		lookahead = (SBT_1S / 16);
4515b999a6bSDavide Italiano 	else if (nowb - firstb == 1)
4525b999a6bSDavide Italiano 		lookahead = (SBT_1S / 8);
4535b999a6bSDavide Italiano 	else
4545b999a6bSDavide Italiano 		lookahead = (SBT_1S / 2);
4555b999a6bSDavide Italiano 	first = last = now;
4565b999a6bSDavide Italiano 	first += (lookahead / 2);
4575b999a6bSDavide Italiano 	last += lookahead;
4585b999a6bSDavide Italiano 	last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
4595b999a6bSDavide Italiano 	lastb = callout_hash(last) - 1;
4605b999a6bSDavide Italiano 	max = last;
4615b999a6bSDavide Italiano 
4625b999a6bSDavide Italiano 	/*
4635b999a6bSDavide Italiano 	 * Check if we wrapped around the entire wheel from the last scan.
4645b999a6bSDavide Italiano 	 * If so, we need to scan the entire wheel for pending callouts.
4655b999a6bSDavide Italiano 	 */
4665b999a6bSDavide Italiano 	if (lastb - firstb >= callwheelsize) {
4675b999a6bSDavide Italiano 		lastb = firstb + callwheelsize - 1;
4685b999a6bSDavide Italiano 		if (nowb - firstb >= callwheelsize)
4695b999a6bSDavide Italiano 			nowb = lastb;
4709fc51b0bSJeff Roberson 	}
4715b999a6bSDavide Italiano 
4725b999a6bSDavide Italiano 	/* Iterate callwheel from firstb to nowb and then up to lastb. */
4735b999a6bSDavide Italiano 	do {
4745b999a6bSDavide Italiano 		sc = &cc->cc_callwheel[firstb & callwheelmask];
4755b999a6bSDavide Italiano 		tmp = LIST_FIRST(sc);
4765b999a6bSDavide Italiano 		while (tmp != NULL) {
4775b999a6bSDavide Italiano 			/* Run the callout if the present time is within its window. */
4785b999a6bSDavide Italiano 			if (tmp->c_time <= now) {
4795b999a6bSDavide Italiano 				/*
4805b999a6bSDavide Italiano 				 * Consumer told us the callout may be run
4815b999a6bSDavide Italiano 				 * directly from hardware interrupt context.
4825b999a6bSDavide Italiano 				 */
483*15b1eb14SRandall Stewart 				if (tmp->c_iflags & CALLOUT_DIRECT) {
4845b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING
4855b999a6bSDavide Italiano 					++depth_dir;
4865b999a6bSDavide Italiano #endif
48766525b2dSRandall Stewart 					cc_exec_next(cc) =
4885b999a6bSDavide Italiano 					    LIST_NEXT(tmp, c_links.le);
4895b999a6bSDavide Italiano 					cc->cc_bucket = firstb & callwheelmask;
4905b999a6bSDavide Italiano 					LIST_REMOVE(tmp, c_links.le);
4915b999a6bSDavide Italiano 					softclock_call_cc(tmp, cc,
4925b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING
4935b999a6bSDavide Italiano 					    &mpcalls_dir, &lockcalls_dir, NULL,
4945b999a6bSDavide Italiano #endif
4955b999a6bSDavide Italiano 					    1);
49666525b2dSRandall Stewart 					tmp = cc_exec_next(cc);
49766525b2dSRandall Stewart 					cc_exec_next(cc) = NULL;
4985b999a6bSDavide Italiano 				} else {
4995b999a6bSDavide Italiano 					tmpn = LIST_NEXT(tmp, c_links.le);
5005b999a6bSDavide Italiano 					LIST_REMOVE(tmp, c_links.le);
5015b999a6bSDavide Italiano 					TAILQ_INSERT_TAIL(&cc->cc_expireq,
5025b999a6bSDavide Italiano 					    tmp, c_links.tqe);
503*15b1eb14SRandall Stewart 					tmp->c_iflags |= CALLOUT_PROCESSED;
5045b999a6bSDavide Italiano 					tmp = tmpn;
5059fc51b0bSJeff Roberson 				}
5065b999a6bSDavide Italiano 				continue;
5075b999a6bSDavide Italiano 			}
5085b999a6bSDavide Italiano 			/* Skip events from distant future. */
5095b999a6bSDavide Italiano 			if (tmp->c_time >= max)
5105b999a6bSDavide Italiano 				goto next;
5115b999a6bSDavide Italiano 			/*
5125b999a6bSDavide Italiano 			 * The event's earliest time is later than the present
5135b999a6bSDavide Italiano 			 * maximal time, so it cannot be aggregated.
5145b999a6bSDavide Italiano 			 */
5155b999a6bSDavide Italiano 			if (tmp->c_time > last) {
5165b999a6bSDavide Italiano 				lastb = nowb;
5175b999a6bSDavide Italiano 				goto next;
5185b999a6bSDavide Italiano 			}
5195b999a6bSDavide Italiano 			/* Update first and last time, respecting this event. */
5205b999a6bSDavide Italiano 			if (tmp->c_time < first)
5215b999a6bSDavide Italiano 				first = tmp->c_time;
5225b999a6bSDavide Italiano 			tmp_max = tmp->c_time + tmp->c_precision;
5235b999a6bSDavide Italiano 			if (tmp_max < last)
5245b999a6bSDavide Italiano 				last = tmp_max;
5255b999a6bSDavide Italiano next:
5265b999a6bSDavide Italiano 			tmp = LIST_NEXT(tmp, c_links.le);
5275b999a6bSDavide Italiano 		}
5285b999a6bSDavide Italiano 		/* Proceed with the next bucket. */
5295b999a6bSDavide Italiano 		firstb++;
5305b999a6bSDavide Italiano 		/*
5315b999a6bSDavide Italiano 		 * Stop if we looked past the present time and found
5325b999a6bSDavide Italiano 		 * some event we can't execute now.
5335b999a6bSDavide Italiano 		 * Stop if we looked far enough into the future.
5345b999a6bSDavide Italiano 		 */
5355b999a6bSDavide Italiano 	} while (((int)(firstb - lastb)) <= 0);
5365b999a6bSDavide Italiano 	cc->cc_firstevent = last;
5375b999a6bSDavide Italiano #ifndef NO_EVENTTIMERS
5385b999a6bSDavide Italiano 	cpu_new_callout(curcpu, last, first);
5395b999a6bSDavide Italiano #endif
5405b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING
5415b999a6bSDavide Italiano 	avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
5425b999a6bSDavide Italiano 	avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
5435b999a6bSDavide Italiano 	avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
5445b999a6bSDavide Italiano #endif
545a115fb62SHans Petter Selasky 	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
5468d809d50SJeff Roberson 	/*
5478d809d50SJeff Roberson 	 * swi_sched acquires the thread lock, so we don't want to call it
5488d809d50SJeff Roberson 	 * with cc_lock held; incorrect locking order.
5498d809d50SJeff Roberson 	 */
5505b999a6bSDavide Italiano 	if (!TAILQ_EMPTY(&cc->cc_expireq))
5518d809d50SJeff Roberson 		swi_sched(cc->cc_cookie, 0);
5528d809d50SJeff Roberson }
5538d809d50SJeff Roberson 
5548d809d50SJeff Roberson static struct callout_cpu *
5558d809d50SJeff Roberson callout_lock(struct callout *c)
5568d809d50SJeff Roberson {
5578d809d50SJeff Roberson 	struct callout_cpu *cc;
558a115fb62SHans Petter Selasky 	int cpu;
559a115fb62SHans Petter Selasky 
560a115fb62SHans Petter Selasky 	for (;;) {
561a115fb62SHans Petter Selasky 		cpu = c->c_cpu;
562a115fb62SHans Petter Selasky #ifdef SMP
563a115fb62SHans Petter Selasky 		if (cpu == CPUBLOCK) {
564a115fb62SHans Petter Selasky 			while (c->c_cpu == CPUBLOCK)
565a115fb62SHans Petter Selasky 				cpu_spinwait();
566a115fb62SHans Petter Selasky 			continue;
567a115fb62SHans Petter Selasky 		}
568a115fb62SHans Petter Selasky #endif
569a115fb62SHans Petter Selasky 		cc = CC_CPU(cpu);
5708d809d50SJeff Roberson 		CC_LOCK(cc);
571a115fb62SHans Petter Selasky 		if (cpu == c->c_cpu)
572a115fb62SHans Petter Selasky 			break;
573a115fb62SHans Petter Selasky 		CC_UNLOCK(cc);
574a115fb62SHans Petter Selasky 	}
5758d809d50SJeff Roberson 	return (cc);
576219d632cSMatthew Dillon }
577219d632cSMatthew Dillon 
578a115fb62SHans Petter Selasky static void
579a115fb62SHans Petter Selasky callout_cc_add(struct callout *c, struct callout_cpu *cc,
580a115fb62SHans Petter Selasky     sbintime_t sbt, sbintime_t precision, void (*func)(void *),
58166525b2dSRandall Stewart     void *arg, int cpu, int flags)
5821283e9cdSAttilio Rao {
5835b999a6bSDavide Italiano 	int bucket;
5841283e9cdSAttilio Rao 
5851283e9cdSAttilio Rao 	CC_LOCK_ASSERT(cc);
586a115fb62SHans Petter Selasky 	if (sbt < cc->cc_lastscan)
587a115fb62SHans Petter Selasky 		sbt = cc->cc_lastscan;
588a115fb62SHans Petter Selasky 	c->c_arg = arg;
589*15b1eb14SRandall Stewart 	c->c_iflags |= CALLOUT_PENDING;
590*15b1eb14SRandall Stewart 	c->c_iflags &= ~CALLOUT_PROCESSED;
591*15b1eb14SRandall Stewart 	c->c_flags |= CALLOUT_ACTIVE;
592a115fb62SHans Petter Selasky 	c->c_func = func;
593a115fb62SHans Petter Selasky 	c->c_time = sbt;
594a115fb62SHans Petter Selasky 	c->c_precision = precision;
5955b999a6bSDavide Italiano 	bucket = callout_get_bucket(c->c_time);
5965b999a6bSDavide Italiano 	CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
5975b999a6bSDavide Italiano 	    c, (int)(c->c_precision >> 32),
5985b999a6bSDavide Italiano 	    (u_int)(c->c_precision & 0xffffffff));
5995b999a6bSDavide Italiano 	LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
6005b999a6bSDavide Italiano 	if (cc->cc_bucket == bucket)
60166525b2dSRandall Stewart 		cc_exec_next(cc) = c;
6025b999a6bSDavide Italiano #ifndef NO_EVENTTIMERS
6035b999a6bSDavide Italiano 	/*
6045b999a6bSDavide Italiano 	 * Inform the eventtimers(4) subsystem there's a new callout
6055b999a6bSDavide Italiano 	 * that has been inserted, but only if really required.
6065b999a6bSDavide Italiano 	 */
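	/* Clamp precision so that c_time + c_precision cannot overflow SBT_MAX. */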
6074bc38a5aSDavide Italiano 	if (SBT_MAX - c->c_time < c->c_precision)
6084bc38a5aSDavide Italiano 		c->c_precision = SBT_MAX - c->c_time;
6095b999a6bSDavide Italiano 	sbt = c->c_time + c->c_precision;
6105b999a6bSDavide Italiano 	if (sbt < cc->cc_firstevent) {
6115b999a6bSDavide Italiano 		cc->cc_firstevent = sbt;
612a115fb62SHans Petter Selasky 		cpu_new_callout(cpu, sbt, c->c_time);
6131283e9cdSAttilio Rao 	}
6145b999a6bSDavide Italiano #endif
6151283e9cdSAttilio Rao }
6161283e9cdSAttilio Rao 
6176098e7acSKonstantin Belousov static void
6186098e7acSKonstantin Belousov callout_cc_del(struct callout *c, struct callout_cpu *cc)
6196098e7acSKonstantin Belousov {
6206098e7acSKonstantin Belousov 
621*15b1eb14SRandall Stewart 	if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) == 0)
622a115fb62SHans Petter Selasky 		return;
6236098e7acSKonstantin Belousov 	c->c_func = NULL;
6246098e7acSKonstantin Belousov 	SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
6256098e7acSKonstantin Belousov }
6266098e7acSKonstantin Belousov 
627eb8a7186SKonstantin Belousov static void
6285b999a6bSDavide Italiano softclock_call_cc(struct callout *c, struct callout_cpu *cc,
6295b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING
6305b999a6bSDavide Italiano     int *mpcalls, int *lockcalls, int *gcalls,
6315b999a6bSDavide Italiano #endif
6325b999a6bSDavide Italiano     int direct)
6336098e7acSKonstantin Belousov {
634a115fb62SHans Petter Selasky 	struct rm_priotracker tracker;
635a115fb62SHans Petter Selasky 	void (*c_func)(void *);
6366098e7acSKonstantin Belousov 	void *c_arg;
637a115fb62SHans Petter Selasky 	struct lock_class *class;
6386098e7acSKonstantin Belousov 	struct lock_object *c_lock;
639a115fb62SHans Petter Selasky 	uintptr_t lock_status;
640*15b1eb14SRandall Stewart 	int c_iflags;
641a115fb62SHans Petter Selasky #ifdef SMP
642a115fb62SHans Petter Selasky 	struct callout_cpu *new_cc;
643a115fb62SHans Petter Selasky 	void (*new_func)(void *);
644a115fb62SHans Petter Selasky 	void *new_arg;
645a115fb62SHans Petter Selasky 	int flags, new_cpu;
646a115fb62SHans Petter Selasky 	sbintime_t new_prec, new_time;
647a115fb62SHans Petter Selasky #endif
6485b999a6bSDavide Italiano #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
64903763781SDavide Italiano 	sbintime_t sbt1, sbt2;
6506098e7acSKonstantin Belousov 	struct timespec ts2;
6515b999a6bSDavide Italiano 	static sbintime_t maxdt = 2 * SBT_1MS;	/* 2 msec */
6526098e7acSKonstantin Belousov 	static timeout_t *lastfunc;
6536098e7acSKonstantin Belousov #endif
6546098e7acSKonstantin Belousov 
655*15b1eb14SRandall Stewart 	KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING,
656*15b1eb14SRandall Stewart 	    ("softclock_call_cc: pend %p %x", c, c->c_iflags));
657*15b1eb14SRandall Stewart 	KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE,
658*15b1eb14SRandall Stewart 	    ("softclock_call_cc: act %p %x", c, c->c_flags));
659a115fb62SHans Petter Selasky 	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
660a115fb62SHans Petter Selasky 	lock_status = 0;
661a115fb62SHans Petter Selasky 	if (c->c_flags & CALLOUT_SHAREDLOCK) {
662a115fb62SHans Petter Selasky 		if (class == &lock_class_rm)
663a115fb62SHans Petter Selasky 			lock_status = (uintptr_t)&tracker;
664a115fb62SHans Petter Selasky 		else
665a115fb62SHans Petter Selasky 			lock_status = 1;
666a115fb62SHans Petter Selasky 	}
6676098e7acSKonstantin Belousov 	c_lock = c->c_lock;
6686098e7acSKonstantin Belousov 	c_func = c->c_func;
6696098e7acSKonstantin Belousov 	c_arg = c->c_arg;
670*15b1eb14SRandall Stewart 	c_iflags = c->c_iflags;
671*15b1eb14SRandall Stewart 	if (c->c_iflags & CALLOUT_LOCAL_ALLOC)
672*15b1eb14SRandall Stewart 		c->c_iflags = CALLOUT_LOCAL_ALLOC;
673a115fb62SHans Petter Selasky 	else
674*15b1eb14SRandall Stewart 		c->c_iflags &= ~CALLOUT_PENDING;
675d2854fa4SRandall Stewart 
676d2854fa4SRandall Stewart 	cc_exec_curr(cc, direct) = c;
677d2854fa4SRandall Stewart 	cc_exec_cancel(cc, direct) = false;
6786098e7acSKonstantin Belousov 	CC_UNLOCK(cc);
679a115fb62SHans Petter Selasky 	if (c_lock != NULL) {
680a115fb62SHans Petter Selasky 		class->lc_lock(c_lock, lock_status);
6816098e7acSKonstantin Belousov 		/*
682a115fb62SHans Petter Selasky 		 * The callout may have been cancelled
683a115fb62SHans Petter Selasky 		 * while we switched locks.
6846098e7acSKonstantin Belousov 		 */
685d2854fa4SRandall Stewart 		if (cc_exec_cancel(cc, direct)) {
686a115fb62SHans Petter Selasky 			class->lc_unlock(c_lock);
687a115fb62SHans Petter Selasky 			goto skip;
6886098e7acSKonstantin Belousov 		}
689a115fb62SHans Petter Selasky 		/* The callout cannot be stopped now. */
690d2854fa4SRandall Stewart 		cc_exec_cancel(cc, direct) = true;
6916098e7acSKonstantin Belousov 		if (c_lock == &Giant.lock_object) {
6925b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING
6936098e7acSKonstantin Belousov 			(*gcalls)++;
6945b999a6bSDavide Italiano #endif
6955b999a6bSDavide Italiano 			CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
6966098e7acSKonstantin Belousov 			    c, c_func, c_arg);
6976098e7acSKonstantin Belousov 		} else {
6985b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING
6996098e7acSKonstantin Belousov 			(*lockcalls)++;
7005b999a6bSDavide Italiano #endif
7016098e7acSKonstantin Belousov 			CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
7026098e7acSKonstantin Belousov 			    c, c_func, c_arg);
7036098e7acSKonstantin Belousov 		}
7046098e7acSKonstantin Belousov 	} else {
7055b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING
7066098e7acSKonstantin Belousov 		(*mpcalls)++;
7075b999a6bSDavide Italiano #endif
7085b999a6bSDavide Italiano 		CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
7096098e7acSKonstantin Belousov 		    c, c_func, c_arg);
7106098e7acSKonstantin Belousov 	}
711232e8b52SJohn Baldwin 	KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
712232e8b52SJohn Baldwin 	    "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
71303763781SDavide Italiano #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
7145b999a6bSDavide Italiano 	sbt1 = sbinuptime();
7156098e7acSKonstantin Belousov #endif
7166098e7acSKonstantin Belousov 	THREAD_NO_SLEEPING();
717d9fae5abSAndriy Gapon 	SDT_PROBE(callout_execute, kernel, , callout__start, c, 0, 0, 0, 0);
7186098e7acSKonstantin Belousov 	c_func(c_arg);
719d9fae5abSAndriy Gapon 	SDT_PROBE(callout_execute, kernel, , callout__end, c, 0, 0, 0, 0);
7206098e7acSKonstantin Belousov 	THREAD_SLEEPING_OK();
72103763781SDavide Italiano #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
72203763781SDavide Italiano 	sbt2 = sbinuptime();
72303763781SDavide Italiano 	sbt2 -= sbt1;
72403763781SDavide Italiano 	if (sbt2 > maxdt) {
72503763781SDavide Italiano 		if (lastfunc != c_func || sbt2 > maxdt * 2) {
72603763781SDavide Italiano 			ts2 = sbttots(sbt2);
7276098e7acSKonstantin Belousov 			printf(
7286098e7acSKonstantin Belousov 		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
7296098e7acSKonstantin Belousov 			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
7306098e7acSKonstantin Belousov 		}
73103763781SDavide Italiano 		maxdt = sbt2;
7326098e7acSKonstantin Belousov 		lastfunc = c_func;
7336098e7acSKonstantin Belousov 	}
7346098e7acSKonstantin Belousov #endif
735232e8b52SJohn Baldwin 	KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
7366098e7acSKonstantin Belousov 	CTR1(KTR_CALLOUT, "callout %p finished", c);
737*15b1eb14SRandall Stewart 	if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0)
738a115fb62SHans Petter Selasky 		class->lc_unlock(c_lock);
739a115fb62SHans Petter Selasky skip:
7406098e7acSKonstantin Belousov 	CC_LOCK(cc);
741d2854fa4SRandall Stewart 	KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
742d2854fa4SRandall Stewart 	cc_exec_curr(cc, direct) = NULL;
743d2854fa4SRandall Stewart 	if (cc_exec_waiting(cc, direct)) {
744bdf9120cSAttilio Rao 		/*
745a115fb62SHans Petter Selasky 		 * There is someone waiting for the
746a115fb62SHans Petter Selasky 		 * callout to complete.
747a115fb62SHans Petter Selasky 		 * If the callout was scheduled for
748a115fb62SHans Petter Selasky 		 * migration just cancel it.
749bdf9120cSAttilio Rao 		 */
750a115fb62SHans Petter Selasky 		if (cc_cce_migrating(cc, direct)) {
751a115fb62SHans Petter Selasky 			cc_cce_cleanup(cc, direct);
752a115fb62SHans Petter Selasky 
753a115fb62SHans Petter Selasky 			/*
754a115fb62SHans Petter Selasky 			 * It should be asserted here that the callout is not
755a115fb62SHans Petter Selasky 			 * destroyed, but that is not easy to do.
756a115fb62SHans Petter Selasky 			 */
757*15b1eb14SRandall Stewart 			c->c_iflags &= ~CALLOUT_DFRMIGRATION;
7586098e7acSKonstantin Belousov 		}
759d2854fa4SRandall Stewart 		cc_exec_waiting(cc, direct) = false;
760a115fb62SHans Petter Selasky 		CC_UNLOCK(cc);
761d2854fa4SRandall Stewart 		wakeup(&cc_exec_waiting(cc, direct));
762a115fb62SHans Petter Selasky 		CC_LOCK(cc);
763a115fb62SHans Petter Selasky 	} else if (cc_cce_migrating(cc, direct)) {
764*15b1eb14SRandall Stewart 		KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0,
765a115fb62SHans Petter Selasky 		    ("Migrating legacy callout %p", c));
766a115fb62SHans Petter Selasky #ifdef SMP
767a115fb62SHans Petter Selasky 		/*
768a115fb62SHans Petter Selasky 		 * If the callout was scheduled for
769a115fb62SHans Petter Selasky 		 * migration just perform it now.
770a115fb62SHans Petter Selasky 		 */
771d2854fa4SRandall Stewart 		new_cpu = cc_migration_cpu(cc, direct);
772d2854fa4SRandall Stewart 		new_time = cc_migration_time(cc, direct);
773d2854fa4SRandall Stewart 		new_prec = cc_migration_prec(cc, direct);
774d2854fa4SRandall Stewart 		new_func = cc_migration_func(cc, direct);
775d2854fa4SRandall Stewart 		new_arg = cc_migration_arg(cc, direct);
776a115fb62SHans Petter Selasky 		cc_cce_cleanup(cc, direct);
777a115fb62SHans Petter Selasky 
778a115fb62SHans Petter Selasky 		/*
779a115fb62SHans Petter Selasky 		 * It should be asserted here that the callout is not destroyed,
780a115fb62SHans Petter Selasky 		 * but that is not easy to do.
781a115fb62SHans Petter Selasky 		 *
782a115fb62SHans Petter Selasky 		 * First, handle deferred callout stops.
783a115fb62SHans Petter Selasky 		 */
784d2854fa4SRandall Stewart 		if (!callout_migrating(c)) {
785a115fb62SHans Petter Selasky 			CTR3(KTR_CALLOUT,
786a115fb62SHans Petter Selasky 			     "deferred cancelled %p func %p arg %p",
787a115fb62SHans Petter Selasky 			     c, new_func, new_arg);
788a115fb62SHans Petter Selasky 			callout_cc_del(c, cc);
789a115fb62SHans Petter Selasky 			return;
790a115fb62SHans Petter Selasky 		}
791*15b1eb14SRandall Stewart 		c->c_iflags &= ~CALLOUT_DFRMIGRATION;
792a115fb62SHans Petter Selasky 
793a115fb62SHans Petter Selasky 		new_cc = callout_cpu_switch(c, cc, new_cpu);
794a115fb62SHans Petter Selasky 		flags = (direct) ? C_DIRECT_EXEC : 0;
795a115fb62SHans Petter Selasky 		callout_cc_add(c, new_cc, new_time, new_prec, new_func,
79666525b2dSRandall Stewart 		    new_arg, new_cpu, flags);
797a115fb62SHans Petter Selasky 		CC_UNLOCK(new_cc);
798a115fb62SHans Petter Selasky 		CC_LOCK(cc);
799a115fb62SHans Petter Selasky #else
800a115fb62SHans Petter Selasky 		panic("migration should not happen");
801a115fb62SHans Petter Selasky #endif
802a115fb62SHans Petter Selasky 	}
803a115fb62SHans Petter Selasky 	/*
804a115fb62SHans Petter Selasky 	 * If the current callout is locally allocated (from
805a115fb62SHans Petter Selasky 	 * timeout(9)) then put it on the freelist.
806a115fb62SHans Petter Selasky 	 *
807*15b1eb14SRandall Stewart 	 * Note: we need to check the cached copy of c_iflags because
808a115fb62SHans Petter Selasky 	 * if it was not local, then it's not safe to deref the
809a115fb62SHans Petter Selasky 	 * callout pointer.
810a115fb62SHans Petter Selasky 	 */
811*15b1eb14SRandall Stewart 	KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0 ||
812*15b1eb14SRandall Stewart 	    c->c_iflags == CALLOUT_LOCAL_ALLOC,
813a115fb62SHans Petter Selasky 	    ("corrupted callout"));
814*15b1eb14SRandall Stewart 	if (c_iflags & CALLOUT_LOCAL_ALLOC)
815a115fb62SHans Petter Selasky 		callout_cc_del(c, cc);
8166098e7acSKonstantin Belousov }
8176098e7acSKonstantin Belousov 
818219d632cSMatthew Dillon /*
819ab36c067SJustin T. Gibbs  * The callout mechanism is based on the work of Adam M. Costello and
820ab36c067SJustin T. Gibbs  * George Varghese, published in a technical report entitled "Redesigning
821ab36c067SJustin T. Gibbs  * the BSD Callout and Timer Facilities" and modified slightly for inclusion
822ab36c067SJustin T. Gibbs  * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
823024035e8SHiten Pandya  * used in this implementation was published by G. Varghese and T. Lauck in
824ab36c067SJustin T. Gibbs  * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
825ab36c067SJustin T. Gibbs  * the Efficient Implementation of a Timer Facility" in the Proceedings of
826ab36c067SJustin T. Gibbs  * the 11th ACM Annual Symposium on Operating Systems Principles,
827ab36c067SJustin T. Gibbs  * Austin, Texas Nov 1987.
828ab36c067SJustin T. Gibbs  */
829a50ec505SPoul-Henning Kamp 
830ab36c067SJustin T. Gibbs /*
831df8bae1dSRodney W. Grimes  * Software (low priority) clock interrupt.
832df8bae1dSRodney W. Grimes  * Run periodic events from timeout queue.
833df8bae1dSRodney W. Grimes  */
834df8bae1dSRodney W. Grimes void
8358d809d50SJeff Roberson softclock(void *arg)
836df8bae1dSRodney W. Grimes {
8378d809d50SJeff Roberson 	struct callout_cpu *cc;
838b336df68SPoul-Henning Kamp 	struct callout *c;
8395b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING
8405b999a6bSDavide Italiano 	int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
8415b999a6bSDavide Italiano #endif
842df8bae1dSRodney W. Grimes 
8438d809d50SJeff Roberson 	cc = (struct callout_cpu *)arg;
8448d809d50SJeff Roberson 	CC_LOCK(cc);
8455b999a6bSDavide Italiano 	while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
8465b999a6bSDavide Italiano 		TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
8475b999a6bSDavide Italiano 		softclock_call_cc(c, cc,
8485b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING
8495b999a6bSDavide Italiano 		    &mpcalls, &lockcalls, &gcalls,
8505b999a6bSDavide Italiano #endif
8515b999a6bSDavide Italiano 		    0);
8525b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING
8535b999a6bSDavide Italiano 		++depth;
8545b999a6bSDavide Italiano #endif
855df8bae1dSRodney W. Grimes 	}
8565b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING
85722ee8c4fSPoul-Henning Kamp 	avg_depth += (depth * 1000 - avg_depth) >> 8;
85822ee8c4fSPoul-Henning Kamp 	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
85964b9ee20SAttilio Rao 	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
86022ee8c4fSPoul-Henning Kamp 	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
8615b999a6bSDavide Italiano #endif
8628d809d50SJeff Roberson 	CC_UNLOCK(cc);
863df8bae1dSRodney W. Grimes }
864df8bae1dSRodney W. Grimes 
865df8bae1dSRodney W. Grimes /*
866df8bae1dSRodney W. Grimes  * timeout --
867df8bae1dSRodney W. Grimes  *	Execute a function after a specified length of time.
868df8bae1dSRodney W. Grimes  *
869df8bae1dSRodney W. Grimes  * untimeout --
870df8bae1dSRodney W. Grimes  *	Cancel previous timeout function call.
871df8bae1dSRodney W. Grimes  *
872ab36c067SJustin T. Gibbs  * callout_handle_init --
873ab36c067SJustin T. Gibbs  *	Initialize a handle so that using it with untimeout is benign.
874ab36c067SJustin T. Gibbs  *
875df8bae1dSRodney W. Grimes  *	See AT&T BCI Driver Reference Manual for specification.  This
876ab36c067SJustin T. Gibbs  *	implementation differs from that one in that although an
877ab36c067SJustin T. Gibbs  *	identification value is returned from timeout, the original
878ab36c067SJustin T. Gibbs  *	arguments to timeout as well as the identifier are used to
879ab36c067SJustin T. Gibbs  *	identify entries for untimeout.
880df8bae1dSRodney W. Grimes  */
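/*
 * Illustrative usage of the legacy interface (hypothetical consumer code,
 * not part of this file); my_timer_fn and sc are assumed names:
 *
 *	struct callout_handle th;
 *
 *	th = timeout(my_timer_fn, sc, hz);	(run my_timer_fn(sc) in ~1 s)
 *	...
 *	untimeout(my_timer_fn, sc, th);		(cancel it if still pending)
 *
 * New consumers should use the callout_*() interface further below.
 */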
881ab36c067SJustin T. Gibbs struct callout_handle
882e392e44cSDavide Italiano timeout(timeout_t *ftn, void *arg, int to_ticks)
883df8bae1dSRodney W. Grimes {
8848d809d50SJeff Roberson 	struct callout_cpu *cc;
885ab36c067SJustin T. Gibbs 	struct callout *new;
886ab36c067SJustin T. Gibbs 	struct callout_handle handle;
887df8bae1dSRodney W. Grimes 
8888d809d50SJeff Roberson 	cc = CC_CPU(timeout_cpu);
8898d809d50SJeff Roberson 	CC_LOCK(cc);
890df8bae1dSRodney W. Grimes 	/* Fill in the next free callout structure. */
8918d809d50SJeff Roberson 	new = SLIST_FIRST(&cc->cc_callfree);
892ab36c067SJustin T. Gibbs 	if (new == NULL)
893ab36c067SJustin T. Gibbs 		/* XXX Attempt to malloc first */
894df8bae1dSRodney W. Grimes 		panic("timeout table full");
8958d809d50SJeff Roberson 	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
896a115fb62SHans Petter Selasky 	callout_reset(new, to_ticks, ftn, arg);
897ab36c067SJustin T. Gibbs 	handle.callout = new;
8988d809d50SJeff Roberson 	CC_UNLOCK(cc);
8998d809d50SJeff Roberson 
900ab36c067SJustin T. Gibbs 	return (handle);
901df8bae1dSRodney W. Grimes }
902df8bae1dSRodney W. Grimes 
903df8bae1dSRodney W. Grimes void
904e392e44cSDavide Italiano untimeout(timeout_t *ftn, void *arg, struct callout_handle handle)
905df8bae1dSRodney W. Grimes {
9068d809d50SJeff Roberson 	struct callout_cpu *cc;
907df8bae1dSRodney W. Grimes 
908ab36c067SJustin T. Gibbs 	/*
909ab36c067SJustin T. Gibbs 	 * Check for a handle that was initialized
910ab36c067SJustin T. Gibbs 	 * by callout_handle_init, but never used
911ab36c067SJustin T. Gibbs 	 * for a real timeout.
912ab36c067SJustin T. Gibbs 	 */
913ab36c067SJustin T. Gibbs 	if (handle.callout == NULL)
914ab36c067SJustin T. Gibbs 		return;
915df8bae1dSRodney W. Grimes 
9168d809d50SJeff Roberson 	cc = callout_lock(handle.callout);
917a115fb62SHans Petter Selasky 	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
9181a26c3c0SHans Petter Selasky 		callout_stop(handle.callout);
919a115fb62SHans Petter Selasky 	CC_UNLOCK(cc);
920df8bae1dSRodney W. Grimes }
921df8bae1dSRodney W. Grimes 
9223c816944SBruce Evans void
923ab36c067SJustin T. Gibbs callout_handle_init(struct callout_handle *handle)
924ab36c067SJustin T. Gibbs {
925ab36c067SJustin T. Gibbs 	handle->callout = NULL;
926ab36c067SJustin T. Gibbs }
927ab36c067SJustin T. Gibbs 
928acc8326dSGarrett Wollman /*
929acc8326dSGarrett Wollman  * New interface; clients allocate their own callout structures.
930acc8326dSGarrett Wollman  *
931acc8326dSGarrett Wollman  * callout_reset() - establish or change a timeout
932acc8326dSGarrett Wollman  * callout_stop() - disestablish a timeout
933acc8326dSGarrett Wollman  * callout_init() - initialize a callout structure so that it can
934acc8326dSGarrett Wollman  *	safely be passed to callout_reset() and callout_stop()
935acc8326dSGarrett Wollman  *
9369b8b58e0SJonathan Lemon  * <sys/callout.h> defines three convenience macros:
937acc8326dSGarrett Wollman  *
93886fd19deSColin Percival  * callout_active() - returns truth if callout has not been stopped,
93986fd19deSColin Percival  *	drained, or deactivated since the last time the callout was
94086fd19deSColin Percival  *	reset.
9419b8b58e0SJonathan Lemon  * callout_pending() - returns truth if callout is still waiting for timeout
9429b8b58e0SJonathan Lemon  * callout_deactivate() - marks the callout as having been serviced
943acc8326dSGarrett Wollman  */
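/*
 * Illustrative usage sketch (hypothetical consumer code, not part of this
 * file), assuming a softc 'sc' protected by the mutex 'sc->mtx' and a
 * handler 'my_tick':
 *
 *	callout_init_mtx(&sc->co, &sc->mtx, 0);
 *	...
 *	mtx_lock(&sc->mtx);
 *	callout_reset(&sc->co, hz, my_tick, sc);	(fires in ~1 second)
 *	mtx_unlock(&sc->mtx);
 *	...
 *	callout_drain(&sc->co);		(on detach, with sc->mtx not held)
 */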
944d04304d1SGleb Smirnoff int
9455b999a6bSDavide Italiano callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision,
946a115fb62SHans Petter Selasky     void (*ftn)(void *), void *arg, int cpu, int flags)
947acc8326dSGarrett Wollman {
948a115fb62SHans Petter Selasky 	sbintime_t to_sbt, pr;
949a115fb62SHans Petter Selasky 	struct callout_cpu *cc;
950a115fb62SHans Petter Selasky 	int cancelled, direct;
951*15b1eb14SRandall Stewart 	int ignore_cpu = 0;
952acc8326dSGarrett Wollman 
953a115fb62SHans Petter Selasky 	cancelled = 0;
954*15b1eb14SRandall Stewart 	if (cpu == -1) {
955*15b1eb14SRandall Stewart 		ignore_cpu = 1;
956*15b1eb14SRandall Stewart 	} else if ((cpu >= MAXCPU) ||
957*15b1eb14SRandall Stewart 		   (cc_cpu[cpu].cc_inited == 0)) {
958*15b1eb14SRandall Stewart 		/* Invalid CPU spec */
959*15b1eb14SRandall Stewart 		panic("Invalid CPU in callout %d", cpu);
960*15b1eb14SRandall Stewart 	}
961a115fb62SHans Petter Selasky 	if (flags & C_ABSOLUTE) {
962a115fb62SHans Petter Selasky 		to_sbt = sbt;
9635b999a6bSDavide Italiano 	} else {
964a115fb62SHans Petter Selasky 		if ((flags & C_HARDCLOCK) && (sbt < tick_sbt))
9655b999a6bSDavide Italiano 			sbt = tick_sbt;
966a115fb62SHans Petter Selasky 		if ((flags & C_HARDCLOCK) ||
9675b999a6bSDavide Italiano #ifdef NO_EVENTTIMERS
9685b999a6bSDavide Italiano 		    sbt >= sbt_timethreshold) {
969a115fb62SHans Petter Selasky 			to_sbt = getsbinuptime();
9705b999a6bSDavide Italiano 
9715b999a6bSDavide Italiano 			/* Add safety belt for the case of hz > 1000. */
972a115fb62SHans Petter Selasky 			to_sbt += tc_tick_sbt - tick_sbt;
9735b999a6bSDavide Italiano #else
9745b999a6bSDavide Italiano 		    sbt >= sbt_tickthreshold) {
9755b999a6bSDavide Italiano 			/*
9765b999a6bSDavide Italiano 			 * Obtain the time of the last hardclock() call on
9775b999a6bSDavide Italiano 			 * this CPU directly from the kern_clocksource.c.
9785b999a6bSDavide Italiano 			 * This value is per-CPU, but it is equal for all
9795b999a6bSDavide Italiano 			 * active ones.
9805b999a6bSDavide Italiano 			 */
9815b999a6bSDavide Italiano #ifdef __LP64__
982a115fb62SHans Petter Selasky 			to_sbt = DPCPU_GET(hardclocktime);
9835b999a6bSDavide Italiano #else
9845b999a6bSDavide Italiano 			spinlock_enter();
985a115fb62SHans Petter Selasky 			to_sbt = DPCPU_GET(hardclocktime);
9865b999a6bSDavide Italiano 			spinlock_exit();
9875b999a6bSDavide Italiano #endif
9885b999a6bSDavide Italiano #endif
989a115fb62SHans Petter Selasky 			if ((flags & C_HARDCLOCK) == 0)
990a115fb62SHans Petter Selasky 				to_sbt += tick_sbt;
9915b999a6bSDavide Italiano 		} else
992a115fb62SHans Petter Selasky 			to_sbt = sbinuptime();
993a115fb62SHans Petter Selasky 		if (SBT_MAX - to_sbt < sbt)
994a115fb62SHans Petter Selasky 			to_sbt = SBT_MAX;
9951b0c144fSDavide Italiano 		else
996a115fb62SHans Petter Selasky 			to_sbt += sbt;
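		/*
		 * Scale the allowed slop with the interval when a relative
		 * precision was requested: C_PREL(2), for example, yields
		 * pr = sbt >> 2, i.e. about a quarter of the interval.  The
		 * larger of the caller-supplied precision and this value is
		 * used, so that nearby events can be aggregated.
		 */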
997a115fb62SHans Petter Selasky 		pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
998a115fb62SHans Petter Selasky 		    sbt >> C_PRELGET(flags));
999a115fb62SHans Petter Selasky 		if (pr > precision)
1000a115fb62SHans Petter Selasky 			precision = pr;
1001a115fb62SHans Petter Selasky 	}
1002a115fb62SHans Petter Selasky 	/*
100366525b2dSRandall Stewart 	 * This flag used to be set by callout_cc_add(), but the first
100466525b2dSRandall Stewart 	 * time this function is called we could end up with the wrong
100566525b2dSRandall Stewart 	 * direct flag if it is not set before the callout is added.
100666525b2dSRandall Stewart 	 */
100766525b2dSRandall Stewart 	if (flags & C_DIRECT_EXEC) {
1008*15b1eb14SRandall Stewart 		direct = 1;
1009*15b1eb14SRandall Stewart 	} else {
1010*15b1eb14SRandall Stewart 		direct = 0;
101166525b2dSRandall Stewart 	}
1012a115fb62SHans Petter Selasky 	KASSERT(!direct || c->c_lock == NULL,
1013a115fb62SHans Petter Selasky 	    ("%s: direct callout %p has lock", __func__, c));
1014a115fb62SHans Petter Selasky 	cc = callout_lock(c);
1015*15b1eb14SRandall Stewart 	/*
1016*15b1eb14SRandall Stewart 	 * Don't allow migration of pre-allocated callouts lest they
1017*15b1eb14SRandall Stewart 	 * become unbalanced; also handle the case where the user
1018*15b1eb14SRandall Stewart 	 * does not care which CPU is used.
1019*15b1eb14SRandall Stewart 	 */
1020*15b1eb14SRandall Stewart 	if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) ||
1021*15b1eb14SRandall Stewart 	    ignore_cpu) {
1022*15b1eb14SRandall Stewart 		cpu = c->c_cpu;
1023*15b1eb14SRandall Stewart 	}
1024*15b1eb14SRandall Stewart 
1025d2854fa4SRandall Stewart 	if (cc_exec_curr(cc, direct) == c) {
1026a115fb62SHans Petter Selasky 		/*
1027a115fb62SHans Petter Selasky 		 * We're being asked to reschedule a callout which is
1028a115fb62SHans Petter Selasky 		 * currently in progress.  If there is a lock then we
1029a115fb62SHans Petter Selasky 		 * can cancel the callout if it has not really started.
1030a115fb62SHans Petter Selasky 		 */
1031d2854fa4SRandall Stewart 		if (c->c_lock != NULL && cc_exec_cancel(cc, direct))
1032d2854fa4SRandall Stewart 			cancelled = cc_exec_cancel(cc, direct) = true;
1033d2854fa4SRandall Stewart 		if (cc_exec_waiting(cc, direct)) {
1034a115fb62SHans Petter Selasky 			/*
1035a115fb62SHans Petter Selasky 			 * Someone has called callout_drain to kill this
1036a115fb62SHans Petter Selasky 			 * callout.  Don't reschedule.
1037a115fb62SHans Petter Selasky 			 */
1038a115fb62SHans Petter Selasky 			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
1039a115fb62SHans Petter Selasky 			    cancelled ? "cancelled" : "failed to cancel",
1040a115fb62SHans Petter Selasky 			    c, c->c_func, c->c_arg);
1041a115fb62SHans Petter Selasky 			CC_UNLOCK(cc);
1042a115fb62SHans Petter Selasky 			return (cancelled);
1043a115fb62SHans Petter Selasky 		}
1044d2854fa4SRandall Stewart #ifdef SMP
1045d2854fa4SRandall Stewart 		if (callout_migrating(c)) {
1046d2854fa4SRandall Stewart 			/*
1047d2854fa4SRandall Stewart 			 * This only occurs when a second callout_reset_sbt_on()
1048d2854fa4SRandall Stewart 			 * call is made after a previous one moved it into
1049d2854fa4SRandall Stewart 			 * deferred migration (below). Note we do *not* change
1050d2854fa4SRandall Stewart 			 * the prev_cpu even though the previous target may
1051d2854fa4SRandall Stewart 			 * be different.
1052d2854fa4SRandall Stewart 			 */
1053d2854fa4SRandall Stewart 			cc_migration_cpu(cc, direct) = cpu;
1054d2854fa4SRandall Stewart 			cc_migration_time(cc, direct) = to_sbt;
1055d2854fa4SRandall Stewart 			cc_migration_prec(cc, direct) = precision;
1056d2854fa4SRandall Stewart 			cc_migration_func(cc, direct) = ftn;
1057d2854fa4SRandall Stewart 			cc_migration_arg(cc, direct) = arg;
1058d2854fa4SRandall Stewart 			cancelled = 1;
1059d2854fa4SRandall Stewart 			CC_UNLOCK(cc);
1060d2854fa4SRandall Stewart 			return (cancelled);
1061d2854fa4SRandall Stewart 		}
1062d2854fa4SRandall Stewart #endif
1063a115fb62SHans Petter Selasky 	}
1064*15b1eb14SRandall Stewart 	if (c->c_iflags & CALLOUT_PENDING) {
1065*15b1eb14SRandall Stewart 		if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
106666525b2dSRandall Stewart 			if (cc_exec_next(cc) == c)
106766525b2dSRandall Stewart 				cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
1068a115fb62SHans Petter Selasky 			LIST_REMOVE(c, c_links.le);
1069*15b1eb14SRandall Stewart 		} else {
1070a115fb62SHans Petter Selasky 			TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
1071*15b1eb14SRandall Stewart 		}
1072a115fb62SHans Petter Selasky 		cancelled = 1;
1073*15b1eb14SRandall Stewart 		c->c_iflags &= ~CALLOUT_PENDING;
1074*15b1eb14SRandall Stewart 		c->c_flags &= ~CALLOUT_ACTIVE;
10758d809d50SJeff Roberson 	}
10761283e9cdSAttilio Rao 
1077a115fb62SHans Petter Selasky #ifdef SMP
1078a115fb62SHans Petter Selasky 	/*
1079a115fb62SHans Petter Selasky 	 * If the callout must migrate try to perform it immediately.
1080a115fb62SHans Petter Selasky 	 * If the callout is currently running, just defer the migration
1081a115fb62SHans Petter Selasky 	 * to a more appropriate moment.
1082a115fb62SHans Petter Selasky 	 */
1083a115fb62SHans Petter Selasky 	if (c->c_cpu != cpu) {
1084d2854fa4SRandall Stewart 		if (cc_exec_curr(cc, direct) == c) {
1085d2854fa4SRandall Stewart 			/*
1086d2854fa4SRandall Stewart 			 * Pending will have been removed since we are
1087d2854fa4SRandall Stewart 			 * actually executing the callout on another
1088d2854fa4SRandall Stewart 			 * CPU. That callout should be waiting on the
1089d2854fa4SRandall Stewart 			 * lock the caller holds.  If we set both active
1090d2854fa4SRandall Stewart 			 * and pending before returning, then once the
1091d2854fa4SRandall Stewart 			 * executing callout acquires that lock it will
1092d2854fa4SRandall Stewart 			 * see that pending is set and return.  When the
1093d2854fa4SRandall Stewart 			 * actual callout execution returns, the migration
1094d2854fa4SRandall Stewart 			 * will occur in softclock_call_cc() and this new
1095d2854fa4SRandall Stewart 			 * callout will be placed on the new CPU via a
1096d2854fa4SRandall Stewart 			 * call to callout_cpu_switch(), which will take
1097d2854fa4SRandall Stewart 			 * the lock on the right CPU, followed by a call
1098d2854fa4SRandall Stewart 			 * to callout_cc_add(), which will add it there
1099d2854fa4SRandall Stewart 			 * (see softclock_call_cc() above).
1100d2854fa4SRandall Stewart 			 */
1101d2854fa4SRandall Stewart 			cc_migration_cpu(cc, direct) = cpu;
1102d2854fa4SRandall Stewart 			cc_migration_time(cc, direct) = to_sbt;
1103d2854fa4SRandall Stewart 			cc_migration_prec(cc, direct) = precision;
1104d2854fa4SRandall Stewart 			cc_migration_func(cc, direct) = ftn;
1105d2854fa4SRandall Stewart 			cc_migration_arg(cc, direct) = arg;
1106*15b1eb14SRandall Stewart 			c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING);
1107*15b1eb14SRandall Stewart 			c->c_flags |= CALLOUT_ACTIVE;
1108a115fb62SHans Petter Selasky 			CTR6(KTR_CALLOUT,
1109a115fb62SHans Petter Selasky 		    "migration of %p func %p arg %p in %d.%08x to %u deferred",
1110a115fb62SHans Petter Selasky 			    c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
1111a115fb62SHans Petter Selasky 			    (u_int)(to_sbt & 0xffffffff), cpu);
1112a115fb62SHans Petter Selasky 			CC_UNLOCK(cc);
1113a115fb62SHans Petter Selasky 			return (cancelled);
1114a115fb62SHans Petter Selasky 		}
1115a115fb62SHans Petter Selasky 		cc = callout_cpu_switch(c, cc, cpu);
1116a115fb62SHans Petter Selasky 	}
1117a115fb62SHans Petter Selasky #endif
1118a115fb62SHans Petter Selasky 
111966525b2dSRandall Stewart 	callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
1120a115fb62SHans Petter Selasky 	CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
1121a115fb62SHans Petter Selasky 	    cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
1122a115fb62SHans Petter Selasky 	    (u_int)(to_sbt & 0xffffffff));
1123a115fb62SHans Petter Selasky 	CC_UNLOCK(cc);
1124a115fb62SHans Petter Selasky 
1125a115fb62SHans Petter Selasky 	return (cancelled);
1126acc8326dSGarrett Wollman }
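
/*
 * Illustrative call (hypothetical softc and handler): arm a callout for
 * roughly 50ms, allow about 12.5ms of slop (C_PREL(2)) so the event can
 * be coalesced with others, let the system pick the CPU (-1), and run the
 * handler directly from hardware interrupt context.  C_DIRECT_EXEC is
 * only legal for callouts without an associated lock (see the KASSERT
 * above).
 *
 *	callout_reset_sbt_on(&sc->sc_callout, 50 * SBT_1MS, 0,
 *	    foo_timeout, sc, -1, C_PREL(2) | C_DIRECT_EXEC);
 */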
1127acc8326dSGarrett Wollman 
11286e0186d5SSam Leffler /*
11296e0186d5SSam Leffler  * Common idioms that can be optimized in the future.
11306e0186d5SSam Leffler  */
11316e0186d5SSam Leffler int
11326e0186d5SSam Leffler callout_schedule_on(struct callout *c, int to_ticks, int cpu)
11336e0186d5SSam Leffler {
11346e0186d5SSam Leffler 	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
11356e0186d5SSam Leffler }
11366e0186d5SSam Leffler 
11376e0186d5SSam Leffler int
11386e0186d5SSam Leffler callout_schedule(struct callout *c, int to_ticks)
11396e0186d5SSam Leffler {
11406e0186d5SSam Leffler 	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
11416e0186d5SSam Leffler }
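
/*
 * Illustrative sketch (hypothetical handler and softc): a periodic handler
 * re-arming itself with the function and argument recorded by an earlier
 * callout_reset():
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		... periodic work ...
 *		callout_schedule(&sc->sc_callout, hz);
 *	}
 */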
11426e0186d5SSam Leffler 
11432c1bb207SColin Percival int
1144a115fb62SHans Petter Selasky _callout_stop_safe(struct callout *c, int safe)
11452c1bb207SColin Percival {
1146a115fb62SHans Petter Selasky 	struct callout_cpu *cc, *old_cc;
1147a115fb62SHans Petter Selasky 	struct lock_class *class;
1148a115fb62SHans Petter Selasky 	int direct, sq_locked, use_lock;
1149d2854fa4SRandall Stewart 	int not_on_a_list;
11501283e9cdSAttilio Rao 
11519500dd9fSAdrian Chadd 	if (safe)
11529500dd9fSAdrian Chadd 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock,
11539500dd9fSAdrian Chadd 		    "calling %s", __func__);
11549500dd9fSAdrian Chadd 
11551283e9cdSAttilio Rao 	/*
1156a115fb62SHans Petter Selasky 	 * Some old subsystems don't hold Giant while running a callout_stop(),
1157a115fb62SHans Petter Selasky 	 * so just discard this check for the moment.
11581283e9cdSAttilio Rao 	 */
1159a115fb62SHans Petter Selasky 	if (!safe && c->c_lock != NULL) {
1160a115fb62SHans Petter Selasky 		if (c->c_lock == &Giant.lock_object)
1161a115fb62SHans Petter Selasky 			use_lock = mtx_owned(&Giant);
1162a115fb62SHans Petter Selasky 		else {
1163a115fb62SHans Petter Selasky 			use_lock = 1;
1164a115fb62SHans Petter Selasky 			class = LOCK_CLASS(c->c_lock);
1165a115fb62SHans Petter Selasky 			class->lc_assert(c->c_lock, LA_XLOCKED);
11661283e9cdSAttilio Rao 		}
1167a115fb62SHans Petter Selasky 	} else
1168a115fb62SHans Petter Selasky 		use_lock = 0;
1169*15b1eb14SRandall Stewart 	if (c->c_iflags & CALLOUT_DIRECT) {
1170*15b1eb14SRandall Stewart 		direct = 1;
1171*15b1eb14SRandall Stewart 	} else {
1172*15b1eb14SRandall Stewart 		direct = 0;
1173*15b1eb14SRandall Stewart 	}
1174a115fb62SHans Petter Selasky 	sq_locked = 0;
1175a115fb62SHans Petter Selasky 	old_cc = NULL;
1176a115fb62SHans Petter Selasky again:
1177a115fb62SHans Petter Selasky 	cc = callout_lock(c);
1178a115fb62SHans Petter Selasky 
1179*15b1eb14SRandall Stewart 	if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) ==
1180*15b1eb14SRandall Stewart 	    (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) &&
1181*15b1eb14SRandall Stewart 	    ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) {
1182d2854fa4SRandall Stewart 		/*
1183d2854fa4SRandall Stewart 		 * Special case where this slipped in while we
1184d2854fa4SRandall Stewart 		 * were migrating *as* the callout is about to
1185d2854fa4SRandall Stewart 		 * execute. The caller probably holds the lock
1186d2854fa4SRandall Stewart 		 * the callout wants.
1187d2854fa4SRandall Stewart 		 *
1188d2854fa4SRandall Stewart 		 * Get rid of the migration first. Then set
1189d2854fa4SRandall Stewart 		 * the flag that tells this code *not* to
1190d2854fa4SRandall Stewart 		 * try to remove it from any lists (it's not
1191d2854fa4SRandall Stewart 		 * on one yet). When the callout wheel runs,
1192d2854fa4SRandall Stewart 		 * it will ignore this callout.
1193d2854fa4SRandall Stewart 		 */
1194*15b1eb14SRandall Stewart 		c->c_iflags &= ~CALLOUT_PENDING;
1195*15b1eb14SRandall Stewart 		c->c_flags &= ~CALLOUT_ACTIVE;
1196d2854fa4SRandall Stewart 		not_on_a_list = 1;
1197d2854fa4SRandall Stewart 	} else {
1198d2854fa4SRandall Stewart 		not_on_a_list = 0;
1199d2854fa4SRandall Stewart 	}
1200d2854fa4SRandall Stewart 
1201a115fb62SHans Petter Selasky 	/*
1202a115fb62SHans Petter Selasky 	 * If the callout was migrating while the callout cpu lock was
1203a115fb62SHans Petter Selasky 	 * dropped, just drop the sleepqueue lock and check the states
1204a115fb62SHans Petter Selasky 	 * again.
1205a115fb62SHans Petter Selasky 	 */
1206a115fb62SHans Petter Selasky 	if (sq_locked != 0 && cc != old_cc) {
1207a115fb62SHans Petter Selasky #ifdef SMP
1208a115fb62SHans Petter Selasky 		CC_UNLOCK(cc);
1209d2854fa4SRandall Stewart 		sleepq_release(&cc_exec_waiting(old_cc, direct));
1210a115fb62SHans Petter Selasky 		sq_locked = 0;
1211a115fb62SHans Petter Selasky 		old_cc = NULL;
1212a115fb62SHans Petter Selasky 		goto again;
1213a115fb62SHans Petter Selasky #else
1214a115fb62SHans Petter Selasky 		panic("migration should not happen");
1215a115fb62SHans Petter Selasky #endif
1216a115fb62SHans Petter Selasky 	}
1217a115fb62SHans Petter Selasky 
1218a115fb62SHans Petter Selasky 	/*
1219a115fb62SHans Petter Selasky 	 * If the callout isn't pending, it's not on the queue, so
1220a115fb62SHans Petter Selasky 	 * don't attempt to remove it from the queue.  We can try to
1221a115fb62SHans Petter Selasky 	 * stop it by other means, however.
1222a115fb62SHans Petter Selasky 	 */
1223*15b1eb14SRandall Stewart 	if (!(c->c_iflags & CALLOUT_PENDING)) {
1224a115fb62SHans Petter Selasky 		c->c_flags &= ~CALLOUT_ACTIVE;
1225a115fb62SHans Petter Selasky 
1226a115fb62SHans Petter Selasky 		/*
1227a115fb62SHans Petter Selasky 		 * If it wasn't on the queue and it isn't the current
1228a115fb62SHans Petter Selasky 		 * callout, then we can't stop it, so just bail.
1229a115fb62SHans Petter Selasky 		 */
1230d2854fa4SRandall Stewart 		if (cc_exec_curr(cc, direct) != c) {
1231a115fb62SHans Petter Selasky 			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1232a115fb62SHans Petter Selasky 			    c, c->c_func, c->c_arg);
1233a115fb62SHans Petter Selasky 			CC_UNLOCK(cc);
1234a115fb62SHans Petter Selasky 			if (sq_locked)
1235d2854fa4SRandall Stewart 				sleepq_release(&cc_exec_waiting(cc, direct));
1236a115fb62SHans Petter Selasky 			return (0);
1237a115fb62SHans Petter Selasky 		}
1238a115fb62SHans Petter Selasky 
1239a115fb62SHans Petter Selasky 		if (safe) {
1240a115fb62SHans Petter Selasky 			/*
1241a115fb62SHans Petter Selasky 			 * The current callout is running (or just
1242a115fb62SHans Petter Selasky 			 * about to run) and blocking is allowed, so
1243a115fb62SHans Petter Selasky 			 * just wait for the current invocation to
1244a115fb62SHans Petter Selasky 			 * finish.
1245a115fb62SHans Petter Selasky 			 */
1246d2854fa4SRandall Stewart 			while (cc_exec_curr(cc, direct) == c) {
1247a115fb62SHans Petter Selasky 				/*
1248a115fb62SHans Petter Selasky 				 * Use direct calls to the sleepqueue
1249a115fb62SHans Petter Selasky 				 * interface instead of cv/msleep in order
1250a115fb62SHans Petter Selasky 				 * to avoid a LOR between cc_lock and the
1251a115fb62SHans Petter Selasky 				 * sleepqueue chain spinlocks.  This piece
1252a115fb62SHans Petter Selasky 				 * of code effectively emulates msleep_spin().
1253a115fb62SHans Petter Selasky 				 *
1254a115fb62SHans Petter Selasky 				 * If we already have the sleepqueue chain
1255a115fb62SHans Petter Selasky 				 * locked, then we can safely block.  If we
1256a115fb62SHans Petter Selasky 				 * don't already have it locked, however,
1257a115fb62SHans Petter Selasky 				 * we have to drop the cc_lock to lock
1258a115fb62SHans Petter Selasky 				 * it.  This opens several races, so we
1259a115fb62SHans Petter Selasky 				 * restart at the beginning once we have
1260a115fb62SHans Petter Selasky 				 * both locks.  If nothing has changed, then
1261a115fb62SHans Petter Selasky 				 * we will end up back here with sq_locked
1262a115fb62SHans Petter Selasky 				 * set.
1263a115fb62SHans Petter Selasky 				 */
1264a115fb62SHans Petter Selasky 				if (!sq_locked) {
1265a115fb62SHans Petter Selasky 					CC_UNLOCK(cc);
1266a115fb62SHans Petter Selasky 					sleepq_lock(
1267d2854fa4SRandall Stewart 					    &cc_exec_waiting(cc, direct));
1268a115fb62SHans Petter Selasky 					sq_locked = 1;
1269a115fb62SHans Petter Selasky 					old_cc = cc;
1270a115fb62SHans Petter Selasky 					goto again;
1271a115fb62SHans Petter Selasky 				}
1272a115fb62SHans Petter Selasky 
1273a115fb62SHans Petter Selasky 				/*
1274a115fb62SHans Petter Selasky 				 * Migration could be cancelled here, but
1275a115fb62SHans Petter Selasky 				 * since it is still not certain when the
1276a115fb62SHans Petter Selasky 				 * migration will take place, just let
1277a115fb62SHans Petter Selasky 				 * softclock() take care of it.
1278a115fb62SHans Petter Selasky 				 */
1279d2854fa4SRandall Stewart 				cc_exec_waiting(cc, direct) = true;
1280a115fb62SHans Petter Selasky 				DROP_GIANT();
1281a115fb62SHans Petter Selasky 				CC_UNLOCK(cc);
1282a115fb62SHans Petter Selasky 				sleepq_add(
1283d2854fa4SRandall Stewart 				    &cc_exec_waiting(cc, direct),
1284a115fb62SHans Petter Selasky 				    &cc->cc_lock.lock_object, "codrain",
1285a115fb62SHans Petter Selasky 				    SLEEPQ_SLEEP, 0);
1286a115fb62SHans Petter Selasky 				sleepq_wait(
1287d2854fa4SRandall Stewart 				    &cc_exec_waiting(cc, direct),
1288a115fb62SHans Petter Selasky 					     0);
1289a115fb62SHans Petter Selasky 				sq_locked = 0;
1290a115fb62SHans Petter Selasky 				old_cc = NULL;
1291a115fb62SHans Petter Selasky 
1292a115fb62SHans Petter Selasky 				/* Reacquire locks previously released. */
1293a115fb62SHans Petter Selasky 				PICKUP_GIANT();
1294a115fb62SHans Petter Selasky 				CC_LOCK(cc);
1295a115fb62SHans Petter Selasky 			}
1296a115fb62SHans Petter Selasky 		} else if (use_lock &&
1297d2854fa4SRandall Stewart 			   !cc_exec_cancel(cc, direct)) {
1298d2854fa4SRandall Stewart 
1299a115fb62SHans Petter Selasky 			/*
1300a115fb62SHans Petter Selasky 			 * The current callout is waiting for its
1301a115fb62SHans Petter Selasky 			 * lock which we hold.  Cancel the callout
1302a115fb62SHans Petter Selasky 			 * and return.  After our caller drops the
1303a115fb62SHans Petter Selasky 			 * lock, the callout will be skipped in
1304a115fb62SHans Petter Selasky 			 * softclock().
1305a115fb62SHans Petter Selasky 			 */
1306d2854fa4SRandall Stewart 			cc_exec_cancel(cc, direct) = true;
1307a115fb62SHans Petter Selasky 			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
1308a115fb62SHans Petter Selasky 			    c, c->c_func, c->c_arg);
1309a115fb62SHans Petter Selasky 			KASSERT(!cc_cce_migrating(cc, direct),
1310a115fb62SHans Petter Selasky 			    ("callout wrongly scheduled for migration"));
1311*15b1eb14SRandall Stewart 			if (callout_migrating(c)) {
1312*15b1eb14SRandall Stewart 				c->c_iflags &= ~CALLOUT_DFRMIGRATION;
1313*15b1eb14SRandall Stewart #ifdef SMP
1314*15b1eb14SRandall Stewart 				cc_migration_cpu(cc, direct) = CPUBLOCK;
1315*15b1eb14SRandall Stewart 				cc_migration_time(cc, direct) = 0;
1316*15b1eb14SRandall Stewart 				cc_migration_prec(cc, direct) = 0;
1317*15b1eb14SRandall Stewart 				cc_migration_func(cc, direct) = NULL;
1318*15b1eb14SRandall Stewart 				cc_migration_arg(cc, direct) = NULL;
1319*15b1eb14SRandall Stewart #endif
1320*15b1eb14SRandall Stewart 			}
1321a115fb62SHans Petter Selasky 			CC_UNLOCK(cc);
1322a115fb62SHans Petter Selasky 			KASSERT(!sq_locked, ("sleepqueue chain locked"));
1323a115fb62SHans Petter Selasky 			return (1);
1324d2854fa4SRandall Stewart 		} else if (callout_migrating(c)) {
1325d2854fa4SRandall Stewart 			/*
1326d2854fa4SRandall Stewart 			 * The callout is currently being serviced
1327d2854fa4SRandall Stewart 			 * and the "next" callout is scheduled at
1328d2854fa4SRandall Stewart 			 * its completion with a migration. We remove
1329d2854fa4SRandall Stewart 			 * the migration flag so it *won't* get rescheduled,
1330d2854fa4SRandall Stewart 			 * but we can't stop the one that's running, so
1331d2854fa4SRandall Stewart 			 * we return 0.
1332d2854fa4SRandall Stewart 			 */
1333*15b1eb14SRandall Stewart 			c->c_iflags &= ~CALLOUT_DFRMIGRATION;
1334d2854fa4SRandall Stewart #ifdef SMP
1335d2854fa4SRandall Stewart 			/*
1336d2854fa4SRandall Stewart 			 * We can't call cc_cce_cleanup() here since
1337d2854fa4SRandall Stewart 			 * if we did it would clear .ce_curr, and
1338d2854fa4SRandall Stewart 			 * it's still running.  This will prevent a
1339d2854fa4SRandall Stewart 			 * reschedule of the callout when the
1340d2854fa4SRandall Stewart 			 * execution completes.
1341d2854fa4SRandall Stewart 			 */
1342d2854fa4SRandall Stewart 			cc_migration_cpu(cc, direct) = CPUBLOCK;
1343d2854fa4SRandall Stewart 			cc_migration_time(cc, direct) = 0;
1344d2854fa4SRandall Stewart 			cc_migration_prec(cc, direct) = 0;
1345d2854fa4SRandall Stewart 			cc_migration_func(cc, direct) = NULL;
1346d2854fa4SRandall Stewart 			cc_migration_arg(cc, direct) = NULL;
1347d2854fa4SRandall Stewart #endif
1348a115fb62SHans Petter Selasky 			CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
1349a115fb62SHans Petter Selasky 			    c, c->c_func, c->c_arg);
1350a115fb62SHans Petter Selasky 			CC_UNLOCK(cc);
1351d2854fa4SRandall Stewart 			return (0);
1352a115fb62SHans Petter Selasky 		}
1353a115fb62SHans Petter Selasky 		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1354a115fb62SHans Petter Selasky 		    c, c->c_func, c->c_arg);
1355a115fb62SHans Petter Selasky 		CC_UNLOCK(cc);
1356a115fb62SHans Petter Selasky 		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
1357a115fb62SHans Petter Selasky 		return (0);
1358a115fb62SHans Petter Selasky 	}
1359a115fb62SHans Petter Selasky 	if (sq_locked)
1360d2854fa4SRandall Stewart 		sleepq_release(&cc_exec_waiting(cc, direct));
1361a115fb62SHans Petter Selasky 
1362*15b1eb14SRandall Stewart 	c->c_iflags &= ~CALLOUT_PENDING;
1363*15b1eb14SRandall Stewart 	c->c_flags &= ~CALLOUT_ACTIVE;
13641283e9cdSAttilio Rao 
136568a57ebfSGleb Smirnoff 	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
136668a57ebfSGleb Smirnoff 	    c, c->c_func, c->c_arg);
1367d2854fa4SRandall Stewart 	if (not_on_a_list == 0) {
1368*15b1eb14SRandall Stewart 		if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
136966525b2dSRandall Stewart 			if (cc_exec_next(cc) == c)
137066525b2dSRandall Stewart 				cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
1371a115fb62SHans Petter Selasky 			LIST_REMOVE(c, c_links.le);
1372*15b1eb14SRandall Stewart 		} else {
1373a115fb62SHans Petter Selasky 			TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
1374d2854fa4SRandall Stewart 		}
1375*15b1eb14SRandall Stewart 	}
1376a115fb62SHans Petter Selasky 	callout_cc_del(c, cc);
1377a115fb62SHans Petter Selasky 	CC_UNLOCK(cc);
1378a115fb62SHans Petter Selasky 	return (1);
1379acc8326dSGarrett Wollman }
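
/*
 * The callout_stop() and callout_drain() interfaces documented in
 * callout(9) are expected to land here via <sys/callout.h>, with safe set
 * to 0 and 1 respectively.  The return value is nonzero only when the
 * callout was stopped before it could run; callout_drain() may in
 * addition sleep until a currently executing handler has finished.
 */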
1380acc8326dSGarrett Wollman 
1381acc8326dSGarrett Wollman void
1382e392e44cSDavide Italiano callout_init(struct callout *c, int mpsafe)
1383acc8326dSGarrett Wollman {
1384a115fb62SHans Petter Selasky 	bzero(c, sizeof *c);
138598c926b2SIan Dowse 	if (mpsafe) {
1386a115fb62SHans Petter Selasky 		c->c_lock = NULL;
1387*15b1eb14SRandall Stewart 		c->c_iflags = CALLOUT_RETURNUNLOCKED;
138898c926b2SIan Dowse 	} else {
1389a115fb62SHans Petter Selasky 		c->c_lock = &Giant.lock_object;
1390*15b1eb14SRandall Stewart 		c->c_iflags = 0;
139198c926b2SIan Dowse 	}
1392a115fb62SHans Petter Selasky 	c->c_cpu = timeout_cpu;
139398c926b2SIan Dowse }
139498c926b2SIan Dowse 
139598c926b2SIan Dowse void
1396e392e44cSDavide Italiano _callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
139798c926b2SIan Dowse {
139898c926b2SIan Dowse 	bzero(c, sizeof *c);
139964b9ee20SAttilio Rao 	c->c_lock = lock;
1400a115fb62SHans Petter Selasky 	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
1401a115fb62SHans Petter Selasky 	    ("callout_init_lock: bad flags %d", flags));
1402a115fb62SHans Petter Selasky 	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
1403a115fb62SHans Petter Selasky 	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
1404a115fb62SHans Petter Selasky 	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
1405a115fb62SHans Petter Selasky 	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
1406a115fb62SHans Petter Selasky 	    __func__));
1407*15b1eb14SRandall Stewart 	c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
14088d809d50SJeff Roberson 	c->c_cpu = timeout_cpu;
1409acc8326dSGarrett Wollman }
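
/*
 * Consumers normally reach this through the callout_init_mtx() or
 * callout_init_rw() wrappers in <sys/callout.h>.  A sketch with a
 * hypothetical driver mutex:
 *
 *	mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
 *	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
 *
 * softclock() then acquires sc_mtx before invoking the handler and,
 * unless CALLOUT_RETURNUNLOCKED was requested, releases it afterwards.
 */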
1410acc8326dSGarrett Wollman 
1411e1d6dc65SNate Williams #ifdef APM_FIXUP_CALLTODO
1412e1d6dc65SNate Williams /*
1413e1d6dc65SNate Williams  * Adjust the kernel calltodo timeout list.  This routine is used after
1414e1d6dc65SNate Williams  * an APM resume to recalculate the calltodo timer list values with the
1415e1d6dc65SNate Williams  * number of hz's we have been sleeping.  The next hardclock() will detect
1416e1d6dc65SNate Williams  * that there are fired timers and run softclock() to execute them.
1417e1d6dc65SNate Williams  *
1418e1d6dc65SNate Williams  * Please note, I have not done an exhaustive analysis of what code this
1419e1d6dc65SNate Williams  * might break.  I am motivated to have my select()'s and alarm()'s that
1420e1d6dc65SNate Williams  * have expired during suspend firing upon resume so that the applications
1421e1d6dc65SNate Williams  * which set the timer can do the maintenance the timer was for as close
1422e1d6dc65SNate Williams  * as possible to the originally intended time.  Testing this code for a
1423e1d6dc65SNate Williams  * week showed that resuming from a suspend resulted in 22 to 25 timers
1424e1d6dc65SNate Williams  * firing, which seemed independent of whether the suspend was 2 hours or
1425e1d6dc65SNate Williams  * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
1426e1d6dc65SNate Williams  */
1427e1d6dc65SNate Williams void
1428e392e44cSDavide Italiano adjust_timeout_calltodo(struct timeval *time_change)
1429e1d6dc65SNate Williams {
1430e1d6dc65SNate Williams 	register struct callout *p;
1431e1d6dc65SNate Williams 	unsigned long delta_ticks;
1432e1d6dc65SNate Williams 
1433e1d6dc65SNate Williams 	/*
1434e1d6dc65SNate Williams 	 * How many ticks were we asleep?
1435c8b47828SBruce Evans 	 * (stolen from tvtohz()).
1436e1d6dc65SNate Williams 	 */
1437e1d6dc65SNate Williams 
1438e1d6dc65SNate Williams 	/* Don't do anything */
1439e1d6dc65SNate Williams 	if (time_change->tv_sec < 0)
1440e1d6dc65SNate Williams 		return;
1441e1d6dc65SNate Williams 	else if (time_change->tv_sec <= LONG_MAX / 1000000)
1442e1d6dc65SNate Williams 		delta_ticks = (time_change->tv_sec * 1000000 +
1443e1d6dc65SNate Williams 			       time_change->tv_usec + (tick - 1)) / tick + 1;
1444e1d6dc65SNate Williams 	else if (time_change->tv_sec <= LONG_MAX / hz)
1445e1d6dc65SNate Williams 		delta_ticks = time_change->tv_sec * hz +
1446e1d6dc65SNate Williams 			      (time_change->tv_usec + (tick - 1)) / tick + 1;
1447e1d6dc65SNate Williams 	else
1448e1d6dc65SNate Williams 		delta_ticks = LONG_MAX;
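	/*
	 * For example, with hz = 1000 (tick = 1000us) a suspend of 2.5
	 * seconds yields delta_ticks =
	 * (2 * 1000000 + 500000 + 999) / 1000 + 1 = 2501.
	 */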
1449e1d6dc65SNate Williams 
1450e1d6dc65SNate Williams 	if (delta_ticks > INT_MAX)
1451e1d6dc65SNate Williams 		delta_ticks = INT_MAX;
1452e1d6dc65SNate Williams 
1453e1d6dc65SNate Williams 	/*
1454e1d6dc65SNate Williams 	 * Now rip through the timer calltodo list looking for timers
1455e1d6dc65SNate Williams 	 * to expire.
1456e1d6dc65SNate Williams 	 */
1457e1d6dc65SNate Williams 
1458e1d6dc65SNate Williams 	/* don't collide with softclock() */
14598d809d50SJeff Roberson 	CC_LOCK(cc);
1460e1d6dc65SNate Williams 	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
1461e1d6dc65SNate Williams 		p->c_time -= delta_ticks;
1462e1d6dc65SNate Williams 
1463e1d6dc65SNate Williams 		/* Break if the timer had more time on it than delta_ticks */
1464e1d6dc65SNate Williams 		if (p->c_time > 0)
1465e1d6dc65SNate Williams 			break;
1466e1d6dc65SNate Williams 
1467e1d6dc65SNate Williams 		/* take back the ticks the timer didn't use (p->c_time <= 0) */
1468e1d6dc65SNate Williams 		delta_ticks = -p->c_time;
1469e1d6dc65SNate Williams 	}
14708d809d50SJeff Roberson 	CC_UNLOCK(cc);
1471e1d6dc65SNate Williams 
1472e1d6dc65SNate Williams 	return;
1473e1d6dc65SNate Williams }
1474e1d6dc65SNate Williams #endif /* APM_FIXUP_CALLTODO */
14755b999a6bSDavide Italiano 
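/*
 * Bucket index helper for the logarithmic histograms below: returns the
 * position of the most significant bit after the value is scaled by 1.5,
 * which rounds values to the nearer power of two instead of always
 * truncating down (e.g. flssbt(SBT_1S) == 33).
 */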
14765b999a6bSDavide Italiano static int
14775b999a6bSDavide Italiano flssbt(sbintime_t sbt)
14785b999a6bSDavide Italiano {
14795b999a6bSDavide Italiano 
14805b999a6bSDavide Italiano 	sbt += (uint64_t)sbt >> 1;
14815b999a6bSDavide Italiano 	if (sizeof(long) >= sizeof(sbintime_t))
14825b999a6bSDavide Italiano 		return (flsl(sbt));
14835b999a6bSDavide Italiano 	if (sbt >= SBT_1S)
14845b999a6bSDavide Italiano 		return (flsl(((uint64_t)sbt) >> 32) + 32);
14855b999a6bSDavide Italiano 	return (flsl(sbt));
14865b999a6bSDavide Italiano }
14875b999a6bSDavide Italiano 
14885b999a6bSDavide Italiano /*
14895b999a6bSDavide Italiano  * Dump immediate statistic snapshot of the scheduled callouts.
14905b999a6bSDavide Italiano  */
14915b999a6bSDavide Italiano static int
14925b999a6bSDavide Italiano sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
14935b999a6bSDavide Italiano {
14945b999a6bSDavide Italiano 	struct callout *tmp;
14955b999a6bSDavide Italiano 	struct callout_cpu *cc;
14965b999a6bSDavide Italiano 	struct callout_list *sc;
14975b999a6bSDavide Italiano 	sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
14985b999a6bSDavide Italiano 	int ct[64], cpr[64], ccpbk[32];
14995b999a6bSDavide Italiano 	int error, val, i, count, tcum, pcum, maxc, c, medc;
15005b999a6bSDavide Italiano #ifdef SMP
15015b999a6bSDavide Italiano 	int cpu;
15025b999a6bSDavide Italiano #endif
15035b999a6bSDavide Italiano 
15045b999a6bSDavide Italiano 	val = 0;
15055b999a6bSDavide Italiano 	error = sysctl_handle_int(oidp, &val, 0, req);
15065b999a6bSDavide Italiano 	if (error != 0 || req->newptr == NULL)
15075b999a6bSDavide Italiano 		return (error);
15085b999a6bSDavide Italiano 	count = maxc = 0;
15095b999a6bSDavide Italiano 	st = spr = maxt = maxpr = 0;
15105b999a6bSDavide Italiano 	bzero(ccpbk, sizeof(ccpbk));
15115b999a6bSDavide Italiano 	bzero(ct, sizeof(ct));
15125b999a6bSDavide Italiano 	bzero(cpr, sizeof(cpr));
15135b999a6bSDavide Italiano 	now = sbinuptime();
15145b999a6bSDavide Italiano #ifdef SMP
15155b999a6bSDavide Italiano 	CPU_FOREACH(cpu) {
15165b999a6bSDavide Italiano 		cc = CC_CPU(cpu);
15175b999a6bSDavide Italiano #else
15185b999a6bSDavide Italiano 		cc = CC_CPU(timeout_cpu);
15195b999a6bSDavide Italiano #endif
15205b999a6bSDavide Italiano 		CC_LOCK(cc);
15215b999a6bSDavide Italiano 		for (i = 0; i < callwheelsize; i++) {
15225b999a6bSDavide Italiano 			sc = &cc->cc_callwheel[i];
15235b999a6bSDavide Italiano 			c = 0;
15245b999a6bSDavide Italiano 			LIST_FOREACH(tmp, sc, c_links.le) {
15255b999a6bSDavide Italiano 				c++;
15265b999a6bSDavide Italiano 				t = tmp->c_time - now;
15275b999a6bSDavide Italiano 				if (t < 0)
15285b999a6bSDavide Italiano 					t = 0;
15295b999a6bSDavide Italiano 				st += t / SBT_1US;
15305b999a6bSDavide Italiano 				spr += tmp->c_precision / SBT_1US;
15315b999a6bSDavide Italiano 				if (t > maxt)
15325b999a6bSDavide Italiano 					maxt = t;
15335b999a6bSDavide Italiano 				if (tmp->c_precision > maxpr)
15345b999a6bSDavide Italiano 					maxpr = tmp->c_precision;
15355b999a6bSDavide Italiano 				ct[flssbt(t)]++;
15365b999a6bSDavide Italiano 				cpr[flssbt(tmp->c_precision)]++;
15375b999a6bSDavide Italiano 			}
15385b999a6bSDavide Italiano 			if (c > maxc)
15395b999a6bSDavide Italiano 				maxc = c;
15405b999a6bSDavide Italiano 			ccpbk[fls(c + c / 2)]++;
15415b999a6bSDavide Italiano 			count += c;
15425b999a6bSDavide Italiano 		}
15435b999a6bSDavide Italiano 		CC_UNLOCK(cc);
15445b999a6bSDavide Italiano #ifdef SMP
15455b999a6bSDavide Italiano 	}
15465b999a6bSDavide Italiano #endif
15475b999a6bSDavide Italiano 
15485b999a6bSDavide Italiano 	for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
15495b999a6bSDavide Italiano 		tcum += ct[i];
15505b999a6bSDavide Italiano 	medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
15515b999a6bSDavide Italiano 	for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
15525b999a6bSDavide Italiano 		pcum += cpr[i];
15535b999a6bSDavide Italiano 	medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
15545b999a6bSDavide Italiano 	for (i = 0, c = 0; i < 32 && c < count / 2; i++)
15555b999a6bSDavide Italiano 		c += ccpbk[i];
15565b999a6bSDavide Italiano 	medc = (i >= 2) ? (1 << (i - 2)) : 0;
15575b999a6bSDavide Italiano 
15585b999a6bSDavide Italiano 	printf("Scheduled callouts statistic snapshot:\n");
15595b999a6bSDavide Italiano 	printf("  Callouts: %6d  Buckets: %6d*%-3d  Bucket size: 0.%06ds\n",
15605b999a6bSDavide Italiano 	    count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
15615b999a6bSDavide Italiano 	printf("  C/Bk: med %5d         avg %6d.%06jd  max %6d\n",
15625b999a6bSDavide Italiano 	    medc,
15635b999a6bSDavide Italiano 	    count / callwheelsize / mp_ncpus,
15645b999a6bSDavide Italiano 	    (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
15655b999a6bSDavide Italiano 	    maxc);
15665b999a6bSDavide Italiano 	printf("  Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
15675b999a6bSDavide Italiano 	    medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
15685b999a6bSDavide Italiano 	    (st / count) / 1000000, (st / count) % 1000000,
15695b999a6bSDavide Italiano 	    maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
15705b999a6bSDavide Italiano 	printf("  Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
15715b999a6bSDavide Italiano 	    medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
15725b999a6bSDavide Italiano 	    (spr / count) / 1000000, (spr / count) % 1000000,
15735b999a6bSDavide Italiano 	    maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
15745b999a6bSDavide Italiano 	printf("  Distribution:       \tbuckets\t   time\t   tcum\t"
15755b999a6bSDavide Italiano 	    "   prec\t   pcum\n");
15765b999a6bSDavide Italiano 	for (i = 0, tcum = pcum = 0; i < 64; i++) {
15775b999a6bSDavide Italiano 		if (ct[i] == 0 && cpr[i] == 0)
15785b999a6bSDavide Italiano 			continue;
15795b999a6bSDavide Italiano 		t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
15805b999a6bSDavide Italiano 		tcum += ct[i];
15815b999a6bSDavide Italiano 		pcum += cpr[i];
15825b999a6bSDavide Italiano 		printf("  %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
15835b999a6bSDavide Italiano 		    t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
15845b999a6bSDavide Italiano 		    i - 1 - (32 - CC_HASH_SHIFT),
15855b999a6bSDavide Italiano 		    ct[i], tcum, cpr[i], pcum);
15865b999a6bSDavide Italiano 	}
15875b999a6bSDavide Italiano 	return (error);
15885b999a6bSDavide Italiano }
15895b999a6bSDavide Italiano SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
15905b999a6bSDavide Italiano     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
15915b999a6bSDavide Italiano     0, 0, sysctl_kern_callout_stat, "I",
15925b999a6bSDavide Italiano     "Dump immediate statistic snapshot of the scheduled callouts");
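
/*
 * The snapshot above is produced on any write to the sysctl (the handler
 * only acts when req->newptr is set), e.g. from a shell:
 *
 *	sysctl kern.callout_stat=1
 *
 * The report is emitted with printf() to the kernel message buffer.
 */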
1593