/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef SMP
#include <machine/cpu.h>
#endif

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
    "struct callout *");
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
    "struct callout *");

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
int callwheelsize, callwheelbits, callwheelmask;

/*
 * The callout cpu migration entity represents the information necessary
 * to describe a migrating callout to its new callout cpu.
 * Caching this information is important for deferring migration when
 * the migrating callout is already running.
 */
struct cc_mig_ent {
#ifdef SMP
	void	(*ce_migration_func)(void *);
	void	*ce_migration_arg;
	int	ce_migration_cpu;
	int	ce_migration_ticks;
#endif
};

/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 * In particular:
 *	cc_ticks is incremented once per tick in callout_tick().
 *	It tracks the global 'ticks' but in a way that the individual
 *	threads should not worry about races in the order in which
 *	hardclock() and hardclock_cpu() run on the various CPUs.
 *	cc_softticks is advanced in callout_tick() to point to the
 *	first entry in cc_callwheel that may need handling.  In turn,
 *	a softclock() is scheduled so it can serve the various entries i
 *	such that cc_softticks <= i <= cc_ticks.
 *	XXX maybe cc_softticks and cc_ticks should be volatile ?
 *
 *	cc_ticks is also used in callout_reset_on() to determine
 *	when the callout should be served.
 */
struct callout_cpu {
	struct cc_mig_ent	cc_migrating_entity;
	struct mtx		cc_lock;
	struct callout		*cc_callout;
	struct callout_tailq	*cc_callwheel;
	struct callout_list	cc_callfree;
	struct callout		*cc_next;
	struct callout		*cc_curr;
	void			*cc_cookie;
	int			cc_ticks;
	int			cc_softticks;
	int			cc_cancel;
	int			cc_waiting;
	int			cc_firsttick;
};

#ifdef SMP
#define	cc_migration_func	cc_migrating_entity.ce_migration_func
#define	cc_migration_arg	cc_migrating_entity.ce_migration_arg
#define	cc_migration_cpu	cc_migrating_entity.ce_migration_cpu
#define	cc_migration_ticks	cc_migrating_entity.ce_migration_ticks

struct callout_cpu cc_cpu[MAXCPU];
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)

static int timeout_cpu;
void (*callout_new_inserted)(int cpu, int ticks) = NULL;

static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is cc_curr.
 *                     If cc_curr is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to 1 with both callout_lock and c_lock held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops callout_lock to acquire c_lock, and it calls
 *                     the handler only if cc_cancel is still 0 after
 *                     c_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     cc_waiting is nonzero.  Set only when
 *                     cc_curr is non-NULL.
 */

/*
 * Resets the migration entity tied to a specific callout cpu.
 */
static void
cc_cme_cleanup(struct callout_cpu *cc)
{

#ifdef SMP
	cc->cc_migration_cpu = CPUBLOCK;
	cc->cc_migration_ticks = 0;
	cc->cc_migration_func = NULL;
	cc->cc_migration_arg = NULL;
#endif
}

/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cme_migrating(struct callout_cpu *cc)
{

#ifdef SMP
	return (cc->cc_migration_cpu != CPUBLOCK);
#else
	return (0);
#endif
}

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 * This code is called very early in the kernel initialization sequence,
 * and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	struct callout_cpu *cc;

	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	/*
	 * Calculate callout wheel size.
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	cc->cc_callout = (struct callout *)v;
	v = (caddr_t)(cc->cc_callout + ncallout);
	cc->cc_callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(cc->cc_callwheel + callwheelsize);
	return(v);
}

static void
callout_cpu_init(struct callout_cpu *cc)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&cc->cc_callwheel[i]);
	}
	cc_cme_cleanup(cc);
	if (cc->cc_callout == NULL)
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects the incoming callout cpu to be locked and returns
 * with the outgoing callout cpu locked.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	/*
	 * Avoid interrupts and preemption firing after the callout cpu
	 * is blocked in order to avoid deadlocks as the new thread
	 * may need to acquire the callout cpu lock.
	 */
	c->c_cpu = CPUBLOCK;
	spinlock_enter();
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	spinlock_exit();
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	callout_cpu_init(CC_CPU(timeout_cpu));
}

/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &cc->cc_cookie))
		panic("died while creating standard software ithreads");
#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(). */
		cc->cc_callwheel = malloc(
		    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
		    M_WAITOK);
		callout_cpu_init(cc);
	}
#endif
}

SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

void
callout_tick(void)
{
	struct callout_cpu *cc;
	int need_softclock;
	int bucket;

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	need_softclock = 0;
	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	cc->cc_firsttick = cc->cc_ticks = ticks;
	for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
		bucket = cc->cc_softticks & callwheelmask;
		if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
			need_softclock = 1;
			break;
		}
	}
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (need_softclock)
		swi_sched(cc->cc_cookie, 0);
}

int
callout_tickstofirst(int limit)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *sc;
	int curticks;
	int skip = 1;

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	curticks = cc->cc_ticks;
	while (skip < ncallout && skip < limit) {
		sc = &cc->cc_callwheel[(curticks + skip) & callwheelmask];
		/* search scanning ticks */
		TAILQ_FOREACH(c, sc, c_links.tqe) {
			if (c->c_time - curticks <= ncallout)
				goto out;
		}
		skip++;
	}
out:
	cc->cc_firsttick = curticks + skip;
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	return (skip);
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

static void
callout_cc_add(struct callout *c, struct callout_cpu *cc, int to_ticks,
    void (*func)(void *), void *arg, int cpu)
{

	CC_LOCK_ASSERT(cc);

	if (to_ticks <= 0)
		to_ticks = 1;
	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = func;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
	if ((c->c_time - cc->cc_firsttick) < 0 &&
	    callout_new_inserted != NULL) {
		cc->cc_firsttick = c->c_time;
		(*callout_new_inserted)(cpu,
		    to_ticks + (ticks - cc->cc_ticks));
	}
}
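
/*
 * Illustrative sketch (guarded out, not compiled): how an expiry time is
 * hashed into the callout wheel by callout_cc_add() above.  With a wheel
 * of callwheelsize buckets (a power of two), a callout is filed under
 * bucket (c_time & callwheelmask); entries whose c_time lies several wheel
 * revolutions in the future share a bucket with nearer ones and are simply
 * skipped by softclock() until the serviced tick matches their c_time.
 * The helper and the numbers below are made-up examples, not part of the
 * kernel interfaces defined in this file.
 */
#if 0
static int
example_wheel_bucket(int expiry_ticks, int wheelsize)
{

	/* wheelsize is a power of two, as computed in
	   kern_timeout_callwheel_alloc() above. */
	return (expiry_ticks & (wheelsize - 1));
}

/*
 * E.g. with wheelsize = 256, an expiry at tick 1000 and one at tick 1256
 * both hash to bucket 232; softclock() runs only the one whose c_time
 * equals the tick currently being processed.
 */
#endif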

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while (cc->cc_softticks - 1 != cc->cc_ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct lock_class *class;
				struct lock_object *c_lock;
				int c_flags, sharedlock;

				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				class = (c->c_lock != NULL) ?
				    LOCK_CLASS(c->c_lock) : NULL;
				sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
				    0 : 1;
				c_lock = c->c_lock;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				cc->cc_curr = c;
				cc->cc_cancel = 0;
				CC_UNLOCK(cc);
				if (c_lock != NULL) {
					class->lc_lock(c_lock, sharedlock);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (cc->cc_cancel) {
						class->lc_unlock(c_lock);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					cc->cc_cancel = 1;

					if (c_lock == &Giant.lock_object) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						lockcalls++;
						CTR3(KTR_CALLOUT, "callout lock"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				SDT_PROBE(callout_execute, kernel, ,
				    callout_start, c, 0, 0, 0, 0);
				c_func(c_arg);
				SDT_PROBE(callout_execute, kernel, ,
				    callout_end, c, 0, 0, 0, 0);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				CTR1(KTR_CALLOUT, "callout %p finished", c);
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					class->lc_unlock(c_lock);
			skip:
				CC_LOCK(cc);
				/*
				 * If the current callout is locally
				 * allocated (from timeout(9))
				 * then put it on the freelist.
				 *
				 * Note: we need to check the cached
				 * copy of c_flags because if it was not
				 * local, then it's not safe to deref the
				 * callout pointer.
				 */
				if (c_flags & CALLOUT_LOCAL_ALLOC) {
					KASSERT(c->c_flags ==
					    CALLOUT_LOCAL_ALLOC,
					    ("corrupted callout"));
					c->c_func = NULL;
					SLIST_INSERT_HEAD(&cc->cc_callfree, c,
					    c_links.sle);
				}
				cc->cc_curr = NULL;
				if (cc->cc_waiting) {

					/*
					 * There is someone waiting for the
					 * callout to complete.
					 * If the callout was scheduled for
					 * migration just cancel it.
					 */
					if (cc_cme_migrating(cc))
						cc_cme_cleanup(cc);
					cc->cc_waiting = 0;
					CC_UNLOCK(cc);
					wakeup(&cc->cc_waiting);
					CC_LOCK(cc);
				} else if (cc_cme_migrating(cc)) {
#ifdef SMP
					struct callout_cpu *new_cc;
					void (*new_func)(void *);
					void *new_arg;
					int new_cpu, new_ticks;

					/*
					 * If the callout was scheduled for
					 * migration just perform it now.
					 */
					new_cpu = cc->cc_migration_cpu;
					new_ticks = cc->cc_migration_ticks;
					new_func = cc->cc_migration_func;
					new_arg = cc->cc_migration_arg;
					cc_cme_cleanup(cc);

					/*
					 * It should be asserted here that
					 * the callout is not destroyed, but
					 * that is not easy.
					 */
					new_cc = callout_cpu_switch(c, cc,
					    new_cpu);
					callout_cc_add(c, new_cc, new_ticks,
					    new_func, new_arg, new_cpu);
					CC_UNLOCK(new_cc);
					CC_LOCK(cc);
#else
					panic("migration should not happen");
#endif
				}
				steps = 0;
				c = cc->cc_next;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cc->cc_next = NULL;
	CC_UNLOCK(cc);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}
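
/*
 * Illustrative sketch of the legacy timeout(9) interface above (guarded
 * out, not compiled).  "foo_softc", "foo_expire" and the one-second period
 * are hypothetical driver names used only for illustration.
 */
#if 0
struct foo_softc {
	struct callout_handle	foo_th;
};

static void
foo_expire(void *arg)
{
	/* Runs from softclock() roughly one second after foo_start(). */
}

static void
foo_start(struct foo_softc *sc)
{

	callout_handle_init(&sc->foo_th);	/* makes untimeout() benign */
	sc->foo_th = timeout(foo_expire, sc, hz);
}

static void
foo_stop(struct foo_softc *sc)
{

	/* The function, argument and handle together identify the entry. */
	untimeout(foo_expire, sc, sc->foo_th);
}
#endif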

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
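
/*
 * Illustrative sketch of the interface described above (guarded out, not
 * compiled).  "bar_softc", "bar_tick" and the one-second period are
 * hypothetical examples, not symbols defined in this file.
 */
#if 0
struct bar_softc {
	struct callout	bar_callout;
};

static void
bar_tick(void *arg)
{
	struct bar_softc *sc = arg;

	/* ... periodic work ... */
	callout_reset(&sc->bar_callout, hz, bar_tick, sc);	/* rearm */
}

static void
bar_attach(struct bar_softc *sc)
{

	callout_init(&sc->bar_callout, 1);	/* nonzero mpsafe: no lock */
	callout_reset(&sc->bar_callout, hz, bar_tick, sc);
}

static void
bar_detach(struct bar_softc *sc)
{

	/* callout_drain() also waits for a currently running handler. */
	callout_drain(&sc->bar_callout);
}
#endif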
int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
	struct callout_cpu *cc;
	int cancelled = 0;

	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced.
	 */
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		cpu = c->c_cpu;
	cc = callout_lock(c);
	if (cc->cc_curr == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc->cc_cancel)
			cancelled = cc->cc_cancel = 1;
		if (cc->cc_waiting) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (cc->cc_next == c) {
			cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;
		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	}

#ifdef SMP
	/*
	 * If the callout must migrate try to perform it immediately.
	 * If the callout is currently running, just defer the migration
	 * to a more appropriate moment.
	 */
	if (c->c_cpu != cpu) {
		if (cc->cc_curr == c) {
			cc->cc_migration_cpu = cpu;
			cc->cc_migration_ticks = to_ticks;
			cc->cc_migration_func = ftn;
			cc->cc_migration_arg = arg;
			CTR5(KTR_CALLOUT,
		    "migration of %p func %p arg %p in %d to %u deferred",
			    c, c->c_func, c->c_arg, to_ticks, cpu);
			CC_UNLOCK(cc);
			return (cancelled);
		}
		cc = callout_cpu_switch(c, cc, cpu);
	}
#endif

	callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	CC_UNLOCK(cc);

	return (cancelled);
}

/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}

int
_callout_stop_safe(c, safe)
	struct callout *c;
	int safe;
{
	struct callout_cpu *cc, *old_cc;
	struct lock_class *class;
	int use_lock, sq_locked;

	/*
	 * Some old subsystems don't hold Giant while running a callout_stop(),
	 * so just discard this check for the moment.
	 */
	if (!safe && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
		else {
			use_lock = 1;
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
		}
	} else
		use_lock = 0;

	sq_locked = 0;
	old_cc = NULL;
again:
	cc = callout_lock(c);

	/*
	 * If the callout was migrating while the callout cpu lock was
	 * dropped, just drop the sleepqueue lock and check the states
	 * again.
	 */
	if (sq_locked != 0 && cc != old_cc) {
#ifdef SMP
		CC_UNLOCK(cc);
		sleepq_release(&old_cc->cc_waiting);
		sq_locked = 0;
		old_cc = NULL;
		goto again;
#else
		panic("migration should not happen");
#endif
	}

	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (cc->cc_curr != c) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			if (sq_locked)
				sleepq_release(&cc->cc_waiting);
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc->cc_curr == c) {

				/*
				 * Use direct calls to sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between cc_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * emulates a msleep_spin() call actually.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(&cc->cc_waiting);
					sq_locked = 1;
					old_cc = cc;
					goto again;
				}

				/*
				 * Migration could be cancelled here, but
				 * since it is not known when the deferred
				 * migration will actually be performed,
				 * just let softclock() take care of it.
				 */
				cc->cc_waiting = 1;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(&cc->cc_waiting,
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(&cc->cc_waiting, 0);
				sq_locked = 0;
				old_cc = NULL;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
			}
		} else if (use_lock && !cc->cc_cancel) {
			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().
			 */
			cc->cc_cancel = 1;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			KASSERT(!cc_cme_migrating(cc),
			    ("callout wrongly scheduled for migration"));
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		CC_UNLOCK(cc);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
	if (sq_locked)
		sleepq_release(&cc->cc_waiting);

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (cc->cc_next == c) {
		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
	    c_links.tqe);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
	CC_UNLOCK(cc);
	return (1);
}

void
callout_init(c, mpsafe)
	struct callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_flags = 0;
	}
	c->c_cpu = timeout_cpu;
}
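
/*
 * Illustrative sketch (guarded out, not compiled): callout_init_mtx(),
 * declared in <sys/callout.h>, is a wrapper around _callout_init_lock()
 * below, so that softclock() acquires the given mutex before invoking the
 * handler.  "baz_softc" and its members are hypothetical names used only
 * for illustration.
 */
#if 0
struct baz_softc {
	struct mtx	baz_mtx;
	struct callout	baz_callout;
};

static void
baz_attach(struct baz_softc *sc)
{

	mtx_init(&sc->baz_mtx, "baz", NULL, MTX_DEF);
	callout_init_mtx(&sc->baz_callout, &sc->baz_mtx, 0);
}

static void
baz_timeout(void *arg)
{
	struct baz_softc *sc = arg;

	mtx_assert(&sc->baz_mtx, MA_OWNED);	/* softclock() took the lock */
	/* ... timer work ... */
}

static void
baz_arm(struct baz_softc *sc)
{

	mtx_lock(&sc->baz_mtx);
	/*
	 * Because the callout shares baz_mtx, a callout_stop() issued
	 * while holding the mutex cannot race with the handler.
	 */
	callout_reset(&sc->baz_callout, 5 * hz, baz_timeout, sc);
	mtx_unlock(&sc->baz_mtx);
}
#endif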

void
_callout_init_lock(c, lock, flags)
	struct callout *c;
	struct lock_object *lock;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);

	return;
}
#endif /* APM_FIXUP_CALLTODO */