/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef SMP
#include <machine/cpu.h>
#endif

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
    "struct callout *");
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
    "struct callout *");

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
int callwheelsize, callwheelbits, callwheelmask;

/*
 * The callout cpu migration entity holds the information necessary to
 * describe a callout that is migrating to a new callout cpu.
 * Caching this information is what allows the migration to be deferred
 * when the migrating callout is already running.
 */
struct cc_mig_ent {
#ifdef SMP
	void	(*ce_migration_func)(void *);
	void	*ce_migration_arg;
	int	ce_migration_cpu;
	int	ce_migration_ticks;
#endif
};

/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 * In particular:
 *	cc_ticks is incremented once per tick in callout_tick().
 *	It tracks the global 'ticks' but in a way that the individual
 *	threads should not worry about races in the order in which
 *	hardclock() and hardclock_cpu() run on the various CPUs.
 *	cc_softticks is advanced in callout_tick() and softclock() to point
 *	to the first entry in cc_callwheel that may need handling. In turn,
 *	a softclock() is scheduled so it can serve the various entries i
 *	such that cc_softticks <= i <= cc_ticks.
 *	XXX maybe cc_softticks and cc_ticks should be volatile ?
 *
 *	cc_ticks is also used in callout_cc_add() to determine
 *	when the callout should be served.
 */
struct callout_cpu {
	struct cc_mig_ent	cc_migrating_entity;
	struct mtx		cc_lock;
	struct callout		*cc_callout;
	struct callout_tailq	*cc_callwheel;
	struct callout_list	cc_callfree;
	struct callout		*cc_next;
	struct callout		*cc_curr;
	void			*cc_cookie;
	int 			cc_ticks;
	int 			cc_softticks;
	int			cc_cancel;
	int			cc_waiting;
	int 			cc_firsttick;
};

#ifdef SMP
#define	cc_migration_func	cc_migrating_entity.ce_migration_func
#define	cc_migration_arg	cc_migrating_entity.ce_migration_arg
#define	cc_migration_cpu	cc_migrating_entity.ce_migration_cpu
#define	cc_migration_ticks	cc_migrating_entity.ce_migration_ticks

struct callout_cpu cc_cpu[MAXCPU];
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)

static int timeout_cpu;
void (*callout_new_inserted)(int cpu, int ticks) = NULL;

MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is cc_curr.
 *                     If cc_curr is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to 1 with both cc_lock and c_lock held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops cc_lock to acquire c_lock, and it calls
 *                     the handler only if cc_cancel is still 0 after
 *                     c_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     cc_waiting is nonzero.  Set only when
 *                     cc_curr is non-NULL.
 */

/*
 * Resets the migration entity tied to a specific callout cpu.
 */
static void
cc_cme_cleanup(struct callout_cpu *cc)
{

#ifdef SMP
	cc->cc_migration_cpu = CPUBLOCK;
	cc->cc_migration_ticks = 0;
	cc->cc_migration_func = NULL;
	cc->cc_migration_arg = NULL;
#endif
}

/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cme_migrating(struct callout_cpu *cc)
{

#ifdef SMP
	return (cc->cc_migration_cpu != CPUBLOCK);
#else
	return (0);
#endif
}

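/*
 * Outline of how the migration entity above is used (see callout_reset_on()
 * and softclock() below): when callout_reset_on() is asked to move a callout
 * that is currently executing, it cannot requeue it immediately, so it
 * records the target cpu, the relative timeout, the handler and its argument
 * in cc_migrating_entity and returns.  Once the handler finishes, softclock()
 * notices cc_cme_migrating(), switches the callout to the new cpu with
 * callout_cpu_switch() and requeues it there with callout_cc_add().  A
 * pending callout_drain() cancels the recorded migration instead.
 */
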
/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 * This code is called very early in the kernel initialization sequence,
 * and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	struct callout_cpu *cc;

	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	cc->cc_callout = (struct callout *)v;
	v = (caddr_t)(cc->cc_callout + ncallout);
	cc->cc_callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(cc->cc_callwheel + callwheelsize);
	return(v);
}

static void
callout_cpu_init(struct callout_cpu *cc)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&cc->cc_callwheel[i]);
	}
	cc_cme_cleanup(cc);
	if (cc->cc_callout == NULL)
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects the incoming callout cpu to be locked and returns
 * with the new (outgoing) callout cpu locked.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	c->c_cpu = CPUBLOCK;
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 * space.
 *
 * This code is called just once, after the space reserved for the
 * callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	callout_cpu_init(CC_CPU(timeout_cpu));
}

/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &cc->cc_cookie))
		panic("died while creating standard software ithreads");
#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(). */
		cc->cc_callwheel = malloc(
		    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
		    M_WAITOK);
		callout_cpu_init(cc);
	}
#endif
}

SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

void
callout_tick(void)
{
	struct callout_cpu *cc;
	int need_softclock;
	int bucket;

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	need_softclock = 0;
	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	cc->cc_firsttick = cc->cc_ticks = ticks;
	for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
		bucket = cc->cc_softticks & callwheelmask;
		if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
			need_softclock = 1;
			break;
		}
	}
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (need_softclock)
		swi_sched(cc->cc_cookie, 0);
}

int
callout_tickstofirst(int limit)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *sc;
	int curticks;
	int skip = 1;

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	curticks = cc->cc_ticks;
	while (skip < ncallout && skip < limit) {
		sc = &cc->cc_callwheel[(curticks + skip) & callwheelmask];
		/* search scanning ticks */
		TAILQ_FOREACH(c, sc, c_links.tqe) {
			if (c->c_time - curticks <= ncallout)
				goto out;
		}
		skip++;
	}
out:
	cc->cc_firsttick = curticks + skip;
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	return (skip);
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

static void
callout_cc_add(struct callout *c, struct callout_cpu *cc, int to_ticks,
    void (*func)(void *), void *arg, int cpu)
{

	CC_LOCK_ASSERT(cc);

	if (to_ticks <= 0)
		to_ticks = 1;
	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = func;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
	if ((c->c_time - cc->cc_firsttick) < 0 &&
	    callout_new_inserted != NULL) {
		cc->cc_firsttick = c->c_time;
		(*callout_new_inserted)(cpu,
		    to_ticks + (ticks - cc->cc_ticks));
	}
}

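/*
 * Worked example for the arithmetic above (hypothetical numbers): with
 * ticks == 5000 and to_ticks == 20, callout_cc_add() sets c->c_time to 5020
 * and links the callout into cc_callwheel[5020 & callwheelmask].
 * softclock() visits that bucket every time the low bits of cc_softticks
 * match, but only fires entries whose c_time equals the tick being
 * processed, so a callout far in the future is skipped cheaply until its
 * tick comes around.
 */
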
/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while (cc->cc_softticks - 1 != cc->cc_ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
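					/*
					 * Dropping the cc_lock spin mutex
					 * below is what actually opens the
					 * window for interrupts; the bare
					 * ';' that follows is only a
					 * placeholder.
					 */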
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct lock_class *class;
				struct lock_object *c_lock;
				int c_flags, sharedlock;

				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				class = (c->c_lock != NULL) ?
				    LOCK_CLASS(c->c_lock) : NULL;
				sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
				    0 : 1;
				c_lock = c->c_lock;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				cc->cc_curr = c;
				cc->cc_cancel = 0;
				CC_UNLOCK(cc);
				if (c_lock != NULL) {
					class->lc_lock(c_lock, sharedlock);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (cc->cc_cancel) {
						class->lc_unlock(c_lock);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					cc->cc_cancel = 1;

					if (c_lock == &Giant.lock_object) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						lockcalls++;
						CTR3(KTR_CALLOUT, "callout lock"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				SDT_PROBE(callout_execute, kernel, ,
				    callout_start, c, 0, 0, 0, 0);
				c_func(c_arg);
				SDT_PROBE(callout_execute, kernel, ,
				    callout_end, c, 0, 0, 0, 0);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				CTR1(KTR_CALLOUT, "callout %p finished", c);
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					class->lc_unlock(c_lock);
			skip:
				CC_LOCK(cc);
				/*
				 * If the current callout is locally
				 * allocated (from timeout(9))
				 * then put it on the freelist.
				 *
				 * Note: we need to check the cached
				 * copy of c_flags because if it was not
				 * local, then it's not safe to deref the
				 * callout pointer.
				 */
				if (c_flags & CALLOUT_LOCAL_ALLOC) {
					KASSERT(c->c_flags ==
					    CALLOUT_LOCAL_ALLOC,
					    ("corrupted callout"));
					c->c_func = NULL;
					SLIST_INSERT_HEAD(&cc->cc_callfree, c,
					    c_links.sle);
				}
				cc->cc_curr = NULL;
				if (cc->cc_waiting) {

					/*
					 * There is someone waiting for the
					 * callout to complete.
					 * If the callout was scheduled for
					 * migration just cancel it.
					 */
					if (cc_cme_migrating(cc))
						cc_cme_cleanup(cc);
					cc->cc_waiting = 0;
					CC_UNLOCK(cc);
					wakeup(&cc->cc_waiting);
					CC_LOCK(cc);
				} else if (cc_cme_migrating(cc)) {
#ifdef SMP
					struct callout_cpu *new_cc;
					void (*new_func)(void *);
					void *new_arg;
					int new_cpu, new_ticks;

					/*
					 * If the callout was scheduled for
					 * migration just perform it now.
					 */
					new_cpu = cc->cc_migration_cpu;
					new_ticks = cc->cc_migration_ticks;
					new_func = cc->cc_migration_func;
					new_arg = cc->cc_migration_arg;
					cc_cme_cleanup(cc);

					/*
					 * It should be asserted here that the
					 * callout is not destroyed, but that
					 * is not easy.
					 */
					new_cc = callout_cpu_switch(c, cc,
					    new_cpu);
					callout_cc_add(c, new_cc, new_ticks,
					    new_func, new_arg, new_cpu);
					CC_UNLOCK(new_cc);
					CC_LOCK(cc);
#else
					panic("migration should not happen");
#endif
				}
				steps = 0;
				c = cc->cc_next;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cc->cc_next = NULL;
	CC_UNLOCK(cc);
}

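/*
 * The avg_* accumulators updated at the end of softclock() above are fixed
 * point exponential moving averages: each pass moves the stored value
 * 1/256th of the way towards (sample * 1000), which is why the sysctl
 * descriptions report "Units = 1/1000".  For example, repeated softclock()
 * runs that each examine 8 callouts pull avg_depth towards 8000, i.e. a
 * reported average depth of 8.
 */
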
/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

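/*
 * Typical use of the legacy interface above (illustrative only; foo_expire
 * and sc are hypothetical driver names):
 *
 *	struct callout_handle ch;
 *
 *	callout_handle_init(&ch);
 *	ch = timeout(foo_expire, sc, hz);	arm: foo_expire(sc) in ~1s
 *	...
 *	untimeout(foo_expire, sc, ch);		cancel if still pending
 */
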
/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
	struct callout_cpu *cc;
	int cancelled = 0;

	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced.
	 */
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		cpu = c->c_cpu;
	cc = callout_lock(c);
	if (cc->cc_curr == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc->cc_cancel)
			cancelled = cc->cc_cancel = 1;
		if (cc->cc_waiting) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (cc->cc_next == c) {
			cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;
		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	}

#ifdef SMP
	/*
	 * If the callout must migrate try to perform it immediately.
	 * If the callout is currently running, just defer the migration
	 * to a more appropriate moment.
	 */
	if (c->c_cpu != cpu) {
		if (cc->cc_curr == c) {
			cc->cc_migration_cpu = cpu;
			cc->cc_migration_ticks = to_ticks;
			cc->cc_migration_func = ftn;
			cc->cc_migration_arg = arg;
			CTR5(KTR_CALLOUT,
			    "migration of %p func %p arg %p in %d to %u deferred",
			    c, c->c_func, c->c_arg, to_ticks, cpu);
			CC_UNLOCK(cc);
			return (cancelled);
		}
		cc = callout_cpu_switch(c, cc, cpu);
	}
#endif

	callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	CC_UNLOCK(cc);

	return (cancelled);
}

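/*
 * Sketch of the callout(9) usage pattern served by callout_reset_on() above
 * (illustrative only; foo_timer, foo_mtx, foo_timeout and sc are
 * hypothetical driver names):
 *
 *	callout_init_mtx(&sc->foo_timer, &sc->foo_mtx, 0);
 *	...
 *	mtx_lock(&sc->foo_mtx);
 *	callout_reset(&sc->foo_timer, hz, foo_timeout, sc);
 *	mtx_unlock(&sc->foo_mtx);
 *	...
 *	callout_drain(&sc->foo_timer);		before freeing the softc
 *
 * As the 'cancelled' logic above shows, callout_reset*() returns non-zero
 * when it cancelled a pending (or still cancellable in-progress) invocation
 * and zero otherwise.
 */
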
/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}

int
_callout_stop_safe(c, safe)
	struct callout *c;
	int safe;
{
	struct callout_cpu *cc, *old_cc;
	struct lock_class *class;
	int use_lock, sq_locked;

	/*
	 * Some old subsystems don't hold Giant while running a callout_stop(),
	 * so just discard this check for the moment.
	 */
	if (!safe && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
		else {
			use_lock = 1;
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
		}
	} else
		use_lock = 0;

	sq_locked = 0;
	old_cc = NULL;
again:
	cc = callout_lock(c);

	/*
	 * If the callout was migrating while the callout cpu lock was
	 * dropped, just drop the sleepqueue lock and check the states
	 * again.
	 */
	if (sq_locked != 0 && cc != old_cc) {
#ifdef SMP
		CC_UNLOCK(cc);
		sleepq_release(&old_cc->cc_waiting);
		sq_locked = 0;
		old_cc = NULL;
		goto again;
#else
		panic("migration should not happen");
#endif
	}

	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (cc->cc_curr != c) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			if (sq_locked)
				sleepq_release(&cc->cc_waiting);
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc->cc_curr == c) {

				/*
				 * Use direct calls to sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between cc_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * emulates a msleep_spin() call actually.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(&cc->cc_waiting);
					sq_locked = 1;
					old_cc = cc;
					goto again;
				}

				/*
				 * Migration could be cancelled here, but
				 * since it is not yet certain when it will
				 * actually be performed, just let
				 * softclock() take care of it.
				 */
				cc->cc_waiting = 1;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(&cc->cc_waiting,
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(&cc->cc_waiting, 0);
				sq_locked = 0;
				old_cc = NULL;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
			}
		} else if (use_lock && !cc->cc_cancel) {
			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().
			 */
			cc->cc_cancel = 1;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			KASSERT(!cc_cme_migrating(cc),
			    ("callout wrongly scheduled for migration"));
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		CC_UNLOCK(cc);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
	if (sq_locked)
		sleepq_release(&cc->cc_waiting);

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (cc->cc_next == c) {
		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
	    c_links.tqe);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
	CC_UNLOCK(cc);
	return (1);
}

void
callout_init(c, mpsafe)
	struct callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_flags = 0;
	}
	c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(c, lock, flags)
	struct callout *c;
	struct lock_object *lock;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}
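
/*
 * Note that both initializers above leave the callout bound to timeout_cpu;
 * it is only moved to another cpu by a later callout_reset_on() or
 * callout_schedule_on() call naming a different cpu.
 */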

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
		    time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
		    (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);

	return;
}
#endif /* APM_FIXUP_CALLTODO */