/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef SMP
#include <machine/cpu.h>
#endif

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
    "struct callout *");
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
    "struct callout *");

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
int callwheelsize, callwheelmask;

/*
 * The callout cpu migration entity describes the information needed to
 * move a migrating callout to its new callout cpu.
 * Caching this information is important for deferring the migration when
 * the migrating callout is already running.
 */
struct cc_mig_ent {
#ifdef SMP
	void	(*ce_migration_func)(void *);
	void	*ce_migration_arg;
	int	ce_migration_cpu;
	int	ce_migration_ticks;
#endif
};

/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 * In particular:
 *	cc_ticks is incremented once per tick in callout_tick().
 *	It tracks the global 'ticks' but in a way that the individual
 *	threads should not worry about races in the order in which
 *	hardclock() and hardclock_cpu() run on the various CPUs.
 *	cc_softticks is advanced in callout_tick() to point to the
 *	first entry in cc_callwheel that may need handling.  In turn,
 *	a softclock() is scheduled so it can serve the various entries i
 *	such that cc_softticks <= i <= cc_ticks.
 *	XXX maybe cc_softticks and cc_ticks should be volatile ?
 *
 *	cc_ticks is also used in callout_reset_cpu() to determine
 *	when the callout should be served.
 */
struct callout_cpu {
	struct mtx_padalign	cc_lock;
	struct cc_mig_ent	cc_migrating_entity;
	struct callout		*cc_callout;
	struct callout_tailq	*cc_callwheel;
	struct callout_list	cc_callfree;
	struct callout		*cc_next;
	struct callout		*cc_curr;
	void			*cc_cookie;
	int			cc_ticks;
	int			cc_softticks;
	int			cc_cancel;
	int			cc_waiting;
	int			cc_firsttick;
};

#ifdef SMP
#define	cc_migration_func	cc_migrating_entity.ce_migration_func
#define	cc_migration_arg	cc_migrating_entity.ce_migration_arg
#define	cc_migration_cpu	cc_migrating_entity.ce_migration_cpu
#define	cc_migration_ticks	cc_migrating_entity.ce_migration_ticks

struct callout_cpu cc_cpu[MAXCPU];
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)

static int timeout_cpu;
void (*callout_new_inserted)(int cpu, int ticks) = NULL;

static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr    - If a callout is in progress, it is cc_curr.
 *                If cc_curr is non-NULL, threads waiting in
 *                callout_drain() will be woken up as soon as the
 *                relevant callout completes.
 *   cc_cancel  - Changing to 1 with both cc_lock and c_lock held
 *                guarantees that the current callout will not run.
 *                The softclock() function sets this to 0 before it
 *                drops cc_lock to acquire c_lock, and it calls
 *                the handler only if cc_cancel is still 0 after
 *                c_lock is successfully acquired.
 *   cc_waiting - If a thread is waiting in callout_drain(), then
 *                cc_waiting is nonzero.  Set only when
 *                cc_curr is non-NULL.
 */

/*
 * Resets the migration entity tied to a specific callout cpu.
 */
static void
cc_cme_cleanup(struct callout_cpu *cc)
{

#ifdef SMP
	cc->cc_migration_cpu = CPUBLOCK;
	cc->cc_migration_ticks = 0;
	cc->cc_migration_func = NULL;
	cc->cc_migration_arg = NULL;
#endif
}

/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cme_migrating(struct callout_cpu *cc)
{

#ifdef SMP
	return (cc->cc_migration_cpu != CPUBLOCK);
#else
	return (0);
#endif
}

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 * This code is called very early in the kernel initialization sequence,
 * and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	struct callout_cpu *cc;

	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	/*
	 * Calculate the callout wheel size; it should be the next power of
	 * two higher than 'ncallout'.
	 */
	callwheelsize = 1 << fls(ncallout);
	callwheelmask = callwheelsize - 1;

	cc->cc_callout = (struct callout *)v;
	v = (caddr_t)(cc->cc_callout + ncallout);
	cc->cc_callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(cc->cc_callwheel + callwheelsize);
	return(v);
}

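/*
 * Illustrative note (added for clarity, not part of the original file):
 * fls() returns the index of the most significant set bit, so the
 * "1 << fls(ncallout)" above rounds the wheel size up to a power of two
 * strictly greater than ncallout.  For example, ncallout = 1000 gives
 * fls(1000) == 10, hence callwheelsize == 1024 and callwheelmask == 1023;
 * ncallout = 1024 would give a wheel of 2048 slots.  A power-of-two size
 * allows bucket indices to be computed with "c_time & callwheelmask"
 * instead of a modulo operation.
 */
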
static void
callout_cpu_init(struct callout_cpu *cc)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&cc->cc_callwheel[i]);
	}
	cc_cme_cleanup(cc);
	if (cc->cc_callout == NULL)
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects a locked incoming callout cpu and returns with the
 * outgoing callout cpu locked.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	/*
	 * Avoid interrupts and preemption firing after the callout cpu
	 * is blocked in order to avoid deadlocks as the new thread
	 * may be willing to acquire the callout cpu lock.
	 */
	c->c_cpu = CPUBLOCK;
	spinlock_enter();
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	spinlock_exit();
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif

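/*
 * Explanatory note (added for clarity, not part of the original file):
 * while callout_cpu_switch() holds neither the old nor the new
 * callout_cpu lock, c->c_cpu carries the sentinel value CPUBLOCK.
 * callout_lock() below recognizes that value and spins with
 * cpu_spinwait() until the new owner cpu has been published, so a
 * concurrent callout_reset()/callout_stop() cannot look up and lock the
 * wrong callout_cpu during the hand-off window.
 */
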
/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 * This code is called just once, after the space reserved for the
 * callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	callout_cpu_init(CC_CPU(timeout_cpu));
}

/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &cc->cc_cookie))
		panic("died while creating standard software ithreads");
#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(). */
		cc->cc_callwheel = malloc(
		    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
		    M_WAITOK);
		callout_cpu_init(cc);
	}
#endif
}

SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

void
callout_tick(void)
{
	struct callout_cpu *cc;
	int need_softclock;
	int bucket;

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	need_softclock = 0;
	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	cc->cc_firsttick = cc->cc_ticks = ticks;
	for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
		bucket = cc->cc_softticks & callwheelmask;
		if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
			need_softclock = 1;
			break;
		}
	}
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (need_softclock)
		swi_sched(cc->cc_cookie, 0);
}

int
callout_tickstofirst(int limit)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *sc;
	int curticks;
	int skip = 1;

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	curticks = cc->cc_ticks;
	while (skip < ncallout && skip < limit) {
		sc = &cc->cc_callwheel[(curticks + skip) & callwheelmask];
		/* search scanning ticks */
		TAILQ_FOREACH(c, sc, c_links.tqe) {
			if (c->c_time - curticks <= ncallout)
				goto out;
		}
		skip++;
	}
out:
	cc->cc_firsttick = curticks + skip;
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	return (skip);
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

static void
callout_cc_add(struct callout *c, struct callout_cpu *cc, int to_ticks,
    void (*func)(void *), void *arg, int cpu)
{

	CC_LOCK_ASSERT(cc);

	if (to_ticks <= 0)
		to_ticks = 1;
	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = func;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
	if ((c->c_time - cc->cc_firsttick) < 0 &&
	    callout_new_inserted != NULL) {
		cc->cc_firsttick = c->c_time;
		(*callout_new_inserted)(cpu,
		    to_ticks + (ticks - cc->cc_ticks));
	}
}

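/*
 * Worked example of the hashed-wheel indexing used by callout_cc_add()
 * and softclock() (illustrative addition, not part of the original file;
 * it assumes callwheelsize == 256, so callwheelmask == 255):
 *
 *	ticks = 1024, to_ticks = 300  =>  c_time = 1324
 *	bucket = c_time & callwheelmask = 1324 & 255 = 44
 *
 * softclock() visits bucket 44 whenever curticks & 255 == 44.  At tick
 * 1068 the entry is skipped because c_time != curticks; one full lap of
 * the wheel later, at tick 1324, the comparison matches and the callout
 * is executed.
 */
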
static void
callout_cc_del(struct callout *c, struct callout_cpu *cc)
{

	if (cc->cc_next == c)
		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

static struct callout *
softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
    int *lockcalls, int *gcalls)
{
	void (*c_func)(void *);
	void *c_arg;
	struct lock_class *class;
	struct lock_object *c_lock;
	int c_flags, sharedlock;
#ifdef SMP
	struct callout_cpu *new_cc;
	void (*new_func)(void *);
	void *new_arg;
	int new_cpu, new_ticks;
#endif
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

	cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
	sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
	c_lock = c->c_lock;
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_flags = c->c_flags;
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		c->c_flags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_flags &= ~CALLOUT_PENDING;
	cc->cc_curr = c;
	cc->cc_cancel = 0;
	CC_UNLOCK(cc);
	if (c_lock != NULL) {
		class->lc_lock(c_lock, sharedlock);
		/*
		 * The callout may have been cancelled
		 * while we switched locks.
		 */
		if (cc->cc_cancel) {
			class->lc_unlock(c_lock);
			goto skip;
		}
		/* The callout cannot be stopped now. */
		cc->cc_cancel = 1;

		if (c_lock == &Giant.lock_object) {
			(*gcalls)++;
			CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
			    c, c_func, c_arg);
		} else {
			(*lockcalls)++;
			CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
			    c, c_func, c_arg);
		}
	} else {
		(*mpcalls)++;
		CTR3(KTR_CALLOUT, "callout mpsafe %p func %p arg %p",
		    c, c_func, c_arg);
	}
#ifdef DIAGNOSTIC
	binuptime(&bt1);
#endif
	THREAD_NO_SLEEPING();
	SDT_PROBE(callout_execute, kernel, , callout_start, c, 0, 0, 0, 0);
	c_func(c_arg);
	SDT_PROBE(callout_execute, kernel, , callout_end, c, 0, 0, 0, 0);
	THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
	binuptime(&bt2);
	bintime_sub(&bt2, &bt1);
	if (bt2.frac > maxdt) {
		if (lastfunc != c_func || bt2.frac > maxdt * 2) {
			bintime2timespec(&bt2, &ts2);
			printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = bt2.frac;
		lastfunc = c_func;
	}
#endif
	CTR1(KTR_CALLOUT, "callout %p finished", c);
	if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
		class->lc_unlock(c_lock);
skip:
	CC_LOCK(cc);
	/*
	 * If the current callout is locally allocated (from
	 * timeout(9)) then put it on the freelist.
	 *
	 * Note: we need to check the cached copy of c_flags because
	 * if it was not local, then it's not safe to deref the
	 * callout pointer.
	 */
	if (c_flags & CALLOUT_LOCAL_ALLOC) {
		KASSERT(c->c_flags == CALLOUT_LOCAL_ALLOC,
		    ("corrupted callout"));
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
	cc->cc_curr = NULL;
	if (cc->cc_waiting) {
		/*
		 * There is someone waiting for the
		 * callout to complete.
		 * If the callout was scheduled for
		 * migration just cancel it.
		 */
		if (cc_cme_migrating(cc))
			cc_cme_cleanup(cc);
		cc->cc_waiting = 0;
		CC_UNLOCK(cc);
		wakeup(&cc->cc_waiting);
		CC_LOCK(cc);
	} else if (cc_cme_migrating(cc)) {
#ifdef SMP
		/*
		 * If the callout was scheduled for
		 * migration just perform it now.
		 */
		new_cpu = cc->cc_migration_cpu;
		new_ticks = cc->cc_migration_ticks;
		new_func = cc->cc_migration_func;
		new_arg = cc->cc_migration_arg;
		cc_cme_cleanup(cc);

		/*
		 * Handle deferred callout stops
		 */
		if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
			CTR3(KTR_CALLOUT,
			    "deferred cancelled %p func %p arg %p",
			    c, new_func, new_arg);
			callout_cc_del(c, cc);
			goto nextc;
		}

		c->c_flags &= ~CALLOUT_DFRMIGRATION;

		/*
		 * It should be asserted here that the
		 * callout is not destroyed, but that
		 * is not easy to do.
		 */
		new_cc = callout_cpu_switch(c, cc, new_cpu);
		callout_cc_add(c, new_cc, new_ticks, new_func, new_arg,
		    new_cpu);
		CC_UNLOCK(new_cc);
		CC_LOCK(cc);
#else
		panic("migration should not happen");
#endif
	}
#ifdef SMP
nextc:
#endif
	return (cc->cc_next);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;

#ifndef MAX_SOFTCLOCK_STEPS
#define	MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while (cc->cc_softticks - 1 != cc->cc_ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c != NULL) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c = softclock_call_cc(c, cc, &mpcalls,
				    &lockcalls, &gcalls);
				steps = 0;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cc->cc_next = NULL;
	CC_UNLOCK(cc);
}

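/*
 * Note on the avg_* statistics updated above (explanatory addition, not
 * part of the original file): each one is an exponential moving average
 * kept in fixed point, scaled by 1000 as advertised in the sysctl
 * descriptions, with a weight of 1/256 per softclock() invocation:
 *
 *	avg = avg + (sample * 1000 - avg) / 256
 *
 * so, for example, a sustained depth of 5 entries per call converges to
 * an exported debug.to_avg_depth value of about 5000.
 */
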
/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
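
/*
 * Hedged usage sketch (added for illustration; "example_co",
 * "example_timer" and "sc" are hypothetical names, not part of this
 * file).  A typical MPSAFE consumer of this interface does roughly:
 *
 *	struct callout example_co;
 *
 *	callout_init(&example_co, CALLOUT_MPSAFE);
 *	callout_reset(&example_co, 5 * hz, example_timer, sc);
 *	...
 *	callout_drain(&example_co);	(before freeing the softc)
 *
 * callout_reset(), callout_stop() and callout_drain() are macros from
 * <sys/callout.h> that resolve to callout_reset_on() and
 * _callout_stop_safe() below.
 */
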
int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
	struct callout_cpu *cc;
	int cancelled = 0;

	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced.
	 */
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		cpu = c->c_cpu;
	cc = callout_lock(c);
	if (cc->cc_curr == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc->cc_cancel)
			cancelled = cc->cc_cancel = 1;
		if (cc->cc_waiting) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (cc->cc_next == c) {
			cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;
		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	}

#ifdef SMP
	/*
	 * If the callout must migrate try to perform it immediately.
	 * If the callout is currently running, just defer the migration
	 * to a more appropriate moment.
	 */
	if (c->c_cpu != cpu) {
		if (cc->cc_curr == c) {
			cc->cc_migration_cpu = cpu;
			cc->cc_migration_ticks = to_ticks;
			cc->cc_migration_func = ftn;
			cc->cc_migration_arg = arg;
			c->c_flags |= CALLOUT_DFRMIGRATION;
			CTR5(KTR_CALLOUT,
			    "migration of %p func %p arg %p in %d to %u deferred",
			    c, c->c_func, c->c_arg, to_ticks, cpu);
			CC_UNLOCK(cc);
			return (cancelled);
		}
		cc = callout_cpu_switch(c, cc, cpu);
	}
#endif

	callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	CC_UNLOCK(cc);

	return (cancelled);
}

/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}

int
_callout_stop_safe(c, safe)
	struct callout *c;
	int safe;
{
	struct callout_cpu *cc, *old_cc;
	struct lock_class *class;
	int use_lock, sq_locked;

	/*
	 * Some old subsystems don't hold Giant while running a callout_stop(),
	 * so just discard this check for the moment.
	 */
	if (!safe && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
		else {
			use_lock = 1;
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
		}
	} else
		use_lock = 0;

	sq_locked = 0;
	old_cc = NULL;
again:
	cc = callout_lock(c);

	/*
	 * If the callout was migrating while the callout cpu lock was
	 * dropped, just drop the sleepqueue lock and check the states
	 * again.
	 */
	if (sq_locked != 0 && cc != old_cc) {
#ifdef SMP
		CC_UNLOCK(cc);
		sleepq_release(&old_cc->cc_waiting);
		sq_locked = 0;
		old_cc = NULL;
		goto again;
#else
		panic("migration should not happen");
#endif
	}

	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (cc->cc_curr != c) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			if (sq_locked)
				sleepq_release(&cc->cc_waiting);
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc->cc_curr == c) {

				/*
				 * Use direct calls to sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between cc_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * emulates a msleep_spin() call actually.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(&cc->cc_waiting);
					sq_locked = 1;
					old_cc = cc;
					goto again;
				}

				/*
				 * Migration could be cancelled here, but
				 * as long as it is not certain when the
				 * callout will be processed, just let
				 * softclock() take care of it.
				 */
				cc->cc_waiting = 1;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(&cc->cc_waiting,
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(&cc->cc_waiting, 0);
				sq_locked = 0;
				old_cc = NULL;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
			}
		} else if (use_lock && !cc->cc_cancel) {
			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().
			 */
			cc->cc_cancel = 1;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			KASSERT(!cc_cme_migrating(cc),
			    ("callout wrongly scheduled for migration"));
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		} else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
			c->c_flags &= ~CALLOUT_DFRMIGRATION;
			CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		CC_UNLOCK(cc);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
	if (sq_locked)
		sleepq_release(&cc->cc_waiting);

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);
	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
	    c_links.tqe);
	callout_cc_del(c, cc);

	CC_UNLOCK(cc);
	return (1);
}

void
callout_init(c, mpsafe)
	struct callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_flags = 0;
	}
	c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(c, lock, flags)
	struct callout *c;
	struct lock_object *lock;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}

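/*
 * Hedged usage sketch (illustrative addition, not part of the original
 * file; the names below are hypothetical).  Most consumers do not call
 * _callout_init_lock() directly but use the callout_init_mtx() wrapper
 * from <sys/callout.h>, which associates the callout with the caller's
 * own mutex so the handler always runs with that mutex held:
 *
 *	struct mtx example_mtx;
 *	struct callout example_co;
 *
 *	mtx_init(&example_mtx, "example", NULL, MTX_DEF);
 *	callout_init_mtx(&example_co, &example_mtx, 0);
 *	...
 *	mtx_lock(&example_mtx);
 *	callout_reset(&example_co, hz, example_timer, sc);
 *	mtx_unlock(&example_mtx);
 *
 * With the associated lock held, callout_stop() is then sufficient to
 * guarantee that the handler will not run afterwards, as described in
 * callout(9).
 */
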
#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
		    time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
		    (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);

	return;
}
#endif /* APM_FIXUP_CALLTODO */