/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_mtxcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mtxcalls, CTLFLAG_RD, &avg_mtxcalls, 0,
    "Average number of mtx callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c. */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;

static struct callout *nextsoftcheck;	/* Next callout to be checked. */

/**
 * Locked by callout_lock:
 *   curr_callout    - If a callout is in progress, it is curr_callout.
 *                     If curr_callout is non-NULL, threads waiting on
 *                     callout_wait will be woken up as soon as the
 *                     relevant callout completes.
 *   curr_cancelled  - Changing to 1 with both callout_lock and c_mtx held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops callout_lock to acquire c_mtx, and it calls
 *                     the handler only if curr_cancelled is still 0 when
 *                     c_mtx is successfully acquired.
 *   wakeup_ctr      - Incremented every time a thread wants to wait
 *                     for a callout to complete.  Modified only when
 *                     curr_callout is non-NULL.
 *   wakeup_needed   - If a thread is waiting on callout_wait, then
 *                     wakeup_needed is nonzero.  Set only when
 *                     curr_callout is non-NULL.
 */
static struct callout *curr_callout;
static int curr_cancelled;
static int wakeup_ctr;
static int wakeup_needed;

/**
 * Locked by callout_wait_lock:
 *   callout_wait    - If wakeup_needed is set, callout_wait will be
 *                     triggered after the current callout finishes.
 *   wakeup_done_ctr - Set to the current value of wakeup_ctr after
 *                     callout_wait is triggered.
 */
static struct mtx callout_wait_lock;
static struct cv callout_wait;
static int wakeup_done_ctr;

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
        /*
         * Calculate the callout wheel size.
         */
        for (callwheelsize = 1, callwheelbits = 0;
             callwheelsize < ncallout;
             callwheelsize <<= 1, ++callwheelbits)
                ;
        callwheelmask = callwheelsize - 1;

        callout = (struct callout *)v;
        v = (caddr_t)(callout + ncallout);
        callwheel = (struct callout_tailq *)v;
        v = (caddr_t)(callwheel + callwheelsize);
        return (v);
}
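
/*
 * For illustration: the sizing loop above picks the smallest power of two
 * that is >= ncallout.  If ncallout were, say, 1000 (a purely hypothetical
 * value), the loop would leave callwheelsize = 1024, callwheelbits = 10 and
 * callwheelmask = 0x3ff, so a bucket index can be formed with a cheap
 * "t & callwheelmask" instead of a modulo operation.
 */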

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *	space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
        int i;

        SLIST_INIT(&callfree);
        for (i = 0; i < ncallout; i++) {
                callout_init(&callout[i], 0);
                callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
                SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
        }
        for (i = 0; i < callwheelsize; i++) {
                TAILQ_INIT(&callwheel[i]);
        }
        mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
        mtx_init(&callout_wait_lock, "callout_wait_lock", NULL, MTX_DEF);
        cv_init(&callout_wait, "callout_wait");
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
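
/*
 * For illustration: with the wheel sized as above, a callout due at tick t
 * hangs off callwheel[t & callwheelmask].  Because the wheel wraps every
 * callwheelsize ticks, callouts belonging to different revolutions share a
 * bucket; softclock() below therefore also compares c_time against curticks
 * and skips entries whose time has not actually arrived.  E.g. with a
 * hypothetical callwheelmask of 1023, a callout with c_time 5000 sits in
 * bucket 904 and only runs on the pass where curticks itself reaches 5000.
 */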

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
        struct callout *c;
        struct callout_tailq *bucket;
        int curticks;
        int steps;	/* #steps since we last allowed interrupts */
        int depth;
        int mpcalls;
        int mtxcalls;
        int gcalls;
        int wakeup_cookie;
#ifdef DIAGNOSTIC
        struct bintime bt1, bt2;
        struct timespec ts2;
        static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
        static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

        mpcalls = 0;
        mtxcalls = 0;
        gcalls = 0;
        depth = 0;
        steps = 0;
        mtx_lock_spin(&callout_lock);
        while (softticks != ticks) {
                softticks++;
                /*
                 * softticks may be modified by hard clock, so cache
                 * it while we work on a given bucket.
                 */
                curticks = softticks;
                bucket = &callwheel[curticks & callwheelmask];
                c = TAILQ_FIRST(bucket);
                while (c) {
                        depth++;
                        if (c->c_time != curticks) {
                                c = TAILQ_NEXT(c, c_links.tqe);
                                ++steps;
                                if (steps >= MAX_SOFTCLOCK_STEPS) {
                                        nextsoftcheck = c;
                                        /* Give interrupts a chance. */
                                        mtx_unlock_spin(&callout_lock);
                                        ;	/* nothing */
                                        mtx_lock_spin(&callout_lock);
                                        c = nextsoftcheck;
                                        steps = 0;
                                }
                        } else {
                                void (*c_func)(void *);
                                void *c_arg;
                                struct mtx *c_mtx;
                                int c_flags;

                                nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
                                TAILQ_REMOVE(bucket, c, c_links.tqe);
                                c_func = c->c_func;
                                c_arg = c->c_arg;
                                c_mtx = c->c_mtx;
                                c_flags = c->c_flags;
                                if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
                                        c->c_func = NULL;
                                        c->c_flags = CALLOUT_LOCAL_ALLOC;
                                        SLIST_INSERT_HEAD(&callfree, c,
                                            c_links.sle);
                                        curr_callout = NULL;
                                } else {
                                        c->c_flags =
                                            (c->c_flags & ~CALLOUT_PENDING);
                                        curr_callout = c;
                                }
                                curr_cancelled = 0;
                                mtx_unlock_spin(&callout_lock);
                                if (c_mtx != NULL) {
                                        mtx_lock(c_mtx);
                                        /*
                                         * The callout may have been cancelled
                                         * while we switched locks.
                                         */
                                        if (curr_cancelled) {
                                                mtx_unlock(c_mtx);
                                                mtx_lock_spin(&callout_lock);
                                                goto done_locked;
                                        }
                                        /* The callout cannot be stopped now. */
                                        curr_cancelled = 1;

                                        if (c_mtx == &Giant) {
                                                gcalls++;
                                                CTR1(KTR_CALLOUT, "callout %p",
                                                    c_func);
                                        } else {
                                                mtxcalls++;
                                                CTR1(KTR_CALLOUT,
                                                    "callout mtx %p",
                                                    c_func);
                                        }
                                } else {
                                        mpcalls++;
                                        CTR1(KTR_CALLOUT, "callout mpsafe %p",
                                            c_func);
                                }
#ifdef DIAGNOSTIC
                                binuptime(&bt1);
#endif
                                THREAD_NO_SLEEPING();
                                c_func(c_arg);
                                THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
                                binuptime(&bt2);
                                bintime_sub(&bt2, &bt1);
                                if (bt2.frac > maxdt) {
                                        if (lastfunc != c_func ||
                                            bt2.frac > maxdt * 2) {
                                                bintime2timespec(&bt2, &ts2);
                                                printf(
                        "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
                                                    c_func, c_arg,
                                                    (intmax_t)ts2.tv_sec,
                                                    ts2.tv_nsec);
                                        }
                                        maxdt = bt2.frac;
                                        lastfunc = c_func;
                                }
#endif
                                if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
                                        mtx_unlock(c_mtx);
                                mtx_lock_spin(&callout_lock);
done_locked:
                                curr_callout = NULL;
                                if (wakeup_needed) {
                                        /*
                                         * There might be someone waiting
                                         * for the callout to complete.
                                         */
                                        wakeup_cookie = wakeup_ctr;
                                        mtx_unlock_spin(&callout_lock);
                                        mtx_lock(&callout_wait_lock);
                                        cv_broadcast(&callout_wait);
                                        wakeup_done_ctr = wakeup_cookie;
                                        mtx_unlock(&callout_wait_lock);
                                        mtx_lock_spin(&callout_lock);
                                        wakeup_needed = 0;
                                }
                                steps = 0;
                                c = nextsoftcheck;
                        }
                }
        }
        avg_depth += (depth * 1000 - avg_depth) >> 8;
        avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
        avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
        avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
        nextsoftcheck = NULL;
        mtx_unlock_spin(&callout_lock);
}
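
/*
 * For illustration: the avg_* sysctls updated above are exponentially
 * weighted moving averages kept in units of 1/1000 (hence the sysctl
 * descriptions near the top of the file).  Each softclock() pass folds in
 * its per-pass count with a weight of 1/256, e.g.
 *
 *	avg_depth += (depth * 1000 - avg_depth) >> 8;
 *
 * so a hypothetical steady depth of 5 items per pass settles near
 * avg_depth == 5000, which the reader divides by 1000 when interpreting
 * the sysctl output.
 */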

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
        timeout_t *ftn;
        void *arg;
        int to_ticks;
{
        struct callout *new;
        struct callout_handle handle;

        mtx_lock_spin(&callout_lock);

        /* Fill in the next free callout structure. */
        new = SLIST_FIRST(&callfree);
        if (new == NULL)
                /* XXX Attempt to malloc first */
                panic("timeout table full");
        SLIST_REMOVE_HEAD(&callfree, c_links.sle);

        callout_reset(new, to_ticks, ftn, arg);

        handle.callout = new;
        mtx_unlock_spin(&callout_lock);
        return (handle);
}

void
untimeout(ftn, arg, handle)
        timeout_t *ftn;
        void *arg;
        struct callout_handle handle;
{

        /*
         * Check for a handle that was initialized
         * by callout_handle_init, but never used
         * for a real timeout.
         */
        if (handle.callout == NULL)
                return;

        mtx_lock_spin(&callout_lock);
        if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
                callout_stop(handle.callout);
        mtx_unlock_spin(&callout_lock);
}

void
callout_handle_init(struct callout_handle *handle)
{
        handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
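
/*
 * For illustration only (not compiled): a sketch of how a typical consumer
 * might use this interface with a mutex-protected callout.  The "foo" softc,
 * its mutex and the one-second period are hypothetical; only the callout_*()
 * and mtx_*() calls reflect the interface described above.
 */
#if 0
struct foo_softc {
        struct mtx      foo_mtx;
        struct callout  foo_callout;
        int             foo_ticking;
};

static void
foo_tick(void *arg)
{
        struct foo_softc *sc = arg;

        /* Entered with foo_mtx held because of callout_init_mtx() below. */
        mtx_assert(&sc->foo_mtx, MA_OWNED);
        callout_deactivate(&sc->foo_callout);
        if (sc->foo_ticking)
                callout_reset(&sc->foo_callout, hz, foo_tick, sc);
}

static void
foo_start(struct foo_softc *sc)
{

        mtx_init(&sc->foo_mtx, "foo", NULL, MTX_DEF);
        callout_init_mtx(&sc->foo_callout, &sc->foo_mtx, 0);
        mtx_lock(&sc->foo_mtx);
        sc->foo_ticking = 1;
        callout_reset(&sc->foo_callout, hz, foo_tick, sc);
        mtx_unlock(&sc->foo_mtx);
}

static void
foo_stop(struct foo_softc *sc)
{

        mtx_lock(&sc->foo_mtx);
        sc->foo_ticking = 0;
        callout_stop(&sc->foo_callout);
        mtx_unlock(&sc->foo_mtx);
        /* callout_drain() may sleep, so it is called without the mutex. */
        callout_drain(&sc->foo_callout);
}
#endif
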
int
callout_reset(c, to_ticks, ftn, arg)
        struct callout *c;
        int to_ticks;
        void (*ftn)(void *);
        void *arg;
{
        int cancelled = 0;

#ifdef notyet /* Some callers of timeout() do not hold Giant. */
        if (c->c_mtx != NULL)
                mtx_assert(c->c_mtx, MA_OWNED);
#endif

        mtx_lock_spin(&callout_lock);
        if (c == curr_callout) {
                /*
                 * We're being asked to reschedule a callout which is
                 * currently in progress.  If there is a mutex then we
                 * can cancel the callout if it has not really started.
                 */
                if (c->c_mtx != NULL && !curr_cancelled)
                        cancelled = curr_cancelled = 1;
                if (wakeup_needed) {
                        /*
                         * Someone has called callout_drain to kill this
                         * callout.  Don't reschedule.
                         */
                        mtx_unlock_spin(&callout_lock);
                        return (cancelled);
                }
        }
        if (c->c_flags & CALLOUT_PENDING) {
                if (nextsoftcheck == c) {
                        nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
                }
                TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c,
                    c_links.tqe);

                cancelled = 1;

                /*
                 * Part of the normal "stop a pending callout" process
                 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
                 * flags.  We're not going to bother doing that here,
                 * because we're going to be setting those flags ten lines
                 * after this point, and we're holding callout_lock
                 * between now and then.
                 */
        }

        /*
         * We could unlock callout_lock here and lock it again before the
         * TAILQ_INSERT_TAIL, but there's no point since doing this setup
         * doesn't take much time.
         */
        if (to_ticks <= 0)
                to_ticks = 1;

        c->c_arg = arg;
        c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
        c->c_func = ftn;
        c->c_time = ticks + to_ticks;
        TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
            c, c_links.tqe);
        mtx_unlock_spin(&callout_lock);

        return (cancelled);
}
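
/*
 * For illustration only (not compiled): callout_reset() returns 1 when it
 * cancelled an earlier, still-pending (or not-yet-started) invocation while
 * rearming, and 0 otherwise.  The sketch reuses the hypothetical foo_softc
 * above and adds an equally hypothetical counter field.
 */
#if 0
static void
foo_rearm(struct foo_softc *sc)
{

        if (callout_reset(&sc->foo_callout, hz, foo_tick, sc))
                sc->foo_rearm_races++;	/* the previously armed handler never ran */
}
#endif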

int
_callout_stop_safe(c, safe)
        struct callout *c;
        int safe;
{
        int use_mtx, wakeup_cookie;

        if (!safe && c->c_mtx != NULL) {
#ifdef notyet /* Some callers do not hold Giant for Giant-locked callouts. */
                mtx_assert(c->c_mtx, MA_OWNED);
                use_mtx = 1;
#else
                use_mtx = mtx_owned(c->c_mtx);
#endif
        } else {
                use_mtx = 0;
        }

        mtx_lock_spin(&callout_lock);
        /*
         * Don't attempt to delete a callout that's not on the queue.
         */
        if (!(c->c_flags & CALLOUT_PENDING)) {
                c->c_flags &= ~CALLOUT_ACTIVE;
                if (c != curr_callout) {
                        mtx_unlock_spin(&callout_lock);
                        return (0);
                }
                if (safe) {
                        /* We need to wait until the callout is finished. */
                        wakeup_needed = 1;
                        wakeup_cookie = wakeup_ctr++;
                        mtx_unlock_spin(&callout_lock);
                        mtx_lock(&callout_wait_lock);

                        /*
                         * Check to make sure that softclock() didn't
                         * do the wakeup in between our dropping
                         * callout_lock and picking up callout_wait_lock.
                         */
                        if (wakeup_cookie - wakeup_done_ctr > 0)
                                cv_wait(&callout_wait, &callout_wait_lock);

                        mtx_unlock(&callout_wait_lock);
                } else if (use_mtx && !curr_cancelled) {
                        /* We can stop the callout before it runs. */
                        curr_cancelled = 1;
                        mtx_unlock_spin(&callout_lock);
                        return (1);
                } else
                        mtx_unlock_spin(&callout_lock);
                return (0);
        }
        c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

        if (nextsoftcheck == c) {
                nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
        }
        TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);

        if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
                c->c_func = NULL;
                SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
        }
        mtx_unlock_spin(&callout_lock);
        return (1);
}

void
callout_init(c, mpsafe)
        struct callout *c;
        int mpsafe;
{
        bzero(c, sizeof *c);
        if (mpsafe) {
                c->c_mtx = NULL;
                c->c_flags = CALLOUT_RETURNUNLOCKED;
        } else {
                c->c_mtx = &Giant;
                c->c_flags = 0;
        }
}

void
callout_init_mtx(c, mtx, flags)
        struct callout *c;
        struct mtx *mtx;
        int flags;
{
        bzero(c, sizeof *c);
        c->c_mtx = mtx;
        KASSERT((flags & ~CALLOUT_RETURNUNLOCKED) == 0,
            ("callout_init_mtx: bad flags %d", flags));
        /* CALLOUT_RETURNUNLOCKED makes no sense without a mutex. */
        KASSERT(mtx != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
            ("callout_init_mtx: CALLOUT_RETURNUNLOCKED with no mutex"));
        c->c_flags = flags & CALLOUT_RETURNUNLOCKED;
}
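
/*
 * For illustration only (not compiled): CALLOUT_RETURNUNLOCKED tells
 * softclock() not to drop the callout's mutex after the handler returns;
 * the handler is then expected to unlock it itself before returning.  The
 * "bar" names below are hypothetical.
 */
#if 0
struct bar_softc {
        struct mtx      bar_mtx;
        struct callout  bar_callout;
};

static void
bar_expire(void *arg)
{
        struct bar_softc *sc = arg;

        /* Entered with bar_mtx held, as for any mutex-protected callout. */
        /* ... do the expiry work here ... */

        /* With CALLOUT_RETURNUNLOCKED we must release the lock ourselves. */
        mtx_unlock(&sc->bar_mtx);
}

static void
bar_attach(struct bar_softc *sc)
{

        mtx_init(&sc->bar_mtx, "bar", NULL, MTX_DEF);
        callout_init_mtx(&sc->bar_callout, &sc->bar_mtx,
            CALLOUT_RETURNUNLOCKED);
}
#endif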

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
        struct timeval *time_change;
{
        register struct callout *p;
        unsigned long delta_ticks;

        /*
         * How many ticks were we asleep?
         * (stolen from tvtohz()).
         */

        /* Don't do anything */
        if (time_change->tv_sec < 0)
                return;
        else if (time_change->tv_sec <= LONG_MAX / 1000000)
                delta_ticks = (time_change->tv_sec * 1000000 +
                               time_change->tv_usec + (tick - 1)) / tick + 1;
        else if (time_change->tv_sec <= LONG_MAX / hz)
                delta_ticks = time_change->tv_sec * hz +
                              (time_change->tv_usec + (tick - 1)) / tick + 1;
        else
                delta_ticks = LONG_MAX;

        if (delta_ticks > INT_MAX)
                delta_ticks = INT_MAX;

        /*
         * Now rip through the timer calltodo list looking for timers
         * to expire.
         */

        /* don't collide with softclock() */
        mtx_lock_spin(&callout_lock);
        for (p = calltodo.c_next; p != NULL; p = p->c_next) {
                p->c_time -= delta_ticks;

                /* Break if the timer had more time on it than delta_ticks */
                if (p->c_time > 0)
                        break;

                /* take back the ticks the timer didn't use (p->c_time <= 0) */
                delta_ticks = -p->c_time;
        }
        mtx_unlock_spin(&callout_lock);

        return;
}
#endif /* APM_FIXUP_CALLTODO */