1df8bae1dSRodney W. Grimes /*- 2df8bae1dSRodney W. Grimes * Copyright (c) 1982, 1986, 1991, 1993 3df8bae1dSRodney W. Grimes * The Regents of the University of California. All rights reserved. 4df8bae1dSRodney W. Grimes * (c) UNIX System Laboratories, Inc. 5df8bae1dSRodney W. Grimes * All or some portions of this file are derived from material licensed 6df8bae1dSRodney W. Grimes * to the University of California by American Telephone and Telegraph 7df8bae1dSRodney W. Grimes * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8df8bae1dSRodney W. Grimes * the permission of UNIX System Laboratories, Inc. 9df8bae1dSRodney W. Grimes * 10df8bae1dSRodney W. Grimes * Redistribution and use in source and binary forms, with or without 11df8bae1dSRodney W. Grimes * modification, are permitted provided that the following conditions 12df8bae1dSRodney W. Grimes * are met: 13df8bae1dSRodney W. Grimes * 1. Redistributions of source code must retain the above copyright 14df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer. 15df8bae1dSRodney W. Grimes * 2. Redistributions in binary form must reproduce the above copyright 16df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer in the 17df8bae1dSRodney W. Grimes * documentation and/or other materials provided with the distribution. 18df8bae1dSRodney W. Grimes * 4. Neither the name of the University nor the names of its contributors 19df8bae1dSRodney W. Grimes * may be used to endorse or promote products derived from this software 20df8bae1dSRodney W. Grimes * without specific prior written permission. 21df8bae1dSRodney W. Grimes * 22df8bae1dSRodney W. Grimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23df8bae1dSRodney W. Grimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24df8bae1dSRodney W. Grimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25df8bae1dSRodney W. Grimes * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26df8bae1dSRodney W. Grimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27df8bae1dSRodney W. Grimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28df8bae1dSRodney W. Grimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29df8bae1dSRodney W. Grimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30df8bae1dSRodney W. Grimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31df8bae1dSRodney W. Grimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32df8bae1dSRodney W. Grimes * SUCH DAMAGE. 33df8bae1dSRodney W. Grimes * 34acc8326dSGarrett Wollman * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 35df8bae1dSRodney W. Grimes */ 36df8bae1dSRodney W. Grimes 37677b542eSDavid E. O'Brien #include <sys/cdefs.h> 38677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 39677b542eSDavid E. O'Brien 40df8bae1dSRodney W. Grimes #include <sys/param.h> 41df8bae1dSRodney W. Grimes #include <sys/systm.h> 4215b7a470SPoul-Henning Kamp #include <sys/callout.h> 432c1bb207SColin Percival #include <sys/condvar.h> 44df8bae1dSRodney W. Grimes #include <sys/kernel.h> 45ff7ec58aSRobert Watson #include <sys/ktr.h> 46f34fa851SJohn Baldwin #include <sys/lock.h> 47cb799bfeSJohn Baldwin #include <sys/mutex.h> 4821f9e816SJohn Baldwin #include <sys/proc.h> 496a0ce57dSAttilio Rao #include <sys/sleepqueue.h> 5022ee8c4fSPoul-Henning Kamp #include <sys/sysctl.h> 51df8bae1dSRodney W. Grimes 5222ee8c4fSPoul-Henning Kamp static int avg_depth; 5322ee8c4fSPoul-Henning Kamp SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0, 5422ee8c4fSPoul-Henning Kamp "Average number of items examined per softclock call. Units = 1/1000"); 5522ee8c4fSPoul-Henning Kamp static int avg_gcalls; 5622ee8c4fSPoul-Henning Kamp SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0, 5722ee8c4fSPoul-Henning Kamp "Average number of Giant callouts made per softclock call. Units = 1/1000"); 5864b9ee20SAttilio Rao static int avg_lockcalls; 5964b9ee20SAttilio Rao SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0, 6064b9ee20SAttilio Rao "Average number of lock callouts made per softclock call. Units = 1/1000"); 6122ee8c4fSPoul-Henning Kamp static int avg_mpcalls; 6222ee8c4fSPoul-Henning Kamp SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0, 6322ee8c4fSPoul-Henning Kamp "Average number of MP callouts made per softclock call. Units = 1/1000"); 6415b7a470SPoul-Henning Kamp /* 6515b7a470SPoul-Henning Kamp * TODO: 6615b7a470SPoul-Henning Kamp * allocate more timeout table slots when table overflows. 6715b7a470SPoul-Henning Kamp */ 6815b7a470SPoul-Henning Kamp 6915b7a470SPoul-Henning Kamp /* Exported to machdep.c and/or kern_clock.c. */ 70ab36c067SJustin T. Gibbs struct callout *callout; 71ab36c067SJustin T. Gibbs struct callout_list callfree; 72ab36c067SJustin T. Gibbs int callwheelsize, callwheelbits, callwheelmask; 73ab36c067SJustin T. Gibbs struct callout_tailq *callwheel; 7415b7a470SPoul-Henning Kamp int softticks; /* Like ticks, but for softclock(). */ 75166400b7SPoul-Henning Kamp struct mtx callout_lock; 76f23b4c91SGarrett Wollman 77ab36c067SJustin T. Gibbs static struct callout *nextsoftcheck; /* Next callout to be checked. */ 7849a74476SColin Percival 79e9dec2c4SColin Percival /** 802c1bb207SColin Percival * Locked by callout_lock: 812c1bb207SColin Percival * curr_callout - If a callout is in progress, it is curr_callout. 82b36f4588SJohn Baldwin * If curr_callout is non-NULL, threads waiting in 83b36f4588SJohn Baldwin * callout_drain() will be woken up as soon as the 842c1bb207SColin Percival * relevant callout completes. 8564b9ee20SAttilio Rao * curr_cancelled - Changing to 1 with both callout_lock and c_lock held 8698c926b2SIan Dowse * guarantees that the current callout will not run. 8798c926b2SIan Dowse * The softclock() function sets this to 0 before it 8864b9ee20SAttilio Rao * drops callout_lock to acquire c_lock, and it calls 89b36f4588SJohn Baldwin * the handler only if curr_cancelled is still 0 after 9064b9ee20SAttilio Rao * c_lock is successfully acquired. 91b36f4588SJohn Baldwin * callout_wait - If a thread is waiting in callout_drain(), then 92b36f4588SJohn Baldwin * callout_wait is nonzero. Set only when 932c1bb207SColin Percival * curr_callout is non-NULL. 942c1bb207SColin Percival */ 952c1bb207SColin Percival static struct callout *curr_callout; 9698c926b2SIan Dowse static int curr_cancelled; 97b36f4588SJohn Baldwin static int callout_wait; 98df8bae1dSRodney W. Grimes 99df8bae1dSRodney W. Grimes /* 100219d632cSMatthew Dillon * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization 101219d632cSMatthew Dillon * 102219d632cSMatthew Dillon * This code is called very early in the kernel initialization sequence, 103219d632cSMatthew Dillon * and may be called more then once. 104219d632cSMatthew Dillon */ 105219d632cSMatthew Dillon caddr_t 106219d632cSMatthew Dillon kern_timeout_callwheel_alloc(caddr_t v) 107219d632cSMatthew Dillon { 108219d632cSMatthew Dillon /* 109219d632cSMatthew Dillon * Calculate callout wheel size 110219d632cSMatthew Dillon */ 111219d632cSMatthew Dillon for (callwheelsize = 1, callwheelbits = 0; 112219d632cSMatthew Dillon callwheelsize < ncallout; 113219d632cSMatthew Dillon callwheelsize <<= 1, ++callwheelbits) 114219d632cSMatthew Dillon ; 115219d632cSMatthew Dillon callwheelmask = callwheelsize - 1; 116219d632cSMatthew Dillon 117219d632cSMatthew Dillon callout = (struct callout *)v; 118219d632cSMatthew Dillon v = (caddr_t)(callout + ncallout); 119219d632cSMatthew Dillon callwheel = (struct callout_tailq *)v; 120219d632cSMatthew Dillon v = (caddr_t)(callwheel + callwheelsize); 121219d632cSMatthew Dillon return(v); 122219d632cSMatthew Dillon } 123219d632cSMatthew Dillon 124219d632cSMatthew Dillon /* 125219d632cSMatthew Dillon * kern_timeout_callwheel_init() - initialize previously reserved callwheel 126219d632cSMatthew Dillon * space. 127219d632cSMatthew Dillon * 128219d632cSMatthew Dillon * This code is called just once, after the space reserved for the 129219d632cSMatthew Dillon * callout wheel has been finalized. 130219d632cSMatthew Dillon */ 131219d632cSMatthew Dillon void 132219d632cSMatthew Dillon kern_timeout_callwheel_init(void) 133219d632cSMatthew Dillon { 134219d632cSMatthew Dillon int i; 135219d632cSMatthew Dillon 136219d632cSMatthew Dillon SLIST_INIT(&callfree); 137219d632cSMatthew Dillon for (i = 0; i < ncallout; i++) { 138219d632cSMatthew Dillon callout_init(&callout[i], 0); 139219d632cSMatthew Dillon callout[i].c_flags = CALLOUT_LOCAL_ALLOC; 140219d632cSMatthew Dillon SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle); 141219d632cSMatthew Dillon } 142219d632cSMatthew Dillon for (i = 0; i < callwheelsize; i++) { 143219d632cSMatthew Dillon TAILQ_INIT(&callwheel[i]); 144219d632cSMatthew Dillon } 1456008862bSJohn Baldwin mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE); 146219d632cSMatthew Dillon } 147219d632cSMatthew Dillon 148219d632cSMatthew Dillon /* 149ab36c067SJustin T. Gibbs * The callout mechanism is based on the work of Adam M. Costello and 150ab36c067SJustin T. Gibbs * George Varghese, published in a technical report entitled "Redesigning 151ab36c067SJustin T. Gibbs * the BSD Callout and Timer Facilities" and modified slightly for inclusion 152ab36c067SJustin T. Gibbs * in FreeBSD by Justin T. Gibbs. The original work on the data structures 153024035e8SHiten Pandya * used in this implementation was published by G. Varghese and T. Lauck in 154ab36c067SJustin T. Gibbs * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for 155ab36c067SJustin T. Gibbs * the Efficient Implementation of a Timer Facility" in the Proceedings of 156ab36c067SJustin T. Gibbs * the 11th ACM Annual Symposium on Operating Systems Principles, 157ab36c067SJustin T. Gibbs * Austin, Texas Nov 1987. 158ab36c067SJustin T. Gibbs */ 159a50ec505SPoul-Henning Kamp 160ab36c067SJustin T. Gibbs /* 161df8bae1dSRodney W. Grimes * Software (low priority) clock interrupt. 162df8bae1dSRodney W. Grimes * Run periodic events from timeout queue. 163df8bae1dSRodney W. Grimes */ 164df8bae1dSRodney W. Grimes void 1658088699fSJohn Baldwin softclock(void *dummy) 166df8bae1dSRodney W. Grimes { 167b336df68SPoul-Henning Kamp struct callout *c; 168b336df68SPoul-Henning Kamp struct callout_tailq *bucket; 169b336df68SPoul-Henning Kamp int curticks; 170b336df68SPoul-Henning Kamp int steps; /* #steps since we last allowed interrupts */ 17122ee8c4fSPoul-Henning Kamp int depth; 17222ee8c4fSPoul-Henning Kamp int mpcalls; 17364b9ee20SAttilio Rao int lockcalls; 17422ee8c4fSPoul-Henning Kamp int gcalls; 17548b0f4b6SKirk McKusick #ifdef DIAGNOSTIC 17648b0f4b6SKirk McKusick struct bintime bt1, bt2; 17748b0f4b6SKirk McKusick struct timespec ts2; 17848b0f4b6SKirk McKusick static uint64_t maxdt = 36893488147419102LL; /* 2 msec */ 179377e7be4SPoul-Henning Kamp static timeout_t *lastfunc; 18048b0f4b6SKirk McKusick #endif 181df8bae1dSRodney W. Grimes 18215b7a470SPoul-Henning Kamp #ifndef MAX_SOFTCLOCK_STEPS 18315b7a470SPoul-Henning Kamp #define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */ 18415b7a470SPoul-Henning Kamp #endif /* MAX_SOFTCLOCK_STEPS */ 185ab36c067SJustin T. Gibbs 18622ee8c4fSPoul-Henning Kamp mpcalls = 0; 18764b9ee20SAttilio Rao lockcalls = 0; 18822ee8c4fSPoul-Henning Kamp gcalls = 0; 18922ee8c4fSPoul-Henning Kamp depth = 0; 190ab36c067SJustin T. Gibbs steps = 0; 1919ed346baSBosko Milekic mtx_lock_spin(&callout_lock); 192ab36c067SJustin T. Gibbs while (softticks != ticks) { 19345327611SJustin T. Gibbs softticks++; 19445327611SJustin T. Gibbs /* 19545327611SJustin T. Gibbs * softticks may be modified by hard clock, so cache 19645327611SJustin T. Gibbs * it while we work on a given bucket. 19745327611SJustin T. Gibbs */ 19845327611SJustin T. Gibbs curticks = softticks; 19945327611SJustin T. Gibbs bucket = &callwheel[curticks & callwheelmask]; 20045327611SJustin T. Gibbs c = TAILQ_FIRST(bucket); 201ab36c067SJustin T. Gibbs while (c) { 20222ee8c4fSPoul-Henning Kamp depth++; 20345327611SJustin T. Gibbs if (c->c_time != curticks) { 204ab36c067SJustin T. Gibbs c = TAILQ_NEXT(c, c_links.tqe); 205ab36c067SJustin T. Gibbs ++steps; 206ab36c067SJustin T. Gibbs if (steps >= MAX_SOFTCLOCK_STEPS) { 207ab36c067SJustin T. Gibbs nextsoftcheck = c; 20845327611SJustin T. Gibbs /* Give interrupts a chance. */ 2099ed346baSBosko Milekic mtx_unlock_spin(&callout_lock); 210ab32297dSJohn Baldwin ; /* nothing */ 2119ed346baSBosko Milekic mtx_lock_spin(&callout_lock); 212ab36c067SJustin T. Gibbs c = nextsoftcheck; 213ab36c067SJustin T. Gibbs steps = 0; 214df8bae1dSRodney W. Grimes } 215ab36c067SJustin T. Gibbs } else { 216ab36c067SJustin T. Gibbs void (*c_func)(void *); 217ab36c067SJustin T. Gibbs void *c_arg; 21864b9ee20SAttilio Rao struct lock_class *class; 21964b9ee20SAttilio Rao int c_flags, sharedlock; 220ab36c067SJustin T. Gibbs 221ab36c067SJustin T. Gibbs nextsoftcheck = TAILQ_NEXT(c, c_links.tqe); 22245327611SJustin T. Gibbs TAILQ_REMOVE(bucket, c, c_links.tqe); 22364b9ee20SAttilio Rao class = (c->c_lock != NULL) ? 22464b9ee20SAttilio Rao LOCK_CLASS(c->c_lock) : NULL; 22564b9ee20SAttilio Rao sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 22664b9ee20SAttilio Rao 0 : 1; 227ab36c067SJustin T. Gibbs c_func = c->c_func; 228ab36c067SJustin T. Gibbs c_arg = c->c_arg; 229fa2fbc3dSJake Burkholder c_flags = c->c_flags; 230acc8326dSGarrett Wollman if (c->c_flags & CALLOUT_LOCAL_ALLOC) { 2310ceba3d6SColin Percival c->c_func = NULL; 232acc8326dSGarrett Wollman c->c_flags = CALLOUT_LOCAL_ALLOC; 233acc8326dSGarrett Wollman SLIST_INSERT_HEAD(&callfree, c, 234acc8326dSGarrett Wollman c_links.sle); 23557c037beSIan Dowse curr_callout = NULL; 236acc8326dSGarrett Wollman } else { 237acc8326dSGarrett Wollman c->c_flags = 2389b8b58e0SJonathan Lemon (c->c_flags & ~CALLOUT_PENDING); 2392c1bb207SColin Percival curr_callout = c; 24057c037beSIan Dowse } 24198c926b2SIan Dowse curr_cancelled = 0; 2429ed346baSBosko Milekic mtx_unlock_spin(&callout_lock); 24364b9ee20SAttilio Rao if (class != NULL) { 24464b9ee20SAttilio Rao class->lc_lock(c->c_lock, sharedlock); 24598c926b2SIan Dowse /* 24698c926b2SIan Dowse * The callout may have been cancelled 24798c926b2SIan Dowse * while we switched locks. 24898c926b2SIan Dowse */ 24998c926b2SIan Dowse if (curr_cancelled) { 25064b9ee20SAttilio Rao class->lc_unlock(c->c_lock); 251b36f4588SJohn Baldwin goto skip; 25298c926b2SIan Dowse } 25398c926b2SIan Dowse /* The callout cannot be stopped now. */ 25498c926b2SIan Dowse curr_cancelled = 1; 25598c926b2SIan Dowse 25664b9ee20SAttilio Rao if (c->c_lock == &Giant.lock_object) { 25722ee8c4fSPoul-Henning Kamp gcalls++; 25868a57ebfSGleb Smirnoff CTR3(KTR_CALLOUT, 25968a57ebfSGleb Smirnoff "callout %p func %p arg %p", 26068a57ebfSGleb Smirnoff c, c_func, c_arg); 26198c926b2SIan Dowse } else { 26264b9ee20SAttilio Rao lockcalls++; 26364b9ee20SAttilio Rao CTR3(KTR_CALLOUT, "callout lock" 26468a57ebfSGleb Smirnoff " %p func %p arg %p", 26568a57ebfSGleb Smirnoff c, c_func, c_arg); 26698c926b2SIan Dowse } 26722ee8c4fSPoul-Henning Kamp } else { 26822ee8c4fSPoul-Henning Kamp mpcalls++; 26968a57ebfSGleb Smirnoff CTR3(KTR_CALLOUT, 27068a57ebfSGleb Smirnoff "callout mpsafe %p func %p arg %p", 27168a57ebfSGleb Smirnoff c, c_func, c_arg); 27222ee8c4fSPoul-Henning Kamp } 27348b0f4b6SKirk McKusick #ifdef DIAGNOSTIC 27448b0f4b6SKirk McKusick binuptime(&bt1); 27548b0f4b6SKirk McKusick #endif 27653c0e1ffSJohn Baldwin THREAD_NO_SLEEPING(); 277ab36c067SJustin T. Gibbs c_func(c_arg); 27853c0e1ffSJohn Baldwin THREAD_SLEEPING_OK(); 27948b0f4b6SKirk McKusick #ifdef DIAGNOSTIC 28048b0f4b6SKirk McKusick binuptime(&bt2); 28148b0f4b6SKirk McKusick bintime_sub(&bt2, &bt1); 28248b0f4b6SKirk McKusick if (bt2.frac > maxdt) { 283377e7be4SPoul-Henning Kamp if (lastfunc != c_func || 284377e7be4SPoul-Henning Kamp bt2.frac > maxdt * 2) { 28548b0f4b6SKirk McKusick bintime2timespec(&bt2, &ts2); 28648b0f4b6SKirk McKusick printf( 287377e7be4SPoul-Henning Kamp "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n", 28848b0f4b6SKirk McKusick c_func, c_arg, 289377e7be4SPoul-Henning Kamp (intmax_t)ts2.tv_sec, 290377e7be4SPoul-Henning Kamp ts2.tv_nsec); 291377e7be4SPoul-Henning Kamp } 292377e7be4SPoul-Henning Kamp maxdt = bt2.frac; 293377e7be4SPoul-Henning Kamp lastfunc = c_func; 29448b0f4b6SKirk McKusick } 29548b0f4b6SKirk McKusick #endif 29698c926b2SIan Dowse if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0) 29764b9ee20SAttilio Rao class->lc_unlock(c->c_lock); 298b36f4588SJohn Baldwin skip: 2999ed346baSBosko Milekic mtx_lock_spin(&callout_lock); 3002c1bb207SColin Percival curr_callout = NULL; 301b36f4588SJohn Baldwin if (callout_wait) { 3022c1bb207SColin Percival /* 303b36f4588SJohn Baldwin * There is someone waiting 3042c1bb207SColin Percival * for the callout to complete. 3052c1bb207SColin Percival */ 306b36f4588SJohn Baldwin callout_wait = 0; 3076a0ce57dSAttilio Rao mtx_unlock_spin(&callout_lock); 3086a0ce57dSAttilio Rao wakeup(&callout_wait); 3096a0ce57dSAttilio Rao mtx_lock_spin(&callout_lock); 31049a74476SColin Percival } 311ab36c067SJustin T. Gibbs steps = 0; 312ab36c067SJustin T. Gibbs c = nextsoftcheck; 313ab36c067SJustin T. Gibbs } 314ab36c067SJustin T. Gibbs } 315ab36c067SJustin T. Gibbs } 31622ee8c4fSPoul-Henning Kamp avg_depth += (depth * 1000 - avg_depth) >> 8; 31722ee8c4fSPoul-Henning Kamp avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8; 31864b9ee20SAttilio Rao avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8; 31922ee8c4fSPoul-Henning Kamp avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8; 320ab36c067SJustin T. Gibbs nextsoftcheck = NULL; 3219ed346baSBosko Milekic mtx_unlock_spin(&callout_lock); 322df8bae1dSRodney W. Grimes } 323df8bae1dSRodney W. Grimes 324df8bae1dSRodney W. Grimes /* 325df8bae1dSRodney W. Grimes * timeout -- 326df8bae1dSRodney W. Grimes * Execute a function after a specified length of time. 327df8bae1dSRodney W. Grimes * 328df8bae1dSRodney W. Grimes * untimeout -- 329df8bae1dSRodney W. Grimes * Cancel previous timeout function call. 330df8bae1dSRodney W. Grimes * 331ab36c067SJustin T. Gibbs * callout_handle_init -- 332ab36c067SJustin T. Gibbs * Initialize a handle so that using it with untimeout is benign. 333ab36c067SJustin T. Gibbs * 334df8bae1dSRodney W. Grimes * See AT&T BCI Driver Reference Manual for specification. This 335ab36c067SJustin T. Gibbs * implementation differs from that one in that although an 336ab36c067SJustin T. Gibbs * identification value is returned from timeout, the original 337ab36c067SJustin T. Gibbs * arguments to timeout as well as the identifier are used to 338ab36c067SJustin T. Gibbs * identify entries for untimeout. 339df8bae1dSRodney W. Grimes */ 340ab36c067SJustin T. Gibbs struct callout_handle 341ab36c067SJustin T. Gibbs timeout(ftn, arg, to_ticks) 3428f03c6f1SBruce Evans timeout_t *ftn; 343df8bae1dSRodney W. Grimes void *arg; 344e82ac18eSJonathan Lemon int to_ticks; 345df8bae1dSRodney W. Grimes { 346ab36c067SJustin T. Gibbs struct callout *new; 347ab36c067SJustin T. Gibbs struct callout_handle handle; 348df8bae1dSRodney W. Grimes 3499ed346baSBosko Milekic mtx_lock_spin(&callout_lock); 350df8bae1dSRodney W. Grimes 351df8bae1dSRodney W. Grimes /* Fill in the next free callout structure. */ 352ab36c067SJustin T. Gibbs new = SLIST_FIRST(&callfree); 353ab36c067SJustin T. Gibbs if (new == NULL) 354ab36c067SJustin T. Gibbs /* XXX Attempt to malloc first */ 355df8bae1dSRodney W. Grimes panic("timeout table full"); 356ab36c067SJustin T. Gibbs SLIST_REMOVE_HEAD(&callfree, c_links.sle); 357df8bae1dSRodney W. Grimes 358acc8326dSGarrett Wollman callout_reset(new, to_ticks, ftn, arg); 359acc8326dSGarrett Wollman 360ab36c067SJustin T. Gibbs handle.callout = new; 3619ed346baSBosko Milekic mtx_unlock_spin(&callout_lock); 362ab36c067SJustin T. Gibbs return (handle); 363df8bae1dSRodney W. Grimes } 364df8bae1dSRodney W. Grimes 365df8bae1dSRodney W. Grimes void 366ab36c067SJustin T. Gibbs untimeout(ftn, arg, handle) 3678f03c6f1SBruce Evans timeout_t *ftn; 368df8bae1dSRodney W. Grimes void *arg; 369ab36c067SJustin T. Gibbs struct callout_handle handle; 370df8bae1dSRodney W. Grimes { 371df8bae1dSRodney W. Grimes 372ab36c067SJustin T. Gibbs /* 373ab36c067SJustin T. Gibbs * Check for a handle that was initialized 374ab36c067SJustin T. Gibbs * by callout_handle_init, but never used 375ab36c067SJustin T. Gibbs * for a real timeout. 376ab36c067SJustin T. Gibbs */ 377ab36c067SJustin T. Gibbs if (handle.callout == NULL) 378ab36c067SJustin T. Gibbs return; 379df8bae1dSRodney W. Grimes 3809ed346baSBosko Milekic mtx_lock_spin(&callout_lock); 381acc8326dSGarrett Wollman if (handle.callout->c_func == ftn && handle.callout->c_arg == arg) 382acc8326dSGarrett Wollman callout_stop(handle.callout); 3839ed346baSBosko Milekic mtx_unlock_spin(&callout_lock); 384df8bae1dSRodney W. Grimes } 385df8bae1dSRodney W. Grimes 3863c816944SBruce Evans void 387ab36c067SJustin T. Gibbs callout_handle_init(struct callout_handle *handle) 388ab36c067SJustin T. Gibbs { 389ab36c067SJustin T. Gibbs handle->callout = NULL; 390ab36c067SJustin T. Gibbs } 391ab36c067SJustin T. Gibbs 392acc8326dSGarrett Wollman /* 393acc8326dSGarrett Wollman * New interface; clients allocate their own callout structures. 394acc8326dSGarrett Wollman * 395acc8326dSGarrett Wollman * callout_reset() - establish or change a timeout 396acc8326dSGarrett Wollman * callout_stop() - disestablish a timeout 397acc8326dSGarrett Wollman * callout_init() - initialize a callout structure so that it can 398acc8326dSGarrett Wollman * safely be passed to callout_reset() and callout_stop() 399acc8326dSGarrett Wollman * 4009b8b58e0SJonathan Lemon * <sys/callout.h> defines three convenience macros: 401acc8326dSGarrett Wollman * 40286fd19deSColin Percival * callout_active() - returns truth if callout has not been stopped, 40386fd19deSColin Percival * drained, or deactivated since the last time the callout was 40486fd19deSColin Percival * reset. 4059b8b58e0SJonathan Lemon * callout_pending() - returns truth if callout is still waiting for timeout 4069b8b58e0SJonathan Lemon * callout_deactivate() - marks the callout as having been serviced 407acc8326dSGarrett Wollman */ 408d04304d1SGleb Smirnoff int 409e82ac18eSJonathan Lemon callout_reset(c, to_ticks, ftn, arg) 410acc8326dSGarrett Wollman struct callout *c; 411acc8326dSGarrett Wollman int to_ticks; 4124d77a549SAlfred Perlstein void (*ftn)(void *); 413acc8326dSGarrett Wollman void *arg; 414acc8326dSGarrett Wollman { 415d04304d1SGleb Smirnoff int cancelled = 0; 416acc8326dSGarrett Wollman 4179ed346baSBosko Milekic mtx_lock_spin(&callout_lock); 41898c926b2SIan Dowse if (c == curr_callout) { 4192c1bb207SColin Percival /* 4202c1bb207SColin Percival * We're being asked to reschedule a callout which is 42164b9ee20SAttilio Rao * currently in progress. If there is a lock then we 42298c926b2SIan Dowse * can cancel the callout if it has not really started. 42398c926b2SIan Dowse */ 42464b9ee20SAttilio Rao if (c->c_lock != NULL && !curr_cancelled) 425d04304d1SGleb Smirnoff cancelled = curr_cancelled = 1; 426b36f4588SJohn Baldwin if (callout_wait) { 42798c926b2SIan Dowse /* 42898c926b2SIan Dowse * Someone has called callout_drain to kill this 42998c926b2SIan Dowse * callout. Don't reschedule. 4302c1bb207SColin Percival */ 43168a57ebfSGleb Smirnoff CTR4(KTR_CALLOUT, "%s %p func %p arg %p", 43268a57ebfSGleb Smirnoff cancelled ? "cancelled" : "failed to cancel", 43368a57ebfSGleb Smirnoff c, c->c_func, c->c_arg); 4342c1bb207SColin Percival mtx_unlock_spin(&callout_lock); 435d04304d1SGleb Smirnoff return (cancelled); 43649a74476SColin Percival } 43798c926b2SIan Dowse } 4380413bacdSColin Percival if (c->c_flags & CALLOUT_PENDING) { 4390413bacdSColin Percival if (nextsoftcheck == c) { 4400413bacdSColin Percival nextsoftcheck = TAILQ_NEXT(c, c_links.tqe); 4410413bacdSColin Percival } 4420413bacdSColin Percival TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, 4430413bacdSColin Percival c_links.tqe); 4440413bacdSColin Percival 445d04304d1SGleb Smirnoff cancelled = 1; 446d04304d1SGleb Smirnoff 4470413bacdSColin Percival /* 4480413bacdSColin Percival * Part of the normal "stop a pending callout" process 4490413bacdSColin Percival * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING 4500413bacdSColin Percival * flags. We're not going to bother doing that here, 4510413bacdSColin Percival * because we're going to be setting those flags ten lines 4520413bacdSColin Percival * after this point, and we're holding callout_lock 4530413bacdSColin Percival * between now and then. 4540413bacdSColin Percival */ 4550413bacdSColin Percival } 456acc8326dSGarrett Wollman 457acc8326dSGarrett Wollman /* 458ab32297dSJohn Baldwin * We could unlock callout_lock here and lock it again before the 459ab32297dSJohn Baldwin * TAILQ_INSERT_TAIL, but there's no point since doing this setup 460ab32297dSJohn Baldwin * doesn't take much time. 461acc8326dSGarrett Wollman */ 462acc8326dSGarrett Wollman if (to_ticks <= 0) 463acc8326dSGarrett Wollman to_ticks = 1; 464acc8326dSGarrett Wollman 465acc8326dSGarrett Wollman c->c_arg = arg; 466e82ac18eSJonathan Lemon c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING); 467acc8326dSGarrett Wollman c->c_func = ftn; 468acc8326dSGarrett Wollman c->c_time = ticks + to_ticks; 469acc8326dSGarrett Wollman TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask], 470acc8326dSGarrett Wollman c, c_links.tqe); 47168a57ebfSGleb Smirnoff CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d", 47268a57ebfSGleb Smirnoff cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks); 4739ed346baSBosko Milekic mtx_unlock_spin(&callout_lock); 474d04304d1SGleb Smirnoff 475d04304d1SGleb Smirnoff return (cancelled); 476acc8326dSGarrett Wollman } 477acc8326dSGarrett Wollman 4782c1bb207SColin Percival int 4792c1bb207SColin Percival _callout_stop_safe(c, safe) 4802c1bb207SColin Percival struct callout *c; 4812c1bb207SColin Percival int safe; 4822c1bb207SColin Percival { 48364b9ee20SAttilio Rao struct lock_class *class; 48464b9ee20SAttilio Rao int use_lock, sq_locked; 48598c926b2SIan Dowse 48664b9ee20SAttilio Rao /* 48764b9ee20SAttilio Rao * Some old subsystems don't hold Giant while running a callout_stop(), 48864b9ee20SAttilio Rao * so just discard this check for the moment. 48964b9ee20SAttilio Rao */ 49064b9ee20SAttilio Rao if (!safe && c->c_lock != NULL) { 49164b9ee20SAttilio Rao if (c->c_lock == &Giant.lock_object) 49264b9ee20SAttilio Rao use_lock = mtx_owned(&Giant); 49364b9ee20SAttilio Rao else { 49464b9ee20SAttilio Rao use_lock = 1; 49564b9ee20SAttilio Rao class = LOCK_CLASS(c->c_lock); 49664b9ee20SAttilio Rao class->lc_assert(c->c_lock, LA_XLOCKED); 49798c926b2SIan Dowse } 49864b9ee20SAttilio Rao } else 49964b9ee20SAttilio Rao use_lock = 0; 5002c1bb207SColin Percival 50167b158d8SJohn Baldwin sq_locked = 0; 50267b158d8SJohn Baldwin again: 5039ed346baSBosko Milekic mtx_lock_spin(&callout_lock); 504acc8326dSGarrett Wollman /* 505b36f4588SJohn Baldwin * If the callout isn't pending, it's not on the queue, so 506b36f4588SJohn Baldwin * don't attempt to remove it from the queue. We can try to 507b36f4588SJohn Baldwin * stop it by other means however. 508acc8326dSGarrett Wollman */ 509acc8326dSGarrett Wollman if (!(c->c_flags & CALLOUT_PENDING)) { 5109b8b58e0SJonathan Lemon c->c_flags &= ~CALLOUT_ACTIVE; 511b36f4588SJohn Baldwin 512b36f4588SJohn Baldwin /* 513b36f4588SJohn Baldwin * If it wasn't on the queue and it isn't the current 514b36f4588SJohn Baldwin * callout, then we can't stop it, so just bail. 515b36f4588SJohn Baldwin */ 51698c926b2SIan Dowse if (c != curr_callout) { 51768a57ebfSGleb Smirnoff CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 51868a57ebfSGleb Smirnoff c, c->c_func, c->c_arg); 51998c926b2SIan Dowse mtx_unlock_spin(&callout_lock); 52067b158d8SJohn Baldwin if (sq_locked) 52167b158d8SJohn Baldwin sleepq_release(&callout_wait); 52298c926b2SIan Dowse return (0); 52398c926b2SIan Dowse } 524b36f4588SJohn Baldwin 52598c926b2SIan Dowse if (safe) { 5262c1bb207SColin Percival /* 527b36f4588SJohn Baldwin * The current callout is running (or just 528b36f4588SJohn Baldwin * about to run) and blocking is allowed, so 529b36f4588SJohn Baldwin * just wait for the current invocation to 530b36f4588SJohn Baldwin * finish. 5312c1bb207SColin Percival */ 532b36f4588SJohn Baldwin while (c == curr_callout) { 5336a0ce57dSAttilio Rao 5346a0ce57dSAttilio Rao /* 5356a0ce57dSAttilio Rao * Use direct calls to sleepqueue interface 5366a0ce57dSAttilio Rao * instead of cv/msleep in order to avoid 5376a0ce57dSAttilio Rao * a LOR between callout_lock and sleepqueue 5386a0ce57dSAttilio Rao * chain spinlocks. This piece of code 5396a0ce57dSAttilio Rao * emulates a msleep_spin() call actually. 54067b158d8SJohn Baldwin * 54167b158d8SJohn Baldwin * If we already have the sleepqueue chain 54267b158d8SJohn Baldwin * locked, then we can safely block. If we 54367b158d8SJohn Baldwin * don't already have it locked, however, 54467b158d8SJohn Baldwin * we have to drop the callout_lock to lock 54567b158d8SJohn Baldwin * it. This opens several races, so we 54667b158d8SJohn Baldwin * restart at the beginning once we have 54767b158d8SJohn Baldwin * both locks. If nothing has changed, then 54867b158d8SJohn Baldwin * we will end up back here with sq_locked 54967b158d8SJohn Baldwin * set. 5506a0ce57dSAttilio Rao */ 55167b158d8SJohn Baldwin if (!sq_locked) { 5526a0ce57dSAttilio Rao mtx_unlock_spin(&callout_lock); 5536a0ce57dSAttilio Rao sleepq_lock(&callout_wait); 55467b158d8SJohn Baldwin sq_locked = 1; 55567b158d8SJohn Baldwin goto again; 5566a0ce57dSAttilio Rao } 55767b158d8SJohn Baldwin 558b36f4588SJohn Baldwin callout_wait = 1; 5596a0ce57dSAttilio Rao DROP_GIANT(); 5606a0ce57dSAttilio Rao mtx_unlock_spin(&callout_lock); 5616a0ce57dSAttilio Rao sleepq_add(&callout_wait, 5626a0ce57dSAttilio Rao &callout_lock.lock_object, "codrain", 5636a0ce57dSAttilio Rao SLEEPQ_SLEEP, 0); 5646a0ce57dSAttilio Rao sleepq_wait(&callout_wait); 56567b158d8SJohn Baldwin sq_locked = 0; 5666a0ce57dSAttilio Rao 5676a0ce57dSAttilio Rao /* Reacquire locks previously released. */ 5686a0ce57dSAttilio Rao PICKUP_GIANT(); 5696a0ce57dSAttilio Rao mtx_lock_spin(&callout_lock); 570b36f4588SJohn Baldwin } 57164b9ee20SAttilio Rao } else if (use_lock && !curr_cancelled) { 572b36f4588SJohn Baldwin /* 57364b9ee20SAttilio Rao * The current callout is waiting for its 57464b9ee20SAttilio Rao * lock which we hold. Cancel the callout 575b36f4588SJohn Baldwin * and return. After our caller drops the 57664b9ee20SAttilio Rao * lock, the callout will be skipped in 577b36f4588SJohn Baldwin * softclock(). 578b36f4588SJohn Baldwin */ 57998c926b2SIan Dowse curr_cancelled = 1; 58068a57ebfSGleb Smirnoff CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 58168a57ebfSGleb Smirnoff c, c->c_func, c->c_arg); 58298c926b2SIan Dowse mtx_unlock_spin(&callout_lock); 58367b158d8SJohn Baldwin KASSERT(!sq_locked, ("sleepqueue chain locked")); 58498c926b2SIan Dowse return (1); 585b36f4588SJohn Baldwin } 58668a57ebfSGleb Smirnoff CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 58768a57ebfSGleb Smirnoff c, c->c_func, c->c_arg); 5889ed346baSBosko Milekic mtx_unlock_spin(&callout_lock); 58967b158d8SJohn Baldwin KASSERT(!sq_locked, ("sleepqueue chain still locked")); 590a45982d2SJohn Baldwin return (0); 591acc8326dSGarrett Wollman } 59267b158d8SJohn Baldwin if (sq_locked) 59367b158d8SJohn Baldwin sleepq_release(&callout_wait); 59467b158d8SJohn Baldwin 5959b8b58e0SJonathan Lemon c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 596acc8326dSGarrett Wollman 597acc8326dSGarrett Wollman if (nextsoftcheck == c) { 598acc8326dSGarrett Wollman nextsoftcheck = TAILQ_NEXT(c, c_links.tqe); 599acc8326dSGarrett Wollman } 600acc8326dSGarrett Wollman TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe); 601acc8326dSGarrett Wollman 60268a57ebfSGleb Smirnoff CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 60368a57ebfSGleb Smirnoff c, c->c_func, c->c_arg); 60468a57ebfSGleb Smirnoff 605acc8326dSGarrett Wollman if (c->c_flags & CALLOUT_LOCAL_ALLOC) { 6067834081cSColin Percival c->c_func = NULL; 607acc8326dSGarrett Wollman SLIST_INSERT_HEAD(&callfree, c, c_links.sle); 608acc8326dSGarrett Wollman } 6099ed346baSBosko Milekic mtx_unlock_spin(&callout_lock); 610a45982d2SJohn Baldwin return (1); 611acc8326dSGarrett Wollman } 612acc8326dSGarrett Wollman 613acc8326dSGarrett Wollman void 614e82ac18eSJonathan Lemon callout_init(c, mpsafe) 615acc8326dSGarrett Wollman struct callout *c; 616e82ac18eSJonathan Lemon int mpsafe; 617acc8326dSGarrett Wollman { 6187347e1c6SGarrett Wollman bzero(c, sizeof *c); 61998c926b2SIan Dowse if (mpsafe) { 62064b9ee20SAttilio Rao c->c_lock = NULL; 62198c926b2SIan Dowse c->c_flags = CALLOUT_RETURNUNLOCKED; 62298c926b2SIan Dowse } else { 62364b9ee20SAttilio Rao c->c_lock = &Giant.lock_object; 62498c926b2SIan Dowse c->c_flags = 0; 62598c926b2SIan Dowse } 62698c926b2SIan Dowse } 62798c926b2SIan Dowse 62898c926b2SIan Dowse void 62964b9ee20SAttilio Rao _callout_init_lock(c, lock, flags) 63098c926b2SIan Dowse struct callout *c; 63164b9ee20SAttilio Rao struct lock_object *lock; 63298c926b2SIan Dowse int flags; 63398c926b2SIan Dowse { 63498c926b2SIan Dowse bzero(c, sizeof *c); 63564b9ee20SAttilio Rao c->c_lock = lock; 63664b9ee20SAttilio Rao KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0, 63764b9ee20SAttilio Rao ("callout_init_lock: bad flags %d", flags)); 63864b9ee20SAttilio Rao KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0, 63964b9ee20SAttilio Rao ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock")); 64064b9ee20SAttilio Rao KASSERT(lock == NULL || LOCK_CLASS(lock) == &lock_class_mtx_sleep || 64164b9ee20SAttilio Rao LOCK_CLASS(lock) == &lock_class_rw, ("%s: invalid lock class", 64264b9ee20SAttilio Rao __func__)); 64364b9ee20SAttilio Rao c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK); 644acc8326dSGarrett Wollman } 645acc8326dSGarrett Wollman 646e1d6dc65SNate Williams #ifdef APM_FIXUP_CALLTODO 647e1d6dc65SNate Williams /* 648e1d6dc65SNate Williams * Adjust the kernel calltodo timeout list. This routine is used after 649e1d6dc65SNate Williams * an APM resume to recalculate the calltodo timer list values with the 650e1d6dc65SNate Williams * number of hz's we have been sleeping. The next hardclock() will detect 651e1d6dc65SNate Williams * that there are fired timers and run softclock() to execute them. 652e1d6dc65SNate Williams * 653e1d6dc65SNate Williams * Please note, I have not done an exhaustive analysis of what code this 654e1d6dc65SNate Williams * might break. I am motivated to have my select()'s and alarm()'s that 655e1d6dc65SNate Williams * have expired during suspend firing upon resume so that the applications 656e1d6dc65SNate Williams * which set the timer can do the maintanence the timer was for as close 657e1d6dc65SNate Williams * as possible to the originally intended time. Testing this code for a 658e1d6dc65SNate Williams * week showed that resuming from a suspend resulted in 22 to 25 timers 659e1d6dc65SNate Williams * firing, which seemed independant on whether the suspend was 2 hours or 660e1d6dc65SNate Williams * 2 days. Your milage may vary. - Ken Key <key@cs.utk.edu> 661e1d6dc65SNate Williams */ 662e1d6dc65SNate Williams void 663e1d6dc65SNate Williams adjust_timeout_calltodo(time_change) 664e1d6dc65SNate Williams struct timeval *time_change; 665e1d6dc65SNate Williams { 666e1d6dc65SNate Williams register struct callout *p; 667e1d6dc65SNate Williams unsigned long delta_ticks; 668e1d6dc65SNate Williams 669e1d6dc65SNate Williams /* 670e1d6dc65SNate Williams * How many ticks were we asleep? 671c8b47828SBruce Evans * (stolen from tvtohz()). 672e1d6dc65SNate Williams */ 673e1d6dc65SNate Williams 674e1d6dc65SNate Williams /* Don't do anything */ 675e1d6dc65SNate Williams if (time_change->tv_sec < 0) 676e1d6dc65SNate Williams return; 677e1d6dc65SNate Williams else if (time_change->tv_sec <= LONG_MAX / 1000000) 678e1d6dc65SNate Williams delta_ticks = (time_change->tv_sec * 1000000 + 679e1d6dc65SNate Williams time_change->tv_usec + (tick - 1)) / tick + 1; 680e1d6dc65SNate Williams else if (time_change->tv_sec <= LONG_MAX / hz) 681e1d6dc65SNate Williams delta_ticks = time_change->tv_sec * hz + 682e1d6dc65SNate Williams (time_change->tv_usec + (tick - 1)) / tick + 1; 683e1d6dc65SNate Williams else 684e1d6dc65SNate Williams delta_ticks = LONG_MAX; 685e1d6dc65SNate Williams 686e1d6dc65SNate Williams if (delta_ticks > INT_MAX) 687e1d6dc65SNate Williams delta_ticks = INT_MAX; 688e1d6dc65SNate Williams 689e1d6dc65SNate Williams /* 690e1d6dc65SNate Williams * Now rip through the timer calltodo list looking for timers 691e1d6dc65SNate Williams * to expire. 692e1d6dc65SNate Williams */ 693e1d6dc65SNate Williams 694e1d6dc65SNate Williams /* don't collide with softclock() */ 6959ed346baSBosko Milekic mtx_lock_spin(&callout_lock); 696e1d6dc65SNate Williams for (p = calltodo.c_next; p != NULL; p = p->c_next) { 697e1d6dc65SNate Williams p->c_time -= delta_ticks; 698e1d6dc65SNate Williams 699e1d6dc65SNate Williams /* Break if the timer had more time on it than delta_ticks */ 700e1d6dc65SNate Williams if (p->c_time > 0) 701e1d6dc65SNate Williams break; 702e1d6dc65SNate Williams 703e1d6dc65SNate Williams /* take back the ticks the timer didn't use (p->c_time <= 0) */ 704e1d6dc65SNate Williams delta_ticks = -p->c_time; 705e1d6dc65SNate Williams } 7069ed346baSBosko Milekic mtx_unlock_spin(&callout_lock); 707e1d6dc65SNate Williams 708e1d6dc65SNate Williams return; 709e1d6dc65SNate Williams } 710e1d6dc65SNate Williams #endif /* APM_FIXUP_CALLTODO */ 711