1df8bae1dSRodney W. Grimes /*- 2df8bae1dSRodney W. Grimes * Copyright (c) 1982, 1986, 1991, 1993 3df8bae1dSRodney W. Grimes * The Regents of the University of California. All rights reserved. 4df8bae1dSRodney W. Grimes * (c) UNIX System Laboratories, Inc. 5df8bae1dSRodney W. Grimes * All or some portions of this file are derived from material licensed 6df8bae1dSRodney W. Grimes * to the University of California by American Telephone and Telegraph 7df8bae1dSRodney W. Grimes * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8df8bae1dSRodney W. Grimes * the permission of UNIX System Laboratories, Inc. 9df8bae1dSRodney W. Grimes * 10df8bae1dSRodney W. Grimes * Redistribution and use in source and binary forms, with or without 11df8bae1dSRodney W. Grimes * modification, are permitted provided that the following conditions 12df8bae1dSRodney W. Grimes * are met: 13df8bae1dSRodney W. Grimes * 1. Redistributions of source code must retain the above copyright 14df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer. 15df8bae1dSRodney W. Grimes * 2. Redistributions in binary form must reproduce the above copyright 16df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer in the 17df8bae1dSRodney W. Grimes * documentation and/or other materials provided with the distribution. 18df8bae1dSRodney W. Grimes * 4. Neither the name of the University nor the names of its contributors 19df8bae1dSRodney W. Grimes * may be used to endorse or promote products derived from this software 20df8bae1dSRodney W. Grimes * without specific prior written permission. 21df8bae1dSRodney W. Grimes * 22df8bae1dSRodney W. Grimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23df8bae1dSRodney W. Grimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24df8bae1dSRodney W. Grimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25df8bae1dSRodney W. Grimes * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26df8bae1dSRodney W. Grimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27df8bae1dSRodney W. Grimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28df8bae1dSRodney W. Grimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29df8bae1dSRodney W. Grimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30df8bae1dSRodney W. Grimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31df8bae1dSRodney W. Grimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32df8bae1dSRodney W. Grimes * SUCH DAMAGE. 33df8bae1dSRodney W. Grimes * 34acc8326dSGarrett Wollman * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 35df8bae1dSRodney W. Grimes */ 36df8bae1dSRodney W. Grimes 37677b542eSDavid E. O'Brien #include <sys/cdefs.h> 38677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 39677b542eSDavid E. O'Brien 405b999a6bSDavide Italiano #include "opt_callout_profiling.h" 4191dd9aaeSRobert Watson #include "opt_kdtrace.h" 425b999a6bSDavide Italiano #if defined(__arm__) 435b999a6bSDavide Italiano #include "opt_timer.h" 445b999a6bSDavide Italiano #endif 4591dd9aaeSRobert Watson 46df8bae1dSRodney W. Grimes #include <sys/param.h> 47df8bae1dSRodney W. Grimes #include <sys/systm.h> 488d809d50SJeff Roberson #include <sys/bus.h> 4915b7a470SPoul-Henning Kamp #include <sys/callout.h> 50f8ccf82aSAndre Oppermann #include <sys/file.h> 518d809d50SJeff Roberson #include <sys/interrupt.h> 52df8bae1dSRodney W. Grimes #include <sys/kernel.h> 53ff7ec58aSRobert Watson #include <sys/ktr.h> 54f34fa851SJohn Baldwin #include <sys/lock.h> 558d809d50SJeff Roberson #include <sys/malloc.h> 56cb799bfeSJohn Baldwin #include <sys/mutex.h> 5721f9e816SJohn Baldwin #include <sys/proc.h> 5891dd9aaeSRobert Watson #include <sys/sdt.h> 596a0ce57dSAttilio Rao #include <sys/sleepqueue.h> 6022ee8c4fSPoul-Henning Kamp #include <sys/sysctl.h> 618d809d50SJeff Roberson #include <sys/smp.h> 62df8bae1dSRodney W. Grimes 631283e9cdSAttilio Rao #ifdef SMP 641283e9cdSAttilio Rao #include <machine/cpu.h> 651283e9cdSAttilio Rao #endif 661283e9cdSAttilio Rao 675b999a6bSDavide Italiano #ifndef NO_EVENTTIMERS 685b999a6bSDavide Italiano DPCPU_DECLARE(sbintime_t, hardclocktime); 695b999a6bSDavide Italiano #endif 705b999a6bSDavide Italiano 7191dd9aaeSRobert Watson SDT_PROVIDER_DEFINE(callout_execute); 7279856499SRui Paulo SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start); 7391dd9aaeSRobert Watson SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0, 7491dd9aaeSRobert Watson "struct callout *"); 7579856499SRui Paulo SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end); 7691dd9aaeSRobert Watson SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0, 7791dd9aaeSRobert Watson "struct callout *"); 7891dd9aaeSRobert Watson 795b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING 8022ee8c4fSPoul-Henning Kamp static int avg_depth; 8122ee8c4fSPoul-Henning Kamp SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0, 8222ee8c4fSPoul-Henning Kamp "Average number of items examined per softclock call. Units = 1/1000"); 8322ee8c4fSPoul-Henning Kamp static int avg_gcalls; 8422ee8c4fSPoul-Henning Kamp SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0, 8522ee8c4fSPoul-Henning Kamp "Average number of Giant callouts made per softclock call. Units = 1/1000"); 8664b9ee20SAttilio Rao static int avg_lockcalls; 8764b9ee20SAttilio Rao SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0, 8864b9ee20SAttilio Rao "Average number of lock callouts made per softclock call. Units = 1/1000"); 8922ee8c4fSPoul-Henning Kamp static int avg_mpcalls; 9022ee8c4fSPoul-Henning Kamp SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0, 9122ee8c4fSPoul-Henning Kamp "Average number of MP callouts made per softclock call. Units = 1/1000"); 925b999a6bSDavide Italiano static int avg_depth_dir; 935b999a6bSDavide Italiano SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0, 945b999a6bSDavide Italiano "Average number of direct callouts examined per callout_process call. " 955b999a6bSDavide Italiano "Units = 1/1000"); 965b999a6bSDavide Italiano static int avg_lockcalls_dir; 975b999a6bSDavide Italiano SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD, 985b999a6bSDavide Italiano &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per " 995b999a6bSDavide Italiano "callout_process call. Units = 1/1000"); 1005b999a6bSDavide Italiano static int avg_mpcalls_dir; 1015b999a6bSDavide Italiano SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir, 1025b999a6bSDavide Italiano 0, "Average number of MP direct callouts made per callout_process call. " 1035b999a6bSDavide Italiano "Units = 1/1000"); 1045b999a6bSDavide Italiano #endif 105f8ccf82aSAndre Oppermann 106f8ccf82aSAndre Oppermann static int ncallout; 107f8ccf82aSAndre Oppermann SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN, &ncallout, 0, 108f8ccf82aSAndre Oppermann "Number of entries in callwheel and size of timeout() preallocation"); 109f8ccf82aSAndre Oppermann 11015b7a470SPoul-Henning Kamp /* 11115b7a470SPoul-Henning Kamp * TODO: 11215b7a470SPoul-Henning Kamp * allocate more timeout table slots when table overflows. 11315b7a470SPoul-Henning Kamp */ 1143f555c45SDavide Italiano u_int callwheelsize, callwheelmask; 115f23b4c91SGarrett Wollman 11620c510f8SLuigi Rizzo /* 1175b999a6bSDavide Italiano * The callout cpu exec entities represent informations necessary for 1185b999a6bSDavide Italiano * describing the state of callouts currently running on the CPU and the ones 1195b999a6bSDavide Italiano * necessary for migrating callouts to the new callout cpu. In particular, 1205b999a6bSDavide Italiano * the first entry of the array cc_exec_entity holds informations for callout 1215b999a6bSDavide Italiano * running in SWI thread context, while the second one holds informations 1225b999a6bSDavide Italiano * for callout running directly from hardware interrupt context. 1231283e9cdSAttilio Rao * The cached informations are very important for deferring migration when 1241283e9cdSAttilio Rao * the migrating callout is already running. 1251283e9cdSAttilio Rao */ 1265b999a6bSDavide Italiano struct cc_exec { 1275b999a6bSDavide Italiano struct callout *cc_next; 1285b999a6bSDavide Italiano struct callout *cc_curr; 1291283e9cdSAttilio Rao #ifdef SMP 1301283e9cdSAttilio Rao void (*ce_migration_func)(void *); 1311283e9cdSAttilio Rao void *ce_migration_arg; 1321283e9cdSAttilio Rao int ce_migration_cpu; 1335b999a6bSDavide Italiano sbintime_t ce_migration_time; 1341283e9cdSAttilio Rao #endif 135a4a3ce99SDavide Italiano bool cc_cancel; 136a4a3ce99SDavide Italiano bool cc_waiting; 1371283e9cdSAttilio Rao }; 1381283e9cdSAttilio Rao 1391283e9cdSAttilio Rao /* 14020c510f8SLuigi Rizzo * There is one struct callout_cpu per cpu, holding all relevant 14120c510f8SLuigi Rizzo * state for the callout processing thread on the individual CPU. 14220c510f8SLuigi Rizzo */ 1438d809d50SJeff Roberson struct callout_cpu { 1444ceaf45dSAttilio Rao struct mtx_padalign cc_lock; 1455b999a6bSDavide Italiano struct cc_exec cc_exec_entity[2]; 1468d809d50SJeff Roberson struct callout *cc_callout; 1475b999a6bSDavide Italiano struct callout_list *cc_callwheel; 1485b999a6bSDavide Italiano struct callout_tailq cc_expireq; 1495b999a6bSDavide Italiano struct callout_slist cc_callfree; 1505b999a6bSDavide Italiano sbintime_t cc_firstevent; 1515b999a6bSDavide Italiano sbintime_t cc_lastscan; 1528d809d50SJeff Roberson void *cc_cookie; 1535b999a6bSDavide Italiano u_int cc_bucket; 1548d809d50SJeff Roberson }; 1558d809d50SJeff Roberson 1565b999a6bSDavide Italiano #define cc_exec_curr cc_exec_entity[0].cc_curr 1575b999a6bSDavide Italiano #define cc_exec_next cc_exec_entity[0].cc_next 1585b999a6bSDavide Italiano #define cc_exec_cancel cc_exec_entity[0].cc_cancel 1595b999a6bSDavide Italiano #define cc_exec_waiting cc_exec_entity[0].cc_waiting 1605b999a6bSDavide Italiano #define cc_exec_curr_dir cc_exec_entity[1].cc_curr 1615b999a6bSDavide Italiano #define cc_exec_next_dir cc_exec_entity[1].cc_next 1625b999a6bSDavide Italiano #define cc_exec_cancel_dir cc_exec_entity[1].cc_cancel 1635b999a6bSDavide Italiano #define cc_exec_waiting_dir cc_exec_entity[1].cc_waiting 1645b999a6bSDavide Italiano 1658d809d50SJeff Roberson #ifdef SMP 1665b999a6bSDavide Italiano #define cc_migration_func cc_exec_entity[0].ce_migration_func 1675b999a6bSDavide Italiano #define cc_migration_arg cc_exec_entity[0].ce_migration_arg 1685b999a6bSDavide Italiano #define cc_migration_cpu cc_exec_entity[0].ce_migration_cpu 1695b999a6bSDavide Italiano #define cc_migration_time cc_exec_entity[0].ce_migration_time 1705b999a6bSDavide Italiano #define cc_migration_func_dir cc_exec_entity[1].ce_migration_func 1715b999a6bSDavide Italiano #define cc_migration_arg_dir cc_exec_entity[1].ce_migration_arg 1725b999a6bSDavide Italiano #define cc_migration_cpu_dir cc_exec_entity[1].ce_migration_cpu 1735b999a6bSDavide Italiano #define cc_migration_time_dir cc_exec_entity[1].ce_migration_time 1741283e9cdSAttilio Rao 1758d809d50SJeff Roberson struct callout_cpu cc_cpu[MAXCPU]; 1761283e9cdSAttilio Rao #define CPUBLOCK MAXCPU 1778d809d50SJeff Roberson #define CC_CPU(cpu) (&cc_cpu[(cpu)]) 1788d809d50SJeff Roberson #define CC_SELF() CC_CPU(PCPU_GET(cpuid)) 1798d809d50SJeff Roberson #else 1808d809d50SJeff Roberson struct callout_cpu cc_cpu; 1818d809d50SJeff Roberson #define CC_CPU(cpu) &cc_cpu 1828d809d50SJeff Roberson #define CC_SELF() &cc_cpu 1838d809d50SJeff Roberson #endif 1848d809d50SJeff Roberson #define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock) 1858d809d50SJeff Roberson #define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock) 1861283e9cdSAttilio Rao #define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED) 1878d809d50SJeff Roberson 1888d809d50SJeff Roberson static int timeout_cpu; 1895b999a6bSDavide Italiano 190*15ae0c9aSAndre Oppermann static void callout_cpu_init(struct callout_cpu *cc); 1915b999a6bSDavide Italiano static void softclock_call_cc(struct callout *c, struct callout_cpu *cc, 1925b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING 1935b999a6bSDavide Italiano int *mpcalls, int *lockcalls, int *gcalls, 1945b999a6bSDavide Italiano #endif 1955b999a6bSDavide Italiano int direct); 1968d809d50SJeff Roberson 197d745c852SEd Schouten static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures"); 19849a74476SColin Percival 199e9dec2c4SColin Percival /** 2008d809d50SJeff Roberson * Locked by cc_lock: 2015b999a6bSDavide Italiano * cc_curr - If a callout is in progress, it is cc_curr. 2025b999a6bSDavide Italiano * If cc_curr is non-NULL, threads waiting in 203b36f4588SJohn Baldwin * callout_drain() will be woken up as soon as the 2042c1bb207SColin Percival * relevant callout completes. 2055b999a6bSDavide Italiano * cc_cancel - Changing to 1 with both callout_lock and cc_lock held 20698c926b2SIan Dowse * guarantees that the current callout will not run. 20798c926b2SIan Dowse * The softclock() function sets this to 0 before it 20864b9ee20SAttilio Rao * drops callout_lock to acquire c_lock, and it calls 209b36f4588SJohn Baldwin * the handler only if curr_cancelled is still 0 after 2105b999a6bSDavide Italiano * cc_lock is successfully acquired. 2118d809d50SJeff Roberson * cc_waiting - If a thread is waiting in callout_drain(), then 212b36f4588SJohn Baldwin * callout_wait is nonzero. Set only when 2135b999a6bSDavide Italiano * cc_curr is non-NULL. 2142c1bb207SColin Percival */ 215df8bae1dSRodney W. Grimes 216df8bae1dSRodney W. Grimes /* 2175b999a6bSDavide Italiano * Resets the execution entity tied to a specific callout cpu. 2181283e9cdSAttilio Rao */ 2191283e9cdSAttilio Rao static void 2205b999a6bSDavide Italiano cc_cce_cleanup(struct callout_cpu *cc, int direct) 2211283e9cdSAttilio Rao { 2221283e9cdSAttilio Rao 2235b999a6bSDavide Italiano cc->cc_exec_entity[direct].cc_curr = NULL; 2245b999a6bSDavide Italiano cc->cc_exec_entity[direct].cc_next = NULL; 225ac42a172SDavide Italiano cc->cc_exec_entity[direct].cc_cancel = false; 226ac42a172SDavide Italiano cc->cc_exec_entity[direct].cc_waiting = false; 2271283e9cdSAttilio Rao #ifdef SMP 2285b999a6bSDavide Italiano cc->cc_exec_entity[direct].ce_migration_cpu = CPUBLOCK; 2295b999a6bSDavide Italiano cc->cc_exec_entity[direct].ce_migration_time = 0; 2305b999a6bSDavide Italiano cc->cc_exec_entity[direct].ce_migration_func = NULL; 2315b999a6bSDavide Italiano cc->cc_exec_entity[direct].ce_migration_arg = NULL; 2321283e9cdSAttilio Rao #endif 2331283e9cdSAttilio Rao } 2341283e9cdSAttilio Rao 2351283e9cdSAttilio Rao /* 2361283e9cdSAttilio Rao * Checks if migration is requested by a specific callout cpu. 2371283e9cdSAttilio Rao */ 2381283e9cdSAttilio Rao static int 2395b999a6bSDavide Italiano cc_cce_migrating(struct callout_cpu *cc, int direct) 2401283e9cdSAttilio Rao { 2411283e9cdSAttilio Rao 2421283e9cdSAttilio Rao #ifdef SMP 2435b999a6bSDavide Italiano return (cc->cc_exec_entity[direct].ce_migration_cpu != CPUBLOCK); 2441283e9cdSAttilio Rao #else 2451283e9cdSAttilio Rao return (0); 2461283e9cdSAttilio Rao #endif 2471283e9cdSAttilio Rao } 2481283e9cdSAttilio Rao 2491283e9cdSAttilio Rao /* 250*15ae0c9aSAndre Oppermann * Kernel low level callwheel initialization 251*15ae0c9aSAndre Oppermann * called on cpu0 during kernel startup. 252219d632cSMatthew Dillon */ 253*15ae0c9aSAndre Oppermann static void 254*15ae0c9aSAndre Oppermann callout_callwheel_init(void *dummy) 255219d632cSMatthew Dillon { 2568d809d50SJeff Roberson struct callout_cpu *cc; 2578d809d50SJeff Roberson 258f8ccf82aSAndre Oppermann /* 259f8ccf82aSAndre Oppermann * Calculate the size of the callout wheel and the preallocated 260f8ccf82aSAndre Oppermann * timeout() structures. 261f8ccf82aSAndre Oppermann */ 262f8ccf82aSAndre Oppermann ncallout = imin(16 + maxproc + maxfiles, 18508); 263f8ccf82aSAndre Oppermann TUNABLE_INT_FETCH("kern.ncallout", &ncallout); 264f8ccf82aSAndre Oppermann 265219d632cSMatthew Dillon /* 266922314f0SAlfred Perlstein * Calculate callout wheel size, should be next power of two higher 267922314f0SAlfred Perlstein * than 'ncallout'. 268219d632cSMatthew Dillon */ 269922314f0SAlfred Perlstein callwheelsize = 1 << fls(ncallout); 270219d632cSMatthew Dillon callwheelmask = callwheelsize - 1; 271219d632cSMatthew Dillon 272*15ae0c9aSAndre Oppermann /* 273*15ae0c9aSAndre Oppermann * Only cpu0 handles timeout(9) and receives a preallocation. 274*15ae0c9aSAndre Oppermann * 275*15ae0c9aSAndre Oppermann * XXX: Once all timeout(9) consumers are converted this can 276*15ae0c9aSAndre Oppermann * be removed. 277*15ae0c9aSAndre Oppermann */ 278*15ae0c9aSAndre Oppermann timeout_cpu = PCPU_GET(cpuid); 279*15ae0c9aSAndre Oppermann cc = CC_CPU(timeout_cpu); 280*15ae0c9aSAndre Oppermann cc->cc_callout = malloc(ncallout * sizeof(struct callout), 281*15ae0c9aSAndre Oppermann M_CALLOUT, M_WAITOK); 282*15ae0c9aSAndre Oppermann callout_cpu_init(cc); 283219d632cSMatthew Dillon } 284*15ae0c9aSAndre Oppermann SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL); 285219d632cSMatthew Dillon 286*15ae0c9aSAndre Oppermann /* 287*15ae0c9aSAndre Oppermann * Initialize the per-cpu callout structures. 288*15ae0c9aSAndre Oppermann */ 2898d809d50SJeff Roberson static void 2908d809d50SJeff Roberson callout_cpu_init(struct callout_cpu *cc) 2918d809d50SJeff Roberson { 2928d809d50SJeff Roberson struct callout *c; 2938d809d50SJeff Roberson int i; 2948d809d50SJeff Roberson 2958d809d50SJeff Roberson mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE); 2968d809d50SJeff Roberson SLIST_INIT(&cc->cc_callfree); 297*15ae0c9aSAndre Oppermann cc->cc_callwheel = malloc(sizeof(struct callout_tailq) * callwheelsize, 298*15ae0c9aSAndre Oppermann M_CALLOUT, M_WAITOK); 2995b999a6bSDavide Italiano for (i = 0; i < callwheelsize; i++) 3005b999a6bSDavide Italiano LIST_INIT(&cc->cc_callwheel[i]); 3015b999a6bSDavide Italiano TAILQ_INIT(&cc->cc_expireq); 3025b999a6bSDavide Italiano cc->cc_firstevent = INT64_MAX; 3035b999a6bSDavide Italiano for (i = 0; i < 2; i++) 3045b999a6bSDavide Italiano cc_cce_cleanup(cc, i); 305*15ae0c9aSAndre Oppermann if (cc->cc_callout == NULL) /* Only cpu0 handles timeout(9) */ 3068d809d50SJeff Roberson return; 3078d809d50SJeff Roberson for (i = 0; i < ncallout; i++) { 3088d809d50SJeff Roberson c = &cc->cc_callout[i]; 3098d809d50SJeff Roberson callout_init(c, 0); 3108d809d50SJeff Roberson c->c_flags = CALLOUT_LOCAL_ALLOC; 3118d809d50SJeff Roberson SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle); 3128d809d50SJeff Roberson } 3138d809d50SJeff Roberson } 3148d809d50SJeff Roberson 3151283e9cdSAttilio Rao #ifdef SMP 3161283e9cdSAttilio Rao /* 3171283e9cdSAttilio Rao * Switches the cpu tied to a specific callout. 3181283e9cdSAttilio Rao * The function expects a locked incoming callout cpu and returns with 3191283e9cdSAttilio Rao * locked outcoming callout cpu. 3201283e9cdSAttilio Rao */ 3211283e9cdSAttilio Rao static struct callout_cpu * 3221283e9cdSAttilio Rao callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu) 3231283e9cdSAttilio Rao { 3241283e9cdSAttilio Rao struct callout_cpu *new_cc; 3251283e9cdSAttilio Rao 3261283e9cdSAttilio Rao MPASS(c != NULL && cc != NULL); 3271283e9cdSAttilio Rao CC_LOCK_ASSERT(cc); 3281283e9cdSAttilio Rao 329e75baa28SAttilio Rao /* 330e75baa28SAttilio Rao * Avoid interrupts and preemption firing after the callout cpu 331e75baa28SAttilio Rao * is blocked in order to avoid deadlocks as the new thread 332e75baa28SAttilio Rao * may be willing to acquire the callout cpu lock. 333e75baa28SAttilio Rao */ 3341283e9cdSAttilio Rao c->c_cpu = CPUBLOCK; 335e75baa28SAttilio Rao spinlock_enter(); 3361283e9cdSAttilio Rao CC_UNLOCK(cc); 3371283e9cdSAttilio Rao new_cc = CC_CPU(new_cpu); 3381283e9cdSAttilio Rao CC_LOCK(new_cc); 339e75baa28SAttilio Rao spinlock_exit(); 3401283e9cdSAttilio Rao c->c_cpu = new_cpu; 3411283e9cdSAttilio Rao return (new_cc); 3421283e9cdSAttilio Rao } 3431283e9cdSAttilio Rao #endif 3441283e9cdSAttilio Rao 345219d632cSMatthew Dillon /* 3468d809d50SJeff Roberson * Start standard softclock thread. 3478d809d50SJeff Roberson */ 3488d809d50SJeff Roberson static void 3498d809d50SJeff Roberson start_softclock(void *dummy) 3508d809d50SJeff Roberson { 3518d809d50SJeff Roberson struct callout_cpu *cc; 3528d809d50SJeff Roberson #ifdef SMP 3538d809d50SJeff Roberson int cpu; 3548d809d50SJeff Roberson #endif 3558d809d50SJeff Roberson 3568d809d50SJeff Roberson cc = CC_CPU(timeout_cpu); 3578d809d50SJeff Roberson if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK, 3583350df48SJohn Baldwin INTR_MPSAFE, &cc->cc_cookie)) 3598d809d50SJeff Roberson panic("died while creating standard software ithreads"); 3608d809d50SJeff Roberson #ifdef SMP 3613aa6d94eSJohn Baldwin CPU_FOREACH(cpu) { 3628d809d50SJeff Roberson if (cpu == timeout_cpu) 3638d809d50SJeff Roberson continue; 3648d809d50SJeff Roberson cc = CC_CPU(cpu); 365*15ae0c9aSAndre Oppermann cc->cc_callout = NULL; /* Only cpu0 handles timeout(9). */ 366*15ae0c9aSAndre Oppermann callout_cpu_init(cc); 3678d809d50SJeff Roberson if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK, 3688d809d50SJeff Roberson INTR_MPSAFE, &cc->cc_cookie)) 3698d809d50SJeff Roberson panic("died while creating standard software ithreads"); 370219d632cSMatthew Dillon } 3718d809d50SJeff Roberson #endif 372219d632cSMatthew Dillon } 3738d809d50SJeff Roberson SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL); 3748d809d50SJeff Roberson 3755b999a6bSDavide Italiano #define CC_HASH_SHIFT 8 3768d809d50SJeff Roberson 3775b999a6bSDavide Italiano static inline u_int 3785b999a6bSDavide Italiano callout_hash(sbintime_t sbt) 3795b999a6bSDavide Italiano { 3805b999a6bSDavide Italiano 3815b999a6bSDavide Italiano return (sbt >> (32 - CC_HASH_SHIFT)); 3825b999a6bSDavide Italiano } 3835b999a6bSDavide Italiano 3845b999a6bSDavide Italiano static inline u_int 3855b999a6bSDavide Italiano callout_get_bucket(sbintime_t sbt) 3865b999a6bSDavide Italiano { 3875b999a6bSDavide Italiano 3885b999a6bSDavide Italiano return (callout_hash(sbt) & callwheelmask); 3895b999a6bSDavide Italiano } 3905b999a6bSDavide Italiano 3915b999a6bSDavide Italiano void 3925b999a6bSDavide Italiano callout_process(sbintime_t now) 3935b999a6bSDavide Italiano { 3945b999a6bSDavide Italiano struct callout *tmp, *tmpn; 3955b999a6bSDavide Italiano struct callout_cpu *cc; 3965b999a6bSDavide Italiano struct callout_list *sc; 3975b999a6bSDavide Italiano sbintime_t first, last, max, tmp_max; 3985b999a6bSDavide Italiano uint32_t lookahead; 3995b999a6bSDavide Italiano u_int firstb, lastb, nowb; 4005b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING 4015b999a6bSDavide Italiano int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0; 4025b999a6bSDavide Italiano #endif 4035b999a6bSDavide Italiano 4048d809d50SJeff Roberson cc = CC_SELF(); 4058d809d50SJeff Roberson mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET); 4065b999a6bSDavide Italiano 4075b999a6bSDavide Italiano /* Compute the buckets of the last scan and present times. */ 4085b999a6bSDavide Italiano firstb = callout_hash(cc->cc_lastscan); 4095b999a6bSDavide Italiano cc->cc_lastscan = now; 4105b999a6bSDavide Italiano nowb = callout_hash(now); 4115b999a6bSDavide Italiano 4125b999a6bSDavide Italiano /* Compute the last bucket and minimum time of the bucket after it. */ 4135b999a6bSDavide Italiano if (nowb == firstb) 4145b999a6bSDavide Italiano lookahead = (SBT_1S / 16); 4155b999a6bSDavide Italiano else if (nowb - firstb == 1) 4165b999a6bSDavide Italiano lookahead = (SBT_1S / 8); 4175b999a6bSDavide Italiano else 4185b999a6bSDavide Italiano lookahead = (SBT_1S / 2); 4195b999a6bSDavide Italiano first = last = now; 4205b999a6bSDavide Italiano first += (lookahead / 2); 4215b999a6bSDavide Italiano last += lookahead; 4225b999a6bSDavide Italiano last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT)); 4235b999a6bSDavide Italiano lastb = callout_hash(last) - 1; 4245b999a6bSDavide Italiano max = last; 4255b999a6bSDavide Italiano 4265b999a6bSDavide Italiano /* 4275b999a6bSDavide Italiano * Check if we wrapped around the entire wheel from the last scan. 4285b999a6bSDavide Italiano * In case, we need to scan entirely the wheel for pending callouts. 4295b999a6bSDavide Italiano */ 4305b999a6bSDavide Italiano if (lastb - firstb >= callwheelsize) { 4315b999a6bSDavide Italiano lastb = firstb + callwheelsize - 1; 4325b999a6bSDavide Italiano if (nowb - firstb >= callwheelsize) 4335b999a6bSDavide Italiano nowb = lastb; 4349fc51b0bSJeff Roberson } 4355b999a6bSDavide Italiano 4365b999a6bSDavide Italiano /* Iterate callwheel from firstb to nowb and then up to lastb. */ 4375b999a6bSDavide Italiano do { 4385b999a6bSDavide Italiano sc = &cc->cc_callwheel[firstb & callwheelmask]; 4395b999a6bSDavide Italiano tmp = LIST_FIRST(sc); 4405b999a6bSDavide Italiano while (tmp != NULL) { 4415b999a6bSDavide Italiano /* Run the callout if present time within allowed. */ 4425b999a6bSDavide Italiano if (tmp->c_time <= now) { 4435b999a6bSDavide Italiano /* 4445b999a6bSDavide Italiano * Consumer told us the callout may be run 4455b999a6bSDavide Italiano * directly from hardware interrupt context. 4465b999a6bSDavide Italiano */ 4475b999a6bSDavide Italiano if (tmp->c_flags & CALLOUT_DIRECT) { 4485b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING 4495b999a6bSDavide Italiano ++depth_dir; 4505b999a6bSDavide Italiano #endif 4515b999a6bSDavide Italiano cc->cc_exec_next_dir = 4525b999a6bSDavide Italiano LIST_NEXT(tmp, c_links.le); 4535b999a6bSDavide Italiano cc->cc_bucket = firstb & callwheelmask; 4545b999a6bSDavide Italiano LIST_REMOVE(tmp, c_links.le); 4555b999a6bSDavide Italiano softclock_call_cc(tmp, cc, 4565b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING 4575b999a6bSDavide Italiano &mpcalls_dir, &lockcalls_dir, NULL, 4585b999a6bSDavide Italiano #endif 4595b999a6bSDavide Italiano 1); 4605b999a6bSDavide Italiano tmp = cc->cc_exec_next_dir; 4615b999a6bSDavide Italiano } else { 4625b999a6bSDavide Italiano tmpn = LIST_NEXT(tmp, c_links.le); 4635b999a6bSDavide Italiano LIST_REMOVE(tmp, c_links.le); 4645b999a6bSDavide Italiano TAILQ_INSERT_TAIL(&cc->cc_expireq, 4655b999a6bSDavide Italiano tmp, c_links.tqe); 4665b999a6bSDavide Italiano tmp->c_flags |= CALLOUT_PROCESSED; 4675b999a6bSDavide Italiano tmp = tmpn; 4689fc51b0bSJeff Roberson } 4695b999a6bSDavide Italiano continue; 4705b999a6bSDavide Italiano } 4715b999a6bSDavide Italiano /* Skip events from distant future. */ 4725b999a6bSDavide Italiano if (tmp->c_time >= max) 4735b999a6bSDavide Italiano goto next; 4745b999a6bSDavide Italiano /* 4755b999a6bSDavide Italiano * Event minimal time is bigger than present maximal 4765b999a6bSDavide Italiano * time, so it cannot be aggregated. 4775b999a6bSDavide Italiano */ 4785b999a6bSDavide Italiano if (tmp->c_time > last) { 4795b999a6bSDavide Italiano lastb = nowb; 4805b999a6bSDavide Italiano goto next; 4815b999a6bSDavide Italiano } 4825b999a6bSDavide Italiano /* Update first and last time, respecting this event. */ 4835b999a6bSDavide Italiano if (tmp->c_time < first) 4845b999a6bSDavide Italiano first = tmp->c_time; 4855b999a6bSDavide Italiano tmp_max = tmp->c_time + tmp->c_precision; 4865b999a6bSDavide Italiano if (tmp_max < last) 4875b999a6bSDavide Italiano last = tmp_max; 4885b999a6bSDavide Italiano next: 4895b999a6bSDavide Italiano tmp = LIST_NEXT(tmp, c_links.le); 4905b999a6bSDavide Italiano } 4915b999a6bSDavide Italiano /* Proceed with the next bucket. */ 4925b999a6bSDavide Italiano firstb++; 4935b999a6bSDavide Italiano /* 4945b999a6bSDavide Italiano * Stop if we looked after present time and found 4955b999a6bSDavide Italiano * some event we can't execute at now. 4965b999a6bSDavide Italiano * Stop if we looked far enough into the future. 4975b999a6bSDavide Italiano */ 4985b999a6bSDavide Italiano } while (((int)(firstb - lastb)) <= 0); 4995b999a6bSDavide Italiano cc->cc_firstevent = last; 5005b999a6bSDavide Italiano #ifndef NO_EVENTTIMERS 5015b999a6bSDavide Italiano cpu_new_callout(curcpu, last, first); 5025b999a6bSDavide Italiano #endif 5035b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING 5045b999a6bSDavide Italiano avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8; 5055b999a6bSDavide Italiano avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8; 5065b999a6bSDavide Italiano avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8; 5075b999a6bSDavide Italiano #endif 5088d809d50SJeff Roberson mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET); 5098d809d50SJeff Roberson /* 5108d809d50SJeff Roberson * swi_sched acquires the thread lock, so we don't want to call it 5118d809d50SJeff Roberson * with cc_lock held; incorrect locking order. 5128d809d50SJeff Roberson */ 5135b999a6bSDavide Italiano if (!TAILQ_EMPTY(&cc->cc_expireq)) 5148d809d50SJeff Roberson swi_sched(cc->cc_cookie, 0); 5158d809d50SJeff Roberson } 5168d809d50SJeff Roberson 5178d809d50SJeff Roberson static struct callout_cpu * 5188d809d50SJeff Roberson callout_lock(struct callout *c) 5198d809d50SJeff Roberson { 5208d809d50SJeff Roberson struct callout_cpu *cc; 5218d809d50SJeff Roberson int cpu; 5228d809d50SJeff Roberson 5238d809d50SJeff Roberson for (;;) { 5248d809d50SJeff Roberson cpu = c->c_cpu; 5251283e9cdSAttilio Rao #ifdef SMP 5261283e9cdSAttilio Rao if (cpu == CPUBLOCK) { 5271283e9cdSAttilio Rao while (c->c_cpu == CPUBLOCK) 5281283e9cdSAttilio Rao cpu_spinwait(); 5291283e9cdSAttilio Rao continue; 5301283e9cdSAttilio Rao } 5311283e9cdSAttilio Rao #endif 5328d809d50SJeff Roberson cc = CC_CPU(cpu); 5338d809d50SJeff Roberson CC_LOCK(cc); 5348d809d50SJeff Roberson if (cpu == c->c_cpu) 5358d809d50SJeff Roberson break; 5368d809d50SJeff Roberson CC_UNLOCK(cc); 5378d809d50SJeff Roberson } 5388d809d50SJeff Roberson return (cc); 539219d632cSMatthew Dillon } 540219d632cSMatthew Dillon 5411283e9cdSAttilio Rao static void 5425b999a6bSDavide Italiano callout_cc_add(struct callout *c, struct callout_cpu *cc, 5435b999a6bSDavide Italiano sbintime_t sbt, sbintime_t precision, void (*func)(void *), 5445b999a6bSDavide Italiano void *arg, int cpu, int flags) 5451283e9cdSAttilio Rao { 5465b999a6bSDavide Italiano int bucket; 5471283e9cdSAttilio Rao 5481283e9cdSAttilio Rao CC_LOCK_ASSERT(cc); 5495b999a6bSDavide Italiano if (sbt < cc->cc_lastscan) 5505b999a6bSDavide Italiano sbt = cc->cc_lastscan; 5511283e9cdSAttilio Rao c->c_arg = arg; 5521283e9cdSAttilio Rao c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING); 5535b999a6bSDavide Italiano if (flags & C_DIRECT_EXEC) 5545b999a6bSDavide Italiano c->c_flags |= CALLOUT_DIRECT; 5555b999a6bSDavide Italiano c->c_flags &= ~CALLOUT_PROCESSED; 5561283e9cdSAttilio Rao c->c_func = func; 5575b999a6bSDavide Italiano c->c_time = sbt; 5585b999a6bSDavide Italiano c->c_precision = precision; 5595b999a6bSDavide Italiano bucket = callout_get_bucket(c->c_time); 5605b999a6bSDavide Italiano CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x", 5615b999a6bSDavide Italiano c, (int)(c->c_precision >> 32), 5625b999a6bSDavide Italiano (u_int)(c->c_precision & 0xffffffff)); 5635b999a6bSDavide Italiano LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le); 5645b999a6bSDavide Italiano if (cc->cc_bucket == bucket) 5655b999a6bSDavide Italiano cc->cc_exec_next_dir = c; 5665b999a6bSDavide Italiano #ifndef NO_EVENTTIMERS 5675b999a6bSDavide Italiano /* 5685b999a6bSDavide Italiano * Inform the eventtimers(4) subsystem there's a new callout 5695b999a6bSDavide Italiano * that has been inserted, but only if really required. 5705b999a6bSDavide Italiano */ 5715b999a6bSDavide Italiano sbt = c->c_time + c->c_precision; 5725b999a6bSDavide Italiano if (sbt < cc->cc_firstevent) { 5735b999a6bSDavide Italiano cc->cc_firstevent = sbt; 5745b999a6bSDavide Italiano cpu_new_callout(cpu, sbt, c->c_time); 5751283e9cdSAttilio Rao } 5765b999a6bSDavide Italiano #endif 5771283e9cdSAttilio Rao } 5781283e9cdSAttilio Rao 5796098e7acSKonstantin Belousov static void 5806098e7acSKonstantin Belousov callout_cc_del(struct callout *c, struct callout_cpu *cc) 5816098e7acSKonstantin Belousov { 5826098e7acSKonstantin Belousov 583eb8a7186SKonstantin Belousov if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0) 584eb8a7186SKonstantin Belousov return; 5856098e7acSKonstantin Belousov c->c_func = NULL; 5866098e7acSKonstantin Belousov SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle); 5876098e7acSKonstantin Belousov } 5886098e7acSKonstantin Belousov 589eb8a7186SKonstantin Belousov static void 5905b999a6bSDavide Italiano softclock_call_cc(struct callout *c, struct callout_cpu *cc, 5915b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING 5925b999a6bSDavide Italiano int *mpcalls, int *lockcalls, int *gcalls, 5935b999a6bSDavide Italiano #endif 5945b999a6bSDavide Italiano int direct) 5956098e7acSKonstantin Belousov { 5966098e7acSKonstantin Belousov void (*c_func)(void *); 5976098e7acSKonstantin Belousov void *c_arg; 5986098e7acSKonstantin Belousov struct lock_class *class; 5996098e7acSKonstantin Belousov struct lock_object *c_lock; 6006098e7acSKonstantin Belousov int c_flags, sharedlock; 6016098e7acSKonstantin Belousov #ifdef SMP 6026098e7acSKonstantin Belousov struct callout_cpu *new_cc; 6036098e7acSKonstantin Belousov void (*new_func)(void *); 6046098e7acSKonstantin Belousov void *new_arg; 6055b999a6bSDavide Italiano int flags, new_cpu; 6065b999a6bSDavide Italiano sbintime_t new_time; 6076098e7acSKonstantin Belousov #endif 6085b999a6bSDavide Italiano #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 60903763781SDavide Italiano sbintime_t sbt1, sbt2; 6106098e7acSKonstantin Belousov struct timespec ts2; 6115b999a6bSDavide Italiano static sbintime_t maxdt = 2 * SBT_1MS; /* 2 msec */ 6126098e7acSKonstantin Belousov static timeout_t *lastfunc; 6136098e7acSKonstantin Belousov #endif 6146098e7acSKonstantin Belousov 615eb8a7186SKonstantin Belousov KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) == 616eb8a7186SKonstantin Belousov (CALLOUT_PENDING | CALLOUT_ACTIVE), 617eb8a7186SKonstantin Belousov ("softclock_call_cc: pend|act %p %x", c, c->c_flags)); 6186098e7acSKonstantin Belousov class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL; 6196098e7acSKonstantin Belousov sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1; 6206098e7acSKonstantin Belousov c_lock = c->c_lock; 6216098e7acSKonstantin Belousov c_func = c->c_func; 6226098e7acSKonstantin Belousov c_arg = c->c_arg; 6236098e7acSKonstantin Belousov c_flags = c->c_flags; 6246098e7acSKonstantin Belousov if (c->c_flags & CALLOUT_LOCAL_ALLOC) 6256098e7acSKonstantin Belousov c->c_flags = CALLOUT_LOCAL_ALLOC; 6266098e7acSKonstantin Belousov else 6276098e7acSKonstantin Belousov c->c_flags &= ~CALLOUT_PENDING; 6285b999a6bSDavide Italiano cc->cc_exec_entity[direct].cc_curr = c; 629ac42a172SDavide Italiano cc->cc_exec_entity[direct].cc_cancel = false; 6306098e7acSKonstantin Belousov CC_UNLOCK(cc); 6316098e7acSKonstantin Belousov if (c_lock != NULL) { 6326098e7acSKonstantin Belousov class->lc_lock(c_lock, sharedlock); 6336098e7acSKonstantin Belousov /* 6346098e7acSKonstantin Belousov * The callout may have been cancelled 6356098e7acSKonstantin Belousov * while we switched locks. 6366098e7acSKonstantin Belousov */ 6375b999a6bSDavide Italiano if (cc->cc_exec_entity[direct].cc_cancel) { 6386098e7acSKonstantin Belousov class->lc_unlock(c_lock); 6396098e7acSKonstantin Belousov goto skip; 6406098e7acSKonstantin Belousov } 6416098e7acSKonstantin Belousov /* The callout cannot be stopped now. */ 642ac42a172SDavide Italiano cc->cc_exec_entity[direct].cc_cancel = true; 6436098e7acSKonstantin Belousov if (c_lock == &Giant.lock_object) { 6445b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING 6456098e7acSKonstantin Belousov (*gcalls)++; 6465b999a6bSDavide Italiano #endif 6475b999a6bSDavide Italiano CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p", 6486098e7acSKonstantin Belousov c, c_func, c_arg); 6496098e7acSKonstantin Belousov } else { 6505b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING 6516098e7acSKonstantin Belousov (*lockcalls)++; 6525b999a6bSDavide Italiano #endif 6536098e7acSKonstantin Belousov CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p", 6546098e7acSKonstantin Belousov c, c_func, c_arg); 6556098e7acSKonstantin Belousov } 6566098e7acSKonstantin Belousov } else { 6575b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING 6586098e7acSKonstantin Belousov (*mpcalls)++; 6595b999a6bSDavide Italiano #endif 6605b999a6bSDavide Italiano CTR3(KTR_CALLOUT, "callout %p func %p arg %p", 6616098e7acSKonstantin Belousov c, c_func, c_arg); 6626098e7acSKonstantin Belousov } 66303763781SDavide Italiano #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 6645b999a6bSDavide Italiano sbt1 = sbinuptime(); 6656098e7acSKonstantin Belousov #endif 6666098e7acSKonstantin Belousov THREAD_NO_SLEEPING(); 6676098e7acSKonstantin Belousov SDT_PROBE(callout_execute, kernel, , callout_start, c, 0, 0, 0, 0); 6686098e7acSKonstantin Belousov c_func(c_arg); 6696098e7acSKonstantin Belousov SDT_PROBE(callout_execute, kernel, , callout_end, c, 0, 0, 0, 0); 6706098e7acSKonstantin Belousov THREAD_SLEEPING_OK(); 67103763781SDavide Italiano #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 67203763781SDavide Italiano sbt2 = sbinuptime(); 67303763781SDavide Italiano sbt2 -= sbt1; 67403763781SDavide Italiano if (sbt2 > maxdt) { 67503763781SDavide Italiano if (lastfunc != c_func || sbt2 > maxdt * 2) { 67603763781SDavide Italiano ts2 = sbttots(sbt2); 6776098e7acSKonstantin Belousov printf( 6786098e7acSKonstantin Belousov "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n", 6796098e7acSKonstantin Belousov c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec); 6806098e7acSKonstantin Belousov } 68103763781SDavide Italiano maxdt = sbt2; 6826098e7acSKonstantin Belousov lastfunc = c_func; 6836098e7acSKonstantin Belousov } 6846098e7acSKonstantin Belousov #endif 6856098e7acSKonstantin Belousov CTR1(KTR_CALLOUT, "callout %p finished", c); 6866098e7acSKonstantin Belousov if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0) 6876098e7acSKonstantin Belousov class->lc_unlock(c_lock); 6886098e7acSKonstantin Belousov skip: 6896098e7acSKonstantin Belousov CC_LOCK(cc); 6905b999a6bSDavide Italiano KASSERT(cc->cc_exec_entity[direct].cc_curr == c, ("mishandled cc_curr")); 6915b999a6bSDavide Italiano cc->cc_exec_entity[direct].cc_curr = NULL; 6925b999a6bSDavide Italiano if (cc->cc_exec_entity[direct].cc_waiting) { 6936098e7acSKonstantin Belousov /* 6946098e7acSKonstantin Belousov * There is someone waiting for the 6956098e7acSKonstantin Belousov * callout to complete. 6966098e7acSKonstantin Belousov * If the callout was scheduled for 6976098e7acSKonstantin Belousov * migration just cancel it. 6986098e7acSKonstantin Belousov */ 6995b999a6bSDavide Italiano if (cc_cce_migrating(cc, direct)) { 7005b999a6bSDavide Italiano cc_cce_cleanup(cc, direct); 701bdf9120cSAttilio Rao 702bdf9120cSAttilio Rao /* 703bdf9120cSAttilio Rao * It should be assert here that the callout is not 704bdf9120cSAttilio Rao * destroyed but that is not easy. 705bdf9120cSAttilio Rao */ 706eb8a7186SKonstantin Belousov c->c_flags &= ~CALLOUT_DFRMIGRATION; 707eb8a7186SKonstantin Belousov } 708ac42a172SDavide Italiano cc->cc_exec_entity[direct].cc_waiting = false; 7096098e7acSKonstantin Belousov CC_UNLOCK(cc); 7105b999a6bSDavide Italiano wakeup(&cc->cc_exec_entity[direct].cc_waiting); 7116098e7acSKonstantin Belousov CC_LOCK(cc); 7125b999a6bSDavide Italiano } else if (cc_cce_migrating(cc, direct)) { 713bdf9120cSAttilio Rao KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0, 714eb8a7186SKonstantin Belousov ("Migrating legacy callout %p", c)); 7156098e7acSKonstantin Belousov #ifdef SMP 7166098e7acSKonstantin Belousov /* 7176098e7acSKonstantin Belousov * If the callout was scheduled for 7186098e7acSKonstantin Belousov * migration just perform it now. 7196098e7acSKonstantin Belousov */ 7205b999a6bSDavide Italiano new_cpu = cc->cc_exec_entity[direct].ce_migration_cpu; 7215b999a6bSDavide Italiano new_time = cc->cc_exec_entity[direct].ce_migration_time; 7225b999a6bSDavide Italiano new_func = cc->cc_exec_entity[direct].ce_migration_func; 7235b999a6bSDavide Italiano new_arg = cc->cc_exec_entity[direct].ce_migration_arg; 7245b999a6bSDavide Italiano cc_cce_cleanup(cc, direct); 7256098e7acSKonstantin Belousov 7266098e7acSKonstantin Belousov /* 727bdf9120cSAttilio Rao * It should be assert here that the callout is not destroyed 728bdf9120cSAttilio Rao * but that is not easy. 729bdf9120cSAttilio Rao * 730bdf9120cSAttilio Rao * As first thing, handle deferred callout stops. 7316098e7acSKonstantin Belousov */ 7326098e7acSKonstantin Belousov if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) { 7336098e7acSKonstantin Belousov CTR3(KTR_CALLOUT, 7346098e7acSKonstantin Belousov "deferred cancelled %p func %p arg %p", 7356098e7acSKonstantin Belousov c, new_func, new_arg); 7366098e7acSKonstantin Belousov callout_cc_del(c, cc); 737eb8a7186SKonstantin Belousov return; 7386098e7acSKonstantin Belousov } 7396098e7acSKonstantin Belousov c->c_flags &= ~CALLOUT_DFRMIGRATION; 7406098e7acSKonstantin Belousov 7416098e7acSKonstantin Belousov new_cc = callout_cpu_switch(c, cc, new_cpu); 7425b999a6bSDavide Italiano flags = (direct) ? C_DIRECT_EXEC : 0; 7435b999a6bSDavide Italiano callout_cc_add(c, new_cc, new_time, c->c_precision, new_func, 7445b999a6bSDavide Italiano new_arg, new_cpu, flags); 7456098e7acSKonstantin Belousov CC_UNLOCK(new_cc); 7466098e7acSKonstantin Belousov CC_LOCK(cc); 7476098e7acSKonstantin Belousov #else 7486098e7acSKonstantin Belousov panic("migration should not happen"); 7496098e7acSKonstantin Belousov #endif 7506098e7acSKonstantin Belousov } 751eb8a7186SKonstantin Belousov /* 752eb8a7186SKonstantin Belousov * If the current callout is locally allocated (from 753eb8a7186SKonstantin Belousov * timeout(9)) then put it on the freelist. 754eb8a7186SKonstantin Belousov * 755eb8a7186SKonstantin Belousov * Note: we need to check the cached copy of c_flags because 756eb8a7186SKonstantin Belousov * if it was not local, then it's not safe to deref the 757eb8a7186SKonstantin Belousov * callout pointer. 758eb8a7186SKonstantin Belousov */ 759eb8a7186SKonstantin Belousov KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 || 760eb8a7186SKonstantin Belousov c->c_flags == CALLOUT_LOCAL_ALLOC, 761eb8a7186SKonstantin Belousov ("corrupted callout")); 762bdf9120cSAttilio Rao if (c_flags & CALLOUT_LOCAL_ALLOC) 763eb8a7186SKonstantin Belousov callout_cc_del(c, cc); 7646098e7acSKonstantin Belousov } 7656098e7acSKonstantin Belousov 766219d632cSMatthew Dillon /* 767ab36c067SJustin T. Gibbs * The callout mechanism is based on the work of Adam M. Costello and 768ab36c067SJustin T. Gibbs * George Varghese, published in a technical report entitled "Redesigning 769ab36c067SJustin T. Gibbs * the BSD Callout and Timer Facilities" and modified slightly for inclusion 770ab36c067SJustin T. Gibbs * in FreeBSD by Justin T. Gibbs. The original work on the data structures 771024035e8SHiten Pandya * used in this implementation was published by G. Varghese and T. Lauck in 772ab36c067SJustin T. Gibbs * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for 773ab36c067SJustin T. Gibbs * the Efficient Implementation of a Timer Facility" in the Proceedings of 774ab36c067SJustin T. Gibbs * the 11th ACM Annual Symposium on Operating Systems Principles, 775ab36c067SJustin T. Gibbs * Austin, Texas Nov 1987. 776ab36c067SJustin T. Gibbs */ 777a50ec505SPoul-Henning Kamp 778ab36c067SJustin T. Gibbs /* 779df8bae1dSRodney W. Grimes * Software (low priority) clock interrupt. 780df8bae1dSRodney W. Grimes * Run periodic events from timeout queue. 781df8bae1dSRodney W. Grimes */ 782df8bae1dSRodney W. Grimes void 7838d809d50SJeff Roberson softclock(void *arg) 784df8bae1dSRodney W. Grimes { 7858d809d50SJeff Roberson struct callout_cpu *cc; 786b336df68SPoul-Henning Kamp struct callout *c; 7875b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING 7885b999a6bSDavide Italiano int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0; 7895b999a6bSDavide Italiano #endif 790df8bae1dSRodney W. Grimes 7918d809d50SJeff Roberson cc = (struct callout_cpu *)arg; 7928d809d50SJeff Roberson CC_LOCK(cc); 7935b999a6bSDavide Italiano while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) { 7945b999a6bSDavide Italiano TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 7955b999a6bSDavide Italiano softclock_call_cc(c, cc, 7965b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING 7975b999a6bSDavide Italiano &mpcalls, &lockcalls, &gcalls, 7985b999a6bSDavide Italiano #endif 7995b999a6bSDavide Italiano 0); 8005b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING 8015b999a6bSDavide Italiano ++depth; 8025b999a6bSDavide Italiano #endif 803df8bae1dSRodney W. Grimes } 8045b999a6bSDavide Italiano #ifdef CALLOUT_PROFILING 80522ee8c4fSPoul-Henning Kamp avg_depth += (depth * 1000 - avg_depth) >> 8; 80622ee8c4fSPoul-Henning Kamp avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8; 80764b9ee20SAttilio Rao avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8; 80822ee8c4fSPoul-Henning Kamp avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8; 8095b999a6bSDavide Italiano #endif 8108d809d50SJeff Roberson CC_UNLOCK(cc); 811df8bae1dSRodney W. Grimes } 812df8bae1dSRodney W. Grimes 813df8bae1dSRodney W. Grimes /* 814df8bae1dSRodney W. Grimes * timeout -- 815df8bae1dSRodney W. Grimes * Execute a function after a specified length of time. 816df8bae1dSRodney W. Grimes * 817df8bae1dSRodney W. Grimes * untimeout -- 818df8bae1dSRodney W. Grimes * Cancel previous timeout function call. 819df8bae1dSRodney W. Grimes * 820ab36c067SJustin T. Gibbs * callout_handle_init -- 821ab36c067SJustin T. Gibbs * Initialize a handle so that using it with untimeout is benign. 822ab36c067SJustin T. Gibbs * 823df8bae1dSRodney W. Grimes * See AT&T BCI Driver Reference Manual for specification. This 824ab36c067SJustin T. Gibbs * implementation differs from that one in that although an 825ab36c067SJustin T. Gibbs * identification value is returned from timeout, the original 826ab36c067SJustin T. Gibbs * arguments to timeout as well as the identifier are used to 827ab36c067SJustin T. Gibbs * identify entries for untimeout. 828df8bae1dSRodney W. Grimes */ 829ab36c067SJustin T. Gibbs struct callout_handle 830ab36c067SJustin T. Gibbs timeout(ftn, arg, to_ticks) 8318f03c6f1SBruce Evans timeout_t *ftn; 832df8bae1dSRodney W. Grimes void *arg; 833e82ac18eSJonathan Lemon int to_ticks; 834df8bae1dSRodney W. Grimes { 8358d809d50SJeff Roberson struct callout_cpu *cc; 836ab36c067SJustin T. Gibbs struct callout *new; 837ab36c067SJustin T. Gibbs struct callout_handle handle; 838df8bae1dSRodney W. Grimes 8398d809d50SJeff Roberson cc = CC_CPU(timeout_cpu); 8408d809d50SJeff Roberson CC_LOCK(cc); 841df8bae1dSRodney W. Grimes /* Fill in the next free callout structure. */ 8428d809d50SJeff Roberson new = SLIST_FIRST(&cc->cc_callfree); 843ab36c067SJustin T. Gibbs if (new == NULL) 844ab36c067SJustin T. Gibbs /* XXX Attempt to malloc first */ 845df8bae1dSRodney W. Grimes panic("timeout table full"); 8468d809d50SJeff Roberson SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle); 847acc8326dSGarrett Wollman callout_reset(new, to_ticks, ftn, arg); 848ab36c067SJustin T. Gibbs handle.callout = new; 8498d809d50SJeff Roberson CC_UNLOCK(cc); 8508d809d50SJeff Roberson 851ab36c067SJustin T. Gibbs return (handle); 852df8bae1dSRodney W. Grimes } 853df8bae1dSRodney W. Grimes 854df8bae1dSRodney W. Grimes void 855ab36c067SJustin T. Gibbs untimeout(ftn, arg, handle) 8568f03c6f1SBruce Evans timeout_t *ftn; 857df8bae1dSRodney W. Grimes void *arg; 858ab36c067SJustin T. Gibbs struct callout_handle handle; 859df8bae1dSRodney W. Grimes { 8608d809d50SJeff Roberson struct callout_cpu *cc; 861df8bae1dSRodney W. Grimes 862ab36c067SJustin T. Gibbs /* 863ab36c067SJustin T. Gibbs * Check for a handle that was initialized 864ab36c067SJustin T. Gibbs * by callout_handle_init, but never used 865ab36c067SJustin T. Gibbs * for a real timeout. 866ab36c067SJustin T. Gibbs */ 867ab36c067SJustin T. Gibbs if (handle.callout == NULL) 868ab36c067SJustin T. Gibbs return; 869df8bae1dSRodney W. Grimes 8708d809d50SJeff Roberson cc = callout_lock(handle.callout); 871acc8326dSGarrett Wollman if (handle.callout->c_func == ftn && handle.callout->c_arg == arg) 872acc8326dSGarrett Wollman callout_stop(handle.callout); 8738d809d50SJeff Roberson CC_UNLOCK(cc); 874df8bae1dSRodney W. Grimes } 875df8bae1dSRodney W. Grimes 8763c816944SBruce Evans void 877ab36c067SJustin T. Gibbs callout_handle_init(struct callout_handle *handle) 878ab36c067SJustin T. Gibbs { 879ab36c067SJustin T. Gibbs handle->callout = NULL; 880ab36c067SJustin T. Gibbs } 881ab36c067SJustin T. Gibbs 882acc8326dSGarrett Wollman /* 883acc8326dSGarrett Wollman * New interface; clients allocate their own callout structures. 884acc8326dSGarrett Wollman * 885acc8326dSGarrett Wollman * callout_reset() - establish or change a timeout 886acc8326dSGarrett Wollman * callout_stop() - disestablish a timeout 887acc8326dSGarrett Wollman * callout_init() - initialize a callout structure so that it can 888acc8326dSGarrett Wollman * safely be passed to callout_reset() and callout_stop() 889acc8326dSGarrett Wollman * 8909b8b58e0SJonathan Lemon * <sys/callout.h> defines three convenience macros: 891acc8326dSGarrett Wollman * 89286fd19deSColin Percival * callout_active() - returns truth if callout has not been stopped, 89386fd19deSColin Percival * drained, or deactivated since the last time the callout was 89486fd19deSColin Percival * reset. 8959b8b58e0SJonathan Lemon * callout_pending() - returns truth if callout is still waiting for timeout 8969b8b58e0SJonathan Lemon * callout_deactivate() - marks the callout as having been serviced 897acc8326dSGarrett Wollman */ 898d04304d1SGleb Smirnoff int 8995b999a6bSDavide Italiano callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision, 9005b999a6bSDavide Italiano void (*ftn)(void *), void *arg, int cpu, int flags) 901acc8326dSGarrett Wollman { 9025b999a6bSDavide Italiano sbintime_t to_sbt, pr; 9038d809d50SJeff Roberson struct callout_cpu *cc; 9045b999a6bSDavide Italiano int cancelled, direct; 905acc8326dSGarrett Wollman 9065b999a6bSDavide Italiano cancelled = 0; 9075b999a6bSDavide Italiano if (flags & C_ABSOLUTE) { 9085b999a6bSDavide Italiano to_sbt = sbt; 9095b999a6bSDavide Italiano } else { 9105b999a6bSDavide Italiano if ((flags & C_HARDCLOCK) && (sbt < tick_sbt)) 9115b999a6bSDavide Italiano sbt = tick_sbt; 9125b999a6bSDavide Italiano if ((flags & C_HARDCLOCK) || 9135b999a6bSDavide Italiano #ifdef NO_EVENTTIMERS 9145b999a6bSDavide Italiano sbt >= sbt_timethreshold) { 9155b999a6bSDavide Italiano to_sbt = getsbinuptime(); 9165b999a6bSDavide Italiano 9175b999a6bSDavide Italiano /* Add safety belt for the case of hz > 1000. */ 9185b999a6bSDavide Italiano to_sbt += tc_tick_sbt - tick_sbt; 9195b999a6bSDavide Italiano #else 9205b999a6bSDavide Italiano sbt >= sbt_tickthreshold) { 9215b999a6bSDavide Italiano /* 9225b999a6bSDavide Italiano * Obtain the time of the last hardclock() call on 9235b999a6bSDavide Italiano * this CPU directly from the kern_clocksource.c. 9245b999a6bSDavide Italiano * This value is per-CPU, but it is equal for all 9255b999a6bSDavide Italiano * active ones. 9265b999a6bSDavide Italiano */ 9275b999a6bSDavide Italiano #ifdef __LP64__ 9285b999a6bSDavide Italiano to_sbt = DPCPU_GET(hardclocktime); 9295b999a6bSDavide Italiano #else 9305b999a6bSDavide Italiano spinlock_enter(); 9315b999a6bSDavide Italiano to_sbt = DPCPU_GET(hardclocktime); 9325b999a6bSDavide Italiano spinlock_exit(); 9335b999a6bSDavide Italiano #endif 9345b999a6bSDavide Italiano #endif 9355b999a6bSDavide Italiano if ((flags & C_HARDCLOCK) == 0) 9365b999a6bSDavide Italiano to_sbt += tick_sbt; 9375b999a6bSDavide Italiano } else 9385b999a6bSDavide Italiano to_sbt = sbinuptime(); 9395b999a6bSDavide Italiano to_sbt += sbt; 9405b999a6bSDavide Italiano pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp : 9415b999a6bSDavide Italiano sbt >> C_PRELGET(flags)); 9425b999a6bSDavide Italiano if (pr > precision) 9435b999a6bSDavide Italiano precision = pr; 9445b999a6bSDavide Italiano } 9458d809d50SJeff Roberson /* 9468d809d50SJeff Roberson * Don't allow migration of pre-allocated callouts lest they 9478d809d50SJeff Roberson * become unbalanced. 9488d809d50SJeff Roberson */ 9498d809d50SJeff Roberson if (c->c_flags & CALLOUT_LOCAL_ALLOC) 9508d809d50SJeff Roberson cpu = c->c_cpu; 9515b999a6bSDavide Italiano direct = (c->c_flags & CALLOUT_DIRECT) != 0; 9525b999a6bSDavide Italiano KASSERT(!direct || c->c_lock == NULL, 9535b999a6bSDavide Italiano ("%s: direct callout %p has lock", __func__, c)); 9548d809d50SJeff Roberson cc = callout_lock(c); 9555b999a6bSDavide Italiano if (cc->cc_exec_entity[direct].cc_curr == c) { 9562c1bb207SColin Percival /* 9572c1bb207SColin Percival * We're being asked to reschedule a callout which is 95864b9ee20SAttilio Rao * currently in progress. If there is a lock then we 95998c926b2SIan Dowse * can cancel the callout if it has not really started. 96098c926b2SIan Dowse */ 9615b999a6bSDavide Italiano if (c->c_lock != NULL && !cc->cc_exec_entity[direct].cc_cancel) 962ac42a172SDavide Italiano cancelled = cc->cc_exec_entity[direct].cc_cancel = true; 9635b999a6bSDavide Italiano if (cc->cc_exec_entity[direct].cc_waiting) { 96498c926b2SIan Dowse /* 96598c926b2SIan Dowse * Someone has called callout_drain to kill this 96698c926b2SIan Dowse * callout. Don't reschedule. 9672c1bb207SColin Percival */ 96868a57ebfSGleb Smirnoff CTR4(KTR_CALLOUT, "%s %p func %p arg %p", 96968a57ebfSGleb Smirnoff cancelled ? "cancelled" : "failed to cancel", 97068a57ebfSGleb Smirnoff c, c->c_func, c->c_arg); 9718d809d50SJeff Roberson CC_UNLOCK(cc); 972d04304d1SGleb Smirnoff return (cancelled); 97349a74476SColin Percival } 97498c926b2SIan Dowse } 9750413bacdSColin Percival if (c->c_flags & CALLOUT_PENDING) { 9765b999a6bSDavide Italiano if ((c->c_flags & CALLOUT_PROCESSED) == 0) { 9775b999a6bSDavide Italiano if (cc->cc_exec_next_dir == c) 9785b999a6bSDavide Italiano cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le); 9795b999a6bSDavide Italiano LIST_REMOVE(c, c_links.le); 9805b999a6bSDavide Italiano } else 9815b999a6bSDavide Italiano TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 982d04304d1SGleb Smirnoff cancelled = 1; 9838d809d50SJeff Roberson c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 9848d809d50SJeff Roberson } 9851283e9cdSAttilio Rao 9861283e9cdSAttilio Rao #ifdef SMP 9870413bacdSColin Percival /* 9881283e9cdSAttilio Rao * If the callout must migrate try to perform it immediately. 9891283e9cdSAttilio Rao * If the callout is currently running, just defer the migration 9901283e9cdSAttilio Rao * to a more appropriate moment. 9910413bacdSColin Percival */ 9928d809d50SJeff Roberson if (c->c_cpu != cpu) { 9935b999a6bSDavide Italiano if (cc->cc_exec_entity[direct].cc_curr == c) { 9945b999a6bSDavide Italiano cc->cc_exec_entity[direct].ce_migration_cpu = cpu; 9955b999a6bSDavide Italiano cc->cc_exec_entity[direct].ce_migration_time 9965b999a6bSDavide Italiano = to_sbt; 9975b999a6bSDavide Italiano cc->cc_exec_entity[direct].ce_migration_func = ftn; 9985b999a6bSDavide Italiano cc->cc_exec_entity[direct].ce_migration_arg = arg; 99957d07ca9SKonstantin Belousov c->c_flags |= CALLOUT_DFRMIGRATION; 10005b999a6bSDavide Italiano CTR6(KTR_CALLOUT, 10015b999a6bSDavide Italiano "migration of %p func %p arg %p in %d.%08x to %u deferred", 10025b999a6bSDavide Italiano c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 10035b999a6bSDavide Italiano (u_int)(to_sbt & 0xffffffff), cpu); 100408e4ac8aSAttilio Rao CC_UNLOCK(cc); 10051283e9cdSAttilio Rao return (cancelled); 1006a157e425SAlexander Motin } 10071283e9cdSAttilio Rao cc = callout_cpu_switch(c, cc, cpu); 100808e4ac8aSAttilio Rao } 10091283e9cdSAttilio Rao #endif 10101283e9cdSAttilio Rao 10115b999a6bSDavide Italiano callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags); 10125b999a6bSDavide Italiano CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x", 10135b999a6bSDavide Italiano cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 10145b999a6bSDavide Italiano (u_int)(to_sbt & 0xffffffff)); 10158d809d50SJeff Roberson CC_UNLOCK(cc); 1016d04304d1SGleb Smirnoff 1017d04304d1SGleb Smirnoff return (cancelled); 1018acc8326dSGarrett Wollman } 1019acc8326dSGarrett Wollman 10206e0186d5SSam Leffler /* 10216e0186d5SSam Leffler * Common idioms that can be optimized in the future. 10226e0186d5SSam Leffler */ 10236e0186d5SSam Leffler int 10246e0186d5SSam Leffler callout_schedule_on(struct callout *c, int to_ticks, int cpu) 10256e0186d5SSam Leffler { 10266e0186d5SSam Leffler return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu); 10276e0186d5SSam Leffler } 10286e0186d5SSam Leffler 10296e0186d5SSam Leffler int 10306e0186d5SSam Leffler callout_schedule(struct callout *c, int to_ticks) 10316e0186d5SSam Leffler { 10326e0186d5SSam Leffler return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu); 10336e0186d5SSam Leffler } 10346e0186d5SSam Leffler 10352c1bb207SColin Percival int 10362c1bb207SColin Percival _callout_stop_safe(c, safe) 10372c1bb207SColin Percival struct callout *c; 10382c1bb207SColin Percival int safe; 10392c1bb207SColin Percival { 10401283e9cdSAttilio Rao struct callout_cpu *cc, *old_cc; 104164b9ee20SAttilio Rao struct lock_class *class; 10425b999a6bSDavide Italiano int direct, sq_locked, use_lock; 104398c926b2SIan Dowse 104464b9ee20SAttilio Rao /* 104564b9ee20SAttilio Rao * Some old subsystems don't hold Giant while running a callout_stop(), 104664b9ee20SAttilio Rao * so just discard this check for the moment. 104764b9ee20SAttilio Rao */ 104864b9ee20SAttilio Rao if (!safe && c->c_lock != NULL) { 104964b9ee20SAttilio Rao if (c->c_lock == &Giant.lock_object) 105064b9ee20SAttilio Rao use_lock = mtx_owned(&Giant); 105164b9ee20SAttilio Rao else { 105264b9ee20SAttilio Rao use_lock = 1; 105364b9ee20SAttilio Rao class = LOCK_CLASS(c->c_lock); 105464b9ee20SAttilio Rao class->lc_assert(c->c_lock, LA_XLOCKED); 105598c926b2SIan Dowse } 105664b9ee20SAttilio Rao } else 105764b9ee20SAttilio Rao use_lock = 0; 10585b999a6bSDavide Italiano direct = (c->c_flags & CALLOUT_DIRECT) != 0; 105967b158d8SJohn Baldwin sq_locked = 0; 10601283e9cdSAttilio Rao old_cc = NULL; 106167b158d8SJohn Baldwin again: 10628d809d50SJeff Roberson cc = callout_lock(c); 10631283e9cdSAttilio Rao 10641283e9cdSAttilio Rao /* 10651283e9cdSAttilio Rao * If the callout was migrating while the callout cpu lock was 10661283e9cdSAttilio Rao * dropped, just drop the sleepqueue lock and check the states 10671283e9cdSAttilio Rao * again. 10681283e9cdSAttilio Rao */ 10691283e9cdSAttilio Rao if (sq_locked != 0 && cc != old_cc) { 10701283e9cdSAttilio Rao #ifdef SMP 10711283e9cdSAttilio Rao CC_UNLOCK(cc); 10725b999a6bSDavide Italiano sleepq_release(&old_cc->cc_exec_entity[direct].cc_waiting); 10731283e9cdSAttilio Rao sq_locked = 0; 10741283e9cdSAttilio Rao old_cc = NULL; 10751283e9cdSAttilio Rao goto again; 10761283e9cdSAttilio Rao #else 10771283e9cdSAttilio Rao panic("migration should not happen"); 10781283e9cdSAttilio Rao #endif 10791283e9cdSAttilio Rao } 10801283e9cdSAttilio Rao 1081acc8326dSGarrett Wollman /* 1082b36f4588SJohn Baldwin * If the callout isn't pending, it's not on the queue, so 1083b36f4588SJohn Baldwin * don't attempt to remove it from the queue. We can try to 1084b36f4588SJohn Baldwin * stop it by other means however. 1085acc8326dSGarrett Wollman */ 1086acc8326dSGarrett Wollman if (!(c->c_flags & CALLOUT_PENDING)) { 10879b8b58e0SJonathan Lemon c->c_flags &= ~CALLOUT_ACTIVE; 1088b36f4588SJohn Baldwin 1089b36f4588SJohn Baldwin /* 1090b36f4588SJohn Baldwin * If it wasn't on the queue and it isn't the current 1091b36f4588SJohn Baldwin * callout, then we can't stop it, so just bail. 1092b36f4588SJohn Baldwin */ 10935b999a6bSDavide Italiano if (cc->cc_exec_entity[direct].cc_curr != c) { 109468a57ebfSGleb Smirnoff CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 109568a57ebfSGleb Smirnoff c, c->c_func, c->c_arg); 10968d809d50SJeff Roberson CC_UNLOCK(cc); 109767b158d8SJohn Baldwin if (sq_locked) 10985b999a6bSDavide Italiano sleepq_release( 10995b999a6bSDavide Italiano &cc->cc_exec_entity[direct].cc_waiting); 110098c926b2SIan Dowse return (0); 110198c926b2SIan Dowse } 1102b36f4588SJohn Baldwin 110398c926b2SIan Dowse if (safe) { 11042c1bb207SColin Percival /* 1105b36f4588SJohn Baldwin * The current callout is running (or just 1106b36f4588SJohn Baldwin * about to run) and blocking is allowed, so 1107b36f4588SJohn Baldwin * just wait for the current invocation to 1108b36f4588SJohn Baldwin * finish. 11092c1bb207SColin Percival */ 11105b999a6bSDavide Italiano while (cc->cc_exec_entity[direct].cc_curr == c) { 11116a0ce57dSAttilio Rao /* 11126a0ce57dSAttilio Rao * Use direct calls to sleepqueue interface 11136a0ce57dSAttilio Rao * instead of cv/msleep in order to avoid 11148d809d50SJeff Roberson * a LOR between cc_lock and sleepqueue 11156a0ce57dSAttilio Rao * chain spinlocks. This piece of code 11166a0ce57dSAttilio Rao * emulates a msleep_spin() call actually. 111767b158d8SJohn Baldwin * 111867b158d8SJohn Baldwin * If we already have the sleepqueue chain 111967b158d8SJohn Baldwin * locked, then we can safely block. If we 112067b158d8SJohn Baldwin * don't already have it locked, however, 11218d809d50SJeff Roberson * we have to drop the cc_lock to lock 112267b158d8SJohn Baldwin * it. This opens several races, so we 112367b158d8SJohn Baldwin * restart at the beginning once we have 112467b158d8SJohn Baldwin * both locks. If nothing has changed, then 112567b158d8SJohn Baldwin * we will end up back here with sq_locked 112667b158d8SJohn Baldwin * set. 11276a0ce57dSAttilio Rao */ 112867b158d8SJohn Baldwin if (!sq_locked) { 11298d809d50SJeff Roberson CC_UNLOCK(cc); 11305b999a6bSDavide Italiano sleepq_lock( 11315b999a6bSDavide Italiano &cc->cc_exec_entity[direct].cc_waiting); 113267b158d8SJohn Baldwin sq_locked = 1; 11331283e9cdSAttilio Rao old_cc = cc; 113467b158d8SJohn Baldwin goto again; 11356a0ce57dSAttilio Rao } 11361283e9cdSAttilio Rao 11371283e9cdSAttilio Rao /* 11381283e9cdSAttilio Rao * Migration could be cancelled here, but 11391283e9cdSAttilio Rao * as long as it is still not sure when it 11401283e9cdSAttilio Rao * will be packed up, just let softclock() 11411283e9cdSAttilio Rao * take care of it. 11421283e9cdSAttilio Rao */ 1143ac42a172SDavide Italiano cc->cc_exec_entity[direct].cc_waiting = true; 11446a0ce57dSAttilio Rao DROP_GIANT(); 11458d809d50SJeff Roberson CC_UNLOCK(cc); 11465b999a6bSDavide Italiano sleepq_add( 11475b999a6bSDavide Italiano &cc->cc_exec_entity[direct].cc_waiting, 11488d809d50SJeff Roberson &cc->cc_lock.lock_object, "codrain", 11496a0ce57dSAttilio Rao SLEEPQ_SLEEP, 0); 11505b999a6bSDavide Italiano sleepq_wait( 11515b999a6bSDavide Italiano &cc->cc_exec_entity[direct].cc_waiting, 11525b999a6bSDavide Italiano 0); 115367b158d8SJohn Baldwin sq_locked = 0; 11541283e9cdSAttilio Rao old_cc = NULL; 11556a0ce57dSAttilio Rao 11566a0ce57dSAttilio Rao /* Reacquire locks previously released. */ 11576a0ce57dSAttilio Rao PICKUP_GIANT(); 11588d809d50SJeff Roberson CC_LOCK(cc); 1159b36f4588SJohn Baldwin } 11605b999a6bSDavide Italiano } else if (use_lock && 11615b999a6bSDavide Italiano !cc->cc_exec_entity[direct].cc_cancel) { 1162b36f4588SJohn Baldwin /* 116364b9ee20SAttilio Rao * The current callout is waiting for its 116464b9ee20SAttilio Rao * lock which we hold. Cancel the callout 1165b36f4588SJohn Baldwin * and return. After our caller drops the 116664b9ee20SAttilio Rao * lock, the callout will be skipped in 1167b36f4588SJohn Baldwin * softclock(). 1168b36f4588SJohn Baldwin */ 1169ac42a172SDavide Italiano cc->cc_exec_entity[direct].cc_cancel = true; 117068a57ebfSGleb Smirnoff CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 117168a57ebfSGleb Smirnoff c, c->c_func, c->c_arg); 11725b999a6bSDavide Italiano KASSERT(!cc_cce_migrating(cc, direct), 11731283e9cdSAttilio Rao ("callout wrongly scheduled for migration")); 11748d809d50SJeff Roberson CC_UNLOCK(cc); 117567b158d8SJohn Baldwin KASSERT(!sq_locked, ("sleepqueue chain locked")); 117698c926b2SIan Dowse return (1); 117757d07ca9SKonstantin Belousov } else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) { 117857d07ca9SKonstantin Belousov c->c_flags &= ~CALLOUT_DFRMIGRATION; 117957d07ca9SKonstantin Belousov CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p", 118057d07ca9SKonstantin Belousov c, c->c_func, c->c_arg); 118157d07ca9SKonstantin Belousov CC_UNLOCK(cc); 118257d07ca9SKonstantin Belousov return (1); 1183b36f4588SJohn Baldwin } 118468a57ebfSGleb Smirnoff CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 118568a57ebfSGleb Smirnoff c, c->c_func, c->c_arg); 11868d809d50SJeff Roberson CC_UNLOCK(cc); 118767b158d8SJohn Baldwin KASSERT(!sq_locked, ("sleepqueue chain still locked")); 1188a45982d2SJohn Baldwin return (0); 1189acc8326dSGarrett Wollman } 119067b158d8SJohn Baldwin if (sq_locked) 11915b999a6bSDavide Italiano sleepq_release(&cc->cc_exec_entity[direct].cc_waiting); 119267b158d8SJohn Baldwin 11939b8b58e0SJonathan Lemon c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 1194acc8326dSGarrett Wollman 119568a57ebfSGleb Smirnoff CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 119668a57ebfSGleb Smirnoff c, c->c_func, c->c_arg); 11975b999a6bSDavide Italiano if ((c->c_flags & CALLOUT_PROCESSED) == 0) { 11985b999a6bSDavide Italiano if (cc->cc_exec_next_dir == c) 11995b999a6bSDavide Italiano cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le); 12005b999a6bSDavide Italiano LIST_REMOVE(c, c_links.le); 12015b999a6bSDavide Italiano } else 12025b999a6bSDavide Italiano TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 12036098e7acSKonstantin Belousov callout_cc_del(c, cc); 120468a57ebfSGleb Smirnoff 12058d809d50SJeff Roberson CC_UNLOCK(cc); 1206a45982d2SJohn Baldwin return (1); 1207acc8326dSGarrett Wollman } 1208acc8326dSGarrett Wollman 1209acc8326dSGarrett Wollman void 1210e82ac18eSJonathan Lemon callout_init(c, mpsafe) 1211acc8326dSGarrett Wollman struct callout *c; 1212e82ac18eSJonathan Lemon int mpsafe; 1213acc8326dSGarrett Wollman { 12147347e1c6SGarrett Wollman bzero(c, sizeof *c); 121598c926b2SIan Dowse if (mpsafe) { 121664b9ee20SAttilio Rao c->c_lock = NULL; 121798c926b2SIan Dowse c->c_flags = CALLOUT_RETURNUNLOCKED; 121898c926b2SIan Dowse } else { 121964b9ee20SAttilio Rao c->c_lock = &Giant.lock_object; 122098c926b2SIan Dowse c->c_flags = 0; 122198c926b2SIan Dowse } 12228d809d50SJeff Roberson c->c_cpu = timeout_cpu; 122398c926b2SIan Dowse } 122498c926b2SIan Dowse 122598c926b2SIan Dowse void 122664b9ee20SAttilio Rao _callout_init_lock(c, lock, flags) 122798c926b2SIan Dowse struct callout *c; 122864b9ee20SAttilio Rao struct lock_object *lock; 122998c926b2SIan Dowse int flags; 123098c926b2SIan Dowse { 123198c926b2SIan Dowse bzero(c, sizeof *c); 123264b9ee20SAttilio Rao c->c_lock = lock; 123364b9ee20SAttilio Rao KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0, 123464b9ee20SAttilio Rao ("callout_init_lock: bad flags %d", flags)); 123564b9ee20SAttilio Rao KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0, 123664b9ee20SAttilio Rao ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock")); 123713ddf72dSAttilio Rao KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & 123813ddf72dSAttilio Rao (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class", 123964b9ee20SAttilio Rao __func__)); 124064b9ee20SAttilio Rao c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK); 12418d809d50SJeff Roberson c->c_cpu = timeout_cpu; 1242acc8326dSGarrett Wollman } 1243acc8326dSGarrett Wollman 1244e1d6dc65SNate Williams #ifdef APM_FIXUP_CALLTODO 1245e1d6dc65SNate Williams /* 1246e1d6dc65SNate Williams * Adjust the kernel calltodo timeout list. This routine is used after 1247e1d6dc65SNate Williams * an APM resume to recalculate the calltodo timer list values with the 1248e1d6dc65SNate Williams * number of hz's we have been sleeping. The next hardclock() will detect 1249e1d6dc65SNate Williams * that there are fired timers and run softclock() to execute them. 1250e1d6dc65SNate Williams * 1251e1d6dc65SNate Williams * Please note, I have not done an exhaustive analysis of what code this 1252e1d6dc65SNate Williams * might break. I am motivated to have my select()'s and alarm()'s that 1253e1d6dc65SNate Williams * have expired during suspend firing upon resume so that the applications 1254e1d6dc65SNate Williams * which set the timer can do the maintanence the timer was for as close 1255e1d6dc65SNate Williams * as possible to the originally intended time. Testing this code for a 1256e1d6dc65SNate Williams * week showed that resuming from a suspend resulted in 22 to 25 timers 1257e1d6dc65SNate Williams * firing, which seemed independant on whether the suspend was 2 hours or 1258e1d6dc65SNate Williams * 2 days. Your milage may vary. - Ken Key <key@cs.utk.edu> 1259e1d6dc65SNate Williams */ 1260e1d6dc65SNate Williams void 1261e1d6dc65SNate Williams adjust_timeout_calltodo(time_change) 1262e1d6dc65SNate Williams struct timeval *time_change; 1263e1d6dc65SNate Williams { 1264e1d6dc65SNate Williams register struct callout *p; 1265e1d6dc65SNate Williams unsigned long delta_ticks; 1266e1d6dc65SNate Williams 1267e1d6dc65SNate Williams /* 1268e1d6dc65SNate Williams * How many ticks were we asleep? 1269c8b47828SBruce Evans * (stolen from tvtohz()). 1270e1d6dc65SNate Williams */ 1271e1d6dc65SNate Williams 1272e1d6dc65SNate Williams /* Don't do anything */ 1273e1d6dc65SNate Williams if (time_change->tv_sec < 0) 1274e1d6dc65SNate Williams return; 1275e1d6dc65SNate Williams else if (time_change->tv_sec <= LONG_MAX / 1000000) 1276e1d6dc65SNate Williams delta_ticks = (time_change->tv_sec * 1000000 + 1277e1d6dc65SNate Williams time_change->tv_usec + (tick - 1)) / tick + 1; 1278e1d6dc65SNate Williams else if (time_change->tv_sec <= LONG_MAX / hz) 1279e1d6dc65SNate Williams delta_ticks = time_change->tv_sec * hz + 1280e1d6dc65SNate Williams (time_change->tv_usec + (tick - 1)) / tick + 1; 1281e1d6dc65SNate Williams else 1282e1d6dc65SNate Williams delta_ticks = LONG_MAX; 1283e1d6dc65SNate Williams 1284e1d6dc65SNate Williams if (delta_ticks > INT_MAX) 1285e1d6dc65SNate Williams delta_ticks = INT_MAX; 1286e1d6dc65SNate Williams 1287e1d6dc65SNate Williams /* 1288e1d6dc65SNate Williams * Now rip through the timer calltodo list looking for timers 1289e1d6dc65SNate Williams * to expire. 1290e1d6dc65SNate Williams */ 1291e1d6dc65SNate Williams 1292e1d6dc65SNate Williams /* don't collide with softclock() */ 12938d809d50SJeff Roberson CC_LOCK(cc); 1294e1d6dc65SNate Williams for (p = calltodo.c_next; p != NULL; p = p->c_next) { 1295e1d6dc65SNate Williams p->c_time -= delta_ticks; 1296e1d6dc65SNate Williams 1297e1d6dc65SNate Williams /* Break if the timer had more time on it than delta_ticks */ 1298e1d6dc65SNate Williams if (p->c_time > 0) 1299e1d6dc65SNate Williams break; 1300e1d6dc65SNate Williams 1301e1d6dc65SNate Williams /* take back the ticks the timer didn't use (p->c_time <= 0) */ 1302e1d6dc65SNate Williams delta_ticks = -p->c_time; 1303e1d6dc65SNate Williams } 13048d809d50SJeff Roberson CC_UNLOCK(cc); 1305e1d6dc65SNate Williams 1306e1d6dc65SNate Williams return; 1307e1d6dc65SNate Williams } 1308e1d6dc65SNate Williams #endif /* APM_FIXUP_CALLTODO */ 13095b999a6bSDavide Italiano 13105b999a6bSDavide Italiano static int 13115b999a6bSDavide Italiano flssbt(sbintime_t sbt) 13125b999a6bSDavide Italiano { 13135b999a6bSDavide Italiano 13145b999a6bSDavide Italiano sbt += (uint64_t)sbt >> 1; 13155b999a6bSDavide Italiano if (sizeof(long) >= sizeof(sbintime_t)) 13165b999a6bSDavide Italiano return (flsl(sbt)); 13175b999a6bSDavide Italiano if (sbt >= SBT_1S) 13185b999a6bSDavide Italiano return (flsl(((uint64_t)sbt) >> 32) + 32); 13195b999a6bSDavide Italiano return (flsl(sbt)); 13205b999a6bSDavide Italiano } 13215b999a6bSDavide Italiano 13225b999a6bSDavide Italiano /* 13235b999a6bSDavide Italiano * Dump immediate statistic snapshot of the scheduled callouts. 13245b999a6bSDavide Italiano */ 13255b999a6bSDavide Italiano static int 13265b999a6bSDavide Italiano sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS) 13275b999a6bSDavide Italiano { 13285b999a6bSDavide Italiano struct callout *tmp; 13295b999a6bSDavide Italiano struct callout_cpu *cc; 13305b999a6bSDavide Italiano struct callout_list *sc; 13315b999a6bSDavide Italiano sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t; 13325b999a6bSDavide Italiano int ct[64], cpr[64], ccpbk[32]; 13335b999a6bSDavide Italiano int error, val, i, count, tcum, pcum, maxc, c, medc; 13345b999a6bSDavide Italiano #ifdef SMP 13355b999a6bSDavide Italiano int cpu; 13365b999a6bSDavide Italiano #endif 13375b999a6bSDavide Italiano 13385b999a6bSDavide Italiano val = 0; 13395b999a6bSDavide Italiano error = sysctl_handle_int(oidp, &val, 0, req); 13405b999a6bSDavide Italiano if (error != 0 || req->newptr == NULL) 13415b999a6bSDavide Italiano return (error); 13425b999a6bSDavide Italiano count = maxc = 0; 13435b999a6bSDavide Italiano st = spr = maxt = maxpr = 0; 13445b999a6bSDavide Italiano bzero(ccpbk, sizeof(ccpbk)); 13455b999a6bSDavide Italiano bzero(ct, sizeof(ct)); 13465b999a6bSDavide Italiano bzero(cpr, sizeof(cpr)); 13475b999a6bSDavide Italiano now = sbinuptime(); 13485b999a6bSDavide Italiano #ifdef SMP 13495b999a6bSDavide Italiano CPU_FOREACH(cpu) { 13505b999a6bSDavide Italiano cc = CC_CPU(cpu); 13515b999a6bSDavide Italiano #else 13525b999a6bSDavide Italiano cc = CC_CPU(timeout_cpu); 13535b999a6bSDavide Italiano #endif 13545b999a6bSDavide Italiano CC_LOCK(cc); 13555b999a6bSDavide Italiano for (i = 0; i < callwheelsize; i++) { 13565b999a6bSDavide Italiano sc = &cc->cc_callwheel[i]; 13575b999a6bSDavide Italiano c = 0; 13585b999a6bSDavide Italiano LIST_FOREACH(tmp, sc, c_links.le) { 13595b999a6bSDavide Italiano c++; 13605b999a6bSDavide Italiano t = tmp->c_time - now; 13615b999a6bSDavide Italiano if (t < 0) 13625b999a6bSDavide Italiano t = 0; 13635b999a6bSDavide Italiano st += t / SBT_1US; 13645b999a6bSDavide Italiano spr += tmp->c_precision / SBT_1US; 13655b999a6bSDavide Italiano if (t > maxt) 13665b999a6bSDavide Italiano maxt = t; 13675b999a6bSDavide Italiano if (tmp->c_precision > maxpr) 13685b999a6bSDavide Italiano maxpr = tmp->c_precision; 13695b999a6bSDavide Italiano ct[flssbt(t)]++; 13705b999a6bSDavide Italiano cpr[flssbt(tmp->c_precision)]++; 13715b999a6bSDavide Italiano } 13725b999a6bSDavide Italiano if (c > maxc) 13735b999a6bSDavide Italiano maxc = c; 13745b999a6bSDavide Italiano ccpbk[fls(c + c / 2)]++; 13755b999a6bSDavide Italiano count += c; 13765b999a6bSDavide Italiano } 13775b999a6bSDavide Italiano CC_UNLOCK(cc); 13785b999a6bSDavide Italiano #ifdef SMP 13795b999a6bSDavide Italiano } 13805b999a6bSDavide Italiano #endif 13815b999a6bSDavide Italiano 13825b999a6bSDavide Italiano for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++) 13835b999a6bSDavide Italiano tcum += ct[i]; 13845b999a6bSDavide Italiano medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0; 13855b999a6bSDavide Italiano for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++) 13865b999a6bSDavide Italiano pcum += cpr[i]; 13875b999a6bSDavide Italiano medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0; 13885b999a6bSDavide Italiano for (i = 0, c = 0; i < 32 && c < count / 2; i++) 13895b999a6bSDavide Italiano c += ccpbk[i]; 13905b999a6bSDavide Italiano medc = (i >= 2) ? (1 << (i - 2)) : 0; 13915b999a6bSDavide Italiano 13925b999a6bSDavide Italiano printf("Scheduled callouts statistic snapshot:\n"); 13935b999a6bSDavide Italiano printf(" Callouts: %6d Buckets: %6d*%-3d Bucket size: 0.%06ds\n", 13945b999a6bSDavide Italiano count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT); 13955b999a6bSDavide Italiano printf(" C/Bk: med %5d avg %6d.%06jd max %6d\n", 13965b999a6bSDavide Italiano medc, 13975b999a6bSDavide Italiano count / callwheelsize / mp_ncpus, 13985b999a6bSDavide Italiano (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000, 13995b999a6bSDavide Italiano maxc); 14005b999a6bSDavide Italiano printf(" Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n", 14015b999a6bSDavide Italiano medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32, 14025b999a6bSDavide Italiano (st / count) / 1000000, (st / count) % 1000000, 14035b999a6bSDavide Italiano maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32); 14045b999a6bSDavide Italiano printf(" Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n", 14055b999a6bSDavide Italiano medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32, 14065b999a6bSDavide Italiano (spr / count) / 1000000, (spr / count) % 1000000, 14075b999a6bSDavide Italiano maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32); 14085b999a6bSDavide Italiano printf(" Distribution: \tbuckets\t time\t tcum\t" 14095b999a6bSDavide Italiano " prec\t pcum\n"); 14105b999a6bSDavide Italiano for (i = 0, tcum = pcum = 0; i < 64; i++) { 14115b999a6bSDavide Italiano if (ct[i] == 0 && cpr[i] == 0) 14125b999a6bSDavide Italiano continue; 14135b999a6bSDavide Italiano t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0; 14145b999a6bSDavide Italiano tcum += ct[i]; 14155b999a6bSDavide Italiano pcum += cpr[i]; 14165b999a6bSDavide Italiano printf(" %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n", 14175b999a6bSDavide Italiano t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32, 14185b999a6bSDavide Italiano i - 1 - (32 - CC_HASH_SHIFT), 14195b999a6bSDavide Italiano ct[i], tcum, cpr[i], pcum); 14205b999a6bSDavide Italiano } 14215b999a6bSDavide Italiano return (error); 14225b999a6bSDavide Italiano } 14235b999a6bSDavide Italiano SYSCTL_PROC(_kern, OID_AUTO, callout_stat, 14245b999a6bSDavide Italiano CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 14255b999a6bSDavide Italiano 0, 0, sysctl_kern_callout_stat, "I", 14265b999a6bSDavide Italiano "Dump immediate statistic snapshot of the scheduled callouts"); 1427