/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_callout_profiling.h"
#include "opt_kdtrace.h"
#if defined(__arm__)
#include "opt_timer.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/file.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef SMP
#include <machine/cpu.h>
#endif

#ifndef NO_EVENTTIMERS
DPCPU_DECLARE(sbintime_t, hardclocktime);
#endif

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE1(callout_execute, kernel, , callout_start, callout-start,
    "struct callout *");
SDT_PROBE_DEFINE1(callout_execute, kernel, , callout_end, callout-end,
    "struct callout *");

#ifdef CALLOUT_PROFILING
static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
static int avg_depth_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
    "Average number of direct callouts examined per callout_process call. "
    "Units = 1/1000");
static int avg_lockcalls_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
    &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
    "callout_process call. Units = 1/1000");
static int avg_mpcalls_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
    0, "Average number of MP direct callouts made per callout_process call. "
" 1015b999a6bSDavide Italiano "Units = 1/1000"); 1025b999a6bSDavide Italiano #endif 103f8ccf82aSAndre Oppermann 104f8ccf82aSAndre Oppermann static int ncallout; 105f8ccf82aSAndre Oppermann SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN, &ncallout, 0, 106f8ccf82aSAndre Oppermann "Number of entries in callwheel and size of timeout() preallocation"); 107f8ccf82aSAndre Oppermann 10815b7a470SPoul-Henning Kamp /* 10915b7a470SPoul-Henning Kamp * TODO: 11015b7a470SPoul-Henning Kamp * allocate more timeout table slots when table overflows. 11115b7a470SPoul-Henning Kamp */ 1123f555c45SDavide Italiano u_int callwheelsize, callwheelmask; 113f23b4c91SGarrett Wollman 11420c510f8SLuigi Rizzo /* 1155b999a6bSDavide Italiano * The callout cpu exec entities represent informations necessary for 1165b999a6bSDavide Italiano * describing the state of callouts currently running on the CPU and the ones 1175b999a6bSDavide Italiano * necessary for migrating callouts to the new callout cpu. In particular, 1185b999a6bSDavide Italiano * the first entry of the array cc_exec_entity holds informations for callout 1195b999a6bSDavide Italiano * running in SWI thread context, while the second one holds informations 1205b999a6bSDavide Italiano * for callout running directly from hardware interrupt context. 1211283e9cdSAttilio Rao * The cached informations are very important for deferring migration when 1221283e9cdSAttilio Rao * the migrating callout is already running. 1231283e9cdSAttilio Rao */ 1245b999a6bSDavide Italiano struct cc_exec { 1255b999a6bSDavide Italiano struct callout *cc_next; 1265b999a6bSDavide Italiano struct callout *cc_curr; 1271283e9cdSAttilio Rao #ifdef SMP 1281283e9cdSAttilio Rao void (*ce_migration_func)(void *); 1291283e9cdSAttilio Rao void *ce_migration_arg; 1301283e9cdSAttilio Rao int ce_migration_cpu; 1315b999a6bSDavide Italiano sbintime_t ce_migration_time; 1323f321a4eSDavide Italiano sbintime_t ce_migration_prec; 1331283e9cdSAttilio Rao #endif 134a4a3ce99SDavide Italiano bool cc_cancel; 135a4a3ce99SDavide Italiano bool cc_waiting; 1361283e9cdSAttilio Rao }; 1371283e9cdSAttilio Rao 1381283e9cdSAttilio Rao /* 13920c510f8SLuigi Rizzo * There is one struct callout_cpu per cpu, holding all relevant 14020c510f8SLuigi Rizzo * state for the callout processing thread on the individual CPU. 
 */
struct callout_cpu {
	struct mtx_padalign	cc_lock;
	struct cc_exec 		cc_exec_entity[2];
	struct callout		*cc_callout;
	struct callout_list	*cc_callwheel;
	struct callout_tailq	cc_expireq;
	struct callout_slist	cc_callfree;
	sbintime_t		cc_firstevent;
	sbintime_t		cc_lastscan;
	void			*cc_cookie;
	u_int			cc_bucket;
};

#define	cc_exec_curr		cc_exec_entity[0].cc_curr
#define	cc_exec_next		cc_exec_entity[0].cc_next
#define	cc_exec_cancel		cc_exec_entity[0].cc_cancel
#define	cc_exec_waiting		cc_exec_entity[0].cc_waiting
#define	cc_exec_curr_dir	cc_exec_entity[1].cc_curr
#define	cc_exec_next_dir	cc_exec_entity[1].cc_next
#define	cc_exec_cancel_dir	cc_exec_entity[1].cc_cancel
#define	cc_exec_waiting_dir	cc_exec_entity[1].cc_waiting

#ifdef SMP
#define	cc_migration_func	cc_exec_entity[0].ce_migration_func
#define	cc_migration_arg	cc_exec_entity[0].ce_migration_arg
#define	cc_migration_cpu	cc_exec_entity[0].ce_migration_cpu
#define	cc_migration_time	cc_exec_entity[0].ce_migration_time
#define	cc_migration_prec	cc_exec_entity[0].ce_migration_prec
#define	cc_migration_func_dir	cc_exec_entity[1].ce_migration_func
#define	cc_migration_arg_dir	cc_exec_entity[1].ce_migration_arg
#define	cc_migration_cpu_dir	cc_exec_entity[1].ce_migration_cpu
#define	cc_migration_time_dir	cc_exec_entity[1].ce_migration_time
#define	cc_migration_prec_dir	cc_exec_entity[1].ce_migration_prec

struct callout_cpu cc_cpu[MAXCPU];
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)

static int timeout_cpu;

static void	callout_cpu_init(struct callout_cpu *cc);
static void	softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
		    int *mpcalls, int *lockcalls, int *gcalls,
#endif
		    int direct);
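
/*
 * Informational note (not in the original file): the accessor macros
 * above simply name the two slots of cc_exec_entity, so for a given
 * struct callout_cpu *cc the following pairs are equivalent:
 *
 *	cc->cc_exec_curr	== cc->cc_exec_entity[0].cc_curr
 *	cc->cc_exec_curr_dir	== cc->cc_exec_entity[1].cc_curr
 *
 * Entity 0 describes callouts serviced by the softclock() SWI thread,
 * entity 1 those run directly from hardware interrupt context.
 */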
static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is cc_curr.
 *                     If cc_curr is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to true with both callout_lock and cc_lock
 *                     held guarantees that the current callout will not run.
 *                     The softclock() function sets this to false before it
 *                     drops callout_lock to acquire c_lock, and it calls
 *                     the handler only if cc_cancel is still false after
 *                     cc_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     cc_waiting is true.  Set only when
 *                     cc_curr is non-NULL.
 */

/*
 * Resets the execution entity tied to a specific callout cpu.
 */
static void
cc_cce_cleanup(struct callout_cpu *cc, int direct)
{

	cc->cc_exec_entity[direct].cc_curr = NULL;
	cc->cc_exec_entity[direct].cc_next = NULL;
	cc->cc_exec_entity[direct].cc_cancel = false;
	cc->cc_exec_entity[direct].cc_waiting = false;
#ifdef SMP
	cc->cc_exec_entity[direct].ce_migration_cpu = CPUBLOCK;
	cc->cc_exec_entity[direct].ce_migration_time = 0;
	cc->cc_exec_entity[direct].ce_migration_prec = 0;
	cc->cc_exec_entity[direct].ce_migration_func = NULL;
	cc->cc_exec_entity[direct].ce_migration_arg = NULL;
#endif
}

/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cce_migrating(struct callout_cpu *cc, int direct)
{

#ifdef SMP
	return (cc->cc_exec_entity[direct].ce_migration_cpu != CPUBLOCK);
#else
	return (0);
#endif
}

/*
 * Kernel low level callwheel initialization
 * called on cpu0 during kernel startup.
 */
static void
callout_callwheel_init(void *dummy)
{
	struct callout_cpu *cc;

	/*
	 * Calculate the size of the callout wheel and the preallocated
	 * timeout() structures.
	 * XXX: Clip callout to result of previous function of maxusers
	 * maximum 384.  This is still huge, but acceptable.
	 */
	ncallout = imin(16 + maxproc + maxfiles, 18508);
	TUNABLE_INT_FETCH("kern.ncallout", &ncallout);

	/*
	 * Calculate the callout wheel size; it should be the next power
	 * of two higher than 'ncallout'.
	 */
	callwheelsize = 1 << fls(ncallout);
	callwheelmask = callwheelsize - 1;

	/*
	 * Only cpu0 handles timeout(9) and receives a preallocation.
	 *
	 * XXX: Once all timeout(9) consumers are converted this can
	 * be removed.
	 */
	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	cc->cc_callout = malloc(ncallout * sizeof(struct callout),
	    M_CALLOUT, M_WAITOK);
	callout_cpu_init(cc);
}
SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);

/*
 * Initialize the per-cpu callout structures.
 */
static void
callout_cpu_init(struct callout_cpu *cc)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize,
	    M_CALLOUT, M_WAITOK);
	for (i = 0; i < callwheelsize; i++)
		LIST_INIT(&cc->cc_callwheel[i]);
	TAILQ_INIT(&cc->cc_expireq);
	cc->cc_firstevent = INT64_MAX;
	for (i = 0; i < 2; i++)
		cc_cce_cleanup(cc, i);
	if (cc->cc_callout == NULL)	/* Only cpu0 handles timeout(9) */
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects a locked incoming callout cpu and returns with
 * the outgoing callout cpu locked.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	/*
	 * Avoid interrupts and preemption firing after the callout cpu
	 * is blocked in order to avoid deadlocks as the new thread
	 * may be willing to acquire the callout cpu lock.
	 */
	c->c_cpu = CPUBLOCK;
	spinlock_enter();
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	spinlock_exit();
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif

/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &cc->cc_cookie))
		panic("died while creating standard software ithreads");
#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(9). */
		callout_cpu_init(cc);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
	}
#endif
}
SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

#define	CC_HASH_SHIFT	8

static inline u_int
callout_hash(sbintime_t sbt)
{

	return (sbt >> (32 - CC_HASH_SHIFT));
}

static inline u_int
callout_get_bucket(sbintime_t sbt)
{

	return (callout_hash(sbt) & callwheelmask);
}
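
/*
 * Worked example for the two helpers above (informational): sbintime_t
 * is a 32.32 fixed-point value, so with CC_HASH_SHIFT = 8 the hash
 * advances by one every 2^24 sbt units, i.e. every 1/256 s (~3.9 ms).
 * For an uptime of 2.5 s (sbt = 0x280000000), callout_hash() returns
 * 640; with a 2048-entry wheel (callwheelmask = 2047) that maps to
 * bucket 640, and the wheel as a whole wraps around every
 * 2048 / 256 = 8 seconds.
 */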

void
callout_process(sbintime_t now)
{
	struct callout *tmp, *tmpn;
	struct callout_cpu *cc;
	struct callout_list *sc;
	sbintime_t first, last, max, tmp_max;
	uint32_t lookahead;
	u_int firstb, lastb, nowb;
#ifdef CALLOUT_PROFILING
	int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
#endif

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);

	/* Compute the buckets of the last scan and present times. */
	firstb = callout_hash(cc->cc_lastscan);
	cc->cc_lastscan = now;
	nowb = callout_hash(now);

	/* Compute the last bucket and minimum time of the bucket after it. */
	if (nowb == firstb)
		lookahead = (SBT_1S / 16);
	else if (nowb - firstb == 1)
		lookahead = (SBT_1S / 8);
	else
		lookahead = (SBT_1S / 2);
	first = last = now;
	first += (lookahead / 2);
	last += lookahead;
	last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
	lastb = callout_hash(last) - 1;
	max = last;

	/*
	 * Check if we wrapped around the entire wheel from the last scan.
	 * If so, we need to scan the entire wheel for pending callouts.
	 */
	if (lastb - firstb >= callwheelsize) {
		lastb = firstb + callwheelsize - 1;
		if (nowb - firstb >= callwheelsize)
			nowb = lastb;
	}

	/* Iterate callwheel from firstb to nowb and then up to lastb. */
	do {
		sc = &cc->cc_callwheel[firstb & callwheelmask];
		tmp = LIST_FIRST(sc);
		while (tmp != NULL) {
			/* Run the callout if the present time is within its allowed window. */
			if (tmp->c_time <= now) {
				/*
				 * Consumer told us the callout may be run
				 * directly from hardware interrupt context.
				 */
				if (tmp->c_flags & CALLOUT_DIRECT) {
#ifdef CALLOUT_PROFILING
					++depth_dir;
#endif
					cc->cc_exec_next_dir =
					    LIST_NEXT(tmp, c_links.le);
					cc->cc_bucket = firstb & callwheelmask;
					LIST_REMOVE(tmp, c_links.le);
					softclock_call_cc(tmp, cc,
#ifdef CALLOUT_PROFILING
					    &mpcalls_dir, &lockcalls_dir, NULL,
#endif
					    1);
					tmp = cc->cc_exec_next_dir;
				} else {
					tmpn = LIST_NEXT(tmp, c_links.le);
					LIST_REMOVE(tmp, c_links.le);
					TAILQ_INSERT_TAIL(&cc->cc_expireq,
					    tmp, c_links.tqe);
					tmp->c_flags |= CALLOUT_PROCESSED;
					tmp = tmpn;
				}
				continue;
			}
			/* Skip events from the distant future. */
			if (tmp->c_time >= max)
				goto next;
			/*
			 * This event's minimal time is bigger than the
			 * present maximal time, so it cannot be aggregated.
			 */
			if (tmp->c_time > last) {
				lastb = nowb;
				goto next;
			}
			/* Update first and last time, respecting this event. */
			if (tmp->c_time < first)
				first = tmp->c_time;
			tmp_max = tmp->c_time + tmp->c_precision;
			if (tmp_max < last)
				last = tmp_max;
next:
			tmp = LIST_NEXT(tmp, c_links.le);
		}
		/* Proceed with the next bucket. */
		firstb++;
		/*
		 * Stop if we looked past the present time and found
		 * some event we cannot execute now.
		 * Stop if we looked far enough into the future.
		 */
	} while (((int)(firstb - lastb)) <= 0);
	cc->cc_firstevent = last;
#ifndef NO_EVENTTIMERS
	cpu_new_callout(curcpu, last, first);
#endif
#ifdef CALLOUT_PROFILING
	avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
	avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
	avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
#endif
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (!TAILQ_EMPTY(&cc->cc_expireq))
		swi_sched(cc->cc_cookie, 0);
}
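
/*
 * Worked example for callout_process() above (informational): when the
 * previous scan was in the same ~3.9 ms bucket as "now", the lookahead
 * is 1/16 s, so the aggregation window starts at first = now + 1/32 s
 * and initially ends at last = now + 1/16 s.  A pending callout due at
 * now + 40 ms with 5 ms of precision is neither run nor skipped; it
 * shrinks "last" to now + 45 ms, so the next hardware timer event is
 * programmed no later than that callout's deadline plus its allowed
 * slop.
 */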

static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

static void
callout_cc_add(struct callout *c, struct callout_cpu *cc,
    sbintime_t sbt, sbintime_t precision, void (*func)(void *),
    void *arg, int cpu, int flags)
{
	int bucket;

	CC_LOCK_ASSERT(cc);
	if (sbt < cc->cc_lastscan)
		sbt = cc->cc_lastscan;
	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	if (flags & C_DIRECT_EXEC)
		c->c_flags |= CALLOUT_DIRECT;
	c->c_flags &= ~CALLOUT_PROCESSED;
	c->c_func = func;
	c->c_time = sbt;
	c->c_precision = precision;
	bucket = callout_get_bucket(c->c_time);
	CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
	    c, (int)(c->c_precision >> 32),
	    (u_int)(c->c_precision & 0xffffffff));
	LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
	if (cc->cc_bucket == bucket)
		cc->cc_exec_next_dir = c;
#ifndef NO_EVENTTIMERS
	/*
	 * Inform the eventtimers(4) subsystem there's a new callout
	 * that has been inserted, but only if really required.
	 */
	if (INT64_MAX - c->c_time < c->c_precision)
		c->c_precision = INT64_MAX - c->c_time;
	sbt = c->c_time + c->c_precision;
	if (sbt < cc->cc_firstevent) {
		cc->cc_firstevent = sbt;
		cpu_new_callout(cpu, sbt, c->c_time);
	}
#endif
}

static void
callout_cc_del(struct callout *c, struct callout_cpu *cc)
{

	if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0)
		return;
	c->c_func = NULL;
	SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
}

static void
softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
    int *mpcalls, int *lockcalls, int *gcalls,
#endif
    int direct)
{
	struct rm_priotracker tracker;
	void (*c_func)(void *);
	void *c_arg;
	struct lock_class *class;
	struct lock_object *c_lock;
	uintptr_t lock_status;
	int c_flags;
#ifdef SMP
	struct callout_cpu *new_cc;
	void (*new_func)(void *);
	void *new_arg;
	int flags, new_cpu;
	sbintime_t new_prec, new_time;
#endif
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbintime_t sbt1, sbt2;
	struct timespec ts2;
	static sbintime_t maxdt = 2 * SBT_1MS;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

	KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) ==
	    (CALLOUT_PENDING | CALLOUT_ACTIVE),
	    ("softclock_call_cc: pend|act %p %x", c, c->c_flags));
	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
	lock_status = 0;
	if (c->c_flags & CALLOUT_SHAREDLOCK) {
		if (class == &lock_class_rm)
			lock_status = (uintptr_t)&tracker;
		else
			lock_status = 1;
	}
	c_lock = c->c_lock;
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_flags = c->c_flags;
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		c->c_flags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_flags &= ~CALLOUT_PENDING;
	cc->cc_exec_entity[direct].cc_curr = c;
	cc->cc_exec_entity[direct].cc_cancel = false;
	CC_UNLOCK(cc);
	if (c_lock != NULL) {
		class->lc_lock(c_lock, lock_status);
		/*
		 * The callout may have been cancelled
		 * while we switched locks.
		 */
		if (cc->cc_exec_entity[direct].cc_cancel) {
			class->lc_unlock(c_lock);
			goto skip;
		}
		/* The callout cannot be stopped now. */
		cc->cc_exec_entity[direct].cc_cancel = true;
		if (c_lock == &Giant.lock_object) {
#ifdef CALLOUT_PROFILING
			(*gcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
			    c, c_func, c_arg);
		} else {
#ifdef CALLOUT_PROFILING
			(*lockcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
			    c, c_func, c_arg);
		}
	} else {
#ifdef CALLOUT_PROFILING
		(*mpcalls)++;
#endif
		CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
		    c, c_func, c_arg);
	}
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbt1 = sbinuptime();
#endif
	THREAD_NO_SLEEPING();
	SDT_PROBE(callout_execute, kernel, , callout_start, c, 0, 0, 0, 0);
	c_func(c_arg);
	SDT_PROBE(callout_execute, kernel, , callout_end, c, 0, 0, 0, 0);
	THREAD_SLEEPING_OK();
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbt2 = sbinuptime();
	sbt2 -= sbt1;
	if (sbt2 > maxdt) {
		if (lastfunc != c_func || sbt2 > maxdt * 2) {
			ts2 = sbttots(sbt2);
			printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = sbt2;
		lastfunc = c_func;
	}
#endif
	CTR1(KTR_CALLOUT, "callout %p finished", c);
	if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
		class->lc_unlock(c_lock);
skip:
	CC_LOCK(cc);
	KASSERT(cc->cc_exec_entity[direct].cc_curr == c, ("mishandled cc_curr"));
	cc->cc_exec_entity[direct].cc_curr = NULL;
	if (cc->cc_exec_entity[direct].cc_waiting) {
		/*
		 * There is someone waiting for the
		 * callout to complete.
		 * If the callout was scheduled for
		 * migration, just cancel it.
		 */
		if (cc_cce_migrating(cc, direct)) {
			cc_cce_cleanup(cc, direct);

			/*
			 * It should be asserted here that the callout is
			 * not destroyed, but that is not easy.
			 */
			c->c_flags &= ~CALLOUT_DFRMIGRATION;
		}
		cc->cc_exec_entity[direct].cc_waiting = false;
		CC_UNLOCK(cc);
		wakeup(&cc->cc_exec_entity[direct].cc_waiting);
		CC_LOCK(cc);
	} else if (cc_cce_migrating(cc, direct)) {
		KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0,
		    ("Migrating legacy callout %p", c));
#ifdef SMP
		/*
		 * If the callout was scheduled for
		 * migration, just perform it now.
		 */
		new_cpu = cc->cc_exec_entity[direct].ce_migration_cpu;
		new_time = cc->cc_exec_entity[direct].ce_migration_time;
		new_prec = cc->cc_exec_entity[direct].ce_migration_prec;
		new_func = cc->cc_exec_entity[direct].ce_migration_func;
		new_arg = cc->cc_exec_entity[direct].ce_migration_arg;
		cc_cce_cleanup(cc, direct);

		/*
		 * It should be asserted here that the callout is not
		 * destroyed, but that is not easy.
		 *
		 * As a first step, handle deferred callout stops.
		 */
		if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
			CTR3(KTR_CALLOUT,
			    "deferred cancelled %p func %p arg %p",
			    c, new_func, new_arg);
			callout_cc_del(c, cc);
			return;
		}
		c->c_flags &= ~CALLOUT_DFRMIGRATION;

		new_cc = callout_cpu_switch(c, cc, new_cpu);
		flags = (direct) ? C_DIRECT_EXEC : 0;
		callout_cc_add(c, new_cc, new_time, new_prec, new_func,
		    new_arg, new_cpu, flags);
		CC_UNLOCK(new_cc);
		CC_LOCK(cc);
#else
		panic("migration should not happen");
#endif
	}
	/*
	 * If the current callout is locally allocated (from
	 * timeout(9)) then put it on the freelist.
	 *
	 * Note: we need to check the cached copy of c_flags because
	 * if it was not local, then it's not safe to deref the
	 * callout pointer.
	 */
	KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
	    c->c_flags == CALLOUT_LOCAL_ALLOC,
	    ("corrupted callout"));
	if (c_flags & CALLOUT_LOCAL_ALLOC)
		callout_cc_del(c, cc);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
#ifdef CALLOUT_PROFILING
	int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
#endif

	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
		TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
		softclock_call_cc(c, cc,
#ifdef CALLOUT_PROFILING
		    &mpcalls, &lockcalls, &gcalls,
#endif
		    0);
#ifdef CALLOUT_PROFILING
		++depth;
#endif
	}
#ifdef CALLOUT_PROFILING
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
#endif
	CC_UNLOCK(cc);
}
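
/*
 * Note on the profiling averages updated above (informational): each
 * statement keeps an exponential moving average with weight 1/256, in
 * fixed-point units of 1/1000 as suggested by the "Units = 1/1000"
 * wording of the to_avg_* sysctls; e.g. a workload that constantly
 * expires five callouts per softclock() pass converges to
 * avg_depth == 5000.
 */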

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
int
callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision,
    void (*ftn)(void *), void *arg, int cpu, int flags)
{
	sbintime_t to_sbt, pr;
	struct callout_cpu *cc;
	int cancelled, direct;

	cancelled = 0;
	if (flags & C_ABSOLUTE) {
		to_sbt = sbt;
	} else {
		if ((flags & C_HARDCLOCK) && (sbt < tick_sbt))
			sbt = tick_sbt;
		if ((flags & C_HARDCLOCK) ||
#ifdef NO_EVENTTIMERS
		    sbt >= sbt_timethreshold) {
			to_sbt = getsbinuptime();

			/* Add safety belt for the case of hz > 1000. */
			to_sbt += tc_tick_sbt - tick_sbt;
#else
		    sbt >= sbt_tickthreshold) {
			/*
			 * Obtain the time of the last hardclock() call on
			 * this CPU directly from kern_clocksource.c.
			 * This value is per-CPU, but it is equal for all
			 * active ones.
			 */
#ifdef __LP64__
			to_sbt = DPCPU_GET(hardclocktime);
#else
			spinlock_enter();
			to_sbt = DPCPU_GET(hardclocktime);
			spinlock_exit();
#endif
#endif
			if ((flags & C_HARDCLOCK) == 0)
				to_sbt += tick_sbt;
		} else
			to_sbt = sbinuptime();
		if (INT64_MAX - to_sbt < sbt)
			to_sbt = INT64_MAX;
		else
			to_sbt += sbt;
		pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
		    sbt >> C_PRELGET(flags));
		if (pr > precision)
			precision = pr;
	}
	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced.
	 */
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		cpu = c->c_cpu;
	direct = (c->c_flags & CALLOUT_DIRECT) != 0;
	KASSERT(!direct || c->c_lock == NULL,
	    ("%s: direct callout %p has lock", __func__, c));
	cc = callout_lock(c);
	if (cc->cc_exec_entity[direct].cc_curr == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc->cc_exec_entity[direct].cc_cancel)
			cancelled = cc->cc_exec_entity[direct].cc_cancel = true;
		if (cc->cc_exec_entity[direct].cc_waiting) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
"cancelled" : "failed to cancel", 98868a57ebfSGleb Smirnoff c, c->c_func, c->c_arg); 9898d809d50SJeff Roberson CC_UNLOCK(cc); 990d04304d1SGleb Smirnoff return (cancelled); 99149a74476SColin Percival } 99298c926b2SIan Dowse } 9930413bacdSColin Percival if (c->c_flags & CALLOUT_PENDING) { 9945b999a6bSDavide Italiano if ((c->c_flags & CALLOUT_PROCESSED) == 0) { 9955b999a6bSDavide Italiano if (cc->cc_exec_next_dir == c) 9965b999a6bSDavide Italiano cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le); 9975b999a6bSDavide Italiano LIST_REMOVE(c, c_links.le); 9985b999a6bSDavide Italiano } else 9995b999a6bSDavide Italiano TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 1000d04304d1SGleb Smirnoff cancelled = 1; 10018d809d50SJeff Roberson c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 10028d809d50SJeff Roberson } 10031283e9cdSAttilio Rao 10041283e9cdSAttilio Rao #ifdef SMP 10050413bacdSColin Percival /* 10061283e9cdSAttilio Rao * If the callout must migrate try to perform it immediately. 10071283e9cdSAttilio Rao * If the callout is currently running, just defer the migration 10081283e9cdSAttilio Rao * to a more appropriate moment. 10090413bacdSColin Percival */ 10108d809d50SJeff Roberson if (c->c_cpu != cpu) { 10115b999a6bSDavide Italiano if (cc->cc_exec_entity[direct].cc_curr == c) { 10125b999a6bSDavide Italiano cc->cc_exec_entity[direct].ce_migration_cpu = cpu; 10135b999a6bSDavide Italiano cc->cc_exec_entity[direct].ce_migration_time 10145b999a6bSDavide Italiano = to_sbt; 10153f321a4eSDavide Italiano cc->cc_exec_entity[direct].ce_migration_prec 10163f321a4eSDavide Italiano = precision; 10175b999a6bSDavide Italiano cc->cc_exec_entity[direct].ce_migration_func = ftn; 10185b999a6bSDavide Italiano cc->cc_exec_entity[direct].ce_migration_arg = arg; 101957d07ca9SKonstantin Belousov c->c_flags |= CALLOUT_DFRMIGRATION; 10205b999a6bSDavide Italiano CTR6(KTR_CALLOUT, 10215b999a6bSDavide Italiano "migration of %p func %p arg %p in %d.%08x to %u deferred", 10225b999a6bSDavide Italiano c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 10235b999a6bSDavide Italiano (u_int)(to_sbt & 0xffffffff), cpu); 102408e4ac8aSAttilio Rao CC_UNLOCK(cc); 10251283e9cdSAttilio Rao return (cancelled); 1026a157e425SAlexander Motin } 10271283e9cdSAttilio Rao cc = callout_cpu_switch(c, cc, cpu); 102808e4ac8aSAttilio Rao } 10291283e9cdSAttilio Rao #endif 10301283e9cdSAttilio Rao 10315b999a6bSDavide Italiano callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags); 10325b999a6bSDavide Italiano CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x", 10335b999a6bSDavide Italiano cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 10345b999a6bSDavide Italiano (u_int)(to_sbt & 0xffffffff)); 10358d809d50SJeff Roberson CC_UNLOCK(cc); 1036d04304d1SGleb Smirnoff 1037d04304d1SGleb Smirnoff return (cancelled); 1038acc8326dSGarrett Wollman } 1039acc8326dSGarrett Wollman 10406e0186d5SSam Leffler /* 10416e0186d5SSam Leffler * Common idioms that can be optimized in the future. 
10426e0186d5SSam Leffler  */
10436e0186d5SSam Leffler int
10446e0186d5SSam Leffler callout_schedule_on(struct callout *c, int to_ticks, int cpu)
10456e0186d5SSam Leffler {
10466e0186d5SSam Leffler 	return (callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu));
10476e0186d5SSam Leffler }
10486e0186d5SSam Leffler 
10496e0186d5SSam Leffler int
10506e0186d5SSam Leffler callout_schedule(struct callout *c, int to_ticks)
10516e0186d5SSam Leffler {
10526e0186d5SSam Leffler 	return (callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu));
10536e0186d5SSam Leffler }
10546e0186d5SSam Leffler 
10552c1bb207SColin Percival int
10562c1bb207SColin Percival _callout_stop_safe(struct callout *c, int safe)
10592c1bb207SColin Percival {
10601283e9cdSAttilio Rao 	struct callout_cpu *cc, *old_cc;
106164b9ee20SAttilio Rao 	struct lock_class *class;
10625b999a6bSDavide Italiano 	int direct, sq_locked, use_lock;
106398c926b2SIan Dowse 
106464b9ee20SAttilio Rao 	/*
106564b9ee20SAttilio Rao 	 * Some old subsystems don't hold Giant while running a callout_stop(),
106664b9ee20SAttilio Rao 	 * so just skip this check for the moment.
106764b9ee20SAttilio Rao 	 */
106864b9ee20SAttilio Rao 	if (!safe && c->c_lock != NULL) {
106964b9ee20SAttilio Rao 		if (c->c_lock == &Giant.lock_object)
107064b9ee20SAttilio Rao 			use_lock = mtx_owned(&Giant);
107164b9ee20SAttilio Rao 		else {
107264b9ee20SAttilio Rao 			use_lock = 1;
107364b9ee20SAttilio Rao 			class = LOCK_CLASS(c->c_lock);
107464b9ee20SAttilio Rao 			class->lc_assert(c->c_lock, LA_XLOCKED);
107598c926b2SIan Dowse 		}
107664b9ee20SAttilio Rao 	} else
107764b9ee20SAttilio Rao 		use_lock = 0;
10785b999a6bSDavide Italiano 	direct = (c->c_flags & CALLOUT_DIRECT) != 0;
107967b158d8SJohn Baldwin 	sq_locked = 0;
10801283e9cdSAttilio Rao 	old_cc = NULL;
108167b158d8SJohn Baldwin again:
10828d809d50SJeff Roberson 	cc = callout_lock(c);
10831283e9cdSAttilio Rao 
10841283e9cdSAttilio Rao 	/*
10851283e9cdSAttilio Rao 	 * If the callout was migrating while the callout cpu lock was
10861283e9cdSAttilio Rao 	 * dropped, just drop the sleepqueue lock and check the states
10871283e9cdSAttilio Rao 	 * again.
10881283e9cdSAttilio Rao 	 */
10891283e9cdSAttilio Rao 	if (sq_locked != 0 && cc != old_cc) {
10901283e9cdSAttilio Rao #ifdef SMP
10911283e9cdSAttilio Rao 		CC_UNLOCK(cc);
10925b999a6bSDavide Italiano 		sleepq_release(&old_cc->cc_exec_entity[direct].cc_waiting);
10931283e9cdSAttilio Rao 		sq_locked = 0;
10941283e9cdSAttilio Rao 		old_cc = NULL;
10951283e9cdSAttilio Rao 		goto again;
10961283e9cdSAttilio Rao #else
10971283e9cdSAttilio Rao 		panic("migration should not happen");
10981283e9cdSAttilio Rao #endif
10991283e9cdSAttilio Rao 	}
11001283e9cdSAttilio Rao 
1101acc8326dSGarrett Wollman 	/*
1102b36f4588SJohn Baldwin 	 * If the callout isn't pending, it's not on the queue, so
1103b36f4588SJohn Baldwin 	 * don't attempt to remove it from the queue.  We can try to
1104b36f4588SJohn Baldwin 	 * stop it by other means however.
1105acc8326dSGarrett Wollman 	 */
1106acc8326dSGarrett Wollman 	if (!(c->c_flags & CALLOUT_PENDING)) {
11079b8b58e0SJonathan Lemon 		c->c_flags &= ~CALLOUT_ACTIVE;
1108b36f4588SJohn Baldwin 
1109b36f4588SJohn Baldwin 		/*
1110b36f4588SJohn Baldwin 		 * If it wasn't on the queue and it isn't the current
1111b36f4588SJohn Baldwin 		 * callout, then we can't stop it, so just bail.
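		 * A zero return value tells the caller that the callout
		 * was not stopped by this call: it had either already
		 * finished running or was never scheduled.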
1112b36f4588SJohn Baldwin 		 */
11135b999a6bSDavide Italiano 		if (cc->cc_exec_entity[direct].cc_curr != c) {
111468a57ebfSGleb Smirnoff 			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
111568a57ebfSGleb Smirnoff 			    c, c->c_func, c->c_arg);
11168d809d50SJeff Roberson 			CC_UNLOCK(cc);
111767b158d8SJohn Baldwin 			if (sq_locked)
11185b999a6bSDavide Italiano 				sleepq_release(
11195b999a6bSDavide Italiano 				    &cc->cc_exec_entity[direct].cc_waiting);
112098c926b2SIan Dowse 			return (0);
112198c926b2SIan Dowse 		}
1122b36f4588SJohn Baldwin 
112398c926b2SIan Dowse 		if (safe) {
11242c1bb207SColin Percival 			/*
1125b36f4588SJohn Baldwin 			 * The current callout is running (or just
1126b36f4588SJohn Baldwin 			 * about to run) and blocking is allowed, so
1127b36f4588SJohn Baldwin 			 * just wait for the current invocation to
1128b36f4588SJohn Baldwin 			 * finish.
11292c1bb207SColin Percival 			 */
11305b999a6bSDavide Italiano 			while (cc->cc_exec_entity[direct].cc_curr == c) {
11316a0ce57dSAttilio Rao 				/*
11326a0ce57dSAttilio Rao 				 * Use direct calls to sleepqueue interface
11336a0ce57dSAttilio Rao 				 * instead of cv/msleep in order to avoid
11348d809d50SJeff Roberson 				 * a LOR between cc_lock and sleepqueue
11356a0ce57dSAttilio Rao 				 * chain spinlocks.  This piece of code
11366a0ce57dSAttilio Rao 				 * effectively emulates an msleep_spin()
				 * call.
113767b158d8SJohn Baldwin 				 *
113867b158d8SJohn Baldwin 				 * If we already have the sleepqueue chain
113967b158d8SJohn Baldwin 				 * locked, then we can safely block.  If we
114067b158d8SJohn Baldwin 				 * don't already have it locked, however,
11418d809d50SJeff Roberson 				 * we have to drop the cc_lock to lock
114267b158d8SJohn Baldwin 				 * it.  This opens several races, so we
114367b158d8SJohn Baldwin 				 * restart at the beginning once we have
114467b158d8SJohn Baldwin 				 * both locks.  If nothing has changed, then
114567b158d8SJohn Baldwin 				 * we will end up back here with sq_locked
114667b158d8SJohn Baldwin 				 * set.
11476a0ce57dSAttilio Rao 				 */
114867b158d8SJohn Baldwin 				if (!sq_locked) {
11498d809d50SJeff Roberson 					CC_UNLOCK(cc);
11505b999a6bSDavide Italiano 					sleepq_lock(
11515b999a6bSDavide Italiano 					    &cc->cc_exec_entity[direct].cc_waiting);
115267b158d8SJohn Baldwin 					sq_locked = 1;
11531283e9cdSAttilio Rao 					old_cc = cc;
115467b158d8SJohn Baldwin 					goto again;
11556a0ce57dSAttilio Rao 				}
11561283e9cdSAttilio Rao 
11571283e9cdSAttilio Rao 				/*
11581283e9cdSAttilio Rao 				 * Migration could be cancelled here, but
11591283e9cdSAttilio Rao 				 * since it is not yet known when the
11601283e9cdSAttilio Rao 				 * callout will be packed up, just let
11611283e9cdSAttilio Rao 				 * softclock() take care of it.
11621283e9cdSAttilio Rao 				 */
1163ac42a172SDavide Italiano 				cc->cc_exec_entity[direct].cc_waiting = true;
11646a0ce57dSAttilio Rao 				DROP_GIANT();
11658d809d50SJeff Roberson 				CC_UNLOCK(cc);
11665b999a6bSDavide Italiano 				sleepq_add(
11675b999a6bSDavide Italiano 				    &cc->cc_exec_entity[direct].cc_waiting,
11688d809d50SJeff Roberson 				    &cc->cc_lock.lock_object, "codrain",
11696a0ce57dSAttilio Rao 				    SLEEPQ_SLEEP, 0);
11705b999a6bSDavide Italiano 				sleepq_wait(
11715b999a6bSDavide Italiano 				    &cc->cc_exec_entity[direct].cc_waiting,
11725b999a6bSDavide Italiano 				    0);
117367b158d8SJohn Baldwin 				sq_locked = 0;
11741283e9cdSAttilio Rao 				old_cc = NULL;
11756a0ce57dSAttilio Rao 
11766a0ce57dSAttilio Rao 				/* Reacquire locks previously released.
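				 * Giant is picked up first, then the
				 * per-CPU callout lock.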
				 */
11776a0ce57dSAttilio Rao 				PICKUP_GIANT();
11788d809d50SJeff Roberson 				CC_LOCK(cc);
1179b36f4588SJohn Baldwin 			}
11805b999a6bSDavide Italiano 		} else if (use_lock &&
11815b999a6bSDavide Italiano 		    !cc->cc_exec_entity[direct].cc_cancel) {
1182b36f4588SJohn Baldwin 			/*
118364b9ee20SAttilio Rao 			 * The current callout is waiting for its
118464b9ee20SAttilio Rao 			 * lock which we hold.  Cancel the callout
1185b36f4588SJohn Baldwin 			 * and return.  After our caller drops the
118664b9ee20SAttilio Rao 			 * lock, the callout will be skipped in
1187b36f4588SJohn Baldwin 			 * softclock().
1188b36f4588SJohn Baldwin 			 */
1189ac42a172SDavide Italiano 			cc->cc_exec_entity[direct].cc_cancel = true;
119068a57ebfSGleb Smirnoff 			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
119168a57ebfSGleb Smirnoff 			    c, c->c_func, c->c_arg);
11925b999a6bSDavide Italiano 			KASSERT(!cc_cce_migrating(cc, direct),
11931283e9cdSAttilio Rao 			    ("callout wrongly scheduled for migration"));
11948d809d50SJeff Roberson 			CC_UNLOCK(cc);
119567b158d8SJohn Baldwin 			KASSERT(!sq_locked, ("sleepqueue chain locked"));
119698c926b2SIan Dowse 			return (1);
119757d07ca9SKonstantin Belousov 		} else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
119857d07ca9SKonstantin Belousov 			c->c_flags &= ~CALLOUT_DFRMIGRATION;
119957d07ca9SKonstantin Belousov 			CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
120057d07ca9SKonstantin Belousov 			    c, c->c_func, c->c_arg);
120157d07ca9SKonstantin Belousov 			CC_UNLOCK(cc);
120257d07ca9SKonstantin Belousov 			return (1);
1203b36f4588SJohn Baldwin 		}
120468a57ebfSGleb Smirnoff 		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
120568a57ebfSGleb Smirnoff 		    c, c->c_func, c->c_arg);
12068d809d50SJeff Roberson 		CC_UNLOCK(cc);
120767b158d8SJohn Baldwin 		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
1208a45982d2SJohn Baldwin 		return (0);
1209acc8326dSGarrett Wollman 	}
121067b158d8SJohn Baldwin 	if (sq_locked)
12115b999a6bSDavide Italiano 		sleepq_release(&cc->cc_exec_entity[direct].cc_waiting);
121267b158d8SJohn Baldwin 
12139b8b58e0SJonathan Lemon 	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
1214acc8326dSGarrett Wollman 
121568a57ebfSGleb Smirnoff 	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
121668a57ebfSGleb Smirnoff 	    c, c->c_func, c->c_arg);
12175b999a6bSDavide Italiano 	if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
12185b999a6bSDavide Italiano 		if (cc->cc_exec_next_dir == c)
12195b999a6bSDavide Italiano 			cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le);
12205b999a6bSDavide Italiano 		LIST_REMOVE(c, c_links.le);
12215b999a6bSDavide Italiano 	} else
12225b999a6bSDavide Italiano 		TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
12236098e7acSKonstantin Belousov 	callout_cc_del(c, cc);
122468a57ebfSGleb Smirnoff 
12258d809d50SJeff Roberson 	CC_UNLOCK(cc);
1226a45982d2SJohn Baldwin 	return (1);
1227acc8326dSGarrett Wollman }
1228acc8326dSGarrett Wollman 
/*
 * Initialize a callout.  A zero mpsafe argument makes the handler run
 * with Giant held; otherwise no lock is taken around the handler.
 */
1229acc8326dSGarrett Wollman void
1230e82ac18eSJonathan Lemon callout_init(struct callout *c, int mpsafe)
1233acc8326dSGarrett Wollman {
12347347e1c6SGarrett Wollman 	bzero(c, sizeof *c);
123598c926b2SIan Dowse 	if (mpsafe) {
123664b9ee20SAttilio Rao 		c->c_lock = NULL;
123798c926b2SIan Dowse 		c->c_flags = CALLOUT_RETURNUNLOCKED;
123898c926b2SIan Dowse 	} else {
123964b9ee20SAttilio Rao 		c->c_lock = &Giant.lock_object;
124098c926b2SIan Dowse 		c->c_flags = 0;
124198c926b2SIan Dowse 	}
12428d809d50SJeff Roberson 	c->c_cpu = timeout_cpu;
124398c926b2SIan Dowse }
124498c926b2SIan Dowse 
124598c926b2SIan Dowse void
124664b9ee20SAttilio Rao _callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
125098c926b2SIan Dowse {
125198c926b2SIan Dowse 	bzero(c, sizeof *c);
125264b9ee20SAttilio Rao 	c->c_lock = lock;
125364b9ee20SAttilio Rao 	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
125464b9ee20SAttilio Rao 	    ("callout_init_lock: bad flags %d", flags));
125564b9ee20SAttilio Rao 	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
125664b9ee20SAttilio Rao 	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
125713ddf72dSAttilio Rao 	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
125813ddf72dSAttilio Rao 	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
125964b9ee20SAttilio Rao 	    __func__));
126064b9ee20SAttilio Rao 	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
12618d809d50SJeff Roberson 	c->c_cpu = timeout_cpu;
1262acc8326dSGarrett Wollman }
1263acc8326dSGarrett Wollman 
1264e1d6dc65SNate Williams #ifdef APM_FIXUP_CALLTODO
1265e1d6dc65SNate Williams /*
1266e1d6dc65SNate Williams  * Adjust the kernel calltodo timeout list.  This routine is used after
1267e1d6dc65SNate Williams  * an APM resume to recalculate the calltodo timer list values with the
1268e1d6dc65SNate Williams  * number of hz's we have been sleeping.  The next hardclock() will detect
1269e1d6dc65SNate Williams  * that there are fired timers and run softclock() to execute them.
1270e1d6dc65SNate Williams  *
1271e1d6dc65SNate Williams  * Please note, I have not done an exhaustive analysis of what code this
1272e1d6dc65SNate Williams  * might break.  I am motivated to have my select()'s and alarm()'s that
1273e1d6dc65SNate Williams  * have expired during suspend firing upon resume so that the applications
1274e1d6dc65SNate Williams  * which set the timer can do the maintenance the timer was for as close
1275e1d6dc65SNate Williams  * as possible to the originally intended time.  Testing this code for a
1276e1d6dc65SNate Williams  * week showed that resuming from a suspend resulted in 22 to 25 timers
1277e1d6dc65SNate Williams  * firing, which seemed independent of whether the suspend was 2 hours or
1278e1d6dc65SNate Williams  * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
1279e1d6dc65SNate Williams  */
1280e1d6dc65SNate Williams void
1281e1d6dc65SNate Williams adjust_timeout_calltodo(struct timeval *time_change)
1283e1d6dc65SNate Williams {
1284e1d6dc65SNate Williams 	struct callout *p;
	struct callout_cpu *cc;
1285e1d6dc65SNate Williams 	unsigned long delta_ticks;
1286e1d6dc65SNate Williams 
1287e1d6dc65SNate Williams 	/*
1288e1d6dc65SNate Williams 	 * How many ticks were we asleep?
1289c8b47828SBruce Evans 	 * (stolen from tvtohz()).
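	 *
	 * For example, a 2.5 second suspend with hz = 1000 (tick = 1000 us)
	 * yields delta_ticks = (2 * 1000000 + 500000 + 999) / 1000 + 1 = 2501:
	 * the sleep interval rounded up to whole ticks, plus one tick of slop.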
1290e1d6dc65SNate Williams 	 */
1291e1d6dc65SNate Williams 
1292e1d6dc65SNate Williams 	/* Don't do anything for a negative time change. */
1293e1d6dc65SNate Williams 	if (time_change->tv_sec < 0)
1294e1d6dc65SNate Williams 		return;
1295e1d6dc65SNate Williams 	else if (time_change->tv_sec <= LONG_MAX / 1000000)
1296e1d6dc65SNate Williams 		delta_ticks = (time_change->tv_sec * 1000000 +
1297e1d6dc65SNate Williams 		    time_change->tv_usec + (tick - 1)) / tick + 1;
1298e1d6dc65SNate Williams 	else if (time_change->tv_sec <= LONG_MAX / hz)
1299e1d6dc65SNate Williams 		delta_ticks = time_change->tv_sec * hz +
1300e1d6dc65SNate Williams 		    (time_change->tv_usec + (tick - 1)) / tick + 1;
1301e1d6dc65SNate Williams 	else
1302e1d6dc65SNate Williams 		delta_ticks = LONG_MAX;
1303e1d6dc65SNate Williams 
1304e1d6dc65SNate Williams 	if (delta_ticks > INT_MAX)
1305e1d6dc65SNate Williams 		delta_ticks = INT_MAX;
1306e1d6dc65SNate Williams 
1307e1d6dc65SNate Williams 	/*
1308e1d6dc65SNate Williams 	 * Now rip through the timer calltodo list looking for timers
1309e1d6dc65SNate Williams 	 * to expire.
1310e1d6dc65SNate Williams 	 */
1311e1d6dc65SNate Williams 
1312e1d6dc65SNate Williams 	/* don't collide with softclock() */
	cc = CC_CPU(timeout_cpu);	/* the default callout wheel */
13138d809d50SJeff Roberson 	CC_LOCK(cc);
1314e1d6dc65SNate Williams 	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
1315e1d6dc65SNate Williams 		p->c_time -= delta_ticks;
1316e1d6dc65SNate Williams 
1317e1d6dc65SNate Williams 		/* Break if the timer had more time on it than delta_ticks */
1318e1d6dc65SNate Williams 		if (p->c_time > 0)
1319e1d6dc65SNate Williams 			break;
1320e1d6dc65SNate Williams 
1321e1d6dc65SNate Williams 		/* take back the ticks the timer didn't use (p->c_time <= 0) */
1322e1d6dc65SNate Williams 		delta_ticks = -p->c_time;
1323e1d6dc65SNate Williams 	}
13248d809d50SJeff Roberson 	CC_UNLOCK(cc);
1325e1d6dc65SNate Williams 
1326e1d6dc65SNate Williams 	return;
1327e1d6dc65SNate Williams }
1328e1d6dc65SNate Williams #endif /* APM_FIXUP_CALLTODO */
13295b999a6bSDavide Italiano 
/*
 * Return the bit position of sbt scaled by 1.5, i.e. the power-of-two
 * bucket that sbt rounds to; works for values wider than a long on
 * 32-bit systems.
 */
13305b999a6bSDavide Italiano static int
13315b999a6bSDavide Italiano flssbt(sbintime_t sbt)
13325b999a6bSDavide Italiano {
13335b999a6bSDavide Italiano 
13345b999a6bSDavide Italiano 	sbt += (uint64_t)sbt >> 1;
13355b999a6bSDavide Italiano 	if (sizeof(long) >= sizeof(sbintime_t))
13365b999a6bSDavide Italiano 		return (flsl(sbt));
13375b999a6bSDavide Italiano 	if (sbt >= SBT_1S)
13385b999a6bSDavide Italiano 		return (flsl(((uint64_t)sbt) >> 32) + 32);
13395b999a6bSDavide Italiano 	return (flsl(sbt));
13405b999a6bSDavide Italiano }
13415b999a6bSDavide Italiano 
13425b999a6bSDavide Italiano /*
13435b999a6bSDavide Italiano  * Dump immediate statistics snapshot of the scheduled callouts.
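 * The dump is triggered by writing any value to the kern.callout_stat
 * sysctl; the report goes to the console via printf().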
13445b999a6bSDavide Italiano  */
13455b999a6bSDavide Italiano static int
13465b999a6bSDavide Italiano sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
13475b999a6bSDavide Italiano {
13485b999a6bSDavide Italiano 	struct callout *tmp;
13495b999a6bSDavide Italiano 	struct callout_cpu *cc;
13505b999a6bSDavide Italiano 	struct callout_list *sc;
13515b999a6bSDavide Italiano 	sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
13525b999a6bSDavide Italiano 	int ct[64], cpr[64], ccpbk[32];
13535b999a6bSDavide Italiano 	int error, val, i, count, tcum, pcum, maxc, c, medc;
13545b999a6bSDavide Italiano #ifdef SMP
13555b999a6bSDavide Italiano 	int cpu;
13565b999a6bSDavide Italiano #endif
13575b999a6bSDavide Italiano 
13585b999a6bSDavide Italiano 	val = 0;
13595b999a6bSDavide Italiano 	error = sysctl_handle_int(oidp, &val, 0, req);
13605b999a6bSDavide Italiano 	if (error != 0 || req->newptr == NULL)
13615b999a6bSDavide Italiano 		return (error);
13625b999a6bSDavide Italiano 	count = maxc = 0;
13635b999a6bSDavide Italiano 	st = spr = maxt = maxpr = 0;
13645b999a6bSDavide Italiano 	bzero(ccpbk, sizeof(ccpbk));
13655b999a6bSDavide Italiano 	bzero(ct, sizeof(ct));
13665b999a6bSDavide Italiano 	bzero(cpr, sizeof(cpr));
13675b999a6bSDavide Italiano 	now = sbinuptime();
13685b999a6bSDavide Italiano #ifdef SMP
13695b999a6bSDavide Italiano 	CPU_FOREACH(cpu) {
13705b999a6bSDavide Italiano 		cc = CC_CPU(cpu);
13715b999a6bSDavide Italiano #else
13725b999a6bSDavide Italiano 	cc = CC_CPU(timeout_cpu);
13735b999a6bSDavide Italiano #endif
13745b999a6bSDavide Italiano 	CC_LOCK(cc);
13755b999a6bSDavide Italiano 	for (i = 0; i < callwheelsize; i++) {
13765b999a6bSDavide Italiano 		sc = &cc->cc_callwheel[i];
13775b999a6bSDavide Italiano 		c = 0;
13785b999a6bSDavide Italiano 		LIST_FOREACH(tmp, sc, c_links.le) {
13795b999a6bSDavide Italiano 			c++;
13805b999a6bSDavide Italiano 			t = tmp->c_time - now;
13815b999a6bSDavide Italiano 			if (t < 0)
13825b999a6bSDavide Italiano 				t = 0;
13835b999a6bSDavide Italiano 			st += t / SBT_1US;
13845b999a6bSDavide Italiano 			spr += tmp->c_precision / SBT_1US;
13855b999a6bSDavide Italiano 			if (t > maxt)
13865b999a6bSDavide Italiano 				maxt = t;
13875b999a6bSDavide Italiano 			if (tmp->c_precision > maxpr)
13885b999a6bSDavide Italiano 				maxpr = tmp->c_precision;
13895b999a6bSDavide Italiano 			ct[flssbt(t)]++;
13905b999a6bSDavide Italiano 			cpr[flssbt(tmp->c_precision)]++;
13915b999a6bSDavide Italiano 		}
13925b999a6bSDavide Italiano 		if (c > maxc)
13935b999a6bSDavide Italiano 			maxc = c;
13945b999a6bSDavide Italiano 		ccpbk[fls(c + c / 2)]++;
13955b999a6bSDavide Italiano 		count += c;
13965b999a6bSDavide Italiano 	}
13975b999a6bSDavide Italiano 	CC_UNLOCK(cc);
13985b999a6bSDavide Italiano #ifdef SMP
13995b999a6bSDavide Italiano 	}
14005b999a6bSDavide Italiano #endif
14015b999a6bSDavide Italiano 
14025b999a6bSDavide Italiano 	for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
14035b999a6bSDavide Italiano 		tcum += ct[i];
14045b999a6bSDavide Italiano 	medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
14055b999a6bSDavide Italiano 	for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
14065b999a6bSDavide Italiano 		pcum += cpr[i];
14075b999a6bSDavide Italiano 	medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
14085b999a6bSDavide Italiano 	for (i = 0, c = 0; i < 32 && c < count / 2; i++)
14095b999a6bSDavide Italiano 		c += ccpbk[i];
14105b999a6bSDavide Italiano 	medc = (i >= 2) ?
	    (1 << (i - 2)) : 0;
14115b999a6bSDavide Italiano 
14125b999a6bSDavide Italiano 	printf("Scheduled callouts statistics snapshot:\n");
14135b999a6bSDavide Italiano 	printf(" Callouts: %6d Buckets: %6d*%-3d Bucket size: 0.%06ds\n",
14145b999a6bSDavide Italiano 	    count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
14155b999a6bSDavide Italiano 	printf(" C/Bk: med %5d avg %6d.%06jd max %6d\n",
14165b999a6bSDavide Italiano 	    medc,
14175b999a6bSDavide Italiano 	    count / callwheelsize / mp_ncpus,
14185b999a6bSDavide Italiano 	    (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
14195b999a6bSDavide Italiano 	    maxc);
14205b999a6bSDavide Italiano 	printf(" Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
14215b999a6bSDavide Italiano 	    medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
14225b999a6bSDavide Italiano 	    (st / count) / 1000000, (st / count) % 1000000,
14235b999a6bSDavide Italiano 	    maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
14245b999a6bSDavide Italiano 	printf(" Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
14255b999a6bSDavide Italiano 	    medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
14265b999a6bSDavide Italiano 	    (spr / count) / 1000000, (spr / count) % 1000000,
14275b999a6bSDavide Italiano 	    maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
14285b999a6bSDavide Italiano 	printf(" Distribution: \tbuckets\t time\t tcum\t"
14295b999a6bSDavide Italiano 	    " prec\t pcum\n");
14305b999a6bSDavide Italiano 	for (i = 0, tcum = pcum = 0; i < 64; i++) {
14315b999a6bSDavide Italiano 		if (ct[i] == 0 && cpr[i] == 0)
14325b999a6bSDavide Italiano 			continue;
14335b999a6bSDavide Italiano 		t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
14345b999a6bSDavide Italiano 		tcum += ct[i];
14355b999a6bSDavide Italiano 		pcum += cpr[i];
14365b999a6bSDavide Italiano 		printf(" %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
14375b999a6bSDavide Italiano 		    t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
14385b999a6bSDavide Italiano 		    i - 1 - (32 - CC_HASH_SHIFT),
14395b999a6bSDavide Italiano 		    ct[i], tcum, cpr[i], pcum);
14405b999a6bSDavide Italiano 	}
14415b999a6bSDavide Italiano 	return (error);
14425b999a6bSDavide Italiano }
14435b999a6bSDavide Italiano SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
14445b999a6bSDavide Italiano     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
14455b999a6bSDavide Italiano     0, 0, sysctl_kern_callout_stat, "I",
14465b999a6bSDavide Italiano     "Dump immediate statistics snapshot of the scheduled callouts");
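/*
 * A minimal usage sketch of the callout API implemented above, using the
 * public wrappers callout_init_mtx(), callout_reset() and callout_drain()
 * from sys/callout.h.  Kept under "#if 0" so it is never compiled; the
 * example_* names and the one-second period are illustrative only and do
 * not appear elsewhere in this file.
 */
#if 0
static struct mtx example_mtx;
static struct callout example_ch;

static void
example_tick(void *arg)
{

	/* example_mtx is held on entry because of callout_init_mtx(). */
	callout_schedule(&example_ch, hz);	/* re-arm one second out */
}

static void
example_start(void)
{

	mtx_init(&example_mtx, "example", NULL, MTX_DEF);
	callout_init_mtx(&example_ch, &example_mtx, 0);
	mtx_lock(&example_mtx);
	callout_reset(&example_ch, hz, example_tick, NULL);
	mtx_unlock(&example_mtx);
}

static void
example_stop(void)
{

	/* callout_drain() may sleep until a running handler finishes. */
	callout_drain(&example_ch);
}
#endif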