/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_callout_profiling.h"
#include "opt_ddb.h"
#if defined(__arm__)
#include "opt_timer.h"
#endif
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/file.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef DDB
#include <ddb/ddb.h>
#include <machine/_inttypes.h>
#endif

#ifdef SMP
#include <machine/cpu.h>
#endif

#ifndef NO_EVENTTIMERS
DPCPU_DECLARE(sbintime_t, hardclocktime);
#endif

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *");
SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *");

#ifdef CALLOUT_PROFILING
static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
static int avg_depth_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
    "Average number of direct callouts examined per callout_process call. "
    "Units = 1/1000");
static int avg_lockcalls_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
    &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
    "callout_process call. Units = 1/1000");
static int avg_mpcalls_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
    0, "Average number of MP direct callouts made per callout_process call. "
    "Units = 1/1000");
#endif

static int ncallout;
SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &ncallout, 0,
    "Number of entries in callwheel and size of timeout() preallocation");

#ifdef RSS
static int pin_default_swi = 1;
static int pin_pcpu_swi = 1;
#else
static int pin_default_swi = 0;
static int pin_pcpu_swi = 0;
#endif

SYSCTL_INT(_kern, OID_AUTO, pin_default_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_default_swi,
    0, "Pin the default (non-per-cpu) swi (shared with PCPU 0 swi)");
SYSCTL_INT(_kern, OID_AUTO, pin_pcpu_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_pcpu_swi,
    0, "Pin the per-CPU swis (except PCPU 0, which is also the default)");

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
u_int callwheelsize, callwheelmask;

/*
 * The callout cpu exec entities represent the information necessary for
 * describing the state of callouts currently running on the CPU and the
 * information necessary for migrating callouts to a new callout cpu. In
 * particular, the first entry of the array cc_exec_entity holds the state
 * for a callout running in SWI thread context, while the second one holds
 * the state for a callout running directly from hardware interrupt context.
 * The cached information is very important for deferring migration when
 * the migrating callout is already running.
 */
struct cc_exec {
	struct callout		*cc_curr;
	void			(*cc_drain)(void *);
#ifdef SMP
	void			(*ce_migration_func)(void *);
	void			*ce_migration_arg;
	int			ce_migration_cpu;
	sbintime_t		ce_migration_time;
	sbintime_t		ce_migration_prec;
#endif
	bool			cc_cancel;
	bool			cc_waiting;
};

/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 */
struct callout_cpu {
	struct mtx_padalign	cc_lock;
	struct cc_exec		cc_exec_entity[2];
	struct callout		*cc_next;
	struct callout		*cc_callout;
	struct callout_list	*cc_callwheel;
	struct callout_tailq	cc_expireq;
	struct callout_slist	cc_callfree;
	sbintime_t		cc_firstevent;
	sbintime_t		cc_lastscan;
	void			*cc_cookie;
	u_int			cc_bucket;
	u_int			cc_inited;
	char			cc_ktr_event_name[20];
};

#define	callout_migrating(c)	((c)->c_iflags & CALLOUT_DFRMIGRATION)

#define	cc_exec_curr(cc, dir)		cc->cc_exec_entity[dir].cc_curr
#define	cc_exec_drain(cc, dir)		cc->cc_exec_entity[dir].cc_drain
#define	cc_exec_next(cc)		cc->cc_next
#define	cc_exec_cancel(cc, dir)		cc->cc_exec_entity[dir].cc_cancel
#define	cc_exec_waiting(cc, dir)	cc->cc_exec_entity[dir].cc_waiting
#ifdef SMP
#define	cc_migration_func(cc, dir)	cc->cc_exec_entity[dir].ce_migration_func
#define	cc_migration_arg(cc, dir)	cc->cc_exec_entity[dir].ce_migration_arg
#define	cc_migration_cpu(cc, dir)	cc->cc_exec_entity[dir].ce_migration_cpu
#define	cc_migration_time(cc, dir)	cc->cc_exec_entity[dir].ce_migration_time
#define	cc_migration_prec(cc, dir)	cc->cc_exec_entity[dir].ce_migration_prec

struct callout_cpu cc_cpu[MAXCPU];
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)

static int timeout_cpu;

static void	callout_cpu_init(struct callout_cpu *cc, int cpu);
static void	softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
		    int *mpcalls, int *lockcalls, int *gcalls,
#endif
		    int direct);

static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is cc_curr.
 *                     If cc_curr is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to 1 with both callout_lock and cc_lock held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops callout_lock to acquire c_lock, and it calls
 *                     the handler only if cc_cancel is still 0 after
 *                     cc_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     cc_waiting is nonzero.  Set only when
 *                     cc_curr is non-NULL.
 */

/*
 * Resets the execution entity tied to a specific callout cpu.
 */
static void
cc_cce_cleanup(struct callout_cpu *cc, int direct)
{

	cc_exec_curr(cc, direct) = NULL;
	cc_exec_cancel(cc, direct) = false;
	cc_exec_waiting(cc, direct) = false;
#ifdef SMP
	cc_migration_cpu(cc, direct) = CPUBLOCK;
	cc_migration_time(cc, direct) = 0;
	cc_migration_prec(cc, direct) = 0;
	cc_migration_func(cc, direct) = NULL;
	cc_migration_arg(cc, direct) = NULL;
#endif
}

/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cce_migrating(struct callout_cpu *cc, int direct)
{

#ifdef SMP
	return (cc_migration_cpu(cc, direct) != CPUBLOCK);
#else
	return (0);
#endif
}

/*
 * Kernel low level callwheel initialization
 * called on cpu0 during kernel startup.
 */
static void
callout_callwheel_init(void *dummy)
{
	struct callout_cpu *cc;

	/*
	 * Calculate the size of the callout wheel and the preallocated
	 * timeout() structures.
	 * XXX: Clip callout to the result of the previous maxusers-based
	 * formula at its maximum of 384 users.  This is still huge, but
	 * acceptable.
	 */
	memset(CC_CPU(0), 0, sizeof(cc_cpu));
	ncallout = imin(16 + maxproc + maxfiles, 18508);
	TUNABLE_INT_FETCH("kern.ncallout", &ncallout);

	/*
	 * Calculate callout wheel size, should be next power of two higher
	 * than 'ncallout'.
	 */
	callwheelsize = 1 << fls(ncallout);
	callwheelmask = callwheelsize - 1;

	/*
	 * Fetch whether we're pinning the swi's or not.
	 */
	TUNABLE_INT_FETCH("kern.pin_default_swi", &pin_default_swi);
	TUNABLE_INT_FETCH("kern.pin_pcpu_swi", &pin_pcpu_swi);

	/*
	 * Only cpu0 handles timeout(9) and receives a preallocation.
	 *
	 * XXX: Once all timeout(9) consumers are converted this can
	 * be removed.
	 */
	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	cc->cc_callout = malloc(ncallout * sizeof(struct callout),
	    M_CALLOUT, M_WAITOK);
	callout_cpu_init(cc, timeout_cpu);
}
SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);

/*
 * Initialize the per-cpu callout structures.
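 * Called for cpu0 from callout_callwheel_init() at boot and for each
 * remaining CPU from start_softclock().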
 */
static void
callout_cpu_init(struct callout_cpu *cc, int cpu)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	cc->cc_inited = 1;
	cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize,
	    M_CALLOUT, M_WAITOK);
	for (i = 0; i < callwheelsize; i++)
		LIST_INIT(&cc->cc_callwheel[i]);
	TAILQ_INIT(&cc->cc_expireq);
	cc->cc_firstevent = SBT_MAX;
	for (i = 0; i < 2; i++)
		cc_cce_cleanup(cc, i);
	snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
	    "callwheel cpu %d", cpu);
	if (cc->cc_callout == NULL)	/* Only cpu0 handles timeout(9) */
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_iflags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects a locked incoming callout cpu and returns with
 * the outgoing callout cpu locked.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	/*
	 * Avoid interrupts and preemption firing after the callout cpu
	 * is blocked in order to avoid deadlocks as the new thread
	 * may be willing to acquire the callout cpu lock.
	 */
	c->c_cpu = CPUBLOCK;
	spinlock_enter();
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	spinlock_exit();
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif

/*
 * Start standard softclock thread.
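 * Under SMP one softclock SWI is created per CPU; each may optionally be
 * pinned to its CPU via the kern.pin_default_swi and kern.pin_pcpu_swi
 * tunables.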
 */
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
	char name[MAXCOMLEN];
#ifdef SMP
	int cpu;
	struct intr_event *ie;
#endif

	cc = CC_CPU(timeout_cpu);
	snprintf(name, sizeof(name), "clock (%d)", timeout_cpu);
	if (swi_add(&clk_intr_event, name, softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &cc->cc_cookie))
		panic("died while creating standard software ithreads");
	if (pin_default_swi &&
	    (intr_event_bind(clk_intr_event, timeout_cpu) != 0)) {
		printf("%s: timeout clock couldn't be pinned to cpu %d\n",
		    __func__, timeout_cpu);
	}

#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(9). */
		callout_cpu_init(cc, cpu);
		snprintf(name, sizeof(name), "clock (%d)", cpu);
		ie = NULL;
		if (swi_add(&ie, name, softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		if (pin_pcpu_swi && (intr_event_bind(ie, cpu) != 0)) {
			printf("%s: per-cpu clock couldn't be pinned to "
			    "cpu %d\n", __func__, cpu);
		}
	}
#endif
}
SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

#define	CC_HASH_SHIFT	8

static inline u_int
callout_hash(sbintime_t sbt)
{

	return (sbt >> (32 - CC_HASH_SHIFT));
}

static inline u_int
callout_get_bucket(sbintime_t sbt)
{

	return (callout_hash(sbt) & callwheelmask);
}

void
callout_process(sbintime_t now)
{
	struct callout *tmp, *tmpn;
	struct callout_cpu *cc;
	struct callout_list *sc;
	sbintime_t first, last, max, tmp_max;
	uint32_t lookahead;
	u_int firstb, lastb, nowb;
#ifdef CALLOUT_PROFILING
	int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
#endif

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);

	/* Compute the buckets of the last scan and present times. */
	firstb = callout_hash(cc->cc_lastscan);
	cc->cc_lastscan = now;
	nowb = callout_hash(now);

	/* Compute the last bucket and minimum time of the bucket after it. */
	if (nowb == firstb)
		lookahead = (SBT_1S / 16);
	else if (nowb - firstb == 1)
		lookahead = (SBT_1S / 8);
	else
		lookahead = (SBT_1S / 2);
	first = last = now;
	first += (lookahead / 2);
	last += lookahead;
	last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
	lastb = callout_hash(last) - 1;
	max = last;

	/*
	 * Check if we wrapped around the entire wheel from the last scan.
	 * If so, we need to scan the entire wheel for pending callouts.
	 */
	if (lastb - firstb >= callwheelsize) {
		lastb = firstb + callwheelsize - 1;
		if (nowb - firstb >= callwheelsize)
			nowb = lastb;
	}

	/* Iterate callwheel from firstb to nowb and then up to lastb. */
	do {
		sc = &cc->cc_callwheel[firstb & callwheelmask];
		tmp = LIST_FIRST(sc);
		while (tmp != NULL) {
			/* Run the callout if the present time allows it. */
			if (tmp->c_time <= now) {
				/*
				 * Consumer told us the callout may be run
				 * directly from hardware interrupt context.
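				 * (C_DIRECT_EXEC).  Such a handler must
				 * never sleep, since it does not run from
				 * the softclock SWI thread.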
				 */
				if (tmp->c_iflags & CALLOUT_DIRECT) {
#ifdef CALLOUT_PROFILING
					++depth_dir;
#endif
					cc_exec_next(cc) =
					    LIST_NEXT(tmp, c_links.le);
					cc->cc_bucket = firstb & callwheelmask;
					LIST_REMOVE(tmp, c_links.le);
					softclock_call_cc(tmp, cc,
#ifdef CALLOUT_PROFILING
					    &mpcalls_dir, &lockcalls_dir, NULL,
#endif
					    1);
					tmp = cc_exec_next(cc);
					cc_exec_next(cc) = NULL;
				} else {
					tmpn = LIST_NEXT(tmp, c_links.le);
					LIST_REMOVE(tmp, c_links.le);
					TAILQ_INSERT_TAIL(&cc->cc_expireq,
					    tmp, c_links.tqe);
					tmp->c_iflags |= CALLOUT_PROCESSED;
					tmp = tmpn;
				}
				continue;
			}
			/* Skip events from the distant future. */
			if (tmp->c_time >= max)
				goto next;
			/*
			 * The event's minimal time is later than the present
			 * maximal time, so it cannot be aggregated.
			 */
			if (tmp->c_time > last) {
				lastb = nowb;
				goto next;
			}
			/* Update first and last time, respecting this event. */
			if (tmp->c_time < first)
				first = tmp->c_time;
			tmp_max = tmp->c_time + tmp->c_precision;
			if (tmp_max < last)
				last = tmp_max;
next:
			tmp = LIST_NEXT(tmp, c_links.le);
		}
		/* Proceed with the next bucket. */
		firstb++;
		/*
		 * Stop if we looked past the present time and found
		 * some event we cannot execute yet.
		 * Stop if we looked far enough into the future.
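		 * The signed cast in the comparison below keeps the test
		 * correct even when the unsigned bucket counters wrap.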
		 */
	} while (((int)(firstb - lastb)) <= 0);
	cc->cc_firstevent = last;
#ifndef NO_EVENTTIMERS
	cpu_new_callout(curcpu, last, first);
#endif
#ifdef CALLOUT_PROFILING
	avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
	avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
	avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
#endif
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (!TAILQ_EMPTY(&cc->cc_expireq))
		swi_sched(cc->cc_cookie, 0);
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

static void
callout_cc_add(struct callout *c, struct callout_cpu *cc,
    sbintime_t sbt, sbintime_t precision, void (*func)(void *),
    void *arg, int cpu, int flags)
{
	int bucket;

	CC_LOCK_ASSERT(cc);
	if (sbt < cc->cc_lastscan)
		sbt = cc->cc_lastscan;
	c->c_arg = arg;
	c->c_iflags |= CALLOUT_PENDING;
	c->c_iflags &= ~CALLOUT_PROCESSED;
	c->c_flags |= CALLOUT_ACTIVE;
	if (flags & C_DIRECT_EXEC)
		c->c_iflags |= CALLOUT_DIRECT;
	c->c_func = func;
	c->c_time = sbt;
	c->c_precision = precision;
	bucket = callout_get_bucket(c->c_time);
	CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
	    c, (int)(c->c_precision >> 32),
	    (u_int)(c->c_precision & 0xffffffff));
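	/*
	 * Illustrative bucket math: with CC_HASH_SHIFT == 8 the hash is
	 * sbt >> 24, so each bucket of the 32.32 fixed-point sbintime_t
	 * spans 2^24 fractional units, i.e. about 1/256 of a second.
	 */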
	LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
	if (cc->cc_bucket == bucket)
		cc_exec_next(cc) = c;
#ifndef NO_EVENTTIMERS
	/*
	 * Inform the eventtimers(4) subsystem there's a new callout
	 * that has been inserted, but only if really required.
	 */
	if (SBT_MAX - c->c_time < c->c_precision)
		c->c_precision = SBT_MAX - c->c_time;
	sbt = c->c_time + c->c_precision;
	if (sbt < cc->cc_firstevent) {
		cc->cc_firstevent = sbt;
		cpu_new_callout(cpu, sbt, c->c_time);
	}
#endif
}

static void
callout_cc_del(struct callout *c, struct callout_cpu *cc)
{

	if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) == 0)
		return;
	c->c_func = NULL;
	SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
}

static void
softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
    int *mpcalls, int *lockcalls, int *gcalls,
#endif
    int direct)
{
	struct rm_priotracker tracker;
	void (*c_func)(void *);
	void *c_arg;
	struct lock_class *class;
	struct lock_object *c_lock;
	uintptr_t lock_status;
	int c_iflags;
#ifdef SMP
	struct callout_cpu *new_cc;
	void (*new_func)(void *);
	void *new_arg;
	int flags, new_cpu;
	sbintime_t new_prec, new_time;
#endif
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbintime_t sbt1, sbt2;
	struct timespec ts2;
	static sbintime_t maxdt = 2 * SBT_1MS;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

	KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING,
	    ("softclock_call_cc: pend %p %x", c, c->c_iflags));
	KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE,
	    ("softclock_call_cc: act %p %x", c, c->c_flags));
	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
	lock_status = 0;
	if (c->c_flags & CALLOUT_SHAREDLOCK) {
		if (class == &lock_class_rm)
			lock_status = (uintptr_t)&tracker;
		else
			lock_status = 1;
	}
	c_lock = c->c_lock;
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_iflags = c->c_iflags;
	if (c->c_iflags & CALLOUT_LOCAL_ALLOC)
		c->c_iflags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_iflags &= ~CALLOUT_PENDING;

	cc_exec_curr(cc, direct) = c;
	cc_exec_cancel(cc, direct) = false;
	cc_exec_drain(cc, direct) = NULL;
	CC_UNLOCK(cc);
	if (c_lock != NULL) {
		class->lc_lock(c_lock, lock_status);
		/*
		 * The callout may have been cancelled
		 * while we switched locks.
		 */
		if (cc_exec_cancel(cc, direct)) {
			class->lc_unlock(c_lock);
			goto skip;
		}
		/* The callout cannot be stopped now. */
		cc_exec_cancel(cc, direct) = true;
		if (c_lock == &Giant.lock_object) {
#ifdef CALLOUT_PROFILING
			(*gcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
			    c, c_func, c_arg);
		} else {
#ifdef CALLOUT_PROFILING
			(*lockcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
			    c, c_func, c_arg);
		}
	} else {
#ifdef CALLOUT_PROFILING
		(*mpcalls)++;
#endif
		CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
		    c, c_func, c_arg);
	}
	KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
	    "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbt1 = sbinuptime();
#endif
	THREAD_NO_SLEEPING();
	SDT_PROBE1(callout_execute, , , callout__start, c);
	c_func(c_arg);
	SDT_PROBE1(callout_execute, , , callout__end, c);
	THREAD_SLEEPING_OK();
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbt2 = sbinuptime();
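	/* Report handlers whose runtime exceeds the running maximum, maxdt. */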
	sbt2 -= sbt1;
	if (sbt2 > maxdt) {
		if (lastfunc != c_func || sbt2 > maxdt * 2) {
			ts2 = sbttots(sbt2);
			printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = sbt2;
		lastfunc = c_func;
	}
#endif
	KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
	CTR1(KTR_CALLOUT, "callout %p finished", c);
	if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0)
		class->lc_unlock(c_lock);
skip:
	CC_LOCK(cc);
	KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
	cc_exec_curr(cc, direct) = NULL;
	if (cc_exec_drain(cc, direct)) {
		void (*drain)(void *);

		drain = cc_exec_drain(cc, direct);
		cc_exec_drain(cc, direct) = NULL;
		CC_UNLOCK(cc);
		drain(c_arg);
		CC_LOCK(cc);
	}
	if (cc_exec_waiting(cc, direct)) {
		/*
		 * There is someone waiting for the
		 * callout to complete.
		 * If the callout was scheduled for
		 * migration just cancel it.
		 */
		if (cc_cce_migrating(cc, direct)) {
			cc_cce_cleanup(cc, direct);

			/*
			 * We would like to assert here that the callout is
			 * not destroyed, but that is not easy.
			 */
			c->c_iflags &= ~CALLOUT_DFRMIGRATION;
		}
		cc_exec_waiting(cc, direct) = false;
		CC_UNLOCK(cc);
		wakeup(&cc_exec_waiting(cc, direct));
		CC_LOCK(cc);
	} else if (cc_cce_migrating(cc, direct)) {
		KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0,
		    ("Migrating legacy callout %p", c));
#ifdef SMP
		/*
		 * If the callout was scheduled for
		 * migration just perform it now.
		 */
		new_cpu = cc_migration_cpu(cc, direct);
		new_time = cc_migration_time(cc, direct);
		new_prec = cc_migration_prec(cc, direct);
		new_func = cc_migration_func(cc, direct);
		new_arg = cc_migration_arg(cc, direct);
		cc_cce_cleanup(cc, direct);

		/*
		 * We would like to assert here that the callout is not
		 * destroyed, but that is not easy.
		 *
		 * As a first step, handle deferred callout stops.
		 */
		if (!callout_migrating(c)) {
			CTR3(KTR_CALLOUT,
			    "deferred cancelled %p func %p arg %p",
			    c, new_func, new_arg);
			callout_cc_del(c, cc);
			return;
		}
		c->c_iflags &= ~CALLOUT_DFRMIGRATION;

		new_cc = callout_cpu_switch(c, cc, new_cpu);
		flags = (direct) ? C_DIRECT_EXEC : 0;
		callout_cc_add(c, new_cc, new_time, new_prec, new_func,
		    new_arg, new_cpu, flags);
		CC_UNLOCK(new_cc);
		CC_LOCK(cc);
#else
		panic("migration should not happen");
#endif
	}
	/*
	 * If the current callout is locally allocated (from
	 * timeout(9)) then put it on the freelist.
	 *
	 * Note: we need to check the cached copy of c_iflags because
	 * if it was not local, then it's not safe to deref the
	 * callout pointer.
	 */
	KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0 ||
	    c->c_iflags == CALLOUT_LOCAL_ALLOC,
	    ("corrupted callout"));
	if (c_iflags & CALLOUT_LOCAL_ALLOC)
		callout_cc_del(c, cc);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
#ifdef CALLOUT_PROFILING
	int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
#endif

	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
		TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
		softclock_call_cc(c, cc,
#ifdef CALLOUT_PROFILING
		    &mpcalls, &lockcalls, &gcalls,
#endif
		    0);
#ifdef CALLOUT_PROFILING
		++depth;
#endif
	}
#ifdef CALLOUT_PROFILING
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
#endif
	CC_UNLOCK(cc);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(timeout_t *ftn, void *arg, int to_ticks)
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}

void
untimeout(timeout_t *ftn, void *arg, struct callout_handle handle)
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

void
callout_when(sbintime_t sbt, sbintime_t precision, int flags,
    sbintime_t *res, sbintime_t *prec_res)
{
	sbintime_t to_sbt, to_pr;

	if ((flags & (C_ABSOLUTE | C_PRECALC)) != 0) {
		*res = sbt;
		*prec_res = precision;
		return;
	}
	if ((flags & C_HARDCLOCK) != 0 && sbt < tick_sbt)
		sbt = tick_sbt;
	if ((flags & C_HARDCLOCK) != 0 ||
#ifdef NO_EVENTTIMERS
	    sbt >= sbt_timethreshold) {
		to_sbt = getsbinuptime();

		/* Add safety belt for the case of hz > 1000. */
		to_sbt += tc_tick_sbt - tick_sbt;
#else
	    sbt >= sbt_tickthreshold) {
		/*
		 * Obtain the time of the last hardclock() call on
		 * this CPU directly from kern_clocksource.c.
		 * This value is per-CPU, but it is equal for all
		 * active ones.
		 */
#ifdef __LP64__
		to_sbt = DPCPU_GET(hardclocktime);
#else
		spinlock_enter();
		to_sbt = DPCPU_GET(hardclocktime);
		spinlock_exit();
#endif
#endif
		if (cold && to_sbt == 0)
			to_sbt = sbinuptime();
		if ((flags & C_HARDCLOCK) == 0)
			to_sbt += tick_sbt;
	} else
		to_sbt = sbinuptime();
	if (SBT_MAX - to_sbt < sbt)
		to_sbt = SBT_MAX;
	else
		to_sbt += sbt;
	*res = to_sbt;
	to_pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
	    sbt >> C_PRELGET(flags));
	*prec_res = to_pr > precision ? to_pr : precision;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
1015a9e182e8SKonstantin Belousov * callout_pending() - returns truth if callout is still waiting for timeout 1016a9e182e8SKonstantin Belousov * callout_deactivate() - marks the callout as having been serviced 1017a9e182e8SKonstantin Belousov */ 1018a9e182e8SKonstantin Belousov int 1019a9e182e8SKonstantin Belousov callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec, 1020a9e182e8SKonstantin Belousov void (*ftn)(void *), void *arg, int cpu, int flags) 1021a9e182e8SKonstantin Belousov { 1022a9e182e8SKonstantin Belousov sbintime_t to_sbt, precision; 1023a9e182e8SKonstantin Belousov struct callout_cpu *cc; 1024a9e182e8SKonstantin Belousov int cancelled, direct; 1025a9e182e8SKonstantin Belousov int ignore_cpu=0; 1026a9e182e8SKonstantin Belousov 1027a9e182e8SKonstantin Belousov cancelled = 0; 1028a9e182e8SKonstantin Belousov if (cpu == -1) { 1029a9e182e8SKonstantin Belousov ignore_cpu = 1; 1030a9e182e8SKonstantin Belousov } else if ((cpu >= MAXCPU) || 1031a9e182e8SKonstantin Belousov ((CC_CPU(cpu))->cc_inited == 0)) { 1032a9e182e8SKonstantin Belousov /* Invalid CPU spec */ 1033a9e182e8SKonstantin Belousov panic("Invalid CPU in callout %d", cpu); 1034a9e182e8SKonstantin Belousov } 1035a9e182e8SKonstantin Belousov callout_when(sbt, prec, flags, &to_sbt, &precision); 1036a9e182e8SKonstantin Belousov 1037a115fb62SHans Petter Selasky /* 103866525b2dSRandall Stewart * This flag used to be added by callout_cc_add, but the 103966525b2dSRandall Stewart * first time you call this we could end up with the 104066525b2dSRandall Stewart * wrong direct flag if we don't do it before we add. 104166525b2dSRandall Stewart */ 104266525b2dSRandall Stewart if (flags & C_DIRECT_EXEC) { 104315b1eb14SRandall Stewart direct = 1; 104415b1eb14SRandall Stewart } else { 104515b1eb14SRandall Stewart direct = 0; 104666525b2dSRandall Stewart } 1047a115fb62SHans Petter Selasky KASSERT(!direct || c->c_lock == NULL, 1048a115fb62SHans Petter Selasky ("%s: direct callout %p has lock", __func__, c)); 1049a115fb62SHans Petter Selasky cc = callout_lock(c); 105015b1eb14SRandall Stewart /* 105115b1eb14SRandall Stewart * Don't allow migration of pre-allocated callouts lest they 105215b1eb14SRandall Stewart * become unbalanced or handle the case where the user does 105315b1eb14SRandall Stewart * not care. 105415b1eb14SRandall Stewart */ 105515b1eb14SRandall Stewart if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) || 105615b1eb14SRandall Stewart ignore_cpu) { 105715b1eb14SRandall Stewart cpu = c->c_cpu; 105815b1eb14SRandall Stewart } 105915b1eb14SRandall Stewart 1060d2854fa4SRandall Stewart if (cc_exec_curr(cc, direct) == c) { 1061a115fb62SHans Petter Selasky /* 1062a115fb62SHans Petter Selasky * We're being asked to reschedule a callout which is 1063a115fb62SHans Petter Selasky * currently in progress. If there is a lock then we 1064a115fb62SHans Petter Selasky * can cancel the callout if it has not really started. 1065a115fb62SHans Petter Selasky */ 1066378d5c6cSAndriy Gapon if (c->c_lock != NULL && !cc_exec_cancel(cc, direct)) 1067d2854fa4SRandall Stewart cancelled = cc_exec_cancel(cc, direct) = true; 1068dc4ee9a8SGleb Smirnoff if (cc_exec_waiting(cc, direct) || cc_exec_drain(cc, direct)) { 1069a115fb62SHans Petter Selasky /* 1070a115fb62SHans Petter Selasky * Someone has called callout_drain to kill this 1071a115fb62SHans Petter Selasky * callout. Don't reschedule. 1072a115fb62SHans Petter Selasky */ 1073a115fb62SHans Petter Selasky CTR4(KTR_CALLOUT, "%s %p func %p arg %p", 1074a115fb62SHans Petter Selasky cancelled ? 
"cancelled" : "failed to cancel", 1075a115fb62SHans Petter Selasky c, c->c_func, c->c_arg); 1076a115fb62SHans Petter Selasky CC_UNLOCK(cc); 1077a115fb62SHans Petter Selasky return (cancelled); 1078a115fb62SHans Petter Selasky } 1079d2854fa4SRandall Stewart #ifdef SMP 1080d2854fa4SRandall Stewart if (callout_migrating(c)) { 1081d2854fa4SRandall Stewart /* 1082d2854fa4SRandall Stewart * This only occurs when a second callout_reset_sbt_on 1083d2854fa4SRandall Stewart * is made after a previous one moved it into 1084d2854fa4SRandall Stewart * deferred migration (below). Note we do *not* change 1085d2854fa4SRandall Stewart * the prev_cpu even though the previous target may 1086d2854fa4SRandall Stewart * be different. 1087d2854fa4SRandall Stewart */ 1088d2854fa4SRandall Stewart cc_migration_cpu(cc, direct) = cpu; 1089d2854fa4SRandall Stewart cc_migration_time(cc, direct) = to_sbt; 1090d2854fa4SRandall Stewart cc_migration_prec(cc, direct) = precision; 1091d2854fa4SRandall Stewart cc_migration_func(cc, direct) = ftn; 1092d2854fa4SRandall Stewart cc_migration_arg(cc, direct) = arg; 1093d2854fa4SRandall Stewart cancelled = 1; 1094d2854fa4SRandall Stewart CC_UNLOCK(cc); 1095d2854fa4SRandall Stewart return (cancelled); 1096d2854fa4SRandall Stewart } 1097d2854fa4SRandall Stewart #endif 1098a115fb62SHans Petter Selasky } 109915b1eb14SRandall Stewart if (c->c_iflags & CALLOUT_PENDING) { 110015b1eb14SRandall Stewart if ((c->c_iflags & CALLOUT_PROCESSED) == 0) { 110166525b2dSRandall Stewart if (cc_exec_next(cc) == c) 110266525b2dSRandall Stewart cc_exec_next(cc) = LIST_NEXT(c, c_links.le); 1103a115fb62SHans Petter Selasky LIST_REMOVE(c, c_links.le); 110415b1eb14SRandall Stewart } else { 1105a115fb62SHans Petter Selasky TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 110615b1eb14SRandall Stewart } 1107a115fb62SHans Petter Selasky cancelled = 1; 110815b1eb14SRandall Stewart c->c_iflags &= ~ CALLOUT_PENDING; 110915b1eb14SRandall Stewart c->c_flags &= ~ CALLOUT_ACTIVE; 11108d809d50SJeff Roberson } 11111283e9cdSAttilio Rao 1112a115fb62SHans Petter Selasky #ifdef SMP 1113a115fb62SHans Petter Selasky /* 1114a115fb62SHans Petter Selasky * If the callout must migrate try to perform it immediately. 1115a115fb62SHans Petter Selasky * If the callout is currently running, just defer the migration 1116a115fb62SHans Petter Selasky * to a more appropriate moment. 1117a115fb62SHans Petter Selasky */ 1118a115fb62SHans Petter Selasky if (c->c_cpu != cpu) { 1119d2854fa4SRandall Stewart if (cc_exec_curr(cc, direct) == c) { 1120d2854fa4SRandall Stewart /* 1121d2854fa4SRandall Stewart * Pending will have been removed since we are 1122d2854fa4SRandall Stewart * actually executing the callout on another 1123d2854fa4SRandall Stewart * CPU. That callout should be waiting on the 1124d2854fa4SRandall Stewart * lock the caller holds. If we set both 1125d2854fa4SRandall Stewart * active/and/pending after we return and the 1126d2854fa4SRandall Stewart * lock on the executing callout proceeds, it 1127d2854fa4SRandall Stewart * will then see pending is true and return. 1128d2854fa4SRandall Stewart * At the return from the actual callout execution 1129d2854fa4SRandall Stewart * the migration will occur in softclock_call_cc 1130d2854fa4SRandall Stewart * and this new callout will be placed on the 1131d2854fa4SRandall Stewart * new CPU via a call to callout_cpu_switch() which 1132d2854fa4SRandall Stewart * will get the lock on the right CPU followed 1133d2854fa4SRandall Stewart * by a call callout_cc_add() which will add it there. 
1134d2854fa4SRandall Stewart * (see above in softclock_call_cc()). 1135d2854fa4SRandall Stewart */ 1136d2854fa4SRandall Stewart cc_migration_cpu(cc, direct) = cpu; 1137d2854fa4SRandall Stewart cc_migration_time(cc, direct) = to_sbt; 1138d2854fa4SRandall Stewart cc_migration_prec(cc, direct) = precision; 1139d2854fa4SRandall Stewart cc_migration_func(cc, direct) = ftn; 1140d2854fa4SRandall Stewart cc_migration_arg(cc, direct) = arg; 114115b1eb14SRandall Stewart c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING); 114215b1eb14SRandall Stewart c->c_flags |= CALLOUT_ACTIVE; 1143a115fb62SHans Petter Selasky CTR6(KTR_CALLOUT, 1144a115fb62SHans Petter Selasky "migration of %p func %p arg %p in %d.%08x to %u deferred", 1145a115fb62SHans Petter Selasky c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 1146a115fb62SHans Petter Selasky (u_int)(to_sbt & 0xffffffff), cpu); 1147a115fb62SHans Petter Selasky CC_UNLOCK(cc); 1148a115fb62SHans Petter Selasky return (cancelled); 1149a115fb62SHans Petter Selasky } 1150a115fb62SHans Petter Selasky cc = callout_cpu_switch(c, cc, cpu); 1151a115fb62SHans Petter Selasky } 1152a115fb62SHans Petter Selasky #endif 1153a115fb62SHans Petter Selasky 115466525b2dSRandall Stewart callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags); 1155a115fb62SHans Petter Selasky CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x", 1156a115fb62SHans Petter Selasky cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 1157a115fb62SHans Petter Selasky (u_int)(to_sbt & 0xffffffff)); 1158a115fb62SHans Petter Selasky CC_UNLOCK(cc); 1159a115fb62SHans Petter Selasky 1160a115fb62SHans Petter Selasky return (cancelled); 1161acc8326dSGarrett Wollman } 1162acc8326dSGarrett Wollman 11636e0186d5SSam Leffler /* 11646e0186d5SSam Leffler * Common idioms that can be optimized in the future. 11656e0186d5SSam Leffler */ 11666e0186d5SSam Leffler int 11676e0186d5SSam Leffler callout_schedule_on(struct callout *c, int to_ticks, int cpu) 11686e0186d5SSam Leffler { 11696e0186d5SSam Leffler return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu); 11706e0186d5SSam Leffler } 11716e0186d5SSam Leffler 11726e0186d5SSam Leffler int 11736e0186d5SSam Leffler callout_schedule(struct callout *c, int to_ticks) 11746e0186d5SSam Leffler { 11756e0186d5SSam Leffler return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu); 11766e0186d5SSam Leffler } 11776e0186d5SSam Leffler 11782c1bb207SColin Percival int 11795db9ed80SKonstantin Belousov _callout_stop_safe(struct callout *c, int flags, void (*drain)(void *)) 11802c1bb207SColin Percival { 1181a115fb62SHans Petter Selasky struct callout_cpu *cc, *old_cc; 1182a115fb62SHans Petter Selasky struct lock_class *class; 1183a115fb62SHans Petter Selasky int direct, sq_locked, use_lock; 118447e42809SGleb Smirnoff int cancelled, not_on_a_list; 11851283e9cdSAttilio Rao 11865db9ed80SKonstantin Belousov if ((flags & CS_DRAIN) != 0) 11879500dd9fSAdrian Chadd WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock, 11889500dd9fSAdrian Chadd "calling %s", __func__); 11899500dd9fSAdrian Chadd 11901283e9cdSAttilio Rao /* 1191a115fb62SHans Petter Selasky * Some old subsystems don't hold Giant while running a callout_stop(), 1192a115fb62SHans Petter Selasky * so just discard this check for the moment. 
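 * (When the callout is protected by Giant, we therefore only honor
 * the lock if the caller actually holds it; note the mtx_owned()
 * test below.)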
11931283e9cdSAttilio Rao */ 11945db9ed80SKonstantin Belousov if ((flags & CS_DRAIN) == 0 && c->c_lock != NULL) { 1195a115fb62SHans Petter Selasky if (c->c_lock == &Giant.lock_object) 1196a115fb62SHans Petter Selasky use_lock = mtx_owned(&Giant); 1197a115fb62SHans Petter Selasky else { 1198a115fb62SHans Petter Selasky use_lock = 1; 1199a115fb62SHans Petter Selasky class = LOCK_CLASS(c->c_lock); 1200a115fb62SHans Petter Selasky class->lc_assert(c->c_lock, LA_XLOCKED); 12011283e9cdSAttilio Rao } 1202a115fb62SHans Petter Selasky } else 1203a115fb62SHans Petter Selasky use_lock = 0; 120415b1eb14SRandall Stewart if (c->c_iflags & CALLOUT_DIRECT) { 120515b1eb14SRandall Stewart direct = 1; 120615b1eb14SRandall Stewart } else { 120715b1eb14SRandall Stewart direct = 0; 120815b1eb14SRandall Stewart } 1209a115fb62SHans Petter Selasky sq_locked = 0; 1210a115fb62SHans Petter Selasky old_cc = NULL; 1211a115fb62SHans Petter Selasky again: 1212a115fb62SHans Petter Selasky cc = callout_lock(c); 1213a115fb62SHans Petter Selasky 121415b1eb14SRandall Stewart if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) == 121515b1eb14SRandall Stewart (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) && 121615b1eb14SRandall Stewart ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) { 1217d2854fa4SRandall Stewart /* 1218d2854fa4SRandall Stewart * Special case where this slipped in while we 1219d2854fa4SRandall Stewart * were migrating *as* the callout is about to 1220d2854fa4SRandall Stewart * execute. The caller probably holds the lock 1221d2854fa4SRandall Stewart * the callout wants. 1222d2854fa4SRandall Stewart * 1223d2854fa4SRandall Stewart * Get rid of the migration first. Then set 1224d2854fa4SRandall Stewart * the flag that tells this code *not* to 1225d2854fa4SRandall Stewart * try to remove it from any lists (it's not 1226d2854fa4SRandall Stewart * on one yet). When the callout wheel runs, 1227d2854fa4SRandall Stewart * it will ignore this callout. 1228d2854fa4SRandall Stewart */ 122915b1eb14SRandall Stewart c->c_iflags &= ~CALLOUT_PENDING; 123015b1eb14SRandall Stewart c->c_flags &= ~CALLOUT_ACTIVE; 1231d2854fa4SRandall Stewart not_on_a_list = 1; 1232d2854fa4SRandall Stewart } else { 1233d2854fa4SRandall Stewart not_on_a_list = 0; 1234d2854fa4SRandall Stewart } 1235d2854fa4SRandall Stewart 1236a115fb62SHans Petter Selasky /* 1237a115fb62SHans Petter Selasky * If the callout was migrating while the callout cpu lock was 1238a115fb62SHans Petter Selasky * dropped, just drop the sleepqueue lock and check the states 1239a115fb62SHans Petter Selasky * again. 1240a115fb62SHans Petter Selasky */ 1241a115fb62SHans Petter Selasky if (sq_locked != 0 && cc != old_cc) { 1242a115fb62SHans Petter Selasky #ifdef SMP 1243a115fb62SHans Petter Selasky CC_UNLOCK(cc); 1244d2854fa4SRandall Stewart sleepq_release(&cc_exec_waiting(old_cc, direct)); 1245a115fb62SHans Petter Selasky sq_locked = 0; 1246a115fb62SHans Petter Selasky old_cc = NULL; 1247a115fb62SHans Petter Selasky goto again; 1248a115fb62SHans Petter Selasky #else 1249a115fb62SHans Petter Selasky panic("migration should not happen"); 1250a115fb62SHans Petter Selasky #endif 1251a115fb62SHans Petter Selasky } 125247e42809SGleb Smirnoff 1253a115fb62SHans Petter Selasky /* 125447e42809SGleb Smirnoff * If the callout is running, try to stop it or drain it.
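 * Three sub-cases are handled below: with CS_DRAIN set we sleep until
 * the handler finishes; a lock-protected callout that has not really
 * started yet is cancelled by setting cc_exec_cancel; and a callout
 * in deferred migration has its migration state cleared instead.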
1255a115fb62SHans Petter Selasky */ 125647e42809SGleb Smirnoff if (cc_exec_curr(cc, direct) == c) { 1257a115fb62SHans Petter Selasky /* 125847e42809SGleb Smirnoff * Whether we succeed in stopping it or not, we must clear the 12597d88be4cSMark Johnston * active flag - this is what API users expect. If we're 12607d88be4cSMark Johnston * draining and the callout is currently executing, first wait 12617d88be4cSMark Johnston * until it finishes. 1262a115fb62SHans Petter Selasky */ 12637d88be4cSMark Johnston if ((flags & CS_DRAIN) == 0) 126418b4fd62SRandall Stewart c->c_flags &= ~CALLOUT_ACTIVE; 126547e42809SGleb Smirnoff 12665db9ed80SKonstantin Belousov if ((flags & CS_DRAIN) != 0) { 1267a115fb62SHans Petter Selasky /* 1268a115fb62SHans Petter Selasky * The current callout is running (or just 1269a115fb62SHans Petter Selasky * about to run) and blocking is allowed, so 1270a115fb62SHans Petter Selasky * just wait for the current invocation to 1271a115fb62SHans Petter Selasky * finish. 1272a115fb62SHans Petter Selasky */ 1273d2854fa4SRandall Stewart while (cc_exec_curr(cc, direct) == c) { 1274a115fb62SHans Petter Selasky /* 1275a115fb62SHans Petter Selasky * Use direct calls to the sleepqueue interface 1276a115fb62SHans Petter Selasky * instead of cv/msleep in order to avoid 1277a115fb62SHans Petter Selasky * a LOR between cc_lock and sleepqueue 1278a115fb62SHans Petter Selasky * chain spinlocks. This piece of code 1279a115fb62SHans Petter Selasky * in effect emulates a msleep_spin() call. 1280a115fb62SHans Petter Selasky * 1281a115fb62SHans Petter Selasky * If we already have the sleepqueue chain 1282a115fb62SHans Petter Selasky * locked, then we can safely block. If we 1283a115fb62SHans Petter Selasky * don't already have it locked, however, 1284a115fb62SHans Petter Selasky * we have to drop the cc_lock to lock 1285a115fb62SHans Petter Selasky * it. This opens several races, so we 1286a115fb62SHans Petter Selasky * restart at the beginning once we have 1287a115fb62SHans Petter Selasky * both locks. If nothing has changed, then 1288a115fb62SHans Petter Selasky * we will end up back here with sq_locked 1289a115fb62SHans Petter Selasky * set. 1290a115fb62SHans Petter Selasky */ 1291a115fb62SHans Petter Selasky if (!sq_locked) { 1292a115fb62SHans Petter Selasky CC_UNLOCK(cc); 1293a115fb62SHans Petter Selasky sleepq_lock( 1294d2854fa4SRandall Stewart &cc_exec_waiting(cc, direct)); 1295a115fb62SHans Petter Selasky sq_locked = 1; 1296a115fb62SHans Petter Selasky old_cc = cc; 1297a115fb62SHans Petter Selasky goto again; 1298a115fb62SHans Petter Selasky } 129947e42809SGleb Smirnoff 1300a115fb62SHans Petter Selasky /* 1301a115fb62SHans Petter Selasky * Migration could be cancelled here, but 1302a115fb62SHans Petter Selasky * since it is still not certain when it 1303a115fb62SHans Petter Selasky * will be packed up, just let softclock() 1304a115fb62SHans Petter Selasky * take care of it.
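 * Setting cc_exec_waiting below advertises that a thread is sleeping
 * on this wait channel, so the thread finishing the callout knows to
 * wake us once the handler has returned.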
1305a115fb62SHans Petter Selasky */ 1306d2854fa4SRandall Stewart cc_exec_waiting(cc, direct) = true; 1307a115fb62SHans Petter Selasky DROP_GIANT(); 1308a115fb62SHans Petter Selasky CC_UNLOCK(cc); 1309a115fb62SHans Petter Selasky sleepq_add( 1310d2854fa4SRandall Stewart &cc_exec_waiting(cc, direct), 1311a115fb62SHans Petter Selasky &cc->cc_lock.lock_object, "codrain", 1312a115fb62SHans Petter Selasky SLEEPQ_SLEEP, 0); 1313a115fb62SHans Petter Selasky sleepq_wait( 1314d2854fa4SRandall Stewart &cc_exec_waiting(cc, direct), 1315a115fb62SHans Petter Selasky 0); 1316a115fb62SHans Petter Selasky sq_locked = 0; 1317a115fb62SHans Petter Selasky old_cc = NULL; 1318a115fb62SHans Petter Selasky 1319a115fb62SHans Petter Selasky /* Reacquire locks previously released. */ 1320a115fb62SHans Petter Selasky PICKUP_GIANT(); 1321a115fb62SHans Petter Selasky CC_LOCK(cc); 1322a115fb62SHans Petter Selasky } 13237d88be4cSMark Johnston c->c_flags &= ~CALLOUT_ACTIVE; 1324a115fb62SHans Petter Selasky } else if (use_lock && 132518b4fd62SRandall Stewart !cc_exec_cancel(cc, direct) && (drain == NULL)) { 1326d2854fa4SRandall Stewart 1327a115fb62SHans Petter Selasky /* 1328a115fb62SHans Petter Selasky * The current callout is waiting for its 1329a115fb62SHans Petter Selasky * lock which we hold. Cancel the callout 1330a115fb62SHans Petter Selasky * and return. After our caller drops the 1331a115fb62SHans Petter Selasky * lock, the callout will be skipped in 133218b4fd62SRandall Stewart * softclock(). This *only* works with a 133318b4fd62SRandall Stewart * callout_stop(), *not* callout_drain() or 133418b4fd62SRandall Stewart * callout_async_drain(). 1335a115fb62SHans Petter Selasky */ 1336d2854fa4SRandall Stewart cc_exec_cancel(cc, direct) = true; 1337a115fb62SHans Petter Selasky CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 1338a115fb62SHans Petter Selasky c, c->c_func, c->c_arg); 1339a115fb62SHans Petter Selasky KASSERT(!cc_cce_migrating(cc, direct), 1340a115fb62SHans Petter Selasky ("callout wrongly scheduled for migration")); 134115b1eb14SRandall Stewart if (callout_migrating(c)) { 134215b1eb14SRandall Stewart c->c_iflags &= ~CALLOUT_DFRMIGRATION; 134315b1eb14SRandall Stewart #ifdef SMP 134415b1eb14SRandall Stewart cc_migration_cpu(cc, direct) = CPUBLOCK; 134515b1eb14SRandall Stewart cc_migration_time(cc, direct) = 0; 134615b1eb14SRandall Stewart cc_migration_prec(cc, direct) = 0; 134715b1eb14SRandall Stewart cc_migration_func(cc, direct) = NULL; 134815b1eb14SRandall Stewart cc_migration_arg(cc, direct) = NULL; 134915b1eb14SRandall Stewart #endif 135015b1eb14SRandall Stewart } 1351a115fb62SHans Petter Selasky CC_UNLOCK(cc); 1352a115fb62SHans Petter Selasky KASSERT(!sq_locked, ("sleepqueue chain locked")); 1353a115fb62SHans Petter Selasky return (1); 1354d2854fa4SRandall Stewart } else if (callout_migrating(c)) { 1355d2854fa4SRandall Stewart /* 1356d2854fa4SRandall Stewart * The callout is currently being serviced 1357d2854fa4SRandall Stewart * and the "next" callout is scheduled at 1358d2854fa4SRandall Stewart * its completion with a migration. We remove 1359d2854fa4SRandall Stewart * the migration flag so it *won't* get rescheduled, 1360d2854fa4SRandall Stewart * but we can't stop the one that's running, so we 1361d2854fa4SRandall Stewart * return 0 (or 1 if the caller passed CS_EXECUTING).
1362d2854fa4SRandall Stewart */ 136315b1eb14SRandall Stewart c->c_iflags &= ~CALLOUT_DFRMIGRATION; 1364d2854fa4SRandall Stewart #ifdef SMP 1365d2854fa4SRandall Stewart /* 1366d2854fa4SRandall Stewart * We can't call cc_cce_cleanup() here since 1367d2854fa4SRandall Stewart * it would clear .ce_curr while the callout 1368d2854fa4SRandall Stewart * is still running. Clearing the migration 1369d2854fa4SRandall Stewart * state here is what prevents a reschedule of 1370d2854fa4SRandall Stewart * the callout when its execution completes. 1371d2854fa4SRandall Stewart */ 1372d2854fa4SRandall Stewart cc_migration_cpu(cc, direct) = CPUBLOCK; 1373d2854fa4SRandall Stewart cc_migration_time(cc, direct) = 0; 1374d2854fa4SRandall Stewart cc_migration_prec(cc, direct) = 0; 1375d2854fa4SRandall Stewart cc_migration_func(cc, direct) = NULL; 1376d2854fa4SRandall Stewart cc_migration_arg(cc, direct) = NULL; 1377d2854fa4SRandall Stewart #endif 1378a115fb62SHans Petter Selasky CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p", 1379a115fb62SHans Petter Selasky c, c->c_func, c->c_arg); 138018b4fd62SRandall Stewart if (drain) { 138118b4fd62SRandall Stewart cc_exec_drain(cc, direct) = drain; 138218b4fd62SRandall Stewart } 1383a115fb62SHans Petter Selasky CC_UNLOCK(cc); 1384d153eeeeSGleb Smirnoff return ((flags & CS_EXECUTING) != 0); 1385a115fb62SHans Petter Selasky } 1386a115fb62SHans Petter Selasky CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 1387a115fb62SHans Petter Selasky c, c->c_func, c->c_arg); 138818b4fd62SRandall Stewart if (drain) { 138918b4fd62SRandall Stewart cc_exec_drain(cc, direct) = drain; 139018b4fd62SRandall Stewart } 13913d84a188SRandall Stewart KASSERT(!sq_locked, ("sleepqueue chain still locked")); 139247e42809SGleb Smirnoff cancelled = ((flags & CS_EXECUTING) != 0); 139347e42809SGleb Smirnoff } else 139447e42809SGleb Smirnoff cancelled = 1; 139547e42809SGleb Smirnoff 13963d84a188SRandall Stewart if (sq_locked) 13973d84a188SRandall Stewart sleepq_release(&cc_exec_waiting(cc, direct)); 1398d153eeeeSGleb Smirnoff 139947e42809SGleb Smirnoff if ((c->c_iflags & CALLOUT_PENDING) == 0) { 140047e42809SGleb Smirnoff CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 140147e42809SGleb Smirnoff c, c->c_func, c->c_arg); 14029f339124SGleb Smirnoff /* 14039f339124SGleb Smirnoff * For a callout that is neither scheduled nor 14049f339124SGleb Smirnoff * executing, return a negative value.
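 * Summarizing the return values of this function: 1 means the callout
 * was successfully stopped, 0 means it is currently running and could
 * not be stopped, and -1 means it was neither scheduled nor executing.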
14059f339124SGleb Smirnoff */ 14069f339124SGleb Smirnoff if (cc_exec_curr(cc, direct) != c) 14079f339124SGleb Smirnoff cancelled = -1; 140847e42809SGleb Smirnoff CC_UNLOCK(cc); 14099f339124SGleb Smirnoff return (cancelled); 141047e42809SGleb Smirnoff } 141147e42809SGleb Smirnoff 141215b1eb14SRandall Stewart c->c_iflags &= ~CALLOUT_PENDING; 141315b1eb14SRandall Stewart c->c_flags &= ~CALLOUT_ACTIVE; 14141283e9cdSAttilio Rao 141568a57ebfSGleb Smirnoff CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 141668a57ebfSGleb Smirnoff c, c->c_func, c->c_arg); 1417d2854fa4SRandall Stewart if (not_on_a_list == 0) { 141815b1eb14SRandall Stewart if ((c->c_iflags & CALLOUT_PROCESSED) == 0) { 141966525b2dSRandall Stewart if (cc_exec_next(cc) == c) 142066525b2dSRandall Stewart cc_exec_next(cc) = LIST_NEXT(c, c_links.le); 1421a115fb62SHans Petter Selasky LIST_REMOVE(c, c_links.le); 142215b1eb14SRandall Stewart } else { 1423a115fb62SHans Petter Selasky TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 1424d2854fa4SRandall Stewart } 142515b1eb14SRandall Stewart } 1426a115fb62SHans Petter Selasky callout_cc_del(c, cc); 1427a115fb62SHans Petter Selasky CC_UNLOCK(cc); 142847e42809SGleb Smirnoff return (cancelled); 1429acc8326dSGarrett Wollman } 1430acc8326dSGarrett Wollman 1431acc8326dSGarrett Wollman void 1432e392e44cSDavide Italiano callout_init(struct callout *c, int mpsafe) 1433acc8326dSGarrett Wollman { 1434a115fb62SHans Petter Selasky bzero(c, sizeof *c); 143598c926b2SIan Dowse if (mpsafe) { 1436a115fb62SHans Petter Selasky c->c_lock = NULL; 143715b1eb14SRandall Stewart c->c_iflags = CALLOUT_RETURNUNLOCKED; 143898c926b2SIan Dowse } else { 1439a115fb62SHans Petter Selasky c->c_lock = &Giant.lock_object; 144015b1eb14SRandall Stewart c->c_iflags = 0; 144198c926b2SIan Dowse } 1442a115fb62SHans Petter Selasky c->c_cpu = timeout_cpu; 144398c926b2SIan Dowse } 144498c926b2SIan Dowse 144598c926b2SIan Dowse void 1446e392e44cSDavide Italiano _callout_init_lock(struct callout *c, struct lock_object *lock, int flags) 144798c926b2SIan Dowse { 144898c926b2SIan Dowse bzero(c, sizeof *c); 144964b9ee20SAttilio Rao c->c_lock = lock; 1450a115fb62SHans Petter Selasky KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0, 1451a115fb62SHans Petter Selasky ("callout_init_lock: bad flags %d", flags)); 1452a115fb62SHans Petter Selasky KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0, 1453a115fb62SHans Petter Selasky ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock")); 1454a115fb62SHans Petter Selasky KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & 1455a115fb62SHans Petter Selasky (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class", 1456a115fb62SHans Petter Selasky __func__)); 145715b1eb14SRandall Stewart c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK); 14588d809d50SJeff Roberson c->c_cpu = timeout_cpu; 1459acc8326dSGarrett Wollman } 1460acc8326dSGarrett Wollman 1461e1d6dc65SNate Williams #ifdef APM_FIXUP_CALLTODO 1462e1d6dc65SNate Williams /* 1463e1d6dc65SNate Williams * Adjust the kernel calltodo timeout list. This routine is used after 1464e1d6dc65SNate Williams * an APM resume to recalculate the calltodo timer list values with the 1465e1d6dc65SNate Williams * number of ticks we have been asleep. The next hardclock() will detect 1466e1d6dc65SNate Williams * that there are fired timers and run softclock() to execute them.
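 *
 * For example (an illustrative computation, assuming hz = 100 and thus
 * tick = 10000 microseconds): a suspend of 2.5 seconds yields
 * delta_ticks = howmany(2 * 1000000 + 500000, 10000) + 1 = 251 in the
 * code below.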
1467e1d6dc65SNate Williams * 1468e1d6dc65SNate Williams * Please note, I have not done an exhaustive analysis of what code this 1469e1d6dc65SNate Williams * might break. I am motivated to have my select()'s and alarm()'s that 1470e1d6dc65SNate Williams * have expired during suspend fire upon resume so that the applications 1471e1d6dc65SNate Williams * which set the timer can do the maintenance the timer was for as close 1472e1d6dc65SNate Williams * as possible to the originally intended time. Testing this code for a 1473e1d6dc65SNate Williams * week showed that resuming from a suspend resulted in 22 to 25 timers 1474e3043798SPedro F. Giffuni * firing, which seemed independent of whether the suspend was 2 hours or 1475e1d6dc65SNate Williams * 2 days. Your mileage may vary. - Ken Key <key@cs.utk.edu> 1476e1d6dc65SNate Williams */ 1477e1d6dc65SNate Williams void 1478e392e44cSDavide Italiano adjust_timeout_calltodo(struct timeval *time_change) 1479e1d6dc65SNate Williams { 14803e85b721SEd Maste struct callout *p; 1481e1d6dc65SNate Williams unsigned long delta_ticks; 1482e1d6dc65SNate Williams 1483e1d6dc65SNate Williams /* 1484e1d6dc65SNate Williams * How many ticks were we asleep? 1485c8b47828SBruce Evans * (stolen from tvtohz()). 1486e1d6dc65SNate Williams */ 1487e1d6dc65SNate Williams 1488e1d6dc65SNate Williams /* Don't do anything */ 1489e1d6dc65SNate Williams if (time_change->tv_sec < 0) 1490e1d6dc65SNate Williams return; 1491e1d6dc65SNate Williams else if (time_change->tv_sec <= LONG_MAX / 1000000) 149255e0987aSPedro F. Giffuni delta_ticks = howmany(time_change->tv_sec * 1000000 + 149355e0987aSPedro F. Giffuni time_change->tv_usec, tick) + 1; 1494e1d6dc65SNate Williams else if (time_change->tv_sec <= LONG_MAX / hz) 1495e1d6dc65SNate Williams delta_ticks = time_change->tv_sec * hz + 149655e0987aSPedro F. Giffuni howmany(time_change->tv_usec, tick) + 1; 1497e1d6dc65SNate Williams else 1498e1d6dc65SNate Williams delta_ticks = LONG_MAX; 1499e1d6dc65SNate Williams 1500e1d6dc65SNate Williams if (delta_ticks > INT_MAX) 1501e1d6dc65SNate Williams delta_ticks = INT_MAX; 1502e1d6dc65SNate Williams 1503e1d6dc65SNate Williams /* 1504e1d6dc65SNate Williams * Now rip through the timer calltodo list looking for timers 1505e1d6dc65SNate Williams * to expire.
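 * The calltodo list is delta-encoded: each entry's c_time is relative
 * to the entry before it, which is why the loop below credits the
 * unused (negative) remainder of an expired entry to the entries that
 * follow. (This legacy block is compiled only when APM_FIXUP_CALLTODO
 * is defined.)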
1506e1d6dc65SNate Williams */ 1507e1d6dc65SNate Williams 1508e1d6dc65SNate Williams /* don't collide with softclock() */ 15098d809d50SJeff Roberson CC_LOCK(cc); 1510e1d6dc65SNate Williams for (p = calltodo.c_next; p != NULL; p = p->c_next) { 1511e1d6dc65SNate Williams p->c_time -= delta_ticks; 1512e1d6dc65SNate Williams 1513e1d6dc65SNate Williams /* Break if the timer had more time on it than delta_ticks */ 1514e1d6dc65SNate Williams if (p->c_time > 0) 1515e1d6dc65SNate Williams break; 1516e1d6dc65SNate Williams 1517e1d6dc65SNate Williams /* take back the ticks the timer didn't use (p->c_time <= 0) */ 1518e1d6dc65SNate Williams delta_ticks = -p->c_time; 1519e1d6dc65SNate Williams } 15208d809d50SJeff Roberson CC_UNLOCK(cc); 1521e1d6dc65SNate Williams 1522e1d6dc65SNate Williams return; 1523e1d6dc65SNate Williams } 1524e1d6dc65SNate Williams #endif /* APM_FIXUP_CALLTODO */ 15255b999a6bSDavide Italiano 15265b999a6bSDavide Italiano static int 15275b999a6bSDavide Italiano flssbt(sbintime_t sbt) 15285b999a6bSDavide Italiano { 15295b999a6bSDavide Italiano 15305b999a6bSDavide Italiano sbt += (uint64_t)sbt >> 1; 15315b999a6bSDavide Italiano if (sizeof(long) >= sizeof(sbintime_t)) 15325b999a6bSDavide Italiano return (flsl(sbt)); 15335b999a6bSDavide Italiano if (sbt >= SBT_1S) 15345b999a6bSDavide Italiano return (flsl(((uint64_t)sbt) >> 32) + 32); 15355b999a6bSDavide Italiano return (flsl(sbt)); 15365b999a6bSDavide Italiano } 15375b999a6bSDavide Italiano 15385b999a6bSDavide Italiano /* 15395b999a6bSDavide Italiano * Dump immediate statistic snapshot of the scheduled callouts. 15405b999a6bSDavide Italiano */ 15415b999a6bSDavide Italiano static int 15425b999a6bSDavide Italiano sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS) 15435b999a6bSDavide Italiano { 15445b999a6bSDavide Italiano struct callout *tmp; 15455b999a6bSDavide Italiano struct callout_cpu *cc; 15465b999a6bSDavide Italiano struct callout_list *sc; 15475b999a6bSDavide Italiano sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t; 15485b999a6bSDavide Italiano int ct[64], cpr[64], ccpbk[32]; 15495b999a6bSDavide Italiano int error, val, i, count, tcum, pcum, maxc, c, medc; 15505b999a6bSDavide Italiano #ifdef SMP 15515b999a6bSDavide Italiano int cpu; 15525b999a6bSDavide Italiano #endif 15535b999a6bSDavide Italiano 15545b999a6bSDavide Italiano val = 0; 15555b999a6bSDavide Italiano error = sysctl_handle_int(oidp, &val, 0, req); 15565b999a6bSDavide Italiano if (error != 0 || req->newptr == NULL) 15575b999a6bSDavide Italiano return (error); 15585b999a6bSDavide Italiano count = maxc = 0; 15595b999a6bSDavide Italiano st = spr = maxt = maxpr = 0; 15605b999a6bSDavide Italiano bzero(ccpbk, sizeof(ccpbk)); 15615b999a6bSDavide Italiano bzero(ct, sizeof(ct)); 15625b999a6bSDavide Italiano bzero(cpr, sizeof(cpr)); 15635b999a6bSDavide Italiano now = sbinuptime(); 15645b999a6bSDavide Italiano #ifdef SMP 15655b999a6bSDavide Italiano CPU_FOREACH(cpu) { 15665b999a6bSDavide Italiano cc = CC_CPU(cpu); 15675b999a6bSDavide Italiano #else 15685b999a6bSDavide Italiano cc = CC_CPU(timeout_cpu); 15695b999a6bSDavide Italiano #endif 15705b999a6bSDavide Italiano CC_LOCK(cc); 15715b999a6bSDavide Italiano for (i = 0; i < callwheelsize; i++) { 15725b999a6bSDavide Italiano sc = &cc->cc_callwheel[i]; 15735b999a6bSDavide Italiano c = 0; 15745b999a6bSDavide Italiano LIST_FOREACH(tmp, sc, c_links.le) { 15755b999a6bSDavide Italiano c++; 15765b999a6bSDavide Italiano t = tmp->c_time - now; 15775b999a6bSDavide Italiano if (t < 0) 15785b999a6bSDavide Italiano t = 0; 15795b999a6bSDavide 
Italiano st += t / SBT_1US; 15805b999a6bSDavide Italiano spr += tmp->c_precision / SBT_1US; 15815b999a6bSDavide Italiano if (t > maxt) 15825b999a6bSDavide Italiano maxt = t; 15835b999a6bSDavide Italiano if (tmp->c_precision > maxpr) 15845b999a6bSDavide Italiano maxpr = tmp->c_precision; 15855b999a6bSDavide Italiano ct[flssbt(t)]++; 15865b999a6bSDavide Italiano cpr[flssbt(tmp->c_precision)]++; 15875b999a6bSDavide Italiano } 15885b999a6bSDavide Italiano if (c > maxc) 15895b999a6bSDavide Italiano maxc = c; 15905b999a6bSDavide Italiano ccpbk[fls(c + c / 2)]++; 15915b999a6bSDavide Italiano count += c; 15925b999a6bSDavide Italiano } 15935b999a6bSDavide Italiano CC_UNLOCK(cc); 15945b999a6bSDavide Italiano #ifdef SMP 15955b999a6bSDavide Italiano } 15965b999a6bSDavide Italiano #endif 15975b999a6bSDavide Italiano 15985b999a6bSDavide Italiano for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++) 15995b999a6bSDavide Italiano tcum += ct[i]; 16005b999a6bSDavide Italiano medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0; 16015b999a6bSDavide Italiano for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++) 16025b999a6bSDavide Italiano pcum += cpr[i]; 16035b999a6bSDavide Italiano medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0; 16045b999a6bSDavide Italiano for (i = 0, c = 0; i < 32 && c < count / 2; i++) 16055b999a6bSDavide Italiano c += ccpbk[i]; 16065b999a6bSDavide Italiano medc = (i >= 2) ? (1 << (i - 2)) : 0; 16075b999a6bSDavide Italiano 16085b999a6bSDavide Italiano printf("Scheduled callouts statistic snapshot:\n"); 16095b999a6bSDavide Italiano printf(" Callouts: %6d Buckets: %6d*%-3d Bucket size: 0.%06ds\n", 16105b999a6bSDavide Italiano count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT); 16115b999a6bSDavide Italiano printf(" C/Bk: med %5d avg %6d.%06jd max %6d\n", 16125b999a6bSDavide Italiano medc, 16135b999a6bSDavide Italiano count / callwheelsize / mp_ncpus, 16145b999a6bSDavide Italiano (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000, 16155b999a6bSDavide Italiano maxc); 16165b999a6bSDavide Italiano printf(" Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n", 16175b999a6bSDavide Italiano medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32, 16185b999a6bSDavide Italiano (st / count) / 1000000, (st / count) % 1000000, 16195b999a6bSDavide Italiano maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32); 16205b999a6bSDavide Italiano printf(" Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n", 16215b999a6bSDavide Italiano medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32, 16225b999a6bSDavide Italiano (spr / count) / 1000000, (spr / count) % 1000000, 16235b999a6bSDavide Italiano maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32); 16245b999a6bSDavide Italiano printf(" Distribution: \tbuckets\t time\t tcum\t" 16255b999a6bSDavide Italiano " prec\t pcum\n"); 16265b999a6bSDavide Italiano for (i = 0, tcum = pcum = 0; i < 64; i++) { 16275b999a6bSDavide Italiano if (ct[i] == 0 && cpr[i] == 0) 16285b999a6bSDavide Italiano continue; 16295b999a6bSDavide Italiano t = (i != 0) ? 
(((sbintime_t)1) << (i - 1)) : 0; 16305b999a6bSDavide Italiano tcum += ct[i]; 16315b999a6bSDavide Italiano pcum += cpr[i]; 16325b999a6bSDavide Italiano printf(" %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n", 16335b999a6bSDavide Italiano t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32, 16345b999a6bSDavide Italiano i - 1 - (32 - CC_HASH_SHIFT), 16355b999a6bSDavide Italiano ct[i], tcum, cpr[i], pcum); 16365b999a6bSDavide Italiano } 16375b999a6bSDavide Italiano return (error); 16385b999a6bSDavide Italiano } 16395b999a6bSDavide Italiano SYSCTL_PROC(_kern, OID_AUTO, callout_stat, 16405b999a6bSDavide Italiano CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 16415b999a6bSDavide Italiano 0, 0, sysctl_kern_callout_stat, "I", 16425b999a6bSDavide Italiano "Dump immediate statistic snapshot of the scheduled callouts"); 164347e42809SGleb Smirnoff 16443af72c11SBjoern A. Zeeb #ifdef DDB 16453af72c11SBjoern A. Zeeb static void 16463af72c11SBjoern A. Zeeb _show_callout(struct callout *c) 16473af72c11SBjoern A. Zeeb { 16483af72c11SBjoern A. Zeeb 16493af72c11SBjoern A. Zeeb db_printf("callout %p\n", c); 16503af72c11SBjoern A. Zeeb #define C_DB_PRINTF(f, e) db_printf(" %s = " f "\n", #e, c->e); 16513af72c11SBjoern A. Zeeb db_printf(" &c_links = %p\n", &(c->c_links)); 16523af72c11SBjoern A. Zeeb C_DB_PRINTF("%" PRId64, c_time); 16533af72c11SBjoern A. Zeeb C_DB_PRINTF("%" PRId64, c_precision); 16543af72c11SBjoern A. Zeeb C_DB_PRINTF("%p", c_arg); 16553af72c11SBjoern A. Zeeb C_DB_PRINTF("%p", c_func); 16563af72c11SBjoern A. Zeeb C_DB_PRINTF("%p", c_lock); 16573af72c11SBjoern A. Zeeb C_DB_PRINTF("%#x", c_flags); 16583af72c11SBjoern A. Zeeb C_DB_PRINTF("%#x", c_iflags); 16593af72c11SBjoern A. Zeeb C_DB_PRINTF("%d", c_cpu); 16603af72c11SBjoern A. Zeeb #undef C_DB_PRINTF 16613af72c11SBjoern A. Zeeb } 16623af72c11SBjoern A. Zeeb 16633af72c11SBjoern A. Zeeb DB_SHOW_COMMAND(callout, db_show_callout) 16643af72c11SBjoern A. Zeeb { 16653af72c11SBjoern A. Zeeb 16663af72c11SBjoern A. Zeeb if (!have_addr) { 16673af72c11SBjoern A. Zeeb db_printf("usage: show callout <struct callout *>\n"); 16683af72c11SBjoern A. Zeeb return; 16693af72c11SBjoern A. Zeeb } 16703af72c11SBjoern A. Zeeb 16713af72c11SBjoern A. Zeeb _show_callout((struct callout *)addr); 16723af72c11SBjoern A. Zeeb } 16733af72c11SBjoern A. Zeeb #endif /* DDB */ 1674
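/*
 * Illustrative usage sketch of the callout KPI documented above: a
 * minimal, hypothetical consumer (the names foo_softc, foo_tick and
 * foo_attach are invented for illustration).  callout_init_mtx() and
 * callout_reset() are the <sys/callout.h> convenience wrappers around
 * _callout_init_lock() and callout_reset_sbt_on() seen in this file.
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		struct callout	sc_callout;
 *	};
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		// sc_mtx is held on entry for a mutex-bound callout.
 *		callout_deactivate(&sc->sc_callout);	// mark as serviced
 *		// ... periodic work goes here ...
 *		// Re-arm to fire again one second (hz ticks) from now.
 *		callout_reset(&sc->sc_callout, hz, foo_tick, sc);
 *	}
 *
 *	static void
 *	foo_attach(struct foo_softc *sc)
 *	{
 *		mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
 *		callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
 *		mtx_lock(&sc->sc_mtx);
 *		callout_reset(&sc->sc_callout, hz, foo_tick, sc);
 *		mtx_unlock(&sc->sc_mtx);
 *	}
 *
 * On detach, callout_drain(&sc->sc_callout) would be used to guarantee
 * the handler is neither running nor rescheduled before the structure
 * is freed.
 */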