/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_callout_profiling.h"
#include "opt_ddb.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/domainset.h>
#include <sys/file.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/unistd.h>

#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#include <machine/_inttypes.h>
#endif

#ifdef SMP
#include <machine/cpu.h>
#endif

DPCPU_DECLARE(sbintime_t, hardclocktime);

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *");
SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *");

static void	softclock_thread(void *arg);

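/*
 * The CALLOUT_PROFILING counters below are exponential moving averages
 * kept in fixed point: each sample is scaled by 1000 (hence the
 * "Units = 1/1000" in the sysctl descriptions) and blended in with a
 * weight of 1/256 per softclock/callout_process invocation.  A minimal
 * sketch of the update, with a hypothetical sample "depth":
 *
 *	avg += (depth * 1000 - avg) >> 8;
 *
 * so a reading of, e.g., 2500 means roughly 2.5 items per call on
 * average.
 */
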
#ifdef CALLOUT_PROFILING
static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
static int avg_depth_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
    "Average number of direct callouts examined per callout_process call. "
    "Units = 1/1000");
static int avg_lockcalls_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
    &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
    "callout_process call. Units = 1/1000");
static int avg_mpcalls_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
    0, "Average number of MP direct callouts made per callout_process call. "
    "Units = 1/1000");
#endif

static int ncallout;
SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &ncallout, 0,
    "Number of entries in callwheel and size of timeout() preallocation");

#ifdef RSS
static int pin_default_swi = 1;
static int pin_pcpu_swi = 1;
#else
static int pin_default_swi = 0;
static int pin_pcpu_swi = 0;
#endif

SYSCTL_INT(_kern, OID_AUTO, pin_default_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pin_default_swi, 0,
    "Pin the default (non-per-cpu) swi (shared with PCPU 0 swi)");
SYSCTL_INT(_kern, OID_AUTO, pin_pcpu_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pin_pcpu_swi, 0,
    "Pin the per-CPU swis (except PCPU 0, which is also default)");

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
static u_int __read_mostly callwheelsize;
static u_int __read_mostly callwheelmask;

/*
 * The callout cpu exec entities represent the information necessary for
 * describing the state of callouts currently running on the CPU and the
 * information necessary for migrating callouts to a new callout cpu.  In
 * particular, the first entry of the cc_exec_entity array holds the
 * information for callouts running in SWI thread context, while the
 * second one holds the information for callouts running directly from
 * hardware interrupt context.  The cached information is very important
 * for deferring migration when the migrating callout is already running.
 */
struct cc_exec {
	struct callout	*cc_curr;
	callout_func_t	*cc_drain;
	void		*cc_last_func;
	void		*cc_last_arg;
#ifdef SMP
	callout_func_t	*ce_migration_func;
	void		*ce_migration_arg;
	sbintime_t	ce_migration_time;
	sbintime_t	ce_migration_prec;
	int		ce_migration_cpu;
#endif
	bool		cc_cancel;
	bool		cc_waiting;
};

/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 */
struct callout_cpu {
	struct mtx_padalign	cc_lock;
	struct cc_exec		cc_exec_entity[2];
	struct callout		*cc_next;
	struct callout_list	*cc_callwheel;
	struct callout_tailq	cc_expireq;
	sbintime_t		cc_firstevent;
	sbintime_t		cc_lastscan;
	struct thread		*cc_thread;
	u_int			cc_bucket;
	u_int			cc_inited;
#ifdef KTR
	char			cc_ktr_event_name[20];
#endif
};

#define	callout_migrating(c)	((c)->c_iflags & CALLOUT_DFRMIGRATION)

#define	cc_exec_curr(cc, dir)		cc->cc_exec_entity[dir].cc_curr
#define	cc_exec_last_func(cc, dir)	cc->cc_exec_entity[dir].cc_last_func
#define	cc_exec_last_arg(cc, dir)	cc->cc_exec_entity[dir].cc_last_arg
#define	cc_exec_drain(cc, dir)		cc->cc_exec_entity[dir].cc_drain
#define	cc_exec_next(cc)		cc->cc_next
#define	cc_exec_cancel(cc, dir)		cc->cc_exec_entity[dir].cc_cancel
#define	cc_exec_waiting(cc, dir)	cc->cc_exec_entity[dir].cc_waiting
#ifdef SMP
#define	cc_migration_func(cc, dir)	cc->cc_exec_entity[dir].ce_migration_func
#define	cc_migration_arg(cc, dir)	cc->cc_exec_entity[dir].ce_migration_arg
#define	cc_migration_cpu(cc, dir)	cc->cc_exec_entity[dir].ce_migration_cpu
#define	cc_migration_time(cc, dir)	cc->cc_exec_entity[dir].ce_migration_time
#define	cc_migration_prec(cc, dir)	cc->cc_exec_entity[dir].ce_migration_prec

static struct callout_cpu cc_cpu[MAXCPU];
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
static struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	(&cc_cpu)
#define	CC_SELF()	(&cc_cpu)
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)

static int __read_mostly cc_default_cpu;

static void	callout_cpu_init(struct callout_cpu *cc, int cpu);
static void	softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
		    int *mpcalls, int *lockcalls, int *gcalls,
#endif
		    int direct);

static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is cc_curr.
 *                     If cc_curr is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to 1 with both callout_lock and cc_lock held
 *                     guarantees that the current callout will not run.
 *                     The softclock_call_cc() function sets this to 0 before
 *                     it drops callout_lock to acquire c_lock, and it calls
 *                     the handler only if cc_cancel is still 0 after
 *                     cc_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     cc_waiting is true.  Set only when
 *                     cc_curr is non-NULL.
 */

/*
 * Resets the execution entity tied to a specific callout cpu.
 */
static void
cc_cce_cleanup(struct callout_cpu *cc, int direct)
{

	cc_exec_curr(cc, direct) = NULL;
	cc_exec_cancel(cc, direct) = false;
	cc_exec_waiting(cc, direct) = false;
#ifdef SMP
	cc_migration_cpu(cc, direct) = CPUBLOCK;
	cc_migration_time(cc, direct) = 0;
	cc_migration_prec(cc, direct) = 0;
	cc_migration_func(cc, direct) = NULL;
	cc_migration_arg(cc, direct) = NULL;
#endif
}

/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cce_migrating(struct callout_cpu *cc, int direct)
{

#ifdef SMP
	return (cc_migration_cpu(cc, direct) != CPUBLOCK);
#else
	return (0);
#endif
}

/*
 * Kernel low level callwheel initialization
 * called on the BSP during kernel startup.
 */
static void
callout_callwheel_init(void *dummy)
{
	struct callout_cpu *cc;
	int cpu;

	/*
	 * Calculate the size of the callout wheel and the preallocated
	 * timeout() structures.
	 * XXX: Clip callout to result of previous function of maxusers
	 * maximum 384.  This is still huge, but acceptable.
	 */
	ncallout = imin(16 + maxproc + maxfiles, 18508);
	TUNABLE_INT_FETCH("kern.ncallout", &ncallout);

	/*
	 * Calculate the callout wheel size; it should be the next power
	 * of two higher than 'ncallout'.
	 */
	callwheelsize = 1 << fls(ncallout);
	callwheelmask = callwheelsize - 1;

	/*
	 * Fetch whether we're pinning the swi's or not.
	 */
	TUNABLE_INT_FETCH("kern.pin_default_swi", &pin_default_swi);
	TUNABLE_INT_FETCH("kern.pin_pcpu_swi", &pin_pcpu_swi);

	/*
	 * Initialize callout wheels.  The software interrupt threads
	 * are created later.
	 */
	cc_default_cpu = PCPU_GET(cpuid);
	CPU_FOREACH(cpu) {
		cc = CC_CPU(cpu);
		callout_cpu_init(cc, cpu);
	}
}
SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);

/*
 * Initialize the per-cpu callout structures.
 */
static void
callout_cpu_init(struct callout_cpu *cc, int cpu)
{
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN);
	cc->cc_inited = 1;
	cc->cc_callwheel = malloc_domainset(sizeof(struct callout_list) *
	    callwheelsize, M_CALLOUT,
	    DOMAINSET_PREF(pcpu_find(cpu)->pc_domain), M_WAITOK);
	for (i = 0; i < callwheelsize; i++)
		LIST_INIT(&cc->cc_callwheel[i]);
	TAILQ_INIT(&cc->cc_expireq);
	cc->cc_firstevent = SBT_MAX;
	for (i = 0; i < 2; i++)
		cc_cce_cleanup(cc, i);
#ifdef KTR
	snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
	    "callwheel cpu %d", cpu);
#endif
}

#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects a locked incoming callout cpu and returns with
 * the outgoing callout cpu locked.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	/*
	 * Avoid interrupts and preemption firing after the callout cpu
	 * is blocked in order to avoid deadlocks, as the new thread
	 * may want to acquire the callout cpu lock.
	 */
	c->c_cpu = CPUBLOCK;
	spinlock_enter();
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	spinlock_exit();
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif

/*
 * Start softclock threads.
 */
static void
start_softclock(void *dummy)
{
	struct proc *p;
	struct thread *td;
	struct callout_cpu *cc;
	int cpu, error;
	bool pin_swi;

	p = NULL;
	CPU_FOREACH(cpu) {
		cc = CC_CPU(cpu);
		error = kproc_kthread_add(softclock_thread, cc, &p, &td,
		    RFSTOPPED, 0, "clock", "clock (%d)", cpu);
		if (error != 0)
			panic("failed to create softclock thread for cpu %d: %d",
			    cpu, error);
		CC_LOCK(cc);
		cc->cc_thread = td;
		thread_lock(td);
		sched_class(td, PRI_ITHD);
		sched_prio(td, PI_SWI(SWI_CLOCK));
		TD_SET_IWAIT(td);
		thread_lock_set(td, (struct mtx *)&cc->cc_lock);
		thread_unlock(td);
		if (cpu == cc_default_cpu)
			pin_swi = pin_default_swi;
		else
			pin_swi = pin_pcpu_swi;
		if (pin_swi) {
			error = cpuset_setithread(td->td_tid, cpu);
			if (error != 0)
				printf("%s: %s clock couldn't be pinned to cpu %d: %d\n",
				    __func__, cpu == cc_default_cpu ?
				    "default" : "per-cpu", cpu, error);
		}
	}
}
SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock,
    NULL);

#define	CC_HASH_SHIFT	8

static inline u_int
callout_hash(sbintime_t sbt)
{

	return (sbt >> (32 - CC_HASH_SHIFT));
}

static inline u_int
callout_get_bucket(sbintime_t sbt)
{

	return (callout_hash(sbt) & callwheelmask);
}

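/*
 * Worked example of the hashing above: sbintime_t is a 32.32 fixed-point
 * count of seconds, so with CC_HASH_SHIFT == 8 the shift by (32 - 8)
 * leaves the time in units of 2^-8 s, i.e. each callwheel bucket spans
 * 1/256 of a second (~3.9 ms) and consecutive buckets cover consecutive
 * slices of time:
 *
 *	callout_hash(SBT_1S) == 256;
 *	callout_get_bucket(SBT_1S) == (256 & callwheelmask);
 */
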
void
callout_process(sbintime_t now)
{
	struct callout *tmp, *tmpn;
	struct callout_cpu *cc;
	struct callout_list *sc;
	struct thread *td;
	sbintime_t first, last, lookahead, max, tmp_max;
	u_int firstb, lastb, nowb;
#ifdef CALLOUT_PROFILING
	int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
#endif

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);

	/* Compute the buckets of the last scan and present times. */
	firstb = callout_hash(cc->cc_lastscan);
	cc->cc_lastscan = now;
	nowb = callout_hash(now);

	/* Compute the last bucket and minimum time of the bucket after it. */
	if (nowb == firstb)
		lookahead = (SBT_1S / 16);
	else if (nowb - firstb == 1)
		lookahead = (SBT_1S / 8);
	else
		lookahead = SBT_1S;
	first = last = now;
	first += (lookahead / 2);
	last += lookahead;
	last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
	lastb = callout_hash(last) - 1;
	max = last;

	/*
	 * Check if we wrapped around the entire wheel from the last scan.
	 * In that case, we need to scan the entire wheel for pending
	 * callouts.
	 */
	if (lastb - firstb >= callwheelsize) {
		lastb = firstb + callwheelsize - 1;
		if (nowb - firstb >= callwheelsize)
			nowb = lastb;
	}

	/* Iterate callwheel from firstb to nowb and then up to lastb. */
	do {
		sc = &cc->cc_callwheel[firstb & callwheelmask];
		tmp = LIST_FIRST(sc);
		while (tmp != NULL) {
			/* Run the callout if present time is within allowed. */
			if (tmp->c_time <= now) {
				/*
				 * Consumer told us the callout may be run
				 * directly from hardware interrupt context.
				 */
				if (tmp->c_iflags & CALLOUT_DIRECT) {
#ifdef CALLOUT_PROFILING
					++depth_dir;
#endif
					cc_exec_next(cc) =
					    LIST_NEXT(tmp, c_links.le);
					cc->cc_bucket = firstb & callwheelmask;
					LIST_REMOVE(tmp, c_links.le);
					softclock_call_cc(tmp, cc,
#ifdef CALLOUT_PROFILING
					    &mpcalls_dir, &lockcalls_dir, NULL,
#endif
					    1);
					tmp = cc_exec_next(cc);
					cc_exec_next(cc) = NULL;
				} else {
					tmpn = LIST_NEXT(tmp, c_links.le);
					LIST_REMOVE(tmp, c_links.le);
					TAILQ_INSERT_TAIL(&cc->cc_expireq,
					    tmp, c_links.tqe);
					tmp->c_iflags |= CALLOUT_PROCESSED;
					tmp = tmpn;
				}
				continue;
			}
			/* Skip events from the distant future. */
			if (tmp->c_time >= max)
				goto next;
			/*
			 * The event's minimal time is after the present
			 * maximal time, so it cannot be aggregated.
			 */
			if (tmp->c_time > last) {
				lastb = nowb;
				goto next;
			}
			/* Update first and last time, respecting this event. */
			if (tmp->c_time < first)
				first = tmp->c_time;
			tmp_max = tmp->c_time + tmp->c_precision;
			if (tmp_max < last)
				last = tmp_max;
next:
			tmp = LIST_NEXT(tmp, c_links.le);
		}
		/* Proceed with the next bucket. */
		firstb++;
		/*
		 * Stop if we looked past the present time and found some
		 * event we can't execute now.
		 * Stop if we looked far enough into the future.
		 */
	} while (((int)(firstb - lastb)) <= 0);
	cc->cc_firstevent = last;
	cpu_new_callout(curcpu, last, first);

#ifdef CALLOUT_PROFILING
	avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
	avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
	avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
#endif
	if (!TAILQ_EMPTY(&cc->cc_expireq)) {
		td = cc->cc_thread;
		if (TD_AWAITING_INTR(td)) {
			thread_lock_block_wait(td);
			THREAD_LOCK_ASSERT(td, MA_OWNED);
			TD_CLR_IWAIT(td);
			sched_add(td, SRQ_INTR);
		} else
			mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	} else
		mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

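/*
 * Note on the retry loop above: c->c_cpu may be set to CPUBLOCK by
 * callout_cpu_switch() while a migration is in flight, so callout_lock()
 * spins until the field settles on a real CPU id and then re-checks it
 * under the matching cc_lock before returning.
 */
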
static void
callout_cc_add(struct callout *c, struct callout_cpu *cc,
    sbintime_t sbt, sbintime_t precision, void (*func)(void *),
    void *arg, int cpu, int flags)
{
	int bucket;

	CC_LOCK_ASSERT(cc);
	if (sbt < cc->cc_lastscan)
		sbt = cc->cc_lastscan;
	c->c_arg = arg;
	c->c_iflags |= CALLOUT_PENDING;
	c->c_iflags &= ~CALLOUT_PROCESSED;
	c->c_flags |= CALLOUT_ACTIVE;
	if (flags & C_DIRECT_EXEC)
		c->c_iflags |= CALLOUT_DIRECT;
	c->c_func = func;
	c->c_time = sbt;
	c->c_precision = precision;
	bucket = callout_get_bucket(c->c_time);
	CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
	    c, (int)(c->c_precision >> 32),
	    (u_int)(c->c_precision & 0xffffffff));
	LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
	if (cc->cc_bucket == bucket)
		cc_exec_next(cc) = c;

	/*
	 * Inform the eventtimers(4) subsystem there's a new callout
	 * that has been inserted, but only if really required.
	 */
	if (SBT_MAX - c->c_time < c->c_precision)
		c->c_precision = SBT_MAX - c->c_time;
	sbt = c->c_time + c->c_precision;
	if (sbt < cc->cc_firstevent) {
		cc->cc_firstevent = sbt;
		cpu_new_callout(cpu, sbt, c->c_time);
	}
}

static void
softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
    int *mpcalls, int *lockcalls, int *gcalls,
#endif
    int direct)
{
	struct rm_priotracker tracker;
	callout_func_t *c_func, *drain;
	void *c_arg;
	struct lock_class *class;
	struct lock_object *c_lock;
	uintptr_t lock_status;
	int c_iflags;
#ifdef SMP
	struct callout_cpu *new_cc;
	callout_func_t *new_func;
	void *new_arg;
	int flags, new_cpu;
	sbintime_t new_prec, new_time;
#endif
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbintime_t sbt1, sbt2;
	struct timespec ts2;
	static sbintime_t maxdt = 2 * SBT_1MS;	/* 2 msec */
	static callout_func_t *lastfunc;
#endif

	KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING,
	    ("softclock_call_cc: pend %p %x", c, c->c_iflags));
	KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE,
	    ("softclock_call_cc: act %p %x", c, c->c_flags));
	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
	lock_status = 0;
	if (c->c_flags & CALLOUT_SHAREDLOCK) {
		if (class == &lock_class_rm)
			lock_status = (uintptr_t)&tracker;
		else
			lock_status = 1;
	}
	c_lock = c->c_lock;
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_iflags = c->c_iflags;
	c->c_iflags &= ~CALLOUT_PENDING;

	cc_exec_curr(cc, direct) = c;
	cc_exec_last_func(cc, direct) = c_func;
	cc_exec_last_arg(cc, direct) = c_arg;
	cc_exec_cancel(cc, direct) = false;
	cc_exec_drain(cc, direct) = NULL;
	CC_UNLOCK(cc);
	if (c_lock != NULL) {
		class->lc_lock(c_lock, lock_status);
		/*
		 * The callout may have been cancelled
		 * while we switched locks.
		 */
		if (cc_exec_cancel(cc, direct)) {
			class->lc_unlock(c_lock);
			goto skip;
		}
		/* The callout cannot be stopped now. */
		cc_exec_cancel(cc, direct) = true;
		if (c_lock == &Giant.lock_object) {
#ifdef CALLOUT_PROFILING
			(*gcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
			    c, c_func, c_arg);
		} else {
#ifdef CALLOUT_PROFILING
			(*lockcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
			    c, c_func, c_arg);
		}
	} else {
#ifdef CALLOUT_PROFILING
		(*mpcalls)++;
#endif
		CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
		    c, c_func, c_arg);
	}
	KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
	    "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbt1 = sbinuptime();
#endif
	THREAD_NO_SLEEPING();
	SDT_PROBE1(callout_execute, , , callout__start, c);
	c_func(c_arg);
	SDT_PROBE1(callout_execute, , , callout__end, c);
	THREAD_SLEEPING_OK();
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbt2 = sbinuptime();
	sbt2 -= sbt1;
	if (sbt2 > maxdt) {
		if (lastfunc != c_func || sbt2 > maxdt * 2) {
			ts2 = sbttots(sbt2);
			printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = sbt2;
		lastfunc = c_func;
	}
#endif
	KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
	CTR1(KTR_CALLOUT, "callout %p finished", c);
	if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0)
		class->lc_unlock(c_lock);
skip:
	CC_LOCK(cc);
	KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
	cc_exec_curr(cc, direct) = NULL;
	if (cc_exec_drain(cc, direct)) {
		drain = cc_exec_drain(cc, direct);
		cc_exec_drain(cc, direct) = NULL;
		CC_UNLOCK(cc);
		drain(c_arg);
		CC_LOCK(cc);
	}
	if (cc_exec_waiting(cc, direct)) {
		/*
		 * There is someone waiting for the
		 * callout to complete.
		 * If the callout was scheduled for
		 * migration, just cancel it.
		 */
		if (cc_cce_migrating(cc, direct)) {
			cc_cce_cleanup(cc, direct);

			/*
			 * It should be asserted here that the callout is
			 * not destroyed, but that is not easy.
			 */
			c->c_iflags &= ~CALLOUT_DFRMIGRATION;
		}
		cc_exec_waiting(cc, direct) = false;
		CC_UNLOCK(cc);
		wakeup(&cc_exec_waiting(cc, direct));
		CC_LOCK(cc);
	} else if (cc_cce_migrating(cc, direct)) {
#ifdef SMP
		/*
		 * If the callout was scheduled for
		 * migration, just perform it now.
		 */
		new_cpu = cc_migration_cpu(cc, direct);
		new_time = cc_migration_time(cc, direct);
		new_prec = cc_migration_prec(cc, direct);
		new_func = cc_migration_func(cc, direct);
		new_arg = cc_migration_arg(cc, direct);
		cc_cce_cleanup(cc, direct);

		/*
		 * It should be asserted here that the callout is not
		 * destroyed, but that is not easy.
		 *
		 * As first thing, handle deferred callout stops.
		 */
		if (!callout_migrating(c)) {
			CTR3(KTR_CALLOUT,
			    "deferred cancelled %p func %p arg %p",
			    c, new_func, new_arg);
			return;
		}
		c->c_iflags &= ~CALLOUT_DFRMIGRATION;

		new_cc = callout_cpu_switch(c, cc, new_cpu);
		flags = (direct) ? C_DIRECT_EXEC : 0;
		callout_cc_add(c, new_cc, new_time, new_prec, new_func,
		    new_arg, new_cpu, flags);
		CC_UNLOCK(new_cc);
		CC_LOCK(cc);
#else
		panic("migration should not happen");
#endif
	}
}

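/*
 * A sketch of how the cc_exec_drain hand-off above is used by consumers
 * (hypothetical names; callout_async_drain() is the <sys/callout.h>
 * wrapper that passes a drain function down to _callout_stop_safe()):
 *
 *	static void
 *	foo_drained(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		free(sc, M_DEVBUF);	// now safe: callout fully stopped
 *	}
 *
 *	// instead of blocking in callout_drain():
 *	callout_async_drain(&sc->foo_callout, foo_drained);
 */
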
/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */

/*
 * Software (low priority) clock interrupt thread handler.
 * Run periodic events from timeout queue.
 */
static void
softclock_thread(void *arg)
{
	struct thread *td = curthread;
	struct callout_cpu *cc;
	struct callout *c;
#ifdef CALLOUT_PROFILING
	int depth, gcalls, lockcalls, mpcalls;
#endif

	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	for (;;) {
		while (TAILQ_EMPTY(&cc->cc_expireq)) {
			/*
			 * Use CC_LOCK(cc) as the thread_lock while
			 * idle.
			 */
			thread_lock(td);
			thread_lock_set(td, (struct mtx *)&cc->cc_lock);
			TD_SET_IWAIT(td);
			mi_switch(SW_VOL | SWT_IWAIT);

			/* mi_switch() drops thread_lock(). */
			CC_LOCK(cc);
		}

#ifdef CALLOUT_PROFILING
		depth = gcalls = lockcalls = mpcalls = 0;
#endif
		while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
			TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
			softclock_call_cc(c, cc,
#ifdef CALLOUT_PROFILING
			    &mpcalls, &lockcalls, &gcalls,
#endif
			    0);
#ifdef CALLOUT_PROFILING
			++depth;
#endif
		}
#ifdef CALLOUT_PROFILING
		avg_depth += (depth * 1000 - avg_depth) >> 8;
		avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
		avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
		avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
#endif
	}
}

void
callout_when(sbintime_t sbt, sbintime_t precision, int flags,
    sbintime_t *res, sbintime_t *prec_res)
{
	sbintime_t to_sbt, to_pr;

	if ((flags & (C_ABSOLUTE | C_PRECALC)) != 0) {
		*res = sbt;
		*prec_res = precision;
		return;
	}
	if ((flags & C_HARDCLOCK) != 0 && sbt < tick_sbt)
		sbt = tick_sbt;
	if ((flags & C_HARDCLOCK) != 0 || sbt >= sbt_tickthreshold) {
		/*
		 * Obtain the time of the last hardclock() call on
		 * this CPU directly from kern_clocksource.c.
		 * This value is per-CPU, but it is equal for all
		 * active ones.
		 */
#ifdef __LP64__
		to_sbt = DPCPU_GET(hardclocktime);
#else
		spinlock_enter();
		to_sbt = DPCPU_GET(hardclocktime);
		spinlock_exit();
#endif
		if (cold && to_sbt == 0)
			to_sbt = sbinuptime();
		if ((flags & C_HARDCLOCK) == 0)
			to_sbt += tick_sbt;
	} else
		to_sbt = sbinuptime();
	if (SBT_MAX - to_sbt < sbt)
		to_sbt = SBT_MAX;
	else
		to_sbt += sbt;
	*res = to_sbt;
	to_pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
	    sbt >> C_PRELGET(flags));
	*prec_res = to_pr > precision ? to_pr : precision;
}

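/*
 * Example of the precision computation above (illustrative values): a
 * relative 100 ms timeout with C_PREL(2) asks for up to sbt >> 2 == 25 ms
 * of slop, trading accuracy for the chance to aggregate wakeups:
 *
 *	sbintime_t res, prec;
 *
 *	callout_when(100 * SBT_1MS, 0, C_PREL(2), &res, &prec);
 *	// res  ~= sbinuptime() + 100 ms
 *	// prec == 25 ms (unless the passed-in precision was larger)
 */
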
/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
int
callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
    callout_func_t *ftn, void *arg, int cpu, int flags)
{
	sbintime_t to_sbt, precision;
	struct callout_cpu *cc;
	int cancelled, direct;
	int ignore_cpu = 0;

	cancelled = 0;
	if (cpu == -1) {
		ignore_cpu = 1;
	} else if ((cpu >= MAXCPU) ||
	    ((CC_CPU(cpu))->cc_inited == 0)) {
		/* Invalid CPU spec */
		panic("Invalid CPU in callout %d", cpu);
	}
	callout_when(sbt, prec, flags, &to_sbt, &precision);

	/*
	 * This flag used to be added by callout_cc_add, but the
	 * first time you call this we could end up with the
	 * wrong direct flag if we don't do it before we add.
	 */
	if (flags & C_DIRECT_EXEC) {
		direct = 1;
	} else {
		direct = 0;
	}
	KASSERT(!direct || c->c_lock == NULL ||
	    (LOCK_CLASS(c->c_lock)->lc_flags & LC_SPINLOCK),
	    ("%s: direct callout %p has non-spin lock", __func__, c));
	cc = callout_lock(c);
	/*
	 * Don't allow migration if the user does not care.
	 */
	if (ignore_cpu) {
		cpu = c->c_cpu;
	}

	if (cc_exec_curr(cc, direct) == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc_exec_cancel(cc, direct))
			cancelled = cc_exec_cancel(cc, direct) = true;
		if (cc_exec_waiting(cc, direct) ||
		    cc_exec_drain(cc, direct)) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
#ifdef SMP
		if (callout_migrating(c)) {
			/*
			 * This only occurs when a second callout_reset_sbt_on
			 * is made after a previous one moved it into
			 * deferred migration (below).  Note we do *not* change
			 * the prev_cpu even though the previous target may
			 * be different.
			 */
			cc_migration_cpu(cc, direct) = cpu;
			cc_migration_time(cc, direct) = to_sbt;
			cc_migration_prec(cc, direct) = precision;
			cc_migration_func(cc, direct) = ftn;
			cc_migration_arg(cc, direct) = arg;
			cancelled = 1;
			CC_UNLOCK(cc);
			return (cancelled);
		}
#endif
	}
	if (c->c_iflags & CALLOUT_PENDING) {
		if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
			if (cc_exec_next(cc) == c)
				cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
			LIST_REMOVE(c, c_links.le);
		} else {
			TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
		}
		cancelled = 1;
		c->c_iflags &= ~CALLOUT_PENDING;
		c->c_flags &= ~CALLOUT_ACTIVE;
	}

#ifdef SMP
	/*
	 * If the callout must migrate, try to perform it immediately.
	 * If the callout is currently running, just defer the migration
	 * to a more appropriate moment.
	 */
	if (c->c_cpu != cpu) {
		if (cc_exec_curr(cc, direct) == c) {
			/*
			 * Pending will have been removed since we are
			 * actually executing the callout on another
			 * CPU.  That callout should be waiting on the
			 * lock the caller holds.  If we set both
			 * active and pending after we return and the
			 * lock on the executing callout proceeds, it
			 * will then see pending is true and return.
			 * At the return from the actual callout execution
			 * the migration will occur in softclock_call_cc
			 * and this new callout will be placed on the
			 * new CPU via a call to callout_cpu_switch() which
			 * will get the lock on the right CPU followed
			 * by a call to callout_cc_add() which will add it
			 * there.  (see above in softclock_call_cc()).
			 */
			cc_migration_cpu(cc, direct) = cpu;
			cc_migration_time(cc, direct) = to_sbt;
			cc_migration_prec(cc, direct) = precision;
			cc_migration_func(cc, direct) = ftn;
			cc_migration_arg(cc, direct) = arg;
			c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING);
			c->c_flags |= CALLOUT_ACTIVE;
			CTR6(KTR_CALLOUT,
			    "migration of %p func %p arg %p in %d.%08x to %u deferred",
			    c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
			    (u_int)(to_sbt & 0xffffffff), cpu);
			CC_UNLOCK(cc);
			return (cancelled);
		}
		cc = callout_cpu_switch(c, cc, cpu);
	}
#endif

	callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
	CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
	    (u_int)(to_sbt & 0xffffffff));
	CC_UNLOCK(cc);

	return (cancelled);
}

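/*
 * A minimal consumer sketch of the reset interface above (hypothetical
 * softc/handler names; callout_reset_sbt() is the <sys/callout.h>
 * wrapper that passes cpu == -1, i.e. no CPU preference):
 *
 *	static void
 *	foo_timer(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		// sc->mtx is held here because the callout was
 *		// initialized with callout_init_mtx(&sc->timer,
 *		// &sc->mtx, 0).  Re-arm in 500 ms with 50% slop:
 *		callout_reset_sbt(&sc->timer, 500 * SBT_1MS, 0,
 *		    foo_timer, sc, C_PREL(1));
 *	}
 */
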
/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{

	return (callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu));
}

int
callout_schedule(struct callout *c, int to_ticks)
{

	return (callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu));
}

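/*
 * Illustrative use of the idiom above (hypothetical names): once a
 * callout has been armed with callout_reset(), callout_schedule() can
 * re-arm it with the same function and argument, measured in ticks:
 *
 *	callout_reset(&sc->timer, hz, foo_timer, sc);	// first arm
 *	...
 *	callout_schedule(&sc->timer, hz / 2);	// re-arm, same func/arg
 */
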
int
_callout_stop_safe(struct callout *c, int flags, callout_func_t *drain)
{
	struct callout_cpu *cc, *old_cc;
	struct lock_class *class;
	int direct, sq_locked, use_lock;
	int cancelled, not_on_a_list;

	if ((flags & CS_DRAIN) != 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock,
		    "calling %s", __func__);

	KASSERT((flags & CS_DRAIN) == 0 || drain == NULL,
	    ("Cannot set drain callback and CS_DRAIN flag at the same time"));

	/*
	 * Some old subsystems don't hold Giant while running a callout_stop(),
	 * so just discard this check for the moment.
	 */
	if ((flags & CS_DRAIN) == 0 && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
		else {
			use_lock = 1;
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
		}
	} else
		use_lock = 0;
	if (c->c_iflags & CALLOUT_DIRECT) {
		direct = 1;
	} else {
		direct = 0;
	}
	sq_locked = 0;
	old_cc = NULL;
again:
	cc = callout_lock(c);

	if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) ==
	    (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) &&
	    ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) {
		/*
		 * Special case where this slipped in while we
		 * were migrating *as* the callout is about to
		 * execute.  The caller probably holds the lock
		 * the callout wants.
		 *
		 * Get rid of the migration first.  Then set
		 * the flag that tells this code *not* to
		 * try to remove it from any lists (it's not
		 * on one yet).  When the callout wheel runs,
		 * it will ignore this callout.
		 */
		c->c_iflags &= ~CALLOUT_PENDING;
		c->c_flags &= ~CALLOUT_ACTIVE;
		not_on_a_list = 1;
	} else {
		not_on_a_list = 0;
	}

	/*
	 * If the callout was migrating while the callout cpu lock was
	 * dropped, just drop the sleepqueue lock and check the states
	 * again.
	 */
	if (sq_locked != 0 && cc != old_cc) {
#ifdef SMP
		CC_UNLOCK(cc);
		sleepq_release(&cc_exec_waiting(old_cc, direct));
		sq_locked = 0;
		old_cc = NULL;
		goto again;
#else
		panic("migration should not happen");
#endif
	}

	/*
	 * If the callout is running, try to stop it or drain it.
	 */
	if (cc_exec_curr(cc, direct) == c) {
		/*
		 * Whether we succeed in stopping it or not, we must clear
		 * the active flag - this is what API users expect.  If we're
		 * draining and the callout is currently executing, first wait
		 * until it finishes.
		 */
		if ((flags & CS_DRAIN) == 0)
			c->c_flags &= ~CALLOUT_ACTIVE;

		if ((flags & CS_DRAIN) != 0) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			if (cc_exec_curr(cc, direct) == c) {
				/*
				 * Use direct calls to the sleepqueue
				 * interface instead of cv/msleep in order
				 * to avoid a LOR between cc_lock and the
				 * sleepqueue chain spinlocks.  This piece
				 * of code effectively emulates an
				 * msleep_spin() call.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(
					    &cc_exec_waiting(cc, direct));
					sq_locked = 1;
					old_cc = cc;
					goto again;
				}

				/*
				 * Migration could be cancelled here, but
				 * as long as it is still not sure when it
				 * will be packed up, just let softclock()
				 * take care of it.
				 */
				cc_exec_waiting(cc, direct) = true;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(
				    &cc_exec_waiting(cc, direct),
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(
				    &cc_exec_waiting(cc, direct), 0);
				sq_locked = 0;
				old_cc = NULL;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				goto again;
			}
			c->c_flags &= ~CALLOUT_ACTIVE;
		} else if (use_lock &&
		    !cc_exec_cancel(cc, direct) && (drain == NULL)) {

			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().  This *only* works with a
			 * callout_stop() *not* callout_drain() or
			 * callout_async_drain().
			 */
			cc_exec_cancel(cc, direct) = true;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			KASSERT(!cc_cce_migrating(cc, direct),
			    ("callout wrongly scheduled for migration"));
			if (callout_migrating(c)) {
				c->c_iflags &= ~CALLOUT_DFRMIGRATION;
#ifdef SMP
				cc_migration_cpu(cc, direct) = CPUBLOCK;
				cc_migration_time(cc, direct) = 0;
				cc_migration_prec(cc, direct) = 0;
				cc_migration_func(cc, direct) = NULL;
				cc_migration_arg(cc, direct) = NULL;
#endif
			}
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		} else if (callout_migrating(c)) {
			/*
			 * The callout is currently being serviced
			 * and the "next" callout is scheduled at
			 * its completion with a migration.  We remove
			 * the migration flag so it *won't* get rescheduled,
			 * but we can't stop the one that's running so
			 * we return 0.
			 */
			c->c_iflags &= ~CALLOUT_DFRMIGRATION;
#ifdef SMP
			/*
			 * We can't call cc_cce_cleanup here since
			 * if we do it will remove .ce_curr and
			 * it's still running.  This will prevent a
			 * reschedule of the callout when the
			 * execution completes.
			 */
			cc_migration_cpu(cc, direct) = CPUBLOCK;
			cc_migration_time(cc, direct) = 0;
			cc_migration_prec(cc, direct) = 0;
			cc_migration_func(cc, direct) = NULL;
			cc_migration_arg(cc, direct) = NULL;
#endif
			CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			if (drain) {
				KASSERT(cc_exec_drain(cc, direct) == NULL,
				    ("callout drain function already set to %p",
				    cc_exec_drain(cc, direct)));
				cc_exec_drain(cc, direct) = drain;
			}
			CC_UNLOCK(cc);
			return ((flags & CS_EXECUTING) != 0);
		} else {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			if (drain) {
				KASSERT(cc_exec_drain(cc, direct) == NULL,
				    ("callout drain function already set to %p",
				    cc_exec_drain(cc, direct)));
				cc_exec_drain(cc, direct) = drain;
			}
		}
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		cancelled = ((flags & CS_EXECUTING) != 0);
	} else
		cancelled = 1;

	if (sq_locked)
		sleepq_release(&cc_exec_waiting(cc, direct));

	if ((c->c_iflags & CALLOUT_PENDING) == 0) {
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		/*
		 * For a callout that is neither scheduled nor executing,
		 * return a negative value.
		 */
		if (cc_exec_curr(cc, direct) != c)
			cancelled = -1;
		CC_UNLOCK(cc);
		return (cancelled);
	}

	c->c_iflags &= ~CALLOUT_PENDING;
	c->c_flags &= ~CALLOUT_ACTIVE;

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);
	if (not_on_a_list == 0) {
		if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
			if (cc_exec_next(cc) == c)
				cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
			LIST_REMOVE(c, c_links.le);
		} else {
			TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
		}
	}
	CC_UNLOCK(cc);
	return (cancelled);
}

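/*
 * A sketch of how consumers interpret the tri-state return of the stop
 * path above (callout_stop() and callout_drain() in <sys/callout.h> are
 * wrappers around _callout_stop_safe(); "sc" is a hypothetical softc):
 *
 *	switch (callout_stop(&sc->timer)) {
 *	case 1:		// successfully cancelled a pending callout
 *	case -1:	// was neither scheduled nor executing
 *		break;
 *	case 0:		// too late: the handler is running or already ran
 *		break;
 *	}
 */
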
void
callout_init(struct callout *c, int mpsafe)
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_iflags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_iflags = 0;
	}
	c->c_cpu = cc_default_cpu;
}

void
_callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & LC_SLEEPABLE),
	    ("%s: callout %p has sleepable lock", __func__, c));
	c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = cc_default_cpu;
}

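/*
 * Typical initialization patterns for the functions above (sketch;
 * callout_init_mtx() and callout_init_rw() are the <sys/callout.h>
 * wrappers around _callout_init_lock(), and the softc names are
 * hypothetical):
 *
 *	callout_init(&sc->timer, 1);			// MP-safe, no lock
 *	callout_init_mtx(&sc->timer, &sc->mtx, 0);	// bound to a mutex
 */
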
static int
flssbt(sbintime_t sbt)
{

	sbt += (uint64_t)sbt >> 1;
	if (sizeof(long) >= sizeof(sbintime_t))
		return (flsl(sbt));
	if (sbt >= SBT_1S)
		return (flsl(((uint64_t)sbt) >> 32) + 32);
	return (flsl(sbt));
}

/*
 * Dump an immediate statistics snapshot of the scheduled callouts.
 */
static int
sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
{
	struct callout *tmp;
	struct callout_cpu *cc;
	struct callout_list *sc;
	sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
	int ct[64], cpr[64], ccpbk[32];
	int error, val, i, count, tcum, pcum, maxc, c, medc;
	int cpu;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	count = maxc = 0;
	st = spr = maxt = maxpr = 0;
	bzero(ccpbk, sizeof(ccpbk));
	bzero(ct, sizeof(ct));
	bzero(cpr, sizeof(cpr));
	now = sbinuptime();
	CPU_FOREACH(cpu) {
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		for (i = 0; i < callwheelsize; i++) {
			sc = &cc->cc_callwheel[i];
			c = 0;
			LIST_FOREACH(tmp, sc, c_links.le) {
				c++;
				t = tmp->c_time - now;
				if (t < 0)
					t = 0;
				st += t / SBT_1US;
				spr += tmp->c_precision / SBT_1US;
				if (t > maxt)
					maxt = t;
				if (tmp->c_precision > maxpr)
					maxpr = tmp->c_precision;
				ct[flssbt(t)]++;
				cpr[flssbt(tmp->c_precision)]++;
			}
			if (c > maxc)
				maxc = c;
			ccpbk[fls(c + c / 2)]++;
			count += c;
		}
		CC_UNLOCK(cc);
	}

	for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
		tcum += ct[i];
	medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
	for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
		pcum += cpr[i];
	medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
	for (i = 0, c = 0; i < 32 && c < count / 2; i++)
		c += ccpbk[i];
	medc = (i >= 2) ? (1 << (i - 2)) : 0;

	printf("Scheduled callouts statistics snapshot:\n");
	printf("  Callouts: %6d  Buckets: %6d*%-3d  Bucket size: 0.%06ds\n",
	    count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
	printf("  C/Bk: med %5d         avg %6d.%06jd  max %6d\n",
	    medc,
	    count / callwheelsize / mp_ncpus,
	    (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
	    maxc);
	printf("  Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
	    medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
	    (st / count) / 1000000, (st / count) % 1000000,
	    maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
	printf("  Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
	    medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
	    (spr / count) / 1000000, (spr / count) % 1000000,
	    maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
	printf("  Distribution:       \tbuckets\t   time\t   tcum\t"
	    "   prec\t   pcum\n");
	for (i = 0, tcum = pcum = 0; i < 64; i++) {
		if (ct[i] == 0 && cpr[i] == 0)
			continue;
		t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
		tcum += ct[i];
		pcum += cpr[i];
		printf("  %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
		    t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
		    i - 1 - (32 - CC_HASH_SHIFT),
		    ct[i], tcum, cpr[i], pcum);
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_callout_stat, "I",
    "Dump immediate statistics snapshot of the scheduled callouts");

#ifdef DDB
static void
_show_callout(struct callout *c)
{

	db_printf("callout %p\n", c);
#define	C_DB_PRINTF(f, e)	db_printf("   %s = " f "\n", #e, c->e);
	db_printf("   &c_links = %p\n", &(c->c_links));
	C_DB_PRINTF("%" PRId64,	c_time);
	C_DB_PRINTF("%" PRId64,	c_precision);
	C_DB_PRINTF("%p",	c_arg);
	C_DB_PRINTF("%p",	c_func);
	C_DB_PRINTF("%p",	c_lock);
	C_DB_PRINTF("%#x",	c_flags);
	C_DB_PRINTF("%#x",	c_iflags);
	C_DB_PRINTF("%d",	c_cpu);
#undef	C_DB_PRINTF
}

DB_SHOW_COMMAND(callout, db_show_callout)
{

	if (!have_addr) {
		db_printf("usage: show callout <struct callout *>\n");
		return;
	}

	_show_callout((struct callout *)addr);
}

static void
_show_last_callout(int cpu, int direct, const char *dirstr)
{
	struct callout_cpu *cc;
	void *func, *arg;

	cc = CC_CPU(cpu);
	func = cc_exec_last_func(cc, direct);
	arg = cc_exec_last_arg(cc, direct);
	db_printf("cpu %d last%s callout function: %p ", cpu, dirstr, func);
	db_printsym((db_expr_t)func, DB_STGY_ANY);
	db_printf("\ncpu %d last%s callout argument: %p\n", cpu, dirstr, arg);
}

DB_SHOW_COMMAND(callout_last, db_show_callout_last)
{
	int cpu, last;

	if (have_addr) {
		if (addr < 0 || addr > mp_maxid || CPU_ABSENT(addr)) {
			db_printf("no such cpu: %d\n", (int)addr);
			return;
		}
		cpu = last = addr;
	} else {
		cpu = 0;
		last = mp_maxid;
	}

	while (cpu <= last) {
		if (!CPU_ABSENT(cpu)) {
			_show_last_callout(cpu, 0, "");
			_show_last_callout(cpu, 1, " direct");
		}
		cpu++;
	}
}
#endif /* DDB */