/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_callout_profiling.h"
#if defined(__arm__)
#include "opt_timer.h"
#endif
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/file.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef SMP
#include <machine/cpu.h>
#endif

#ifndef NO_EVENTTIMERS
DPCPU_DECLARE(sbintime_t, hardclocktime);
#endif

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE1(callout_execute, kernel, , callout__start,
    "struct callout *");
SDT_PROBE_DEFINE1(callout_execute, kernel, , callout__end,
    "struct callout *");
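/*
 * A note on the profiling statistics below: each avg_* variable holds an
 * exponential moving average in units of 1/1000, updated at the end of
 * callout_process() and softclock() as "avg += (sample * 1000 - avg) >> 8",
 * so a reported value of 1000 corresponds to one item per call.
 */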
#ifdef CALLOUT_PROFILING
static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
static int avg_depth_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
    "Average number of direct callouts examined per callout_process call. "
    "Units = 1/1000");
static int avg_lockcalls_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
    &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
    "callout_process call. Units = 1/1000");
static int avg_mpcalls_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
    0, "Average number of MP direct callouts made per callout_process call. "
    "Units = 1/1000");
#endif

static int ncallout;
SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &ncallout, 0,
    "Number of entries in callwheel and size of timeout() preallocation");

#ifdef RSS
static int pin_default_swi = 1;
static int pin_pcpu_swi = 1;
#else
static int pin_default_swi = 0;
static int pin_pcpu_swi = 0;
#endif

SYSCTL_INT(_kern, OID_AUTO, pin_default_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_default_swi,
    0, "Pin the default (non-per-cpu) swi (shared with PCPU 0 swi)");
SYSCTL_INT(_kern, OID_AUTO, pin_pcpu_swi, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pin_pcpu_swi,
    0, "Pin the per-CPU swis (except PCPU 0, which is also the default)");

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
u_int callwheelsize, callwheelmask;

/*
 * The callout cpu exec entities represent the information necessary for
 * describing the state of callouts currently running on the CPU and the
 * information necessary for migrating callouts to a new callout cpu. In
 * particular, the first entry of the array cc_exec_entity holds the
 * information for a callout running in SWI thread context, while the second
 * one holds the information for a callout running directly from hardware
 * interrupt context.  The cached information is very important for deferring
 * migration when the migrating callout is already running.
 */
struct cc_exec {
	struct callout		*cc_curr;
	void			(*cc_drain)(void *);
#ifdef SMP
	void			(*ce_migration_func)(void *);
	void			*ce_migration_arg;
	int			ce_migration_cpu;
	sbintime_t		ce_migration_time;
	sbintime_t		ce_migration_prec;
#endif
	bool			cc_cancel;
	bool			cc_waiting;
};
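/*
 * Note: cc_exec_entity[] is indexed by the "direct" argument used throughout
 * this file: entry 0 tracks the callout executing in SWI thread context and
 * entry 1 the one executing directly from hardware interrupt context.
 */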
/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 */
struct callout_cpu {
	struct mtx_padalign	cc_lock;
	struct cc_exec		cc_exec_entity[2];
	struct callout		*cc_next;
	struct callout		*cc_callout;
	struct callout_list	*cc_callwheel;
	struct callout_tailq	cc_expireq;
	struct callout_slist	cc_callfree;
	sbintime_t		cc_firstevent;
	sbintime_t		cc_lastscan;
	void			*cc_cookie;
	u_int			cc_bucket;
	u_int			cc_inited;
	char			cc_ktr_event_name[20];
};

#define	callout_migrating(c)	((c)->c_iflags & CALLOUT_DFRMIGRATION)

#define	cc_exec_curr(cc, dir)		cc->cc_exec_entity[dir].cc_curr
#define	cc_exec_drain(cc, dir)		cc->cc_exec_entity[dir].cc_drain
#define	cc_exec_next(cc)		cc->cc_next
#define	cc_exec_cancel(cc, dir)		cc->cc_exec_entity[dir].cc_cancel
#define	cc_exec_waiting(cc, dir)	cc->cc_exec_entity[dir].cc_waiting
#ifdef SMP
#define	cc_migration_func(cc, dir)	cc->cc_exec_entity[dir].ce_migration_func
#define	cc_migration_arg(cc, dir)	cc->cc_exec_entity[dir].ce_migration_arg
#define	cc_migration_cpu(cc, dir)	cc->cc_exec_entity[dir].ce_migration_cpu
#define	cc_migration_time(cc, dir)	cc->cc_exec_entity[dir].ce_migration_time
#define	cc_migration_prec(cc, dir)	cc->cc_exec_entity[dir].ce_migration_prec

struct callout_cpu cc_cpu[MAXCPU];
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)

static int timeout_cpu;

static void	callout_cpu_init(struct callout_cpu *cc, int cpu);
static void	softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
		    int *mpcalls, int *lockcalls, int *gcalls,
#endif
		    int direct);

static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is cc_curr.
 *                     If cc_curr is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to 1 with both callout_lock and cc_lock held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops callout_lock to acquire c_lock, and it calls
 *                     the handler only if cc_cancel is still 0 after
 *                     cc_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     cc_waiting is true.  Set only when
 *                     cc_curr is non-NULL.
 */

/*
 * Resets the execution entity tied to a specific callout cpu.
 */
static void
cc_cce_cleanup(struct callout_cpu *cc, int direct)
{

	cc_exec_curr(cc, direct) = NULL;
	cc_exec_cancel(cc, direct) = false;
	cc_exec_waiting(cc, direct) = false;
#ifdef SMP
	cc_migration_cpu(cc, direct) = CPUBLOCK;
	cc_migration_time(cc, direct) = 0;
	cc_migration_prec(cc, direct) = 0;
	cc_migration_func(cc, direct) = NULL;
	cc_migration_arg(cc, direct) = NULL;
#endif
}
/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cce_migrating(struct callout_cpu *cc, int direct)
{

#ifdef SMP
	return (cc_migration_cpu(cc, direct) != CPUBLOCK);
#else
	return (0);
#endif
}

/*
 * Kernel low level callwheel initialization
 * called on cpu0 during kernel startup.
 */
static void
callout_callwheel_init(void *dummy)
{
	struct callout_cpu *cc;

	/*
	 * Calculate the size of the callout wheel and the preallocated
	 * timeout() structures.
	 * XXX: Clip callout to result of previous function of maxusers
	 * maximum 384.  This is still huge, but acceptable.
	 */
	memset(CC_CPU(0), 0, sizeof(cc_cpu));
	ncallout = imin(16 + maxproc + maxfiles, 18508);
	TUNABLE_INT_FETCH("kern.ncallout", &ncallout);

	/*
	 * Calculate callout wheel size, should be next power of two higher
	 * than 'ncallout'.
	 */
	callwheelsize = 1 << fls(ncallout);
	callwheelmask = callwheelsize - 1;

	/*
	 * Fetch whether we're pinning the swi's or not.
	 */
	TUNABLE_INT_FETCH("kern.pin_default_swi", &pin_default_swi);
	TUNABLE_INT_FETCH("kern.pin_pcpu_swi", &pin_pcpu_swi);

	/*
	 * Only cpu0 handles timeout(9) and receives a preallocation.
	 *
	 * XXX: Once all timeout(9) consumers are converted this can
	 * be removed.
	 */
	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	cc->cc_callout = malloc(ncallout * sizeof(struct callout),
	    M_CALLOUT, M_WAITOK);
	callout_cpu_init(cc, timeout_cpu);
}
SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
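/*
 * For example, with the default clip above ncallout is at most 18508;
 * fls(18508) == 15, so the callwheel then gets 1 << 15 == 32768 buckets.
 */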
/*
 * Initialize the per-cpu callout structures.
 */
static void
callout_cpu_init(struct callout_cpu *cc, int cpu)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	cc->cc_inited = 1;
	cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize,
	    M_CALLOUT, M_WAITOK);
	for (i = 0; i < callwheelsize; i++)
		LIST_INIT(&cc->cc_callwheel[i]);
	TAILQ_INIT(&cc->cc_expireq);
	cc->cc_firstevent = SBT_MAX;
	for (i = 0; i < 2; i++)
		cc_cce_cleanup(cc, i);
	snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
	    "callwheel cpu %d", cpu);
	if (cc->cc_callout == NULL)	/* Only cpu0 handles timeout(9) */
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_iflags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects a locked incoming callout cpu and returns with
 * locked outgoing callout cpu.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	/*
	 * Avoid interrupts and preemption firing after the callout cpu
	 * is blocked in order to avoid deadlocks as the new thread
	 * may want to acquire the callout cpu lock.
	 */
	c->c_cpu = CPUBLOCK;
	spinlock_enter();
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	spinlock_exit();
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif

/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
	char name[MAXCOMLEN];
#ifdef SMP
	int cpu;
	struct intr_event *ie;
#endif

	cc = CC_CPU(timeout_cpu);
	snprintf(name, sizeof(name), "clock (%d)", timeout_cpu);
	if (swi_add(&clk_intr_event, name, softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &cc->cc_cookie))
		panic("died while creating standard software ithreads");
	if (pin_default_swi &&
	    (intr_event_bind(clk_intr_event, timeout_cpu) != 0)) {
		printf("%s: timeout clock couldn't be pinned to cpu %d\n",
		    __func__,
		    timeout_cpu);
	}

#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(9). */
		callout_cpu_init(cc, cpu);
		snprintf(name, sizeof(name), "clock (%d)", cpu);
		ie = NULL;
		if (swi_add(&ie, name, softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		if (pin_pcpu_swi && (intr_event_bind(ie, cpu) != 0)) {
			printf("%s: per-cpu clock couldn't be pinned to "
			    "cpu %d\n",
			    __func__,
			    cpu);
		}
	}
#endif
}
SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

#define	CC_HASH_SHIFT	8

static inline u_int
callout_hash(sbintime_t sbt)
{

	return (sbt >> (32 - CC_HASH_SHIFT));
}

static inline u_int
callout_get_bucket(sbintime_t sbt)
{

	return (callout_hash(sbt) & callwheelmask);
}
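/*
 * With CC_HASH_SHIFT == 8 and sbintime_t being a 32.32 fixed-point value,
 * callout_hash() shifts by 24 bits, so consecutive buckets are 1/256 of a
 * second (about 3.9 ms) apart, and callout_get_bucket() wraps the hash onto
 * the wheel via callwheelmask.
 */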
void
callout_process(sbintime_t now)
{
	struct callout *tmp, *tmpn;
	struct callout_cpu *cc;
	struct callout_list *sc;
	sbintime_t first, last, max, tmp_max;
	uint32_t lookahead;
	u_int firstb, lastb, nowb;
#ifdef CALLOUT_PROFILING
	int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
#endif

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);

	/* Compute the buckets of the last scan and present times. */
	firstb = callout_hash(cc->cc_lastscan);
	cc->cc_lastscan = now;
	nowb = callout_hash(now);

	/* Compute the last bucket and minimum time of the bucket after it. */
	if (nowb == firstb)
		lookahead = (SBT_1S / 16);
	else if (nowb - firstb == 1)
		lookahead = (SBT_1S / 8);
	else
		lookahead = (SBT_1S / 2);
	first = last = now;
	first += (lookahead / 2);
	last += lookahead;
	last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
	lastb = callout_hash(last) - 1;
	max = last;

	/*
	 * Check if we wrapped around the entire wheel from the last scan.
	 * If so, we need to scan the entire wheel for pending callouts.
	 */
	if (lastb - firstb >= callwheelsize) {
		lastb = firstb + callwheelsize - 1;
		if (nowb - firstb >= callwheelsize)
			nowb = lastb;
	}

	/* Iterate callwheel from firstb to nowb and then up to lastb. */
	do {
		sc = &cc->cc_callwheel[firstb & callwheelmask];
		tmp = LIST_FIRST(sc);
		while (tmp != NULL) {
			/* Run the callout if present time within allowed. */
			if (tmp->c_time <= now) {
				/*
				 * Consumer told us the callout may be run
				 * directly from hardware interrupt context.
				 */
				if (tmp->c_iflags & CALLOUT_DIRECT) {
#ifdef CALLOUT_PROFILING
					++depth_dir;
#endif
					cc_exec_next(cc) =
					    LIST_NEXT(tmp, c_links.le);
					cc->cc_bucket = firstb & callwheelmask;
					LIST_REMOVE(tmp, c_links.le);
					softclock_call_cc(tmp, cc,
#ifdef CALLOUT_PROFILING
					    &mpcalls_dir, &lockcalls_dir, NULL,
#endif
					    1);
					tmp = cc_exec_next(cc);
					cc_exec_next(cc) = NULL;
				} else {
					tmpn = LIST_NEXT(tmp, c_links.le);
					LIST_REMOVE(tmp, c_links.le);
					TAILQ_INSERT_TAIL(&cc->cc_expireq,
					    tmp, c_links.tqe);
					tmp->c_iflags |= CALLOUT_PROCESSED;
					tmp = tmpn;
				}
				continue;
			}
			/* Skip events from distant future. */
			if (tmp->c_time >= max)
				goto next;
			/*
			 * The event's minimal time is bigger than the
			 * present maximal time, so it cannot be aggregated.
			 */
			if (tmp->c_time > last) {
				lastb = nowb;
				goto next;
			}
			/* Update first and last time, respecting this event. */
			if (tmp->c_time < first)
				first = tmp->c_time;
			tmp_max = tmp->c_time + tmp->c_precision;
			if (tmp_max < last)
				last = tmp_max;
next:
			tmp = LIST_NEXT(tmp, c_links.le);
		}
		/* Proceed with the next bucket. */
		firstb++;
		/*
		 * Stop if we looked after the present time and found
		 * some event we can't execute now.
		 * Stop if we looked far enough into the future.
		 */
	} while (((int)(firstb - lastb)) <= 0);
	cc->cc_firstevent = last;
#ifndef NO_EVENTTIMERS
	cpu_new_callout(curcpu, last, first);
#endif
#ifdef CALLOUT_PROFILING
	avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
	avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
	avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
#endif
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (!TAILQ_EMPTY(&cc->cc_expireq))
		swi_sched(cc->cc_cookie, 0);
}
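/*
 * Note on callout_process() above: C_DIRECT_EXEC callouts are executed
 * inline from hardware interrupt context, while all other expired callouts
 * are merely queued on cc_expireq and handed over to softclock() via
 * swi_sched() for execution in SWI thread context.
 */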
static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

static void
callout_cc_add(struct callout *c, struct callout_cpu *cc,
    sbintime_t sbt, sbintime_t precision, void (*func)(void *),
    void *arg, int cpu, int flags)
{
	int bucket;

	CC_LOCK_ASSERT(cc);
	if (sbt < cc->cc_lastscan)
		sbt = cc->cc_lastscan;
	c->c_arg = arg;
	c->c_iflags |= CALLOUT_PENDING;
	c->c_iflags &= ~CALLOUT_PROCESSED;
	c->c_flags |= CALLOUT_ACTIVE;
	if (flags & C_DIRECT_EXEC)
		c->c_iflags |= CALLOUT_DIRECT;
	c->c_func = func;
	c->c_time = sbt;
	c->c_precision = precision;
	bucket = callout_get_bucket(c->c_time);
	CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
	    c, (int)(c->c_precision >> 32),
	    (u_int)(c->c_precision & 0xffffffff));
	LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
	if (cc->cc_bucket == bucket)
		cc_exec_next(cc) = c;
#ifndef NO_EVENTTIMERS
	/*
	 * Inform the eventtimers(4) subsystem there's a new callout
	 * that has been inserted, but only if really required.
	 */
	if (SBT_MAX - c->c_time < c->c_precision)
		c->c_precision = SBT_MAX - c->c_time;
	sbt = c->c_time + c->c_precision;
	if (sbt < cc->cc_firstevent) {
		cc->cc_firstevent = sbt;
		cpu_new_callout(cpu, sbt, c->c_time);
	}
#endif
}

static void
callout_cc_del(struct callout *c, struct callout_cpu *cc)
{

	if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) == 0)
		return;
	c->c_func = NULL;
	SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
}
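/*
 * Execute a single callout.  softclock_call_cc() is entered with the
 * callout_cpu lock held; it drops that lock around the invocation of the
 * handler and reacquires it before processing drain requests, wakeups for
 * callout_drain() and any deferred migration.
 */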
static void
softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
    int *mpcalls, int *lockcalls, int *gcalls,
#endif
    int direct)
{
	struct rm_priotracker tracker;
	void (*c_func)(void *);
	void *c_arg;
	struct lock_class *class;
	struct lock_object *c_lock;
	uintptr_t lock_status;
	int c_iflags;
#ifdef SMP
	struct callout_cpu *new_cc;
	void (*new_func)(void *);
	void *new_arg;
	int flags, new_cpu;
	sbintime_t new_prec, new_time;
#endif
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbintime_t sbt1, sbt2;
	struct timespec ts2;
	static sbintime_t maxdt = 2 * SBT_1MS;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

	KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING,
	    ("softclock_call_cc: pend %p %x", c, c->c_iflags));
	KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE,
	    ("softclock_call_cc: act %p %x", c, c->c_flags));
	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
	lock_status = 0;
	if (c->c_flags & CALLOUT_SHAREDLOCK) {
		if (class == &lock_class_rm)
			lock_status = (uintptr_t)&tracker;
		else
			lock_status = 1;
	}
	c_lock = c->c_lock;
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_iflags = c->c_iflags;
	if (c->c_iflags & CALLOUT_LOCAL_ALLOC)
		c->c_iflags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_iflags &= ~CALLOUT_PENDING;

	cc_exec_curr(cc, direct) = c;
	cc_exec_cancel(cc, direct) = false;
	cc_exec_drain(cc, direct) = NULL;
	CC_UNLOCK(cc);
	if (c_lock != NULL) {
		class->lc_lock(c_lock, lock_status);
		/*
		 * The callout may have been cancelled
		 * while we switched locks.
		 */
		if (cc_exec_cancel(cc, direct)) {
			class->lc_unlock(c_lock);
			goto skip;
		}
		/* The callout cannot be stopped now. */
		cc_exec_cancel(cc, direct) = true;
		if (c_lock == &Giant.lock_object) {
#ifdef CALLOUT_PROFILING
			(*gcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
			    c, c_func, c_arg);
		} else {
#ifdef CALLOUT_PROFILING
			(*lockcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
			    c, c_func, c_arg);
		}
	} else {
#ifdef CALLOUT_PROFILING
		(*mpcalls)++;
#endif
		CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
		    c, c_func, c_arg);
	}
	KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
	    "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbt1 = sbinuptime();
#endif
	THREAD_NO_SLEEPING();
	SDT_PROBE1(callout_execute, kernel, , callout__start, c);
	c_func(c_arg);
	SDT_PROBE1(callout_execute, kernel, , callout__end, c);
	THREAD_SLEEPING_OK();
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbt2 = sbinuptime();
	sbt2 -= sbt1;
	if (sbt2 > maxdt) {
		if (lastfunc != c_func || sbt2 > maxdt * 2) {
			ts2 = sbttots(sbt2);
			printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = sbt2;
		lastfunc = c_func;
	}
#endif
	KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
	CTR1(KTR_CALLOUT, "callout %p finished", c);
	if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0)
		class->lc_unlock(c_lock);
skip:
	CC_LOCK(cc);
	KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
	cc_exec_curr(cc, direct) = NULL;
	if (cc_exec_drain(cc, direct)) {
		void (*drain)(void *);

		drain = cc_exec_drain(cc, direct);
		cc_exec_drain(cc, direct) = NULL;
		CC_UNLOCK(cc);
		drain(c_arg);
		CC_LOCK(cc);
	}
	if (cc_exec_waiting(cc, direct)) {
		/*
		 * There is someone waiting for the
		 * callout to complete.
		 * If the callout was scheduled for
		 * migration just cancel it.
		 */
		if (cc_cce_migrating(cc, direct)) {
			cc_cce_cleanup(cc, direct);

			/*
			 * It should be asserted here that the callout is
			 * not destroyed but that is not easy.
			 */
			c->c_iflags &= ~CALLOUT_DFRMIGRATION;
		}
		cc_exec_waiting(cc, direct) = false;
		CC_UNLOCK(cc);
		wakeup(&cc_exec_waiting(cc, direct));
		CC_LOCK(cc);
	} else if (cc_cce_migrating(cc, direct)) {
		KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0,
		    ("Migrating legacy callout %p", c));
#ifdef SMP
		/*
		 * If the callout was scheduled for
		 * migration just perform it now.
		 */
		new_cpu = cc_migration_cpu(cc, direct);
		new_time = cc_migration_time(cc, direct);
		new_prec = cc_migration_prec(cc, direct);
		new_func = cc_migration_func(cc, direct);
		new_arg = cc_migration_arg(cc, direct);
		cc_cce_cleanup(cc, direct);

		/*
		 * It should be asserted here that the callout is not
		 * destroyed but that is not easy.
		 *
		 * As a first thing, handle deferred callout stops.
		 */
		if (!callout_migrating(c)) {
			CTR3(KTR_CALLOUT,
			    "deferred cancelled %p func %p arg %p",
			    c, new_func, new_arg);
			callout_cc_del(c, cc);
			return;
		}
		c->c_iflags &= ~CALLOUT_DFRMIGRATION;

		new_cc = callout_cpu_switch(c, cc, new_cpu);
		flags = (direct) ? C_DIRECT_EXEC : 0;
		callout_cc_add(c, new_cc, new_time, new_prec, new_func,
		    new_arg, new_cpu, flags);
		CC_UNLOCK(new_cc);
		CC_LOCK(cc);
#else
		panic("migration should not happen");
#endif
	}
	/*
	 * If the current callout is locally allocated (from
	 * timeout(9)) then put it on the freelist.
	 *
	 * Note: we need to check the cached copy of c_iflags because
	 * if it was not local, then it's not safe to deref the
	 * callout pointer.
	 */
	KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0 ||
	    c->c_iflags == CALLOUT_LOCAL_ALLOC,
	    ("corrupted callout"));
	if (c_iflags & CALLOUT_LOCAL_ALLOC)
		callout_cc_del(c, cc);
}
/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
#ifdef CALLOUT_PROFILING
	int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
#endif

	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
		TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
		softclock_call_cc(c, cc,
#ifdef CALLOUT_PROFILING
		    &mpcalls, &lockcalls, &gcalls,
#endif
		    0);
#ifdef CALLOUT_PROFILING
		++depth;
#endif
	}
#ifdef CALLOUT_PROFILING
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
#endif
	CC_UNLOCK(cc);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(timeout_t *ftn, void *arg, int to_ticks)
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}
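/*
 * Example use of the legacy interface (sketch only; my_func and my_arg are
 * placeholder names): schedule my_func(my_arg) roughly 100 ms from now and
 * later cancel it if it has not run yet:
 *
 *	struct callout_handle h;
 *
 *	h = timeout(my_func, my_arg, hz / 10);
 *	untimeout(my_func, my_arg, h);
 */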
void
untimeout(timeout_t *ftn, void *arg, struct callout_handle handle)
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
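/*
 * Example use of this interface (sketch only; sc, sc->co, sc->mtx and
 * my_timer_fn are placeholder names): a driver that protects its callout
 * with its own mutex typically does
 *
 *	callout_init_mtx(&sc->co, &sc->mtx, 0);
 *	callout_reset(&sc->co, hz, my_timer_fn, sc);
 *
 * and calls callout_stop(&sc->co) with sc->mtx held to cancel it.
 */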
int
callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision,
    void (*ftn)(void *), void *arg, int cpu, int flags)
{
	sbintime_t to_sbt, pr;
	struct callout_cpu *cc;
	int cancelled, direct;
	int ignore_cpu = 0;

	cancelled = 0;
	if (cpu == -1) {
		ignore_cpu = 1;
	} else if ((cpu >= MAXCPU) ||
	    ((CC_CPU(cpu))->cc_inited == 0)) {
		/* Invalid CPU spec */
		panic("Invalid CPU in callout %d", cpu);
	}
	if (flags & C_ABSOLUTE) {
		to_sbt = sbt;
	} else {
		if ((flags & C_HARDCLOCK) && (sbt < tick_sbt))
			sbt = tick_sbt;
		if ((flags & C_HARDCLOCK) ||
#ifdef NO_EVENTTIMERS
		    sbt >= sbt_timethreshold) {
			to_sbt = getsbinuptime();

			/* Add safety belt for the case of hz > 1000. */
			to_sbt += tc_tick_sbt - tick_sbt;
#else
		    sbt >= sbt_tickthreshold) {
			/*
			 * Obtain the time of the last hardclock() call on
			 * this CPU directly from the kern_clocksource.c.
			 * This value is per-CPU, but it is equal for all
			 * active ones.
			 */
#ifdef __LP64__
			to_sbt = DPCPU_GET(hardclocktime);
#else
			spinlock_enter();
			to_sbt = DPCPU_GET(hardclocktime);
			spinlock_exit();
#endif
#endif
			if ((flags & C_HARDCLOCK) == 0)
				to_sbt += tick_sbt;
		} else
			to_sbt = sbinuptime();
		if (SBT_MAX - to_sbt < sbt)
			to_sbt = SBT_MAX;
		else
			to_sbt += sbt;
		pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
		    sbt >> C_PRELGET(flags));
		if (pr > precision)
			precision = pr;
	}
	/*
	 * This flag used to be added by callout_cc_add, but the
	 * first time you call this we could end up with the
	 * wrong direct flag if we don't do it before we add.
	 */
	if (flags & C_DIRECT_EXEC) {
		direct = 1;
	} else {
		direct = 0;
	}
	KASSERT(!direct || c->c_lock == NULL,
	    ("%s: direct callout %p has lock", __func__, c));
	cc = callout_lock(c);
	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced, and handle the case where the user does
	 * not care.
	 */
	if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) ||
	    ignore_cpu) {
		cpu = c->c_cpu;
	}

	if (cc_exec_curr(cc, direct) == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc_exec_cancel(cc, direct))
			cancelled = cc_exec_cancel(cc, direct) = true;
		if (cc_exec_waiting(cc, direct)) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
#ifdef SMP
		if (callout_migrating(c)) {
			/*
			 * This only occurs when a second callout_reset_sbt_on
			 * is made after a previous one moved it into
			 * deferred migration (below).  Note we do *not* change
			 * the prev_cpu even though the previous target may
			 * be different.
			 */
			cc_migration_cpu(cc, direct) = cpu;
			cc_migration_time(cc, direct) = to_sbt;
			cc_migration_prec(cc, direct) = precision;
			cc_migration_func(cc, direct) = ftn;
			cc_migration_arg(cc, direct) = arg;
			cancelled = 1;
			CC_UNLOCK(cc);
			return (cancelled);
		}
#endif
	}
	if (c->c_iflags & CALLOUT_PENDING) {
		if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
			if (cc_exec_next(cc) == c)
				cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
			LIST_REMOVE(c, c_links.le);
		} else {
			TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
		}
		cancelled = 1;
		c->c_iflags &= ~CALLOUT_PENDING;
		c->c_flags &= ~CALLOUT_ACTIVE;
	}

#ifdef SMP
	/*
	 * If the callout must migrate try to perform it immediately.
	 * If the callout is currently running, just defer the migration
	 * to a more appropriate moment.
	 */
	if (c->c_cpu != cpu) {
		if (cc_exec_curr(cc, direct) == c) {
			/*
			 * Pending will have been removed since we are
			 * actually executing the callout on another
			 * CPU.  That callout should be waiting on the
			 * lock the caller holds.  If we set both
			 * active and pending after we return and the
			 * lock on the executing callout proceeds, it
			 * will then see pending is true and return.
			 * At the return from the actual callout execution
			 * the migration will occur in softclock_call_cc
			 * and this new callout will be placed on the
			 * new CPU via a call to callout_cpu_switch() which
			 * will get the lock on the right CPU followed
			 * by a call to callout_cc_add() which will add it
			 * there.  (see above in softclock_call_cc()).
			 */
			cc_migration_cpu(cc, direct) = cpu;
			cc_migration_time(cc, direct) = to_sbt;
			cc_migration_prec(cc, direct) = precision;
			cc_migration_func(cc, direct) = ftn;
			cc_migration_arg(cc, direct) = arg;
			c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING);
			c->c_flags |= CALLOUT_ACTIVE;
			CTR6(KTR_CALLOUT,
			    "migration of %p func %p arg %p in %d.%08x to %u deferred",
			    c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
			    (u_int)(to_sbt & 0xffffffff), cpu);
			CC_UNLOCK(cc);
			return (cancelled);
		}
		cc = callout_cpu_switch(c, cc, cpu);
	}
#endif

	callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
	CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
	    (u_int)(to_sbt & 0xffffffff));
	CC_UNLOCK(cc);

	return (cancelled);
}
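/*
 * Note: callout_reset_sbt_on() returns non-zero when it cancelled a pending
 * (or about-to-run) instance of the callout while rescheduling it, and zero
 * when there was nothing to cancel.
 */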
/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}

int
_callout_stop_safe(struct callout *c, int safe, void (*drain)(void *))
{
	struct callout_cpu *cc, *old_cc;
	struct lock_class *class;
	int direct, sq_locked, use_lock;
	int not_on_a_list;

	if (safe)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock,
		    "calling %s", __func__);

	/*
	 * Some old subsystems don't hold Giant while running a callout_stop(),
	 * so just discard this check for the moment.
	 */
	if (!safe && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
		else {
			use_lock = 1;
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
		}
	} else
		use_lock = 0;
	if (c->c_iflags & CALLOUT_DIRECT) {
		direct = 1;
	} else {
		direct = 0;
	}
	sq_locked = 0;
	old_cc = NULL;
again:
	cc = callout_lock(c);

	if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) ==
	    (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) &&
	    ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) {
		/*
		 * Special case where this slipped in while we
		 * were migrating *as* the callout is about to
		 * execute.  The caller probably holds the lock
		 * the callout wants.
		 *
		 * Get rid of the migration first.  Then set
		 * the flag that tells this code *not* to
		 * try to remove it from any lists (it's not
		 * on one yet).  When the callout wheel runs,
		 * it will ignore this callout.
		 */
		c->c_iflags &= ~CALLOUT_PENDING;
		c->c_flags &= ~CALLOUT_ACTIVE;
		not_on_a_list = 1;
	} else {
		not_on_a_list = 0;
	}

	/*
	 * If the callout was migrating while the callout cpu lock was
	 * dropped, just drop the sleepqueue lock and check the states
	 * again.
	 */
	if (sq_locked != 0 && cc != old_cc) {
#ifdef SMP
		CC_UNLOCK(cc);
		sleepq_release(&cc_exec_waiting(old_cc, direct));
		sq_locked = 0;
		old_cc = NULL;
		goto again;
#else
		panic("migration should not happen");
#endif
	}

	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_iflags & CALLOUT_PENDING)) {
		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 * It probably has already been run (if locking
		 * is properly done).  You could get here if the caller
		 * calls stop twice in a row for example.  The second
		 * call would fall here without CALLOUT_ACTIVE set.
		 */
		c->c_flags &= ~CALLOUT_ACTIVE;
		if (cc_exec_curr(cc, direct) != c) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			if (sq_locked)
				sleepq_release(&cc_exec_waiting(cc, direct));
			return (-1);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc_exec_curr(cc, direct) == c) {
				/*
				 * Use direct calls to sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between cc_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * actually emulates an msleep_spin() call.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(
					    &cc_exec_waiting(cc, direct));
					sq_locked = 1;
					old_cc = cc;
					goto again;
				}

				/*
				 * Migration could be cancelled here, but
				 * as long as it is still not sure when it
				 * will be packed up, just let softclock()
				 * take care of it.
				 */
				cc_exec_waiting(cc, direct) = true;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(
				    &cc_exec_waiting(cc, direct),
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(
				    &cc_exec_waiting(cc, direct),
				    0);
				sq_locked = 0;
				old_cc = NULL;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
			}
		} else if (use_lock &&
		    !cc_exec_cancel(cc, direct) && (drain == NULL)) {

			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().  This *only* works with a
			 * callout_stop() *not* callout_drain() or
			 * callout_async_drain().
			 */
			cc_exec_cancel(cc, direct) = true;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			KASSERT(!cc_cce_migrating(cc, direct),
			    ("callout wrongly scheduled for migration"));
			if (callout_migrating(c)) {
				c->c_iflags &= ~CALLOUT_DFRMIGRATION;
#ifdef SMP
				cc_migration_cpu(cc, direct) = CPUBLOCK;
				cc_migration_time(cc, direct) = 0;
				cc_migration_prec(cc, direct) = 0;
				cc_migration_func(cc, direct) = NULL;
				cc_migration_arg(cc, direct) = NULL;
#endif
			}
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		} else if (callout_migrating(c)) {
			/*
			 * The callout is currently being serviced
			 * and the "next" callout is scheduled at
			 * its completion with a migration.  We remove
			 * the migration flag so it *won't* get rescheduled,
			 * but we can't stop the one that's running so
			 * we return 0.
			 */
			c->c_iflags &= ~CALLOUT_DFRMIGRATION;
#ifdef SMP
			/*
			 * We can't call cc_cce_cleanup here since
			 * if we do it will remove .ce_curr and
			 * it's still running.  This will prevent a
			 * reschedule of the callout when the
			 * execution completes.
			 */
			cc_migration_cpu(cc, direct) = CPUBLOCK;
			cc_migration_time(cc, direct) = 0;
			cc_migration_prec(cc, direct) = 0;
			cc_migration_func(cc, direct) = NULL;
			cc_migration_arg(cc, direct) = NULL;
#endif
			CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			if (drain) {
				cc_exec_drain(cc, direct) = drain;
			}
			CC_UNLOCK(cc);
			return (0);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		if (drain) {
			cc_exec_drain(cc, direct) = drain;
		}
		CC_UNLOCK(cc);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
	if (sq_locked)
		sleepq_release(&cc_exec_waiting(cc, direct));

	c->c_iflags &= ~CALLOUT_PENDING;
	c->c_flags &= ~CALLOUT_ACTIVE;

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);
	if (not_on_a_list == 0) {
		if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
			if (cc_exec_next(cc) == c)
				cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
			LIST_REMOVE(c, c_links.le);
		} else {
			TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
		}
	}
	callout_cc_del(c, cc);
	CC_UNLOCK(cc);
	return (1);
}

void
callout_init(struct callout *c, int mpsafe)
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_iflags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_iflags = 0;
	}
	c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}
#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.  - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(struct timeval *time_change)
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
		    time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
		    (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);

	return;
}
#endif /* APM_FIXUP_CALLTODO */

static int
flssbt(sbintime_t sbt)
{

	sbt += (uint64_t)sbt >> 1;
	if (sizeof(long) >= sizeof(sbintime_t))
		return (flsl(sbt));
	if (sbt >= SBT_1S)
		return (flsl(((uint64_t)sbt) >> 32) + 32);
	return (flsl(sbt));
}
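/*
 * Note: by adding half of the value before taking fls(), flssbt() rounds
 * its argument to the nearest power of two, which the log-scale histogram
 * code below relies on.
 */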
/*
 * Dump immediate statistic snapshot of the scheduled callouts.
 */
static int
sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
{
	struct callout *tmp;
	struct callout_cpu *cc;
	struct callout_list *sc;
	sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
	int ct[64], cpr[64], ccpbk[32];
	int error, val, i, count, tcum, pcum, maxc, c, medc;
#ifdef SMP
	int cpu;
#endif

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	count = maxc = 0;
	st = spr = maxt = maxpr = 0;
	bzero(ccpbk, sizeof(ccpbk));
	bzero(ct, sizeof(ct));
	bzero(cpr, sizeof(cpr));
	now = sbinuptime();
#ifdef SMP
	CPU_FOREACH(cpu) {
		cc = CC_CPU(cpu);
#else
		cc = CC_CPU(timeout_cpu);
#endif
		CC_LOCK(cc);
		for (i = 0; i < callwheelsize; i++) {
			sc = &cc->cc_callwheel[i];
			c = 0;
			LIST_FOREACH(tmp, sc, c_links.le) {
				c++;
				t = tmp->c_time - now;
				if (t < 0)
					t = 0;
				st += t / SBT_1US;
				spr += tmp->c_precision / SBT_1US;
				if (t > maxt)
					maxt = t;
				if (tmp->c_precision > maxpr)
					maxpr = tmp->c_precision;
				ct[flssbt(t)]++;
				cpr[flssbt(tmp->c_precision)]++;
			}
			if (c > maxc)
				maxc = c;
			ccpbk[fls(c + c / 2)]++;
			count += c;
		}
		CC_UNLOCK(cc);
#ifdef SMP
	}
#endif

	for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
		tcum += ct[i];
	medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
	for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
		pcum += cpr[i];
	medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
	for (i = 0, c = 0; i < 32 && c < count / 2; i++)
		c += ccpbk[i];
	medc = (i >= 2) ? (1 << (i - 2)) : 0;

	printf("Scheduled callouts statistic snapshot:\n");
	printf("  Callouts: %6d  Buckets: %6d*%-3d  Bucket size: 0.%06ds\n",
	    count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
	printf("  C/Bk: med %5d         avg %6d.%06jd  max %6d\n",
	    medc,
	    count / callwheelsize / mp_ncpus,
	    (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
	    maxc);
	printf("  Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
	    medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
	    (st / count) / 1000000, (st / count) % 1000000,
	    maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
	printf("  Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
	    medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
	    (spr / count) / 1000000, (spr / count) % 1000000,
	    maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
	printf("  Distribution:       \tbuckets\t   time\t   tcum\t"
	    "   prec\t   pcum\n");
	for (i = 0, tcum = pcum = 0; i < 64; i++) {
		if (ct[i] == 0 && cpr[i] == 0)
			continue;
		t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
		tcum += ct[i];
		pcum += cpr[i];
		printf("  %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
		    t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
		    i - 1 - (32 - CC_HASH_SHIFT),
		    ct[i], tcum, cpr[i], pcum);
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_callout_stat, "I",
    "Dump immediate statistic snapshot of the scheduled callouts");