/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_callout_profiling.h"
#include "opt_kdtrace.h"
#if defined(__arm__)
#include "opt_timer.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/file.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef SMP
#include <machine/cpu.h>
#endif

#ifndef NO_EVENTTIMERS
DPCPU_DECLARE(sbintime_t, hardclocktime);
#endif

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE1(callout_execute, kernel, , callout_start, callout-start,
    "struct callout *");
SDT_PROBE_DEFINE1(callout_execute, kernel, , callout_end, callout-end,
    "struct callout *");

#ifdef CALLOUT_PROFILING
static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
Units = 1/1000"); 87 static int avg_mpcalls; 88 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0, 89 "Average number of MP callouts made per softclock call. Units = 1/1000"); 90 static int avg_depth_dir; 91 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0, 92 "Average number of direct callouts examined per callout_process call. " 93 "Units = 1/1000"); 94 static int avg_lockcalls_dir; 95 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD, 96 &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per " 97 "callout_process call. Units = 1/1000"); 98 static int avg_mpcalls_dir; 99 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir, 100 0, "Average number of MP direct callouts made per callout_process call. " 101 "Units = 1/1000"); 102 #endif 103 104 static int ncallout; 105 SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN, &ncallout, 0, 106 "Number of entries in callwheel and size of timeout() preallocation"); 107 108 /* 109 * TODO: 110 * allocate more timeout table slots when table overflows. 111 */ 112 u_int callwheelsize, callwheelmask; 113 114 /* 115 * The callout cpu exec entities represent informations necessary for 116 * describing the state of callouts currently running on the CPU and the ones 117 * necessary for migrating callouts to the new callout cpu. In particular, 118 * the first entry of the array cc_exec_entity holds informations for callout 119 * running in SWI thread context, while the second one holds informations 120 * for callout running directly from hardware interrupt context. 121 * The cached informations are very important for deferring migration when 122 * the migrating callout is already running. 123 */ 124 struct cc_exec { 125 struct callout *cc_next; 126 struct callout *cc_curr; 127 #ifdef SMP 128 void (*ce_migration_func)(void *); 129 void *ce_migration_arg; 130 int ce_migration_cpu; 131 sbintime_t ce_migration_time; 132 sbintime_t ce_migration_prec; 133 #endif 134 bool cc_cancel; 135 bool cc_waiting; 136 }; 137 138 /* 139 * There is one struct callout_cpu per cpu, holding all relevant 140 * state for the callout processing thread on the individual CPU. 
 */
struct callout_cpu {
	struct mtx_padalign	cc_lock;
	struct cc_exec		cc_exec_entity[2];
	struct callout		*cc_callout;
	struct callout_list	*cc_callwheel;
	struct callout_tailq	cc_expireq;
	struct callout_slist	cc_callfree;
	sbintime_t		cc_firstevent;
	sbintime_t		cc_lastscan;
	void			*cc_cookie;
	u_int			cc_bucket;
};

#define	cc_exec_curr		cc_exec_entity[0].cc_curr
#define	cc_exec_next		cc_exec_entity[0].cc_next
#define	cc_exec_cancel		cc_exec_entity[0].cc_cancel
#define	cc_exec_waiting		cc_exec_entity[0].cc_waiting
#define	cc_exec_curr_dir	cc_exec_entity[1].cc_curr
#define	cc_exec_next_dir	cc_exec_entity[1].cc_next
#define	cc_exec_cancel_dir	cc_exec_entity[1].cc_cancel
#define	cc_exec_waiting_dir	cc_exec_entity[1].cc_waiting

#ifdef SMP
#define	cc_migration_func	cc_exec_entity[0].ce_migration_func
#define	cc_migration_arg	cc_exec_entity[0].ce_migration_arg
#define	cc_migration_cpu	cc_exec_entity[0].ce_migration_cpu
#define	cc_migration_time	cc_exec_entity[0].ce_migration_time
#define	cc_migration_prec	cc_exec_entity[0].ce_migration_prec
#define	cc_migration_func_dir	cc_exec_entity[1].ce_migration_func
#define	cc_migration_arg_dir	cc_exec_entity[1].ce_migration_arg
#define	cc_migration_cpu_dir	cc_exec_entity[1].ce_migration_cpu
#define	cc_migration_time_dir	cc_exec_entity[1].ce_migration_time
#define	cc_migration_prec_dir	cc_exec_entity[1].ce_migration_prec

struct callout_cpu cc_cpu[MAXCPU];
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)

static int timeout_cpu;

static void	callout_cpu_init(struct callout_cpu *cc);
static void	softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
		    int *mpcalls, int *lockcalls, int *gcalls,
#endif
		    int direct);

static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr    - If a callout is in progress, it is cc_curr.
 *                If cc_curr is non-NULL, threads waiting in
 *                callout_drain() will be woken up as soon as the
 *                relevant callout completes.
 *   cc_cancel  - Changing to 1 with both callout_lock and cc_lock held
 *                guarantees that the current callout will not run.
 *                The softclock() function sets this to 0 before it
 *                drops callout_lock to acquire c_lock, and it calls
 *                the handler only if curr_cancelled is still 0 after
 *                cc_lock is successfully acquired.
 *   cc_waiting - If a thread is waiting in callout_drain(), then
 *                callout_wait is nonzero.  Set only when
 *                cc_curr is non-NULL.
 */

/*
 * Resets the execution entity tied to a specific callout cpu.
 */
static void
cc_cce_cleanup(struct callout_cpu *cc, int direct)
{

	cc->cc_exec_entity[direct].cc_curr = NULL;
	cc->cc_exec_entity[direct].cc_next = NULL;
	cc->cc_exec_entity[direct].cc_cancel = false;
	cc->cc_exec_entity[direct].cc_waiting = false;
#ifdef SMP
	cc->cc_exec_entity[direct].ce_migration_cpu = CPUBLOCK;
	cc->cc_exec_entity[direct].ce_migration_time = 0;
	cc->cc_exec_entity[direct].ce_migration_prec = 0;
	cc->cc_exec_entity[direct].ce_migration_func = NULL;
	cc->cc_exec_entity[direct].ce_migration_arg = NULL;
#endif
}

/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cce_migrating(struct callout_cpu *cc, int direct)
{

#ifdef SMP
	return (cc->cc_exec_entity[direct].ce_migration_cpu != CPUBLOCK);
#else
	return (0);
#endif
}

/*
 * Kernel low level callwheel initialization
 * called on cpu0 during kernel startup.
 */
static void
callout_callwheel_init(void *dummy)
{
	struct callout_cpu *cc;

	/*
	 * Calculate the size of the callout wheel and the preallocated
	 * timeout() structures.
	 * XXX: Clip callout to result of previous function of maxusers
	 * maximum 384.  This is still huge, but acceptable.
	 */
	ncallout = imin(16 + maxproc + maxfiles, 18508);
	TUNABLE_INT_FETCH("kern.ncallout", &ncallout);

	/*
	 * Calculate callout wheel size, should be next power of two higher
	 * than 'ncallout'.
	 */
	callwheelsize = 1 << fls(ncallout);
	callwheelmask = callwheelsize - 1;

	/*
	 * Only cpu0 handles timeout(9) and receives a preallocation.
	 *
	 * XXX: Once all timeout(9) consumers are converted this can
	 * be removed.
	 */
	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	cc->cc_callout = malloc(ncallout * sizeof(struct callout),
	    M_CALLOUT, M_WAITOK);
	callout_cpu_init(cc);
}
SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);

/*
 * Initialize the per-cpu callout structures.
 */
static void
callout_cpu_init(struct callout_cpu *cc)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize,
	    M_CALLOUT, M_WAITOK);
	for (i = 0; i < callwheelsize; i++)
		LIST_INIT(&cc->cc_callwheel[i]);
	TAILQ_INIT(&cc->cc_expireq);
	cc->cc_firstevent = INT64_MAX;
	for (i = 0; i < 2; i++)
		cc_cce_cleanup(cc, i);
	if (cc->cc_callout == NULL)	/* Only cpu0 handles timeout(9) */
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects a locked incoming callout cpu and returns with a
 * locked outgoing callout cpu.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	/*
	 * Avoid interrupts and preemption firing after the callout cpu
	 * is blocked in order to avoid deadlocks as the new thread
	 * may be willing to acquire the callout cpu lock.
	 */
	c->c_cpu = CPUBLOCK;
	spinlock_enter();
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	spinlock_exit();
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif

/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &cc->cc_cookie))
		panic("died while creating standard software ithreads");
#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(9). */
		callout_cpu_init(cc);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
	}
#endif
}
SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

#define	CC_HASH_SHIFT	8

static inline u_int
callout_hash(sbintime_t sbt)
{

	return (sbt >> (32 - CC_HASH_SHIFT));
}

static inline u_int
callout_get_bucket(sbintime_t sbt)
{

	return (callout_hash(sbt) & callwheelmask);
}
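
/*
 * Note on the mapping above: sbintime_t is a 32.32 fixed-point value, with
 * whole seconds in the upper 32 bits and fractions of a second in the lower
 * 32.  Shifting right by (32 - CC_HASH_SHIFT) therefore makes consecutive
 * hash values cover 1/2^CC_HASH_SHIFT of a second each (about 3.9 ms with
 * CC_HASH_SHIFT == 8), and masking with callwheelmask wraps that value onto
 * the power-of-two sized callwheel.
 */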

void
callout_process(sbintime_t now)
{
	struct callout *tmp, *tmpn;
	struct callout_cpu *cc;
	struct callout_list *sc;
	sbintime_t first, last, max, tmp_max;
	uint32_t lookahead;
	u_int firstb, lastb, nowb;
#ifdef CALLOUT_PROFILING
	int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
#endif

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);

	/* Compute the buckets of the last scan and present times. */
	firstb = callout_hash(cc->cc_lastscan);
	cc->cc_lastscan = now;
	nowb = callout_hash(now);

	/* Compute the last bucket and minimum time of the bucket after it. */
	if (nowb == firstb)
		lookahead = (SBT_1S / 16);
	else if (nowb - firstb == 1)
		lookahead = (SBT_1S / 8);
	else
		lookahead = (SBT_1S / 2);
	first = last = now;
	first += (lookahead / 2);
	last += lookahead;
	last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
	lastb = callout_hash(last) - 1;
	max = last;

	/*
	 * Check if we wrapped around the entire wheel from the last scan.
	 * In that case we need to scan the entire wheel for pending callouts.
	 */
	if (lastb - firstb >= callwheelsize) {
		lastb = firstb + callwheelsize - 1;
		if (nowb - firstb >= callwheelsize)
			nowb = lastb;
	}

	/* Iterate callwheel from firstb to nowb and then up to lastb. */
	do {
		sc = &cc->cc_callwheel[firstb & callwheelmask];
		tmp = LIST_FIRST(sc);
		while (tmp != NULL) {
			/* Run the callout if the present time is within the allowed window. */
			if (tmp->c_time <= now) {
				/*
				 * Consumer told us the callout may be run
				 * directly from hardware interrupt context.
				 */
				if (tmp->c_flags & CALLOUT_DIRECT) {
#ifdef CALLOUT_PROFILING
					++depth_dir;
#endif
					cc->cc_exec_next_dir =
					    LIST_NEXT(tmp, c_links.le);
					cc->cc_bucket = firstb & callwheelmask;
					LIST_REMOVE(tmp, c_links.le);
					softclock_call_cc(tmp, cc,
#ifdef CALLOUT_PROFILING
					    &mpcalls_dir, &lockcalls_dir, NULL,
#endif
					    1);
					tmp = cc->cc_exec_next_dir;
				} else {
					tmpn = LIST_NEXT(tmp, c_links.le);
					LIST_REMOVE(tmp, c_links.le);
					TAILQ_INSERT_TAIL(&cc->cc_expireq,
					    tmp, c_links.tqe);
					tmp->c_flags |= CALLOUT_PROCESSED;
					tmp = tmpn;
				}
				continue;
			}
			/* Skip events in the distant future. */
			if (tmp->c_time >= max)
				goto next;
			/*
			 * The event's minimal time is past the present
			 * maximal time, so it cannot be aggregated.
			 */
			if (tmp->c_time > last) {
				lastb = nowb;
				goto next;
			}
			/* Update first and last time, respecting this event. */
			if (tmp->c_time < first)
				first = tmp->c_time;
			tmp_max = tmp->c_time + tmp->c_precision;
			if (tmp_max < last)
				last = tmp_max;
next:
			tmp = LIST_NEXT(tmp, c_links.le);
		}
		/* Proceed with the next bucket. */
		firstb++;
		/*
		 * Stop if we have looked past the present time and found
		 * an event we cannot execute now.
		 * Stop if we looked far enough into the future.
		 */
	} while (((int)(firstb - lastb)) <= 0);
	cc->cc_firstevent = last;
#ifndef NO_EVENTTIMERS
	cpu_new_callout(curcpu, last, first);
#endif
#ifdef CALLOUT_PROFILING
	avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
	avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
	avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
#endif
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (!TAILQ_EMPTY(&cc->cc_expireq))
		swi_sched(cc->cc_cookie, 0);
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

static void
callout_cc_add(struct callout *c, struct callout_cpu *cc,
    sbintime_t sbt, sbintime_t precision, void (*func)(void *),
    void *arg, int cpu, int flags)
{
	int bucket;

	CC_LOCK_ASSERT(cc);
	if (sbt < cc->cc_lastscan)
		sbt = cc->cc_lastscan;
	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	if (flags & C_DIRECT_EXEC)
		c->c_flags |= CALLOUT_DIRECT;
	c->c_flags &= ~CALLOUT_PROCESSED;
	c->c_func = func;
	c->c_time = sbt;
	c->c_precision = precision;
	bucket = callout_get_bucket(c->c_time);
	CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
	    c, (int)(c->c_precision >> 32),
	    (u_int)(c->c_precision & 0xffffffff));
	LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
	if (cc->cc_bucket == bucket)
		cc->cc_exec_next_dir = c;
#ifndef NO_EVENTTIMERS
	/*
	 * Inform the eventtimers(4) subsystem there's a new callout
	 * that has been inserted, but only if really required.
	 */
	sbt = c->c_time + c->c_precision;
	if (sbt < cc->cc_firstevent) {
		cc->cc_firstevent = sbt;
		cpu_new_callout(cpu, sbt, c->c_time);
	}
#endif
}

static void
callout_cc_del(struct callout *c, struct callout_cpu *cc)
{

	if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0)
		return;
	c->c_func = NULL;
	SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
}

static void
softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
    int *mpcalls, int *lockcalls, int *gcalls,
#endif
    int direct)
{
	void (*c_func)(void *);
	void *c_arg;
	struct lock_class *class;
	struct lock_object *c_lock;
	int c_flags, sharedlock;
#ifdef SMP
	struct callout_cpu *new_cc;
	void (*new_func)(void *);
	void *new_arg;
	int flags, new_cpu;
	sbintime_t new_prec, new_time;
#endif
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbintime_t sbt1, sbt2;
	struct timespec ts2;
	static sbintime_t maxdt = 2 * SBT_1MS;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

	KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) ==
	    (CALLOUT_PENDING | CALLOUT_ACTIVE),
	    ("softclock_call_cc: pend|act %p %x", c, c->c_flags));
	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
	sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
	c_lock = c->c_lock;
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_flags = c->c_flags;
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		c->c_flags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_flags &= ~CALLOUT_PENDING;
	cc->cc_exec_entity[direct].cc_curr = c;
	cc->cc_exec_entity[direct].cc_cancel = false;
	CC_UNLOCK(cc);
	if (c_lock != NULL) {
		class->lc_lock(c_lock, sharedlock);
		/*
		 * The callout may have been cancelled
		 * while we switched locks.
		 */
		if (cc->cc_exec_entity[direct].cc_cancel) {
			class->lc_unlock(c_lock);
			goto skip;
		}
		/* The callout cannot be stopped now. */
		cc->cc_exec_entity[direct].cc_cancel = true;
		if (c_lock == &Giant.lock_object) {
#ifdef CALLOUT_PROFILING
			(*gcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
			    c, c_func, c_arg);
		} else {
#ifdef CALLOUT_PROFILING
			(*lockcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
			    c, c_func, c_arg);
		}
	} else {
#ifdef CALLOUT_PROFILING
		(*mpcalls)++;
#endif
		CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
		    c, c_func, c_arg);
	}
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbt1 = sbinuptime();
#endif
	THREAD_NO_SLEEPING();
	SDT_PROBE(callout_execute, kernel, , callout_start, c, 0, 0, 0, 0);
	c_func(c_arg);
	SDT_PROBE(callout_execute, kernel, , callout_end, c, 0, 0, 0, 0);
	THREAD_SLEEPING_OK();
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbt2 = sbinuptime();
	sbt2 -= sbt1;
	if (sbt2 > maxdt) {
		if (lastfunc != c_func || sbt2 > maxdt * 2) {
			ts2 = sbttots(sbt2);
			printf(
			    "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = sbt2;
		lastfunc = c_func;
	}
#endif
	CTR1(KTR_CALLOUT, "callout %p finished", c);
	if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
		class->lc_unlock(c_lock);
skip:
	CC_LOCK(cc);
	KASSERT(cc->cc_exec_entity[direct].cc_curr == c, ("mishandled cc_curr"));
	cc->cc_exec_entity[direct].cc_curr = NULL;
	if (cc->cc_exec_entity[direct].cc_waiting) {
		/*
		 * There is someone waiting for the
		 * callout to complete.
		 * If the callout was scheduled for
		 * migration just cancel it.
		 */
		if (cc_cce_migrating(cc, direct)) {
			cc_cce_cleanup(cc, direct);

			/*
			 * It should be asserted here that the callout is not
			 * destroyed but that is not easy.
			 */
			c->c_flags &= ~CALLOUT_DFRMIGRATION;
		}
		cc->cc_exec_entity[direct].cc_waiting = false;
		CC_UNLOCK(cc);
		wakeup(&cc->cc_exec_entity[direct].cc_waiting);
		CC_LOCK(cc);
	} else if (cc_cce_migrating(cc, direct)) {
		KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0,
		    ("Migrating legacy callout %p", c));
#ifdef SMP
		/*
		 * If the callout was scheduled for
		 * migration just perform it now.
		 */
		new_cpu = cc->cc_exec_entity[direct].ce_migration_cpu;
		new_time = cc->cc_exec_entity[direct].ce_migration_time;
		new_prec = cc->cc_exec_entity[direct].ce_migration_prec;
		new_func = cc->cc_exec_entity[direct].ce_migration_func;
		new_arg = cc->cc_exec_entity[direct].ce_migration_arg;
		cc_cce_cleanup(cc, direct);

		/*
		 * It should be asserted here that the callout is not
		 * destroyed but that is not easy.
		 *
		 * As a first step, handle deferred callout stops.
		 */
		if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
			CTR3(KTR_CALLOUT,
			    "deferred cancelled %p func %p arg %p",
			    c, new_func, new_arg);
			callout_cc_del(c, cc);
			return;
		}
		c->c_flags &= ~CALLOUT_DFRMIGRATION;

		new_cc = callout_cpu_switch(c, cc, new_cpu);
		flags = (direct) ? C_DIRECT_EXEC : 0;
		callout_cc_add(c, new_cc, new_time, new_prec, new_func,
		    new_arg, new_cpu, flags);
		CC_UNLOCK(new_cc);
		CC_LOCK(cc);
#else
		panic("migration should not happen");
#endif
	}
	/*
	 * If the current callout is locally allocated (from
	 * timeout(9)) then put it on the freelist.
	 *
	 * Note: we need to check the cached copy of c_flags because
	 * if it was not local, then it's not safe to deref the
	 * callout pointer.
	 */
	KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
	    c->c_flags == CALLOUT_LOCAL_ALLOC,
	    ("corrupted callout"));
	if (c_flags & CALLOUT_LOCAL_ALLOC)
		callout_cc_del(c, cc);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
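
/*
 * Execution contexts, for reference: callout_process() above runs from the
 * timer interrupt path and executes CALLOUT_DIRECT callouts inline via
 * softclock_call_cc(); everything else is queued on cc_expireq and handled
 * by softclock() below, which runs in the per-CPU SWI thread created by
 * start_softclock().
 */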

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
#ifdef CALLOUT_PROFILING
	int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
#endif

	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
		TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
		softclock_call_cc(c, cc,
#ifdef CALLOUT_PROFILING
		    &mpcalls, &lockcalls, &gcalls,
#endif
		    0);
#ifdef CALLOUT_PROFILING
		++depth;
#endif
	}
#ifdef CALLOUT_PROFILING
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
#endif
	CC_UNLOCK(cc);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
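
/*
 * Illustrative (hypothetical) consumer, not part of this file: a driver
 * typically embeds a struct callout in its softc, initializes it once with
 * callout_init_mtx(&sc->co, &sc->mtx, 0), arms it with
 * callout_reset(&sc->co, hz, my_timer_fn, sc) while holding sc->mtx, and
 * tears it down with callout_drain(&sc->co) once no further rescheduling
 * is possible.
 */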
"cancelled" : "failed to cancel", 975 c, c->c_func, c->c_arg); 976 CC_UNLOCK(cc); 977 return (cancelled); 978 } 979 } 980 if (c->c_flags & CALLOUT_PENDING) { 981 if ((c->c_flags & CALLOUT_PROCESSED) == 0) { 982 if (cc->cc_exec_next_dir == c) 983 cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le); 984 LIST_REMOVE(c, c_links.le); 985 } else 986 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 987 cancelled = 1; 988 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 989 } 990 991 #ifdef SMP 992 /* 993 * If the callout must migrate try to perform it immediately. 994 * If the callout is currently running, just defer the migration 995 * to a more appropriate moment. 996 */ 997 if (c->c_cpu != cpu) { 998 if (cc->cc_exec_entity[direct].cc_curr == c) { 999 cc->cc_exec_entity[direct].ce_migration_cpu = cpu; 1000 cc->cc_exec_entity[direct].ce_migration_time 1001 = to_sbt; 1002 cc->cc_exec_entity[direct].ce_migration_prec 1003 = precision; 1004 cc->cc_exec_entity[direct].ce_migration_func = ftn; 1005 cc->cc_exec_entity[direct].ce_migration_arg = arg; 1006 c->c_flags |= CALLOUT_DFRMIGRATION; 1007 CTR6(KTR_CALLOUT, 1008 "migration of %p func %p arg %p in %d.%08x to %u deferred", 1009 c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 1010 (u_int)(to_sbt & 0xffffffff), cpu); 1011 CC_UNLOCK(cc); 1012 return (cancelled); 1013 } 1014 cc = callout_cpu_switch(c, cc, cpu); 1015 } 1016 #endif 1017 1018 callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags); 1019 CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x", 1020 cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 1021 (u_int)(to_sbt & 0xffffffff)); 1022 CC_UNLOCK(cc); 1023 1024 return (cancelled); 1025 } 1026 1027 /* 1028 * Common idioms that can be optimized in the future. 1029 */ 1030 int 1031 callout_schedule_on(struct callout *c, int to_ticks, int cpu) 1032 { 1033 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu); 1034 } 1035 1036 int 1037 callout_schedule(struct callout *c, int to_ticks) 1038 { 1039 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu); 1040 } 1041 1042 int 1043 _callout_stop_safe(c, safe) 1044 struct callout *c; 1045 int safe; 1046 { 1047 struct callout_cpu *cc, *old_cc; 1048 struct lock_class *class; 1049 int direct, sq_locked, use_lock; 1050 1051 /* 1052 * Some old subsystems don't hold Giant while running a callout_stop(), 1053 * so just discard this check for the moment. 1054 */ 1055 if (!safe && c->c_lock != NULL) { 1056 if (c->c_lock == &Giant.lock_object) 1057 use_lock = mtx_owned(&Giant); 1058 else { 1059 use_lock = 1; 1060 class = LOCK_CLASS(c->c_lock); 1061 class->lc_assert(c->c_lock, LA_XLOCKED); 1062 } 1063 } else 1064 use_lock = 0; 1065 direct = (c->c_flags & CALLOUT_DIRECT) != 0; 1066 sq_locked = 0; 1067 old_cc = NULL; 1068 again: 1069 cc = callout_lock(c); 1070 1071 /* 1072 * If the callout was migrating while the callout cpu lock was 1073 * dropped, just drop the sleepqueue lock and check the states 1074 * again. 1075 */ 1076 if (sq_locked != 0 && cc != old_cc) { 1077 #ifdef SMP 1078 CC_UNLOCK(cc); 1079 sleepq_release(&old_cc->cc_exec_entity[direct].cc_waiting); 1080 sq_locked = 0; 1081 old_cc = NULL; 1082 goto again; 1083 #else 1084 panic("migration should not happen"); 1085 #endif 1086 } 1087 1088 /* 1089 * If the callout isn't pending, it's not on the queue, so 1090 * don't attempt to remove it from the queue. We can try to 1091 * stop it by other means however. 
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (cc->cc_exec_entity[direct].cc_curr != c) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			if (sq_locked)
				sleepq_release(
				    &cc->cc_exec_entity[direct].cc_waiting);
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc->cc_exec_entity[direct].cc_curr == c) {
				/*
				 * Use direct calls to the sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between cc_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * effectively emulates a msleep_spin() call.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(
					    &cc->cc_exec_entity[direct].cc_waiting);
					sq_locked = 1;
					old_cc = cc;
					goto again;
				}

				/*
				 * Migration could be cancelled here, but
				 * as long as it is not yet certain when the
				 * callout will actually go away, just let
				 * softclock() take care of it.
				 */
				cc->cc_exec_entity[direct].cc_waiting = true;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(
				    &cc->cc_exec_entity[direct].cc_waiting,
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(
				    &cc->cc_exec_entity[direct].cc_waiting,
				    0);
				sq_locked = 0;
				old_cc = NULL;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
			}
		} else if (use_lock &&
		    !cc->cc_exec_entity[direct].cc_cancel) {
			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().
			 */
			cc->cc_exec_entity[direct].cc_cancel = true;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			KASSERT(!cc_cce_migrating(cc, direct),
			    ("callout wrongly scheduled for migration"));
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		} else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
			c->c_flags &= ~CALLOUT_DFRMIGRATION;
			CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		CC_UNLOCK(cc);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
	if (sq_locked)
		sleepq_release(&cc->cc_exec_entity[direct].cc_waiting);

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);
	if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
		if (cc->cc_exec_next_dir == c)
			cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le);
		LIST_REMOVE(c, c_links.le);
	} else
		TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
	callout_cc_del(c, cc);

	CC_UNLOCK(cc);
	return (1);
}

void
callout_init(c, mpsafe)
	struct callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_flags = 0;
	}
	c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(c, lock, flags)
	struct callout *c;
	struct lock_object *lock;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
		    time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
		    (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);

	return;
}
#endif /* APM_FIXUP_CALLTODO */

static int
flssbt(sbintime_t sbt)
{

	sbt += (uint64_t)sbt >> 1;
	if (sizeof(long) >= sizeof(sbintime_t))
		return (flsl(sbt));
	if (sbt >= SBT_1S)
		return (flsl(((uint64_t)sbt) >> 32) + 32);
	return (flsl(sbt));
}

/*
 * Dump immediate statistic snapshot of the scheduled callouts.
 */
static int
sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
{
	struct callout *tmp;
	struct callout_cpu *cc;
	struct callout_list *sc;
	sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
	int ct[64], cpr[64], ccpbk[32];
	int error, val, i, count, tcum, pcum, maxc, c, medc;
#ifdef SMP
	int cpu;
#endif

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	count = maxc = 0;
	st = spr = maxt = maxpr = 0;
	bzero(ccpbk, sizeof(ccpbk));
	bzero(ct, sizeof(ct));
	bzero(cpr, sizeof(cpr));
	now = sbinuptime();
#ifdef SMP
	CPU_FOREACH(cpu) {
		cc = CC_CPU(cpu);
#else
		cc = CC_CPU(timeout_cpu);
#endif
		CC_LOCK(cc);
		for (i = 0; i < callwheelsize; i++) {
			sc = &cc->cc_callwheel[i];
			c = 0;
			LIST_FOREACH(tmp, sc, c_links.le) {
				c++;
				t = tmp->c_time - now;
				if (t < 0)
					t = 0;
				st += t / SBT_1US;
				spr += tmp->c_precision / SBT_1US;
				if (t > maxt)
					maxt = t;
				if (tmp->c_precision > maxpr)
					maxpr = tmp->c_precision;
				ct[flssbt(t)]++;
				cpr[flssbt(tmp->c_precision)]++;
			}
			if (c > maxc)
				maxc = c;
			ccpbk[fls(c + c / 2)]++;
			count += c;
		}
		CC_UNLOCK(cc);
#ifdef SMP
	}
#endif

	for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
		tcum += ct[i];
	medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
	for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
		pcum += cpr[i];
	medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
	for (i = 0, c = 0; i < 32 && c < count / 2; i++)
		c += ccpbk[i];
	medc = (i >= 2) ? (1 << (i - 2)) : 0;

	printf("Scheduled callouts statistic snapshot:\n");
	printf("  Callouts: %6d  Buckets: %6d*%-3d  Bucket size: 0.%06ds\n",
	    count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
	printf("  C/Bk: med %5d  avg %6d.%06jd  max %6d\n",
	    medc,
	    count / callwheelsize / mp_ncpus,
	    (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
	    maxc);
	printf("  Time: med %5jd.%06jds  avg %6jd.%06jds  max %6jd.%06jds\n",
	    medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
	    (st / count) / 1000000, (st / count) % 1000000,
	    maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
	printf("  Prec: med %5jd.%06jds  avg %6jd.%06jds  max %6jd.%06jds\n",
	    medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
	    (spr / count) / 1000000, (spr / count) % 1000000,
	    maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
	printf("  Distribution:       \tbuckets\t   time\t   tcum\t"
	    " prec\t  pcum\n");
	for (i = 0, tcum = pcum = 0; i < 64; i++) {
		if (ct[i] == 0 && cpr[i] == 0)
			continue;
		t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
		tcum += ct[i];
		pcum += cpr[i];
		printf("  %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
		    t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
		    i - 1 - (32 - CC_HASH_SHIFT),
		    ct[i], tcum, cpr[i], pcum);
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_callout_stat, "I",
    "Dump immediate statistic snapshot of the scheduled callouts");
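
/*
 * Usage note for the OID above: the handler only produces the snapshot when
 * a value is written to it (req->newptr != NULL), e.g. via
 * "sysctl kern.callout_stat=1"; the report is emitted with printf() and so
 * ends up on the console and in the kernel message buffer.
 */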