1 /*- 2 * Copyright (c) 1982, 1986, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/callout.h> 43 #include <sys/condvar.h> 44 #include <sys/kernel.h> 45 #include <sys/ktr.h> 46 #include <sys/lock.h> 47 #include <sys/mutex.h> 48 #include <sys/proc.h> 49 #include <sys/sleepqueue.h> 50 #include <sys/sysctl.h> 51 52 static int avg_depth; 53 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0, 54 "Average number of items examined per softclock call. Units = 1/1000"); 55 static int avg_gcalls; 56 SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0, 57 "Average number of Giant callouts made per softclock call. Units = 1/1000"); 58 static int avg_lockcalls; 59 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0, 60 "Average number of lock callouts made per softclock call. Units = 1/1000"); 61 static int avg_mpcalls; 62 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0, 63 "Average number of MP callouts made per softclock call. Units = 1/1000"); 64 /* 65 * TODO: 66 * allocate more timeout table slots when table overflows. 67 */ 68 69 /* Exported to machdep.c and/or kern_clock.c. */ 70 struct callout *callout; 71 struct callout_list callfree; 72 int callwheelsize, callwheelbits, callwheelmask; 73 struct callout_tailq *callwheel; 74 int softticks; /* Like ticks, but for softclock(). */ 75 struct mtx callout_lock; 76 77 static struct callout *nextsoftcheck; /* Next callout to be checked. */ 78 79 /** 80 * Locked by callout_lock: 81 * curr_callout - If a callout is in progress, it is curr_callout. 82 * If curr_callout is non-NULL, threads waiting in 83 * callout_drain() will be woken up as soon as the 84 * relevant callout completes. 85 * curr_cancelled - Changing to 1 with both callout_lock and c_lock held 86 * guarantees that the current callout will not run. 87 * The softclock() function sets this to 0 before it 88 * drops callout_lock to acquire c_lock, and it calls 89 * the handler only if curr_cancelled is still 0 after 90 * c_lock is successfully acquired. 91 * callout_wait - If a thread is waiting in callout_drain(), then 92 * callout_wait is nonzero. Set only when 93 * curr_callout is non-NULL. 94 */ 95 static struct callout *curr_callout; 96 static int curr_cancelled; 97 static int callout_wait; 98 99 /* 100 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization 101 * 102 * This code is called very early in the kernel initialization sequence, 103 * and may be called more then once. 104 */ 105 caddr_t 106 kern_timeout_callwheel_alloc(caddr_t v) 107 { 108 /* 109 * Calculate callout wheel size 110 */ 111 for (callwheelsize = 1, callwheelbits = 0; 112 callwheelsize < ncallout; 113 callwheelsize <<= 1, ++callwheelbits) 114 ; 115 callwheelmask = callwheelsize - 1; 116 117 callout = (struct callout *)v; 118 v = (caddr_t)(callout + ncallout); 119 callwheel = (struct callout_tailq *)v; 120 v = (caddr_t)(callwheel + callwheelsize); 121 return(v); 122 } 123 124 /* 125 * kern_timeout_callwheel_init() - initialize previously reserved callwheel 126 * space. 127 * 128 * This code is called just once, after the space reserved for the 129 * callout wheel has been finalized. 130 */ 131 void 132 kern_timeout_callwheel_init(void) 133 { 134 int i; 135 136 SLIST_INIT(&callfree); 137 for (i = 0; i < ncallout; i++) { 138 callout_init(&callout[i], 0); 139 callout[i].c_flags = CALLOUT_LOCAL_ALLOC; 140 SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle); 141 } 142 for (i = 0; i < callwheelsize; i++) { 143 TAILQ_INIT(&callwheel[i]); 144 } 145 mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE); 146 } 147 148 /* 149 * The callout mechanism is based on the work of Adam M. Costello and 150 * George Varghese, published in a technical report entitled "Redesigning 151 * the BSD Callout and Timer Facilities" and modified slightly for inclusion 152 * in FreeBSD by Justin T. Gibbs. The original work on the data structures 153 * used in this implementation was published by G. Varghese and T. Lauck in 154 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for 155 * the Efficient Implementation of a Timer Facility" in the Proceedings of 156 * the 11th ACM Annual Symposium on Operating Systems Principles, 157 * Austin, Texas Nov 1987. 158 */ 159 160 /* 161 * Software (low priority) clock interrupt. 162 * Run periodic events from timeout queue. 163 */ 164 void 165 softclock(void *dummy) 166 { 167 struct callout *c; 168 struct callout_tailq *bucket; 169 int curticks; 170 int steps; /* #steps since we last allowed interrupts */ 171 int depth; 172 int mpcalls; 173 int lockcalls; 174 int gcalls; 175 #ifdef DIAGNOSTIC 176 struct bintime bt1, bt2; 177 struct timespec ts2; 178 static uint64_t maxdt = 36893488147419102LL; /* 2 msec */ 179 static timeout_t *lastfunc; 180 #endif 181 182 #ifndef MAX_SOFTCLOCK_STEPS 183 #define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */ 184 #endif /* MAX_SOFTCLOCK_STEPS */ 185 186 mpcalls = 0; 187 lockcalls = 0; 188 gcalls = 0; 189 depth = 0; 190 steps = 0; 191 mtx_lock_spin(&callout_lock); 192 while (softticks != ticks) { 193 softticks++; 194 /* 195 * softticks may be modified by hard clock, so cache 196 * it while we work on a given bucket. 197 */ 198 curticks = softticks; 199 bucket = &callwheel[curticks & callwheelmask]; 200 c = TAILQ_FIRST(bucket); 201 while (c) { 202 depth++; 203 if (c->c_time != curticks) { 204 c = TAILQ_NEXT(c, c_links.tqe); 205 ++steps; 206 if (steps >= MAX_SOFTCLOCK_STEPS) { 207 nextsoftcheck = c; 208 /* Give interrupts a chance. */ 209 mtx_unlock_spin(&callout_lock); 210 ; /* nothing */ 211 mtx_lock_spin(&callout_lock); 212 c = nextsoftcheck; 213 steps = 0; 214 } 215 } else { 216 void (*c_func)(void *); 217 void *c_arg; 218 struct lock_class *class; 219 struct lock_object *c_lock; 220 int c_flags, sharedlock; 221 222 nextsoftcheck = TAILQ_NEXT(c, c_links.tqe); 223 TAILQ_REMOVE(bucket, c, c_links.tqe); 224 class = (c->c_lock != NULL) ? 225 LOCK_CLASS(c->c_lock) : NULL; 226 sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 227 0 : 1; 228 c_lock = c->c_lock; 229 c_func = c->c_func; 230 c_arg = c->c_arg; 231 c_flags = c->c_flags; 232 if (c->c_flags & CALLOUT_LOCAL_ALLOC) { 233 c->c_flags = CALLOUT_LOCAL_ALLOC; 234 curr_callout = c; 235 } else { 236 c->c_flags = 237 (c->c_flags & ~CALLOUT_PENDING); 238 curr_callout = c; 239 } 240 curr_cancelled = 0; 241 mtx_unlock_spin(&callout_lock); 242 if (c_lock != NULL) { 243 class->lc_lock(c_lock, sharedlock); 244 /* 245 * The callout may have been cancelled 246 * while we switched locks. 247 */ 248 if (curr_cancelled) { 249 class->lc_unlock(c_lock); 250 goto skip; 251 } 252 /* The callout cannot be stopped now. */ 253 curr_cancelled = 1; 254 255 if (c_lock == &Giant.lock_object) { 256 gcalls++; 257 CTR3(KTR_CALLOUT, 258 "callout %p func %p arg %p", 259 c, c_func, c_arg); 260 } else { 261 lockcalls++; 262 CTR3(KTR_CALLOUT, "callout lock" 263 " %p func %p arg %p", 264 c, c_func, c_arg); 265 } 266 } else { 267 mpcalls++; 268 CTR3(KTR_CALLOUT, 269 "callout mpsafe %p func %p arg %p", 270 c, c_func, c_arg); 271 } 272 #ifdef DIAGNOSTIC 273 binuptime(&bt1); 274 #endif 275 THREAD_NO_SLEEPING(); 276 c_func(c_arg); 277 THREAD_SLEEPING_OK(); 278 #ifdef DIAGNOSTIC 279 binuptime(&bt2); 280 bintime_sub(&bt2, &bt1); 281 if (bt2.frac > maxdt) { 282 if (lastfunc != c_func || 283 bt2.frac > maxdt * 2) { 284 bintime2timespec(&bt2, &ts2); 285 printf( 286 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n", 287 c_func, c_arg, 288 (intmax_t)ts2.tv_sec, 289 ts2.tv_nsec); 290 } 291 maxdt = bt2.frac; 292 lastfunc = c_func; 293 } 294 #endif 295 if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0) 296 class->lc_unlock(c_lock); 297 skip: 298 mtx_lock_spin(&callout_lock); 299 /* 300 * If the current callout is locally 301 * allocated (from timeout(9)) 302 * then put it on the freelist. 303 * 304 * Note: we need to check the cached 305 * copy of c_flags because if it was not 306 * local, then it's not safe to deref the 307 * callout pointer. 308 */ 309 if (c_flags & CALLOUT_LOCAL_ALLOC) { 310 KASSERT(c->c_flags == 311 CALLOUT_LOCAL_ALLOC, 312 ("corrupted callout")); 313 c->c_func = NULL; 314 SLIST_INSERT_HEAD(&callfree, c, 315 c_links.sle); 316 } 317 curr_callout = NULL; 318 if (callout_wait) { 319 /* 320 * There is someone waiting 321 * for the callout to complete. 322 */ 323 callout_wait = 0; 324 mtx_unlock_spin(&callout_lock); 325 wakeup(&callout_wait); 326 mtx_lock_spin(&callout_lock); 327 } 328 steps = 0; 329 c = nextsoftcheck; 330 } 331 } 332 } 333 avg_depth += (depth * 1000 - avg_depth) >> 8; 334 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8; 335 avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8; 336 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8; 337 nextsoftcheck = NULL; 338 mtx_unlock_spin(&callout_lock); 339 } 340 341 /* 342 * timeout -- 343 * Execute a function after a specified length of time. 344 * 345 * untimeout -- 346 * Cancel previous timeout function call. 347 * 348 * callout_handle_init -- 349 * Initialize a handle so that using it with untimeout is benign. 350 * 351 * See AT&T BCI Driver Reference Manual for specification. This 352 * implementation differs from that one in that although an 353 * identification value is returned from timeout, the original 354 * arguments to timeout as well as the identifier are used to 355 * identify entries for untimeout. 356 */ 357 struct callout_handle 358 timeout(ftn, arg, to_ticks) 359 timeout_t *ftn; 360 void *arg; 361 int to_ticks; 362 { 363 struct callout *new; 364 struct callout_handle handle; 365 366 mtx_lock_spin(&callout_lock); 367 368 /* Fill in the next free callout structure. */ 369 new = SLIST_FIRST(&callfree); 370 if (new == NULL) 371 /* XXX Attempt to malloc first */ 372 panic("timeout table full"); 373 SLIST_REMOVE_HEAD(&callfree, c_links.sle); 374 375 callout_reset(new, to_ticks, ftn, arg); 376 377 handle.callout = new; 378 mtx_unlock_spin(&callout_lock); 379 return (handle); 380 } 381 382 void 383 untimeout(ftn, arg, handle) 384 timeout_t *ftn; 385 void *arg; 386 struct callout_handle handle; 387 { 388 389 /* 390 * Check for a handle that was initialized 391 * by callout_handle_init, but never used 392 * for a real timeout. 393 */ 394 if (handle.callout == NULL) 395 return; 396 397 mtx_lock_spin(&callout_lock); 398 if (handle.callout->c_func == ftn && handle.callout->c_arg == arg) 399 callout_stop(handle.callout); 400 mtx_unlock_spin(&callout_lock); 401 } 402 403 void 404 callout_handle_init(struct callout_handle *handle) 405 { 406 handle->callout = NULL; 407 } 408 409 /* 410 * New interface; clients allocate their own callout structures. 411 * 412 * callout_reset() - establish or change a timeout 413 * callout_stop() - disestablish a timeout 414 * callout_init() - initialize a callout structure so that it can 415 * safely be passed to callout_reset() and callout_stop() 416 * 417 * <sys/callout.h> defines three convenience macros: 418 * 419 * callout_active() - returns truth if callout has not been stopped, 420 * drained, or deactivated since the last time the callout was 421 * reset. 422 * callout_pending() - returns truth if callout is still waiting for timeout 423 * callout_deactivate() - marks the callout as having been serviced 424 */ 425 int 426 callout_reset(c, to_ticks, ftn, arg) 427 struct callout *c; 428 int to_ticks; 429 void (*ftn)(void *); 430 void *arg; 431 { 432 int cancelled = 0; 433 434 mtx_lock_spin(&callout_lock); 435 if (c == curr_callout) { 436 /* 437 * We're being asked to reschedule a callout which is 438 * currently in progress. If there is a lock then we 439 * can cancel the callout if it has not really started. 440 */ 441 if (c->c_lock != NULL && !curr_cancelled) 442 cancelled = curr_cancelled = 1; 443 if (callout_wait) { 444 /* 445 * Someone has called callout_drain to kill this 446 * callout. Don't reschedule. 447 */ 448 CTR4(KTR_CALLOUT, "%s %p func %p arg %p", 449 cancelled ? "cancelled" : "failed to cancel", 450 c, c->c_func, c->c_arg); 451 mtx_unlock_spin(&callout_lock); 452 return (cancelled); 453 } 454 } 455 if (c->c_flags & CALLOUT_PENDING) { 456 if (nextsoftcheck == c) { 457 nextsoftcheck = TAILQ_NEXT(c, c_links.tqe); 458 } 459 TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, 460 c_links.tqe); 461 462 cancelled = 1; 463 464 /* 465 * Part of the normal "stop a pending callout" process 466 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING 467 * flags. We're not going to bother doing that here, 468 * because we're going to be setting those flags ten lines 469 * after this point, and we're holding callout_lock 470 * between now and then. 471 */ 472 } 473 474 /* 475 * We could unlock callout_lock here and lock it again before the 476 * TAILQ_INSERT_TAIL, but there's no point since doing this setup 477 * doesn't take much time. 478 */ 479 if (to_ticks <= 0) 480 to_ticks = 1; 481 482 c->c_arg = arg; 483 c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING); 484 c->c_func = ftn; 485 c->c_time = ticks + to_ticks; 486 TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask], 487 c, c_links.tqe); 488 CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d", 489 cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks); 490 mtx_unlock_spin(&callout_lock); 491 492 return (cancelled); 493 } 494 495 int 496 _callout_stop_safe(c, safe) 497 struct callout *c; 498 int safe; 499 { 500 struct lock_class *class; 501 int use_lock, sq_locked; 502 503 /* 504 * Some old subsystems don't hold Giant while running a callout_stop(), 505 * so just discard this check for the moment. 506 */ 507 if (!safe && c->c_lock != NULL) { 508 if (c->c_lock == &Giant.lock_object) 509 use_lock = mtx_owned(&Giant); 510 else { 511 use_lock = 1; 512 class = LOCK_CLASS(c->c_lock); 513 class->lc_assert(c->c_lock, LA_XLOCKED); 514 } 515 } else 516 use_lock = 0; 517 518 sq_locked = 0; 519 again: 520 mtx_lock_spin(&callout_lock); 521 /* 522 * If the callout isn't pending, it's not on the queue, so 523 * don't attempt to remove it from the queue. We can try to 524 * stop it by other means however. 525 */ 526 if (!(c->c_flags & CALLOUT_PENDING)) { 527 c->c_flags &= ~CALLOUT_ACTIVE; 528 529 /* 530 * If it wasn't on the queue and it isn't the current 531 * callout, then we can't stop it, so just bail. 532 */ 533 if (c != curr_callout) { 534 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 535 c, c->c_func, c->c_arg); 536 mtx_unlock_spin(&callout_lock); 537 if (sq_locked) 538 sleepq_release(&callout_wait); 539 return (0); 540 } 541 542 if (safe) { 543 /* 544 * The current callout is running (or just 545 * about to run) and blocking is allowed, so 546 * just wait for the current invocation to 547 * finish. 548 */ 549 while (c == curr_callout) { 550 551 /* 552 * Use direct calls to sleepqueue interface 553 * instead of cv/msleep in order to avoid 554 * a LOR between callout_lock and sleepqueue 555 * chain spinlocks. This piece of code 556 * emulates a msleep_spin() call actually. 557 * 558 * If we already have the sleepqueue chain 559 * locked, then we can safely block. If we 560 * don't already have it locked, however, 561 * we have to drop the callout_lock to lock 562 * it. This opens several races, so we 563 * restart at the beginning once we have 564 * both locks. If nothing has changed, then 565 * we will end up back here with sq_locked 566 * set. 567 */ 568 if (!sq_locked) { 569 mtx_unlock_spin(&callout_lock); 570 sleepq_lock(&callout_wait); 571 sq_locked = 1; 572 goto again; 573 } 574 575 callout_wait = 1; 576 DROP_GIANT(); 577 mtx_unlock_spin(&callout_lock); 578 sleepq_add(&callout_wait, 579 &callout_lock.lock_object, "codrain", 580 SLEEPQ_SLEEP, 0); 581 sleepq_wait(&callout_wait, 0); 582 sq_locked = 0; 583 584 /* Reacquire locks previously released. */ 585 PICKUP_GIANT(); 586 mtx_lock_spin(&callout_lock); 587 } 588 } else if (use_lock && !curr_cancelled) { 589 /* 590 * The current callout is waiting for its 591 * lock which we hold. Cancel the callout 592 * and return. After our caller drops the 593 * lock, the callout will be skipped in 594 * softclock(). 595 */ 596 curr_cancelled = 1; 597 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 598 c, c->c_func, c->c_arg); 599 mtx_unlock_spin(&callout_lock); 600 KASSERT(!sq_locked, ("sleepqueue chain locked")); 601 return (1); 602 } 603 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 604 c, c->c_func, c->c_arg); 605 mtx_unlock_spin(&callout_lock); 606 KASSERT(!sq_locked, ("sleepqueue chain still locked")); 607 return (0); 608 } 609 if (sq_locked) 610 sleepq_release(&callout_wait); 611 612 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 613 614 if (nextsoftcheck == c) { 615 nextsoftcheck = TAILQ_NEXT(c, c_links.tqe); 616 } 617 TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe); 618 619 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 620 c, c->c_func, c->c_arg); 621 622 if (c->c_flags & CALLOUT_LOCAL_ALLOC) { 623 c->c_func = NULL; 624 SLIST_INSERT_HEAD(&callfree, c, c_links.sle); 625 } 626 mtx_unlock_spin(&callout_lock); 627 return (1); 628 } 629 630 void 631 callout_init(c, mpsafe) 632 struct callout *c; 633 int mpsafe; 634 { 635 bzero(c, sizeof *c); 636 if (mpsafe) { 637 c->c_lock = NULL; 638 c->c_flags = CALLOUT_RETURNUNLOCKED; 639 } else { 640 c->c_lock = &Giant.lock_object; 641 c->c_flags = 0; 642 } 643 } 644 645 void 646 _callout_init_lock(c, lock, flags) 647 struct callout *c; 648 struct lock_object *lock; 649 int flags; 650 { 651 bzero(c, sizeof *c); 652 c->c_lock = lock; 653 KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0, 654 ("callout_init_lock: bad flags %d", flags)); 655 KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0, 656 ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock")); 657 KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & 658 (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class", 659 __func__)); 660 c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK); 661 } 662 663 #ifdef APM_FIXUP_CALLTODO 664 /* 665 * Adjust the kernel calltodo timeout list. This routine is used after 666 * an APM resume to recalculate the calltodo timer list values with the 667 * number of hz's we have been sleeping. The next hardclock() will detect 668 * that there are fired timers and run softclock() to execute them. 669 * 670 * Please note, I have not done an exhaustive analysis of what code this 671 * might break. I am motivated to have my select()'s and alarm()'s that 672 * have expired during suspend firing upon resume so that the applications 673 * which set the timer can do the maintanence the timer was for as close 674 * as possible to the originally intended time. Testing this code for a 675 * week showed that resuming from a suspend resulted in 22 to 25 timers 676 * firing, which seemed independant on whether the suspend was 2 hours or 677 * 2 days. Your milage may vary. - Ken Key <key@cs.utk.edu> 678 */ 679 void 680 adjust_timeout_calltodo(time_change) 681 struct timeval *time_change; 682 { 683 register struct callout *p; 684 unsigned long delta_ticks; 685 686 /* 687 * How many ticks were we asleep? 688 * (stolen from tvtohz()). 689 */ 690 691 /* Don't do anything */ 692 if (time_change->tv_sec < 0) 693 return; 694 else if (time_change->tv_sec <= LONG_MAX / 1000000) 695 delta_ticks = (time_change->tv_sec * 1000000 + 696 time_change->tv_usec + (tick - 1)) / tick + 1; 697 else if (time_change->tv_sec <= LONG_MAX / hz) 698 delta_ticks = time_change->tv_sec * hz + 699 (time_change->tv_usec + (tick - 1)) / tick + 1; 700 else 701 delta_ticks = LONG_MAX; 702 703 if (delta_ticks > INT_MAX) 704 delta_ticks = INT_MAX; 705 706 /* 707 * Now rip through the timer calltodo list looking for timers 708 * to expire. 709 */ 710 711 /* don't collide with softclock() */ 712 mtx_lock_spin(&callout_lock); 713 for (p = calltodo.c_next; p != NULL; p = p->c_next) { 714 p->c_time -= delta_ticks; 715 716 /* Break if the timer had more time on it than delta_ticks */ 717 if (p->c_time > 0) 718 break; 719 720 /* take back the ticks the timer didn't use (p->c_time <= 0) */ 721 delta_ticks = -p->c_time; 722 } 723 mtx_unlock_spin(&callout_lock); 724 725 return; 726 } 727 #endif /* APM_FIXUP_CALLTODO */ 728