1 /*- 2 * Copyright (c) 1982, 1986, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #include "opt_kdtrace.h" 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/bus.h> 45 #include <sys/callout.h> 46 #include <sys/condvar.h> 47 #include <sys/interrupt.h> 48 #include <sys/kernel.h> 49 #include <sys/ktr.h> 50 #include <sys/lock.h> 51 #include <sys/malloc.h> 52 #include <sys/mutex.h> 53 #include <sys/proc.h> 54 #include <sys/sdt.h> 55 #include <sys/sleepqueue.h> 56 #include <sys/sysctl.h> 57 #include <sys/smp.h> 58 59 SDT_PROVIDER_DEFINE(callout_execute); 60 SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start); 61 SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0, 62 "struct callout *"); 63 SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end); 64 SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0, 65 "struct callout *"); 66 67 static int avg_depth; 68 SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0, 69 "Average number of items examined per softclock call. Units = 1/1000"); 70 static int avg_gcalls; 71 SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0, 72 "Average number of Giant callouts made per softclock call. Units = 1/1000"); 73 static int avg_lockcalls; 74 SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0, 75 "Average number of lock callouts made per softclock call. Units = 1/1000"); 76 static int avg_mpcalls; 77 SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0, 78 "Average number of MP callouts made per softclock call. Units = 1/1000"); 79 /* 80 * TODO: 81 * allocate more timeout table slots when table overflows. 82 */ 83 int callwheelsize, callwheelbits, callwheelmask; 84 85 /* 86 * There is one struct callout_cpu per cpu, holding all relevant 87 * state for the callout processing thread on the individual CPU. 88 * In particular: 89 * cc_ticks is incremented once per tick in callout_cpu(). 90 * It tracks the global 'ticks' but in a way that the individual 91 * threads should not worry about races in the order in which 92 * hardclock() and hardclock_cpu() run on the various CPUs. 93 * cc_softclock is advanced in callout_cpu() to point to the 94 * first entry in cc_callwheel that may need handling. In turn, 95 * a softclock() is scheduled so it can serve the various entries i 96 * such that cc_softclock <= i <= cc_ticks . 97 * XXX maybe cc_softclock and cc_ticks should be volatile ? 98 * 99 * cc_ticks is also used in callout_reset_cpu() to determine 100 * when the callout should be served. 101 */ 102 struct callout_cpu { 103 struct mtx cc_lock; 104 struct callout *cc_callout; 105 struct callout_tailq *cc_callwheel; 106 struct callout_list cc_callfree; 107 struct callout *cc_next; 108 struct callout *cc_curr; 109 void *cc_cookie; 110 int cc_ticks; 111 int cc_softticks; 112 int cc_cancel; 113 int cc_waiting; 114 }; 115 116 #ifdef SMP 117 struct callout_cpu cc_cpu[MAXCPU]; 118 #define CC_CPU(cpu) (&cc_cpu[(cpu)]) 119 #define CC_SELF() CC_CPU(PCPU_GET(cpuid)) 120 #else 121 struct callout_cpu cc_cpu; 122 #define CC_CPU(cpu) &cc_cpu 123 #define CC_SELF() &cc_cpu 124 #endif 125 #define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock) 126 #define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock) 127 128 static int timeout_cpu; 129 130 MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures"); 131 132 /** 133 * Locked by cc_lock: 134 * cc_curr - If a callout is in progress, it is curr_callout. 135 * If curr_callout is non-NULL, threads waiting in 136 * callout_drain() will be woken up as soon as the 137 * relevant callout completes. 138 * cc_cancel - Changing to 1 with both callout_lock and c_lock held 139 * guarantees that the current callout will not run. 140 * The softclock() function sets this to 0 before it 141 * drops callout_lock to acquire c_lock, and it calls 142 * the handler only if curr_cancelled is still 0 after 143 * c_lock is successfully acquired. 144 * cc_waiting - If a thread is waiting in callout_drain(), then 145 * callout_wait is nonzero. Set only when 146 * curr_callout is non-NULL. 147 */ 148 149 /* 150 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization 151 * 152 * This code is called very early in the kernel initialization sequence, 153 * and may be called more then once. 154 */ 155 caddr_t 156 kern_timeout_callwheel_alloc(caddr_t v) 157 { 158 struct callout_cpu *cc; 159 160 timeout_cpu = PCPU_GET(cpuid); 161 cc = CC_CPU(timeout_cpu); 162 /* 163 * Calculate callout wheel size 164 */ 165 for (callwheelsize = 1, callwheelbits = 0; 166 callwheelsize < ncallout; 167 callwheelsize <<= 1, ++callwheelbits) 168 ; 169 callwheelmask = callwheelsize - 1; 170 171 cc->cc_callout = (struct callout *)v; 172 v = (caddr_t)(cc->cc_callout + ncallout); 173 cc->cc_callwheel = (struct callout_tailq *)v; 174 v = (caddr_t)(cc->cc_callwheel + callwheelsize); 175 return(v); 176 } 177 178 static void 179 callout_cpu_init(struct callout_cpu *cc) 180 { 181 struct callout *c; 182 int i; 183 184 mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE); 185 SLIST_INIT(&cc->cc_callfree); 186 for (i = 0; i < callwheelsize; i++) { 187 TAILQ_INIT(&cc->cc_callwheel[i]); 188 } 189 if (cc->cc_callout == NULL) 190 return; 191 for (i = 0; i < ncallout; i++) { 192 c = &cc->cc_callout[i]; 193 callout_init(c, 0); 194 c->c_flags = CALLOUT_LOCAL_ALLOC; 195 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle); 196 } 197 } 198 199 /* 200 * kern_timeout_callwheel_init() - initialize previously reserved callwheel 201 * space. 202 * 203 * This code is called just once, after the space reserved for the 204 * callout wheel has been finalized. 205 */ 206 void 207 kern_timeout_callwheel_init(void) 208 { 209 callout_cpu_init(CC_CPU(timeout_cpu)); 210 } 211 212 /* 213 * Start standard softclock thread. 214 */ 215 void *softclock_ih; 216 217 static void 218 start_softclock(void *dummy) 219 { 220 struct callout_cpu *cc; 221 #ifdef SMP 222 int cpu; 223 #endif 224 225 cc = CC_CPU(timeout_cpu); 226 if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK, 227 INTR_MPSAFE, &softclock_ih)) 228 panic("died while creating standard software ithreads"); 229 cc->cc_cookie = softclock_ih; 230 #ifdef SMP 231 for (cpu = 0; cpu <= mp_maxid; cpu++) { 232 if (cpu == timeout_cpu) 233 continue; 234 if (CPU_ABSENT(cpu)) 235 continue; 236 cc = CC_CPU(cpu); 237 if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK, 238 INTR_MPSAFE, &cc->cc_cookie)) 239 panic("died while creating standard software ithreads"); 240 cc->cc_callout = NULL; /* Only cpu0 handles timeout(). */ 241 cc->cc_callwheel = malloc( 242 sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT, 243 M_WAITOK); 244 callout_cpu_init(cc); 245 } 246 #endif 247 } 248 249 SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL); 250 251 void 252 callout_tick(void) 253 { 254 struct callout_cpu *cc; 255 int need_softclock; 256 int bucket; 257 258 /* 259 * Process callouts at a very low cpu priority, so we don't keep the 260 * relatively high clock interrupt priority any longer than necessary. 261 */ 262 need_softclock = 0; 263 cc = CC_SELF(); 264 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET); 265 cc->cc_ticks++; 266 for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) { 267 bucket = cc->cc_softticks & callwheelmask; 268 if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) { 269 need_softclock = 1; 270 break; 271 } 272 } 273 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET); 274 /* 275 * swi_sched acquires the thread lock, so we don't want to call it 276 * with cc_lock held; incorrect locking order. 277 */ 278 if (need_softclock) 279 swi_sched(cc->cc_cookie, 0); 280 } 281 282 static struct callout_cpu * 283 callout_lock(struct callout *c) 284 { 285 struct callout_cpu *cc; 286 int cpu; 287 288 for (;;) { 289 cpu = c->c_cpu; 290 cc = CC_CPU(cpu); 291 CC_LOCK(cc); 292 if (cpu == c->c_cpu) 293 break; 294 CC_UNLOCK(cc); 295 } 296 return (cc); 297 } 298 299 /* 300 * The callout mechanism is based on the work of Adam M. Costello and 301 * George Varghese, published in a technical report entitled "Redesigning 302 * the BSD Callout and Timer Facilities" and modified slightly for inclusion 303 * in FreeBSD by Justin T. Gibbs. The original work on the data structures 304 * used in this implementation was published by G. Varghese and T. Lauck in 305 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for 306 * the Efficient Implementation of a Timer Facility" in the Proceedings of 307 * the 11th ACM Annual Symposium on Operating Systems Principles, 308 * Austin, Texas Nov 1987. 309 */ 310 311 /* 312 * Software (low priority) clock interrupt. 313 * Run periodic events from timeout queue. 314 */ 315 void 316 softclock(void *arg) 317 { 318 struct callout_cpu *cc; 319 struct callout *c; 320 struct callout_tailq *bucket; 321 int curticks; 322 int steps; /* #steps since we last allowed interrupts */ 323 int depth; 324 int mpcalls; 325 int lockcalls; 326 int gcalls; 327 #ifdef DIAGNOSTIC 328 struct bintime bt1, bt2; 329 struct timespec ts2; 330 static uint64_t maxdt = 36893488147419102LL; /* 2 msec */ 331 static timeout_t *lastfunc; 332 #endif 333 334 #ifndef MAX_SOFTCLOCK_STEPS 335 #define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */ 336 #endif /* MAX_SOFTCLOCK_STEPS */ 337 338 mpcalls = 0; 339 lockcalls = 0; 340 gcalls = 0; 341 depth = 0; 342 steps = 0; 343 cc = (struct callout_cpu *)arg; 344 CC_LOCK(cc); 345 while (cc->cc_softticks - 1 != cc->cc_ticks) { 346 /* 347 * cc_softticks may be modified by hard clock, so cache 348 * it while we work on a given bucket. 349 */ 350 curticks = cc->cc_softticks; 351 cc->cc_softticks++; 352 bucket = &cc->cc_callwheel[curticks & callwheelmask]; 353 c = TAILQ_FIRST(bucket); 354 while (c) { 355 depth++; 356 if (c->c_time != curticks) { 357 c = TAILQ_NEXT(c, c_links.tqe); 358 ++steps; 359 if (steps >= MAX_SOFTCLOCK_STEPS) { 360 cc->cc_next = c; 361 /* Give interrupts a chance. */ 362 CC_UNLOCK(cc); 363 ; /* nothing */ 364 CC_LOCK(cc); 365 c = cc->cc_next; 366 steps = 0; 367 } 368 } else { 369 void (*c_func)(void *); 370 void *c_arg; 371 struct lock_class *class; 372 struct lock_object *c_lock; 373 int c_flags, sharedlock; 374 375 cc->cc_next = TAILQ_NEXT(c, c_links.tqe); 376 TAILQ_REMOVE(bucket, c, c_links.tqe); 377 class = (c->c_lock != NULL) ? 378 LOCK_CLASS(c->c_lock) : NULL; 379 sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 380 0 : 1; 381 c_lock = c->c_lock; 382 c_func = c->c_func; 383 c_arg = c->c_arg; 384 c_flags = c->c_flags; 385 if (c->c_flags & CALLOUT_LOCAL_ALLOC) { 386 c->c_flags = CALLOUT_LOCAL_ALLOC; 387 } else { 388 c->c_flags = 389 (c->c_flags & ~CALLOUT_PENDING); 390 } 391 cc->cc_curr = c; 392 cc->cc_cancel = 0; 393 CC_UNLOCK(cc); 394 if (c_lock != NULL) { 395 class->lc_lock(c_lock, sharedlock); 396 /* 397 * The callout may have been cancelled 398 * while we switched locks. 399 */ 400 if (cc->cc_cancel) { 401 class->lc_unlock(c_lock); 402 goto skip; 403 } 404 /* The callout cannot be stopped now. */ 405 cc->cc_cancel = 1; 406 407 if (c_lock == &Giant.lock_object) { 408 gcalls++; 409 CTR3(KTR_CALLOUT, 410 "callout %p func %p arg %p", 411 c, c_func, c_arg); 412 } else { 413 lockcalls++; 414 CTR3(KTR_CALLOUT, "callout lock" 415 " %p func %p arg %p", 416 c, c_func, c_arg); 417 } 418 } else { 419 mpcalls++; 420 CTR3(KTR_CALLOUT, 421 "callout mpsafe %p func %p arg %p", 422 c, c_func, c_arg); 423 } 424 #ifdef DIAGNOSTIC 425 binuptime(&bt1); 426 #endif 427 THREAD_NO_SLEEPING(); 428 SDT_PROBE(callout_execute, kernel, , 429 callout_start, c, 0, 0, 0, 0); 430 c_func(c_arg); 431 SDT_PROBE(callout_execute, kernel, , 432 callout_end, c, 0, 0, 0, 0); 433 THREAD_SLEEPING_OK(); 434 #ifdef DIAGNOSTIC 435 binuptime(&bt2); 436 bintime_sub(&bt2, &bt1); 437 if (bt2.frac > maxdt) { 438 if (lastfunc != c_func || 439 bt2.frac > maxdt * 2) { 440 bintime2timespec(&bt2, &ts2); 441 printf( 442 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n", 443 c_func, c_arg, 444 (intmax_t)ts2.tv_sec, 445 ts2.tv_nsec); 446 } 447 maxdt = bt2.frac; 448 lastfunc = c_func; 449 } 450 #endif 451 CTR1(KTR_CALLOUT, "callout %p finished", c); 452 if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0) 453 class->lc_unlock(c_lock); 454 skip: 455 CC_LOCK(cc); 456 /* 457 * If the current callout is locally 458 * allocated (from timeout(9)) 459 * then put it on the freelist. 460 * 461 * Note: we need to check the cached 462 * copy of c_flags because if it was not 463 * local, then it's not safe to deref the 464 * callout pointer. 465 */ 466 if (c_flags & CALLOUT_LOCAL_ALLOC) { 467 KASSERT(c->c_flags == 468 CALLOUT_LOCAL_ALLOC, 469 ("corrupted callout")); 470 c->c_func = NULL; 471 SLIST_INSERT_HEAD(&cc->cc_callfree, c, 472 c_links.sle); 473 } 474 cc->cc_curr = NULL; 475 if (cc->cc_waiting) { 476 /* 477 * There is someone waiting 478 * for the callout to complete. 479 */ 480 cc->cc_waiting = 0; 481 CC_UNLOCK(cc); 482 wakeup(&cc->cc_waiting); 483 CC_LOCK(cc); 484 } 485 steps = 0; 486 c = cc->cc_next; 487 } 488 } 489 } 490 avg_depth += (depth * 1000 - avg_depth) >> 8; 491 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8; 492 avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8; 493 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8; 494 cc->cc_next = NULL; 495 CC_UNLOCK(cc); 496 } 497 498 /* 499 * timeout -- 500 * Execute a function after a specified length of time. 501 * 502 * untimeout -- 503 * Cancel previous timeout function call. 504 * 505 * callout_handle_init -- 506 * Initialize a handle so that using it with untimeout is benign. 507 * 508 * See AT&T BCI Driver Reference Manual for specification. This 509 * implementation differs from that one in that although an 510 * identification value is returned from timeout, the original 511 * arguments to timeout as well as the identifier are used to 512 * identify entries for untimeout. 513 */ 514 struct callout_handle 515 timeout(ftn, arg, to_ticks) 516 timeout_t *ftn; 517 void *arg; 518 int to_ticks; 519 { 520 struct callout_cpu *cc; 521 struct callout *new; 522 struct callout_handle handle; 523 524 cc = CC_CPU(timeout_cpu); 525 CC_LOCK(cc); 526 /* Fill in the next free callout structure. */ 527 new = SLIST_FIRST(&cc->cc_callfree); 528 if (new == NULL) 529 /* XXX Attempt to malloc first */ 530 panic("timeout table full"); 531 SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle); 532 callout_reset(new, to_ticks, ftn, arg); 533 handle.callout = new; 534 CC_UNLOCK(cc); 535 536 return (handle); 537 } 538 539 void 540 untimeout(ftn, arg, handle) 541 timeout_t *ftn; 542 void *arg; 543 struct callout_handle handle; 544 { 545 struct callout_cpu *cc; 546 547 /* 548 * Check for a handle that was initialized 549 * by callout_handle_init, but never used 550 * for a real timeout. 551 */ 552 if (handle.callout == NULL) 553 return; 554 555 cc = callout_lock(handle.callout); 556 if (handle.callout->c_func == ftn && handle.callout->c_arg == arg) 557 callout_stop(handle.callout); 558 CC_UNLOCK(cc); 559 } 560 561 void 562 callout_handle_init(struct callout_handle *handle) 563 { 564 handle->callout = NULL; 565 } 566 567 /* 568 * New interface; clients allocate their own callout structures. 569 * 570 * callout_reset() - establish or change a timeout 571 * callout_stop() - disestablish a timeout 572 * callout_init() - initialize a callout structure so that it can 573 * safely be passed to callout_reset() and callout_stop() 574 * 575 * <sys/callout.h> defines three convenience macros: 576 * 577 * callout_active() - returns truth if callout has not been stopped, 578 * drained, or deactivated since the last time the callout was 579 * reset. 580 * callout_pending() - returns truth if callout is still waiting for timeout 581 * callout_deactivate() - marks the callout as having been serviced 582 */ 583 int 584 callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *), 585 void *arg, int cpu) 586 { 587 struct callout_cpu *cc; 588 int cancelled = 0; 589 590 /* 591 * Don't allow migration of pre-allocated callouts lest they 592 * become unbalanced. 593 */ 594 if (c->c_flags & CALLOUT_LOCAL_ALLOC) 595 cpu = c->c_cpu; 596 retry: 597 cc = callout_lock(c); 598 if (cc->cc_curr == c) { 599 /* 600 * We're being asked to reschedule a callout which is 601 * currently in progress. If there is a lock then we 602 * can cancel the callout if it has not really started. 603 */ 604 if (c->c_lock != NULL && !cc->cc_cancel) 605 cancelled = cc->cc_cancel = 1; 606 if (cc->cc_waiting) { 607 /* 608 * Someone has called callout_drain to kill this 609 * callout. Don't reschedule. 610 */ 611 CTR4(KTR_CALLOUT, "%s %p func %p arg %p", 612 cancelled ? "cancelled" : "failed to cancel", 613 c, c->c_func, c->c_arg); 614 CC_UNLOCK(cc); 615 return (cancelled); 616 } 617 } 618 if (c->c_flags & CALLOUT_PENDING) { 619 if (cc->cc_next == c) { 620 cc->cc_next = TAILQ_NEXT(c, c_links.tqe); 621 } 622 TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c, 623 c_links.tqe); 624 625 cancelled = 1; 626 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 627 } 628 /* 629 * If the lock must migrate we have to check the state again as 630 * we can't hold both the new and old locks simultaneously. 631 */ 632 if (c->c_cpu != cpu) { 633 c->c_cpu = cpu; 634 CC_UNLOCK(cc); 635 goto retry; 636 } 637 638 if (to_ticks <= 0) 639 to_ticks = 1; 640 641 c->c_arg = arg; 642 c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING); 643 c->c_func = ftn; 644 c->c_time = cc->cc_ticks + to_ticks; 645 TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask], 646 c, c_links.tqe); 647 CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d", 648 cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks); 649 CC_UNLOCK(cc); 650 651 return (cancelled); 652 } 653 654 /* 655 * Common idioms that can be optimized in the future. 656 */ 657 int 658 callout_schedule_on(struct callout *c, int to_ticks, int cpu) 659 { 660 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu); 661 } 662 663 int 664 callout_schedule(struct callout *c, int to_ticks) 665 { 666 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu); 667 } 668 669 int 670 _callout_stop_safe(c, safe) 671 struct callout *c; 672 int safe; 673 { 674 struct callout_cpu *cc; 675 struct lock_class *class; 676 int use_lock, sq_locked; 677 678 /* 679 * Some old subsystems don't hold Giant while running a callout_stop(), 680 * so just discard this check for the moment. 681 */ 682 if (!safe && c->c_lock != NULL) { 683 if (c->c_lock == &Giant.lock_object) 684 use_lock = mtx_owned(&Giant); 685 else { 686 use_lock = 1; 687 class = LOCK_CLASS(c->c_lock); 688 class->lc_assert(c->c_lock, LA_XLOCKED); 689 } 690 } else 691 use_lock = 0; 692 693 sq_locked = 0; 694 again: 695 cc = callout_lock(c); 696 /* 697 * If the callout isn't pending, it's not on the queue, so 698 * don't attempt to remove it from the queue. We can try to 699 * stop it by other means however. 700 */ 701 if (!(c->c_flags & CALLOUT_PENDING)) { 702 c->c_flags &= ~CALLOUT_ACTIVE; 703 704 /* 705 * If it wasn't on the queue and it isn't the current 706 * callout, then we can't stop it, so just bail. 707 */ 708 if (cc->cc_curr != c) { 709 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 710 c, c->c_func, c->c_arg); 711 CC_UNLOCK(cc); 712 if (sq_locked) 713 sleepq_release(&cc->cc_waiting); 714 return (0); 715 } 716 717 if (safe) { 718 /* 719 * The current callout is running (or just 720 * about to run) and blocking is allowed, so 721 * just wait for the current invocation to 722 * finish. 723 */ 724 while (cc->cc_curr == c) { 725 726 /* 727 * Use direct calls to sleepqueue interface 728 * instead of cv/msleep in order to avoid 729 * a LOR between cc_lock and sleepqueue 730 * chain spinlocks. This piece of code 731 * emulates a msleep_spin() call actually. 732 * 733 * If we already have the sleepqueue chain 734 * locked, then we can safely block. If we 735 * don't already have it locked, however, 736 * we have to drop the cc_lock to lock 737 * it. This opens several races, so we 738 * restart at the beginning once we have 739 * both locks. If nothing has changed, then 740 * we will end up back here with sq_locked 741 * set. 742 */ 743 if (!sq_locked) { 744 CC_UNLOCK(cc); 745 sleepq_lock(&cc->cc_waiting); 746 sq_locked = 1; 747 goto again; 748 } 749 cc->cc_waiting = 1; 750 DROP_GIANT(); 751 CC_UNLOCK(cc); 752 sleepq_add(&cc->cc_waiting, 753 &cc->cc_lock.lock_object, "codrain", 754 SLEEPQ_SLEEP, 0); 755 sleepq_wait(&cc->cc_waiting, 0); 756 sq_locked = 0; 757 758 /* Reacquire locks previously released. */ 759 PICKUP_GIANT(); 760 CC_LOCK(cc); 761 } 762 } else if (use_lock && !cc->cc_cancel) { 763 /* 764 * The current callout is waiting for its 765 * lock which we hold. Cancel the callout 766 * and return. After our caller drops the 767 * lock, the callout will be skipped in 768 * softclock(). 769 */ 770 cc->cc_cancel = 1; 771 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 772 c, c->c_func, c->c_arg); 773 CC_UNLOCK(cc); 774 KASSERT(!sq_locked, ("sleepqueue chain locked")); 775 return (1); 776 } 777 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 778 c, c->c_func, c->c_arg); 779 CC_UNLOCK(cc); 780 KASSERT(!sq_locked, ("sleepqueue chain still locked")); 781 return (0); 782 } 783 if (sq_locked) 784 sleepq_release(&cc->cc_waiting); 785 786 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 787 788 if (cc->cc_next == c) { 789 cc->cc_next = TAILQ_NEXT(c, c_links.tqe); 790 } 791 TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c, 792 c_links.tqe); 793 794 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 795 c, c->c_func, c->c_arg); 796 797 if (c->c_flags & CALLOUT_LOCAL_ALLOC) { 798 c->c_func = NULL; 799 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle); 800 } 801 CC_UNLOCK(cc); 802 return (1); 803 } 804 805 void 806 callout_init(c, mpsafe) 807 struct callout *c; 808 int mpsafe; 809 { 810 bzero(c, sizeof *c); 811 if (mpsafe) { 812 c->c_lock = NULL; 813 c->c_flags = CALLOUT_RETURNUNLOCKED; 814 } else { 815 c->c_lock = &Giant.lock_object; 816 c->c_flags = 0; 817 } 818 c->c_cpu = timeout_cpu; 819 } 820 821 void 822 _callout_init_lock(c, lock, flags) 823 struct callout *c; 824 struct lock_object *lock; 825 int flags; 826 { 827 bzero(c, sizeof *c); 828 c->c_lock = lock; 829 KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0, 830 ("callout_init_lock: bad flags %d", flags)); 831 KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0, 832 ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock")); 833 KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & 834 (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class", 835 __func__)); 836 c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK); 837 c->c_cpu = timeout_cpu; 838 } 839 840 #ifdef APM_FIXUP_CALLTODO 841 /* 842 * Adjust the kernel calltodo timeout list. This routine is used after 843 * an APM resume to recalculate the calltodo timer list values with the 844 * number of hz's we have been sleeping. The next hardclock() will detect 845 * that there are fired timers and run softclock() to execute them. 846 * 847 * Please note, I have not done an exhaustive analysis of what code this 848 * might break. I am motivated to have my select()'s and alarm()'s that 849 * have expired during suspend firing upon resume so that the applications 850 * which set the timer can do the maintanence the timer was for as close 851 * as possible to the originally intended time. Testing this code for a 852 * week showed that resuming from a suspend resulted in 22 to 25 timers 853 * firing, which seemed independant on whether the suspend was 2 hours or 854 * 2 days. Your milage may vary. - Ken Key <key@cs.utk.edu> 855 */ 856 void 857 adjust_timeout_calltodo(time_change) 858 struct timeval *time_change; 859 { 860 register struct callout *p; 861 unsigned long delta_ticks; 862 863 /* 864 * How many ticks were we asleep? 865 * (stolen from tvtohz()). 866 */ 867 868 /* Don't do anything */ 869 if (time_change->tv_sec < 0) 870 return; 871 else if (time_change->tv_sec <= LONG_MAX / 1000000) 872 delta_ticks = (time_change->tv_sec * 1000000 + 873 time_change->tv_usec + (tick - 1)) / tick + 1; 874 else if (time_change->tv_sec <= LONG_MAX / hz) 875 delta_ticks = time_change->tv_sec * hz + 876 (time_change->tv_usec + (tick - 1)) / tick + 1; 877 else 878 delta_ticks = LONG_MAX; 879 880 if (delta_ticks > INT_MAX) 881 delta_ticks = INT_MAX; 882 883 /* 884 * Now rip through the timer calltodo list looking for timers 885 * to expire. 886 */ 887 888 /* don't collide with softclock() */ 889 CC_LOCK(cc); 890 for (p = calltodo.c_next; p != NULL; p = p->c_next) { 891 p->c_time -= delta_ticks; 892 893 /* Break if the timer had more time on it than delta_ticks */ 894 if (p->c_time > 0) 895 break; 896 897 /* take back the ticks the timer didn't use (p->c_time <= 0) */ 898 delta_ticks = -p->c_time; 899 } 900 CC_UNLOCK(cc); 901 902 return; 903 } 904 #endif /* APM_FIXUP_CALLTODO */ 905