/*
 * linux/kernel/time/tick-sched.c
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * Distribute under GPLv2.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Per cpu nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

/*
 * The time when the last jiffy update happened. Protected by xtime_lock.
 */
static ktime_t last_jiffies_update;

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding xtime_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 < tick_period.tv64)
		return;

	/* Reevaluate with xtime_lock held */
	write_seqlock(&xtime_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 >= tick_period.tv64) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta.tv64 >= tick_period.tv64)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	}
	write_sequnlock(&xtime_lock);
}

/*
 * Initialize and return the time of the last jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&xtime_lock);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update.tv64 == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&xtime_lock);
	return period;
}

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ
/*
 * NO HZ enabled ?
 */
static int tick_nohz_enabled __read_mostly = 1;

/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	if (!strcmp(str, "off"))
		tick_nohz_enabled = 0;
	else if (!strcmp(str, "on"))
		tick_nohz_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("nohz=", setup_tick_nohz);

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any cpu, as we don't know whether the
 * cpu which has the update task assigned is in a long sleep.
 */
void tick_nohz_update_jiffies(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long flags;
	ktime_t now;

	if (!ts->tick_stopped)
		return;

	cpu_clear(cpu, nohz_cpu_mask);
	now = ktime_get();
	ts->idle_waketime = now;

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog();
}

/*
 * Fold the time slept since tick_nohz_start_idle() into the per-cpu
 * idle statistics and mark the idle period as finished.
 */
void tick_nohz_stop_idle(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	if (ts->idle_active) {
		ktime_t now, delta;
		now = ktime_get();
		delta = ktime_sub(now, ts->idle_entrytime);
		ts->idle_lastupdate = now;
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_active = 0;

		sched_clock_idle_wakeup_event(0);
	}
}

/*
 * Mark the beginning of an idle period: update the idle statistics,
 * record the entry time and notify the sched_clock code.
 */
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
	ktime_t now, delta;

	now = ktime_get();
	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		ts->idle_lastupdate = now;
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
	}
	ts->idle_entrytime = now;
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
	return now;
}

/*
 * Return the accumulated idle time of a cpu in microseconds. The time of
 * the last update is stored in *last_update_time.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	*last_update_time = ktime_to_us(ts->idle_lastupdate);
	return ktime_to_us(ts->idle_sleeptime);
}

/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick.
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(int inidle)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
	struct tick_sched *ts;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);
	now = tick_nohz_start_idle(ts);

	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here, the jiffies might be stale and do_timer() never
	 * be invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		goto end;

	if (!inidle && !ts->inidle)
		goto end;

	ts->inidle = 1;

	if (need_resched())
		goto end;

	if (unlikely(local_softirq_pending())) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       local_softirq_pending());
			ratelimit++;
		}
		goto end;
	}

	ts->idle_calls++;
	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
	} while (read_seqretry(&xtime_lock, seq));

	/* Get the next timer wheel timer */
	next_jiffies = get_next_timer_interrupt(last_jiffies);
	delta_jiffies = next_jiffies - last_jiffies;

	if (rcu_needs_cpu(cpu))
		delta_jiffies = 1;
	/*
	 * Do not stop the tick, if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffy off */
	if ((long)delta_jiffies >= 1) {

		if (delta_jiffies > 1)
			cpu_set(cpu, nohz_cpu_mask);
		/*
		 * nohz_stop_sched_tick can be called several times before
		 * nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			if (select_nohz_load_balancer(1)) {
				/*
				 * sched tick not stopped!
				 */
				cpu_clear(cpu, nohz_cpu_mask);
				goto out;
			}

			ts->idle_tick = ts->sched_timer.expires;
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
			rcu_enter_nohz();
		}

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here, the
		 * jiffies might be stale and do_timer() never be
		 * invoked.
		 */
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;

		ts->idle_sleeps++;

		/*
		 * delta_jiffies >= NEXT_TIMER_MAX_DELTA signals that
		 * there is no timer pending or at least extremely far
		 * into the future (12 days for HZ=1000). In this case
		 * we simply stop the tick timer:
		 */
		if (unlikely(delta_jiffies >= NEXT_TIMER_MAX_DELTA)) {
			ts->idle_expires.tv64 = KTIME_MAX;
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		/*
		 * Calculate the expiry time for the next timer wheel
		 * timer.
		 */
		expires = ktime_add_ns(last_update, tick_period.tv64 *
				       delta_jiffies);
		ts->idle_expires = expires;

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
			goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffy boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
		cpu_clear(cpu, nohz_cpu_mask);
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
	ts->sleep_length = ktime_sub(dev->next_event, now);
end:
	local_irq_restore(flags);
}

/**
 * tick_nohz_get_sleep_length - return the length of the current sleep
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	return ts->sleep_length;
}

/**
 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 */
void tick_nohz_restart_sched_tick(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long ticks;
	ktime_t now;

	local_irq_disable();
	tick_nohz_stop_idle(cpu);

	if (!ts->inidle || !ts->tick_stopped) {
		ts->inidle = 0;
		local_irq_enable();
		return;
	}

	ts->inidle = 0;

	rcu_exit_nohz();

	/* Update jiffies first */
	select_nohz_load_balancer(0);
	now = ktime_get();
	tick_do_update_jiffies64(now);
	cpu_clear(cpu, nohz_cpu_mask);

	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it does only a single tick of accounting.
	 * Enforce that this time is accounted to idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX) {
		add_preempt_count(HARDIRQ_OFFSET);
		account_system_time(current, HARDIRQ_OFFSET,
				    jiffies_to_cputime(ticks));
		sub_preempt_count(HARDIRQ_OFFSET);
	}

	touch_softlockup_watchdog();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	ts->idle_exittime = now;
	hrtimer_cancel(&ts->sched_timer);
	ts->sched_timer.expires = ts->idle_tick;

	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer,
				      ts->sched_timer.expires,
				      HRTIMER_MODE_ABS);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(ts->sched_timer.expires, 0))
				break;
		}
		/* Update jiffies and reread time */
		tick_do_update_jiffies64(now);
		now = ktime_get();
	}
	local_irq_enable();
}

static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
	hrtimer_forward(&ts->sched_timer, now, tick_period);
	return tick_program_event(ts->sched_timer.expires, 0);
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	int cpu = smp_processor_id();
	ktime_t now = ktime_get();

	dev->next_event.tv64 = KTIME_MAX;

	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep.
	 * If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start
	 * of idle" jiffy stamp so the idle accounting adjustment we
	 * do when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog();
		ts->idle_jiffies++;
	}

	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);

	/* Do not restart, when we are in the idle loop */
	if (ts->tick_stopped)
		return;

	while (tick_nohz_reprogram(ts, now)) {
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	local_irq_disable();
	if (tick_switch_to_oneshot(tick_nohz_handler)) {
		local_irq_enable();
		return;
	}

	ts->nohz_mode = NOHZ_MODE_LOWRES;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	for (;;) {
		ts->sched_timer.expires = next;
		if (!tick_program_event(next, 0))
			break;
		next = ktime_add(next, tick_period);
	}
	local_irq_enable();

	printk(KERN_INFO "Switched to NOHZ mode on CPU #%d\n",
	       smp_processor_id());
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }

#endif /* NO_HZ */

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * Do not call update_process_times() when we are not in irq
	 * context and have no valid regs pointer.
	 */
	if (regs) {
		/*
		 * When we are idle and the tick is stopped, we have to touch
		 * the watchdog as we might not schedule for a really long
		 * time.
		 * This happens on complete idle SMP systems while
		 * waiting on the login prompt. We also increment the "start of
		 * idle" jiffy stamp so the idle accounting adjustment we do
		 * when we go busy again does not account too many ticks.
		 */
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		update_process_times(user_mode(regs));
		profile_tick(CPU_PROFILING);
	}

	/* Do not restart, when we are in the idle loop */
	if (ts->tick_stopped)
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t now = ktime_get();
	u64 offset;

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;
	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;

	/* Get the next period (per cpu) */
	ts->sched_timer.expires = tick_init_jiffy_update();
	offset = ktime_to_ns(tick_period) >> 1;
	do_div(offset, num_possible_cpus());
	offset *= smp_processor_id();
	ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset);

	for (;;) {
		hrtimer_forward(&ts->sched_timer, now, tick_period);
		hrtimer_start(&ts->sched_timer, ts->sched_timer.expires,
			      HRTIMER_MODE_ABS);
		/* Check, if the timer was already in the past */
		if (hrtimer_active(&ts->sched_timer))
			break;
		now = ktime_get();
	}

#ifdef CONFIG_NO_HZ
	if (tick_nohz_enabled)
		ts->nohz_mode = NOHZ_MODE_HIGHRES;
#endif
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	ts->nohz_mode = NOHZ_MODE_INACTIVE;
}
#endif

/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/**
 * Check if a change happened which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at
 * compile time or at runtime).
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}