Lines Matching +full:local-timer-stop
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
4 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
5 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
27 #include <linux/posix-timers.h>
33 #include "tick-internal.h"
35 #include <trace/events/timer.h>
38 * Per-CPU nohz control structure
63 * 64-bit can do a quick check without holding the jiffies lock and in tick_do_update_jiffies64()
67 * 32-bit cannot do that because the store of 'tick_next_period' in tick_do_update_jiffies64()
68 * consists of two 32-bit stores, and the first store could be in tick_do_update_jiffies64()
93 * Re-evaluate with the lock held. Another CPU might have done the in tick_do_update_jiffies64()
133 * A plain store is good enough on 32-bit, as the quick check in tick_do_update_jiffies64()
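The 32-bit hazard described above is a torn 64-bit store, so a lockless reader must be guarded by a sequence count. A minimal sketch of that read side, using hypothetical names (next_period, period_seq) rather than this file's actual variables:

#include <linux/seqlock.h>

static u64 next_period;
static seqcount_t period_seq = SEQCNT_ZERO(period_seq);

static u64 read_next_period(void)
{
	unsigned int seq;
	u64 val;

	/* Retry until no writer ran between reading the two 32-bit halves. */
	do {
		seq = read_seqcount_begin(&period_seq);
		val = next_period;
	} while (read_seqcount_retry(&period_seq, seq));

	return val;
}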
172 tick_next_period += TICK_NSEC - rem; in tick_init_jiffy_update()
187 return !!(ts->flags & flag); in tick_sched_flag_test()
194 ts->flags |= flag; in tick_sched_flag_set()
201 ts->flags &= ~flag; in tick_sched_flag_clear()
238 if (ts->last_tick_jiffies != jiffies) { in tick_sched_do_timer()
239 ts->stalled_jiffies = 0; in tick_sched_do_timer()
240 ts->last_tick_jiffies = READ_ONCE(jiffies); in tick_sched_do_timer()
242 if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) { in tick_sched_do_timer()
244 ts->stalled_jiffies = 0; in tick_sched_do_timer()
245 ts->last_tick_jiffies = READ_ONCE(jiffies); in tick_sched_do_timer()
250 ts->got_idle_tick = 1; in tick_sched_do_timer()
267 ts->idle_jiffies++; in tick_sched_handle()
273 ts->next_tick = 0; in tick_sched_handle()
281 * We rearm the timer until we get disabled by the idle code.
284 static enum hrtimer_restart tick_nohz_handler(struct hrtimer *timer) in tick_nohz_handler() argument
286 struct tick_sched *ts = container_of(timer, struct tick_sched, sched_timer); in tick_nohz_handler()
299 ts->next_tick = 0; in tick_nohz_handler()
303 * - to the idle task if in dynticks-idle in tick_nohz_handler()
304 * - to IRQ exit if in full-dynticks. in tick_nohz_handler()
309 hrtimer_forward(timer, now, TICK_NSEC); in tick_nohz_handler()
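tick_nohz_handler() has the shape of any self-rearming hrtimer callback: do the work, forward the expiry past now, return HRTIMER_RESTART. A standalone sketch of that shape, with a hypothetical handler name and one-second period:

#include <linux/hrtimer.h>

static enum hrtimer_restart demo_handler(struct hrtimer *timer)
{
	/* Periodic work would run here. */

	/* Push the expiry past now; the core requeues the timer. */
	hrtimer_forward_now(timer, ns_to_ktime(NSEC_PER_SEC));
	return HRTIMER_RESTART;
}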
317 hrtimer_cancel(&ts->sched_timer); in tick_sched_timer_cancel()
376 if (check_tick_dependency(&ts->tick_dep_mask)) in can_stop_full_tick()
379 if (check_tick_dependency(&current->tick_dep_mask)) in can_stop_full_tick()
382 if (check_tick_dependency(&current->signal->tick_dep_mask)) in can_stop_full_tick()
398 * re-evaluate its dependency on the tick and restart it if necessary.
412 * re-evaluate its dependency on the tick and restart it if necessary.
430 * activate_task() STORE p->tick_dep_mask in tick_nohz_kick_task()
431 * STORE p->on_rq in tick_nohz_kick_task()
433 * LOCK rq->lock LOAD p->on_rq in tick_nohz_kick_task()
436 * LOAD p->tick_dep_mask in tick_nohz_kick_task()
453 * STORE p->cpu = @cpu in tick_nohz_kick_task()
455 * LOCK rq->lock in tick_nohz_kick_task()
456 * smp_mb__after_spin_lock() STORE p->tick_dep_mask in tick_nohz_kick_task()
458 * LOAD p->tick_dep_mask LOAD p->cpu in tick_nohz_kick_task()
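Both diagrams are instances of the store-buffering pairing: each side stores, runs a full barrier, then loads, which guarantees that at least one side observes the other's store. A generic sketch with hypothetical flags a and b, not the scheduler's real fields:

#include <linux/compiler.h>
#include <asm/barrier.h>

static int a, b;

static void side_one(void)		/* e.g. tick_nohz_kick_task() */
{
	WRITE_ONCE(a, 1);
	smp_mb();			/* pairs with side_two()'s barrier */
	if (!READ_ONCE(b)) {
		/* side_two() has not ordered its store yet; after its
		 * own barrier it is guaranteed to observe a == 1. */
	}
}

static void side_two(void)		/* e.g. under rq->lock */
{
	WRITE_ONCE(b, 1);
	smp_mb();			/* or smp_mb__after_spinlock() */
	if (!READ_ONCE(a)) {
		/* symmetric: side_one() will observe b == 1 */
	}
}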
469 * Kick all full dynticks CPUs in order to force these to re-evaluate
510 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
511 * manage event throttling.
520 prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask); in tick_nohz_dep_set_cpu()
523 /* Perf needs local kick that is NMI safe */ in tick_nohz_dep_set_cpu()
527 /* Remote IRQ work not NMI-safe */ in tick_nohz_dep_set_cpu()
540 atomic_andnot(BIT(bit), &ts->tick_dep_mask); in tick_nohz_dep_clear_cpu()
545 * Set a per-task tick dependency. RCU needs this. Also posix CPU timers
550 if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask)) in tick_nohz_dep_set_task()
557 atomic_andnot(BIT(bit), &tsk->tick_dep_mask); in tick_nohz_dep_clear_task()
562 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
569 struct signal_struct *sig = tsk->signal; in tick_nohz_dep_set_signal()
571 prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask); in tick_nohz_dep_set_signal()
575 lockdep_assert_held(&tsk->sighand->siglock); in tick_nohz_dep_set_signal()
583 atomic_andnot(BIT(bit), &sig->tick_dep_mask); in tick_nohz_dep_clear_signal()
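Taken together: a subsystem pins the tick at whichever granularity it needs and drops the dependency when done. A hedged usage sketch; the wrapper function is hypothetical, but the setters, clearers, and dependency bits are the real API from <linux/tick.h>:

#include <linux/tick.h>
#include <linux/sched.h>

static void demo_pin_tick(int cpu, struct task_struct *tsk)
{
	/* Per-CPU: e.g. perf keeping the tick alive for event throttling. */
	tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
	tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);

	/* Per-task: e.g. a posix CPU timer armed on @tsk. */
	tick_nohz_dep_set_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
	tick_nohz_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}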
587 * Re-evaluate the need for the tick as we switch the current task.
601 if (atomic_read(&current->tick_dep_mask) || in __tick_nohz_task_switch()
602 atomic_read(&current->signal->tick_dep_mask)) in __tick_nohz_task_switch()
607 /* Get the boot-time nohz CPU list from the kernel parameters. */
629 return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY; in tick_nohz_cpu_down()
645 pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support IRQ work self-IPIs\n"); in tick_nohz_init()
675 * NOHZ - aka dynamic tick functionality
708 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
738 delta = ktime_sub(now, ts->idle_entrytime); in tick_nohz_stop_idle()
740 write_seqcount_begin(&ts->idle_sleeptime_seq); in tick_nohz_stop_idle()
742 ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); in tick_nohz_stop_idle()
744 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); in tick_nohz_stop_idle()
746 ts->idle_entrytime = now; in tick_nohz_stop_idle()
748 write_seqcount_end(&ts->idle_sleeptime_seq); in tick_nohz_stop_idle()
755 write_seqcount_begin(&ts->idle_sleeptime_seq); in tick_nohz_start_idle()
756 ts->idle_entrytime = ktime_get(); in tick_nohz_start_idle()
758 write_seqcount_end(&ts->idle_sleeptime_seq); in tick_nohz_start_idle()
770 return -1; in get_cpu_sleep_time_us()
777 seq = read_seqcount_begin(&ts->idle_sleeptime_seq); in get_cpu_sleep_time_us()
780 ktime_t delta = ktime_sub(now, ts->idle_entrytime); in get_cpu_sleep_time_us()
786 } while (read_seqcount_retry(&ts->idle_sleeptime_seq, seq)); in get_cpu_sleep_time_us()
793 * get_cpu_idle_time_us - get the total idle time of a CPU
807 * Return: -1 if NOHZ is not enabled, else total idle time of the @cpu
813 return get_cpu_sleep_time_us(ts, &ts->idle_sleeptime, in get_cpu_idle_time_us()
819 * get_cpu_iowait_time_us - get the total iowait time of a CPU
833 * Return: -1 if NOHZ is not enabled, else total iowait time of @cpu
839 return get_cpu_sleep_time_us(ts, &ts->iowait_sleeptime, in get_cpu_iowait_time_us()
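A typical consumer (a cpufreq governor, say) reads these counters as sketched below; the wrapper and its fallback policy are hypothetical:

#include <linux/tick.h>

static u64 demo_read_idle_us(int cpu)
{
	u64 wall_us;
	u64 idle_us = get_cpu_idle_time_us(cpu, &wall_us);

	if (idle_us == (u64)-1)
		return 0;	/* NOHZ inactive: caller must fall back to jiffies accounting */

	return idle_us;		/* wall_us now holds the time of the last update */
}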
846 hrtimer_cancel(&ts->sched_timer); in tick_nohz_restart()
847 hrtimer_set_expires(&ts->sched_timer, ts->last_tick); in tick_nohz_restart()
850 hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); in tick_nohz_restart()
853 hrtimer_start_expires(&ts->sched_timer, in tick_nohz_restart()
856 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); in tick_nohz_restart()
860 * Reset to make sure the next tick stop doesn't get fooled by past in tick_nohz_restart()
863 ts->next_tick = 0; in tick_nohz_restart()
890 * tick_nohz_next_event() - return the clock monotonic based next event
895 * *%0 - When the next event is a maximum of TICK_NSEC in the future
897 * *%next_event - Next event based on clock monotonic
906 ts->last_jiffies = basejiff; in tick_nohz_next_event()
907 ts->timer_expires_base = basemono; in tick_nohz_next_event()
912 * Aside from that, check whether the local timer softirq is in tick_nohz_next_event()
914 * because there is an already expired timer, so it will request in tick_nohz_next_event()
915 * immediate expiry, which rearms the hardware timer with a in tick_nohz_next_event()
924 * Get the next pending timer. If high resolution in tick_nohz_next_event()
925 * timers are enabled this only takes the timer wheel in tick_nohz_next_event()
931 ts->next_timer = next_tick; in tick_nohz_next_event()
940 * force prod the timer. in tick_nohz_next_event()
942 delta = next_tick - basemono; in tick_nohz_next_event()
945 * We've not stopped the tick yet, and there's a timer in the in tick_nohz_next_event()
949 ts->timer_expires = 0; in tick_nohz_next_event()
966 if (delta < (KTIME_MAX - basemono)) in tick_nohz_next_event()
971 ts->timer_expires = min_t(u64, expires, next_tick); in tick_nohz_next_event()
974 return ts->timer_expires; in tick_nohz_next_event()
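The expiry computed just above is clamped twice: the addition must not wrap past KTIME_MAX, and the result must never be programmed later than the next queued timer. Distilled into a hypothetical helper:

#include <linux/ktime.h>
#include <linux/minmax.h>

static u64 demo_clamp_expiry(u64 basemono, u64 delta, u64 next_tick)
{
	u64 expires;

	if (delta < (KTIME_MAX - basemono))
		expires = basemono + delta;	/* safe: cannot wrap */
	else
		expires = KTIME_MAX;		/* no bounded event pending */

	/* Never program later than the next queued timer. */
	return min_t(u64, expires, next_tick);
}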
980 unsigned long basejiff = ts->last_jiffies; in tick_nohz_stop_tick()
981 u64 basemono = ts->timer_expires_base; in tick_nohz_stop_tick()
986 /* Make sure we won't be trying to stop it twice in a row. */ in tick_nohz_stop_tick()
987 ts->timer_expires_base = 0; in tick_nohz_stop_tick()
990 * Now the tick should definitely be stopped - so the timer base needs in tick_nohz_stop_tick()
991 * to be marked idle as well to not miss a newly queued timer. in tick_nohz_stop_tick()
994 if (expires > ts->timer_expires) { in tick_nohz_stop_tick()
996 * This path could only happen when the first timer was removed in tick_nohz_stop_tick()
998 * high resolution mode is not active, timer could also be a in tick_nohz_stop_tick()
1002 * not stop the tick for too long with a shallow C-state (which in tick_nohz_stop_tick()
1006 expires = ts->timer_expires; in tick_nohz_stop_tick()
1009 /* If the timer base is not idle, retain the not yet stopped tick. */ in tick_nohz_stop_tick()
1016 * the tick timer next, which might be this CPU as well. If we in tick_nohz_stop_tick()
1030 if (tick_sched_flag_test(ts, TS_FLAG_STOPPED) && (expires == ts->next_tick)) { in tick_nohz_stop_tick()
1032 if (expires == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer)) in tick_nohz_stop_tick()
1035 WARN_ONCE(1, "basemono: %llu ts->next_tick: %llu dev->next_event: %llu " in tick_nohz_stop_tick()
1036 "timer->active: %d timer->expires: %llu\n", basemono, ts->next_tick, in tick_nohz_stop_tick()
1037 dev->next_event, hrtimer_active(&ts->sched_timer), in tick_nohz_stop_tick()
1038 hrtimer_get_expires(&ts->sched_timer)); in tick_nohz_stop_tick()
1052 ts->last_tick = hrtimer_get_expires(&ts->sched_timer); in tick_nohz_stop_tick()
1057 ts->next_tick = expires; in tick_nohz_stop_tick()
1060 * If the expiration time == KTIME_MAX, then we simply stop in tick_nohz_stop_tick()
1061 * the tick timer. in tick_nohz_stop_tick()
1069 hrtimer_start(&ts->sched_timer, expires, in tick_nohz_stop_tick()
1072 hrtimer_set_expires(&ts->sched_timer, expires); in tick_nohz_stop_tick()
1079 ts->timer_expires_base = 0; in tick_nohz_retain_tick()
1098 * Clear the timer idle flag, so we avoid IPIs on remote queueing and in tick_nohz_restart_sched_tick()
1106 /* Cancel the scheduled timer and restore the tick: */ in tick_nohz_restart_sched_tick()
1167 pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n", in report_idle_softirq()
1197 /* Should not happen for nohz-full */ in can_stop_idle_tick()
1206 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
1208 * When the next event is more than a tick into the future, stop the idle tick
1218 * tick timer expiration time is known already. in tick_nohz_idle_stop_tick()
1220 if (ts->timer_expires_base) in tick_nohz_idle_stop_tick()
1221 expires = ts->timer_expires; in tick_nohz_idle_stop_tick()
1227 ts->idle_calls++; in tick_nohz_idle_stop_tick()
1234 ts->idle_sleeps++; in tick_nohz_idle_stop_tick()
1235 ts->idle_expires = expires; in tick_nohz_idle_stop_tick()
1238 ts->idle_jiffies = ts->last_jiffies; in tick_nohz_idle_stop_tick()
1252 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
1266 WARN_ON_ONCE(ts->timer_expires_base); in tick_nohz_idle_enter()
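For orientation: the idle loop, do_idle() in kernel/sched/idle.c, drives these entry points in roughly the following order (cpuidle and polling details omitted):

tick_nohz_idle_enter();			/* begin idle time accounting */

while (!need_resched()) {
	tick_nohz_idle_stop_tick();	/* stop the tick if it pays off */
	default_idle_call();		/* or enter a cpuidle state */
}

tick_nohz_idle_exit();			/* restart the tick, flush accounting */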
1275 * tick_nohz_irq_exit - Notify the tick about IRQ exit
1277 * A timer may have been added/modified/deleted either by the current IRQ,
1280 * re-evaluation of the next tick. Depending on the context:
1283 * time accounting. The next tick will be re-evaluated on the next idle
1290 * 2.2) If there is no tick dependency, (re-)evaluate the next tick and
1291 * stop/update it accordingly.
1304 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
1312 if (ts->got_idle_tick) { in tick_nohz_idle_got_tick()
1313 ts->got_idle_tick = 0; in tick_nohz_idle_got_tick()
1320 * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
1330 return __this_cpu_read(tick_cpu_device.evtdev)->next_event; in tick_nohz_get_next_hrtimer()
1334 * tick_nohz_get_sleep_length - return the expected length of the current sleep
1354 ktime_t now = ts->idle_entrytime; in tick_nohz_get_sleep_length()
1359 *delta_next = ktime_sub(dev->next_event, now); in tick_nohz_get_sleep_length()
1369 * If the next highres timer to expire is earlier than 'next_event', the in tick_nohz_get_sleep_length()
1373 hrtimer_next_event_without(&ts->sched_timer)); in tick_nohz_get_sleep_length()
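cpuidle governors are the intended callers of tick_nohz_get_sleep_length(); a minimal sketch:

ktime_t delta_next;	/* duration to the next event if the tick cannot be stopped */
ktime_t sleep_len = tick_nohz_get_sleep_length(&delta_next);

/* Pick an idle state whose exit latency fits inside sleep_len. */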
1379 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
1391 return ts->idle_calls; in tick_nohz_get_idle_calls_cpu()
1399 ts->idle_exittime = now; in tick_nohz_account_idle_time()
1408 ticks = jiffies - ts->idle_jiffies; in tick_nohz_account_idle_time()
1438 * tick_nohz_idle_exit - Update the tick upon idle task exit
1448 * then re-evaluate the next tick and try to keep it stopped
1462 WARN_ON_ONCE(ts->timer_expires_base); in tick_nohz_idle_exit()
1481 * In low-resolution mode, the tick handler must be implemented directly
1484 * low-resolution mode (see hrtimer_run_queues()).
1490 dev->next_event = KTIME_MAX; in tick_nohz_lowres_handler()
1492 if (likely(tick_nohz_handler(&ts->sched_timer) == HRTIMER_RESTART)) in tick_nohz_lowres_handler()
1493 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); in tick_nohz_lowres_handler()
1507 * tick_nohz_switch_to_nohz - switch to NOHZ mode
1538 * rare case (typically stop machine). So we must make sure we have a in tick_nohz_irq_enter()
1573 * tick_setup_sched_timer - setup the tick emulation timer
1580 /* Emulate tick processing via per-CPU hrtimers: */ in tick_setup_sched_timer()
1581 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); in tick_setup_sched_timer()
1585 ts->sched_timer.function = tick_nohz_handler; in tick_setup_sched_timer()
1588 /* Get the next period (per-CPU) */ in tick_setup_sched_timer()
1589 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); in tick_setup_sched_timer()
1596 hrtimer_add_expires_ns(&ts->sched_timer, offset); in tick_setup_sched_timer()
1599 hrtimer_forward_now(&ts->sched_timer, TICK_NSEC); in tick_setup_sched_timer()
1601 hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD); in tick_setup_sched_timer()
1603 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); in tick_setup_sched_timer()
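The offset added a few lines earlier staggers the per-CPU tick timers across part of a tick to avert 'jiffies_lock' contention; when skew is enabled it is computed along these lines:

#include <asm/div64.h>

u64 offset = TICK_NSEC >> 1;		/* spread over half a tick */

do_div(offset, num_possible_cpus());	/* width of one CPU's slot */
offset *= smp_processor_id();		/* this CPU's slot */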
1615 struct clock_event_device *dev = td->evtdev; in tick_sched_timer_dying()
1624 * make sure not to call low-res tick handler. in tick_sched_timer_dying()
1627 dev->event_handler = clockevents_handle_noop; in tick_sched_timer_dying()
1629 idle_sleeptime = ts->idle_sleeptime; in tick_sched_timer_dying()
1630 iowait_sleeptime = ts->iowait_sleeptime; in tick_sched_timer_dying()
1631 idle_calls = ts->idle_calls; in tick_sched_timer_dying()
1632 idle_sleeps = ts->idle_sleeps; in tick_sched_timer_dying()
1634 ts->idle_sleeptime = idle_sleeptime; in tick_sched_timer_dying()
1635 ts->iowait_sleeptime = iowait_sleeptime; in tick_sched_timer_dying()
1636 ts->idle_calls = idle_calls; in tick_sched_timer_dying()
1637 ts->idle_sleeps = idle_sleeps; in tick_sched_timer_dying()
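Note that between the save and the restore above, code not matched by this search clears the whole tick_sched structure, so only these cumulative idle statistics survive the CPU going offline.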
1658 set_bit(0, &ts->check_clocks); in tick_oneshot_notify()
1664 * Called cyclically from the hrtimer softirq (driven by the timer
1665 * softirq). 'allow_nohz' signals that we can switch into low-res NOHZ
1673 if (!test_and_clear_bit(0, &ts->check_clocks)) in tick_check_oneshot_change()