Lines matching +full:de +full:- +full:serialized in kernel/time/tick-sched.c (full-text search)

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
4 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
5 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
27 #include <linux/posix-timers.h>
33 #include "tick-internal.h"
38 * Per-CPU nohz control structure
63 * 64-bit can do a quick check without holding the jiffies lock and in tick_do_update_jiffies64()
67 * 32-bit cannot do that because the store of 'tick_next_period' in tick_do_update_jiffies64()
68 * consists of two 32-bit stores, and the first store could be in tick_do_update_jiffies64()
93 * Re-evaluate with the lock held. Another CPU might have done the in tick_do_update_jiffies64()
133 * A plain store is good enough on 32-bit, as the quick check in tick_do_update_jiffies64()
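
The 64-bit/32-bit distinction in the comments above comes down to whether a read of 'tick_next_period' can tear. A minimal userspace model of the quick-check-then-lock pattern, assuming C11 atomics and a pthread mutex standing in for the jiffies lock (update_jiffies_model() and the period constant are illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t next_period;          /* models tick_next_period */
    static pthread_mutex_t jiffies_lock = PTHREAD_MUTEX_INITIALIZER;

    static void update_jiffies_model(uint64_t now)
    {
            /* Quick check without the lock: this 64-bit load cannot tear. */
            if (now < atomic_load_explicit(&next_period, memory_order_relaxed))
                    return;

            pthread_mutex_lock(&jiffies_lock);
            /* Re-evaluate under the lock: another CPU may have done the update. */
            if (now >= atomic_load_explicit(&next_period, memory_order_relaxed))
                    atomic_store_explicit(&next_period, now + 1000000,
                                          memory_order_relaxed);
            pthread_mutex_unlock(&jiffies_lock);
    }

On 32-bit, where a plain 64-bit store is two 32-bit stores, the kernel skips the quick check and takes the lock directly; the _Atomic type above sidesteps the problem by construction.
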
172 tick_next_period += TICK_NSEC - rem; in tick_init_jiffy_update()
187 return !!(ts->flags & flag); in tick_sched_flag_test()
194 ts->flags |= flag; in tick_sched_flag_set()
201 ts->flags &= ~flag; in tick_sched_flag_clear()
214 * this duty, then the jiffies update is still serialized by in tick_sched_do_timer()
238 if (ts->last_tick_jiffies != jiffies) { in tick_sched_do_timer()
239 ts->stalled_jiffies = 0; in tick_sched_do_timer()
240 ts->last_tick_jiffies = READ_ONCE(jiffies); in tick_sched_do_timer()
242 if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) { in tick_sched_do_timer()
244 ts->stalled_jiffies = 0; in tick_sched_do_timer()
245 ts->last_tick_jiffies = READ_ONCE(jiffies); in tick_sched_do_timer()
250 ts->got_idle_tick = 1; in tick_sched_do_timer()
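
The stalled-jiffies logic above is a watchdog: if the tick keeps firing while jiffies does not move, no CPU is performing the jiffies update duty, and this CPU forces it. A sketch of the idiom, assuming the threshold constant plays the role of MAX_STALLED_JIFFIES (the value below is illustrative, not the kernel's):

    #define MAX_STALLED_JIFFIES 5           /* illustrative threshold */

    struct tick_state {
            unsigned long last_tick_jiffies;
            unsigned int stalled_jiffies;
    };

    static void check_stalled(struct tick_state *ts, unsigned long jiffies_now,
                              void (*force_update)(void))
    {
            if (ts->last_tick_jiffies != jiffies_now) {
                    /* jiffies moved since the last tick: nothing is stalled. */
                    ts->stalled_jiffies = 0;
                    ts->last_tick_jiffies = jiffies_now;
            } else if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) {
                    /* Nobody updated jiffies for too long: do it here. */
                    force_update();
                    ts->stalled_jiffies = 0;
                    ts->last_tick_jiffies = jiffies_now;
            }
    }
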
267 ts->idle_jiffies++; in tick_sched_handle()
273 ts->next_tick = 0; in tick_sched_handle()
299 ts->next_tick = 0; in tick_nohz_handler()
303 * - to the idle task if in dynticks-idle in tick_nohz_handler()
304 * - to IRQ exit if in full-dynticks. in tick_nohz_handler()
368 if (check_tick_dependency(&ts->tick_dep_mask)) in can_stop_full_tick()
371 if (check_tick_dependency(&current->tick_dep_mask)) in can_stop_full_tick()
374 if (check_tick_dependency(&current->signal->tick_dep_mask)) in can_stop_full_tick()
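
can_stop_full_tick() above vetoes stopping the tick if any of three dependency masks has a bit set: the per-CPU mask, the current task's mask, or the task's signal group's mask. A condensed model, assuming each mask is a plain atomic word (can_stop_tick() and check_dep() are illustrative names):

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool check_dep(_Atomic unsigned int *mask)
    {
            return atomic_load(mask) != 0;  /* any set bit keeps the tick alive */
    }

    static bool can_stop_tick(_Atomic unsigned int *cpu_mask,
                              _Atomic unsigned int *task_mask,
                              _Atomic unsigned int *signal_mask)
    {
            if (check_dep(cpu_mask))
                    return false;
            if (check_dep(task_mask))
                    return false;
            if (check_dep(signal_mask))
                    return false;
            return true;
    }
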
390 * re-evaluate its dependency on the tick and restart it if necessary.
404 * re-evaluate its dependency on the tick and restart it if necessary.
422 * activate_task() STORE p->tick_dep_mask in tick_nohz_kick_task()
423 * STORE p->on_rq in tick_nohz_kick_task()
425 * LOCK rq->lock LOAD p->on_rq in tick_nohz_kick_task()
428 * LOAD p->tick_dep_mask in tick_nohz_kick_task()
445 * STORE p->cpu = @cpu in tick_nohz_kick_task()
447 * LOCK rq->lock in tick_nohz_kick_task()
448 * smp_mb__after_spin_lock() STORE p->tick_dep_mask in tick_nohz_kick_task()
450 * LOAD p->tick_dep_mask LOAD p->cpu in tick_nohz_kick_task()
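
The two ordering diagrams above describe a store-buffering pattern: each side stores its half of the handshake, executes a full barrier, then loads the other side's store, so at least one side must observe the other's write and the kick cannot be lost. A userspace model with C11 sequentially consistent atomics standing in for the kernel's smp_mb()/rq-lock pairing (function names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic int tick_dep_mask;       /* models p->tick_dep_mask */
    static _Atomic int task_visible;        /* models p->on_rq / p->cpu */

    /* Dependency setter: publish the mask, then look where the task is. */
    static bool setter_sees_task(void)
    {
            atomic_store(&tick_dep_mask, 1);        /* STORE p->tick_dep_mask  */
            return atomic_load(&task_visible) != 0; /* LOAD p->on_rq / p->cpu  */
    }

    /* Scheduler side: publish the task placement, then look at the mask. */
    static bool scheduler_sees_mask(void)
    {
            atomic_store(&task_visible, 1);          /* STORE p->on_rq / p->cpu */
            return atomic_load(&tick_dep_mask) != 0; /* LOAD p->tick_dep_mask   */
    }

With this ordering, the two functions cannot both return false when run concurrently: whichever side "loses" still observes the other's store, so either the setter kicks the right CPU or the task re-evaluates its tick dependency on its own.
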
461 * Kick all full dynticks CPUs in order to force these to re-evaluate
502 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
503 * manage event-throttling.
512 prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask); in tick_nohz_dep_set_cpu()
519 /* Remote IRQ work not NMI-safe */ in tick_nohz_dep_set_cpu()
532 atomic_andnot(BIT(bit), &ts->tick_dep_mask); in tick_nohz_dep_clear_cpu()
537 * Set a per-task tick dependency. RCU needs this. Also posix CPU timers
542 if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask)) in tick_nohz_dep_set_task()
549 atomic_andnot(BIT(bit), &tsk->tick_dep_mask); in tick_nohz_dep_clear_task()
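
Both the per-CPU and per-task setters above rely on the same idiom: atomic_fetch_or() returns the previous mask, so only the transition from clear to set triggers a kick and redundant IPIs are avoided. A sketch (dep_set()/dep_clear() are illustrative names; the kernel's atomic_andnot() is modelled here with fetch_and):

    #include <stdatomic.h>

    static void dep_set(_Atomic unsigned int *mask, unsigned int bit,
                        void (*kick)(void))
    {
            unsigned int prev = atomic_fetch_or(mask, 1u << bit);

            if (!(prev & (1u << bit)))
                    kick();         /* first setter only: no redundant kicks */
    }

    static void dep_clear(_Atomic unsigned int *mask, unsigned int bit)
    {
            atomic_fetch_and(mask, ~(1u << bit));   /* kernel: atomic_andnot() */
    }
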
554 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
561 struct signal_struct *sig = tsk->signal; in tick_nohz_dep_set_signal()
563 prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask); in tick_nohz_dep_set_signal()
567 lockdep_assert_held(&tsk->sighand->siglock); in tick_nohz_dep_set_signal()
575 atomic_andnot(BIT(bit), &sig->tick_dep_mask); in tick_nohz_dep_clear_signal()
579 * Re-evaluate the need for the tick as we switch the current task.
593 if (atomic_read(&current->tick_dep_mask) || in __tick_nohz_task_switch()
594 atomic_read(&current->signal->tick_dep_mask)) in __tick_nohz_task_switch()
599 /* Get the boot-time nohz CPU list from the kernel parameters. */
621 return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY; in tick_nohz_cpu_down()
637 pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support IRQ work self-IPIs\n"); in tick_nohz_init()
667 * NOHZ - aka dynamic tick functionality
700 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
730 delta = ktime_sub(now, ts->idle_entrytime); in tick_nohz_stop_idle()
732 write_seqcount_begin(&ts->idle_sleeptime_seq); in tick_nohz_stop_idle()
734 ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); in tick_nohz_stop_idle()
736 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); in tick_nohz_stop_idle()
738 ts->idle_entrytime = now; in tick_nohz_stop_idle()
740 write_seqcount_end(&ts->idle_sleeptime_seq); in tick_nohz_stop_idle()
747 write_seqcount_begin(&ts->idle_sleeptime_seq); in tick_nohz_start_idle()
748 ts->idle_entrytime = ktime_get(); in tick_nohz_start_idle()
750 write_seqcount_end(&ts->idle_sleeptime_seq); in tick_nohz_start_idle()
762 return -1; in get_cpu_sleep_time_us()
769 seq = read_seqcount_begin(&ts->idle_sleeptime_seq); in get_cpu_sleep_time_us()
772 ktime_t delta = ktime_sub(now, ts->idle_entrytime); in get_cpu_sleep_time_us()
778 } while (read_seqcount_retry(&ts->idle_sleeptime_seq, seq)); in get_cpu_sleep_time_us()
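
The writer/reader pairs above form a classic seqcount: writers bump the sequence to odd around an update, and readers retry until they observe a stable even sequence. A simplified single-writer userspace model (the kernel's seqcount_t inserts the precise memory barriers; the sequentially consistent defaults below stand in for them):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic unsigned int sleeptime_seq;
    static _Atomic uint64_t idle_sleeptime_ns;

    /* Writer side: models the write_seqcount_begin()/end() pairs above. */
    static void add_sleep_time(uint64_t delta)
    {
            atomic_fetch_add(&sleeptime_seq, 1);            /* odd: update in flight */
            atomic_fetch_add_explicit(&idle_sleeptime_ns, delta,
                                      memory_order_relaxed);
            atomic_fetch_add(&sleeptime_seq, 1);            /* even: stable again */
    }

    /* Reader side: models the read_seqcount_begin()/retry() loop above. */
    static uint64_t read_sleep_time(void)
    {
            unsigned int seq;
            uint64_t val;

            do {
                    while ((seq = atomic_load(&sleeptime_seq)) & 1)
                            ;       /* writer active: wait for an even sequence */
                    val = atomic_load_explicit(&idle_sleeptime_ns,
                                               memory_order_relaxed);
            } while (atomic_load(&sleeptime_seq) != seq);

            return val;
    }
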
785 * get_cpu_idle_time_us - get the total idle time of a CPU
799 * Return: -1 if NOHZ is not enabled, else total idle time of the @cpu
805 return get_cpu_sleep_time_us(ts, &ts->idle_sleeptime, in get_cpu_idle_time_us()
811 * get_cpu_iowait_time_us - get the total iowait time of a CPU
825 * Return: -1 if NOHZ is not enabled, else total iowait time of @cpu
831 return get_cpu_sleep_time_us(ts, &ts->iowait_sleeptime, in get_cpu_iowait_time_us()
838 hrtimer_cancel(&ts->sched_timer); in tick_nohz_restart()
839 hrtimer_set_expires(&ts->sched_timer, ts->last_tick); in tick_nohz_restart()
842 hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); in tick_nohz_restart()
845 hrtimer_start_expires(&ts->sched_timer, in tick_nohz_restart()
848 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); in tick_nohz_restart()
855 ts->next_tick = 0; in tick_nohz_restart()
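
tick_nohz_restart() above re-arms the tick from where it last fired: the expiry is reset to 'last_tick' and hrtimer_forward() advances it in whole TICK_NSEC periods until it lies in the future. The arithmetic behind that forward step, as a sketch (the period value is illustrative):

    #include <stdint.h>

    #define TICK_PERIOD_NS 1000000ull   /* illustrative 1 kHz tick, models TICK_NSEC */

    static uint64_t forward_expiry(uint64_t last_tick, uint64_t now)
    {
            uint64_t expires = last_tick;

            if (now >= expires) {
                    /* Skip the whole nohz gap in one multiplication. */
                    uint64_t periods = (now - expires) / TICK_PERIOD_NS + 1;

                    expires += periods * TICK_PERIOD_NS;
            }
            return expires;     /* first tick boundary strictly after 'now' */
    }
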
882 * tick_nohz_next_event() - return the clock monotonic based next event
887 * *%0 - When the next event is a maximum of TICK_NSEC in the future
889 * *%next_event - Next event based on clock monotonic
898 ts->last_jiffies = basejiff; in tick_nohz_next_event()
899 ts->timer_expires_base = basemono; in tick_nohz_next_event()
923 ts->next_timer = next_tick; in tick_nohz_next_event()
934 delta = next_tick - basemono; in tick_nohz_next_event()
941 ts->timer_expires = 0; in tick_nohz_next_event()
958 if (delta < (KTIME_MAX - basemono)) in tick_nohz_next_event()
963 ts->timer_expires = min_t(u64, expires, next_tick); in tick_nohz_next_event()
966 return ts->timer_expires; in tick_nohz_next_event()
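
The tail of tick_nohz_next_event() above is careful arithmetic: keep the tick if the next event is within one period, clamp how long this CPU may defer, avoid overflowing KTIME_MAX when forming the absolute expiry, and never program past the real next timer. A condensed sketch (clamp_expiry() and max_defer are illustrative; in the full source the deferment limit comes from the timekeeping code):

    #include <stdint.h>

    #define KTIME_MAX INT64_MAX             /* models the kernel constant */
    #define TICK_PERIOD_NS 1000000ull       /* illustrative, models TICK_NSEC */

    static uint64_t clamp_expiry(uint64_t basemono, uint64_t next_tick,
                                 uint64_t max_defer)
    {
            uint64_t delta = next_tick - basemono;
            uint64_t expires = KTIME_MAX;

            if (delta <= TICK_PERIOD_NS)
                    return 0;       /* next event within one tick: keep ticking */

            if (delta > max_defer)
                    delta = max_defer;      /* timekeeping limits the sleep length */

            if (delta < (KTIME_MAX - basemono))
                    expires = basemono + delta;     /* overflow-safe sum */

            return expires < next_tick ? expires : next_tick;   /* min_t() */
    }
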
972 unsigned long basejiff = ts->last_jiffies; in tick_nohz_stop_tick()
973 u64 basemono = ts->timer_expires_base; in tick_nohz_stop_tick()
979 ts->timer_expires_base = 0; in tick_nohz_stop_tick()
982 * Now the tick should be stopped definitely - so the timer base needs in tick_nohz_stop_tick()
986 if (expires > ts->timer_expires) { in tick_nohz_stop_tick()
994 * not stop the tick for too long with a shallow C-state (which in tick_nohz_stop_tick()
998 expires = ts->timer_expires; in tick_nohz_stop_tick()
1022 if (tick_sched_flag_test(ts, TS_FLAG_STOPPED) && (expires == ts->next_tick)) { in tick_nohz_stop_tick()
1024 if (expires == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer)) in tick_nohz_stop_tick()
1027 WARN_ONCE(1, "basemono: %llu ts->next_tick: %llu dev->next_event: %llu " in tick_nohz_stop_tick()
1028 "timer->active: %d timer->expires: %llu\n", basemono, ts->next_tick, in tick_nohz_stop_tick()
1029 dev->next_event, hrtimer_active(&ts->sched_timer), in tick_nohz_stop_tick()
1030 hrtimer_get_expires(&ts->sched_timer)); in tick_nohz_stop_tick()
1044 ts->last_tick = hrtimer_get_expires(&ts->sched_timer); in tick_nohz_stop_tick()
1049 ts->next_tick = expires; in tick_nohz_stop_tick()
1057 hrtimer_cancel(&ts->sched_timer); in tick_nohz_stop_tick()
1064 hrtimer_start(&ts->sched_timer, expires, in tick_nohz_stop_tick()
1067 hrtimer_set_expires(&ts->sched_timer, expires); in tick_nohz_stop_tick()
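
The final programming step in tick_nohz_stop_tick() splits by mode: in high resolution the sched hrtimer itself is re-armed at the new expiry; in low resolution only its software expiry is recorded and the clockevent is programmed directly; and KTIME_MAX means there is nothing to arm at all. A self-contained sketch, with stub helpers standing in for the hrtimer and clockevents calls:

    #include <stdbool.h>
    #include <stdint.h>

    #define EXPIRES_NONE UINT64_MAX         /* models KTIME_MAX */

    struct sched_timer { uint64_t expires; };

    static void timer_cancel(struct sched_timer *t)  { t->expires = EXPIRES_NONE; }
    static void timer_start_abs(struct sched_timer *t, uint64_t e) { t->expires = e; }
    static void program_event(uint64_t e) { (void)e; /* would arm the hardware */ }

    static void stop_tick_program(struct sched_timer *t, uint64_t expires,
                                  bool highres)
    {
            if (expires == EXPIRES_NONE) {
                    /* Nothing to wait for: stop the tick timer entirely. */
                    if (highres)
                            timer_cancel(t);
                    else
                            program_event(EXPIRES_NONE);
                    return;
            }

            if (highres) {
                    timer_start_abs(t, expires);    /* re-arm the sched hrtimer */
            } else {
                    t->expires = expires;           /* kept for the lowres handler */
                    program_event(expires);         /* program the clockevent directly */
            }
    }
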
1074 ts->timer_expires_base = 0; in tick_nohz_retain_tick()
1162 pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n", in report_idle_softirq()
1192 /* Should not happen for nohz-full */ in can_stop_idle_tick()
1201 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
1215 if (ts->timer_expires_base) in tick_nohz_idle_stop_tick()
1216 expires = ts->timer_expires; in tick_nohz_idle_stop_tick()
1222 ts->idle_calls++; in tick_nohz_idle_stop_tick()
1229 ts->idle_sleeps++; in tick_nohz_idle_stop_tick()
1230 ts->idle_expires = expires; in tick_nohz_idle_stop_tick()
1233 ts->idle_jiffies = ts->last_jiffies; in tick_nohz_idle_stop_tick()
1247 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
1261 WARN_ON_ONCE(ts->timer_expires_base); in tick_nohz_idle_enter()
1270 * tick_nohz_irq_exit - Notify the tick about IRQ exit
1275 * re-evaluation of the next tick. Depending on the context:
1278 * time accounting. The next tick will be re-evaluated on the next idle
1285 * 2.2) If there is no tick dependency, (re-)evaluate the next tick and
1299 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
1307 if (ts->got_idle_tick) { in tick_nohz_idle_got_tick()
1308 ts->got_idle_tick = 0; in tick_nohz_idle_got_tick()
1315 * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
1325 return __this_cpu_read(tick_cpu_device.evtdev)->next_event; in tick_nohz_get_next_hrtimer()
1329 * tick_nohz_get_sleep_length - return the expected length of the current sleep
1349 ktime_t now = ts->idle_entrytime; in tick_nohz_get_sleep_length()
1354 *delta_next = ktime_sub(dev->next_event, now); in tick_nohz_get_sleep_length()
1368 hrtimer_next_event_without(&ts->sched_timer)); in tick_nohz_get_sleep_length()
1374 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
1386 return ts->idle_calls; in tick_nohz_get_idle_calls_cpu()
1394 ts->idle_exittime = now; in tick_nohz_account_idle_time()
1403 ticks = jiffies - ts->idle_jiffies; in tick_nohz_account_idle_time()
1433 * tick_nohz_idle_exit - Update the tick upon idle task exit
1443 * then re-evaluate the next tick and try to keep it stopped
1457 WARN_ON_ONCE(ts->timer_expires_base); in tick_nohz_idle_exit()
1476 * In low-resolution mode, the tick handler must be implemented directly
1479 * low-resolution mode (see hrtimer_run_queues()).
1485 dev->next_event = KTIME_MAX; in tick_nohz_lowres_handler()
1487 if (likely(tick_nohz_handler(&ts->sched_timer) == HRTIMER_RESTART)) in tick_nohz_lowres_handler()
1488 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); in tick_nohz_lowres_handler()
1502 * tick_nohz_switch_to_nohz - switch to NOHZ mode
1568 * tick_setup_sched_timer - setup the tick emulation timer
1575 /* Emulate tick processing via per-CPU hrtimers: */ in tick_setup_sched_timer()
1576 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); in tick_setup_sched_timer()
1580 ts->sched_timer.function = tick_nohz_handler; in tick_setup_sched_timer()
1583 /* Get the next period (per-CPU) */ in tick_setup_sched_timer()
1584 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); in tick_setup_sched_timer()
1591 hrtimer_add_expires_ns(&ts->sched_timer, offset); in tick_setup_sched_timer()
1594 hrtimer_forward_now(&ts->sched_timer, TICK_NSEC); in tick_setup_sched_timer()
1596 hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD); in tick_setup_sched_timer()
1598 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); in tick_setup_sched_timer()
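
The 'offset' added at source line 1591 skews each CPU's emulated tick inside the tick period so that all CPUs do not contend on the jiffies lock at the same instant. A sketch of the computation, assuming mainline's scheme (half a tick period divided among the possible CPUs, enabled with the skew_tick= boot parameter; the period value is illustrative):

    #include <stdint.h>

    #define TICK_PERIOD_NS 1000000ull       /* illustrative, models TICK_NSEC */

    static uint64_t skew_offset(unsigned int cpu, unsigned int nr_cpus)
    {
            /* Spread the CPUs evenly across half a tick period. */
            uint64_t step = (TICK_PERIOD_NS >> 1) / nr_cpus;

            return step * cpu;
    }
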
1614 hrtimer_cancel(&ts->sched_timer); in tick_sched_timer_dying()
1616 idle_sleeptime = ts->idle_sleeptime; in tick_sched_timer_dying()
1617 iowait_sleeptime = ts->iowait_sleeptime; in tick_sched_timer_dying()
1618 idle_calls = ts->idle_calls; in tick_sched_timer_dying()
1619 idle_sleeps = ts->idle_sleeps; in tick_sched_timer_dying()
1621 ts->idle_sleeptime = idle_sleeptime; in tick_sched_timer_dying()
1622 ts->iowait_sleeptime = iowait_sleeptime; in tick_sched_timer_dying()
1623 ts->idle_calls = idle_calls; in tick_sched_timer_dying()
1624 ts->idle_sleeps = idle_sleeps; in tick_sched_timer_dying()
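
tick_sched_timer_dying() wipes the per-CPU tick_sched state when a CPU goes down but must not lose the accumulated sleep statistics, hence the save/copy-back dance above; in the full source a memset() of the whole structure sits between the two blocks, which does not match the search and is therefore absent from the listing. A model of the idiom:

    #include <stdint.h>
    #include <string.h>

    struct tick_sched_model {
            /* transient state: must be cleared when the CPU dies */
            uint64_t next_tick;
            unsigned int flags;
            /* accumulated statistics: must survive the reset */
            uint64_t idle_sleeptime, iowait_sleeptime;
            unsigned long idle_calls, idle_sleeps;
    };

    static void reset_keep_stats(struct tick_sched_model *ts)
    {
            uint64_t idle_sleeptime = ts->idle_sleeptime;
            uint64_t iowait_sleeptime = ts->iowait_sleeptime;
            unsigned long idle_calls = ts->idle_calls;
            unsigned long idle_sleeps = ts->idle_sleeps;

            memset(ts, 0, sizeof(*ts));             /* wipe all transient state */

            ts->idle_sleeptime = idle_sleeptime;    /* copy the statistics back */
            ts->iowait_sleeptime = iowait_sleeptime;
            ts->idle_calls = idle_calls;
            ts->idle_sleeps = idle_sleeps;
    }
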
1645 set_bit(0, &ts->check_clocks); in tick_oneshot_notify()
1652 * softirq). 'allow_nohz' signals that we can switch into low-res NOHZ
1660 if (!test_and_clear_bit(0, &ts->check_clocks)) in tick_check_oneshot_change()