Lines Matching +full:local +full:- +full:timers

1 // SPDX-License-Identifier: GPL-2.0
3 * Kernel internal timers
7 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
9 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
11 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
14 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
15 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
16 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
33 #include <linux/posix-timers.h>
55 #include "tick-internal.h"
78 * expiry of the timers, this implementation removes the need for recascading
79 * the timers into the lower array levels. The previous 'classic' timer wheel
86 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
90 * The only exception to this is networking timers with a small expiry
94 * We don't have cascading anymore. Timers with an expiry time above the
105 * HZ 1000 steps
106 * Level Offset Granularity Range
107 * 0 0 1 ms 0 ms - 63 ms
108 * 1 64 8 ms 64 ms - 511 ms
109 * 2 128 64 ms 512 ms - 4095 ms (512ms - ~4s)
110 * 3 192 512 ms 4096 ms - 32767 ms (~4s - ~32s)
111 * 4 256 4096 ms (~4s) 32768 ms - 262143 ms (~32s - ~4m)
112 * 5 320 32768 ms (~32s) 262144 ms - 2097151 ms (~4m - ~34m)
113 * 6 384 262144 ms (~4m) 2097152 ms - 16777215 ms (~34m - ~4h)
114 * 7 448 2097152 ms (~34m) 16777216 ms - 134217727 ms (~4h - ~1d)
115 * 8 512 16777216 ms (~4h) 134217728 ms - 1073741822 ms (~1d - ~12d)
117 * HZ 300
118 * Level Offset Granularity Range
119 * 0 0 3 ms 0 ms - 210 ms
120 * 1 64 26 ms 213 ms - 1703 ms (213ms - ~1s)
121 * 2 128 213 ms 1706 ms - 13650 ms (~1s - ~13s)
122 * 3 192 1706 ms (~1s) 13653 ms - 109223 ms (~13s - ~1m)
123 * 4 256 13653 ms (~13s) 109226 ms - 873810 ms (~1m - ~14m)
124 * 5 320 109226 ms (~1m) 873813 ms - 6990503 ms (~14m - ~1h)
125 * 6 384 873813 ms (~14m) 6990506 ms - 55924050 ms (~1h - ~15h)
126 * 7 448 6990506 ms (~1h) 55924053 ms - 447392423 ms (~15h - ~5d)
127 * 8 512 55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
129 * HZ 250
130 * Level Offset Granularity Range
131 * 0 0 4 ms 0 ms - 255 ms
132 * 1 64 32 ms 256 ms - 2047 ms (256ms - ~2s)
133 * 2 128 256 ms 2048 ms - 16383 ms (~2s - ~16s)
134 * 3 192 2048 ms (~2s) 16384 ms - 131071 ms (~16s - ~2m)
135 * 4 256 16384 ms (~16s) 131072 ms - 1048575 ms (~2m - ~17m)
136 * 5 320 131072 ms (~2m) 1048576 ms - 8388607 ms (~17m - ~2h)
137 * 6 384 1048576 ms (~17m) 8388608 ms - 67108863 ms (~2h - ~18h)
138 * 7 448 8388608 ms (~2h) 67108864 ms - 536870911 ms (~18h - ~6d)
139 * 8 512 67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
141 * HZ 100
142 * Level Offset Granularity Range
143 * 0 0 10 ms 0 ms - 630 ms
144 * 1 64 80 ms 640 ms - 5110 ms (640ms - ~5s)
145 * 2 128 640 ms 5120 ms - 40950 ms (~5s - ~40s)
146 * 3 192 5120 ms (~5s) 40960 ms - 327670 ms (~40s - ~5m)
147 * 4 256 40960 ms (~40s) 327680 ms - 2621430 ms (~5m - ~43m)
148 * 5 320 327680 ms (~5m) 2621440 ms - 20971510 ms (~43m - ~5h)
149 * 6 384 2621440 ms (~43m) 20971520 ms - 167772150 ms (~5h - ~1d)
150 * 7 448 20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
156 #define LVL_CLK_MASK (LVL_CLK_DIV - 1)
165 #define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
170 #define LVL_MASK (LVL_SIZE - 1)
182 #define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
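
The table boundaries fall straight out of these macros. A quick user-space sketch (not kernel code; LVL_CLK_SHIFT = 3 and LVL_BITS = 6 are assumed to match the kernel's definitions) reproduces the HZ 1000 rows:

#include <stdio.h>

#define LVL_CLK_SHIFT 3                 /* assumed kernel value */
#define LVL_BITS      6                 /* assumed kernel value */
#define LVL_SIZE      (1UL << LVL_BITS)
#define LVL_GRAN(n)   (1UL << ((n) * LVL_CLK_SHIFT))

int main(void)
{
	/* At HZ=1000 one jiffy is 1 ms, so this prints the HZ 1000 rows:
	 * offset, granularity and the start of each level's range. */
	for (int n = 0; n < 9; n++)
		printf("level %d: offset %3lu, granularity %8lu ms, range starts %9lu ms\n",
		       n, n * LVL_SIZE, LVL_GRAN(n),
		       n ? LVL_SIZE << ((n - 1) * LVL_CLK_SHIFT) : 0UL);
	return 0;
}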
186 * wheels so we have a separate storage for the deferrable timers.
207 * struct timer_base - Per CPU timer base (number of bases depends on config)
209 * @running_timer: When expiring timers, the lock is dropped. To make
239 * base. Deferrable timers, which are enqueued remotely
248 * of the timer wheel. The list contains all timers
356 * We don't want all cpus firing their timers at once hitting the in round_jiffies_common()
375 j = j - rem; in round_jiffies_common()
377 j = j - rem + HZ; in round_jiffies_common()
380 j -= cpu * 3; in round_jiffies_common()
390 * __round_jiffies - function to round jiffies to a full second
395 * up or down to (approximately) full seconds. This is useful for timers
399 * By rounding these timers to whole seconds, all such timers will fire
416 * __round_jiffies_relative - function to round jiffies to a full second
421 * up or down to (approximately) full seconds. This is useful for timers
425 * By rounding these timers to whole seconds, all such timers will fire
440 return round_jiffies_common(j + j0, cpu, false) - j0; in __round_jiffies_relative()
445 * round_jiffies - function to round jiffies to a full second
449 * up or down to (approximately) full seconds. This is useful for timers
453 * By rounding these timers to whole seconds, all such timers will fire
466 * round_jiffies_relative - function to round jiffies to a full second
470 * up or down to (approximately) full seconds. This is useful for timers
474 * By rounding these timers to whole seconds, all such timers will fire
487 * __round_jiffies_up - function to round jiffies up to a full second
503 * __round_jiffies_up_relative - function to round jiffies up to a full second
517 return round_jiffies_common(j + j0, cpu, true) - j0; in __round_jiffies_up_relative()
522 * round_jiffies_up - function to round jiffies up to a full second
537 * round_jiffies_up_relative - function to round jiffies up to a full second
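
A typical use of this family, sketched with hypothetical names (gc_timer, gc_work): arm coarse housekeeping work on whole-second boundaries so that such timers across the system fire together:

	/* absolute form: */
	mod_timer(&gc_timer, round_jiffies(jiffies + 5 * HZ));

	/* relative form, e.g. for delayed work: */
	queue_delayed_work(system_wq, &gc_work, round_jiffies_relative(5 * HZ));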
554 return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT; in timer_get_idx()
559 timer->flags = (timer->flags & ~TIMER_ARRAYMASK) | in timer_set_idx()
574 * - Timer is armed at the edge of a tick in calc_index()
575 * - Truncation of the expiry time in the outer wheel levels in calc_index()
587 unsigned long delta = expires - clk; in calc_wheel_index()
617 idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry); in calc_wheel_index()
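
Worked example for the index calculation above, assuming HZ = 1000 and the LVL_* macros quoted earlier: a timer armed for expires = clk + 100 has delta = 100 jiffies, which is at least LVL_START(1) = 63 but below LVL_START(2) = 504, so it is queued in level 1 with 8 ms granularity; calc_index() then rounds the expiry up to the next level-1 bucket, so the timer can fire up to 8 ms after the requested time, never before.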
626 * Deferrable timers do not prevent the CPU from entering dynticks and in trigger_dyntick_cpu()
630 * the remote IPI for deferrable timers completely. in trigger_dyntick_cpu()
632 if (!is_timers_nohz_active() || timer->flags & TIMER_DEFERRABLE) in trigger_dyntick_cpu()
640 * on the way to idle then it can't set base->is_idle as we hold in trigger_dyntick_cpu()
643 if (base->is_idle) { in trigger_dyntick_cpu()
644 WARN_ON_ONCE(!(timer->flags & TIMER_PINNED || in trigger_dyntick_cpu()
645 tick_nohz_full_cpu(base->cpu))); in trigger_dyntick_cpu()
646 wake_up_nohz_cpu(base->cpu); in trigger_dyntick_cpu()
659 hlist_add_head(&timer->entry, base->vectors + idx); in enqueue_timer()
660 __set_bit(idx, base->pending_map); in enqueue_timer()
668 * (bucket_expiry) instead of timer->expires. in enqueue_timer()
670 if (time_before(bucket_expiry, base->next_expiry)) { in enqueue_timer()
675 WRITE_ONCE(base->next_expiry, bucket_expiry); in enqueue_timer()
676 base->timers_pending = true; in enqueue_timer()
677 base->next_expiry_recalc = false; in enqueue_timer()
687 idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry); in internal_add_timer()
703 .offset = offsetof(container, hintfn) - \
720 if (timer_hints[i].function == timer->function) { in timer_debug_hint()
727 return timer->function; in timer_debug_hint()
734 return (timer->entry.pprev == NULL && in timer_is_static_object()
735 timer->entry.next == TIMER_ENTRY_STATIC); in timer_is_static_object()
740 * - an active object is initialized
756 /* Stub timer callback for improperly used timers. */
764 * - an active object is activated
765 * - an unknown non-static object is activated
786 * - an active object is freed
804 * - an untracked/uninit-ed object is found
899 timer->entry.pprev = NULL; in do_init_timer()
900 timer->function = func; in do_init_timer()
903 timer->flags = flags | raw_smp_processor_id(); in do_init_timer()
904 lockdep_init_map(&timer->lockdep_map, name, key, 0); in do_init_timer()
908 * init_timer_key - initialize a timer
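
Drivers normally reach init_timer_key() through the timer_setup() wrapper. A minimal hedged sketch (struct and function names are hypothetical):

#include <linux/timer.h>

struct mydev {
	struct timer_list watchdog;
};

static void mydev_watchdog_fn(struct timer_list *t)
{
	struct mydev *dev = from_timer(dev, t, watchdog);
	/* ... handle the expired watchdog for dev ... */
}

static void mydev_init_timers(struct mydev *dev)
{
	/* timer_setup() expands to init_timer_key() with lockdep info */
	timer_setup(&dev->watchdog, mydev_watchdog_fn, 0);
}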
930 struct hlist_node *entry = &timer->entry; in detach_timer()
936 entry->pprev = NULL; in detach_timer()
937 entry->next = LIST_POISON2; in detach_timer()
948 if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) { in detach_if_pending()
949 __clear_bit(idx, base->pending_map); in detach_if_pending()
950 base->next_expiry_recalc = true; in detach_if_pending()
999 * @basej is past base->clk otherwise we might rewind base->clk. in __forward_timer_base()
1001 if (time_before_eq(basej, base->clk)) in __forward_timer_base()
1008 if (time_after(base->next_expiry, basej)) { in __forward_timer_base()
1009 base->clk = basej; in __forward_timer_base()
1011 if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk))) in __forward_timer_base()
1013 base->clk = base->next_expiry; in __forward_timer_base()
1025 * that all timers which are tied to this base are locked, and the base itself
1028 * So __run_timers/migrate_timers can safely modify all timers which could
1029 * be found in the base->vectors array.
1036 __acquires(timer->base->lock) in lock_timer_base()
1044 * might re-read @tf between the check for TIMER_MIGRATING in lock_timer_base()
1047 tf = READ_ONCE(timer->flags); in lock_timer_base()
1051 raw_spin_lock_irqsave(&base->lock, *flags); in lock_timer_base()
1052 if (timer->flags == tf) in lock_timer_base()
1054 raw_spin_unlock_irqrestore(&base->lock, *flags); in lock_timer_base()
1075 * This is a common optimization triggered by the networking code - if in __mod_timer()
1076 * the timer is re-modified to have the same timeout or ends up in the in __mod_timer()
1085 long diff = timer->expires - expires; in __mod_timer()
1104 if (!timer->function) in __mod_timer()
1110 time_before_eq(timer->expires, expires)) { in __mod_timer()
1115 clk = base->clk; in __mod_timer()
1125 timer->expires = expires; in __mod_timer()
1126 else if (time_after(timer->expires, expires)) in __mod_timer()
1127 timer->expires = expires; in __mod_timer()
1138 if (!timer->function) in __mod_timer()
1148 new_base = get_timer_this_cpu_base(timer->flags); in __mod_timer()
1158 if (likely(base->running_timer != timer)) { in __mod_timer()
1160 timer->flags |= TIMER_MIGRATING; in __mod_timer()
1162 raw_spin_unlock(&base->lock); in __mod_timer()
1164 raw_spin_lock(&base->lock); in __mod_timer()
1165 WRITE_ONCE(timer->flags, in __mod_timer()
1166 (timer->flags & ~TIMER_BASEMASK) | base->cpu); in __mod_timer()
1173 timer->expires = expires; in __mod_timer()
1180 if (idx != UINT_MAX && clk == base->clk) in __mod_timer()
1186 raw_spin_unlock_irqrestore(&base->lock, flags); in __mod_timer()
1192 * mod_timer_pending - Modify a pending timer's timeout
1196 * mod_timer_pending() is the same for pending timers as mod_timer(), but
1197 * will not activate inactive timers.
1199 * If @timer->function == NULL then the start operation is silently
1203 * * %0 - The timer was inactive and not modified or was in
1205 * * %1 - The timer was active and requeued to expire at @expires
1214 * mod_timer - Modify a timer's timeout
1220 * del_timer(timer); timer->expires = expires; add_timer(timer);
1230 * If @timer->function == NULL then the start operation is silently
1234 * * %0 - The timer was inactive and started or was in shutdown
1236 * * %1 - The timer was active and requeued to expire at @expires or
1247 * timer_reduce - Modify a timer's timeout if it would reduce the timeout
1255 * If @timer->function == NULL then the start operation is silently
1259 * * %0 - The timer was inactive and started or was in shutdown
1261 * * %1 - The timer was active and requeued to expire at @expires or
1273 * add_timer - Start a timer
1276 * Start @timer to expire at @timer->expires in the future. @timer->expires
1278 * timer->function(timer) will be invoked from soft interrupt context.
1280 * The @timer->expires and @timer->function fields must be set prior
1283 * If @timer->function == NULL then the start operation is silently
1286 * If @timer->expires is already in the past @timer will be queued to
1296 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING); in add_timer()
1301 * add_timer_local() - Start a timer on the local CPU
1312 timer->flags |= TIMER_PINNED; in add_timer_local()
1313 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING); in add_timer_local()
1318 * add_timer_global() - Start a timer without TIMER_PINNED flag set
1329 timer->flags &= ~TIMER_PINNED; in add_timer_global()
1330 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING); in add_timer_global()
1335 * add_timer_on - Start a timer on a particular CPU
1357 timer->flags |= TIMER_PINNED; in add_timer_on()
1359 new_base = get_timer_cpu_base(timer->flags, cpu); in add_timer_on()
1371 if (!timer->function) in add_timer_on()
1375 timer->flags |= TIMER_MIGRATING; in add_timer_on()
1377 raw_spin_unlock(&base->lock); in add_timer_on()
1379 raw_spin_lock(&base->lock); in add_timer_on()
1380 WRITE_ONCE(timer->flags, in add_timer_on()
1381 (timer->flags & ~TIMER_BASEMASK) | cpu); in add_timer_on()
1388 raw_spin_unlock_irqrestore(&base->lock, flags); in add_timer_on()
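
Contrasting the two arming styles, as a hedged sketch (t, t2 and target_cpu are hypothetical; both timers are assumed initialized and not pending):

	t.expires = jiffies + 2 * HZ;		/* must be set before add_timer() */
	add_timer(&t);				/* expires on the local base in ~2 s */

	t2.expires = jiffies + HZ;
	add_timer_on(&t2, target_cpu);		/* TIMER_PINNED on target_cpu */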
1393 * __timer_delete - Internal function: Deactivate a timer
1398 * If @shutdown is true then @timer->function is set to NULL under the
1404 * * %0 - The timer was not pending
1405 * * %1 - The timer was pending and deactivated
1421 * timer->function == NULL in the expiry code. in __timer_delete()
1423 * If timer->function is currently executed, then this makes sure in __timer_delete()
1430 timer->function = NULL; in __timer_delete()
1431 raw_spin_unlock_irqrestore(&base->lock, flags); in __timer_delete()
1438 * timer_delete - Deactivate a timer
1448 * * %0 - The timer was not pending
1449 * * %1 - The timer was pending and deactivated
1458 * timer_shutdown - Deactivate a timer and prevent rearming
1469 * * %0 - The timer was not pending
1470 * * %1 - The timer was pending
1479 * __try_to_del_timer_sync - Internal function: Try to deactivate a timer
1484 * If @shutdown is true then @timer->function is set to NULL under the
1494 * * %0 - The timer was not pending
1495 * * %1 - The timer was pending and deactivated
1496 * * %-1 - The timer callback function is running on a different CPU
1502 int ret = -1; in __try_to_del_timer_sync()
1508 if (base->running_timer != timer) in __try_to_del_timer_sync()
1511 timer->function = NULL; in __try_to_del_timer_sync()
1513 raw_spin_unlock_irqrestore(&base->lock, flags); in __try_to_del_timer_sync()
1519 * try_to_del_timer_sync - Try to deactivate a timer
1530 * * %0 - The timer was not pending
1531 * * %1 - The timer was pending and deactivated
1532 * * %-1 - The timer callback function is running on a different CPU
1543 spin_lock_init(&base->expiry_lock); in timer_base_init_expiry_lock()
1548 spin_lock(&base->expiry_lock); in timer_base_lock_expiry()
1553 spin_unlock(&base->expiry_lock); in timer_base_unlock_expiry()
1559 * If there is a waiter for base->expiry_lock, then it was waiting for the
1564 __releases(&base->lock) __releases(&base->expiry_lock) in timer_sync_wait_running()
1565 __acquires(&base->expiry_lock) __acquires(&base->lock) in timer_sync_wait_running()
1567 if (atomic_read(&base->timer_waiters)) { in timer_sync_wait_running()
1568 raw_spin_unlock_irq(&base->lock); in timer_sync_wait_running()
1569 spin_unlock(&base->expiry_lock); in timer_sync_wait_running()
1570 spin_lock(&base->expiry_lock); in timer_sync_wait_running()
1571 raw_spin_lock_irq(&base->lock); in timer_sync_wait_running()
1589 tf = READ_ONCE(timer->flags); in del_timer_wait_running()
1601 atomic_inc(&base->timer_waiters); in del_timer_wait_running()
1602 spin_lock_bh(&base->expiry_lock); in del_timer_wait_running()
1603 atomic_dec(&base->timer_waiters); in del_timer_wait_running()
1604 spin_unlock_bh(&base->expiry_lock); in del_timer_wait_running()
1616 * __timer_delete_sync - Internal function: Deactivate a timer and wait
1619 * @shutdown: If true, @timer->function will be set to NULL under the
1626 * If @shutdown is set then @timer->function is set to NULL under timer
1634 * * %0 - The timer was not pending
1635 * * %1 - The timer was pending and deactivated
1649 lock_map_acquire(&timer->lockdep_map); in __timer_delete_sync()
1650 lock_map_release(&timer->lockdep_map); in __timer_delete_sync()
1657 WARN_ON(in_hardirq() && !(timer->flags & TIMER_IRQSAFE)); in __timer_delete_sync()
1663 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE)) in __timer_delete_sync()
1679 * timer_delete_sync - Deactivate a timer and wait for the handler to finish.
1689 * For !irqsafe timers, the caller must not hold locks that are held in
1694 * ---- ----
1697 * base->running_timer = mytimer;
1702 * while (base->running_timer == mytimer);
1717 * * %0 - The timer was not pending
1718 * * %1 - The timer was pending and deactivated
1727 * timer_shutdown_sync - Shutdown a timer and prevent rearming
1731 * - @timer is not queued
1732 * - The callback function of @timer is not running
1733 * - @timer cannot be enqueued again. Any attempt to rearm
1744 * code has conditionals like 'if (mything->in_shutdown)' to prevent that
1750 * timer_shutdown_sync(&mything->timer);
1751 * workqueue_destroy(&mything->workqueue);
1759 * * %0 - The timer was not pending
1760 * * %1 - The timer was pending
1780 * timer->lockdep_map, make a copy and use that here. in call_timer_fn()
1784 lockdep_copy_map(&lockdep_map, &timer->lockdep_map); in call_timer_fn()
1800 WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n", in call_timer_fn()
1815 * This value is required only for tracing. base->clk was in expire_timers()
1817 * is related to the old base->clk value. in expire_timers()
1819 unsigned long baseclk = base->clk - 1; in expire_timers()
1825 timer = hlist_entry(head->first, struct timer_list, entry); in expire_timers()
1827 base->running_timer = timer; in expire_timers()
1830 fn = timer->function; in expire_timers()
1834 base->running_timer = NULL; in expire_timers()
1838 if (timer->flags & TIMER_IRQSAFE) { in expire_timers()
1839 raw_spin_unlock(&base->lock); in expire_timers()
1841 raw_spin_lock(&base->lock); in expire_timers()
1842 base->running_timer = NULL; in expire_timers()
1844 raw_spin_unlock_irq(&base->lock); in expire_timers()
1846 raw_spin_lock_irq(&base->lock); in expire_timers()
1847 base->running_timer = NULL; in expire_timers()
1856 unsigned long clk = base->clk = base->next_expiry; in collect_expired_timers()
1864 if (__test_and_clear_bit(idx, base->pending_map)) { in collect_expired_timers()
1865 vec = base->vectors + idx; in collect_expired_timers()
1889 pos = find_next_bit(base->pending_map, end, start); in next_pending_bucket()
1891 return pos - start; in next_pending_bucket()
1893 pos = find_next_bit(base->pending_map, start, offset); in next_pending_bucket()
1894 return pos < start ? pos + LVL_SIZE - start : -1; in next_pending_bucket()
1899 * hold base->lock.
1901 * Store next expiry time in base->next_expiry.
1908 next = base->clk + NEXT_TIMER_MAX_DELTA; in timer_recalc_next_expiry()
1909 clk = base->clk; in timer_recalc_next_expiry()
1925 if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK)) in timer_recalc_next_expiry()
1932 * next expiring bucket in that level. base->clk is the next in timer_recalc_next_expiry()
1969 WRITE_ONCE(base->next_expiry, next); in timer_recalc_next_expiry()
1970 base->next_expiry_recalc = false; in timer_recalc_next_expiry()
1971 base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA); in timer_recalc_next_expiry()
1984 * If high resolution timers are enabled in cmp_next_hrtimer_event()
1998 * Round up to the next jiffy. High resolution timers are in cmp_next_hrtimer_event()
2011 if (base->next_expiry_recalc) in next_timer_interrupt()
2022 if (!base->timers_pending) in next_timer_interrupt()
2023 WRITE_ONCE(base->next_expiry, basej + NEXT_TIMER_MAX_DELTA); in next_timer_interrupt()
2025 return base->next_expiry; in next_timer_interrupt()
2045 * it in the local expiry value. The next global event is irrelevant in in fetch_next_timer_interrupt()
2052 tevt->local = basem + (u64)(nextevt - basej) * TICK_NSEC; in fetch_next_timer_interrupt()
2058 * * The remote callers will only take care of the global timers in fetch_next_timer_interrupt()
2059 * as local timers will be handled by CPU itself. When not in fetch_next_timer_interrupt()
2060 * updating tevt->global with the already missed first global in fetch_next_timer_interrupt()
2063 * * The local callers will ignore the tevt->global anyway, when in fetch_next_timer_interrupt()
2067 tevt->global = tevt->local; in fetch_next_timer_interrupt()
2074 * If the local queue expires first, then the global event can be in fetch_next_timer_interrupt()
2077 if (!local_first && base_global->timers_pending) in fetch_next_timer_interrupt()
2078 tevt->global = basem + (u64)(nextevt_global - basej) * TICK_NSEC; in fetch_next_timer_interrupt()
2080 if (base_local->timers_pending) in fetch_next_timer_interrupt()
2081 tevt->local = basem + (u64)(nextevt_local - basej) * TICK_NSEC; in fetch_next_timer_interrupt()
2088 * fetch_next_timer_interrupt_remote() - Store next timers into @tevt
2094 * Stores the next pending local and global timer expiry values in the
2096 * field is set to KTIME_MAX. If local event expires before global
2108 /* Preset local / global events */ in fetch_next_timer_interrupt_remote()
2109 tevt->local = tevt->global = KTIME_MAX; in fetch_next_timer_interrupt_remote()
2114 lockdep_assert_held(&base_local->lock); in fetch_next_timer_interrupt_remote()
2115 lockdep_assert_held(&base_global->lock); in fetch_next_timer_interrupt_remote()
2121 * timer_unlock_remote_bases - unlock timer bases of cpu
2127 __releases(timer_bases[BASE_LOCAL]->lock) in timer_unlock_remote_bases()
2128 __releases(timer_bases[BASE_GLOBAL]->lock) in timer_unlock_remote_bases()
2135 raw_spin_unlock(&base_global->lock); in timer_unlock_remote_bases()
2136 raw_spin_unlock(&base_local->lock); in timer_unlock_remote_bases()
2140 * timer_lock_remote_bases - lock timer bases of cpu
2146 __acquires(timer_bases[BASE_LOCAL]->lock) in timer_lock_remote_bases()
2147 __acquires(timer_bases[BASE_GLOBAL]->lock) in timer_lock_remote_bases()
2156 raw_spin_lock(&base_local->lock); in timer_lock_remote_bases()
2157 raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING); in timer_lock_remote_bases()
2161 * timer_base_is_idle() - Return whether timer base is set idle
2163 * Returns the is_idle value of the local timer base.
2173 * timer_expire_remote() - expire global timers of cpu
2176 * Expire timers of global base of remote CPU.
2192 next_tmigr = tmigr_cpu_new_timer(tevt->global); in timer_use_tmigr()
2194 next_tmigr = tmigr_cpu_deactivate(tevt->global); in timer_use_tmigr()
2196 next_tmigr = tmigr_quick_check(tevt->global); in timer_use_tmigr()
2200 * sure the CPU will wake up in time to handle remote timers. in timer_use_tmigr()
2203 if (next_tmigr < tevt->local) { in timer_use_tmigr()
2210 tmp = div_u64(next_tmigr - basem, TICK_NSEC); in timer_use_tmigr()
2213 tevt->local = next_tmigr; in timer_use_tmigr()
2222 * Make sure first event is written into tevt->local to not miss a in timer_use_tmigr()
2225 tevt->local = min_t(u64, tevt->local, tevt->global); in timer_use_tmigr()
2232 struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX }; in __get_next_timer_interrupt()
2244 return tevt.local; in __get_next_timer_interrupt()
2250 raw_spin_lock(&base_local->lock); in __get_next_timer_interrupt()
2251 raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING); in __get_next_timer_interrupt()
2270 base_local->is_idle, &tevt); in __get_next_timer_interrupt()
2280 * Set base->is_idle only when caller is timer_base_try_to_set_idle() in __get_next_timer_interrupt()
2292 * BASE_GLOBAL base, deferrable timers may still see large in __get_next_timer_interrupt()
2295 if (!base_local->is_idle && time_after(nextevt, basej + 1)) { in __get_next_timer_interrupt()
2296 base_local->is_idle = true; in __get_next_timer_interrupt()
2298 * Global timers queued locally while running in a task in __get_next_timer_interrupt()
2299 * in nohz_full mode need a self-IPI to kick reprogramming in __get_next_timer_interrupt()
2302 if (tick_nohz_full_cpu(base_local->cpu)) in __get_next_timer_interrupt()
2303 base_global->is_idle = true; in __get_next_timer_interrupt()
2304 trace_timer_base_idle(true, base_local->cpu); in __get_next_timer_interrupt()
2306 *idle = base_local->is_idle; in __get_next_timer_interrupt()
2310 * tmigr_cpu_deactivate() to prevent inconsistent states - active in __get_next_timer_interrupt()
2316 if (!base_local->is_idle && idle_is_possible) in __get_next_timer_interrupt()
2320 raw_spin_unlock(&base_global->lock); in __get_next_timer_interrupt()
2321 raw_spin_unlock(&base_local->lock); in __get_next_timer_interrupt()
2323 return cmp_next_hrtimer_event(basem, tevt.local); in __get_next_timer_interrupt()
2327 * get_next_timer_interrupt() - return the time (clock mono) of the next timer
2343 * timer_base_try_to_set_idle() - Try to set the idle state of the timer bases
2346 * @idle: pointer to store the value of timer_base->is_idle on return;
2362 * timer_clear_idle - Clear the idle state of the timer base
2380 /* Activate without holding the timer_base->lock */ in timer_clear_idle()
2386 * __run_timers - run all expired timers (if any) on this CPU.
2394 lockdep_assert_held(&base->lock); in __run_timers()
2396 if (base->running_timer) in __run_timers()
2399 while (time_after_eq(jiffies, base->clk) && in __run_timers()
2400 time_after_eq(jiffies, base->next_expiry)) { in __run_timers()
2404 * timer at this clk are that all matching timers have been in __run_timers()
2409 WARN_ON_ONCE(!levels && !base->next_expiry_recalc in __run_timers()
2410 && base->timers_pending); in __run_timers()
2412 * While executing timers, base->clk is set 1 offset ahead of in __run_timers()
2415 base->clk++; in __run_timers()
2418 while (levels--) in __run_timers()
2425 if (time_before(jiffies, base->next_expiry)) in __run_timer_base()
2429 raw_spin_lock_irq(&base->lock); in __run_timer_base()
2431 raw_spin_unlock_irq(&base->lock); in __run_timer_base()
2443 * This function runs timers and the timer-tq in bottom half context.
2458 * Called by the local, per-CPU timer interrupt on SMP.
2472 * the lockless local read, sanity checker could complain about in run_local_timers()
2478 * 1. Remote CPU expires global timers of this CPU and updates in run_local_timers()
2489 * is missed, then the timer would expire one jiffy late - in run_local_timers()
2497 * Possible remote writers are using WRITE_ONCE(). Local reader in run_local_timers()
2500 if (time_after_eq(jiffies, READ_ONCE(base->next_expiry)) || in run_local_timers()
2542 wake_up_process(timeout->task); in process_timeout()
2546 * schedule_timeout - sleep until timeout
2553 * %TASK_RUNNING - the scheduler is called, but the task does not sleep
2557 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
2561 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
2574 * to be non-negative.
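
The canonical call pattern, sketched (the task state must be set right before the call):

	long remaining;

	set_current_state(TASK_INTERRUPTIBLE);
	remaining = schedule_timeout(msecs_to_jiffies(100));
	/* remaining is 0 on timeout, else the jiffies left when woken early */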
2621 timeout = expire - jiffies; in schedule_timeout()
2668 int cpu = new_base->cpu; in migrate_timer_list()
2671 timer = hlist_entry(head->first, struct timer_list, entry); in migrate_timer_list()
2673 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu; in migrate_timer_list()
2685 base->clk = jiffies; in timers_prepare_cpu()
2686 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; in timers_prepare_cpu()
2687 base->next_expiry_recalc = false; in timers_prepare_cpu()
2688 base->timers_pending = false; in timers_prepare_cpu()
2689 base->is_idle = false; in timers_prepare_cpu()
2707 raw_spin_lock_irq(&new_base->lock); in timers_dead_cpu()
2708 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); in timers_dead_cpu()
2712 * before moving the timers over. in timers_dead_cpu()
2716 WARN_ON_ONCE(old_base->running_timer); in timers_dead_cpu()
2717 old_base->running_timer = NULL; in timers_dead_cpu()
2720 migrate_timer_list(new_base, old_base->vectors + i); in timers_dead_cpu()
2722 raw_spin_unlock(&old_base->lock); in timers_dead_cpu()
2723 raw_spin_unlock_irq(&new_base->lock); in timers_dead_cpu()
2738 base->cpu = cpu; in init_timer_cpu()
2739 raw_spin_lock_init(&base->lock); in init_timer_cpu()
2740 base->clk = jiffies; in init_timer_cpu()
2741 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; in init_timer_cpu()
2762 * msleep - sleep safely even with waitqueue interruptions
2776 * msleep_interruptible - sleep waiting for signals
2791 * usleep_range_state - Sleep for an approximate time in a given state
2796 * In non-atomic context where the exact wakeup time is flexible, use
2798 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
2799 * power usage by allowing hrtimers to take advantage of an already-
2806 u64 delta = (u64)(max - min) * NSEC_PER_USEC; in usleep_range_state()
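
Typical call through the usleep_range() wrapper, sketched with illustrative values:

	/* let the hardware settle for 100-200 us; the slack allows the
	 * hrtimer core to coalesce this wakeup with a nearby event */
	usleep_range(100, 200);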