Lines Matching +full:timer +full:- +full:cannot +full:- +full:wake +full:- +full:cpu

1 // SPDX-License-Identifier: GPL-2.0
7 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
9 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
11 * 1998-12-24 Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
14 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
15 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
16 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
33 #include <linux/posix-timers.h>
34 #include <linux/cpu.h>
54 #include "tick-internal.h"
58 #include <trace/events/timer.h>
65 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
72 * The array level of a newly armed timer depends on the relative expiry
76 * Contrary to the original timer wheel implementation, which aims for 'exact'
78 * the timers into the lower array levels. The previous 'classic' timer wheel
83 * This is an optimization of the original timer wheel implementation for the
84 * majority of the timer wheel use cases: timeouts. The vast majority of
 * HZ 1000 steps
 * Level Offset Granularity Range
106 * 0 0 1 ms 0 ms - 63 ms
107 * 1 64 8 ms 64 ms - 511 ms
108 * 2 128 64 ms 512 ms - 4095 ms (512ms - ~4s)
109 * 3 192 512 ms 4096 ms - 32767 ms (~4s - ~32s)
110 * 4 256 4096 ms (~4s) 32768 ms - 262143 ms (~32s - ~4m)
111 * 5 320 32768 ms (~32s) 262144 ms - 2097151 ms (~4m - ~34m)
112 * 6 384 262144 ms (~4m) 2097152 ms - 16777215 ms (~34m - ~4h)
113 * 7 448 2097152 ms (~34m) 16777216 ms - 134217727 ms (~4h - ~1d)
114 * 8 512 16777216 ms (~4h) 134217728 ms - 1073741822 ms (~1d - ~12d)
 * HZ 300 steps
 * Level Offset Granularity Range
118 * 0 0 3 ms 0 ms - 210 ms
119 * 1 64 26 ms 213 ms - 1703 ms (213ms - ~1s)
120 * 2 128 213 ms 1706 ms - 13650 ms (~1s - ~13s)
121 * 3 192 1706 ms (~1s) 13653 ms - 109223 ms (~13s - ~1m)
122 * 4 256 13653 ms (~13s) 109226 ms - 873810 ms (~1m - ~14m)
123 * 5 320 109226 ms (~1m) 873813 ms - 6990503 ms (~14m - ~1h)
124 * 6 384 873813 ms (~14m) 6990506 ms - 55924050 ms (~1h - ~15h)
125 * 7 448 6990506 ms (~1h) 55924053 ms - 447392423 ms (~15h - ~5d)
126 * 8 512 55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
 * HZ 250 steps
 * Level Offset Granularity Range
130 * 0 0 4 ms 0 ms - 255 ms
131 * 1 64 32 ms 256 ms - 2047 ms (256ms - ~2s)
132 * 2 128 256 ms 2048 ms - 16383 ms (~2s - ~16s)
133 * 3 192 2048 ms (~2s) 16384 ms - 131071 ms (~16s - ~2m)
134 * 4 256 16384 ms (~16s) 131072 ms - 1048575 ms (~2m - ~17m)
135 * 5 320 131072 ms (~2m) 1048576 ms - 8388607 ms (~17m - ~2h)
136 * 6 384 1048576 ms (~17m) 8388608 ms - 67108863 ms (~2h - ~18h)
137 * 7 448 8388608 ms (~2h) 67108864 ms - 536870911 ms (~18h - ~6d)
138 * 8 512 67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
 * HZ 100 steps
 * Level Offset Granularity Range
142 * 0 0 10 ms 0 ms - 630 ms
143 * 1 64 80 ms 640 ms - 5110 ms (640ms - ~5s)
144 * 2 128 640 ms 5120 ms - 40950 ms (~5s - ~40s)
145 * 3 192 5120 ms (~5s) 40960 ms - 327670 ms (~40s - ~5m)
146 * 4 256 40960 ms (~40s) 327680 ms - 2621430 ms (~5m - ~43m)
147 * 5 320 327680 ms (~5m) 2621440 ms - 20971510 ms (~43m - ~5h)
148 * 6 384 2621440 ms (~43m) 20971520 ms - 167772150 ms (~5h - ~1d)
149 * 7 448 20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
155 #define LVL_CLK_MASK (LVL_CLK_DIV - 1)
164 #define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
169 #define LVL_MASK (LVL_SIZE - 1)
181 #define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
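As a rough illustration of how these macros place a timeout, here is a hypothetical stand-alone user-space sketch (not kernel code) that reproduces the level-selection arithmetic for the HZ 1000 table above. The constants mirror the defaults implied by that table: 64 buckets per level, 8x coarser granularity per level, 9 levels.

#include <stdio.h>

#define LVL_CLK_SHIFT	3
#define LVL_BITS	6
#define LVL_SIZE	(1UL << LVL_BITS)
#define LVL_DEPTH	9
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))
#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))

int main(void)
{
	unsigned long delta = 5000;	/* jiffies until expiry; 5 s at HZ 1000 */
	int lvl;

	/* Pick the first level for which the delta is still below the next level's start. */
	for (lvl = 0; lvl < LVL_DEPTH - 1; lvl++) {
		if (delta < LVL_START(lvl + 1))
			break;
	}
	printf("delta %lu jiffies -> level %d, bucket granularity %lu jiffies\n",
	       delta, lvl, LVL_GRAN(lvl));	/* prints level 3, granularity 512 */
	return 0;
}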
206 * struct timer_base - Per CPU timer base (number of base depends on config)
210 * currently running timer, the pointer is set to the
211 * timer, which expires at the moment. If no timer is
214 * timer expiry callback execution and when trying to
215 * delete a running timer and it wasn't successful in
217 * when callback was preempted on a remote CPU and a
218 * caller tries to delete the running timer. It also
220 * delete a timer preempted the softirq thread which
221 * is running the timer callback function.
223 * waiting for the end of the timer callback function
225 * @clk: clock of the timer base; is updated before enqueue
226 * of a timer; during expiry, it is 1 offset ahead of
229 * @next_expiry: expiry value of the first timer; it is updated when
230 * finding the next timer and during enqueue; the
232 * @cpu: Number of CPU the timer base belongs to
234 * required. Value is set true, when a timer was
239 * never wake up an idle CPU. So no matter of supporting it
241 * @timers_pending: Is set, when a timer is pending in the base. It is only
243 * @pending_map: bitmap of the timer wheel; each bit reflects a
245 * single timer is enqueued in the related bucket.
247 * of the timer wheel. The list contains all timers
259 unsigned int cpu; member
348 static unsigned long round_jiffies_common(unsigned long j, int cpu, in round_jiffies_common() argument
356 * same lock or cachelines, so we skew each extra cpu with an extra in round_jiffies_common()
362 j += cpu * 3; in round_jiffies_common()
368 * due to delays of the timer irq, long irq off times etc etc) then in round_jiffies_common()
374 j = j - rem; in round_jiffies_common()
376 j = j - rem + HZ; in round_jiffies_common()
379 j -= cpu * 3; in round_jiffies_common()
389 * __round_jiffies - function to round jiffies to a full second
391 * @cpu: the processor number on which the timeout will happen
400 * of this is to have the CPU wake up less, which saves power.
408 unsigned long __round_jiffies(unsigned long j, int cpu) in __round_jiffies() argument
410 return round_jiffies_common(j, cpu, false); in __round_jiffies()
415 * __round_jiffies_relative - function to round jiffies to a full second
417 * @cpu: the processor number on which the timeout will happen
426 * of this is to have the CPU wake up less, which saves power.
434 unsigned long __round_jiffies_relative(unsigned long j, int cpu) in __round_jiffies_relative() argument
439 return round_jiffies_common(j + j0, cpu, false) - j0; in __round_jiffies_relative()
444 * round_jiffies - function to round jiffies to a full second
454 * of this is to have the CPU wake up less, which saves power.
465 * round_jiffies_relative - function to round jiffies to a full second
475 * of this is to have the CPU wake up less, which saves power.
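A minimal usage sketch (hypothetical driver code, assuming a kernel module context): a non-critical housekeeping timer re-armed on a whole-second boundary so its wakeups can coalesce with other rounded timers on the same CPU.

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list housekeeping_timer;	/* hypothetical example timer */

static void housekeeping_fn(struct timer_list *t)
{
	/* ... periodic, non-time-critical work ... */

	/* Re-arm roughly 10 seconds out, rounded to a full second. */
	mod_timer(t, jiffies + round_jiffies_relative(10 * HZ));
}

static void housekeeping_start(void)
{
	timer_setup(&housekeeping_timer, housekeeping_fn, 0);
	mod_timer(&housekeeping_timer, jiffies + round_jiffies_relative(10 * HZ));
}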
486 * __round_jiffies_up - function to round jiffies up to a full second
488 * @cpu: the processor number on which the timeout will happen
495 unsigned long __round_jiffies_up(unsigned long j, int cpu) in __round_jiffies_up() argument
497 return round_jiffies_common(j, cpu, true); in __round_jiffies_up()
502 * __round_jiffies_up_relative - function to round jiffies up to a full second
504 * @cpu: the processor number on which the timeout will happen
511 unsigned long __round_jiffies_up_relative(unsigned long j, int cpu) in __round_jiffies_up_relative() argument
516 return round_jiffies_common(j + j0, cpu, true) - j0; in __round_jiffies_up_relative()
521 * round_jiffies_up - function to round jiffies up to a full second
536 * round_jiffies_up_relative - function to round jiffies up to a full second
551 static inline unsigned int timer_get_idx(struct timer_list *timer) in timer_get_idx() argument
553 return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT; in timer_get_idx()
556 static inline void timer_set_idx(struct timer_list *timer, unsigned int idx) in timer_set_idx() argument
558 timer->flags = (timer->flags & ~TIMER_ARRAYMASK) | in timer_set_idx()
571 * The timer wheel has to guarantee that a timer does not fire in calc_index()
573 * - Timer is armed at the edge of a tick in calc_index()
574 * - Truncation of the expiry time in the outer wheel levels in calc_index()
586 unsigned long delta = expires - clk; in calc_wheel_index()
616 idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry); in calc_wheel_index()
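To make the truncation guarantee concrete, here is a hypothetical stand-alone sketch of the rounding described above: the expiry is shifted to the level's granularity and incremented by one, so truncation in the outer levels can only push a timer later, never earlier. This is an illustration, not the kernel's calc_index() itself.

/* Stand-alone illustration only; mirrors the rounding described above. */
static unsigned long bucket_expiry_for(unsigned long expires, unsigned int lvl)
{
	unsigned int shift = lvl * 3;		/* LVL_CLK_SHIFT == 3 */

	expires = (expires >> shift) + 1;	/* round up to the next bucket */
	return expires << shift;		/* earliest time the bucket can fire */
}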
622 trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer) in trigger_dyntick_cpu() argument
625 * Deferrable timers do not prevent the CPU from entering dynticks and in trigger_dyntick_cpu()
627 * new deferrable timer is enqueued will wake up the remote CPU but in trigger_dyntick_cpu()
628 * nothing will be done with the deferrable timer base. Therefore skip in trigger_dyntick_cpu()
631 if (!is_timers_nohz_active() || timer->flags & TIMER_DEFERRABLE) in trigger_dyntick_cpu()
635 * We might have to IPI the remote CPU if the base is idle and the in trigger_dyntick_cpu()
637 * timer is pinned. If it is a non-pinned timer, it is only queued in trigger_dyntick_cpu()
637 * on the remote CPU, when timer was running during queueing. Then in trigger_dyntick_cpu()
638 * everything is handled by remote CPU anyway. If the other CPU is in trigger_dyntick_cpu()
639 * on the way to idle then it can't set base->is_idle as we hold in trigger_dyntick_cpu()
642 if (base->is_idle) { in trigger_dyntick_cpu()
643 WARN_ON_ONCE(!(timer->flags & TIMER_PINNED || in trigger_dyntick_cpu()
644 tick_nohz_full_cpu(base->cpu))); in trigger_dyntick_cpu()
645 wake_up_nohz_cpu(base->cpu); in trigger_dyntick_cpu()
650 * Enqueue the timer into the hash bucket, mark it pending in
651 * the bitmap, store the index in the timer flags then wake up
652 * the target CPU if needed.
654 static void enqueue_timer(struct timer_base *base, struct timer_list *timer, in enqueue_timer() argument
658 hlist_add_head(&timer->entry, base->vectors + idx); in enqueue_timer()
659 __set_bit(idx, base->pending_map); in enqueue_timer()
660 timer_set_idx(timer, idx); in enqueue_timer()
662 trace_timer_start(timer, bucket_expiry); in enqueue_timer()
665 * Check whether this is the new first expiring timer. The in enqueue_timer()
666 * effective expiry time of the timer is required here in enqueue_timer()
667 * (bucket_expiry) instead of timer->expires. in enqueue_timer()
669 if (time_before(bucket_expiry, base->next_expiry)) { in enqueue_timer()
671 * Set the next expiry time and kick the CPU so it in enqueue_timer()
674 WRITE_ONCE(base->next_expiry, bucket_expiry); in enqueue_timer()
675 base->timers_pending = true; in enqueue_timer()
676 base->next_expiry_recalc = false; in enqueue_timer()
677 trigger_dyntick_cpu(base, timer); in enqueue_timer()
681 static void internal_add_timer(struct timer_base *base, struct timer_list *timer) in internal_add_timer() argument
686 idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry); in internal_add_timer()
687 enqueue_timer(base, timer, idx, bucket_expiry); in internal_add_timer()
702 .offset = offsetof(container, hintfn) - \
708 struct delayed_work, timer, work.func),
710 struct kthread_delayed_work, timer, work.func),
715 struct timer_list *timer = addr; in timer_debug_hint() local
719 if (timer_hints[i].function == timer->function) { in timer_debug_hint()
726 return timer->function; in timer_debug_hint()
731 struct timer_list *timer = addr; in timer_is_static_object() local
733 return (timer->entry.pprev == NULL && in timer_is_static_object()
734 timer->entry.next == TIMER_ENTRY_STATIC); in timer_is_static_object()
739 * - an active object is initialized
743 struct timer_list *timer = addr; in timer_fixup_init() local
747 del_timer_sync(timer); in timer_fixup_init()
748 debug_object_init(timer, &timer_debug_descr); in timer_fixup_init()
755 /* Stub timer callback for improperly used timers. */
763 * - an active object is activated
764 * - an unknown non-static object is activated
768 struct timer_list *timer = addr; in timer_fixup_activate() local
772 timer_setup(timer, stub_timer, 0); in timer_fixup_activate()
785 * - an active object is freed
789 struct timer_list *timer = addr; in timer_fixup_free() local
793 del_timer_sync(timer); in timer_fixup_free()
794 debug_object_free(timer, &timer_debug_descr); in timer_fixup_free()
803 * - an untracked/uninit-ed object is found
807 struct timer_list *timer = addr; in timer_fixup_assert_init() local
811 timer_setup(timer, stub_timer, 0); in timer_fixup_assert_init()
828 static inline void debug_timer_init(struct timer_list *timer) in debug_timer_init() argument
830 debug_object_init(timer, &timer_debug_descr); in debug_timer_init()
833 static inline void debug_timer_activate(struct timer_list *timer) in debug_timer_activate() argument
835 debug_object_activate(timer, &timer_debug_descr); in debug_timer_activate()
838 static inline void debug_timer_deactivate(struct timer_list *timer) in debug_timer_deactivate() argument
840 debug_object_deactivate(timer, &timer_debug_descr); in debug_timer_deactivate()
843 static inline void debug_timer_assert_init(struct timer_list *timer) in debug_timer_assert_init() argument
845 debug_object_assert_init(timer, &timer_debug_descr); in debug_timer_assert_init()
848 static void do_init_timer(struct timer_list *timer,
853 void init_timer_on_stack_key(struct timer_list *timer, in init_timer_on_stack_key() argument
858 debug_object_init_on_stack(timer, &timer_debug_descr); in init_timer_on_stack_key()
859 do_init_timer(timer, func, flags, name, key); in init_timer_on_stack_key()
863 void destroy_timer_on_stack(struct timer_list *timer) in destroy_timer_on_stack() argument
865 debug_object_free(timer, &timer_debug_descr); in destroy_timer_on_stack()
870 static inline void debug_timer_init(struct timer_list *timer) { } in debug_timer_init() argument
871 static inline void debug_timer_activate(struct timer_list *timer) { } in debug_timer_activate() argument
872 static inline void debug_timer_deactivate(struct timer_list *timer) { } in debug_timer_deactivate() argument
873 static inline void debug_timer_assert_init(struct timer_list *timer) { } in debug_timer_assert_init() argument
876 static inline void debug_init(struct timer_list *timer) in debug_init() argument
878 debug_timer_init(timer); in debug_init()
879 trace_timer_init(timer); in debug_init()
882 static inline void debug_deactivate(struct timer_list *timer) in debug_deactivate() argument
884 debug_timer_deactivate(timer); in debug_deactivate()
885 trace_timer_cancel(timer); in debug_deactivate()
888 static inline void debug_assert_init(struct timer_list *timer) in debug_assert_init() argument
890 debug_timer_assert_init(timer); in debug_assert_init()
893 static void do_init_timer(struct timer_list *timer, in do_init_timer() argument
898 timer->entry.pprev = NULL; in do_init_timer()
899 timer->function = func; in do_init_timer()
902 timer->flags = flags | raw_smp_processor_id(); in do_init_timer()
903 lockdep_init_map(&timer->lockdep_map, name, key, 0); in do_init_timer()
907 * init_timer_key - initialize a timer
908 * @timer: the timer to be initialized
909 * @func: timer callback function
910 * @flags: timer flags
911 * @name: name of the timer
912 * @key: lockdep class key of the fake lock used for tracking timer
915 * init_timer_key() must be done to a timer prior to calling *any* of the
916 * other timer functions.
918 void init_timer_key(struct timer_list *timer, in init_timer_key() argument
922 debug_init(timer); in init_timer_key()
923 do_init_timer(timer, func, flags, name, key); in init_timer_key()
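A minimal initialization sketch (hypothetical driver structure): timer_setup() is the usual wrapper around init_timer_key(), and from_timer() recovers the containing structure in the callback.

#include <linux/timer.h>
#include <linux/jiffies.h>

struct mydev {				/* hypothetical driver state */
	struct timer_list watchdog;
	/* ... */
};

static void mydev_watchdog_fn(struct timer_list *t)
{
	struct mydev *dev = from_timer(dev, t, watchdog);

	/* handle the timeout for 'dev' ... */
	(void)dev;
}

static void mydev_start(struct mydev *dev)
{
	/* Must run before any other timer API is used on this timer. */
	timer_setup(&dev->watchdog, mydev_watchdog_fn, 0);
	mod_timer(&dev->watchdog, jiffies + msecs_to_jiffies(500));
}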
927 static inline void detach_timer(struct timer_list *timer, bool clear_pending) in detach_timer() argument
929 struct hlist_node *entry = &timer->entry; in detach_timer()
931 debug_deactivate(timer); in detach_timer()
935 entry->pprev = NULL; in detach_timer()
936 entry->next = LIST_POISON2; in detach_timer()
939 static int detach_if_pending(struct timer_list *timer, struct timer_base *base, in detach_if_pending() argument
942 unsigned idx = timer_get_idx(timer); in detach_if_pending()
944 if (!timer_pending(timer)) in detach_if_pending()
947 if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) { in detach_if_pending()
948 __clear_bit(idx, base->pending_map); in detach_if_pending()
949 base->next_expiry_recalc = true; in detach_if_pending()
952 detach_timer(timer, clear_pending); in detach_if_pending()
956 static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu) in get_timer_cpu_base() argument
961 * If the timer is deferrable and NO_HZ_COMMON is set then we need in get_timer_cpu_base()
967 return per_cpu_ptr(&timer_bases[index], cpu); in get_timer_cpu_base()
975 * If the timer is deferrable and NO_HZ_COMMON is set then we need in get_timer_this_cpu_base()
994 * @basej is past base->clk otherwise we might rewind base->clk. in __forward_timer_base()
996 if (time_before_eq(basej, base->clk)) in __forward_timer_base()
1003 if (time_after(base->next_expiry, basej)) { in __forward_timer_base()
1004 base->clk = basej; in __forward_timer_base()
1006 if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk))) in __forward_timer_base()
1008 base->clk = base->next_expiry; in __forward_timer_base()
1024 * be found in the base->vectors array.
1026 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
1029 static struct timer_base *lock_timer_base(struct timer_list *timer, in lock_timer_base() argument
1031 __acquires(timer->base->lock) in lock_timer_base()
1039 * might re-read @tf between the check for TIMER_MIGRATING in lock_timer_base()
1042 tf = READ_ONCE(timer->flags); in lock_timer_base()
1046 raw_spin_lock_irqsave(&base->lock, *flags); in lock_timer_base()
1047 if (timer->flags == tf) in lock_timer_base()
1049 raw_spin_unlock_irqrestore(&base->lock, *flags); in lock_timer_base()
1060 __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options) in __mod_timer() argument
1067 debug_assert_init(timer); in __mod_timer()
1070 * This is a common optimization triggered by the networking code - if in __mod_timer()
1071 * the timer is re-modified to have the same timeout or ends up in the in __mod_timer()
1074 if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) { in __mod_timer()
1078 * timer with this expiry. in __mod_timer()
1080 long diff = timer->expires - expires; in __mod_timer()
1088 * We lock timer base and calculate the bucket index right in __mod_timer()
1089 * here. If the timer ends up in the same bucket, then we in __mod_timer()
1093 base = lock_timer_base(timer, &flags); in __mod_timer()
1095 * Has @timer been shutdown? This needs to be evaluated in __mod_timer()
1099 if (!timer->function) in __mod_timer()
1104 if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) && in __mod_timer()
1105 time_before_eq(timer->expires, expires)) { in __mod_timer()
1110 clk = base->clk; in __mod_timer()
1115 * timer. If it matches set the expiry to the new value so a in __mod_timer()
1118 if (idx == timer_get_idx(timer)) { in __mod_timer()
1120 timer->expires = expires; in __mod_timer()
1121 else if (time_after(timer->expires, expires)) in __mod_timer()
1122 timer->expires = expires; in __mod_timer()
1127 base = lock_timer_base(timer, &flags); in __mod_timer()
1129 * Has @timer been shutdown? This needs to be evaluated in __mod_timer()
1133 if (!timer->function) in __mod_timer()
1139 ret = detach_if_pending(timer, base, false); in __mod_timer()
1143 new_base = get_timer_this_cpu_base(timer->flags); in __mod_timer()
1147 * We are trying to schedule the timer on the new base. in __mod_timer()
1148 * However we can't change timer's base while it is running, in __mod_timer()
1149 * otherwise timer_delete_sync() can't detect that the timer's in __mod_timer()
1151 * timer is serialized wrt itself. in __mod_timer()
1153 if (likely(base->running_timer != timer)) { in __mod_timer()
1155 timer->flags |= TIMER_MIGRATING; in __mod_timer()
1157 raw_spin_unlock(&base->lock); in __mod_timer()
1159 raw_spin_lock(&base->lock); in __mod_timer()
1160 WRITE_ONCE(timer->flags, in __mod_timer()
1161 (timer->flags & ~TIMER_BASEMASK) | base->cpu); in __mod_timer()
1166 debug_timer_activate(timer); in __mod_timer()
1168 timer->expires = expires; in __mod_timer()
1175 if (idx != UINT_MAX && clk == base->clk) in __mod_timer()
1176 enqueue_timer(base, timer, idx, bucket_expiry); in __mod_timer()
1178 internal_add_timer(base, timer); in __mod_timer()
1181 raw_spin_unlock_irqrestore(&base->lock, flags); in __mod_timer()
1187 * mod_timer_pending - Modify a pending timer's timeout
1188 * @timer: The pending timer to be modified
1194 * If @timer->function == NULL then the start operation is silently
1198 * * %0 - The timer was inactive and not modified or was in
1200 * * %1 - The timer was active and requeued to expire at @expires
1202 int mod_timer_pending(struct timer_list *timer, unsigned long expires) in mod_timer_pending() argument
1204 return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY); in mod_timer_pending()
1209 * mod_timer - Modify a timer's timeout
1210 * @timer: The timer to be modified
1213 * mod_timer(timer, expires) is equivalent to:
1215 * del_timer(timer); timer->expires = expires; add_timer(timer);
1218 * case that the timer is inactive, the del_timer() part is a NOP. The
1219 * timer is in any case activated with the new expiry time @expires.
1222 * same timer, then mod_timer() is the only safe way to modify the timeout,
1223 * since add_timer() cannot modify an already running timer.
1225 * If @timer->function == NULL then the start operation is silently
1229 * * %0 - The timer was inactive and started or was in shutdown
1231 * * %1 - The timer was active and requeued to expire at @expires or
1232 * the timer was active and not modified because @expires did
1235 int mod_timer(struct timer_list *timer, unsigned long expires) in mod_timer() argument
1237 return __mod_timer(timer, expires, 0); in mod_timer()
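A typical usage sketch (hypothetical transmit watchdog): every unit of progress pushes the timeout out, starting the timer if it was inactive.

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list tx_watchdog;	/* hypothetical, set up with timer_setup() elsewhere */

static void tx_progress(void)
{
	/* Safe even if tx_watchdog's callback is running concurrently;
	 * returns 1 if the timer was pending, 0 if it had to be started. */
	mod_timer(&tx_watchdog, jiffies + HZ);
}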
1242 * timer_reduce - Modify a timer's timeout if it would reduce the timeout
1243 * @timer: The timer to be modified
1247 * modify an enqueued timer if that would reduce the expiration time. If
1248 * @timer is not enqueued it starts the timer.
1250 * If @timer->function == NULL then the start operation is silently
1254 * * %0 - The timer was inactive and started or was in shutdown
1256 * * %1 - The timer was active and requeued to expire at @expires or
1257 * the timer was active and not modified because @expires
1259 * timer would expire earlier than already scheduled
1261 int timer_reduce(struct timer_list *timer, unsigned long expires) in timer_reduce() argument
1263 return __mod_timer(timer, expires, MOD_TIMER_REDUCE); in timer_reduce()
1268 * add_timer - Start a timer
1269 * @timer: The timer to be started
1271 * Start @timer to expire at @timer->expires in the future. @timer->expires
1272 * is the absolute expiry time measured in 'jiffies'. When the timer expires
1273 * timer->function(timer) will be invoked from soft interrupt context.
1275 * The @timer->expires and @timer->function fields must be set prior
1278 * If @timer->function == NULL then the start operation is silently
1281 * If @timer->expires is already in the past @timer will be queued to
1282 * expire at the next timer tick.
1284 * This can only operate on an inactive timer. Attempts to invoke this on
1285 * an active timer are rejected with a warning.
1287 void add_timer(struct timer_list *timer) in add_timer() argument
1289 if (WARN_ON_ONCE(timer_pending(timer))) in add_timer()
1291 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING); in add_timer()
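A one-shot arming sketch (hypothetical names, using the static DEFINE_TIMER() initializer): add_timer() only accepts an inactive timer whose ->expires is already set.

#include <linux/timer.h>
#include <linux/jiffies.h>

static void oneshot_fn(struct timer_list *t)
{
	/* one-shot work ... */
}

static DEFINE_TIMER(oneshot, oneshot_fn);

static void arm_oneshot(void)
{
	/* Rearming a pending timer must use mod_timer() instead. */
	oneshot.expires = jiffies + 2 * HZ;
	add_timer(&oneshot);
}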
1296 * add_timer_local() - Start a timer on the local CPU
1297 * @timer: The timer to be started
1299 * Same as add_timer() except that the timer flag TIMER_PINNED is set.
1303 void add_timer_local(struct timer_list *timer) in add_timer_local() argument
1305 if (WARN_ON_ONCE(timer_pending(timer))) in add_timer_local()
1307 timer->flags |= TIMER_PINNED; in add_timer_local()
1308 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING); in add_timer_local()
1313 * add_timer_global() - Start a timer without TIMER_PINNED flag set
1314 * @timer: The timer to be started
1316 * Same as add_timer() except that the timer flag TIMER_PINNED is unset.
1320 void add_timer_global(struct timer_list *timer) in add_timer_global() argument
1322 if (WARN_ON_ONCE(timer_pending(timer))) in add_timer_global()
1324 timer->flags &= ~TIMER_PINNED; in add_timer_global()
1325 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING); in add_timer_global()
1330 * add_timer_on - Start a timer on a particular CPU
1331 * @timer: The timer to be started
1332 * @cpu: The CPU to start it on
1334 * Same as add_timer() except that it starts the timer on the given CPU and
1335 * the TIMER_PINNED flag is set. When timer shouldn't be a pinned timer in
1341 void add_timer_on(struct timer_list *timer, int cpu) in add_timer_on() argument
1346 debug_assert_init(timer); in add_timer_on()
1348 if (WARN_ON_ONCE(timer_pending(timer))) in add_timer_on()
1351 /* Make sure timer flags have TIMER_PINNED flag set */ in add_timer_on()
1352 timer->flags |= TIMER_PINNED; in add_timer_on()
1354 new_base = get_timer_cpu_base(timer->flags, cpu); in add_timer_on()
1357 * If @timer was on a different CPU, it should be migrated with the in add_timer_on()
1361 base = lock_timer_base(timer, &flags); in add_timer_on()
1363 * Has @timer been shutdown? This needs to be evaluated while in add_timer_on()
1366 if (!timer->function) in add_timer_on()
1370 timer->flags |= TIMER_MIGRATING; in add_timer_on()
1372 raw_spin_unlock(&base->lock); in add_timer_on()
1374 raw_spin_lock(&base->lock); in add_timer_on()
1375 WRITE_ONCE(timer->flags, in add_timer_on()
1376 (timer->flags & ~TIMER_BASEMASK) | cpu); in add_timer_on()
1380 debug_timer_activate(timer); in add_timer_on()
1381 internal_add_timer(base, timer); in add_timer_on()
1383 raw_spin_unlock_irqrestore(&base->lock, flags); in add_timer_on()
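A pinning sketch (hypothetical polling timer): add_timer_on() arms the timer on a chosen CPU and sets TIMER_PINNED, so expiry is handled from that CPU's timer base.

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list poll_timer;	/* hypothetical, set up with timer_setup() elsewhere */

static void start_poll_on(int cpu)
{
	poll_timer.expires = jiffies + HZ / 10;
	add_timer_on(&poll_timer, cpu);
}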
1388 * __timer_delete - Internal function: Deactivate a timer
1389 * @timer: The timer to be deactivated
1390 * @shutdown: If true, this indicates that the timer is about to be
1393 * If @shutdown is true then @timer->function is set to NULL under the
1394 * timer base lock which prevents further rearming of the timer. In that
1395 * case any attempt to rearm @timer after this function returns will be
1399 * * %0 - The timer was not pending
1400 * * %1 - The timer was pending and deactivated
1402 static int __timer_delete(struct timer_list *timer, bool shutdown) in __timer_delete() argument
1408 debug_assert_init(timer); in __timer_delete()
1412 * timer is pending or not to protect against a concurrent rearm in __timer_delete()
1415 * enqueued timer is dequeued and cannot end up with in __timer_delete()
1416 * timer->function == NULL in the expiry code. in __timer_delete()
1418 * If timer->function is currently executing, then this makes sure in __timer_delete()
1419 * that the callback cannot requeue the timer. in __timer_delete()
1421 if (timer_pending(timer) || shutdown) { in __timer_delete()
1422 base = lock_timer_base(timer, &flags); in __timer_delete()
1423 ret = detach_if_pending(timer, base, true); in __timer_delete()
1425 timer->function = NULL; in __timer_delete()
1426 raw_spin_unlock_irqrestore(&base->lock, flags); in __timer_delete()
1433 * timer_delete - Deactivate a timer
1434 * @timer: The timer to be deactivated
1436 * The function only deactivates a pending timer, but contrary to
1437 * timer_delete_sync() it does not take into account whether the timer's
1438 * callback function is concurrently executed on a different CPU or not.
1439 * Nor does it prevent rearming of the timer. If @timer can be rearmed
1443 * * %0 - The timer was not pending
1444 * * %1 - The timer was pending and deactivated
1446 int timer_delete(struct timer_list *timer) in timer_delete() argument
1448 return __timer_delete(timer, false); in timer_delete()
1453 * timer_shutdown - Deactivate a timer and prevent rearming
1454 * @timer: The timer to be deactivated
1456 * The function does not wait for an eventually running timer callback on a
1457 * different CPU but it prevents rearming of the timer. Any attempt to arm
1458 * @timer after this function returns will be silently ignored.
1461 * timer_shutdown_sync() cannot be invoked due to locking or context constraints.
1464 * * %0 - The timer was not pending
1465 * * %1 - The timer was pending
1467 int timer_shutdown(struct timer_list *timer) in timer_shutdown() argument
1469 return __timer_delete(timer, true); in timer_shutdown()
1474 * __try_to_del_timer_sync - Internal function: Try to deactivate a timer
1475 * @timer: Timer to deactivate
1476 * @shutdown: If true, this indicates that the timer is about to be
1479 * If @shutdown is true then @timer->function is set to NULL under the
1480 * timer base lock which prevents further rearming of the timer. Any
1481 * attempt to rearm @timer after this function returns will be silently
1484 * This function cannot guarantee that the timer cannot be rearmed
1489 * * %0 - The timer was not pending
1490 * * %1 - The timer was pending and deactivated
1491 * * %-1 - The timer callback function is running on a different CPU
1493 static int __try_to_del_timer_sync(struct timer_list *timer, bool shutdown) in __try_to_del_timer_sync() argument
1497 int ret = -1; in __try_to_del_timer_sync()
1499 debug_assert_init(timer); in __try_to_del_timer_sync()
1501 base = lock_timer_base(timer, &flags); in __try_to_del_timer_sync()
1503 if (base->running_timer != timer) in __try_to_del_timer_sync()
1504 ret = detach_if_pending(timer, base, true); in __try_to_del_timer_sync()
1506 timer->function = NULL; in __try_to_del_timer_sync()
1508 raw_spin_unlock_irqrestore(&base->lock, flags); in __try_to_del_timer_sync()
1514 * try_to_del_timer_sync - Try to deactivate a timer
1515 * @timer: Timer to deactivate
1517 * This function tries to deactivate a timer. On success the timer is not
1518 * queued and the timer callback function is not running on any CPU.
1520 * This function does not guarantee that the timer cannot be rearmed right
1525 * * %0 - The timer was not pending
1526 * * %1 - The timer was pending and deactivated
1527 * * %-1 - The timer callback function is running on a different CPU
1529 int try_to_del_timer_sync(struct timer_list *timer) in try_to_del_timer_sync() argument
1531 return __try_to_del_timer_sync(timer, false); in try_to_del_timer_sync()
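A lock-friendly cancellation sketch (hypothetical lock and timer): when the caller holds a lock that the callback also takes, timer_delete_sync() would deadlock, so the -1 case is retried with the lock dropped in between.

#include <linux/timer.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(state_lock);	/* hypothetical lock also taken by the callback */
static struct timer_list state_timer;	/* hypothetical, set up elsewhere */

/* Called with state_lock held. */
static void cancel_state_timer_locked(void)
{
	while (try_to_del_timer_sync(&state_timer) < 0) {
		/* Callback is running on another CPU; let it finish. */
		spin_unlock(&state_lock);
		spin_lock(&state_lock);
	}
}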
1538 spin_lock_init(&base->expiry_lock); in timer_base_init_expiry_lock()
1543 spin_lock(&base->expiry_lock); in timer_base_lock_expiry()
1548 spin_unlock(&base->expiry_lock); in timer_base_unlock_expiry()
1554 * If there is a waiter for base->expiry_lock, then it was waiting for the
1555 * timer callback to finish. Drop expiry_lock and reacquire it. That allows
1559 __releases(&base->lock) __releases(&base->expiry_lock) in timer_sync_wait_running()
1560 __acquires(&base->expiry_lock) __acquires(&base->lock) in timer_sync_wait_running()
1562 if (atomic_read(&base->timer_waiters)) { in timer_sync_wait_running()
1563 raw_spin_unlock_irq(&base->lock); in timer_sync_wait_running()
1564 spin_unlock(&base->expiry_lock); in timer_sync_wait_running()
1565 spin_lock(&base->expiry_lock); in timer_sync_wait_running()
1566 raw_spin_lock_irq(&base->lock); in timer_sync_wait_running()
1572 * deletion of a timer failed because the timer callback function was
1575 * This prevents priority inversion, if the softirq thread on a remote CPU
1577 * delete a timer preempted the softirq thread running the timer callback
1580 static void del_timer_wait_running(struct timer_list *timer) in del_timer_wait_running() argument
1584 tf = READ_ONCE(timer->flags); in del_timer_wait_running()
1590 * which is held by the softirq across the timer in del_timer_wait_running()
1592 * expire the next timer. In theory the timer could already in del_timer_wait_running()
1596 atomic_inc(&base->timer_waiters); in del_timer_wait_running()
1597 spin_lock_bh(&base->expiry_lock); in del_timer_wait_running()
1598 atomic_dec(&base->timer_waiters); in del_timer_wait_running()
1599 spin_unlock_bh(&base->expiry_lock); in del_timer_wait_running()
1607 static inline void del_timer_wait_running(struct timer_list *timer) { } in del_timer_wait_running() argument
1611 * __timer_delete_sync - Internal function: Deactivate a timer and wait
1613 * @timer: The timer to be deactivated
1614 * @shutdown: If true, @timer->function will be set to NULL under the
1615 * timer base lock which prevents rearming of @timer
1617 * If @shutdown is not set the timer can be rearmed later. If the timer can
1621 * If @shutdown is set then @timer->function is set to NULL under timer
1622 * base lock which prevents rearming of the timer. Any attempt to rearm
1623 * a shutdown timer is silently ignored.
1625 * If the timer should be reused after shutdown it has to be initialized
1629 * * %0 - The timer was not pending
1630 * * %1 - The timer was pending and deactivated
1632 static int __timer_delete_sync(struct timer_list *timer, bool shutdown) in __timer_delete_sync() argument
1644 lock_map_acquire(&timer->lockdep_map); in __timer_delete_sync()
1645 lock_map_release(&timer->lockdep_map); in __timer_delete_sync()
1652 WARN_ON(in_hardirq() && !(timer->flags & TIMER_IRQSAFE)); in __timer_delete_sync()
1658 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE)) in __timer_delete_sync()
1662 ret = __try_to_del_timer_sync(timer, shutdown); in __timer_delete_sync()
1665 del_timer_wait_running(timer); in __timer_delete_sync()
1674 * timer_delete_sync - Deactivate a timer and wait for the handler to finish.
1675 * @timer: The timer to be deactivated
1677 * Synchronization rules: Callers must prevent restarting of the timer,
1679 * interrupt contexts unless the timer is an irqsafe one. The caller must
1680 * not hold locks which would prevent completion of the timer's callback
1681 * function. The timer's handler must not call add_timer_on(). Upon exit
1682 * the timer is not queued and the handler is not running on any CPU.
1685 * interrupt context. Even if the lock has nothing to do with the timer in
1689 * ---- ----
1692 * base->running_timer = mytimer;
1697 * while (base->running_timer == mytimer);
1700 * The interrupt on the other CPU is waiting to grab somelock but it has
1703 * This function cannot guarantee that the timer is not rearmed again by
1712 * * %0 - The timer was not pending
1713 * * %1 - The timer was pending and deactivated
1715 int timer_delete_sync(struct timer_list *timer) in timer_delete_sync() argument
1717 return __timer_delete_sync(timer, false); in timer_delete_sync()
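A teardown sketch (reusing the hypothetical struct mydev from the init example above): timer_delete_sync() waits for a running callback, so it must not be called from interrupt context (unless the timer is irqsafe) nor while holding any lock the callback itself takes.

static void mydev_stop(struct mydev *dev)
{
	/* Must not hold any lock that mydev_watchdog_fn() takes. */
	timer_delete_sync(&dev->watchdog);

	/* The callback is now neither queued nor running on any CPU, but the
	 * timer could still be rearmed by other code paths; use
	 * timer_shutdown_sync() if rearming must be prevented as well. */
}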
1722 * timer_shutdown_sync - Shutdown a timer and prevent rearming
1723 * @timer: The timer to be shutdown
1726 * - @timer is not queued
1727 * - The callback function of @timer is not running
1728 * - @timer cannot be enqueued again. Any attempt to rearm
1729 * @timer is silently ignored.
1734 * the timer is subject to a circular dependency problem.
1736 * A common pattern for this is a timer and a workqueue where the timer can
1737 * schedule work and work can arm the timer. On shutdown the workqueue must
1738 * be destroyed and the timer must be prevented from rearming. Unless the
1739 * code has conditionals like 'if (mything->in_shutdown)' to prevent that
1745 * timer_shutdown_sync(&mything->timer);
1746 * workqueue_destroy(&mything->workqueue);
1750 * This obviously implies that the timer is not required to be functional
1754 * * %0 - The timer was not pending
1755 * * %1 - The timer was pending
1757 int timer_shutdown_sync(struct timer_list *timer) in timer_shutdown_sync() argument
1759 return __timer_delete_sync(timer, true); in timer_shutdown_sync()
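A concrete version of the shutdown ordering sketched in the comment above (hypothetical struct mything; destroy_workqueue() is the real API behind the "workqueue_destroy" step):

#include <linux/timer.h>
#include <linux/workqueue.h>

struct mything {			/* hypothetical, mirrors the comment above */
	struct timer_list timer;
	struct workqueue_struct *workqueue;
	struct work_struct work;
};

static void mything_shutdown(struct mything *thing)
{
	/* After this returns, the callback has finished and any rearm
	 * attempt (e.g. from the work item) is silently ignored, so the
	 * work can no longer re-create the timer. */
	timer_shutdown_sync(&thing->timer);
	destroy_workqueue(thing->workqueue);
}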
1763 static void call_timer_fn(struct timer_list *timer, in call_timer_fn() argument
1771 * It is permissible to free the timer from inside the in call_timer_fn()
1775 * timer->lockdep_map, make a copy and use that here. in call_timer_fn()
1779 lockdep_copy_map(&lockdep_map, &timer->lockdep_map); in call_timer_fn()
1788 trace_timer_expire_entry(timer, baseclk); in call_timer_fn()
1789 fn(timer); in call_timer_fn()
1790 trace_timer_expire_exit(timer); in call_timer_fn()
1795 WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n", in call_timer_fn()
1810 * This value is required only for tracing. base->clk was in expire_timers()
1812 * is related to the old base->clk value. in expire_timers()
1814 unsigned long baseclk = base->clk - 1; in expire_timers()
1817 struct timer_list *timer; in expire_timers() local
1820 timer = hlist_entry(head->first, struct timer_list, entry); in expire_timers()
1822 base->running_timer = timer; in expire_timers()
1823 detach_timer(timer, true); in expire_timers()
1825 fn = timer->function; in expire_timers()
1829 base->running_timer = NULL; in expire_timers()
1833 if (timer->flags & TIMER_IRQSAFE) { in expire_timers()
1834 raw_spin_unlock(&base->lock); in expire_timers()
1835 call_timer_fn(timer, fn, baseclk); in expire_timers()
1836 raw_spin_lock(&base->lock); in expire_timers()
1837 base->running_timer = NULL; in expire_timers()
1839 raw_spin_unlock_irq(&base->lock); in expire_timers()
1840 call_timer_fn(timer, fn, baseclk); in expire_timers()
1841 raw_spin_lock_irq(&base->lock); in expire_timers()
1842 base->running_timer = NULL; in expire_timers()
1851 unsigned long clk = base->clk = base->next_expiry; in collect_expired_timers()
1859 if (__test_and_clear_bit(idx, base->pending_map)) { in collect_expired_timers()
1860 vec = base->vectors + idx; in collect_expired_timers()
1884 pos = find_next_bit(base->pending_map, end, start); in next_pending_bucket()
1886 return pos - start; in next_pending_bucket()
1888 pos = find_next_bit(base->pending_map, start, offset); in next_pending_bucket()
1889 return pos < start ? pos + LVL_SIZE - start : -1; in next_pending_bucket()
1893 * Search the first expiring timer in the various clock levels. Caller must
1894 * hold base->lock.
1896 * Store next expiry time in base->next_expiry.
1903 next = base->clk + NEXT_TIMER_MAX_DELTA; in timer_recalc_next_expiry()
1904 clk = base->clk; in timer_recalc_next_expiry()
1920 if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK)) in timer_recalc_next_expiry()
1927 * next expiring bucket in that level. base->clk is the next in timer_recalc_next_expiry()
1964 WRITE_ONCE(base->next_expiry, next); in timer_recalc_next_expiry()
1965 base->next_expiry_recalc = false; in timer_recalc_next_expiry()
1966 base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA); in timer_recalc_next_expiry()
1971 * Check, if the next hrtimer event is before the next timer wheel
1986 * If the next timer is already expired, return the tick base in cmp_next_hrtimer_event()
1995 * make sure that this tick really expires the timer to avoid in cmp_next_hrtimer_event()
2006 if (base->next_expiry_recalc) in next_timer_interrupt()
2011 * unnecessary raise of the timer softirq when the next_expiry value in next_timer_interrupt()
2012 * will be reached even if there is no timer pending. in next_timer_interrupt()
2015 * easily comparable to find out which base holds the first pending timer. in next_timer_interrupt()
2017 if (!base->timers_pending) in next_timer_interrupt()
2018 WRITE_ONCE(base->next_expiry, basej + NEXT_TIMER_MAX_DELTA); in next_timer_interrupt()
2020 return base->next_expiry; in next_timer_interrupt()
2047 tevt->local = basem + (u64)(nextevt - basej) * TICK_NSEC; in fetch_next_timer_interrupt()
2054 * as local timers will be handled by CPU itself. When not in fetch_next_timer_interrupt()
2055 * updating tevt->global with the already missed first global in fetch_next_timer_interrupt()
2056 * timer, it is possible that it will be missed completely. in fetch_next_timer_interrupt()
2058 * * The local callers will ignore the tevt->global anyway, when in fetch_next_timer_interrupt()
2062 tevt->global = tevt->local; in fetch_next_timer_interrupt()
2072 if (!local_first && base_global->timers_pending) in fetch_next_timer_interrupt()
2073 tevt->global = basem + (u64)(nextevt_global - basej) * TICK_NSEC; in fetch_next_timer_interrupt()
2075 if (base_local->timers_pending) in fetch_next_timer_interrupt()
2076 tevt->local = basem + (u64)(nextevt_local - basej) * TICK_NSEC; in fetch_next_timer_interrupt()
2083 * fetch_next_timer_interrupt_remote() - Store next timers into @tevt
2087 * @cpu: Remote CPU
2089 * Stores the next pending local and global timer expiry values in the
2094 * Caller needs to make sure timer base locks are held (use
2099 unsigned int cpu) in fetch_next_timer_interrupt_remote() argument
2104 tevt->local = tevt->global = KTIME_MAX; in fetch_next_timer_interrupt_remote()
2106 base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu); in fetch_next_timer_interrupt_remote()
2107 base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu); in fetch_next_timer_interrupt_remote()
2109 lockdep_assert_held(&base_local->lock); in fetch_next_timer_interrupt_remote()
2110 lockdep_assert_held(&base_global->lock); in fetch_next_timer_interrupt_remote()
2116 * timer_unlock_remote_bases - unlock timer bases of cpu
2117 * @cpu: Remote CPU
2119 * Unlocks the remote timer bases.
2121 void timer_unlock_remote_bases(unsigned int cpu) in timer_unlock_remote_bases() argument
2122 __releases(timer_bases[BASE_LOCAL]->lock) in timer_unlock_remote_bases()
2123 __releases(timer_bases[BASE_GLOBAL]->lock) in timer_unlock_remote_bases()
2127 base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu); in timer_unlock_remote_bases()
2128 base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu); in timer_unlock_remote_bases()
2130 raw_spin_unlock(&base_global->lock); in timer_unlock_remote_bases()
2131 raw_spin_unlock(&base_local->lock); in timer_unlock_remote_bases()
2135 * timer_lock_remote_bases - lock timer bases of cpu
2136 * @cpu: Remote CPU
2138 * Locks the remote timer bases.
2140 void timer_lock_remote_bases(unsigned int cpu) in timer_lock_remote_bases() argument
2141 __acquires(timer_bases[BASE_LOCAL]->lock) in timer_lock_remote_bases()
2142 __acquires(timer_bases[BASE_GLOBAL]->lock) in timer_lock_remote_bases()
2146 base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu); in timer_lock_remote_bases()
2147 base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu); in timer_lock_remote_bases()
2151 raw_spin_lock(&base_local->lock); in timer_lock_remote_bases()
2152 raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING); in timer_lock_remote_bases()
2156 * timer_base_is_idle() - Return whether timer base is set idle
2158 * Returns the is_idle value of the local timer base.
2168 * timer_expire_remote() - expire global timers of cpu
2169 * @cpu: Remote CPU
2171 * Expire timers of global base of remote CPU.
2173 void timer_expire_remote(unsigned int cpu) in timer_expire_remote() argument
2175 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu); in timer_expire_remote()
2187 next_tmigr = tmigr_cpu_new_timer(tevt->global); in timer_use_tmigr()
2189 next_tmigr = tmigr_cpu_deactivate(tevt->global); in timer_use_tmigr()
2191 next_tmigr = tmigr_quick_check(tevt->global); in timer_use_tmigr()
2194 * If the CPU is the last going idle in timer migration hierarchy, make in timer_use_tmigr()
2195 * sure the CPU will wake up in time to handle remote timers. in timer_use_tmigr()
2198 if (next_tmigr < tevt->local) { in timer_use_tmigr()
2205 tmp = div_u64(next_tmigr - basem, TICK_NSEC); in timer_use_tmigr()
2208 tevt->local = next_tmigr; in timer_use_tmigr()
2217 * Make sure first event is written into tevt->local to not miss a in timer_use_tmigr()
2218 * timer on !SMP systems. in timer_use_tmigr()
2220 tevt->local = min_t(u64, tevt->local, tevt->global); in timer_use_tmigr()
2233 * When the CPU is offline, the tick is cancelled and nothing is supposed in __get_next_timer_interrupt()
2245 raw_spin_lock(&base_local->lock); in __get_next_timer_interrupt()
2246 raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING); in __get_next_timer_interrupt()
2253 * timer migration hierarchy related functions. The value for the next in __get_next_timer_interrupt()
2254 * global timer in @tevt struct equals then KTIME_MAX. This is also in __get_next_timer_interrupt()
2255 * true, when the timer base is idle. in __get_next_timer_interrupt()
2257 * The proper timer migration hierarchy function depends on the callsite in __get_next_timer_interrupt()
2258 * and whether timer base is idle or not. @nextevt will be updated when in __get_next_timer_interrupt()
2259 * this CPU needs to handle the first timer migration hierarchy in __get_next_timer_interrupt()
2265 base_local->is_idle, &tevt); in __get_next_timer_interrupt()
2275 * Set base->is_idle only when caller is timer_base_try_to_set_idle() in __get_next_timer_interrupt()
2281 * global timer into timer migration hierarchy. Therefore a new in __get_next_timer_interrupt()
2284 * If the base is marked idle then any timer add operation must in __get_next_timer_interrupt()
2290 if (!base_local->is_idle && time_after(nextevt, basej + 1)) { in __get_next_timer_interrupt()
2291 base_local->is_idle = true; in __get_next_timer_interrupt()
2294 * in nohz_full mode need a self-IPI to kick reprogramming in __get_next_timer_interrupt()
2297 if (tick_nohz_full_cpu(base_local->cpu)) in __get_next_timer_interrupt()
2298 base_global->is_idle = true; in __get_next_timer_interrupt()
2299 trace_timer_base_idle(true, base_local->cpu); in __get_next_timer_interrupt()
2301 *idle = base_local->is_idle; in __get_next_timer_interrupt()
2304 * When timer base is not set idle, undo the effect of in __get_next_timer_interrupt()
2305 * tmigr_cpu_deactivate() to prevent inconsistent states - active in __get_next_timer_interrupt()
2306 * timer base but inactive timer migration hierarchy. in __get_next_timer_interrupt()
2308 * When timer base was already marked idle, nothing will be in __get_next_timer_interrupt()
2311 if (!base_local->is_idle && idle_is_possible) in __get_next_timer_interrupt()
2315 raw_spin_unlock(&base_global->lock); in __get_next_timer_interrupt()
2316 raw_spin_unlock(&base_local->lock); in __get_next_timer_interrupt()
2322 * get_next_timer_interrupt() - return the time (clock mono) of the next timer
2326 * Returns the tick aligned clock monotonic time of the next pending timer or
2327 * KTIME_MAX if no timer is pending. If timer of global base was queued into
2328 * timer migration hierarchy, first global timer is not taken into account. If
2329 * it was the last CPU of timer migration hierarchy going idle, first global
2338 * timer_base_try_to_set_idle() - Try to set the idle state of the timer bases
2341 * @idle: pointer to store the value of timer_base->is_idle on return;
2344 * Returns the tick aligned clock monotonic time of the next pending timer or
2345 * KTIME_MAX if no timer is pending. When tick was already stopped KTIME_MAX is
2357 * timer_clear_idle - Clear the idle state of the timer base
2364 * We do this unlocked. The worst outcome is a remote pinned timer in timer_clear_idle()
2375 /* Activate without holding the timer_base->lock */ in timer_clear_idle()
2381 * __run_timers - run all expired timers (if any) on this CPU.
2382 * @base: the timer vector to be processed.
2389 lockdep_assert_held(&base->lock); in __run_timers()
2391 if (base->running_timer) in __run_timers()
2394 while (time_after_eq(jiffies, base->clk) && in __run_timers()
2395 time_after_eq(jiffies, base->next_expiry)) { in __run_timers()
2399 * timer at this clk are that all matching timers have been in __run_timers()
2400 * dequeued or no timer has been queued since in __run_timers()
2404 WARN_ON_ONCE(!levels && !base->next_expiry_recalc in __run_timers()
2405 && base->timers_pending); in __run_timers()
2407 * While executing timers, base->clk is set 1 offset ahead of in __run_timers()
2410 base->clk++; in __run_timers()
2413 while (levels--) in __run_timers()
2420 /* Can race against a remote CPU updating next_expiry under the lock */ in __run_timer_base()
2421 if (time_before(jiffies, READ_ONCE(base->next_expiry))) in __run_timer_base()
2425 raw_spin_lock_irq(&base->lock); in __run_timer_base()
2427 raw_spin_unlock_irq(&base->lock); in __run_timer_base()
2439 * This function runs timers and the timer-tq in bottom half context.
2454 * Called by the local, per-CPU timer interrupt on SMP.
2466 * timer_base::next_expiry can be written by a remote CPU while in run_local_timers()
2472 * timer_base::next_expiry is written by a remote CPU: in run_local_timers()
2474 * 1. Remote CPU expires global timers of this CPU and updates in run_local_timers()
2477 * worst outcome is a superfluous raise of the timer softirq in run_local_timers()
2480 * 2. A new first pinned timer is enqueued by a remote CPU in run_local_timers()
2483 * problem, as an IPI is executed nevertheless when the CPU in run_local_timers()
2484 * was idle before. When the CPU wasn't idle but the update in run_local_timers()
2485 * is missed, then the timer would expire one jiffy late - in run_local_timers()
2496 if (time_after_eq(jiffies, READ_ONCE(base->next_expiry)) || in run_local_timers()
2505 * Called from the timer interrupt handler to charge one tick to the current
2512 /* Note: this timer irq context must be accounted for as well. */ in update_process_times()
2528 struct timer_list *timer; in migrate_timer_list() local
2529 int cpu = new_base->cpu; in migrate_timer_list() local
2532 timer = hlist_entry(head->first, struct timer_list, entry); in migrate_timer_list()
2533 detach_timer(timer, false); in migrate_timer_list()
2534 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu; in migrate_timer_list()
2535 internal_add_timer(new_base, timer); in migrate_timer_list()
2539 int timers_prepare_cpu(unsigned int cpu) in timers_prepare_cpu() argument
2545 base = per_cpu_ptr(&timer_bases[b], cpu); in timers_prepare_cpu()
2546 base->clk = jiffies; in timers_prepare_cpu()
2547 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; in timers_prepare_cpu()
2548 base->next_expiry_recalc = false; in timers_prepare_cpu()
2549 base->timers_pending = false; in timers_prepare_cpu()
2550 base->is_idle = false; in timers_prepare_cpu()
2555 int timers_dead_cpu(unsigned int cpu) in timers_dead_cpu() argument
2562 old_base = per_cpu_ptr(&timer_bases[b], cpu); in timers_dead_cpu()
2568 raw_spin_lock_irq(&new_base->lock); in timers_dead_cpu()
2569 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); in timers_dead_cpu()
2577 WARN_ON_ONCE(old_base->running_timer); in timers_dead_cpu()
2578 old_base->running_timer = NULL; in timers_dead_cpu()
2581 migrate_timer_list(new_base, old_base->vectors + i); in timers_dead_cpu()
2583 raw_spin_unlock(&old_base->lock); in timers_dead_cpu()
2584 raw_spin_unlock_irq(&new_base->lock); in timers_dead_cpu()
2592 static void __init init_timer_cpu(int cpu) in init_timer_cpu() argument
2598 base = per_cpu_ptr(&timer_bases[i], cpu); in init_timer_cpu()
2599 base->cpu = cpu; in init_timer_cpu()
2600 raw_spin_lock_init(&base->lock); in init_timer_cpu()
2601 base->clk = jiffies; in init_timer_cpu()
2602 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; in init_timer_cpu()
2609 int cpu; in init_timer_cpus() local
2611 for_each_possible_cpu(cpu) in init_timer_cpus()
2612 init_timer_cpu(cpu); in init_timer_cpus()