1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Kernel internal timers
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
8 *
9 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
10 * "A Kernel Model for Precision Timekeeping" by Dave Mills
11 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
12 * serialize accesses to xtime/lost_ticks).
13 * Copyright (C) 1998 Andrea Arcangeli
14 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
15 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
16 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
17 * Copyright (C) 2000, 2001, 2002 Ingo Molnar
18 * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
19 */
20
21 #include <linux/kernel_stat.h>
22 #include <linux/export.h>
23 #include <linux/interrupt.h>
24 #include <linux/percpu.h>
25 #include <linux/init.h>
26 #include <linux/mm.h>
27 #include <linux/swap.h>
28 #include <linux/pid_namespace.h>
29 #include <linux/notifier.h>
30 #include <linux/thread_info.h>
31 #include <linux/time.h>
32 #include <linux/jiffies.h>
33 #include <linux/posix-timers.h>
34 #include <linux/cpu.h>
35 #include <linux/syscalls.h>
36 #include <linux/delay.h>
37 #include <linux/tick.h>
38 #include <linux/kallsyms.h>
39 #include <linux/irq_work.h>
40 #include <linux/sched/signal.h>
41 #include <linux/sched/sysctl.h>
42 #include <linux/sched/nohz.h>
43 #include <linux/sched/debug.h>
44 #include <linux/slab.h>
45 #include <linux/compat.h>
46 #include <linux/random.h>
47 #include <linux/sysctl.h>
48
49 #include <linux/uaccess.h>
50 #include <asm/unistd.h>
51 #include <asm/div64.h>
52 #include <asm/timex.h>
53 #include <asm/io.h>
54
55 #include "tick-internal.h"
56 #include "timer_migration.h"
57
58 #define CREATE_TRACE_POINTS
59 #include <trace/events/timer.h>
60
61 __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
62
63 EXPORT_SYMBOL(jiffies_64);
64
65 /*
66 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
67 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
68 * level has a different granularity.
69 *
70 * The level granularity is: LVL_CLK_DIV ^ level
71 * The level clock frequency is: HZ / (LVL_CLK_DIV ^ level)
72 *
73 * The array level of a newly armed timer depends on the relative expiry
74 * time. The farther away the expiry time is, the higher the array level and
75 * therefore the coarser the granularity becomes.
76 *
77 * Contrary to the original timer wheel implementation, which aims for 'exact'
78 * expiry of the timers, this implementation removes the need for recascading
79 * the timers into the lower array levels. The previous 'classic' timer wheel
80 * implementation of the kernel already violated the 'exact' expiry by adding
81 * slack to the expiry time to provide batched expiration. The granularity
82 * levels provide implicit batching.
83 *
84 * This is an optimization of the original timer wheel implementation for the
85 * majority of the timer wheel use cases: timeouts. The vast majority of
86 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
87 * the timeout expires it indicates that normal operation is disturbed, so it
88 * does not matter much whether the timeout comes with a slight delay.
89 *
90 * The only exception to this are networking timers with a small expiry
91 * time. They rely on the granularity. Those fit into the first wheel level,
92 * which has HZ granularity.
93 *
94 * We don't have cascading anymore. Timers with an expiry time above the
95 * capacity of the last wheel level are force expired at the maximum timeout
96 * value of the last wheel level. From data sampling we know that the maximum
97 * value observed is 5 days (network connection tracking), so this should not
98 * be an issue.
99 *
100 * The currently chosen array constants are a good compromise between
101 * array size and granularity.
102 *
103 * This results in the following granularity and range levels:
104 *
105 * HZ 1000 steps
106 * Level Offset Granularity Range
107 * 0 0 1 ms 0 ms - 63 ms
108 * 1 64 8 ms 64 ms - 511 ms
109 * 2 128 64 ms 512 ms - 4095 ms (512ms - ~4s)
110 * 3 192 512 ms 4096 ms - 32767 ms (~4s - ~32s)
111 * 4 256 4096 ms (~4s) 32768 ms - 262143 ms (~32s - ~4m)
112 * 5 320 32768 ms (~32s) 262144 ms - 2097151 ms (~4m - ~34m)
113 * 6 384 262144 ms (~4m) 2097152 ms - 16777215 ms (~34m - ~4h)
114 * 7 448 2097152 ms (~34m) 16777216 ms - 134217727 ms (~4h - ~1d)
115 * 8 512 16777216 ms (~4h) 134217728 ms - 1073741822 ms (~1d - ~12d)
116 *
117 * HZ 300
118 * Level Offset Granularity Range
119 * 0 0 3 ms 0 ms - 210 ms
120 * 1 64 26 ms 213 ms - 1703 ms (213ms - ~1s)
121 * 2 128 213 ms 1706 ms - 13650 ms (~1s - ~13s)
122 * 3 192 1706 ms (~1s) 13653 ms - 109223 ms (~13s - ~1m)
123 * 4 256 13653 ms (~13s) 109226 ms - 873810 ms (~1m - ~14m)
124 * 5 320 109226 ms (~1m) 873813 ms - 6990503 ms (~14m - ~1h)
125 * 6 384 873813 ms (~14m) 6990506 ms - 55924050 ms (~1h - ~15h)
126 * 7 448 6990506 ms (~1h) 55924053 ms - 447392423 ms (~15h - ~5d)
127 * 8 512 55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
128 *
129 * HZ 250
130 * Level Offset Granularity Range
131 * 0 0 4 ms 0 ms - 255 ms
132 * 1 64 32 ms 256 ms - 2047 ms (256ms - ~2s)
133 * 2 128 256 ms 2048 ms - 16383 ms (~2s - ~16s)
134 * 3 192 2048 ms (~2s) 16384 ms - 131071 ms (~16s - ~2m)
135 * 4 256 16384 ms (~16s) 131072 ms - 1048575 ms (~2m - ~17m)
136 * 5 320 131072 ms (~2m) 1048576 ms - 8388607 ms (~17m - ~2h)
137 * 6 384 1048576 ms (~17m) 8388608 ms - 67108863 ms (~2h - ~18h)
138 * 7 448 8388608 ms (~2h) 67108864 ms - 536870911 ms (~18h - ~6d)
139 * 8 512 67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
140 *
141 * HZ 100
142 * Level Offset Granularity Range
143 * 0 0 10 ms 0 ms - 630 ms
144 * 1 64 80 ms 640 ms - 5110 ms (640ms - ~5s)
145 * 2 128 640 ms 5120 ms - 40950 ms (~5s - ~40s)
146 * 3 192 5120 ms (~5s) 40960 ms - 327670 ms (~40s - ~5m)
147 * 4 256 40960 ms (~40s) 327680 ms - 2621430 ms (~5m - ~43m)
148 * 5 320 327680 ms (~5m) 2621440 ms - 20971510 ms (~43m - ~5h)
149 * 6 384 2621440 ms (~43m) 20971520 ms - 167772150 ms (~5h - ~1d)
150 * 7 448 20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
151 */
152
153 /* Clock divisor for the next level */
154 #define LVL_CLK_SHIFT 3
155 #define LVL_CLK_DIV (1UL << LVL_CLK_SHIFT)
156 #define LVL_CLK_MASK (LVL_CLK_DIV - 1)
157 #define LVL_SHIFT(n) ((n) * LVL_CLK_SHIFT)
158 #define LVL_GRAN(n) (1UL << LVL_SHIFT(n))
159
160 /*
161 * The time start value for each level to select the bucket at enqueue
162 * time. We start from the last possible delta of the previous level
163 * so that we can later add an extra LVL_GRAN(n) to n (see calc_index()).
164 */
165 #define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
166
167 /* Size of each clock level */
168 #define LVL_BITS 6
169 #define LVL_SIZE (1UL << LVL_BITS)
170 #define LVL_MASK (LVL_SIZE - 1)
171 #define LVL_OFFS(n) ((n) * LVL_SIZE)
172
173 /* Level depth */
174 #if HZ > 100
175 # define LVL_DEPTH 9
176 # else
177 # define LVL_DEPTH 8
178 #endif
179
180 /* The cutoff (max. capacity of the wheel) */
181 #define WHEEL_TIMEOUT_CUTOFF (LVL_START(LVL_DEPTH))
182 #define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
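
/*
 * Worked example (illustrative only, assuming HZ=1000 so one jiffy is 1 ms):
 * LVL_GRAN(2) = 1 << (2 * 3) = 64 jiffies, i.e. a timer queued in level 2 has
 * 64 ms granularity. LVL_START(3) = 63 << ((3 - 1) * 3) = 4032, so deltas of
 * 4032 jiffies and above are pushed to level 3 or higher.
 * WHEEL_TIMEOUT_CUTOFF = LVL_START(LVL_DEPTH) = 63 << 24 = 1056964608 jiffies,
 * roughly 12 days, which matches the ~12d upper bound of the HZ 1000 table
 * above.
 */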
183
184 /*
185 * The resulting wheel size. If NOHZ is configured we allocate two
186 * wheels so we have a separate storage for the deferrable timers.
187 */
188 #define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH)
189
190 #ifdef CONFIG_NO_HZ_COMMON
191 /*
192 * If multiple bases need to be locked, use the base ordering for lock
193 * nesting, i.e. lowest number first.
194 */
195 # define NR_BASES 3
196 # define BASE_LOCAL 0
197 # define BASE_GLOBAL 1
198 # define BASE_DEF 2
199 #else
200 # define NR_BASES 1
201 # define BASE_LOCAL 0
202 # define BASE_GLOBAL 0
203 # define BASE_DEF 0
204 #endif
205
206 /**
207 * struct timer_base - Per CPU timer base (number of bases depends on config)
208 * @lock: Lock protecting the timer_base
209 * @running_timer: When expiring timers, the lock is dropped. To make
210 * sure not to race against deleting/modifying a
211 * currently running timer, the pointer is set to the
212 * timer which is currently being expired. If no timer is
213 * running, the pointer is NULL.
214 * @expiry_lock: PREEMPT_RT only: Lock is taken in softirq around
215 * timer expiry callback execution and when trying to
216 * delete a running timer when the deletion was not
217 * successful at the first attempt. It prevents priority
218 * inversion when the callback was preempted on a remote
219 * CPU and a caller tries to delete the running timer. It
220 * also prevents a livelock, when the task which tries to
221 * delete a timer preempted the softirq thread which
222 * is running the timer callback function.
223 * @timer_waiters: PREEMPT_RT only: Tells whether there is a waiter
224 * waiting for the end of the timer callback function
225 * execution.
226 * @clk: clock of the timer base; is updated before enqueue
227 * of a timer; during expiry, it is 1 offset ahead of
228 * jiffies to avoid endless requeuing to current
229 * jiffies
230 * @next_expiry: expiry value of the first timer; it is updated when
231 * finding the next timer and during enqueue; the
232 * value is not valid, when next_expiry_recalc is set
233 * @cpu: Number of CPU the timer base belongs to
234 * @next_expiry_recalc: States whether a recalculation of next_expiry is
235 * required. It is set to true when a timer was
236 * deleted.
237 * @is_idle: Is set when the timer_base is idle. It is triggered by NOHZ
238 * code. This state is only used in the standard
239 * base. Deferrable timers, which are enqueued remotely,
240 * never wake up an idle CPU, so there is no need to
241 * support this for the deferrable base.
242 * @timers_pending: Is set when a timer is pending in the base. It is only
243 * reliable when next_expiry_recalc is not set.
244 * @pending_map: bitmap of the timer wheel; each bit reflects a
245 * bucket of the wheel. When a bit is set, at least a
246 * single timer is enqueued in the related bucket.
247 * @vectors: Array of lists; Each array member reflects a bucket
248 * of the timer wheel. The list contains all timers
249 * which are enqueued into a specific bucket.
250 */
251 struct timer_base {
252 raw_spinlock_t lock;
253 struct timer_list *running_timer;
254 #ifdef CONFIG_PREEMPT_RT
255 spinlock_t expiry_lock;
256 atomic_t timer_waiters;
257 #endif
258 unsigned long clk;
259 unsigned long next_expiry;
260 unsigned int cpu;
261 bool next_expiry_recalc;
262 bool is_idle;
263 bool timers_pending;
264 DECLARE_BITMAP(pending_map, WHEEL_SIZE);
265 struct hlist_head vectors[WHEEL_SIZE];
266 } ____cacheline_aligned;
267
268 static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
269
270 #ifdef CONFIG_NO_HZ_COMMON
271
272 static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
273 static DEFINE_MUTEX(timer_keys_mutex);
274
275 static void timer_update_keys(struct work_struct *work);
276 static DECLARE_WORK(timer_update_work, timer_update_keys);
277
278 #ifdef CONFIG_SMP
279 static unsigned int sysctl_timer_migration = 1;
280
281 DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);
282
283 static void timers_update_migration(void)
284 {
285 if (sysctl_timer_migration && tick_nohz_active)
286 static_branch_enable(&timers_migration_enabled);
287 else
288 static_branch_disable(&timers_migration_enabled);
289 }
290
291 #ifdef CONFIG_SYSCTL
292 static int timer_migration_handler(const struct ctl_table *table, int write,
293 void *buffer, size_t *lenp, loff_t *ppos)
294 {
295 int ret;
296
297 mutex_lock(&timer_keys_mutex);
298 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
299 if (!ret && write)
300 timers_update_migration();
301 mutex_unlock(&timer_keys_mutex);
302 return ret;
303 }
304
305 static struct ctl_table timer_sysctl[] = {
306 {
307 .procname = "timer_migration",
308 .data = &sysctl_timer_migration,
309 .maxlen = sizeof(unsigned int),
310 .mode = 0644,
311 .proc_handler = timer_migration_handler,
312 .extra1 = SYSCTL_ZERO,
313 .extra2 = SYSCTL_ONE,
314 },
315 };
316
317 static int __init timer_sysctl_init(void)
318 {
319 register_sysctl("kernel", timer_sysctl);
320 return 0;
321 }
322 device_initcall(timer_sysctl_init);
323 #endif /* CONFIG_SYSCTL */
324 #else /* CONFIG_SMP */
325 static inline void timers_update_migration(void) { }
326 #endif /* !CONFIG_SMP */
327
328 static void timer_update_keys(struct work_struct *work)
329 {
330 mutex_lock(&timer_keys_mutex);
331 timers_update_migration();
332 static_branch_enable(&timers_nohz_active);
333 mutex_unlock(&timer_keys_mutex);
334 }
335
336 void timers_update_nohz(void)
337 {
338 schedule_work(&timer_update_work);
339 }
340
341 static inline bool is_timers_nohz_active(void)
342 {
343 return static_branch_unlikely(&timers_nohz_active);
344 }
345 #else
346 static inline bool is_timers_nohz_active(void) { return false; }
347 #endif /* NO_HZ_COMMON */
348
349 static unsigned long round_jiffies_common(unsigned long j, int cpu,
350 bool force_up)
351 {
352 int rem;
353 unsigned long original = j;
354
355 /*
356 * We don't want all cpus firing their timers at once hitting the
357 * same lock or cachelines, so we skew each extra cpu with an extra
358 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
359 * already did this.
360 * The skew is done by adding 3*cpunr, then round, then subtract this
361 * extra offset again.
362 */
363 j += cpu * 3;
364
365 rem = j % HZ;
366
367 /*
368 * If the target jiffy is just after a whole second (which can happen
369 * due to delays of the timer irq, long irq off times etc etc) then
370 * we should round down to the whole second, not up. Use 1/4th second
371 * as cutoff for this rounding as an extreme upper bound for this.
372 * But never round down if @force_up is set.
373 */
374 if (rem < HZ/4 && !force_up) /* round down */
375 j = j - rem;
376 else /* round up */
377 j = j - rem + HZ;
378
379 /* now that we have rounded, subtract the extra skew again */
380 j -= cpu * 3;
381
382 /*
383 * Make sure j is still in the future. Otherwise return the
384 * unmodified value.
385 */
386 return time_is_after_jiffies(j) ? j : original;
387 }
388
389 /**
390 * __round_jiffies - function to round jiffies to a full second
391 * @j: the time in (absolute) jiffies that should be rounded
392 * @cpu: the processor number on which the timeout will happen
393 *
394 * __round_jiffies() rounds an absolute time in the future (in jiffies)
395 * up or down to (approximately) full seconds. This is useful for timers
396 * for which the exact time they fire does not matter too much, as long as
397 * they fire approximately every X seconds.
398 *
399 * By rounding these timers to whole seconds, all such timers will fire
400 * at the same time, rather than at various times spread out. The goal
401 * of this is to have the CPU wake up less, which saves power.
402 *
403 * The exact rounding is skewed for each processor to avoid all
404 * processors firing at the exact same time, which could lead
405 * to lock contention or spurious cache line bouncing.
406 *
407 * The return value is the rounded version of the @j parameter.
408 */
409 unsigned long __round_jiffies(unsigned long j, int cpu)
410 {
411 return round_jiffies_common(j, cpu, false);
412 }
413 EXPORT_SYMBOL_GPL(__round_jiffies);
414
415 /**
416 * __round_jiffies_relative - function to round jiffies to a full second
417 * @j: the time in (relative) jiffies that should be rounded
418 * @cpu: the processor number on which the timeout will happen
419 *
420 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
421 * up or down to (approximately) full seconds. This is useful for timers
422 * for which the exact time they fire does not matter too much, as long as
423 * they fire approximately every X seconds.
424 *
425 * By rounding these timers to whole seconds, all such timers will fire
426 * at the same time, rather than at various times spread out. The goal
427 * of this is to have the CPU wake up less, which saves power.
428 *
429 * The exact rounding is skewed for each processor to avoid all
430 * processors firing at the exact same time, which could lead
431 * to lock contention or spurious cache line bouncing.
432 *
433 * The return value is the rounded version of the @j parameter.
434 */
435 unsigned long __round_jiffies_relative(unsigned long j, int cpu)
436 {
437 unsigned long j0 = jiffies;
438
439 /* Use j0 because jiffies might change while we run */
440 return round_jiffies_common(j + j0, cpu, false) - j0;
441 }
442 EXPORT_SYMBOL_GPL(__round_jiffies_relative);
443
444 /**
445 * round_jiffies - function to round jiffies to a full second
446 * @j: the time in (absolute) jiffies that should be rounded
447 *
448 * round_jiffies() rounds an absolute time in the future (in jiffies)
449 * up or down to (approximately) full seconds. This is useful for timers
450 * for which the exact time they fire does not matter too much, as long as
451 * they fire approximately every X seconds.
452 *
453 * By rounding these timers to whole seconds, all such timers will fire
454 * at the same time, rather than at various times spread out. The goal
455 * of this is to have the CPU wake up less, which saves power.
456 *
457 * The return value is the rounded version of the @j parameter.
458 */
459 unsigned long round_jiffies(unsigned long j)
460 {
461 return round_jiffies_common(j, raw_smp_processor_id(), false);
462 }
463 EXPORT_SYMBOL_GPL(round_jiffies);
464
465 /**
466 * round_jiffies_relative - function to round jiffies to a full second
467 * @j: the time in (relative) jiffies that should be rounded
468 *
469 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
470 * up or down to (approximately) full seconds. This is useful for timers
471 * for which the exact time they fire does not matter too much, as long as
472 * they fire approximately every X seconds.
473 *
474 * By rounding these timers to whole seconds, all such timers will fire
475 * at the same time, rather than at various times spread out. The goal
476 * of this is to have the CPU wake up less, which saves power.
477 *
478 * The return value is the rounded version of the @j parameter.
479 */
480 unsigned long round_jiffies_relative(unsigned long j)
481 {
482 return __round_jiffies_relative(j, raw_smp_processor_id());
483 }
484 EXPORT_SYMBOL_GPL(round_jiffies_relative);
485
486 /**
487 * __round_jiffies_up - function to round jiffies up to a full second
488 * @j: the time in (absolute) jiffies that should be rounded
489 * @cpu: the processor number on which the timeout will happen
490 *
491 * This is the same as __round_jiffies() except that it will never
492 * round down. This is useful for timeouts for which the exact time
493 * of firing does not matter too much, as long as they don't fire too
494 * early.
495 */
496 unsigned long __round_jiffies_up(unsigned long j, int cpu)
497 {
498 return round_jiffies_common(j, cpu, true);
499 }
500 EXPORT_SYMBOL_GPL(__round_jiffies_up);
501
502 /**
503 * __round_jiffies_up_relative - function to round jiffies up to a full second
504 * @j: the time in (relative) jiffies that should be rounded
505 * @cpu: the processor number on which the timeout will happen
506 *
507 * This is the same as __round_jiffies_relative() except that it will never
508 * round down. This is useful for timeouts for which the exact time
509 * of firing does not matter too much, as long as they don't fire too
510 * early.
511 */
512 unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
513 {
514 unsigned long j0 = jiffies;
515
516 /* Use j0 because jiffies might change while we run */
517 return round_jiffies_common(j + j0, cpu, true) - j0;
518 }
519 EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
520
521 /**
522 * round_jiffies_up - function to round jiffies up to a full second
523 * @j: the time in (absolute) jiffies that should be rounded
524 *
525 * This is the same as round_jiffies() except that it will never
526 * round down. This is useful for timeouts for which the exact time
527 * of firing does not matter too much, as long as they don't fire too
528 * early.
529 */
530 unsigned long round_jiffies_up(unsigned long j)
531 {
532 return round_jiffies_common(j, raw_smp_processor_id(), true);
533 }
534 EXPORT_SYMBOL_GPL(round_jiffies_up);
535
536 /**
537 * round_jiffies_up_relative - function to round jiffies up to a full second
538 * @j: the time in (relative) jiffies that should be rounded
539 *
540 * This is the same as round_jiffies_relative() except that it will never
541 * round down. This is useful for timeouts for which the exact time
542 * of firing does not matter too much, as long as they don't fire too
543 * early.
544 */
545 unsigned long round_jiffies_up_relative(unsigned long j)
546 {
547 return __round_jiffies_up_relative(j, raw_smp_processor_id());
548 }
549 EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
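
/*
 * Illustrative usage sketch (not part of this file; the callback and helper
 * names are made up): a low-accuracy periodic housekeeping timer can re-arm
 * itself on a rounded boundary so it fires together with other second-aligned
 * timers and the CPU can stay idle longer.
 *
 *	static void housekeeping_fn(struct timer_list *t)
 *	{
 *		do_housekeeping();
 *		mod_timer(t, round_jiffies(jiffies + 10 * HZ));
 *	}
 */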
550
551
552 static inline unsigned int timer_get_idx(struct timer_list *timer)
553 {
554 return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
555 }
556
557 static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
558 {
559 timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
560 idx << TIMER_ARRAYSHIFT;
561 }
562
563 /*
564 * Helper function to calculate the array index for a given expiry
565 * time.
566 */
567 static inline unsigned calc_index(unsigned long expires, unsigned lvl,
568 unsigned long *bucket_expiry)
569 {
570
571 /*
572 * The timer wheel has to guarantee that a timer does not fire
573 * early. Early expiry can happen due to:
574 * - Timer is armed at the edge of a tick
575 * - Truncation of the expiry time in the outer wheel levels
576 *
577 * Round up with level granularity to prevent this.
578 */
579 expires = (expires >> LVL_SHIFT(lvl)) + 1;
580 *bucket_expiry = expires << LVL_SHIFT(lvl);
581 return LVL_OFFS(lvl) + (expires & LVL_MASK);
582 }
583
584 static int calc_wheel_index(unsigned long expires, unsigned long clk,
585 unsigned long *bucket_expiry)
586 {
587 unsigned long delta = expires - clk;
588 unsigned int idx;
589
590 if (delta < LVL_START(1)) {
591 idx = calc_index(expires, 0, bucket_expiry);
592 } else if (delta < LVL_START(2)) {
593 idx = calc_index(expires, 1, bucket_expiry);
594 } else if (delta < LVL_START(3)) {
595 idx = calc_index(expires, 2, bucket_expiry);
596 } else if (delta < LVL_START(4)) {
597 idx = calc_index(expires, 3, bucket_expiry);
598 } else if (delta < LVL_START(5)) {
599 idx = calc_index(expires, 4, bucket_expiry);
600 } else if (delta < LVL_START(6)) {
601 idx = calc_index(expires, 5, bucket_expiry);
602 } else if (delta < LVL_START(7)) {
603 idx = calc_index(expires, 6, bucket_expiry);
604 } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
605 idx = calc_index(expires, 7, bucket_expiry);
606 } else if ((long) delta < 0) {
607 idx = clk & LVL_MASK;
608 *bucket_expiry = clk;
609 } else {
610 /*
611 * Force expire obscene large timeouts to expire at the
612 * capacity limit of the wheel.
613 */
614 if (delta >= WHEEL_TIMEOUT_CUTOFF)
615 expires = clk + WHEEL_TIMEOUT_MAX;
616
617 idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry);
618 }
619 return idx;
620 }
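
/*
 * Worked example (illustrative only, assuming HZ=1000): for a timer armed
 * 1000 jiffies ahead of base->clk, delta = 1000 lies between
 * LVL_START(2) = 504 and LVL_START(3) = 4032, so the timer is placed in
 * level 2. calc_index() then rounds the expiry up to the next multiple of
 * LVL_GRAN(2) = 64 jiffies, which is why such a timeout can fire up to
 * ~64ms late but never early.
 */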
621
622 static void
623 trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
624 {
625 /*
626 * Deferrable timers do not prevent the CPU from entering dynticks and
627 * are not taken into account on the idle/nohz_full path. An IPI when a
628 * new deferrable timer is enqueued will wake up the remote CPU but
629 * nothing will be done with the deferrable timer base. Therefore skip
630 * the remote IPI for deferrable timers completely.
631 */
632 if (!is_timers_nohz_active() || timer->flags & TIMER_DEFERRABLE)
633 return;
634
635 /*
636 * We might have to IPI the remote CPU if the base is idle and the
637 * timer is pinned. If it is a non-pinned timer, it is only queued
638 * on the remote CPU when the timer was running during queueing. Then
639 * everything is handled by the remote CPU anyway. If the other CPU is
640 * on the way to idle then it can't set base->is_idle as we hold
641 * the base lock:
642 */
643 if (base->is_idle) {
644 WARN_ON_ONCE(!(timer->flags & TIMER_PINNED ||
645 tick_nohz_full_cpu(base->cpu)));
646 wake_up_nohz_cpu(base->cpu);
647 }
648 }
649
650 /*
651 * Enqueue the timer into the hash bucket, mark it pending in
652 * the bitmap, store the index in the timer flags then wake up
653 * the target CPU if needed.
654 */
655 static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
656 unsigned int idx, unsigned long bucket_expiry)
657 {
658
659 hlist_add_head(&timer->entry, base->vectors + idx);
660 __set_bit(idx, base->pending_map);
661 timer_set_idx(timer, idx);
662
663 trace_timer_start(timer, bucket_expiry);
664
665 /*
666 * Check whether this is the new first expiring timer. The
667 * effective expiry time of the timer is required here
668 * (bucket_expiry) instead of timer->expires.
669 */
670 if (time_before(bucket_expiry, base->next_expiry)) {
671 /*
672 * Set the next expiry time and kick the CPU so it
673 * can reevaluate the wheel:
674 */
675 WRITE_ONCE(base->next_expiry, bucket_expiry);
676 base->timers_pending = true;
677 base->next_expiry_recalc = false;
678 trigger_dyntick_cpu(base, timer);
679 }
680 }
681
682 static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
683 {
684 unsigned long bucket_expiry;
685 unsigned int idx;
686
687 idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry);
688 enqueue_timer(base, timer, idx, bucket_expiry);
689 }
690
691 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
692
693 static const struct debug_obj_descr timer_debug_descr;
694
695 struct timer_hint {
696 void (*function)(struct timer_list *t);
697 long offset;
698 };
699
700 #define TIMER_HINT(fn, container, timr, hintfn) \
701 { \
702 .function = fn, \
703 .offset = offsetof(container, hintfn) - \
704 offsetof(container, timr) \
705 }
706
707 static const struct timer_hint timer_hints[] = {
708 TIMER_HINT(delayed_work_timer_fn,
709 struct delayed_work, timer, work.func),
710 TIMER_HINT(kthread_delayed_work_timer_fn,
711 struct kthread_delayed_work, timer, work.func),
712 };
713
714 static void *timer_debug_hint(void *addr)
715 {
716 struct timer_list *timer = addr;
717 int i;
718
719 for (i = 0; i < ARRAY_SIZE(timer_hints); i++) {
720 if (timer_hints[i].function == timer->function) {
721 void (**fn)(void) = addr + timer_hints[i].offset;
722
723 return *fn;
724 }
725 }
726
727 return timer->function;
728 }
729
730 static bool timer_is_static_object(void *addr)
731 {
732 struct timer_list *timer = addr;
733
734 return (timer->entry.pprev == NULL &&
735 timer->entry.next == TIMER_ENTRY_STATIC);
736 }
737
738 /*
739 * timer_fixup_init is called when:
740 * - an active object is initialized
741 */
742 static bool timer_fixup_init(void *addr, enum debug_obj_state state)
743 {
744 struct timer_list *timer = addr;
745
746 switch (state) {
747 case ODEBUG_STATE_ACTIVE:
748 del_timer_sync(timer);
749 debug_object_init(timer, &timer_debug_descr);
750 return true;
751 default:
752 return false;
753 }
754 }
755
756 /* Stub timer callback for improperly used timers. */
757 static void stub_timer(struct timer_list *unused)
758 {
759 WARN_ON(1);
760 }
761
762 /*
763 * timer_fixup_activate is called when:
764 * - an active object is activated
765 * - an unknown non-static object is activated
766 */
767 static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
768 {
769 struct timer_list *timer = addr;
770
771 switch (state) {
772 case ODEBUG_STATE_NOTAVAILABLE:
773 timer_setup(timer, stub_timer, 0);
774 return true;
775
776 case ODEBUG_STATE_ACTIVE:
777 WARN_ON(1);
778 fallthrough;
779 default:
780 return false;
781 }
782 }
783
784 /*
785 * timer_fixup_free is called when:
786 * - an active object is freed
787 */
788 static bool timer_fixup_free(void *addr, enum debug_obj_state state)
789 {
790 struct timer_list *timer = addr;
791
792 switch (state) {
793 case ODEBUG_STATE_ACTIVE:
794 del_timer_sync(timer);
795 debug_object_free(timer, &timer_debug_descr);
796 return true;
797 default:
798 return false;
799 }
800 }
801
802 /*
803 * timer_fixup_assert_init is called when:
804 * - an untracked/uninit-ed object is found
805 */
806 static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
807 {
808 struct timer_list *timer = addr;
809
810 switch (state) {
811 case ODEBUG_STATE_NOTAVAILABLE:
812 timer_setup(timer, stub_timer, 0);
813 return true;
814 default:
815 return false;
816 }
817 }
818
819 static const struct debug_obj_descr timer_debug_descr = {
820 .name = "timer_list",
821 .debug_hint = timer_debug_hint,
822 .is_static_object = timer_is_static_object,
823 .fixup_init = timer_fixup_init,
824 .fixup_activate = timer_fixup_activate,
825 .fixup_free = timer_fixup_free,
826 .fixup_assert_init = timer_fixup_assert_init,
827 };
828
829 static inline void debug_timer_init(struct timer_list *timer)
830 {
831 debug_object_init(timer, &timer_debug_descr);
832 }
833
834 static inline void debug_timer_activate(struct timer_list *timer)
835 {
836 debug_object_activate(timer, &timer_debug_descr);
837 }
838
839 static inline void debug_timer_deactivate(struct timer_list *timer)
840 {
841 debug_object_deactivate(timer, &timer_debug_descr);
842 }
843
844 static inline void debug_timer_assert_init(struct timer_list *timer)
845 {
846 debug_object_assert_init(timer, &timer_debug_descr);
847 }
848
849 static void do_init_timer(struct timer_list *timer,
850 void (*func)(struct timer_list *),
851 unsigned int flags,
852 const char *name, struct lock_class_key *key);
853
854 void init_timer_on_stack_key(struct timer_list *timer,
855 void (*func)(struct timer_list *),
856 unsigned int flags,
857 const char *name, struct lock_class_key *key)
858 {
859 debug_object_init_on_stack(timer, &timer_debug_descr);
860 do_init_timer(timer, func, flags, name, key);
861 }
862 EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
863
864 void destroy_timer_on_stack(struct timer_list *timer)
865 {
866 debug_object_free(timer, &timer_debug_descr);
867 }
868 EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
869
870 #else
871 static inline void debug_timer_init(struct timer_list *timer) { }
872 static inline void debug_timer_activate(struct timer_list *timer) { }
873 static inline void debug_timer_deactivate(struct timer_list *timer) { }
874 static inline void debug_timer_assert_init(struct timer_list *timer) { }
875 #endif
876
877 static inline void debug_init(struct timer_list *timer)
878 {
879 debug_timer_init(timer);
880 trace_timer_init(timer);
881 }
882
883 static inline void debug_deactivate(struct timer_list *timer)
884 {
885 debug_timer_deactivate(timer);
886 trace_timer_cancel(timer);
887 }
888
889 static inline void debug_assert_init(struct timer_list *timer)
890 {
891 debug_timer_assert_init(timer);
892 }
893
894 static void do_init_timer(struct timer_list *timer,
895 void (*func)(struct timer_list *),
896 unsigned int flags,
897 const char *name, struct lock_class_key *key)
898 {
899 timer->entry.pprev = NULL;
900 timer->function = func;
901 if (WARN_ON_ONCE(flags & ~TIMER_INIT_FLAGS))
902 flags &= TIMER_INIT_FLAGS;
903 timer->flags = flags | raw_smp_processor_id();
904 lockdep_init_map(&timer->lockdep_map, name, key, 0);
905 }
906
907 /**
908 * init_timer_key - initialize a timer
909 * @timer: the timer to be initialized
910 * @func: timer callback function
911 * @flags: timer flags
912 * @name: name of the timer
913 * @key: lockdep class key of the fake lock used for tracking timer
914 * sync lock dependencies
915 *
916 * init_timer_key() must be done to a timer prior to calling *any* of the
917 * other timer functions.
918 */
919 void init_timer_key(struct timer_list *timer,
920 void (*func)(struct timer_list *), unsigned int flags,
921 const char *name, struct lock_class_key *key)
922 {
923 debug_init(timer);
924 do_init_timer(timer, func, flags, name, key);
925 }
926 EXPORT_SYMBOL(init_timer_key);
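
/*
 * Illustrative sketch (not part of this file; the struct and helper names
 * are made up): drivers normally reach init_timer_key() through the
 * timer_setup() wrapper and recover their containing object with
 * from_timer() in the callback.
 *
 *	struct my_device {
 *		struct timer_list watchdog;
 *	};
 *
 *	static void my_watchdog_fn(struct timer_list *t)
 *	{
 *		struct my_device *dev = from_timer(dev, t, watchdog);
 *
 *		handle_watchdog(dev);
 *	}
 *
 *	timer_setup(&dev->watchdog, my_watchdog_fn, 0);
 *	mod_timer(&dev->watchdog, jiffies + HZ);
 */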
927
928 static inline void detach_timer(struct timer_list *timer, bool clear_pending)
929 {
930 struct hlist_node *entry = &timer->entry;
931
932 debug_deactivate(timer);
933
934 __hlist_del(entry);
935 if (clear_pending)
936 entry->pprev = NULL;
937 entry->next = LIST_POISON2;
938 }
939
940 static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
941 bool clear_pending)
942 {
943 unsigned idx = timer_get_idx(timer);
944
945 if (!timer_pending(timer))
946 return 0;
947
948 if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) {
949 __clear_bit(idx, base->pending_map);
950 base->next_expiry_recalc = true;
951 }
952
953 detach_timer(timer, clear_pending);
954 return 1;
955 }
956
957 static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
958 {
959 int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL;
960 struct timer_base *base;
961
962 base = per_cpu_ptr(&timer_bases[index], cpu);
963
964 /*
965 * If the timer is deferrable and NO_HZ_COMMON is set then we need
966 * to use the deferrable base.
967 */
968 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
969 base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
970 return base;
971 }
972
973 static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
974 {
975 int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL;
976 struct timer_base *base;
977
978 base = this_cpu_ptr(&timer_bases[index]);
979
980 /*
981 * If the timer is deferrable and NO_HZ_COMMON is set then we need
982 * to use the deferrable base.
983 */
984 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
985 base = this_cpu_ptr(&timer_bases[BASE_DEF]);
986 return base;
987 }
988
989 static inline struct timer_base *get_timer_base(u32 tflags)
990 {
991 return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
992 }
993
994 static inline void __forward_timer_base(struct timer_base *base,
995 unsigned long basej)
996 {
997 /*
998 * Check whether we can forward the base. We can only do that when
999 * @basej is past base->clk otherwise we might rewind base->clk.
1000 */
1001 if (time_before_eq(basej, base->clk))
1002 return;
1003
1004 /*
1005 * If the next expiry value is > jiffies, then we fast forward to
1006 * jiffies otherwise we forward to the next expiry value.
1007 */
1008 if (time_after(base->next_expiry, basej)) {
1009 base->clk = basej;
1010 } else {
1011 if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk)))
1012 return;
1013 base->clk = base->next_expiry;
1014 }
1015
1016 }
1017
1018 static inline void forward_timer_base(struct timer_base *base)
1019 {
1020 __forward_timer_base(base, READ_ONCE(jiffies));
1021 }
1022
1023 /*
1024 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
1025 * that all timers which are tied to this base are locked, and the base itself
1026 * is locked too.
1027 *
1028 * So __run_timers/migrate_timers can safely modify all timers which could
1029 * be found in the base->vectors array.
1030 *
1031 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
1032 * to wait until the migration is done.
1033 */
1034 static struct timer_base *lock_timer_base(struct timer_list *timer,
1035 unsigned long *flags)
1036 __acquires(timer->base->lock)
1037 {
1038 for (;;) {
1039 struct timer_base *base;
1040 u32 tf;
1041
1042 /*
1043 * We need to use READ_ONCE() here, otherwise the compiler
1044 * might re-read @tf between the check for TIMER_MIGRATING
1045 * and spin_lock().
1046 */
1047 tf = READ_ONCE(timer->flags);
1048
1049 if (!(tf & TIMER_MIGRATING)) {
1050 base = get_timer_base(tf);
1051 raw_spin_lock_irqsave(&base->lock, *flags);
1052 if (timer->flags == tf)
1053 return base;
1054 raw_spin_unlock_irqrestore(&base->lock, *flags);
1055 }
1056 cpu_relax();
1057 }
1058 }
1059
1060 #define MOD_TIMER_PENDING_ONLY 0x01
1061 #define MOD_TIMER_REDUCE 0x02
1062 #define MOD_TIMER_NOTPENDING 0x04
1063
1064 static inline int
1065 __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
1066 {
1067 unsigned long clk = 0, flags, bucket_expiry;
1068 struct timer_base *base, *new_base;
1069 unsigned int idx = UINT_MAX;
1070 int ret = 0;
1071
1072 debug_assert_init(timer);
1073
1074 /*
1075 * This is a common optimization triggered by the networking code - if
1076 * the timer is re-modified to have the same timeout or ends up in the
1077 * same array bucket then just return:
1078 */
1079 if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) {
1080 /*
1081 * The downside of this optimization is that it can result in
1082 * larger granularity than you would get from adding a new
1083 * timer with this expiry.
1084 */
1085 long diff = timer->expires - expires;
1086
1087 if (!diff)
1088 return 1;
1089 if (options & MOD_TIMER_REDUCE && diff <= 0)
1090 return 1;
1091
1092 /*
1093 * We lock timer base and calculate the bucket index right
1094 * here. If the timer ends up in the same bucket, then we
1095 * just update the expiry time and avoid the whole
1096 * dequeue/enqueue dance.
1097 */
1098 base = lock_timer_base(timer, &flags);
1099 /*
1100 * Has @timer been shutdown? This needs to be evaluated
1101 * while holding base lock to prevent a race against the
1102 * shutdown code.
1103 */
1104 if (!timer->function)
1105 goto out_unlock;
1106
1107 forward_timer_base(base);
1108
1109 if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
1110 time_before_eq(timer->expires, expires)) {
1111 ret = 1;
1112 goto out_unlock;
1113 }
1114
1115 clk = base->clk;
1116 idx = calc_wheel_index(expires, clk, &bucket_expiry);
1117
1118 /*
1119 * Retrieve and compare the array index of the pending
1120 * timer. If it matches set the expiry to the new value so a
1121 * subsequent call will exit in the expires check above.
1122 */
1123 if (idx == timer_get_idx(timer)) {
1124 if (!(options & MOD_TIMER_REDUCE))
1125 timer->expires = expires;
1126 else if (time_after(timer->expires, expires))
1127 timer->expires = expires;
1128 ret = 1;
1129 goto out_unlock;
1130 }
1131 } else {
1132 base = lock_timer_base(timer, &flags);
1133 /*
1134 * Has @timer been shutdown? This needs to be evaluated
1135 * while holding base lock to prevent a race against the
1136 * shutdown code.
1137 */
1138 if (!timer->function)
1139 goto out_unlock;
1140
1141 forward_timer_base(base);
1142 }
1143
1144 ret = detach_if_pending(timer, base, false);
1145 if (!ret && (options & MOD_TIMER_PENDING_ONLY))
1146 goto out_unlock;
1147
1148 new_base = get_timer_this_cpu_base(timer->flags);
1149
1150 if (base != new_base) {
1151 /*
1152 * We are trying to schedule the timer on the new base.
1153 * However we can't change timer's base while it is running,
1154 * otherwise timer_delete_sync() can't detect that the timer's
1155 * handler has not yet finished. This also guarantees that the
1156 * timer is serialized wrt itself.
1157 */
1158 if (likely(base->running_timer != timer)) {
1159 /* See the comment in lock_timer_base() */
1160 timer->flags |= TIMER_MIGRATING;
1161
1162 raw_spin_unlock(&base->lock);
1163 base = new_base;
1164 raw_spin_lock(&base->lock);
1165 WRITE_ONCE(timer->flags,
1166 (timer->flags & ~TIMER_BASEMASK) | base->cpu);
1167 forward_timer_base(base);
1168 }
1169 }
1170
1171 debug_timer_activate(timer);
1172
1173 timer->expires = expires;
1174 /*
1175 * If 'idx' was calculated above and the base time did not advance
1176 * between calculating 'idx' and possibly switching the base, only
1177 * enqueue_timer() is required. Otherwise we need to (re)calculate
1178 * the wheel index via internal_add_timer().
1179 */
1180 if (idx != UINT_MAX && clk == base->clk)
1181 enqueue_timer(base, timer, idx, bucket_expiry);
1182 else
1183 internal_add_timer(base, timer);
1184
1185 out_unlock:
1186 raw_spin_unlock_irqrestore(&base->lock, flags);
1187
1188 return ret;
1189 }
1190
1191 /**
1192 * mod_timer_pending - Modify a pending timer's timeout
1193 * @timer: The pending timer to be modified
1194 * @expires: New absolute timeout in jiffies
1195 *
1196 * mod_timer_pending() is the same for pending timers as mod_timer(), but
1197 * will not activate inactive timers.
1198 *
1199 * If @timer->function == NULL then the start operation is silently
1200 * discarded.
1201 *
1202 * Return:
1203 * * %0 - The timer was inactive and not modified or was in
1204 * shutdown state and the operation was discarded
1205 * * %1 - The timer was active and requeued to expire at @expires
1206 */
1207 int mod_timer_pending(struct timer_list *timer, unsigned long expires)
1208 {
1209 return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY);
1210 }
1211 EXPORT_SYMBOL(mod_timer_pending);
1212
1213 /**
1214 * mod_timer - Modify a timer's timeout
1215 * @timer: The timer to be modified
1216 * @expires: New absolute timeout in jiffies
1217 *
1218 * mod_timer(timer, expires) is equivalent to:
1219 *
1220 * del_timer(timer); timer->expires = expires; add_timer(timer);
1221 *
1222 * mod_timer() is more efficient than the above open coded sequence. In
1223 * case that the timer is inactive, the del_timer() part is a NOP. The
1224 * timer is in any case activated with the new expiry time @expires.
1225 *
1226 * Note that if there are multiple unserialized concurrent users of the
1227 * same timer, then mod_timer() is the only safe way to modify the timeout,
1228 * since add_timer() cannot modify an already running timer.
1229 *
1230 * If @timer->function == NULL then the start operation is silently
1231 * discarded. In this case the return value is 0 and meaningless.
1232 *
1233 * Return:
1234 * * %0 - The timer was inactive and started or was in shutdown
1235 * state and the operation was discarded
1236 * * %1 - The timer was active and requeued to expire at @expires or
1237 * the timer was active and not modified because @expires did
1238 * not change the effective expiry time
1239 */
1240 int mod_timer(struct timer_list *timer, unsigned long expires)
1241 {
1242 return __mod_timer(timer, expires, 0);
1243 }
1244 EXPORT_SYMBOL(mod_timer);
1245
1246 /**
1247 * timer_reduce - Modify a timer's timeout if it would reduce the timeout
1248 * @timer: The timer to be modified
1249 * @expires: New absolute timeout in jiffies
1250 *
1251 * timer_reduce() is very similar to mod_timer(), except that it will only
1252 * modify an enqueued timer if that would reduce the expiration time. If
1253 * @timer is not enqueued it starts the timer.
1254 *
1255 * If @timer->function == NULL then the start operation is silently
1256 * discarded.
1257 *
1258 * Return:
1259 * * %0 - The timer was inactive and started or was in shutdown
1260 * state and the operation was discarded
1261 * * %1 - The timer was active and requeued to expire at @expires or
1262 * the timer was active and not modified because @expires
1263 * did not change the effective expiry time such that the
1264 * timer would expire earlier than already scheduled
1265 */
1266 int timer_reduce(struct timer_list *timer, unsigned long expires)
1267 {
1268 return __mod_timer(timer, expires, MOD_TIMER_REDUCE);
1269 }
1270 EXPORT_SYMBOL(timer_reduce);
1271
1272 /**
1273 * add_timer - Start a timer
1274 * @timer: The timer to be started
1275 *
1276 * Start @timer to expire at @timer->expires in the future. @timer->expires
1277 * is the absolute expiry time measured in 'jiffies'. When the timer expires
1278 * timer->function(timer) will be invoked from soft interrupt context.
1279 *
1280 * The @timer->expires and @timer->function fields must be set prior
1281 * to calling this function.
1282 *
1283 * If @timer->function == NULL then the start operation is silently
1284 * discarded.
1285 *
1286 * If @timer->expires is already in the past @timer will be queued to
1287 * expire at the next timer tick.
1288 *
1289 * This can only operate on an inactive timer. Attempts to invoke this on
1290 * an active timer are rejected with a warning.
1291 */
1292 void add_timer(struct timer_list *timer)
1293 {
1294 if (WARN_ON_ONCE(timer_pending(timer)))
1295 return;
1296 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
1297 }
1298 EXPORT_SYMBOL(add_timer);
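
/*
 * Illustrative sketch (not part of this file; the variable name is made up):
 * arming an already initialized one-shot timer and tearing it down.
 * add_timer() requires @timer->expires to be set up front; callers which
 * want to set the expiry and (re)arm in one step use mod_timer() instead.
 *
 *	my_timer.expires = jiffies + msecs_to_jiffies(500);
 *	add_timer(&my_timer);
 *	...
 *	timer_shutdown_sync(&my_timer);
 *
 * The final timer_shutdown_sync() at teardown waits for a running callback
 * and prevents any further rearming.
 */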
1299
1300 /**
1301 * add_timer_local() - Start a timer on the local CPU
1302 * @timer: The timer to be started
1303 *
1304 * Same as add_timer() except that the timer flag TIMER_PINNED is set.
1305 *
1306 * See add_timer() for further details.
1307 */
1308 void add_timer_local(struct timer_list *timer)
1309 {
1310 if (WARN_ON_ONCE(timer_pending(timer)))
1311 return;
1312 timer->flags |= TIMER_PINNED;
1313 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
1314 }
1315 EXPORT_SYMBOL(add_timer_local);
1316
1317 /**
1318 * add_timer_global() - Start a timer without TIMER_PINNED flag set
1319 * @timer: The timer to be started
1320 *
1321 * Same as add_timer() except that the timer flag TIMER_PINNED is unset.
1322 *
1323 * See add_timer() for further details.
1324 */
1325 void add_timer_global(struct timer_list *timer)
1326 {
1327 if (WARN_ON_ONCE(timer_pending(timer)))
1328 return;
1329 timer->flags &= ~TIMER_PINNED;
1330 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
1331 }
1332 EXPORT_SYMBOL(add_timer_global);
1333
1334 /**
1335 * add_timer_on - Start a timer on a particular CPU
1336 * @timer: The timer to be started
1337 * @cpu: The CPU to start it on
1338 *
1339 * Same as add_timer() except that it starts the timer on the given CPU and
1340 * the TIMER_PINNED flag is set. If the timer should not be pinned when it
1341 * is armed the next time, add_timer_global() should be used instead as it
1342 * unsets the TIMER_PINNED flag.
1343 *
1344 * See add_timer() for further details.
1345 */
1346 void add_timer_on(struct timer_list *timer, int cpu)
1347 {
1348 struct timer_base *new_base, *base;
1349 unsigned long flags;
1350
1351 debug_assert_init(timer);
1352
1353 if (WARN_ON_ONCE(timer_pending(timer)))
1354 return;
1355
1356 /* Make sure timer flags have TIMER_PINNED flag set */
1357 timer->flags |= TIMER_PINNED;
1358
1359 new_base = get_timer_cpu_base(timer->flags, cpu);
1360
1361 /*
1362 * If @timer was on a different CPU, it should be migrated with the
1363 * old base locked to prevent other operations proceeding with the
1364 * wrong base locked. See lock_timer_base().
1365 */
1366 base = lock_timer_base(timer, &flags);
1367 /*
1368 * Has @timer been shutdown? This needs to be evaluated while
1369 * holding base lock to prevent a race against the shutdown code.
1370 */
1371 if (!timer->function)
1372 goto out_unlock;
1373
1374 if (base != new_base) {
1375 timer->flags |= TIMER_MIGRATING;
1376
1377 raw_spin_unlock(&base->lock);
1378 base = new_base;
1379 raw_spin_lock(&base->lock);
1380 WRITE_ONCE(timer->flags,
1381 (timer->flags & ~TIMER_BASEMASK) | cpu);
1382 }
1383 forward_timer_base(base);
1384
1385 debug_timer_activate(timer);
1386 internal_add_timer(base, timer);
1387 out_unlock:
1388 raw_spin_unlock_irqrestore(&base->lock, flags);
1389 }
1390 EXPORT_SYMBOL_GPL(add_timer_on);
1391
1392 /**
1393 * __timer_delete - Internal function: Deactivate a timer
1394 * @timer: The timer to be deactivated
1395 * @shutdown: If true, this indicates that the timer is about to be
1396 * shutdown permanently.
1397 *
1398 * If @shutdown is true then @timer->function is set to NULL under the
1399 * timer base lock which prevents further rearming of the timer. In that
1400 * case any attempt to rearm @timer after this function returns will be
1401 * silently ignored.
1402 *
1403 * Return:
1404 * * %0 - The timer was not pending
1405 * * %1 - The timer was pending and deactivated
1406 */
1407 static int __timer_delete(struct timer_list *timer, bool shutdown)
1408 {
1409 struct timer_base *base;
1410 unsigned long flags;
1411 int ret = 0;
1412
1413 debug_assert_init(timer);
1414
1415 /*
1416 * If @shutdown is set then the lock has to be taken whether the
1417 * timer is pending or not to protect against a concurrent rearm
1418 * which might hit between the lockless pending check and the lock
1419 * acquisition. By taking the lock it is ensured that such a newly
1420 * enqueued timer is dequeued and cannot end up with
1421 * timer->function == NULL in the expiry code.
1422 *
1423 * If timer->function is currently executed, then this makes sure
1424 * that the callback cannot requeue the timer.
1425 */
1426 if (timer_pending(timer) || shutdown) {
1427 base = lock_timer_base(timer, &flags);
1428 ret = detach_if_pending(timer, base, true);
1429 if (shutdown)
1430 timer->function = NULL;
1431 raw_spin_unlock_irqrestore(&base->lock, flags);
1432 }
1433
1434 return ret;
1435 }
1436
1437 /**
1438 * timer_delete - Deactivate a timer
1439 * @timer: The timer to be deactivated
1440 *
1441 * The function only deactivates a pending timer, but contrary to
1442 * timer_delete_sync() it does not take into account whether the timer's
1443 * callback function is concurrently executed on a different CPU or not.
1444 * Nor does it prevent rearming of the timer. If @timer can be rearmed
1445 * concurrently then the return value of this function is meaningless.
1446 *
1447 * Return:
1448 * * %0 - The timer was not pending
1449 * * %1 - The timer was pending and deactivated
1450 */
1451 int timer_delete(struct timer_list *timer)
1452 {
1453 return __timer_delete(timer, false);
1454 }
1455 EXPORT_SYMBOL(timer_delete);
1456
1457 /**
1458 * timer_shutdown - Deactivate a timer and prevent rearming
1459 * @timer: The timer to be deactivated
1460 *
1461 * The function does not wait for an eventually running timer callback on a
1462 * different CPU but it prevents rearming of the timer. Any attempt to arm
1463 * @timer after this function returns will be silently ignored.
1464 *
1465 * This function is useful for teardown code and should only be used when
1466 * timer_shutdown_sync() cannot be invoked due to locking or context constraints.
1467 *
1468 * Return:
1469 * * %0 - The timer was not pending
1470 * * %1 - The timer was pending
1471 */
1472 int timer_shutdown(struct timer_list *timer)
1473 {
1474 return __timer_delete(timer, true);
1475 }
1476 EXPORT_SYMBOL_GPL(timer_shutdown);
1477
1478 /**
1479 * __try_to_del_timer_sync - Internal function: Try to deactivate a timer
1480 * @timer: Timer to deactivate
1481 * @shutdown: If true, this indicates that the timer is about to be
1482 * shutdown permanently.
1483 *
1484 * If @shutdown is true then @timer->function is set to NULL under the
1485 * timer base lock which prevents further rearming of the timer. Any
1486 * attempt to rearm @timer after this function returns will be silently
1487 * ignored.
1488 *
1489 * This function cannot guarantee that the timer cannot be rearmed
1490 * right after dropping the base lock if @shutdown is false. That
1491 * needs to be prevented by the calling code if necessary.
1492 *
1493 * Return:
1494 * * %0 - The timer was not pending
1495 * * %1 - The timer was pending and deactivated
1496 * * %-1 - The timer callback function is running on a different CPU
1497 */
1498 static int __try_to_del_timer_sync(struct timer_list *timer, bool shutdown)
1499 {
1500 struct timer_base *base;
1501 unsigned long flags;
1502 int ret = -1;
1503
1504 debug_assert_init(timer);
1505
1506 base = lock_timer_base(timer, &flags);
1507
1508 if (base->running_timer != timer)
1509 ret = detach_if_pending(timer, base, true);
1510 if (shutdown)
1511 timer->function = NULL;
1512
1513 raw_spin_unlock_irqrestore(&base->lock, flags);
1514
1515 return ret;
1516 }
1517
1518 /**
1519 * try_to_del_timer_sync - Try to deactivate a timer
1520 * @timer: Timer to deactivate
1521 *
1522 * This function tries to deactivate a timer. On success the timer is not
1523 * queued and the timer callback function is not running on any CPU.
1524 *
1525 * This function does not guarantee that the timer cannot be rearmed right
1526 * after dropping the base lock. That needs to be prevented by the calling
1527 * code if necessary.
1528 *
1529 * Return:
1530 * * %0 - The timer was not pending
1531 * * %1 - The timer was pending and deactivated
1532 * * %-1 - The timer callback function is running on a different CPU
1533 */
1534 int try_to_del_timer_sync(struct timer_list *timer)
1535 {
1536 return __try_to_del_timer_sync(timer, false);
1537 }
1538 EXPORT_SYMBOL(try_to_del_timer_sync);
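
/*
 * Illustrative sketch (not part of this file; obj and its lock are made-up
 * names for the caller's own state): callers which cannot simply use
 * timer_delete_sync(), e.g. because they hold a lock the callback also
 * takes, typically drop that lock and retry until the callback has finished:
 *
 *	while (try_to_del_timer_sync(&obj->timer) < 0) {
 *		spin_unlock(&obj->lock);
 *		cpu_relax();
 *		spin_lock(&obj->lock);
 *	}
 *
 * When sleeping is allowed and no such lock ordering exists,
 * timer_delete_sync() is the preferred interface.
 */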
1539
1540 #ifdef CONFIG_PREEMPT_RT
1541 static __init void timer_base_init_expiry_lock(struct timer_base *base)
1542 {
1543 spin_lock_init(&base->expiry_lock);
1544 }
1545
1546 static inline void timer_base_lock_expiry(struct timer_base *base)
1547 {
1548 spin_lock(&base->expiry_lock);
1549 }
1550
1551 static inline void timer_base_unlock_expiry(struct timer_base *base)
1552 {
1553 spin_unlock(&base->expiry_lock);
1554 }
1555
1556 /*
1557 * The counterpart to del_timer_wait_running().
1558 *
1559 * If there is a waiter for base->expiry_lock, then it was waiting for the
1560 * timer callback to finish. Drop expiry_lock and reacquire it. That allows
1561 * the waiter to acquire the lock and make progress.
1562 */
1563 static void timer_sync_wait_running(struct timer_base *base)
1564 __releases(&base->lock) __releases(&base->expiry_lock)
1565 __acquires(&base->expiry_lock) __acquires(&base->lock)
1566 {
1567 if (atomic_read(&base->timer_waiters)) {
1568 raw_spin_unlock_irq(&base->lock);
1569 spin_unlock(&base->expiry_lock);
1570 spin_lock(&base->expiry_lock);
1571 raw_spin_lock_irq(&base->lock);
1572 }
1573 }
1574
1575 /*
1576 * This function is called on PREEMPT_RT kernels when the fast path
1577 * deletion of a timer failed because the timer callback function was
1578 * running.
1579 *
1580 * This prevents priority inversion, if the softirq thread on a remote CPU
1581 * got preempted, and it prevents a live lock when the task which tries to
1582 * delete a timer has preempted the softirq thread running the timer callback
1583 * function.
1584 */
1585 static void del_timer_wait_running(struct timer_list *timer)
1586 {
1587 u32 tf;
1588
1589 tf = READ_ONCE(timer->flags);
1590 if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) {
1591 struct timer_base *base = get_timer_base(tf);
1592
1593 /*
1594 * Mark the base as contended and grab the expiry lock,
1595 * which is held by the softirq across the timer
1596 * callback. Drop the lock immediately so the softirq can
1597 * expire the next timer. In theory the timer could already
1598 * be running again, but that's more than unlikely and just
1599 * causes another wait loop.
1600 */
1601 atomic_inc(&base->timer_waiters);
1602 spin_lock_bh(&base->expiry_lock);
1603 atomic_dec(&base->timer_waiters);
1604 spin_unlock_bh(&base->expiry_lock);
1605 }
1606 }
1607 #else
1608 static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
1609 static inline void timer_base_lock_expiry(struct timer_base *base) { }
1610 static inline void timer_base_unlock_expiry(struct timer_base *base) { }
1611 static inline void timer_sync_wait_running(struct timer_base *base) { }
1612 static inline void del_timer_wait_running(struct timer_list *timer) { }
1613 #endif
1614
1615 /**
1616 * __timer_delete_sync - Internal function: Deactivate a timer and wait
1617 * for the handler to finish.
1618 * @timer: The timer to be deactivated
1619 * @shutdown: If true, @timer->function will be set to NULL under the
1620 * timer base lock which prevents rearming of @timer
1621 *
1622 * If @shutdown is not set the timer can be rearmed later. If the timer can
1623 * be rearmed concurrently, i.e. after dropping the base lock, then the
1624 * return value is meaningless.
1625 *
1626 * If @shutdown is set then @timer->function is set to NULL under timer
1627 * base lock which prevents rearming of the timer. Any attempt to rearm
1628 * a shutdown timer is silently ignored.
1629 *
1630 * If the timer should be reused after shutdown it has to be initialized
1631 * again.
1632 *
1633 * Return:
1634 * * %0 - The timer was not pending
1635 * * %1 - The timer was pending and deactivated
1636 */
1637 static int __timer_delete_sync(struct timer_list *timer, bool shutdown)
1638 {
1639 int ret;
1640
1641 #ifdef CONFIG_LOCKDEP
1642 unsigned long flags;
1643
1644 /*
1645 * If lockdep gives a backtrace here, please reference
1646 * the synchronization rules above.
1647 */
1648 local_irq_save(flags);
1649 lock_map_acquire(&timer->lockdep_map);
1650 lock_map_release(&timer->lockdep_map);
1651 local_irq_restore(flags);
1652 #endif
1653 /*
1654 * don't use it in hardirq context, because it
1655 * could lead to deadlock.
1656 */
1657 WARN_ON(in_hardirq() && !(timer->flags & TIMER_IRQSAFE));
1658
1659 /*
1660 * Must be able to sleep on PREEMPT_RT because of the slowpath in
1661 * del_timer_wait_running().
1662 */
1663 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE))
1664 lockdep_assert_preemption_enabled();
1665
1666 do {
1667 ret = __try_to_del_timer_sync(timer, shutdown);
1668
1669 if (unlikely(ret < 0)) {
1670 del_timer_wait_running(timer);
1671 cpu_relax();
1672 }
1673 } while (ret < 0);
1674
1675 return ret;
1676 }
1677
1678 /**
1679 * timer_delete_sync - Deactivate a timer and wait for the handler to finish.
1680 * @timer: The timer to be deactivated
1681 *
1682 * Synchronization rules: Callers must prevent restarting of the timer,
1683 * otherwise this function is meaningless. It must not be called from
1684 * interrupt contexts unless the timer is an irqsafe one. The caller must
1685 * not hold locks which would prevent completion of the timer's callback
1686 * function. The timer's handler must not call add_timer_on(). Upon exit
1687 * the timer is not queued and the handler is not running on any CPU.
1688 *
1689 * For !irqsafe timers, the caller must not hold locks that are held in
1690 * interrupt context, even if the lock has nothing to do with the timer in
1691 * question. Here's why::
1692 *
1693 * CPU0 CPU1
1694 * ---- ----
1695 * <SOFTIRQ>
1696 * call_timer_fn();
1697 * base->running_timer = mytimer;
1698 * spin_lock_irq(somelock);
1699 * <IRQ>
1700 * spin_lock(somelock);
1701 * timer_delete_sync(mytimer);
1702 * while (base->running_timer == mytimer);
1703 *
1704 * Now timer_delete_sync() will never return and never release somelock.
1705 * The interrupt on the other CPU is waiting to grab somelock but it has
1706 * interrupted the softirq that CPU0 is waiting to finish.
1707 *
1708 * This function cannot guarantee that the timer is not rearmed again by
1709 * some concurrent or preempting code, right after it dropped the base
1710 * lock. If there is the possibility of a concurrent rearm then the return
1711 * value of the function is meaningless.
1712 *
1713 * If such a guarantee is needed, e.g. for teardown situations then use
1714 * timer_shutdown_sync() instead.
1715 *
1716 * Return:
1717 * * %0 - The timer was not pending
1718 * * %1 - The timer was pending and deactivated
1719 */
1720 int timer_delete_sync(struct timer_list *timer)
1721 {
1722 return __timer_delete_sync(timer, false);
1723 }
1724 EXPORT_SYMBOL(timer_delete_sync);
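/*
 * Editorial usage sketch, not part of the original source: a hedged,
 * hypothetical illustration of the synchronization rules above. The
 * 'mything' structure and functions are illustrative assumptions:
 *
 *	static void mything_callback(struct timer_list *t)
 *	{
 *		struct mything *m = from_timer(m, t, timer);
 *
 *		spin_lock(&m->lock);
 *		// ... update state ...
 *		spin_unlock(&m->lock);
 *	}
 *
 *	static void mything_stop(struct mything *m)
 *	{
 *		// Correct: no lock held which the callback could need, so
 *		// waiting for a running callback cannot deadlock.
 *		timer_delete_sync(&m->timer);
 *	}
 *
 * Calling timer_delete_sync(&m->timer) with m->lock held would deadlock as
 * soon as the callback runs concurrently and spins on m->lock.
 */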
1725
1726 /**
1727 * timer_shutdown_sync - Shutdown a timer and prevent rearming
1728 * @timer: The timer to be shutdown
1729 *
1730 * When the function returns it is guaranteed that:
1731 * - @timer is not queued
1732 * - The callback function of @timer is not running
1733 * - @timer cannot be enqueued again. Any attempt to rearm
1734 * @timer is silently ignored.
1735 *
1736 * See timer_delete_sync() for synchronization rules.
1737 *
1738 * This function is useful for final teardown of an infrastructure where
1739 * the timer is subject to a circular dependency problem.
1740 *
1741 * A common pattern for this is a timer and a workqueue where the timer can
1742 * schedule work and work can arm the timer. On shutdown the workqueue must
1743 * be destroyed and the timer must be prevented from rearming. Unless the
1744 * code has conditionals like 'if (mything->in_shutdown)' to prevent
1745 * rearming, there is no way to get this correct with timer_delete_sync().
1746 *
1747 * timer_shutdown_sync() solves this problem. The correct ordering of
1748 * calls in this case is:
1749 *
1750 * timer_shutdown_sync(&mything->timer);
1751 * destroy_workqueue(mything->workqueue);
1752 *
1753 * After this 'mything' can be safely freed.
1754 *
1755 * This obviously implies that the timer is not required to be functional
1756 * for the rest of the shutdown operation.
1757 *
1758 * Return:
1759 * * %0 - The timer was not pending
1760 * * %1 - The timer was pending
1761 */
1762 int timer_shutdown_sync(struct timer_list *timer)
1763 {
1764 return __timer_delete_sync(timer, true);
1765 }
1766 EXPORT_SYMBOL_GPL(timer_shutdown_sync);
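/*
 * Editorial usage sketch, not part of the original source: a hedged,
 * hypothetical version of the timer/workqueue teardown pattern described
 * in the kernel-doc above. 'mything', its members and the release function
 * are illustrative assumptions:
 *
 *	static void mything_timer_fn(struct timer_list *t)
 *	{
 *		struct mything *m = from_timer(m, t, timer);
 *
 *		queue_work(m->workqueue, &m->work);	// timer schedules work
 *	}
 *
 *	static void mything_work_fn(struct work_struct *w)
 *	{
 *		struct mything *m = container_of(w, struct mything, work);
 *
 *		mod_timer(&m->timer, jiffies + HZ);	// work rearms the timer
 *	}
 *
 *	static void mything_release(struct mything *m)
 *	{
 *		timer_shutdown_sync(&m->timer);		// rearming now ignored
 *		destroy_workqueue(m->workqueue);
 *		kfree(m);
 *	}
 */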
1767
1768 static void call_timer_fn(struct timer_list *timer,
1769 void (*fn)(struct timer_list *),
1770 unsigned long baseclk)
1771 {
1772 int count = preempt_count();
1773
1774 #ifdef CONFIG_LOCKDEP
1775 /*
1776 * It is permissible to free the timer from inside the
1777 * function that is called from it, this we need to take into
1778 * account for lockdep too. To avoid bogus "held lock freed"
1779 * warnings as well as problems when looking into
1780 * timer->lockdep_map, make a copy and use that here.
1781 */
1782 struct lockdep_map lockdep_map;
1783
1784 lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
1785 #endif
1786 /*
1787 * Couple the lock chain with the lock chain at
1788 * timer_delete_sync() by acquiring the lock_map around the fn()
1789 * call here and in timer_delete_sync().
1790 */
1791 lock_map_acquire(&lockdep_map);
1792
1793 trace_timer_expire_entry(timer, baseclk);
1794 fn(timer);
1795 trace_timer_expire_exit(timer);
1796
1797 lock_map_release(&lockdep_map);
1798
1799 if (count != preempt_count()) {
1800 WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n",
1801 fn, count, preempt_count());
1802 /*
1803 * Restore the preempt count. That gives us a decent
1804 * chance to survive and extract information. If the
1805 * callback kept a lock held, bad luck, but not worse
1806 * than the BUG() we had.
1807 */
1808 preempt_count_set(count);
1809 }
1810 }
1811
1812 static void expire_timers(struct timer_base *base, struct hlist_head *head)
1813 {
1814 /*
1815 * This value is required only for tracing. base->clk was
1816 * incremented directly before expire_timers was called. But expiry
1817 * is related to the old base->clk value.
1818 */
1819 unsigned long baseclk = base->clk - 1;
1820
1821 while (!hlist_empty(head)) {
1822 struct timer_list *timer;
1823 void (*fn)(struct timer_list *);
1824
1825 timer = hlist_entry(head->first, struct timer_list, entry);
1826
1827 base->running_timer = timer;
1828 detach_timer(timer, true);
1829
1830 fn = timer->function;
1831
1832 if (WARN_ON_ONCE(!fn)) {
1833 /* Should never happen. Emphasis on should! */
1834 base->running_timer = NULL;
1835 continue;
1836 }
1837
1838 if (timer->flags & TIMER_IRQSAFE) {
1839 raw_spin_unlock(&base->lock);
1840 call_timer_fn(timer, fn, baseclk);
1841 raw_spin_lock(&base->lock);
1842 base->running_timer = NULL;
1843 } else {
1844 raw_spin_unlock_irq(&base->lock);
1845 call_timer_fn(timer, fn, baseclk);
1846 raw_spin_lock_irq(&base->lock);
1847 base->running_timer = NULL;
1848 timer_sync_wait_running(base);
1849 }
1850 }
1851 }
1852
1853 static int collect_expired_timers(struct timer_base *base,
1854 struct hlist_head *heads)
1855 {
1856 unsigned long clk = base->clk = base->next_expiry;
1857 struct hlist_head *vec;
1858 int i, levels = 0;
1859 unsigned int idx;
1860
1861 for (i = 0; i < LVL_DEPTH; i++) {
1862 idx = (clk & LVL_MASK) + i * LVL_SIZE;
1863
1864 if (__test_and_clear_bit(idx, base->pending_map)) {
1865 vec = base->vectors + idx;
1866 hlist_move_list(vec, heads++);
1867 levels++;
1868 }
1869 /* Is it time to look at the next level? */
1870 if (clk & LVL_CLK_MASK)
1871 break;
1872 /* Shift clock for the next level granularity */
1873 clk >>= LVL_CLK_SHIFT;
1874 }
1875 return levels;
1876 }
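/*
 * Editorial worked example, not part of the original source, assuming the
 * usual wheel geometry of LVL_BITS = 6 (LVL_SIZE = 64, LVL_MASK = 0x3f)
 * and LVL_CLK_SHIFT = 3 (LVL_CLK_MASK = 0x7). With clk = 0x1240:
 *
 *	level 0: idx = (0x1240 & 0x3f) + 0 * 64 = 0
 *	         0x1240 & 0x7 == 0, so continue; clk >>= 3 -> 0x248
 *	level 1: idx = (0x248 & 0x3f) + 1 * 64 = 8 + 64 = 72
 *	         0x248 & 0x7 == 0, so continue; clk >>= 3 -> 0x49
 *	level 2: idx = (0x49 & 0x3f) + 2 * 64 = 9 + 128 = 137
 *	         0x49 & 0x7 == 1, so stop: higher levels cannot hold a
 *	         bucket expiring at this clk.
 *
 * Every pending bucket found on the way is moved onto its own list in
 * @heads and the number of non-empty levels is returned.
 */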
1877
1878 /*
1879 * Find the next pending bucket of a level. Search from level start (@offset)
1880 * + @clk upwards and if nothing there, search from start of the level
1881 * (@offset) up to @offset + clk.
1882 */
1883 static int next_pending_bucket(struct timer_base *base, unsigned offset,
1884 unsigned clk)
1885 {
1886 unsigned pos, start = offset + clk;
1887 unsigned end = offset + LVL_SIZE;
1888
1889 pos = find_next_bit(base->pending_map, end, start);
1890 if (pos < end)
1891 return pos - start;
1892
1893 pos = find_next_bit(base->pending_map, start, offset);
1894 return pos < start ? pos + LVL_SIZE - start : -1;
1895 }
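/*
 * Editorial worked example, not part of the original source, assuming
 * LVL_SIZE = 64. With @offset = 64 (level 1) and @clk = 60 the first
 * search scans pending_map bits [124, 128). If nothing is set there, the
 * second search scans [64, 124). A hit at bit 66 (bucket 2 of level 1)
 * returns 66 + 64 - 124 = 6, i.e. the wrapped distance in level-1 clock
 * units from @clk to that bucket.
 */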
1896
1897 /*
1898 * Search the first expiring timer in the various clock levels. Caller must
1899 * hold base->lock.
1900 *
1901 * Store next expiry time in base->next_expiry.
1902 */
1903 static void timer_recalc_next_expiry(struct timer_base *base)
1904 {
1905 unsigned long clk, next, adj;
1906 unsigned lvl, offset = 0;
1907
1908 next = base->clk + NEXT_TIMER_MAX_DELTA;
1909 clk = base->clk;
1910 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
1911 int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
1912 unsigned long lvl_clk = clk & LVL_CLK_MASK;
1913
1914 if (pos >= 0) {
1915 unsigned long tmp = clk + (unsigned long) pos;
1916
1917 tmp <<= LVL_SHIFT(lvl);
1918 if (time_before(tmp, next))
1919 next = tmp;
1920
1921 /*
1922 * If the next expiration happens before we reach
1923 * the next level, no need to check further.
1924 */
1925 if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK))
1926 break;
1927 }
1928 /*
1929 * Clock for the next level. If the current level clock lower
1930 * bits are zero, we look at the next level as is. If not we
1931 * need to advance it by one because that's going to be the
1932 * next expiring bucket in that level. base->clk is the next
1933 * expiring jiffy. So in case of:
1934 *
1935 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1936 * 0 0 0 0 0 0
1937 *
1938 * we have to look at all levels @index 0. With
1939 *
1940 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1941 * 0 0 0 0 0 2
1942 *
1943 * LVL0 has the next expiring bucket @index 2. The upper
1944 * levels have the next expiring bucket @index 1.
1945 *
1946 * In case that the propagation wraps the next level the same
1947 * rules apply:
1948 *
1949 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1950 * 0 0 0 0 F 2
1951 *
1952 * So after looking at LVL0 we get:
1953 *
1954 * LVL5 LVL4 LVL3 LVL2 LVL1
1955 * 0 0 0 1 0
1956 *
1957 * So no propagation from LVL1 to LVL2 because that happened
1958 * with the add already, but then we need to propagate further
1959 * from LVL2 to LVL3.
1960 *
1961 * So the simple check whether the lower bits of the current
1962 * level are 0 or not is sufficient for all cases.
1963 */
1964 adj = lvl_clk ? 1 : 0;
1965 clk >>= LVL_CLK_SHIFT;
1966 clk += adj;
1967 }
1968
1969 WRITE_ONCE(base->next_expiry, next);
1970 base->next_expiry_recalc = false;
1971 base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA);
1972 }
1973
1974 #ifdef CONFIG_NO_HZ_COMMON
1975 /*
1976 * Check, if the next hrtimer event is before the next timer wheel
1977 * event:
1978 */
1979 static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
1980 {
1981 u64 nextevt = hrtimer_get_next_event();
1982
1983 /*
1984 * If high resolution timers are enabled
1985 * hrtimer_get_next_event() returns KTIME_MAX.
1986 */
1987 if (expires <= nextevt)
1988 return expires;
1989
1990 /*
1991 * If the next timer is already expired, return the tick base
1992 * time so the tick is fired immediately.
1993 */
1994 if (nextevt <= basem)
1995 return basem;
1996
1997 /*
1998 * Round up to the next jiffy. High resolution timers are
1999 * off, so the hrtimers are expired in the tick and we need to
2000 * make sure that this tick really expires the timer to avoid
2001 * a ping pong of the nohz stop code.
2002 *
2003 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
2004 */
2005 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
2006 }
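/*
 * Editorial worked example, not part of the original source, assuming
 * HZ = 1000 so TICK_NSEC = 1,000,000 ns. With basem = 1,000,000,000 ns and
 * the next hrtimer event at nextevt = 1,000,002,500 ns, the result is
 * DIV_ROUND_UP_ULL(1000002500, 1000000) * 1000000 = 1,001,000,000 ns, the
 * next full jiffy boundary, so the programmed tick really expires the
 * hrtimer instead of stopping short of it.
 */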
2007
2008 static unsigned long next_timer_interrupt(struct timer_base *base,
2009 unsigned long basej)
2010 {
2011 if (base->next_expiry_recalc)
2012 timer_recalc_next_expiry(base);
2013
2014 /*
2015 * Move next_expiry for the empty base into the future to prevent an
2016 * unnecessary raise of the timer softirq when the next_expiry value
2017 * will be reached even if there is no timer pending.
2018 *
2019 * This update is also required to make timer_base::next_expiry values
2020 * easily comparable to find out which base holds the first pending timer.
2021 */
2022 if (!base->timers_pending)
2023 WRITE_ONCE(base->next_expiry, basej + NEXT_TIMER_MAX_DELTA);
2024
2025 return base->next_expiry;
2026 }
2027
2028 static unsigned long fetch_next_timer_interrupt(unsigned long basej, u64 basem,
2029 struct timer_base *base_local,
2030 struct timer_base *base_global,
2031 struct timer_events *tevt)
2032 {
2033 unsigned long nextevt, nextevt_local, nextevt_global;
2034 bool local_first;
2035
2036 nextevt_local = next_timer_interrupt(base_local, basej);
2037 nextevt_global = next_timer_interrupt(base_global, basej);
2038
2039 local_first = time_before_eq(nextevt_local, nextevt_global);
2040
2041 nextevt = local_first ? nextevt_local : nextevt_global;
2042
2043 /*
2044 * If the @nextevt is at max. one tick away, use @nextevt and store
2045 * it in the local expiry value. The next global event is irrelevant in
2046 * this case and can be left as KTIME_MAX.
2047 */
2048 if (time_before_eq(nextevt, basej + 1)) {
2049 /* If we missed a tick already, force 0 delta */
2050 if (time_before(nextevt, basej))
2051 nextevt = basej;
2052 tevt->local = basem + (u64)(nextevt - basej) * TICK_NSEC;
2053
2054 /*
2055 * This is required for the remote check only but it doesn't
2056 * hurt, when it is done for both call sites:
2057 *
2058 * * The remote callers will only take care of the global timers
2059 * as local timers will be handled by the CPU itself. If
2060 * tevt->global is not updated with the already missed first global
2061 * timer, it is possible that it will be missed completely.
2062 *
2063 * * The local callers will ignore the tevt->global anyway, when
2064 * nextevt is max. one tick away.
2065 */
2066 if (!local_first)
2067 tevt->global = tevt->local;
2068 return nextevt;
2069 }
2070
2071 /*
2072 * Update tevt.* values:
2073 *
2074 * If the local queue expires first, then the global event can be
2075 * ignored. If the global queue is empty, nothing to do either.
2076 */
2077 if (!local_first && base_global->timers_pending)
2078 tevt->global = basem + (u64)(nextevt_global - basej) * TICK_NSEC;
2079
2080 if (base_local->timers_pending)
2081 tevt->local = basem + (u64)(nextevt_local - basej) * TICK_NSEC;
2082
2083 return nextevt;
2084 }
2085
2086 # ifdef CONFIG_SMP
2087 /**
2088 * fetch_next_timer_interrupt_remote() - Store next timers into @tevt
2089 * @basej: base time jiffies
2090 * @basem: base time clock monotonic
2091 * @tevt: Pointer to the storage for the expiry values
2092 * @cpu: Remote CPU
2093 *
2094 * Stores the next pending local and global timer expiry values in the
2095 * struct pointed to by @tevt. If a queue is empty the corresponding
2096 * field is set to KTIME_MAX. If local event expires before global
2097 * event, global event is set to KTIME_MAX as well.
2098 *
2099 * Caller needs to make sure timer base locks are held (use
2100 * timer_lock_remote_bases() for this purpose).
2101 */
2102 void fetch_next_timer_interrupt_remote(unsigned long basej, u64 basem,
2103 struct timer_events *tevt,
2104 unsigned int cpu)
2105 {
2106 struct timer_base *base_local, *base_global;
2107
2108 /* Preset local / global events */
2109 tevt->local = tevt->global = KTIME_MAX;
2110
2111 base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu);
2112 base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
2113
2114 lockdep_assert_held(&base_local->lock);
2115 lockdep_assert_held(&base_global->lock);
2116
2117 fetch_next_timer_interrupt(basej, basem, base_local, base_global, tevt);
2118 }
2119
2120 /**
2121 * timer_unlock_remote_bases - unlock timer bases of cpu
2122 * @cpu: Remote CPU
2123 *
2124 * Unlocks the remote timer bases.
2125 */
2126 void timer_unlock_remote_bases(unsigned int cpu)
2127 __releases(timer_bases[BASE_LOCAL]->lock)
2128 __releases(timer_bases[BASE_GLOBAL]->lock)
2129 {
2130 struct timer_base *base_local, *base_global;
2131
2132 base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu);
2133 base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
2134
2135 raw_spin_unlock(&base_global->lock);
2136 raw_spin_unlock(&base_local->lock);
2137 }
2138
2139 /**
2140 * timer_lock_remote_bases - lock timer bases of cpu
2141 * @cpu: Remote CPU
2142 *
2143 * Locks the remote timer bases.
2144 */
2145 void timer_lock_remote_bases(unsigned int cpu)
2146 __acquires(timer_bases[BASE_LOCAL]->lock)
2147 __acquires(timer_bases[BASE_GLOBAL]->lock)
2148 {
2149 struct timer_base *base_local, *base_global;
2150
2151 base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu);
2152 base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
2153
2154 lockdep_assert_irqs_disabled();
2155
2156 raw_spin_lock(&base_local->lock);
2157 raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
2158 }
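/*
 * Editorial usage sketch, not part of the original source: a hedged,
 * minimal illustration of the locking contract for a remote expiry lookup
 * with the three helpers above. How @basej/@basem are obtained is omitted
 * and hypothetical:
 *
 *	struct timer_events tevt;
 *	unsigned long basej = READ_ONCE(jiffies);
 *	u64 basem = ...;	// matching CLOCK_MONOTONIC time in ns
 *
 *	// interrupts must be disabled here
 *	timer_lock_remote_bases(cpu);
 *	fetch_next_timer_interrupt_remote(basej, basem, &tevt, cpu);
 *	timer_unlock_remote_bases(cpu);
 *
 *	// tevt.local / tevt.global now hold the next local/global expiry
 *	// (in ns) of @cpu, or KTIME_MAX for an empty queue.
 */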
2159
2160 /**
2161 * timer_base_is_idle() - Return whether timer base is set idle
2162 *
2163 * Returns the is_idle value of the local timer base.
2164 */
2165 bool timer_base_is_idle(void)
2166 {
2167 return __this_cpu_read(timer_bases[BASE_LOCAL].is_idle);
2168 }
2169
2170 static void __run_timer_base(struct timer_base *base);
2171
2172 /**
2173 * timer_expire_remote() - expire global timers of cpu
2174 * @cpu: Remote CPU
2175 *
2176 * Expire timers of global base of remote CPU.
2177 */
2178 void timer_expire_remote(unsigned int cpu)
2179 {
2180 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
2181
2182 __run_timer_base(base);
2183 }
2184
2185 static void timer_use_tmigr(unsigned long basej, u64 basem,
2186 unsigned long *nextevt, bool *tick_stop_path,
2187 bool timer_base_idle, struct timer_events *tevt)
2188 {
2189 u64 next_tmigr;
2190
2191 if (timer_base_idle)
2192 next_tmigr = tmigr_cpu_new_timer(tevt->global);
2193 else if (tick_stop_path)
2194 next_tmigr = tmigr_cpu_deactivate(tevt->global);
2195 else
2196 next_tmigr = tmigr_quick_check(tevt->global);
2197
2198 /*
2199 * If the CPU is the last going idle in timer migration hierarchy, make
2200 * sure the CPU will wake up in time to handle remote timers.
2201 * next_tmigr == KTIME_MAX if other CPUs are still active.
2202 */
2203 if (next_tmigr < tevt->local) {
2204 u64 tmp;
2205
2206 /* If we missed a tick already, force 0 delta */
2207 if (next_tmigr < basem)
2208 next_tmigr = basem;
2209
2210 tmp = div_u64(next_tmigr - basem, TICK_NSEC);
2211
2212 *nextevt = basej + (unsigned long)tmp;
2213 tevt->local = next_tmigr;
2214 }
2215 }
2216 # else
2217 static void timer_use_tmigr(unsigned long basej, u64 basem,
2218 unsigned long *nextevt, bool *tick_stop_path,
2219 bool timer_base_idle, struct timer_events *tevt)
2220 {
2221 /*
2222 * Make sure first event is written into tevt->local to not miss a
2223 * timer on !SMP systems.
2224 */
2225 tevt->local = min_t(u64, tevt->local, tevt->global);
2226 }
2227 # endif /* CONFIG_SMP */
2228
2229 static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
2230 bool *idle)
2231 {
2232 struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
2233 struct timer_base *base_local, *base_global;
2234 unsigned long nextevt;
2235 bool idle_is_possible;
2236
2237 /*
2238 * When the CPU is offline, the tick is cancelled and nothing is supposed
2239 * to try to stop it.
2240 */
2241 if (WARN_ON_ONCE(cpu_is_offline(smp_processor_id()))) {
2242 if (idle)
2243 *idle = true;
2244 return tevt.local;
2245 }
2246
2247 base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
2248 base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
2249
2250 raw_spin_lock(&base_local->lock);
2251 raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
2252
2253 nextevt = fetch_next_timer_interrupt(basej, basem, base_local,
2254 base_global, &tevt);
2255
2256 /*
2257 * If the next event is only one jiffy ahead there is no need to call
2258 * timer migration hierarchy related functions. The value for the next
2259 * global timer in the @tevt struct then equals KTIME_MAX. This is also
2260 * true when the timer base is idle.
2261 *
2262 * The proper timer migration hierarchy function depends on the callsite
2263 * and whether timer base is idle or not. @nextevt will be updated when
2264 * this CPU needs to handle the first timer migration hierarchy
2265 * event. See timer_use_tmigr() for detailed information.
2266 */
2267 idle_is_possible = time_after(nextevt, basej + 1);
2268 if (idle_is_possible)
2269 timer_use_tmigr(basej, basem, &nextevt, idle,
2270 base_local->is_idle, &tevt);
2271
2272 /*
2273 * We have a fresh next event. Check whether we can forward the
2274 * base.
2275 */
2276 __forward_timer_base(base_local, basej);
2277 __forward_timer_base(base_global, basej);
2278
2279 /*
2280 * Set base->is_idle only when caller is timer_base_try_to_set_idle()
2281 */
2282 if (idle) {
2283 /*
2284 * Bases are idle if the next event is more than a tick
2285 * away. Caution: @nextevt could have changed by enqueueing a
2286 * global timer into timer migration hierarchy. Therefore a new
2287 * check is required here.
2288 *
2289 * If the base is marked idle then any timer add operation must
2290 * forward the base clk itself to keep granularity small. This
2291 * idle logic is only maintained for the BASE_LOCAL and
2292 * BASE_GLOBAL bases; deferrable timers may still see large
2293 * granularity skew (by design).
2294 */
2295 if (!base_local->is_idle && time_after(nextevt, basej + 1)) {
2296 base_local->is_idle = true;
2297 /*
2298 * Global timers queued locally while running in a task
2299 * in nohz_full mode need a self-IPI to kick reprogramming
2300 * in IRQ tail.
2301 */
2302 if (tick_nohz_full_cpu(base_local->cpu))
2303 base_global->is_idle = true;
2304 trace_timer_base_idle(true, base_local->cpu);
2305 }
2306 *idle = base_local->is_idle;
2307
2308 /*
2309 * When timer base is not set idle, undo the effect of
2310 * tmigr_cpu_deactivate() to prevent inconsistent states - active
2311 * timer base but inactive timer migration hierarchy.
2312 *
2313 * When timer base was already marked idle, nothing will be
2314 * changed here.
2315 */
2316 if (!base_local->is_idle && idle_is_possible)
2317 tmigr_cpu_activate();
2318 }
2319
2320 raw_spin_unlock(&base_global->lock);
2321 raw_spin_unlock(&base_local->lock);
2322
2323 return cmp_next_hrtimer_event(basem, tevt.local);
2324 }
2325
2326 /**
2327 * get_next_timer_interrupt() - return the time (clock mono) of the next timer
2328 * @basej: base time jiffies
2329 * @basem: base time clock monotonic
2330 *
2331 * Returns the tick aligned clock monotonic time of the next pending timer or
2332 * KTIME_MAX if no timer is pending. If a timer of the global base was queued
2333 * into the timer migration hierarchy, the first global timer is not taken into
2334 * account. If it was the last CPU of the timer migration hierarchy going idle,
2335 * the first global event is taken into account.
2336 */
2337 u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
2338 {
2339 return __get_next_timer_interrupt(basej, basem, NULL);
2340 }
2341
2342 /**
2343 * timer_base_try_to_set_idle() - Try to set the idle state of the timer bases
2344 * @basej: base time jiffies
2345 * @basem: base time clock monotonic
2346 * @idle: pointer to store the value of timer_base->is_idle on return;
2347 * *idle indicates whether the tick was already stopped
2348 *
2349 * Returns the tick aligned clock monotonic time of the next pending timer or
2350 * KTIME_MAX if no timer is pending. When tick was already stopped KTIME_MAX is
2351 * returned as well.
2352 */
2353 u64 timer_base_try_to_set_idle(unsigned long basej, u64 basem, bool *idle)
2354 {
2355 if (*idle)
2356 return KTIME_MAX;
2357
2358 return __get_next_timer_interrupt(basej, basem, idle);
2359 }
2360
2361 /**
2362 * timer_clear_idle - Clear the idle state of the timer base
2363 *
2364 * Called with interrupts disabled
2365 */
2366 void timer_clear_idle(void)
2367 {
2368 /*
2369 * We do this unlocked. The worst outcome is a remote pinned timer
2370 * enqueue sending a pointless IPI, but taking the lock would just
2371 * make the window for sending the IPI a few instructions smaller
2372 * for the cost of taking the lock in the exit from idle
2373 * path. Required for BASE_LOCAL only.
2374 */
2375 __this_cpu_write(timer_bases[BASE_LOCAL].is_idle, false);
2376 if (tick_nohz_full_cpu(smp_processor_id()))
2377 __this_cpu_write(timer_bases[BASE_GLOBAL].is_idle, false);
2378 trace_timer_base_idle(false, smp_processor_id());
2379
2380 /* Activate without holding the timer_base->lock */
2381 tmigr_cpu_activate();
2382 }
2383 #endif
2384
2385 /**
2386 * __run_timers - run all expired timers (if any) on this CPU.
2387 * @base: the timer vector to be processed.
2388 */
2389 static inline void __run_timers(struct timer_base *base)
2390 {
2391 struct hlist_head heads[LVL_DEPTH];
2392 int levels;
2393
2394 lockdep_assert_held(&base->lock);
2395
2396 if (base->running_timer)
2397 return;
2398
2399 while (time_after_eq(jiffies, base->clk) &&
2400 time_after_eq(jiffies, base->next_expiry)) {
2401 levels = collect_expired_timers(base, heads);
2402 /*
2403 * The two possible reasons for not finding any expired
2404 * timer at this clk are that all matching timers have been
2405 * dequeued or no timer has been queued since
2406 * base::next_expiry was set to base::clk +
2407 * NEXT_TIMER_MAX_DELTA.
2408 */
2409 WARN_ON_ONCE(!levels && !base->next_expiry_recalc
2410 && base->timers_pending);
2411 /*
2412 * While executing timers, base->clk is set 1 offset ahead of
2413 * jiffies to avoid endless requeuing to current jiffies.
2414 */
2415 base->clk++;
2416 timer_recalc_next_expiry(base);
2417
2418 while (levels--)
2419 expire_timers(base, heads + levels);
2420 }
2421 }
2422
2423 static void __run_timer_base(struct timer_base *base)
2424 {
2425 if (time_before(jiffies, base->next_expiry))
2426 return;
2427
2428 timer_base_lock_expiry(base);
2429 raw_spin_lock_irq(&base->lock);
2430 __run_timers(base);
2431 raw_spin_unlock_irq(&base->lock);
2432 timer_base_unlock_expiry(base);
2433 }
2434
2435 static void run_timer_base(int index)
2436 {
2437 struct timer_base *base = this_cpu_ptr(&timer_bases[index]);
2438
2439 __run_timer_base(base);
2440 }
2441
2442 /*
2443 * This function runs timers and the timer-tq in bottom half context.
2444 */
2445 static __latent_entropy void run_timer_softirq(void)
2446 {
2447 run_timer_base(BASE_LOCAL);
2448 if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) {
2449 run_timer_base(BASE_GLOBAL);
2450 run_timer_base(BASE_DEF);
2451
2452 if (is_timers_nohz_active())
2453 tmigr_handle_remote();
2454 }
2455 }
2456
2457 /*
2458 * Called by the local, per-CPU timer interrupt on SMP.
2459 */
2460 static void run_local_timers(void)
2461 {
2462 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
2463
2464 hrtimer_run_queues();
2465
2466 for (int i = 0; i < NR_BASES; i++, base++) {
2467 /*
2468 * Raise the softirq only if required.
2469 *
2470 * timer_base::next_expiry can be written by a remote CPU while
2471 * holding the lock. If this write happens at the same time as
2472 * the lockless local read, the sanity checker could complain about
2473 * data corruption.
2474 *
2475 * There are two possible situations where
2476 * timer_base::next_expiry is written by a remote CPU:
2477 *
2478 * 1. Remote CPU expires global timers of this CPU and updates
2479 * timer_base::next_expiry of BASE_GLOBAL afterwards in
2480 * next_timer_interrupt() or timer_recalc_next_expiry(). The
2481 * worst outcome is a superfluous raise of the timer softirq
2482 * when the not yet updated value is read.
2483 *
2484 * 2. A new first pinned timer is enqueued by a remote CPU
2485 * and therefore timer_base::next_expiry of BASE_LOCAL is
2486 * updated. When this update is missed, this isn't a
2487 * problem, as an IPI is executed nevertheless when the CPU
2488 * was idle before. When the CPU wasn't idle but the update
2489 * is missed, then the timer would expire one jiffy late -
2490 * bad luck.
2491 *
2492 * Those unlikely corner cases where the worst outcome is only a
2493 * one jiffy delay or a superfluous raise of the softirq are
2494 * cheaper than always doing the check while holding
2495 * the lock.
2496 *
2497 * Possible remote writers are using WRITE_ONCE(). Local reader
2498 * uses therefore READ_ONCE().
2499 */
2500 if (time_after_eq(jiffies, READ_ONCE(base->next_expiry)) ||
2501 (i == BASE_DEF && tmigr_requires_handle_remote())) {
2502 raise_softirq(TIMER_SOFTIRQ);
2503 return;
2504 }
2505 }
2506 }
2507
2508 /*
2509 * Called from the timer interrupt handler to charge one tick to the current
2510 * process. user_tick is 1 if the tick is user time, 0 for system.
2511 */
2512 void update_process_times(int user_tick)
2513 {
2514 struct task_struct *p = current;
2515
2516 /* Note: this timer irq context must be accounted for as well. */
2517 account_process_tick(p, user_tick);
2518 run_local_timers();
2519 rcu_sched_clock_irq(user_tick);
2520 #ifdef CONFIG_IRQ_WORK
2521 if (in_irq())
2522 irq_work_tick();
2523 #endif
2524 sched_tick();
2525 if (IS_ENABLED(CONFIG_POSIX_TIMERS))
2526 run_posix_cpu_timers();
2527 }
2528
2529 /*
2530 * Since schedule_timeout()'s timer is defined on the stack, it must store
2531 * the target task on the stack as well.
2532 */
2533 struct process_timer {
2534 struct timer_list timer;
2535 struct task_struct *task;
2536 };
2537
2538 static void process_timeout(struct timer_list *t)
2539 {
2540 struct process_timer *timeout = from_timer(timeout, t, timer);
2541
2542 wake_up_process(timeout->task);
2543 }
2544
2545 /**
2546 * schedule_timeout - sleep until timeout
2547 * @timeout: timeout value in jiffies
2548 *
2549 * Make the current task sleep until @timeout jiffies have elapsed.
2550 * The function behavior depends on the current task state
2551 * (see also set_current_state() description):
2552 *
2553 * %TASK_RUNNING - the scheduler is called, but the task does not sleep
2554 * at all. That happens because sched_submit_work() does nothing for
2555 * tasks in %TASK_RUNNING state.
2556 *
2557 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
2558 * pass before the routine returns unless the current task is explicitly
2559 * woken up (e.g. by wake_up_process()).
2560 *
2561 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
2562 * delivered to the current task or the current task is explicitly woken
2563 * up.
2564 *
2565 * The current task state is guaranteed to be %TASK_RUNNING when this
2566 * routine returns.
2567 *
2568 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
2569 * the CPU away without a bound on the timeout. In this case the return
2570 * value will be %MAX_SCHEDULE_TIMEOUT.
2571 *
2572 * Returns 0 when the timer has expired otherwise the remaining time in
2573 * jiffies will be returned. In all cases the return value is guaranteed
2574 * to be non-negative.
2575 */
2576 signed long __sched schedule_timeout(signed long timeout)
2577 {
2578 struct process_timer timer;
2579 unsigned long expire;
2580
2581 switch (timeout)
2582 {
2583 case MAX_SCHEDULE_TIMEOUT:
2584 /*
2585 * These two special cases are useful for caller
2586 * convenience. Nothing more. We could take
2587 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
2588 * but I'd like to return a valid offset (>=0) to allow
2589 * the caller to do everything it wants with the retval.
2590 */
2591 schedule();
2592 goto out;
2593 default:
2594 /*
2595 * Another bit of paranoia. Note that the retval will be
2596 * 0 since no piece of the kernel is supposed to check
2597 * for a negative retval of schedule_timeout() (since it
2598 * should never happen anyway). You just have the printk()
2599 * that will tell you if something has gone wrong and where.
2600 */
2601 if (timeout < 0) {
2602 printk(KERN_ERR "schedule_timeout: wrong timeout "
2603 "value %lx\n", timeout);
2604 dump_stack();
2605 __set_current_state(TASK_RUNNING);
2606 goto out;
2607 }
2608 }
2609
2610 expire = timeout + jiffies;
2611
2612 timer.task = current;
2613 timer_setup_on_stack(&timer.timer, process_timeout, 0);
2614 __mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
2615 schedule();
2616 del_timer_sync(&timer.timer);
2617
2618 /* Remove the timer from the object tracker */
2619 destroy_timer_on_stack(&timer.timer);
2620
2621 timeout = expire - jiffies;
2622
2623 out:
2624 return timeout < 0 ? 0 : timeout;
2625 }
2626 EXPORT_SYMBOL(schedule_timeout);
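/*
 * Editorial usage sketch, not part of the original source: the canonical,
 * hedged pattern for calling schedule_timeout() directly; the surrounding
 * wait condition is an illustrative assumption:
 *
 *	signed long remaining;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *	if (remaining > 0)
 *		; // woken up early, 'remaining' jiffies were left
 *	else
 *		; // the full timeout elapsed
 *
 * The task state must be set before the call; in TASK_RUNNING the function
 * schedules but does not sleep, as documented above.
 */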
2627
2628 /*
2629 * We can use __set_current_state() here because schedule_timeout() calls
2630 * schedule() unconditionally.
2631 */
2632 signed long __sched schedule_timeout_interruptible(signed long timeout)
2633 {
2634 __set_current_state(TASK_INTERRUPTIBLE);
2635 return schedule_timeout(timeout);
2636 }
2637 EXPORT_SYMBOL(schedule_timeout_interruptible);
2638
2639 signed long __sched schedule_timeout_killable(signed long timeout)
2640 {
2641 __set_current_state(TASK_KILLABLE);
2642 return schedule_timeout(timeout);
2643 }
2644 EXPORT_SYMBOL(schedule_timeout_killable);
2645
2646 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
2647 {
2648 __set_current_state(TASK_UNINTERRUPTIBLE);
2649 return schedule_timeout(timeout);
2650 }
2651 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
2652
2653 /*
2654 * Like schedule_timeout_uninterruptible(), except this task will not contribute
2655 * to load average.
2656 */
2657 signed long __sched schedule_timeout_idle(signed long timeout)
2658 {
2659 __set_current_state(TASK_IDLE);
2660 return schedule_timeout(timeout);
2661 }
2662 EXPORT_SYMBOL(schedule_timeout_idle);
2663
2664 #ifdef CONFIG_HOTPLUG_CPU
2665 static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
2666 {
2667 struct timer_list *timer;
2668 int cpu = new_base->cpu;
2669
2670 while (!hlist_empty(head)) {
2671 timer = hlist_entry(head->first, struct timer_list, entry);
2672 detach_timer(timer, false);
2673 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
2674 internal_add_timer(new_base, timer);
2675 }
2676 }
2677
2678 int timers_prepare_cpu(unsigned int cpu)
2679 {
2680 struct timer_base *base;
2681 int b;
2682
2683 for (b = 0; b < NR_BASES; b++) {
2684 base = per_cpu_ptr(&timer_bases[b], cpu);
2685 base->clk = jiffies;
2686 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
2687 base->next_expiry_recalc = false;
2688 base->timers_pending = false;
2689 base->is_idle = false;
2690 }
2691 return 0;
2692 }
2693
2694 int timers_dead_cpu(unsigned int cpu)
2695 {
2696 struct timer_base *old_base;
2697 struct timer_base *new_base;
2698 int b, i;
2699
2700 for (b = 0; b < NR_BASES; b++) {
2701 old_base = per_cpu_ptr(&timer_bases[b], cpu);
2702 new_base = get_cpu_ptr(&timer_bases[b]);
2703 /*
2704 * The caller is globally serialized and nobody else
2705 * takes two locks at once, so deadlock is not possible.
2706 */
2707 raw_spin_lock_irq(&new_base->lock);
2708 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
2709
2710 /*
2711 * The current CPU's base clock might be stale. Update it
2712 * before moving the timers over.
2713 */
2714 forward_timer_base(new_base);
2715
2716 WARN_ON_ONCE(old_base->running_timer);
2717 old_base->running_timer = NULL;
2718
2719 for (i = 0; i < WHEEL_SIZE; i++)
2720 migrate_timer_list(new_base, old_base->vectors + i);
2721
2722 raw_spin_unlock(&old_base->lock);
2723 raw_spin_unlock_irq(&new_base->lock);
2724 put_cpu_ptr(&timer_bases);
2725 }
2726 return 0;
2727 }
2728
2729 #endif /* CONFIG_HOTPLUG_CPU */
2730
2731 static void __init init_timer_cpu(int cpu)
2732 {
2733 struct timer_base *base;
2734 int i;
2735
2736 for (i = 0; i < NR_BASES; i++) {
2737 base = per_cpu_ptr(&timer_bases[i], cpu);
2738 base->cpu = cpu;
2739 raw_spin_lock_init(&base->lock);
2740 base->clk = jiffies;
2741 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
2742 timer_base_init_expiry_lock(base);
2743 }
2744 }
2745
2746 static void __init init_timer_cpus(void)
2747 {
2748 int cpu;
2749
2750 for_each_possible_cpu(cpu)
2751 init_timer_cpu(cpu);
2752 }
2753
2754 void __init init_timers(void)
2755 {
2756 init_timer_cpus();
2757 posix_cputimers_init_work();
2758 open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
2759 }
2760
2761 /**
2762 * msleep - sleep safely even with waitqueue interruptions
2763 * @msecs: Time in milliseconds to sleep for
2764 */
2765 void msleep(unsigned int msecs)
2766 {
2767 unsigned long timeout = msecs_to_jiffies(msecs);
2768
2769 while (timeout)
2770 timeout = schedule_timeout_uninterruptible(timeout);
2771 }
2772
2773 EXPORT_SYMBOL(msleep);
2774
2775 /**
2776 * msleep_interruptible - sleep waiting for signals
2777 * @msecs: Time in milliseconds to sleep for
2778 */
2779 unsigned long msleep_interruptible(unsigned int msecs)
2780 {
2781 unsigned long timeout = msecs_to_jiffies(msecs);
2782
2783 while (timeout && !signal_pending(current))
2784 timeout = schedule_timeout_interruptible(timeout);
2785 return jiffies_to_msecs(timeout);
2786 }
2787
2788 EXPORT_SYMBOL(msleep_interruptible);
2789
2790 /**
2791 * usleep_range_state - Sleep for an approximate time in a given state
2792 * @min: Minimum time in usecs to sleep
2793 * @max: Maximum time in usecs to sleep
2794 * @state: State the current task will be in while sleeping
2795 *
2796 * In non-atomic context where the exact wakeup time is flexible, use
2797 * usleep_range_state() instead of udelay(). The sleep improves responsiveness
2798 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
2799 * power usage by allowing hrtimers to take advantage of an already-
2800 * scheduled interrupt instead of scheduling a new one just for this sleep.
2801 */
2802 void __sched usleep_range_state(unsigned long min, unsigned long max,
2803 unsigned int state)
2804 {
2805 ktime_t exp = ktime_add_us(ktime_get(), min);
2806 u64 delta = (u64)(max - min) * NSEC_PER_USEC;
2807
2808 for (;;) {
2809 __set_current_state(state);
2810 /* Do not return before the requested sleep time has elapsed */
2811 if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
2812 break;
2813 }
2814 }
2815 EXPORT_SYMBOL(usleep_range_state);
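/*
 * Editorial usage sketch, not part of the original source: the common
 * helpers in <linux/delay.h> (e.g. usleep_range(), which sleeps in
 * TASK_UNINTERRUPTIBLE) end up here. A hedged example for a device that
 * needs roughly 100us between two register writes; 'dev' and the register
 * names are illustrative assumptions:
 *
 *	writel(val, dev->base + REG_CTRL);
 *	usleep_range_state(100, 200, TASK_UNINTERRUPTIBLE);
 *	writel(val2, dev->base + REG_CFG);
 *
 * The 100us of slack lets the hrtimer subsystem coalesce the wakeup with an
 * already programmed interrupt instead of forcing an extra one.
 */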
2816