// SPDX-License-Identifier: GPL-2.0-only
/*
 * Infrastructure for migratable timers
 *
 * Copyright(C) 2022 linutronix GmbH
 */
#include <linux/cpuhotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/timerqueue.h>
#include <trace/events/ipi.h>
#include <linux/sched/isolation.h>

#include "timer_migration.h"
#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer_migration.h>

/*
 * The timer migration mechanism is built on a hierarchy of groups. The
 * lowest level group contains CPUs, the next level groups of CPU groups
 * and so forth. The CPU groups are kept per node so for the normal case
 * lock contention won't happen across nodes. Depending on the number of
 * CPUs per node even the next level might be kept as groups of CPU groups
 * per node and only the levels above cross the node topology.
 *
 * Example topology for a two node system with 24 CPUs each.
 *
 * LVL 2                           [GRP2:0]
 *                              GRP1:0 = GRP1:M
 *
 * LVL 1            [GRP1:0]                    [GRP1:1]
 *               GRP0:0 - GRP0:2             GRP0:3 - GRP0:5
 *
 * LVL 0  [GRP0:0]  [GRP0:1]  [GRP0:2]  [GRP0:3]  [GRP0:4]  [GRP0:5]
 * CPUS     0-7       8-15      16-23     24-31     32-39     40-47
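 *
 * As an illustration, the depth of the hierarchy follows from the group
 * fan-out. Assuming eight children per group (matching BIT_CNT below), a
 * sketch of the number of levels required for @ncpus CPUs on one node::
 *
 *	// Hypothetical helper, for illustration only.
 *	static unsigned int example_levels(unsigned int ncpus)
 *	{
 *		unsigned int lvl = 1;
 *
 *		while (ncpus > 8) {
 *			ncpus = DIV_ROUND_UP(ncpus, 8);
 *			lvl++;
 *		}
 *		return lvl;
 *	}
 *
 * Applied per node to the example above: 24 CPUs need two levels (three
 * LVL 0 groups under one LVL 1 group); the cross node LVL 2 group on top
 * yields the three levels shown.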
 *
 * The groups hold a timer queue of events sorted by expiry time. These
 * queues are updated when CPUs go idle. When they come out of idle, the
 * ignore flag of their queued events is set.
 *
 * Each group has a designated migrator CPU/group as long as a CPU/group is
 * active in the group. This designated role is necessary to prevent all
 * active CPUs in a group from trying to migrate expired timers from other
 * CPUs, which would result in massive lock bouncing.
 *
 * When a CPU is awake, it checks in its own timer tick the group hierarchy
 * up to the point where it is assigned the migrator role; if no CPU in a
 * group is active, it also checks the groups where no migrator is set
 * (TMIGR_NONE).
 *
 * If it finds expired timers in one of the group queues it pulls them over
 * from the idle CPU and runs the timer function. After that it updates the
 * group and the parent groups if required.
 *
 * CPUs which go idle arm their CPU local timer hardware for the next local
 * (pinned) timer event. If the next migratable timer expires after the
 * next local timer or the CPU has no migratable timer pending then the
 * CPU does not queue an event in the LVL0 group. If the next migratable
 * timer expires before the next local timer then the CPU queues that timer
 * in the LVL0 group. In both cases the CPU marks itself idle in the LVL0
 * group.
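 *
 * In pseudo code, the decision on idle entry sketched above (hedged; the
 * actual logic lives in tmigr_cpu_deactivate() and its callers in the
 * tick code)::
 *
 *	if (next_global < next_local) {
 *		// queue the global event in the LVL0 group
 *	} else {
 *		// nothing to queue, the event stays ignored
 *	}
 *	// in both cases: mark the CPU idle in the LVL0 group and arm
 *	// the CPU local timer hardware for the next local event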
 *
 * When a CPU comes out of idle and when a group has at least a single
 * active child, the ignore flag of the tmigr_event is set. This indicates
 * that the event is ignored even if it is still enqueued in the parent
 * group's timer queue. It will be removed when the timer queue is touched
 * the next time. This spares locking in the active path as the lock
 * protects (after setup) only event information. For more information
 * about locking, please read the section "Locking rules".
 *
 * If the CPU is the migrator of the group then it delegates that role to
 * the next active CPU in the group or sets migrator to TMIGR_NONE when
 * there is no active CPU in the group. This delegation needs to be
 * propagated up the hierarchy so the handover from other leaves can happen
 * at all hierarchy levels without doing a search.
 *
 * When the last CPU in the system goes idle, then it drops all migrator
 * duties up to the top level of the hierarchy (LVL2 in the example). It
 * then has to make sure that it arms its own local hardware timer for
 * the earliest event in the system.
 *
 *
 * Lifetime rules:
 * ---------------
 *
 * The groups are built up at init time or when CPUs come online. They are
 * not destroyed when a group becomes empty due to offlining. The group
 * just won't participate in the hierarchy management anymore. Destroying
 * groups would result in interesting race conditions which would just make
 * the whole mechanism slow and complex.
 *
 *
 * Locking rules:
 * --------------
 *
 * For setting up new groups and handling events it's required to lock both
 * child and parent group. The lock ordering is always bottom up. This also
 * includes the per CPU locks in struct tmigr_cpu. For updating the migrator and
 * active CPU/group information atomic_try_cmpxchg() is used instead and only
 * the per CPU tmigr_cpu->lock is held.
 *
 * During the setup of groups tmigr_level_list is required. It is protected by
 * @tmigr_mutex.
 *
 * When @timer_base->lock as well as tmigr related locks are required, the lock
 * ordering is: first @timer_base->lock, afterwards tmigr related locks.
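 *
 * For illustration, the bottom up lock ordering as used for event updates
 * (compare tmigr_update_events() below)::
 *
 *	raw_spin_lock(&child->lock);
 *	raw_spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);
 *
 *	// read child and group state, update the group's timerqueue
 *
 *	raw_spin_unlock(&group->lock);
 *	raw_spin_unlock(&child->lock);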
 *
 *
 * Protection of the tmigr group state information:
 * ------------------------------------------------
 *
 * The state information with the list of active children and migrator needs to
 * be protected by a sequence counter. It prevents a race when updates in child
 * groups are propagated out of order. The state update is performed lockless
 * and group wise. The following scenario describes what happens without
 * updating the sequence counter:
 *
 * Therefore, let's take three groups and four CPUs (CPU2 and CPU3 as well
 * as GRP0:1 will not change during the scenario):
 *
 * LVL 1            [GRP1:0]
 *                  migrator = GRP0:1
 *                  active   = GRP0:0, GRP0:1
 *                /                \
 * LVL 0  [GRP0:0]                  [GRP0:1]
 *         migrator = CPU0           migrator = CPU2
 *         active   = CPU0           active   = CPU2
 *        /         \                /         \
 * CPUs   0          1               2          3
 *         active     idle           active     idle
 *
 *
 * 1. CPU0 goes idle. As the update is performed group wise, in the first step
 *    only GRP0:0 is updated. The update of GRP1:0 is pending as CPU0 has to
 *    walk the hierarchy.
 *
 * LVL 1            [GRP1:0]
 *                  migrator = GRP0:1
 *                  active   = GRP0:0, GRP0:1
 *                /                \
 * LVL 0  [GRP0:0]                  [GRP0:1]
 *     --> migrator = TMIGR_NONE     migrator = CPU2
 *     --> active   =                active   = CPU2
 *        /         \                /         \
 * CPUs   0          1               2          3
 *     --> idle       idle           active     idle
 *
 * 2. While CPU0 goes idle and continues to update the state, CPU1 comes out of
 *    idle. CPU1 updates GRP0:0. The update for GRP1:0 is pending as CPU1 also
 *    has to walk the hierarchy. Both CPUs (CPU0 and CPU1) now walk the
 *    hierarchy to perform the needed update from their point of view. The
 *    currently visible state looks like this:
 *
 * LVL 1            [GRP1:0]
 *                  migrator = GRP0:1
 *                  active   = GRP0:0, GRP0:1
 *                /                \
 * LVL 0  [GRP0:0]                  [GRP0:1]
 *     --> migrator = CPU1           migrator = CPU2
 *     --> active   = CPU1           active   = CPU2
 *        /         \                /         \
 * CPUs   0          1               2          3
 *         idle  --> active          active     idle
 *
 * 3. Here is the race condition: CPU1 managed to propagate its changes (from
 *    step 2) through the hierarchy to GRP1:0 before CPU0 (step 1) did. The
 *    active members of GRP1:0 remain unchanged after the update since it is
 *    still valid from CPU1's current point of view:
 *
 * LVL 1            [GRP1:0]
 *              --> migrator = GRP0:1
 *              --> active   = GRP0:0, GRP0:1
 *                /                \
 * LVL 0  [GRP0:0]                  [GRP0:1]
 *         migrator = CPU1           migrator = CPU2
 *         active   = CPU1           active   = CPU2
 *        /         \                /         \
 * CPUs   0          1               2          3
 *         idle      active          active     idle
 *
 * 4. Now CPU0 finally propagates its changes (from step 1) to GRP1:0.
 *
 * LVL 1            [GRP1:0]
 *              --> migrator = GRP0:1
 *              --> active   = GRP0:1
 *                /                \
 * LVL 0  [GRP0:0]                  [GRP0:1]
 *         migrator = CPU1           migrator = CPU2
 *         active   = CPU1           active   = CPU2
 *        /         \                /         \
 * CPUs   0          1               2          3
 *         idle      active          active     idle
 *
 *
 * The race of CPU0 vs. CPU1 led to an inconsistent state in GRP1:0. CPU1 is
 * active and is correctly listed as active in GRP0:0. However GRP1:0 does not
 * have GRP0:0 listed as active, which is wrong. The sequence counter has been
 * added to avoid inconsistent states during updates. The state is updated
 * atomically only if all members, including the sequence counter, match the
 * expected value (compare-and-exchange).
 *
 * Looking back at the previous example with the addition of the sequence
 * counter: The update as performed by CPU0 in step 4 will fail. CPU1 changed
 * the sequence number during the update in step 3 so the expected old value (as
 * seen by CPU0 before starting the walk) does not match.
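 *
 * Schematically, every lockless group state update therefore has the
 * following shape (compare tmigr_active_up() below; union tmigr_state is
 * declared in timer_migration.h)::
 *
 *	union tmigr_state curstate, newstate;
 *
 *	curstate.state = atomic_read(&group->migr_state);
 *	do {
 *		newstate = curstate;
 *		// ... modify newstate.migrator / newstate.active ...
 *		newstate.seq++;
 *	} while (!atomic_try_cmpxchg(&group->migr_state,
 *				     &curstate.state, newstate.state));
 *
 * A walk which operates on a stale snapshot fails the compare-and-exchange,
 * rereads the state and retries with current information.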
 *
 * Prevent race between new event and last CPU going inactive
 * ----------------------------------------------------------
 *
 * When the last CPU is going idle and there is a concurrent update of a new
 * first global timer of an idle CPU, the group and child states have to be read
 * while holding the lock in tmigr_update_events(). The following scenario shows
 * what happens when this is not done.
 *
 * 1. Only CPU2 is active:
 *
 * LVL 1            [GRP1:0]
 *                  migrator = GRP0:1
 *                  active   = GRP0:1
 *                  next_expiry = KTIME_MAX
 *                /                \
 * LVL 0  [GRP0:0]                  [GRP0:1]
 *         migrator = TMIGR_NONE     migrator = CPU2
 *         active   =                active   = CPU2
 *         next_expiry = KTIME_MAX   next_expiry = KTIME_MAX
 *        /         \                /         \
 * CPUs   0          1               2          3
 *         idle      idle            active     idle
 *
 * 2. Now CPU2 goes idle (and has no global timer that has to be handled) and
 *    propagates that to GRP0:1:
 *
 * LVL 1            [GRP1:0]
 *                  migrator = GRP0:1
 *                  active   = GRP0:1
 *                  next_expiry = KTIME_MAX
 *                /                \
 * LVL 0  [GRP0:0]                  [GRP0:1]
 *         migrator = TMIGR_NONE --> migrator = TMIGR_NONE
 *         active   =            --> active   =
 *         next_expiry = KTIME_MAX   next_expiry = KTIME_MAX
 *        /         \                /         \
 * CPUs   0          1               2          3
 *         idle      idle        --> idle       idle
 *
 * 3. Now the idle state is propagated up to GRP1:0. As this is now the last
 *    child going idle in the top level group, the expiry of the next group
 *    event has to be handed back to make sure no event is lost. As there is
 *    no event enqueued, KTIME_MAX is handed back to CPU2.
 *
 * LVL 1            [GRP1:0]
 *              --> migrator = TMIGR_NONE
 *              --> active   =
 *                  next_expiry = KTIME_MAX
 *                /                \
 * LVL 0  [GRP0:0]                  [GRP0:1]
 *         migrator = TMIGR_NONE     migrator = TMIGR_NONE
 *         active   =                active   =
 *         next_expiry = KTIME_MAX   next_expiry = KTIME_MAX
 *        /         \                /         \
 * CPUs   0          1               2          3
 *         idle      idle        --> idle       idle
 *
 * 4. CPU0 has a new timer queued from idle and it expires at TIMER0. CPU0
 *    propagates that to GRP0:0:
 *
 * LVL 1            [GRP1:0]
 *                  migrator = TMIGR_NONE
 *                  active   =
 *                  next_expiry = KTIME_MAX
 *                /                \
 * LVL 0  [GRP0:0]                  [GRP0:1]
 *         migrator = TMIGR_NONE     migrator = TMIGR_NONE
 *         active   =                active   =
 *     --> next_expiry = TIMER0      next_expiry = KTIME_MAX
 *        /         \                /         \
 * CPUs   0          1               2          3
 *         idle      idle            idle       idle
 *
 * 5. GRP0:0 is not active, so the new timer has to be propagated to
 *    GRP1:0. Therefore the GRP1:0 state has to be read. When the stale value
 *    (from step 2) is read, the timer is enqueued into GRP1:0, but nothing is
 *    handed back to CPU0, as it seems that there is still an active child in
 *    the top level group.
 *
 * LVL 1            [GRP1:0]
 *                  migrator = TMIGR_NONE
 *                  active   =
 *              --> next_expiry = TIMER0
 *                /                \
 * LVL 0  [GRP0:0]                  [GRP0:1]
 *         migrator = TMIGR_NONE     migrator = TMIGR_NONE
 *         active   =                active   =
 *         next_expiry = TIMER0      next_expiry = KTIME_MAX
 *        /         \                /         \
 * CPUs   0          1               2          3
 *         idle      idle            idle       idle
 *
 * This is prevented by reading the state while holding the lock (when a new
 * timer has to be propagated from the idle path)::
 *
 *	CPU2 (tmigr_inactive_up())          CPU0 (tmigr_new_timer_up())
 *	--------------------------          ---------------------------
 *	// step 3:
 *	cmpxchg(&GRP1:0->state);
 *	tmigr_update_events() {
 *	    spin_lock(&GRP1:0->lock);
 *	    // ... update events ...
 *	    // hand back first expiry when GRP1:0 is idle
 *	    spin_unlock(&GRP1:0->lock);
 *	    // ^^^ release state modification
 *	}
 *	                                    tmigr_update_events() {
 *	                                        spin_lock(&GRP1:0->lock)
 *	                                        // ^^^ acquire state modification
 *	                                        group_state = atomic_read(&GRP1:0->state)
 *	                                        // .... update events ...
 *	                                        // hand back first expiry when GRP1:0 is idle
 *	                                        spin_unlock(&GRP1:0->lock) <3>
 *	                                        // ^^^ makes state visible for other
 *	                                        // callers of tmigr_new_timer_up()
 *	                                    }
 *
 * When CPU0 grabs the lock directly after cmpxchg, the first timer is reported
 * back to CPU0 and also later on to CPU2. So no timer is missed. A concurrent
 * update of the group state from the active path is no problem, as the
 * upcoming CPU will take care of the group events.
 *
 * Required event and timerqueue update after a remote expiry:
 * -----------------------------------------------------------
 *
 * After expiring timers of a remote CPU, a walk through the hierarchy and an
 * update of events and timerqueues is required. It is obviously needed if
 * there is a 'new' global timer but also if there is no new global timer but
 * the remote CPU is still idle.
 *
 * 1. CPU0 and CPU1 are idle and both have a global timer expiring at the same
 *    time. So both have an event enqueued in the timerqueue of GRP0:0. CPU3 is
 *    also idle and has no global timer pending. CPU2 is the only active CPU and
 *    thus also the migrator:
 *
 * LVL 1            [GRP1:0]
 *                  migrator = GRP0:1
 *                  active   = GRP0:1
 *              --> timerqueue = evt-GRP0:0
 *                /                \
 * LVL 0  [GRP0:0]                  [GRP0:1]
 *         migrator = TMIGR_NONE     migrator = CPU2
 *         active   =                active   = CPU2
 *         groupevt.ignore = false   groupevt.ignore = true
 *         groupevt.cpu = CPU0       groupevt.cpu =
 *         timerqueue = evt-CPU0,    timerqueue =
 *                      evt-CPU1
 *        /         \                /         \
 * CPUs   0          1               2          3
 *         idle      idle            active     idle
 *
 * 2. CPU2 starts to expire remote timers. It starts with the LVL0 group
 *    GRP0:1. There is no event queued in the timerqueue, so CPU2 continues with
 *    the parent of GRP0:1: GRP1:0. In GRP1:0 it dequeues the first event. It
 *    looks at the tmigr_event::cpu struct member and expires the pending
 *    timer(s) of CPU0.
 *
 * LVL 1            [GRP1:0]
 *                  migrator = GRP0:1
 *                  active   = GRP0:1
 *              --> timerqueue =
 *                /                \
 * LVL 0  [GRP0:0]                  [GRP0:1]
 *         migrator = TMIGR_NONE     migrator = CPU2
 *         active   =                active   = CPU2
 *         groupevt.ignore = false   groupevt.ignore = true
 *     --> groupevt.cpu = CPU0       groupevt.cpu =
 *         timerqueue = evt-CPU0,    timerqueue =
 *                      evt-CPU1
 *        /         \                /         \
 * CPUs   0          1               2          3
 *         idle      idle            active     idle
 *
 * 3. Some work has to be done after expiring the timers of CPU0. If we stop
 *    here, then CPU1's pending global timer(s) will not expire in time and the
 *    timerqueue of GRP0:0 still has an event for CPU0 enqueued which has just
 *    been processed. So it is required to walk the hierarchy from CPU0's point
 *    of view and update it accordingly. CPU0's event will be removed from the
 *    timerqueue because it has no pending timer. If CPU0 had a timer pending
 *    then it would have to expire after CPU1's first timer because all timers
 *    from this period were just expired. Either way CPU1's event will be first
 *    in GRP0:0's timerqueue and therefore set in the CPU field of the group
 *    event which is then enqueued in GRP1:0's timerqueue as GRP0:0 is still not
 *    active:
 *
 * LVL 1            [GRP1:0]
 *                  migrator = GRP0:1
 *                  active   = GRP0:1
 *              --> timerqueue = evt-GRP0:0
 *                /                \
 * LVL 0  [GRP0:0]                  [GRP0:1]
 *         migrator = TMIGR_NONE     migrator = CPU2
 *         active   =                active   = CPU2
 *         groupevt.ignore = false   groupevt.ignore = true
 *     --> groupevt.cpu = CPU1       groupevt.cpu =
 *     --> timerqueue = evt-CPU1     timerqueue =
 *        /         \                /         \
 * CPUs   0          1               2          3
 *         idle      idle            active     idle
 *
 * Now CPU2 (migrator) will continue step 2 at GRP1:0 and will expire the
 * timer(s) of CPU1.
 *
 * The hierarchy walk in step 3 can be skipped if the migrator notices that a
 * CPU of GRP0:0 is active again. The CPU will mark GRP0:0 active and take care
 * of the group as migrator and any needed updates within the hierarchy.
 */

static DEFINE_MUTEX(tmigr_mutex);
static struct list_head *tmigr_level_list __read_mostly;

static unsigned int tmigr_hierarchy_levels __read_mostly;
static unsigned int tmigr_crossnode_level __read_mostly;

static struct tmigr_group *tmigr_root;

static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);

/*
 * CPUs available for timer migration.
 * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
 * Additionally tmigr_available_mutex serializes set/clear operations with
 * each other.
 */
static cpumask_var_t tmigr_available_cpumask;
static DEFINE_MUTEX(tmigr_available_mutex);

/* Enabled during late initcall */
static DEFINE_STATIC_KEY_FALSE(tmigr_exclude_isolated);

#define TMIGR_NONE	0xFF
#define BIT_CNT		8
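
/*
 * The group state accessed below is packed into a single 32bit word so it
 * can be updated with one atomic_try_cmpxchg(). Roughly (the definitive
 * declaration lives in timer_migration.h):
 *
 *	union tmigr_state {
 *		u32 state;
 *		struct {
 *			u8	active;		// bitmask of active children
 *			u8	migrator;	// child bit or TMIGR_NONE
 *			u16	seq;		// update sequence counter
 *		} __packed;
 *	};
 *
 * TMIGR_NONE (0xFF) cannot collide with a valid child bit because a group
 * has at most BIT_CNT (8) children.
 */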

static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
{
	return !(tmc->tmgroup && tmc->available);
}

/*
 * Returns true if @cpu should be excluded from the hierarchy as isolated.
 * Domain isolated CPUs don't participate in timer migration; nohz_full CPUs
 * are still part of the hierarchy but become idle (from a tick and timer
 * migration perspective) when they stop their tick. This lets the timekeeping
 * CPU handle their global timers. Marking isolated CPUs as idle as well would
 * be too costly, hence they are completely excluded from the hierarchy.
 * This check is necessary, for instance, to prevent offline isolated CPUs from
 * being incorrectly marked as available once they get back online.
 *
 * This function returns false during early boot and the isolation logic is
 * enabled only after isolated CPUs are marked as unavailable at late boot.
 * The tick CPU can be isolated at boot, however we cannot mark it as
 * unavailable to avoid having no global migrator for the nohz_full CPUs. This
 * should be ensured by the callers of this function: implicitly from hotplug
 * callbacks and explicitly in tmigr_init_isolation() and
 * tmigr_isolated_exclude_cpumask().
 */
static inline bool tmigr_is_isolated(int cpu)
{
	if (!static_branch_unlikely(&tmigr_exclude_isolated))
		return false;
	return (!housekeeping_cpu(cpu, HK_TYPE_DOMAIN) &&
		housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE));
}

/*
 * Returns true when @childmask corresponds to the group migrator or when the
 * group is not active - so no migrator is set.
 */
static bool tmigr_check_migrator(struct tmigr_group *group, u8 childmask)
{
	union tmigr_state s;

	s.state = atomic_read(&group->migr_state);

	if ((s.migrator == childmask) || (s.migrator == TMIGR_NONE))
		return true;

	return false;
}

static bool tmigr_check_migrator_and_lonely(struct tmigr_group *group, u8 childmask)
{
	bool lonely, migrator = false;
	unsigned long active;
	union tmigr_state s;

	s.state = atomic_read(&group->migr_state);

	if ((s.migrator == childmask) || (s.migrator == TMIGR_NONE))
		migrator = true;

	active = s.active;
	lonely = bitmap_weight(&active, BIT_CNT) <= 1;

	return (migrator && lonely);
}

static bool tmigr_check_lonely(struct tmigr_group *group)
{
	unsigned long active;
	union tmigr_state s;

	s.state = atomic_read(&group->migr_state);

	active = s.active;

	return bitmap_weight(&active, BIT_CNT) <= 1;
}

/**
 * struct tmigr_walk - data required for walking the hierarchy
 * @nextexp:	Next CPU event expiry information which is handed into
 *		the timer migration code by the timer code
 *		(get_next_timer_interrupt())
 * @firstexp:	Contains the first event expiry information when the
 *		hierarchy is completely idle. When the CPU itself was the
 *		last one going idle, this information makes sure that the
 *		CPU will be back in time. When using this value in the
 *		remote expiry case, firstexp is stored in the per CPU
 *		tmigr_cpu struct of the CPU which expires remote timers.
 *		It is updated in the top level group only. Be aware that a
 *		new top level of the hierarchy could appear between the
 *		'top level call' in tmigr_update_events() and the check
 *		for the parent group in walk_groups(). Then @firstexp
 *		might contain a value != KTIME_MAX even if it was not the
 *		final top level. This is not a problem, as the worst
 *		outcome is a CPU which might wake up a little early.
 * @evt:	Pointer to tmigr_event which needs to be queued (of idle
 *		child group)
 * @childmask:	groupmask of child group
 * @remote:	Is set when the new timer path is executed in
 *		tmigr_handle_remote_cpu()
 * @basej:	timer base in jiffies
 * @now:	timer base monotonic
 * @check:	is set if remote timers need to be handled; required in
 *		tmigr_requires_handle_remote() only
 */
struct tmigr_walk {
	u64			nextexp;
	u64			firstexp;
	struct tmigr_event	*evt;
	u8			childmask;
	bool			remote;
	unsigned long		basej;
	u64			now;
	bool			check;
};

typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, struct tmigr_walk *);

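/*
 * Walk contract, for illustration: the @up callback is invoked for each
 * group from the starting level towards the root. Returning true stops
 * the walk early, typically because the group is active and further
 * propagation is the duty of that group's migrator. A minimal sketch of
 * a conforming handler (hypothetical, for illustration only):
 *
 *	static bool example_up(struct tmigr_group *group,
 *			       struct tmigr_group *child,
 *			       struct tmigr_walk *data)
 *	{
 *		return true;	// stop the walk at the first level
 *	}
 */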
static void __walk_groups_from(up_f up, struct tmigr_walk *data,
			       struct tmigr_group *child, struct tmigr_group *group)
{
	do {
		WARN_ON_ONCE(group->level >= tmigr_hierarchy_levels);

		if (up(group, child, data))
			break;

		child = group;
		/*
		 * Pairs with the store release on group connection
		 * to make sure group initialization is visible.
		 */
		group = READ_ONCE(group->parent);
		data->childmask = child->groupmask;
		WARN_ON_ONCE(!data->childmask);
	} while (group);
}

static void __walk_groups(up_f up, struct tmigr_walk *data,
			  struct tmigr_cpu *tmc)
{
	__walk_groups_from(up, data, NULL, tmc->tmgroup);
}

static void walk_groups(up_f up, struct tmigr_walk *data, struct tmigr_cpu *tmc)
{
	lockdep_assert_held(&tmc->lock);

	__walk_groups(up, data, tmc);
}

/*
 * Returns the next event of the timerqueue @group->events
 *
 * Removes timers with the ignore flag set and updates next_expiry of the
 * group. Values of the group event are updated in tmigr_update_events()
 * only.
 */
static struct tmigr_event *tmigr_next_groupevt(struct tmigr_group *group)
{
	struct timerqueue_node *node = NULL;
	struct tmigr_event *evt = NULL;

	lockdep_assert_held(&group->lock);

	WRITE_ONCE(group->next_expiry, KTIME_MAX);

	while ((node = timerqueue_getnext(&group->events))) {
		evt = container_of(node, struct tmigr_event, nextevt);

		if (!READ_ONCE(evt->ignore)) {
			WRITE_ONCE(group->next_expiry, evt->nextevt.expires);
			return evt;
		}

		/*
		 * Remove the next timers with the ignore flag set, because
		 * the group lock is held anyway
		 */
		if (!timerqueue_del(&group->events, node))
			break;
	}

	return NULL;
}

/*
 * Return the next event (with an expiry equal to or before @now)
 *
 * The returned event is also removed from the queue.
 */
static struct tmigr_event *tmigr_next_expired_groupevt(struct tmigr_group *group,
						       u64 now)
{
	struct tmigr_event *evt = tmigr_next_groupevt(group);

	if (!evt || now < evt->nextevt.expires)
		return NULL;

	/*
	 * The event is ready to expire. Remove it and update the next group
	 * event.
	 */
	timerqueue_del(&group->events, &evt->nextevt);
	tmigr_next_groupevt(group);

	return evt;
}

static u64 tmigr_next_groupevt_expires(struct tmigr_group *group)
{
	struct tmigr_event *evt;

	evt = tmigr_next_groupevt(group);

	if (!evt)
		return KTIME_MAX;
	else
		return evt->nextevt.expires;
}

static bool tmigr_active_up(struct tmigr_group *group,
			    struct tmigr_group *child,
			    struct tmigr_walk *data)
{
	union tmigr_state curstate, newstate;
	bool walk_done;
	u8 childmask;

	childmask = data->childmask;
	/*
	 * No memory barrier is required here in contrast to
	 * tmigr_inactive_up(), as the group state change does not depend on
	 * the child state.
	 */
	curstate.state = atomic_read(&group->migr_state);

	do {
		newstate = curstate;
		walk_done = true;

		if (newstate.migrator == TMIGR_NONE) {
			newstate.migrator = childmask;

			/* Changes need to be propagated */
			walk_done = false;
		}

		newstate.active |= childmask;
		newstate.seq++;

	} while (!atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state));

	trace_tmigr_group_set_cpu_active(group, newstate, childmask);

	/*
	 * The group is active (again). The group event might still be queued
	 * into the parent group's timerqueue but can now be handled by the
	 * migrator of this group. Therefore the ignore flag for the group
	 * event is updated to reflect this.
	 *
	 * The update of the ignore flag in the active path is done lockless.
	 * In the worst case the migrator of the parent group observes the
	 * change too late and expires remotely all events belonging to this
	 * group. The lock is held while updating the ignore flag in the idle
	 * path. So this state change will not be lost.
	 */
	WRITE_ONCE(group->groupevt.ignore, true);

	return walk_done;
}

static void __tmigr_cpu_activate(struct tmigr_cpu *tmc)
{
	struct tmigr_walk data;

	data.childmask = tmc->groupmask;

	trace_tmigr_cpu_active(tmc);

	tmc->cpuevt.ignore = true;
	WRITE_ONCE(tmc->wakeup, KTIME_MAX);

	walk_groups(&tmigr_active_up, &data, tmc);
}

/**
 * tmigr_cpu_activate() - set this CPU active in timer migration hierarchy
 *
 * Call site timer_clear_idle() is called with interrupts disabled.
 */
void tmigr_cpu_activate(void)
{
	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);

	if (tmigr_is_not_available(tmc))
		return;

	if (WARN_ON_ONCE(!tmc->idle))
		return;

	raw_spin_lock(&tmc->lock);
	tmc->idle = false;
	__tmigr_cpu_activate(tmc);
	raw_spin_unlock(&tmc->lock);
}

/*
 * Returns true if there is nothing to be propagated to the next level
 *
 * @data->firstexp is set to the expiry of the first global event of the (top
 * level of the) hierarchy, but only when the hierarchy is completely idle.
 *
 * The child and group states need to be read under the lock, to prevent a race
 * against a concurrent tmigr_inactive_up() run when the last CPU goes idle. See
 * also the section "Prevent race between new event and last CPU going
 * inactive" in the documentation at the top.
 *
 * This is the only place where the group event expiry value is set.
 */
static
bool tmigr_update_events(struct tmigr_group *group, struct tmigr_group *child,
			 struct tmigr_walk *data)
{
	struct tmigr_event *evt, *first_childevt;
	union tmigr_state childstate, groupstate;
	bool remote = data->remote;
	bool walk_done = false;
	bool ignore;
	u64 nextexp;

	if (child) {
		raw_spin_lock(&child->lock);
		raw_spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);

		childstate.state = atomic_read(&child->migr_state);
		groupstate.state = atomic_read(&group->migr_state);

		if (childstate.active) {
			walk_done = true;
			goto unlock;
		}

		first_childevt = tmigr_next_groupevt(child);
		nextexp = child->next_expiry;
		evt = &child->groupevt;

		/*
		 * This can race with a concurrent idle exit (activate).
		 * If the current writer wins, a useless remote expiration may
		 * be scheduled. If the activate wins, the event is properly
		 * ignored.
		 */
		ignore = (nextexp == KTIME_MAX) ? true : false;
		WRITE_ONCE(evt->ignore, ignore);
	} else {
		nextexp = data->nextexp;

		first_childevt = evt = data->evt;
		ignore = evt->ignore;

		/*
		 * Walking the hierarchy is required in any case when a
		 * remote expiry was done before. This ensures to not lose
		 * already queued events in non active groups (see section
		 * "Required event and timerqueue update after a remote
		 * expiry" in the documentation at the top).
		 *
		 * The two call sites which are executed without a remote expiry
		 * before, are not prevented from propagating changes through
		 * the hierarchy by the return:
		 * - When entering this path by tmigr_new_timer(), @evt->ignore
		 *   is never set.
		 * - tmigr_inactive_up() takes care of the propagation by
		 *   itself and ignores the return value. But an immediate
		 *   return is possible if there is a parent, sparing group
		 *   locking at this level, because the upper walking call to
		 *   the parent will take care about removing this event from
		 *   within the group and update next_expiry accordingly.
		 *
		 * However if there is no parent, ie: the hierarchy has only a
		 * single level so @group is the top level group, make sure the
		 * first event information of the group is updated properly and
		 * also handled properly, so skip this fast return path.
		 */
		if (ignore && !remote && group->parent)
			return true;

		raw_spin_lock(&group->lock);

		childstate.state = 0;
		groupstate.state = atomic_read(&group->migr_state);
	}

	/*
	 * If the child event is already queued in the group, remove it from
	 * the queue when only the expiry time has changed or when the event
	 * can be ignored.
	 */
	if (timerqueue_node_queued(&evt->nextevt)) {
		if ((evt->nextevt.expires == nextexp) && !ignore) {
			/* Make sure not to miss a new CPU event with the same expiry */
			evt->cpu = first_childevt->cpu;
			goto check_toplvl;
		}

		if (!timerqueue_del(&group->events, &evt->nextevt))
			WRITE_ONCE(group->next_expiry, KTIME_MAX);
	}

	if (ignore) {
		/*
		 * When the next child event can be ignored (nextexp is
		 * KTIME_MAX) and there was no remote timer handling before or
		 * the group is already active, there is no need to walk the
		 * hierarchy even if there is a parent group.
		 *
		 * The other way round: even if the event can be ignored, but
		 * if a remote timer handling was executed before and the group
		 * is not active, walking the hierarchy is required to not miss
		 * an enqueued timer in the non active group. The enqueued timer
		 * of the group needs to be propagated to a higher level to
		 * ensure it is handled.
		 */
		if (!remote || groupstate.active)
			walk_done = true;
	} else {
		evt->nextevt.expires = nextexp;
		evt->cpu = first_childevt->cpu;

		if (timerqueue_add(&group->events, &evt->nextevt))
			WRITE_ONCE(group->next_expiry, nextexp);
	}

check_toplvl:
	if (!group->parent && (groupstate.migrator == TMIGR_NONE)) {
		walk_done = true;

		/*
		 * Nothing to do when the update was done during remote timer
		 * handling. The first timer of the top level group, which needs
		 * to be handled when the top level group is not active, is
		 * calculated directly in tmigr_handle_remote_up().
		 */
		if (remote)
			goto unlock;

		/*
		 * The top level group is idle and it has to be ensured the
		 * global timers are handled in time. (This could be optimized
		 * by keeping track of the last global scheduled event and only
		 * arming it on the CPU if the new event is earlier. Not sure
		 * if it's worth the complexity.)
		 */
		data->firstexp = tmigr_next_groupevt_expires(group);
	}

	trace_tmigr_update_events(child, group, childstate, groupstate,
				  nextexp);

unlock:
	raw_spin_unlock(&group->lock);

	if (child)
		raw_spin_unlock(&child->lock);

	return walk_done;
}

static bool tmigr_new_timer_up(struct tmigr_group *group,
			       struct tmigr_group *child,
			       struct tmigr_walk *data)
{
	return tmigr_update_events(group, child, data);
}

/*
 * Returns the expiry of the next timer that needs to be handled. KTIME_MAX
 * is returned if an active CPU will handle all the timer migration hierarchy
 * timers.
 */
static u64 tmigr_new_timer(struct tmigr_cpu *tmc, u64 nextexp)
{
	struct tmigr_walk data = { .nextexp = nextexp,
				   .firstexp = KTIME_MAX,
				   .evt = &tmc->cpuevt };

	lockdep_assert_held(&tmc->lock);

	if (tmc->remote)
		return KTIME_MAX;

	trace_tmigr_cpu_new_timer(tmc);

	tmc->cpuevt.ignore = false;
	data.remote = false;

	walk_groups(&tmigr_new_timer_up, &data, tmc);

	/* If there is a new first global event, make sure it is handled */
	return data.firstexp;
}

static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
				    unsigned long jif)
{
	struct timer_events tevt;
	struct tmigr_walk data;
	struct tmigr_cpu *tmc;

	tmc = per_cpu_ptr(&tmigr_cpu, cpu);

	raw_spin_lock_irq(&tmc->lock);

	/*
	 * If the remote CPU is offline then the timers have been migrated to
	 * another CPU.
	 *
	 * If tmigr_cpu::remote is set, another CPU is already expiring the
	 * timers of the remote CPU at the moment.
	 *
	 * If tmigr_event::ignore is set, then the CPU returns from idle and
	 * takes care of its timers.
	 *
	 * If the next event expires in the future, then the event has been
	 * updated and there are no timers to expire right now. The CPU which
	 * updated the event takes care when the hierarchy is completely
	 * idle. Otherwise the migrator does it as the event is enqueued.
	 */
	if (!tmc->available || tmc->remote || tmc->cpuevt.ignore ||
	    now < tmc->cpuevt.nextevt.expires) {
		raw_spin_unlock_irq(&tmc->lock);
		return;
	}

	trace_tmigr_handle_remote_cpu(tmc);

	tmc->remote = true;
	WRITE_ONCE(tmc->wakeup, KTIME_MAX);

	/* Drop the lock to allow the remote CPU to exit idle */
	raw_spin_unlock_irq(&tmc->lock);

	if (cpu != smp_processor_id())
		timer_expire_remote(cpu);

	/*
	 * Lock ordering needs to be preserved - timer_base locks before tmigr
	 * related locks (see section "Locking rules" in the documentation at
	 * the top). While fetching the next timer interrupt, tmc->lock also
	 * needs to be held. Otherwise there is a possible race window against
	 * the CPU itself when it comes out of idle, updates the first timer in
	 * the hierarchy and goes back to idle.
	 *
	 * timer base locks are dropped as fast as possible: After checking
	 * whether the remote CPU went offline in the meantime and after
	 * fetching the next remote timer interrupt. Dropping the locks as fast
	 * as possible keeps the locking region small and prevents holding
	 * several (unnecessary) locks while walking the hierarchy for updating
	 * the timerqueue and group events.
	 */
	local_irq_disable();
	timer_lock_remote_bases(cpu);
	raw_spin_lock(&tmc->lock);

	/*
	 * When the CPU went offline in the meantime, no hierarchy walk has to
	 * be done for updating the queued events, because the walk was
	 * already done while marking the CPU offline in the hierarchy.
	 *
	 * When the CPU is no longer idle, the CPU takes care of the timers and
	 * also of the timers in the hierarchy.
	 *
	 * (See also section "Required event and timerqueue update after a
	 * remote expiry" in the documentation at the top)
	 */
	if (!tmc->available || !tmc->idle) {
		timer_unlock_remote_bases(cpu);
		goto unlock;
	}

	/* next event of CPU */
	fetch_next_timer_interrupt_remote(jif, now, &tevt, cpu);
	timer_unlock_remote_bases(cpu);

	data.nextexp = tevt.global;
	data.firstexp = KTIME_MAX;
	data.evt = &tmc->cpuevt;
	data.remote = true;

	/*
	 * The update is done even when there is no 'new' global timer pending
	 * on the remote CPU (see section "Required event and timerqueue update
	 * after a remote expiry" in the documentation at the top)
	 */
	walk_groups(&tmigr_new_timer_up, &data, tmc);

unlock:
	tmc->remote = false;
	raw_spin_unlock_irq(&tmc->lock);
}

static bool tmigr_handle_remote_up(struct tmigr_group *group,
				   struct tmigr_group *child,
				   struct tmigr_walk *data)
{
	struct tmigr_event *evt;
	unsigned long jif;
	u8 childmask;
	u64 now;

	jif = data->basej;
	now = data->now;

	childmask = data->childmask;

	trace_tmigr_handle_remote(group);
again:
	/*
	 * Handle the group only if @childmask is the migrator or if the
	 * group has no migrator. Otherwise the group is active and is
	 * handled by its own migrator.
	 */
	if (!tmigr_check_migrator(group, childmask))
		return true;

	raw_spin_lock_irq(&group->lock);

	evt = tmigr_next_expired_groupevt(group, now);

	if (evt) {
		unsigned int remote_cpu = evt->cpu;

		raw_spin_unlock_irq(&group->lock);

		tmigr_handle_remote_cpu(remote_cpu, now, jif);

		/* check if there is another event that needs to be handled */
		goto again;
	}

	/*
	 * Keep track of the expiry of the first event that needs to be handled
	 * (group->next_expiry was updated by tmigr_next_expired_groupevt(),
	 * next was set by tmigr_handle_remote_cpu()).
	 */
	data->firstexp = group->next_expiry;

	raw_spin_unlock_irq(&group->lock);

	return false;
}

/**
 * tmigr_handle_remote() - Handle global timers of remote idle CPUs
 *
 * Called from the timer soft interrupt with interrupts enabled.
 */
void tmigr_handle_remote(void)
{
	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
	struct tmigr_walk data;

	if (tmigr_is_not_available(tmc))
		return;

	data.childmask = tmc->groupmask;
	data.firstexp = KTIME_MAX;

	/*
	 * NOTE: This is a redundant check because the migrator test will be
	 * done in tmigr_handle_remote_up() anyway. Keep this check to speed up
	 * the return when nothing has to be done.
	 */
	if (!tmigr_check_migrator(tmc->tmgroup, tmc->groupmask)) {
		/*
		 * If this CPU was an idle migrator, make sure to clear its
		 * wakeup value so it won't chase timers that have already
		 * expired elsewhere. This avoids an endless requeue from
		 * tmigr_new_timer().
		 */
		if (READ_ONCE(tmc->wakeup) == KTIME_MAX)
			return;
	}

	data.now = get_jiffies_update(&data.basej);

	/*
	 * Update @tmc->wakeup only at the end and do not reset @tmc->wakeup to
	 * KTIME_MAX. Even if tmc->lock is not held during the whole remote
	 * handling, tmc->wakeup is fine to be stale as it is called in
	 * interrupt context and tick_nohz_next_event() is executed in interrupt
	 * exit path only after processing the last pending interrupt.
	 */

	__walk_groups(&tmigr_handle_remote_up, &data, tmc);

	raw_spin_lock_irq(&tmc->lock);
	WRITE_ONCE(tmc->wakeup, data.firstexp);
	raw_spin_unlock_irq(&tmc->lock);
}

static bool tmigr_requires_handle_remote_up(struct tmigr_group *group,
					    struct tmigr_group *child,
					    struct tmigr_walk *data)
{
	u8 childmask;

	childmask = data->childmask;

	/*
	 * Handle the group only if the child is the migrator or if the group
	 * has no migrator. Otherwise the group is active and is handled by its
	 * own migrator.
	 */
	if (!tmigr_check_migrator(group, childmask))
		return true;
	/*
	 * The lock is required on 32bit architectures to read the variable
	 * consistently with a concurrent writer. On 64bit the lock is not
	 * required because the read operation is not split and so it is always
	 * consistent.
	 */
	if (IS_ENABLED(CONFIG_64BIT)) {
		data->firstexp = READ_ONCE(group->next_expiry);
		if (data->now >= data->firstexp) {
			data->check = true;
			return true;
		}
	} else {
		raw_spin_lock(&group->lock);
		data->firstexp = group->next_expiry;
		if (data->now >= group->next_expiry) {
			data->check = true;
			raw_spin_unlock(&group->lock);
			return true;
		}
		raw_spin_unlock(&group->lock);
	}

	return false;
}

/**
 * tmigr_requires_handle_remote() - Check the need of remote timer handling
 *
 * Must be called with interrupts disabled.
 */
bool tmigr_requires_handle_remote(void)
{
	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
	struct tmigr_walk data;
	unsigned long jif;
	bool ret = false;

	if (tmigr_is_not_available(tmc))
		return ret;

	data.now = get_jiffies_update(&jif);
	data.childmask = tmc->groupmask;
	data.firstexp = KTIME_MAX;
	data.check = false;

	/*
	 * If the CPU is active, walk the hierarchy to check whether a remote
	 * expiry is required.
	 *
	 * Check is done lockless as interrupts are disabled and @tmc->idle is
	 * set only by the local CPU.
	 */
	if (!tmc->idle) {
		__walk_groups(&tmigr_requires_handle_remote_up, &data, tmc);

		return data.check;
	}

	/*
	 * When the CPU is idle, compare @tmc->wakeup with @data.now. The lock
	 * is required on 32bit architectures to read the variable consistently
	 * with a concurrent writer. On 64bit the lock is not required because
	 * the read operation is not split and so it is always consistent.
	 */
	if (IS_ENABLED(CONFIG_64BIT)) {
		if (data.now >= READ_ONCE(tmc->wakeup))
			return true;
	} else {
		raw_spin_lock(&tmc->lock);
		if (data.now >= tmc->wakeup)
			ret = true;
		raw_spin_unlock(&tmc->lock);
	}

	return ret;
}

/**
 * tmigr_cpu_new_timer() - enqueue next global timer into hierarchy (idle tmc)
 * @nextexp:	Next expiry of global timer (or KTIME_MAX if not)
 *
 * The CPU is already deactivated in the timer migration
 * hierarchy. tick_nohz_get_sleep_length() calls tick_nohz_next_event()
 * and thereby the timer idle path is executed once more. @tmc->wakeup
 * holds the first timer, when the timer migration hierarchy is
 * completely idle.
 *
 * Returns the first timer that needs to be handled by this CPU or KTIME_MAX if
 * nothing needs to be done.
 */
u64 tmigr_cpu_new_timer(u64 nextexp)
{
	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
	u64 ret;

	if (tmigr_is_not_available(tmc))
		return nextexp;

	raw_spin_lock(&tmc->lock);

	ret = READ_ONCE(tmc->wakeup);
	if (nextexp != KTIME_MAX) {
		if (nextexp != tmc->cpuevt.nextevt.expires ||
		    tmc->cpuevt.ignore) {
			ret = tmigr_new_timer(tmc, nextexp);
			/*
			 * Make sure the reevaluation of timers in the idle
			 * path will not miss an event.
			 */
			WRITE_ONCE(tmc->wakeup, ret);
		}
	}
	trace_tmigr_cpu_new_timer_idle(tmc, nextexp);
	raw_spin_unlock(&tmc->lock);
	return ret;
}

static bool tmigr_inactive_up(struct tmigr_group *group,
			      struct tmigr_group *child,
			      struct tmigr_walk *data)
{
	union tmigr_state curstate, newstate, childstate;
	bool walk_done;
	u8 childmask;

	childmask = data->childmask;
	childstate.state = 0;

	/*
	 * The memory barrier is paired with the cmpxchg() in tmigr_active_up()
	 * to make sure the updates of child and group states are ordered. The
	 * ordering is mandatory, as the group state change depends on the
	 * child state.
	 */
	curstate.state = atomic_read_acquire(&group->migr_state);

	for (;;) {
		if (child)
			childstate.state = atomic_read(&child->migr_state);

		newstate = curstate;
		walk_done = true;

		/* Reset active bit when the child is no longer active */
		if (!childstate.active)
			newstate.active &= ~childmask;

		if (newstate.migrator == childmask) {
			/*
			 * Find a new migrator for the group, because the child
			 * group is idle!
			 */
			if (!childstate.active) {
				unsigned long new_migr_bit, active = newstate.active;

				new_migr_bit = find_first_bit(&active, BIT_CNT);

				if (new_migr_bit != BIT_CNT) {
					newstate.migrator = BIT(new_migr_bit);
				} else {
					newstate.migrator = TMIGR_NONE;

					/* Changes need to be propagated */
					walk_done = false;
				}
			}
		}

		newstate.seq++;

		WARN_ON_ONCE((newstate.migrator != TMIGR_NONE) && !(newstate.active));

		if (atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state)) {
			trace_tmigr_group_set_cpu_inactive(group, newstate, childmask);
			break;
		}

		/*
		 * The memory barrier is paired with the cmpxchg() in
		 * tmigr_active_up() to make sure the updates of child and group
		 * states are ordered. It is required only when the above
		 * try_cmpxchg() fails.
		 */
		smp_mb__after_atomic();
	}

	data->remote = false;

	/* Event Handling */
	tmigr_update_events(group, child, data);

	return walk_done;
}

static u64 __tmigr_cpu_deactivate(struct tmigr_cpu *tmc, u64 nextexp)
{
	struct tmigr_walk data = { .nextexp = nextexp,
				   .firstexp = KTIME_MAX,
				   .evt = &tmc->cpuevt,
				   .childmask = tmc->groupmask };

	/*
	 * If nextexp is KTIME_MAX, the CPU event will be ignored because the
	 * local timer expires before the global timer, no global timer is set,
	 * or the CPU is going offline.
	 */
	if (nextexp != KTIME_MAX)
		tmc->cpuevt.ignore = false;

	walk_groups(&tmigr_inactive_up, &data, tmc);
	return data.firstexp;
}

/**
 * tmigr_cpu_deactivate() - Put current CPU into inactive state
 * @nextexp:	The next global timer expiry of the current CPU
 *
 * Must be called with interrupts disabled.
 *
 * Return: the next event expiry of the current CPU or the next event expiry
 * from the hierarchy if this CPU is the top level migrator or the hierarchy is
 * completely idle.
 */
u64 tmigr_cpu_deactivate(u64 nextexp)
{
	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
	u64 ret;

	if (tmigr_is_not_available(tmc))
		return nextexp;

	raw_spin_lock(&tmc->lock);

	ret = __tmigr_cpu_deactivate(tmc, nextexp);

	tmc->idle = true;

	/*
	 * Make sure the reevaluation of timers in the idle path will not miss
	 * an event.
	 */
	WRITE_ONCE(tmc->wakeup, ret);

	trace_tmigr_cpu_idle(tmc, nextexp);
	raw_spin_unlock(&tmc->lock);
	return ret;
}

/**
 * tmigr_quick_check() - Quick forecast of next tmigr event when CPU wants to
 *			 go idle
 * @nextevt:	The next global timer expiry of the current CPU
 *
 * Return:
 * * KTIME_MAX		- when it is probable that nothing has to be done (not
 *			  the only one in the level 0 group; and if it is the
 *			  only one in the level 0 group, but there is more than
 *			  a single group active on the way to the top level)
 * * nextevt		- when the CPU is offline and has to handle the timer on
 *			  its own or when on the way to the top in every group
 *			  only a single child is active but @nextevt is before
 *			  the lowest next_expiry encountered while walking up
 *			  to the top level.
 * * next_expiry	- value of the lowest expiry encountered while walking
 *			  groups if only a single child is active on each and
 *			  @nextevt is after this lowest expiry.
 */
u64 tmigr_quick_check(u64 nextevt)
{
	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
	struct tmigr_group *group = tmc->tmgroup;

	if (tmigr_is_not_available(tmc))
		return nextevt;

	if (WARN_ON_ONCE(tmc->idle))
		return nextevt;

	if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->groupmask))
		return KTIME_MAX;

	do {
		if (!tmigr_check_lonely(group))
			return KTIME_MAX;

		/*
		 * Since the current CPU is active, events may not be sorted
		 * from bottom to top because the CPU's event is ignored up to
		 * the top and its siblings' events are not propagated upwards.
		 * Thus keep track of the lowest observed expiry.
		 */
		nextevt = min_t(u64, nextevt, READ_ONCE(group->next_expiry));
		group = group->parent;
	} while (group);

	return nextevt;
}

/*
 * tmigr_trigger_active() - trigger a CPU to become active again
 *
 * This function is executed on a CPU which is part of cpu_online_mask, when
 * the last active CPU in the hierarchy is going offline. With this, it is
 * ensured that the other CPU is active and takes over the migrator duty.
 */
static long tmigr_trigger_active(void *unused)
{
	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);

	WARN_ON_ONCE(!tmc->available || tmc->idle);

	return 0;
}

static int tmigr_clear_cpu_available(unsigned int cpu)
{
	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
	int migrator;
	u64 firstexp;

	guard(mutex)(&tmigr_available_mutex);

	cpumask_clear_cpu(cpu, tmigr_available_cpumask);
	scoped_guard(raw_spinlock_irq, &tmc->lock) {
		if (!tmc->available)
			return 0;
		tmc->available = false;
		WRITE_ONCE(tmc->wakeup, KTIME_MAX);

		/*
		 * The CPU has to handle the local events on its own when on
		 * the way to offline; therefore the nextevt value is set to
		 * KTIME_MAX
		 */
		firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
		trace_tmigr_cpu_unavailable(tmc);
	}

	if (firstexp != KTIME_MAX) {
		migrator = cpumask_any(tmigr_available_cpumask);
		work_on_cpu(migrator, tmigr_trigger_active, NULL);
	}

	return 0;
}

static int __tmigr_set_cpu_available(unsigned int cpu)
{
	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);

	/* Check whether CPU data was successfully initialized */
	if (WARN_ON_ONCE(!tmc->tmgroup))
		return -EINVAL;

	guard(mutex)(&tmigr_available_mutex);

	cpumask_set_cpu(cpu, tmigr_available_cpumask);
	scoped_guard(raw_spinlock_irq, &tmc->lock) {
		if (tmc->available)
			return 0;
		trace_tmigr_cpu_available(tmc);
		tmc->idle = timer_base_is_idle();
		if (!tmc->idle)
			__tmigr_cpu_activate(tmc);
		tmc->available = true;
	}
	return 0;
}

static int tmigr_set_cpu_available(unsigned int cpu)
{
	if (tmigr_is_isolated(cpu))
		return 0;

	return __tmigr_set_cpu_available(cpu);
}

static void tmigr_cpu_isolate(struct work_struct *ignored)
{
	tmigr_clear_cpu_available(smp_processor_id());
}

static void tmigr_cpu_unisolate(struct work_struct *ignored)
{
	/*
	 * Don't call tmigr_is_isolated() -> housekeeping_cpu() directly
	 * because the cpuset mutex is correctly held by the workqueue caller
	 * but lockdep doesn't know that.
	 */
	__tmigr_set_cpu_available(smp_processor_id());
}

/**
 * tmigr_isolated_exclude_cpumask - Exclude given CPUs from hierarchy
 * @exclude_cpumask:	the cpumask to be excluded from the timer migration
 *			hierarchy
 *
 * This function can be called from cpuset code to provide the new set of
 * isolated CPUs that should be excluded from the hierarchy.
 * Online CPUs not present in exclude_cpumask but already excluded are brought
 * back to the hierarchy.
 * Functions to isolate/unisolate need to be called locally and can sleep.
 */
int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
{
	struct work_struct __percpu *works __free(free_percpu) =
		alloc_percpu(struct work_struct);
	cpumask_var_t cpumask __free(free_cpumask_var) = CPUMASK_VAR_NULL;
	int cpu;

	lockdep_assert_cpus_held();

	if (!works)
		return -ENOMEM;
	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * First set previously isolated CPUs as available (unisolate).
	 * This cpumask contains only CPUs that switched to available now.
	 */
	cpumask_andnot(cpumask, cpu_online_mask, exclude_cpumask);
	cpumask_andnot(cpumask, cpumask, tmigr_available_cpumask);

	for_each_cpu(cpu, cpumask) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, tmigr_cpu_unisolate);
		schedule_work_on(cpu, work);
	}
	for_each_cpu(cpu, cpumask)
		flush_work(per_cpu_ptr(works, cpu));

	/*
	 * Then clear previously available CPUs (isolate).
	 * This cpumask contains only CPUs that switched to not available now.
	 * There cannot be an overlap with the newly available ones.
	 */
	cpumask_and(cpumask, exclude_cpumask, tmigr_available_cpumask);
	cpumask_and(cpumask, cpumask, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE));
	/*
	 * Handle this here and not in the cpuset code because exclude_cpumask
	 * might also include the tick CPU if it is included in isolcpus.
	 */
	for_each_cpu(cpu, cpumask) {
		if (!tick_nohz_cpu_hotpluggable(cpu)) {
			cpumask_clear_cpu(cpu, cpumask);
			break;
		}
	}

	for_each_cpu(cpu, cpumask) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, tmigr_cpu_isolate);
		schedule_work_on(cpu, work);
	}
	for_each_cpu(cpu, cpumask)
		flush_work(per_cpu_ptr(works, cpu));

	return 0;
}

static int __init tmigr_init_isolation(void)
{
	cpumask_var_t cpumask __free(free_cpumask_var) = CPUMASK_VAR_NULL;

	static_branch_enable(&tmigr_exclude_isolated);

	if (!housekeeping_enabled(HK_TYPE_DOMAIN))
		return 0;
	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_andnot(cpumask, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN));

	/* Protect against RCU torture hotplug testing */
	guard(cpus_read_lock)();
	return tmigr_isolated_exclude_cpumask(cpumask);
}
late_initcall(tmigr_init_isolation);
1633
static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
			     int node)
{
	union tmigr_state s;

	raw_spin_lock_init(&group->lock);

	group->level = lvl;
	group->numa_node = lvl < tmigr_crossnode_level ? node : NUMA_NO_NODE;

	group->num_children = 0;

	s.migrator = TMIGR_NONE;
	s.active = 0;
	s.seq = 0;
	atomic_set(&group->migr_state, s.state);

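	/*
	 * A new group starts out empty: no event is queued and the group's
	 * placeholder event is ignored until a remote timer is propagated
	 * into this group.
	 */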
	timerqueue_init_head(&group->events);
	timerqueue_init(&group->groupevt.nextevt);
	group->groupevt.nextevt.expires = KTIME_MAX;
	WRITE_ONCE(group->next_expiry, KTIME_MAX);
	group->groupevt.ignore = true;
}

static struct tmigr_group *tmigr_get_group(int node, unsigned int lvl)
{
	struct tmigr_group *tmp, *group = NULL;

	lockdep_assert_held(&tmigr_mutex);

	/* Try to attach to an existing group first */
	list_for_each_entry(tmp, &tmigr_level_list[lvl], list) {
		/*
		 * If @lvl is below the cross NUMA node level, check whether
		 * this group belongs to the same NUMA node.
		 */
		if (lvl < tmigr_crossnode_level && tmp->numa_node != node)
			continue;

		/* Capacity left? */
		if (tmp->num_children >= TMIGR_CHILDREN_PER_GROUP)
			continue;

		/*
		 * TODO: A possible further improvement: Make sure that all CPU
		 * siblings end up in the same group of the lowest level of the
		 * hierarchy. Relying on the topology sibling mask would be a
		 * reasonable solution.
		 */

		group = tmp;
		break;
	}

	if (group)
		return group;

	/* Allocate and set up a new group */
	group = kzalloc_node(sizeof(*group), GFP_KERNEL, node);
	if (!group)
		return ERR_PTR(-ENOMEM);

	tmigr_init_group(group, lvl, node);

	/* Setup successful. Add it to the hierarchy */
	list_add(&group->list, &tmigr_level_list[lvl]);
	trace_tmigr_group_set(group);
	return group;
}

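/*
 * Check whether @group is about to become the new top level (root) of the
 * hierarchy. Returns true if so, in which case the caller must account the
 * previous root as this group's first child.
 */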
static bool tmigr_init_root(struct tmigr_group *group, bool activate)
{
	if (!group->parent && group != tmigr_root) {
		/*
		 * This is the new top-level, prepare its groupmask in advance
		 * to avoid accidents where yet another new top-level is
		 * created in the future and made visible before this groupmask.
		 */
		group->groupmask = BIT(0);
		WARN_ON_ONCE(activate);

		return true;
	}

	return false;
}

static void tmigr_connect_child_parent(struct tmigr_group *child,
				       struct tmigr_group *parent,
				       bool activate)
{
	if (tmigr_init_root(parent, activate)) {
		/*
		 * The previous top level had prepared its groupmask already,
		 * simply account it in advance as the first child. If some
		 * groups have been created between the old and new root due
		 * to node mismatch, the new root's child will be initialized
		 * accordingly.
		 */
		parent->num_children = 1;
	}

	/* Connecting the old root to the new root? */
	if (!parent->parent && activate) {
		/*
		 * @child is the old top, or in case of node mismatch, some
		 * intermediate group between the old top and the new one in
		 * @parent. In this case @child must be pre-accounted above
		 * as the first child. Its new inactive sibling corresponding
		 * to the CPU going up has been accounted as the second child.
		 */
		WARN_ON_ONCE(parent->num_children != 2);
		child->groupmask = BIT(0);
	} else {
		/* Common case: adding @child for the CPU going up to @parent. */
		child->groupmask = BIT(parent->num_children++);
	}

	/*
	 * Make sure parent initialization is visible before publishing it to a
	 * racing CPU entering/exiting idle. This RELEASE barrier enforces an
	 * address dependency that pairs with the READ_ONCE() in __walk_groups().
	 */
	smp_store_release(&child->parent, parent);

	trace_tmigr_connect_child_parent(child);
}

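/*
 * Create the group hierarchy for @cpu on @node bottom-up and connect each
 * level to the next. When @start is given, only the levels above it are
 * (re)built; this is used to grow a new root above the previous one.
 */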
static int tmigr_setup_groups(unsigned int cpu, unsigned int node,
			      struct tmigr_group *start, bool activate)
{
	struct tmigr_group *group, *child, **stack;
	int i, top = 0, err = 0, start_lvl = 0;
	bool root_mismatch = false;

	stack = kzalloc_objs(*stack, tmigr_hierarchy_levels);
	if (!stack)
		return -ENOMEM;

	if (start) {
		stack[start->level] = start;
		start_lvl = start->level + 1;
	}

	if (tmigr_root)
		root_mismatch = tmigr_root->numa_node != node;

	for (i = start_lvl; i < tmigr_hierarchy_levels; i++) {
		group = tmigr_get_group(node, i);
		if (IS_ERR(group)) {
			err = PTR_ERR(group);
			i--;
			break;
		}

		top = i;
		stack[i] = group;

		/*
		 * When fewer CPUs are brought up than the system provides,
		 * not all calculated hierarchy levels are required, unless a
		 * node mismatch is detected.
		 *
		 * The loop is aborted as soon as the highest level, which
		 * might be different from tmigr_hierarchy_levels, contains
		 * only a single group, unless the nodes mismatch below
		 * tmigr_crossnode_level.
		 */
		if (group->parent)
			break;
		if ((!root_mismatch || i >= tmigr_crossnode_level) &&
		    list_is_singular(&tmigr_level_list[i]))
			break;
	}

	/* Assert single root without parent */
	if (WARN_ON_ONCE(i >= tmigr_hierarchy_levels)) {
		err = -EINVAL;
		goto out;
	}

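	/*
	 * Connect the levels top-down: by the time a child publishes its
	 * parent pointer, the parent's own connection upwards is already in
	 * place for concurrent walkers (see the RELEASE barrier in
	 * tmigr_connect_child_parent()).
	 */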
	for (; i >= start_lvl; i--) {
		group = stack[i];

		if (err < 0) {
			list_del(&group->list);
			kfree(group);
			continue;
		}

		WARN_ON_ONCE(i != group->level);

		/*
		 * Update tmc -> group / child -> group connection
		 */
		if (i == 0) {
			struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);

			tmc->tmgroup = group;
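			/*
			 * Reserve the next free child slot of the LVL0 group
			 * for this CPU, e.g. the third CPU connected to the
			 * group gets groupmask 0x04.
			 */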
			tmc->groupmask = BIT(group->num_children++);

			tmigr_init_root(group, activate);

			trace_tmigr_connect_cpu_parent(tmc);

			/* There are no children that need to be connected */
			continue;
		} else {
			child = stack[i - 1];
			tmigr_connect_child_parent(child, group, activate);
		}
	}

	if (err < 0)
		goto out;

	if (activate) {
		struct tmigr_walk data;
		union tmigr_state state;

		/*
		 * To prevent inconsistent states, active children need to be
		 * active in the new parent as well. Inactive children are
		 * already marked inactive in the parent group:
		 *
		 * * When new groups were created by tmigr_setup_groups()
		 *   starting from the lowest level, they are not active. They
		 *   will be set active when the new online CPU comes active.
		 *
		 * * But if new groups above the current top level are
		 *   required, it is mandatory to propagate the active state
		 *   of the already existing child to the new parents. So
		 *   tmigr_active_up() activates the new parents while walking
		 *   up from the old root to the new one.
		 *
		 * * It is ensured that @start is active, as this setup path
		 *   is executed in the hotplug prepare callback by an already
		 *   connected and !idle CPU. Even if all other CPUs go idle,
		 *   the CPU executing the setup will be responsible up to the
		 *   current top level group. And the next time it goes
		 *   inactive, it will release the new childmask and parent to
		 *   subsequent walkers through this @child. Therefore
		 *   propagate the active state unconditionally.
		 */
		state.state = atomic_read(&start->migr_state);
		WARN_ON_ONCE(!state.active);
		WARN_ON_ONCE(!start->parent);
		data.childmask = start->groupmask;
		__walk_groups_from(tmigr_active_up, &data, start, start->parent);
	}

	/* Root update */
	if (list_is_singular(&tmigr_level_list[top])) {
		group = list_first_entry(&tmigr_level_list[top],
					 typeof(*group), list);
		WARN_ON_ONCE(group->parent);
		if (tmigr_root) {
			/* The old root should be the same or below */
			WARN_ON_ONCE(tmigr_root->level > top);
		}
		tmigr_root = group;
	}
out:
	kfree(stack);

	return err;
}

static int tmigr_add_cpu(unsigned int cpu)
{
	struct tmigr_group *old_root = tmigr_root;
	int node = cpu_to_node(cpu);
	int ret;

	guard(mutex)(&tmigr_mutex);

	ret = tmigr_setup_groups(cpu, node, NULL, false);

	/* Root has changed? Connect the old one to the new one */
	if (ret >= 0 && old_root && old_root != tmigr_root) {
		/*
		 * The target CPU must never do the prepare work, except
		 * on early boot when the boot CPU is the target. Otherwise
		 * it may spuriously activate the old top level group inside
		 * the new one (regardless of whether the old top level group
		 * is active or not) and/or release an uninitialized childmask.
		 */
		WARN_ON_ONCE(cpu == raw_smp_processor_id());
		/*
		 * The (likely) current CPU is expected to be online in the
		 * hierarchy, otherwise the old root may not be active as
		 * expected.
		 */
		WARN_ON_ONCE(!per_cpu_ptr(&tmigr_cpu, raw_smp_processor_id())->available);
		ret = tmigr_setup_groups(-1, old_root->numa_node, old_root, true);
	}

	return ret;
}

static int tmigr_cpu_prepare(unsigned int cpu)
{
	struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
	int ret = 0;

	/* Not the first online attempt? */
	if (tmc->tmgroup)
		return ret;

	raw_spin_lock_init(&tmc->lock);
	timerqueue_init(&tmc->cpuevt.nextevt);
	tmc->cpuevt.nextevt.expires = KTIME_MAX;
	tmc->cpuevt.ignore = true;
	tmc->cpuevt.cpu = cpu;
	tmc->remote = false;
	WRITE_ONCE(tmc->wakeup, KTIME_MAX);

	ret = tmigr_add_cpu(cpu);
	if (ret < 0)
		return ret;

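	/* tmigr_add_cpu() must have connected this CPU to a LVL0 group */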
	if (tmc->groupmask == 0)
		return -EINVAL;

	return ret;
}

static int __init tmigr_init(void)
{
	unsigned int cpulvl, nodelvl, cpus_per_node, i;
	unsigned int nnodes = num_possible_nodes();
	unsigned int ncpus = num_possible_cpus();
	int ret = -ENOMEM;

	BUILD_BUG_ON_NOT_POWER_OF_2(TMIGR_CHILDREN_PER_GROUP);

	/* Nothing to do if running on UP */
	if (ncpus == 1)
		return 0;

	if (!zalloc_cpumask_var(&tmigr_available_cpumask, GFP_KERNEL))
		goto err;

	/*
	 * Calculate the required hierarchy levels. Unfortunately there is no
	 * reliable information available, unless all possible CPUs have been
	 * brought up and all NUMA nodes are populated.
	 *
	 * Estimate the number of levels with the number of possible nodes and
	 * the number of possible CPUs. Assume CPUs are spread evenly across
	 * nodes. We cannot rely on cpumask_of_node() because it only works for
	 * online CPUs.
	 */
	cpus_per_node = DIV_ROUND_UP(ncpus, nnodes);

	/* Calculate the hierarchy levels required to hold the CPUs of a node */
	cpulvl = DIV_ROUND_UP(order_base_2(cpus_per_node),
			      ilog2(TMIGR_CHILDREN_PER_GROUP));

	/* Calculate the extra levels to connect all nodes */
	nodelvl = DIV_ROUND_UP(order_base_2(nnodes),
			       ilog2(TMIGR_CHILDREN_PER_GROUP));

	tmigr_hierarchy_levels = cpulvl + nodelvl;

	/*
	 * If a NUMA node spawns more than one CPU level group then the next
	 * level(s) of the hierarchy contains groups which handle all CPU
	 * groups of the same NUMA node. The level above goes across NUMA
	 * nodes. Store this information for the setup code to decide in which
	 * level node matching is no longer required.
	 */
	tmigr_crossnode_level = cpulvl;
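
	/*
	 * Worked example (hypothetical machine): 256 possible CPUs spread
	 * evenly across 4 nodes with TMIGR_CHILDREN_PER_GROUP == 8:
	 *
	 *   cpus_per_node = 64 -> cpulvl  = DIV_ROUND_UP(6, 3) = 2
	 *   nnodes        = 4  -> nodelvl = DIV_ROUND_UP(2, 3) = 1
	 *
	 * This yields three hierarchy levels, with node matching required
	 * below level 2 (tmigr_crossnode_level).
	 */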

	tmigr_level_list = kzalloc_objs(struct list_head,
					tmigr_hierarchy_levels);
	if (!tmigr_level_list)
		goto err;

	for (i = 0; i < tmigr_hierarchy_levels; i++)
		INIT_LIST_HEAD(&tmigr_level_list[i]);

	pr_info("Timer migration: %d hierarchy levels; %d children per group; %d crossnode level\n",
		tmigr_hierarchy_levels, TMIGR_CHILDREN_PER_GROUP,
		tmigr_crossnode_level);

	ret = cpuhp_setup_state(CPUHP_TMIGR_PREPARE, "tmigr:prepare",
				tmigr_cpu_prepare, NULL);
	if (ret)
		goto err;

	ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
				tmigr_set_cpu_available, tmigr_clear_cpu_available);
	if (ret)
		goto err;

	return 0;

err:
	pr_err("Timer migration setup failed\n");
	return ret;
}
early_initcall(tmigr_init);