// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel CPU scheduler code
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
 */
#define INSTANTIATE_EXPORTED_MIGRATE_DISABLE
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
#include <linux/ktime_api.h>
#include <linux/sched/signal.h>
#include <linux/syscalls_api.h>
#include <linux/debug_locks.h>
#include <linux/prefetch.h>
#include <linux/capability.h>
#include <linux/pgtable_api.h>
#include <linux/wait_bit.h>
#include <linux/jiffies.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/hardirq.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/rt.h>

#include <linux/blkdev.h>
#include <linux/context_tracking.h>
#include <linux/cpuset.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/interrupt.h>
#include <linux/ioprio.h>
#include <linux/kallsyms.h>
#include <linux/kcov.h>
#include <linux/kprobes.h>
#include <linux/llist_api.h>
#include <linux/mmu_context.h>
#include <linux/mmzone.h>
#include <linux/mutex_api.h>
#include <linux/nmi.h>
#include <linux/nospec.h>
#include <linux/perf_event_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcuwait_api.h>
#include <linux/rseq.h>
#include <linux/sched/wake_q.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/vtime.h>
#include <linux/wait_api.h>
#include <linux/workqueue_api.h>
#include <linux/livepatch_sched.h>

#ifdef CONFIG_PREEMPT_DYNAMIC
# ifdef CONFIG_GENERIC_IRQ_ENTRY
#  include <linux/irq-entry-common.h>
# endif
#endif

#include <uapi/linux/sched/types.h>

#include <asm/irq_regs.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>

#define CREATE_TRACE_POINTS
#include <linux/sched/rseq_api.h>
#include <trace/events/sched.h>
#include <trace/events/ipi.h>
#undef CREATE_TRACE_POINTS

#include "sched.h"
#include "stats.h"

#include "autogroup.h"
#include "pelt.h"
#include "smp.h"

#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"
#include "../locking/mutex.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);

/*
 * Export tracepoints that act as a bare tracehook (ie: have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_entry_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_exit_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_set_need_resched_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_throttle_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_replenish_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_update_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_server_start_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_dl_server_stop_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);

#ifdef CONFIG_SCHED_PROXY_EXEC
DEFINE_STATIC_KEY_TRUE(__sched_proxy_exec);
static int __init setup_proxy_exec(char *str)
{
        bool proxy_enable = true;

        if (*str && kstrtobool(str + 1, &proxy_enable)) {
                pr_warn("Unable to parse sched_proxy_exec=\n");
                return 0;
        }

        if (proxy_enable) {
                pr_info("sched_proxy_exec enabled via boot arg\n");
                static_branch_enable(&__sched_proxy_exec);
        } else {
                pr_info("sched_proxy_exec disabled via boot arg\n");
                static_branch_disable(&__sched_proxy_exec);
        }
        return 1;
}
#else
static int __init setup_proxy_exec(char *str)
{
        pr_warn("CONFIG_SCHED_PROXY_EXEC=n, so it cannot be enabled or disabled at boot time\n");
        return 0;
}
#endif
__setup("sched_proxy_exec", setup_proxy_exec);
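
/*
 * Example (illustrative only, derived from the parser above): booting with
 * "sched_proxy_exec" or "sched_proxy_exec=1" enables proxy execution, while
 * "sched_proxy_exec=0" disables it. @str points at the '=' (or at the
 * terminating NUL when no value is given), hence the kstrtobool(str + 1, ...).
 */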

/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constant propagation
 * at compile time and compiler optimization based on the feature defaults.
 */
#define SCHED_FEAT(name, enabled)       \
        (1UL << __SCHED_FEAT_##name) * enabled |
__read_mostly unsigned int sysctl_sched_features =
#include "features.h"
        0;
#undef SCHED_FEAT
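
/*
 * Illustration of the trick above (assuming features.h contains, e.g.,
 * SCHED_FEAT(PLACE_LAG, true) and SCHED_FEAT(HRTICK, false)), the #include
 * expands to:
 *
 *      __read_mostly unsigned int sysctl_sched_features =
 *              (1UL << __SCHED_FEAT_PLACE_LAG) * true |
 *              (1UL << __SCHED_FEAT_HRTICK) * false |
 *              0;
 *
 * i.e. a compile-time constant bitmask of the default-enabled feature bits.
 */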

/*
 * Print a warning if need_resched is set for the given duration (if
 * LATENCY_WARN is enabled).
 *
 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
 * per boot.
 */
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
__read_mostly unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;

__read_mostly int scheduler_running;

#ifdef CONFIG_SCHED_CORE

DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

/* kernel prio, less is more */
static inline int __task_prio(const struct task_struct *p)
{
        if (p->sched_class == &stop_sched_class) /* trumps deadline */
                return -2;

        if (p->dl_server)
                return -1; /* deadline */

        if (rt_or_dl_prio(p->prio))
                return p->prio; /* [-1, 99] */

        if (p->sched_class == &idle_sched_class)
                return MAX_RT_PRIO + NICE_WIDTH; /* 140 */

        if (task_on_scx(p))
                return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */

        return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
}

/*
 * l(a,b)
 * le(a,b) := !l(b,a)
 * g(a,b)  :=  l(b,a)
 * ge(a,b) := !l(a,b)
 */

/* real prio, less is less */
static inline bool prio_less(const struct task_struct *a,
                             const struct task_struct *b, bool in_fi)
{
        int pa = __task_prio(a), pb = __task_prio(b);

        if (-pa < -pb)
                return true;

        if (-pb < -pa)
                return false;

        if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
                const struct sched_dl_entity *a_dl, *b_dl;

                a_dl = &a->dl;
                /*
                 * Since 'a' and 'b' can be CFS tasks served by a DL server,
                 * __task_prio() can return -1 (for DL) even for those. In that
                 * case, get to the dl_server's DL entity.
                 */
                if (a->dl_server)
                        a_dl = a->dl_server;

                b_dl = &b->dl;
                if (b->dl_server)
                        b_dl = b->dl_server;

                return !dl_time_before(a_dl->deadline, b_dl->deadline);
        }

        if (pa == MAX_RT_PRIO + MAX_NICE)       /* fair */
                return cfs_prio_less(a, b, in_fi);

#ifdef CONFIG_SCHED_CLASS_EXT
        if (pa == MAX_RT_PRIO + MAX_NICE + 1)   /* ext */
                return scx_prio_less(a, b, in_fi);
#endif

        return false;
}

static inline bool __sched_core_less(const struct task_struct *a,
                                     const struct task_struct *b)
{
        if (a->core_cookie < b->core_cookie)
                return true;

        if (a->core_cookie > b->core_cookie)
                return false;

        /* flip prio, so high prio is leftmost */
        if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
                return true;

        return false;
}

#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)

static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
        return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}

static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
        const struct task_struct *p = __node_2_sc(node);
        unsigned long cookie = (unsigned long)key;

        if (cookie < p->core_cookie)
                return -1;

        if (cookie > p->core_cookie)
                return 1;

        return 0;
}

void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
        if (p->se.sched_delayed)
                return;

        rq->core->core_task_seq++;

        if (!p->core_cookie)
                return;

        rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}

void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
        if (p->se.sched_delayed)
                return;

        rq->core->core_task_seq++;

        if (sched_core_enqueued(p)) {
                rb_erase(&p->core_node, &rq->core_tree);
                RB_CLEAR_NODE(&p->core_node);
        }

        /*
         * Migrating the last task off the CPU, with the CPU in forced idle
         * state. Reschedule to create an accounting edge for forced idle,
         * and re-examine whether the core is still in forced idle state.
         */
        if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
            rq->core->core_forceidle_count && rq->curr == rq->idle)
                resched_curr(rq);
}

static int sched_task_is_throttled(struct task_struct *p, int cpu)
{
        if (p->sched_class->task_is_throttled)
                return p->sched_class->task_is_throttled(p, cpu);

        return 0;
}

static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
{
        struct rb_node *node = &p->core_node;
        int cpu = task_cpu(p);

        do {
                node = rb_next(node);
                if (!node)
                        return NULL;

                p = __node_2_sc(node);
                if (p->core_cookie != cookie)
                        return NULL;

        } while (sched_task_is_throttled(p, cpu));

        return p;
}

/*
 * Find the left-most (i.e., highest priority) unthrottled task matching
 * @cookie. If no suitable task is found, NULL is returned.
 */
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
        struct task_struct *p;
        struct rb_node *node;

        node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
        if (!node)
                return NULL;

        p = __node_2_sc(node);
        if (!sched_task_is_throttled(p, rq->cpu))
                return p;

        return sched_core_next(p, cookie);
}

/*
 * Magic required such that:
 *
 *      raw_spin_rq_lock(rq);
 *      ...
 *      raw_spin_rq_unlock(rq);
 *
 * ends up locking and unlocking the _same_ lock, and all CPUs
 * always agree on what rq has what lock.
 *
 * XXX entirely possible to selectively enable cores, don't bother for now.
 */

static DEFINE_MUTEX(sched_core_mutex);
static atomic_t sched_core_count;
static struct cpumask sched_core_mask;

static void sched_core_lock(int cpu, unsigned long *flags)
        __context_unsafe(/* acquires multiple */)
        __acquires(&runqueues.__lock) /* overapproximation */
{
        const struct cpumask *smt_mask = cpu_smt_mask(cpu);
        int t, i = 0;

        local_irq_save(*flags);
        for_each_cpu(t, smt_mask)
                raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}

static void sched_core_unlock(int cpu, unsigned long *flags)
        __context_unsafe(/* releases multiple */)
        __releases(&runqueues.__lock) /* overapproximation */
{
        const struct cpumask *smt_mask = cpu_smt_mask(cpu);
        int t;

        for_each_cpu(t, smt_mask)
                raw_spin_unlock(&cpu_rq(t)->__lock);
        local_irq_restore(*flags);
}

static void __sched_core_flip(bool enabled)
{
        unsigned long flags;
        int cpu, t;

        cpus_read_lock();

        /*
         * Toggle the online cores, one by one.
         */
        cpumask_copy(&sched_core_mask, cpu_online_mask);
        for_each_cpu(cpu, &sched_core_mask) {
                const struct cpumask *smt_mask = cpu_smt_mask(cpu);

                sched_core_lock(cpu, &flags);

                for_each_cpu(t, smt_mask)
                        cpu_rq(t)->core_enabled = enabled;

                cpu_rq(cpu)->core->core_forceidle_start = 0;

                sched_core_unlock(cpu, &flags);

                cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
        }

        /*
         * Toggle the offline CPUs.
         */
        for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
                cpu_rq(cpu)->core_enabled = enabled;

        cpus_read_unlock();
}

static void sched_core_assert_empty(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
}

static void __sched_core_enable(void)
{
        static_branch_enable(&__sched_core_enabled);
        /*
         * Ensure all previous instances of raw_spin_rq_*lock() have finished
         * and future ones will observe !sched_core_disabled().
         */
        synchronize_rcu();
        __sched_core_flip(true);
        sched_core_assert_empty();
}

static void __sched_core_disable(void)
{
        sched_core_assert_empty();
        __sched_core_flip(false);
        static_branch_disable(&__sched_core_enabled);
}

void sched_core_get(void)
{
        if (atomic_inc_not_zero(&sched_core_count))
                return;

        mutex_lock(&sched_core_mutex);
        if (!atomic_read(&sched_core_count))
                __sched_core_enable();

        smp_mb__before_atomic();
        atomic_inc(&sched_core_count);
        mutex_unlock(&sched_core_mutex);
}

static void __sched_core_put(struct work_struct *work)
{
        if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
                __sched_core_disable();
                mutex_unlock(&sched_core_mutex);
        }
}

void sched_core_put(void)
{
        static DECLARE_WORK(_work, __sched_core_put);

        /*
         * "There can be only one"
         *
         * Either this is the last one, or we don't actually need to do any
         * 'work'. If it is the last *again*, we rely on
         * WORK_STRUCT_PENDING_BIT.
         */
        if (!atomic_add_unless(&sched_core_count, -1, 1))
                schedule_work(&_work);
}
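
/*
 * Sketch of the refcounting scheme above (illustrative, not additional API):
 * the first sched_core_get() flips the static key and enables core
 * scheduling system-wide; later callers only bump sched_core_count.
 * sched_core_put() defers the final disable to a workqueue so it may be
 * called from atomic context:
 *
 *      sched_core_get();       // first user enables core scheduling
 *      ...                     // create/use core-sched cookies
 *      sched_core_put();       // last user schedules __sched_core_put()
 */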

#else /* !CONFIG_SCHED_CORE: */

static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void
sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }

#endif /* !CONFIG_SCHED_CORE */

/* need a wrapper since we may need to trace from modules */
EXPORT_TRACEPOINT_SYMBOL(sched_set_state_tp);

/* Call via the helper macro trace_set_current_state. */
void __trace_set_current_state(int state_value)
{
        trace_sched_set_state_tp(current, state_value);
}
EXPORT_SYMBOL(__trace_set_current_state);

/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ].
 *
 * Task wakeup, specifically wakeups that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System-calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():   p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():          p->se.load, p->*prio
 *  - __sched_setscheduler():   p->sched_class, p->policy, p->*prio,
 *                              p->se.load, p->rt_priority,
 *                              p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():          p->numa_preferred_nid
 *  - sched_move_task():        p->sched_task_group
 *  - uclamp_update_active():   p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task()/block_task(),
 *   under rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 *   Additionally it is possible to be ->on_rq but still be considered not
 *   runnable when p->se.sched_delayed is true. These tasks are on the runqueue
 *   but will be dequeued as soon as they get picked again. See the
 *   task_is_runnable() helper.
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 *   [ The astute reader will observe that it is possible for two tasks on one
 *     CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *    o move_queued_task()
 *    o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *    o __migrate_swap_task()
 *    o push_rt_task() / pull_rt_task()
 *    o push_dl_task() / pull_dl_task()
 *    o dl_task_offline_migration()
 */
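
/*
 * A minimal usage sketch of the "special state" locking described above
 * (illustrative; the real callers live in the syscall and migration paths):
 *
 *      struct rq_flags rf;
 *      struct rq *rq;
 *
 *      rq = task_rq_lock(p, &rf);      // takes p->pi_lock and rq->lock
 *      // p->policy, p->sched_class, p->cpus_ptr, ... are stable here
 *      task_rq_unlock(rq, p, &rf);
 */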

void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
        __context_unsafe()
{
        raw_spinlock_t *lock;

        /* Matches synchronize_rcu() in __sched_core_enable() */
        preempt_disable();
        if (sched_core_disabled()) {
                raw_spin_lock_nested(&rq->__lock, subclass);
                /* preempt_count *MUST* be > 1 */
                preempt_enable_no_resched();
                return;
        }

        for (;;) {
                lock = __rq_lockp(rq);
                raw_spin_lock_nested(lock, subclass);
                if (likely(lock == __rq_lockp(rq))) {
                        /* preempt_count *MUST* be > 1 */
                        preempt_enable_no_resched();
                        return;
                }
                raw_spin_unlock(lock);
        }
}

bool raw_spin_rq_trylock(struct rq *rq)
        __context_unsafe()
{
        raw_spinlock_t *lock;
        bool ret;

        /* Matches synchronize_rcu() in __sched_core_enable() */
        preempt_disable();
        if (sched_core_disabled()) {
                ret = raw_spin_trylock(&rq->__lock);
                preempt_enable();
                return ret;
        }

        for (;;) {
                lock = __rq_lockp(rq);
                ret = raw_spin_trylock(lock);
                if (!ret || (likely(lock == __rq_lockp(rq)))) {
                        preempt_enable();
                        return ret;
                }
                raw_spin_unlock(lock);
        }
}

/*
 * double_rq_lock - safely lock two runqueues
 */
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
        lockdep_assert_irqs_disabled();

        if (rq_order_less(rq2, rq1))
                swap(rq1, rq2);

        raw_spin_rq_lock(rq1);
        if (__rq_lockp(rq1) != __rq_lockp(rq2))
                raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
        else
                __acquire_ctx_lock(__rq_lockp(rq2)); /* fake acquire */

        double_rq_clock_clear_update(rq1, rq2);
}

/*
 * ___task_rq_lock - lock the rq @p resides on.
 */
struct rq *___task_rq_lock(struct task_struct *p, struct rq_flags *rf)
{
        struct rq *rq;

        lockdep_assert_held(&p->pi_lock);

        for (;;) {
                rq = task_rq(p);
                raw_spin_rq_lock(rq);
                if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
                        rq_pin_lock(rq, rf);
                        return rq;
                }
                raw_spin_rq_unlock(rq);

                while (unlikely(task_on_rq_migrating(p)))
                        cpu_relax();
        }
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *_task_rq_lock(struct task_struct *p, struct rq_flags *rf)
{
        struct rq *rq;

        for (;;) {
                raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
                rq = task_rq(p);
                raw_spin_rq_lock(rq);
                /*
                 *      move_queued_task()              task_rq_lock()
                 *
                 *      ACQUIRE (rq->lock)
                 *      [S] ->on_rq = MIGRATING         [L] rq = task_rq()
                 *      WMB (__set_task_cpu())          ACQUIRE (rq->lock);
                 *      [S] ->cpu = new_cpu             [L] task_rq()
                 *                                      [L] ->on_rq
                 *      RELEASE (rq->lock)
                 *
                 * If we observe the old CPU in task_rq_lock(), the acquire of
                 * the old rq->lock will fully serialize against the stores.
                 *
                 * If we observe the new CPU in task_rq_lock(), the address
                 * dependency headed by '[L] rq = task_rq()' and the acquire
                 * will pair with the WMB to ensure we then also see migrating.
                 */
                if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
                        rq_pin_lock(rq, rf);
                        return rq;
                }
                raw_spin_rq_unlock(rq);
                raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

                while (unlikely(task_on_rq_migrating(p)))
                        cpu_relax();
        }
}

/*
 * RQ-clock updating methods:
 */

/* Use CONFIG_PARAVIRT as this will avoid more #ifdef in arch code. */
#ifdef CONFIG_PARAVIRT
struct static_key paravirt_steal_rq_enabled;
#endif

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
        /*
         * In theory, the compiler should just see 0 here, and optimize out
         * the call to sched_rt_avg_update. But I don't trust it...
         */
        s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
        if (irqtime_enabled()) {
                irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

                /*
                 * Since irq_time is only updated on {soft,}irq_exit, we might run into
                 * this case when a previous update_rq_clock() happened inside a
                 * {soft,}IRQ region.
                 *
                 * When this happens, we stop ->clock_task and only update the
                 * prev_irq_time stamp to account for the part that fit, so that a next
                 * update will consume the rest. This ensures ->clock_task is
                 * monotonic.
                 *
                 * It does however cause some slight misattribution of {soft,}IRQ
                 * time; a more accurate solution would be to update the irq_time using
                 * the current rq->clock timestamp, except that would require using
                 * atomic ops.
                 */
                if (irq_delta > delta)
                        irq_delta = delta;

                rq->prev_irq_time += irq_delta;
                delta -= irq_delta;
                delayacct_irq(rq->curr, irq_delta);
        }
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
        if (static_key_false(&paravirt_steal_rq_enabled)) {
                u64 prev_steal;

                steal = prev_steal = paravirt_steal_clock(cpu_of(rq));
                steal -= rq->prev_steal_time_rq;

                if (unlikely(steal > delta))
                        steal = delta;

                rq->prev_steal_time_rq = prev_steal;
                delta -= steal;
        }
#endif

        rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
        if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
                update_irq_load_avg(rq, irq_delta + steal);
#endif
        update_rq_clock_pelt(rq, delta);
}

void update_rq_clock(struct rq *rq)
{
        s64 delta;
        u64 clock;

        lockdep_assert_rq_held(rq);

        if (rq->clock_update_flags & RQCF_ACT_SKIP)
                return;

        if (sched_feat(WARN_DOUBLE_CLOCK))
                WARN_ON_ONCE(rq->clock_update_flags & RQCF_UPDATED);
        rq->clock_update_flags |= RQCF_UPDATED;

        clock = sched_clock_cpu(cpu_of(rq));
        scx_rq_clock_update(rq, clock);

        delta = clock - rq->clock;
        if (delta < 0)
                return;
        rq->clock += delta;

        update_rq_clock_task(rq, delta);
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

enum {
        HRTICK_SCHED_NONE               = 0,
        HRTICK_SCHED_DEFER              = BIT(1),
        HRTICK_SCHED_START              = BIT(2),
        HRTICK_SCHED_REARM_HRTIMER      = BIT(3)
};

static void __used hrtick_clear(struct rq *rq)
{
        if (hrtimer_active(&rq->hrtick_timer))
                hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
        struct rq *rq = container_of(timer, struct rq, hrtick_timer);
        struct rq_flags rf;

        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

        rq_lock(rq, &rf);
        update_rq_clock(rq);
        rq->donor->sched_class->task_tick(rq, rq->donor, 1);
        rq_unlock(rq, &rf);

        return HRTIMER_NORESTART;
}

static inline bool hrtick_needs_rearm(struct hrtimer *timer, ktime_t expires)
{
        /*
         * Queued is false when the timer is not started or is currently
         * running the callback. In both cases, restart. If queued, check
         * whether the expiry time actually changes substantially.
         */
        return !hrtimer_is_queued(timer) ||
               abs(expires - hrtimer_get_expires(timer)) > 5000;
}

static void hrtick_cond_restart(struct rq *rq)
{
        struct hrtimer *timer = &rq->hrtick_timer;
        ktime_t time = rq->hrtick_time;

        if (hrtick_needs_rearm(timer, time))
                hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
        struct rq *rq = arg;
        struct rq_flags rf;

        rq_lock(rq, &rf);
        hrtick_cond_restart(rq);
        rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and IRQs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
        s64 delta;

        /*
         * Don't schedule slices shorter than 10000ns, that just
         * doesn't make sense and can cause timer DoS.
         */
        delta = max_t(s64, delay, 10000LL);

        /*
         * If this is in the middle of schedule(), only note the delay
         * and let hrtick_schedule_exit() deal with it.
         */
        if (rq->hrtick_sched) {
                rq->hrtick_sched |= HRTICK_SCHED_START;
                rq->hrtick_delay = delta;
                return;
        }

        rq->hrtick_time = ktime_add_ns(ktime_get(), delta);
        if (!hrtick_needs_rearm(&rq->hrtick_timer, rq->hrtick_time))
                return;

        if (rq == this_rq())
                hrtimer_start(&rq->hrtick_timer, rq->hrtick_time, HRTIMER_MODE_ABS_PINNED_HARD);
        else
                smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

static inline void hrtick_schedule_enter(struct rq *rq)
{
        rq->hrtick_sched = HRTICK_SCHED_DEFER;
        if (hrtimer_test_and_clear_rearm_deferred())
                rq->hrtick_sched |= HRTICK_SCHED_REARM_HRTIMER;
}

static inline void hrtick_schedule_exit(struct rq *rq)
{
        if (rq->hrtick_sched & HRTICK_SCHED_START) {
                rq->hrtick_time = ktime_add_ns(ktime_get(), rq->hrtick_delay);
                hrtick_cond_restart(rq);
        } else if (idle_rq(rq)) {
                /*
                 * No need for using hrtimer_active(). The timer is CPU local
                 * and interrupts are disabled, so the callback cannot be
                 * running and the queued state is valid.
                 */
                if (hrtimer_is_queued(&rq->hrtick_timer))
                        hrtimer_cancel(&rq->hrtick_timer);
        }

        if (rq->hrtick_sched & HRTICK_SCHED_REARM_HRTIMER)
                __hrtimer_rearm_deferred();

        rq->hrtick_sched = HRTICK_SCHED_NONE;
}

static void hrtick_rq_init(struct rq *rq)
{
        INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
        rq->hrtick_sched = HRTICK_SCHED_NONE;
        hrtimer_setup(&rq->hrtick_timer, hrtick, CLOCK_MONOTONIC,
                      HRTIMER_MODE_REL_HARD | HRTIMER_MODE_LAZY_REARM);
}
#else /* !CONFIG_SCHED_HRTICK: */
static inline void hrtick_clear(struct rq *rq) { }
static inline void hrtick_rq_init(struct rq *rq) { }
static inline void hrtick_schedule_enter(struct rq *rq) { }
static inline void hrtick_schedule_exit(struct rq *rq) { }
#endif /* !CONFIG_SCHED_HRTICK */

/*
 * try_cmpxchg based fetch_or() macro so it works for different integer types:
 */
#define fetch_or(ptr, mask)                                             \
        ({                                                              \
                typeof(ptr) _ptr = (ptr);                               \
                typeof(mask) _mask = (mask);                            \
                typeof(*_ptr) _val = *_ptr;                             \
                                                                        \
                do {                                                    \
                } while (!try_cmpxchg(_ptr, &_val, _val | _mask));      \
        _val;                                                           \
})
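
/*
 * Worked example (illustrative): with flags == 0x1, fetch_or(&flags, 0x4)
 * atomically sets flags to 0x5 and evaluates to the old value 0x1, retrying
 * the try_cmpxchg() if another CPU modified flags concurrently.
 * set_nr_and_not_polling() below relies on the returned old value to test
 * TIF_POLLING_NRFLAG.
 */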

#ifdef TIF_POLLING_NRFLAG
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
{
        return !(fetch_or(&ti->flags, 1 << tif) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
        struct thread_info *ti = task_thread_info(p);
        typeof(ti->flags) val = READ_ONCE(ti->flags);

        do {
                if (!(val & _TIF_POLLING_NRFLAG))
                        return false;
                if (val & _TIF_NEED_RESCHED)
                        return true;
        } while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));

        return true;
}

#else
static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
{
        set_ti_thread_flag(ti, tif);
        return true;
}

static inline bool set_nr_if_polling(struct task_struct *p)
{
        return false;
}
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
        struct wake_q_node *node = &task->wake_q;

        /*
         * Atomically grab the task, if ->wake_q is !nil already it means
         * it's already queued (either by us or someone else) and will get the
         * wakeup due to that.
         *
         * In order to ensure that a pending wakeup will observe our pending
         * state, even in the failed case, an explicit smp_mb() must be used.
         */
        smp_mb__before_atomic();
        if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
                return false;

        /*
         * The head is context local, there can be no concurrency.
         */
        *head->lastp = node;
        head->lastp = &node->next;
        return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
        if (__wake_q_add(head, task))
                get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold a reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending on whether or not the @task is
 * already queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
        if (!__wake_q_add(head, task))
                put_task_struct(task);
}

void wake_up_q(struct wake_q_head *head)
{
        struct wake_q_node *node = head->first;

        while (node != WAKE_Q_TAIL) {
                struct task_struct *task;

                task = container_of(node, struct task_struct, wake_q);
                node = node->next;
                /* pairs with cmpxchg_relaxed() in __wake_q_add() */
                WRITE_ONCE(task->wake_q.next, NULL);
                /* Task can safely be re-inserted now. */

                /*
                 * wake_up_process() executes a full barrier, which pairs with
                 * the queueing in wake_q_add() so as not to miss wakeups.
                 */
                wake_up_process(task);
                put_task_struct(task);
        }
}
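
/*
 * Typical wake_q usage, as a sketch (illustrative; real callers include the
 * futex and mutex slow paths, DEFINE_WAKE_Q() comes from
 * <linux/sched/wake_q.h>, and 'some_lock' is hypothetical):
 *
 *      DEFINE_WAKE_Q(wake_q);
 *
 *      spin_lock(&some_lock);
 *      wake_q_add(&wake_q, task);      // only queues, no wakeup yet
 *      spin_unlock(&some_lock);
 *      wake_up_q(&wake_q);             // do the wakeups outside the lock
 */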

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
static void __resched_curr(struct rq *rq, int tif)
{
        struct task_struct *curr = rq->curr;
        struct thread_info *cti = task_thread_info(curr);
        int cpu;

        lockdep_assert_rq_held(rq);

        /*
         * Always immediately preempt the idle task; no point in delaying doing
         * actual work.
         */
        if (is_idle_task(curr) && tif == TIF_NEED_RESCHED_LAZY)
                tif = TIF_NEED_RESCHED;

        if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED))
                return;

        cpu = cpu_of(rq);

        trace_sched_set_need_resched_tp(curr, cpu, tif);
        if (cpu == smp_processor_id()) {
                set_ti_thread_flag(cti, tif);
                if (tif == TIF_NEED_RESCHED)
                        set_preempt_need_resched();
                return;
        }

        if (set_nr_and_not_polling(cti, tif)) {
                if (tif == TIF_NEED_RESCHED)
                        smp_send_reschedule(cpu);
        } else {
                trace_sched_wake_idle_without_ipi(cpu);
        }
}

void __trace_set_need_resched(struct task_struct *curr, int tif)
{
        trace_sched_set_need_resched_tp(curr, smp_processor_id(), tif);
}
EXPORT_SYMBOL_GPL(__trace_set_need_resched);

void resched_curr(struct rq *rq)
{
        __resched_curr(rq, TIF_NEED_RESCHED);
}

#ifdef CONFIG_PREEMPT_DYNAMIC
static DEFINE_STATIC_KEY_FALSE(sk_dynamic_preempt_lazy);
static __always_inline bool dynamic_preempt_lazy(void)
{
        return static_branch_unlikely(&sk_dynamic_preempt_lazy);
}
#else
static __always_inline bool dynamic_preempt_lazy(void)
{
        return IS_ENABLED(CONFIG_PREEMPT_LAZY);
}
#endif

static __always_inline int get_lazy_tif_bit(void)
{
        if (dynamic_preempt_lazy())
                return TIF_NEED_RESCHED_LAZY;

        return TIF_NEED_RESCHED;
}

void resched_curr_lazy(struct rq *rq)
{
        __resched_curr(rq, get_lazy_tif_bit());
}

void resched_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

        raw_spin_rq_lock_irqsave(rq, flags);
        if (cpu_online(cpu) || cpu == smp_processor_id())
                resched_curr(rq);
        raw_spin_rq_unlock_irqrestore(rq, flags);
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle CPU would add more delay to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
        int i, cpu = smp_processor_id(), default_cpu = -1;
        struct sched_domain *sd;
        const struct cpumask *hk_mask;

        if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) {
                if (!idle_cpu(cpu))
                        return cpu;
                default_cpu = cpu;
        }

        hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);

        guard(rcu)();

        for_each_domain(cpu, sd) {
                for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
                        if (cpu == i)
                                continue;

                        if (!idle_cpu(i))
                                return i;
                }
        }

        if (default_cpu == -1)
                default_cpu = housekeeping_any_cpu(HK_TYPE_KERNEL_NOISE);

        return default_cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        if (cpu == smp_processor_id())
                return;

        /*
         * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
         * part of the idle loop. This forces an exit from the idle loop
         * and a round trip to schedule(). Now this could be optimized
         * because a simple new idle loop iteration is enough to
         * re-evaluate the next tick. Provided some re-ordering of tick
         * nohz functions that would need to follow TIF_POLLING_NRFLAG
         * clearing:
         *
         * - On most architectures, a simple fetch_or on ti::flags with a
         *   "0" value would be enough to know if an IPI needs to be sent.
         *
         * - x86 needs to perform a last need_resched() check between
         *   monitor and mwait which doesn't take timers into account.
         *   There a dedicated TIF_TIMER flag would be required to
         *   fetch_or here and be checked along with TIF_NEED_RESCHED
         *   before mwait().
         *
         * However, remote timer enqueue is not such a frequent event
         * and testing of the above solutions didn't appear to report
         * much benefit.
         */
        if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED))
                smp_send_reschedule(cpu);
        else
                trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
        /*
         * We just need the target to call irq_exit() and re-evaluate
         * the next tick. The nohz full kick at least implies that.
         * If needed we can still optimize that later with an
         * empty IRQ.
         */
        if (cpu_is_offline(cpu))
                return true; /* Don't try to wake offline CPUs. */
        if (tick_nohz_full_cpu(cpu)) {
                if (cpu != smp_processor_id() ||
                    tick_nohz_tick_stopped())
                        tick_nohz_full_kick_cpu(cpu);
                return true;
        }

        return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
        if (!wake_up_full_nohz_cpu(cpu))
                wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
        struct rq *rq = info;
        int cpu = cpu_of(rq);
        unsigned int flags;

        /*
         * Release the rq::nohz_csd.
         */
        flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
        WARN_ON(!(flags & NOHZ_KICK_MASK));

        rq->idle_balance = idle_cpu(cpu);
        if (rq->idle_balance) {
                rq->nohz_idle_balance = flags;
                __raise_softirq_irqoff(SCHED_SOFTIRQ);
        }
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
{
        if (rq->nr_running != 1)
                return false;

        if (p->sched_class != &fair_sched_class)
                return false;

        if (!task_on_rq_queued(p))
                return false;

        return true;
}

bool sched_can_stop_tick(struct rq *rq)
{
        int fifo_nr_running;

        /* Deadline tasks, even if single, need the tick */
        if (rq->dl.dl_nr_running)
                return false;

        /*
         * If there is more than one RR task, we need the tick to affect the
         * actual RR behaviour.
         */
        if (rq->rt.rr_nr_running) {
                if (rq->rt.rr_nr_running == 1)
                        return true;
                else
                        return false;
        }

        /*
         * If there are no RR tasks, but there are FIFO tasks, we can skip the
         * tick: there is no forced preemption between FIFO tasks.
         */
        fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
        if (fifo_nr_running)
                return true;

        /*
         * If there are no DL, RR or FIFO tasks, there must only be CFS or SCX
         * tasks left. For CFS, if there's more than one we need the tick for
         * involuntary preemption. For SCX, ask.
         */
        if (scx_enabled() && !scx_can_stop_tick(rq))
                return false;

        if (rq->cfs.h_nr_queued > 1)
                return false;

        /*
         * If there is one task and it has CFS runtime bandwidth constraints
         * and it's on the CPU now, we don't want to stop the tick.
         * This check prevents clearing the bit if a newly enqueued task here is
         * dequeued by migrating while the constrained task continues to run.
         * E.g. going from 2->1 without going through pick_next_task().
         */
        if (__need_bw_check(rq, rq->curr)) {
                if (cfs_task_bw_constrained(rq->curr))
                        return false;
        }

        return true;
}
#endif /* CONFIG_NO_HZ_FULL */

#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_FAIR_GROUP_SCHED)
/*
 * Iterate the task_group tree rooted at *from, calling @down when first
 * entering a node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_read_lock() or a sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
                      tg_visitor down, tg_visitor up, void *data)
{
        struct task_group *parent, *child;
        int ret;

        parent = from;

down:
        ret = (*down)(parent, data);
        if (ret)
                goto out;
        list_for_each_entry_rcu(child, &parent->children, siblings) {
                parent = child;
                goto down;

up:
                continue;
        }
        ret = (*up)(parent, data);
        if (ret || parent == from)
                goto out;

        child = parent;
        parent = parent->parent;
        if (parent)
                goto up;
out:
        return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
        return 0;
}
#endif
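
/*
 * Usage sketch for walk_tg_tree_from() (illustrative; tg_visit is a
 * hypothetical visitor, tg_nop above can stand in for either direction):
 *
 *      static int tg_visit(struct task_group *tg, void *data)
 *      {
 *              // inspect tg; a non-zero return value aborts the walk
 *              return 0;
 *      }
 *
 *      rcu_read_lock();
 *      walk_tg_tree_from(&root_task_group, tg_visit, tg_nop, NULL);
 *      rcu_read_unlock();
 */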

void set_load_weight(struct task_struct *p, bool update_load)
{
        int prio = p->static_prio - MAX_RT_PRIO;
        struct load_weight lw;

        if (task_has_idle_policy(p)) {
                lw.weight = scale_load(WEIGHT_IDLEPRIO);
                lw.inv_weight = WMULT_IDLEPRIO;
        } else {
                lw.weight = scale_load(sched_prio_to_weight[prio]);
                lw.inv_weight = sched_prio_to_wmult[prio];
        }

        /*
         * SCHED_OTHER tasks have to update their load when changing their
         * weight.
         */
        if (update_load && p->sched_class->reweight_task)
                p->sched_class->reweight_task(task_rq(p), p, &lw);
        else
                p->se.load = lw;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static __maybe_unused DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/*
 * By default RT tasks run at the maximum performance point/capacity of the
 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 * SCHED_CAPACITY_SCALE.
 *
 * This knob allows admins to change the default behavior when uclamp is being
 * used. In battery powered devices, particularly, running at the maximum
 * capacity and frequency will increase energy consumption and shorten the
 * battery life.
 *
 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
 *
 * This knob will not override the system default sched_util_clamp_min defined
 * above.
 */
unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less than or equal to these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/*
 * This static key is used to reduce the uclamp overhead in the fast path. It
 * primarily disables the call to uclamp_rq_{inc, dec}() in
 * enqueue/dequeue_task().
 *
 * This allows users to continue to enable uclamp in their kernel config with
 * minimum uclamp overhead in the fast path.
 *
 * As soon as userspace modifies any of the uclamp knobs, the static key is
 * enabled, since we then have actual users that make use of the uclamp
 * functionality.
 *
 * The knobs that would enable this static key are:
 *
 *   * A task modifying its uclamp value with sched_setattr().
 *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 *   * An admin modifying the cgroup cpu.uclamp.{min, max}.
 */
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
                  unsigned int clamp_value)
{
        /*
         * Avoid blocked utilization pushing up the frequency when we go
         * idle (which drops the max-clamp) by retaining the last known
         * max-clamp.
         */
        if (clamp_id == UCLAMP_MAX) {
                rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
                return clamp_value;
        }

        return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
                                     unsigned int clamp_value)
{
        /* Reset max-clamp retention only on idle exit */
        if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
                return;

        uclamp_rq_set(rq, clamp_id, clamp_value);
}

static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
                                 unsigned int clamp_value)
{
        struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
        int bucket_id = UCLAMP_BUCKETS - 1;

        /*
         * Since both min and max clamps are max aggregated, find the
         * topmost bucket with tasks in it.
         */
        for ( ; bucket_id >= 0; bucket_id--) {
                if (!bucket[bucket_id].tasks)
                        continue;
                return bucket[bucket_id].value;
        }

        /* No tasks -- default clamp values */
        return uclamp_idle_value(rq, clamp_id, clamp_value);
}

static void __uclamp_update_util_min_rt_default(struct task_struct *p)
{
        unsigned int default_util_min;
        struct uclamp_se *uc_se;

        lockdep_assert_held(&p->pi_lock);

        uc_se = &p->uclamp_req[UCLAMP_MIN];

        /* Only sync if the user didn't override the default */
        if (uc_se->user_defined)
                return;

        default_util_min = sysctl_sched_uclamp_util_min_rt_default;
        uclamp_se_set(uc_se, default_util_min, false);
}

static void uclamp_update_util_min_rt_default(struct task_struct *p)
{
        if (!rt_task(p))
                return;

        /* Protect updates to p->uclamp_* */
        guard(task_rq_lock)(p);
        __uclamp_update_util_min_rt_default(p);
}

static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
        /* Copy by value as we could modify it */
        struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
        unsigned int tg_min, tg_max, value;

        /*
         * Tasks in autogroups or the root task group will be
         * restricted by the system defaults.
         */
        if (task_group_is_autogroup(task_group(p)))
                return uc_req;
        if (task_group(p) == &root_task_group)
                return uc_req;

        tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
        tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
        value = uc_req.value;
        value = clamp(value, tg_min, tg_max);
        uclamp_se_set(&uc_req, value, false);
#endif

        return uc_req;
}

/*
 * The effective clamp bucket index of a task depends on, by increasing
 * priority:
 * - the task specific clamp value, when explicitly requested from userspace
 * - the task group effective clamp value, for tasks not either in the root
 *   group or in an autogroup
 * - the system default clamp value, defined by the sysadmin
 */
static inline struct uclamp_se
uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
        struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
        struct uclamp_se uc_max = uclamp_default[clamp_id];

        /* System default restrictions always apply */
        if (unlikely(uc_req.value > uc_max.value))
                return uc_max;

        return uc_req;
}

unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
        struct uclamp_se uc_eff;

        /* Task currently refcounted: use back-annotated (effective) value */
        if (p->uclamp[clamp_id].active)
                return (unsigned long)p->uclamp[clamp_id].value;

        uc_eff = uclamp_eff_get(p, clamp_id);

        return (unsigned long)uc_eff.value;
}

/*
 * When a task is enqueued on a rq, the clamp bucket currently defined by the
 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
 * updates the rq's clamp value if required.
 *
 * Tasks can have a task-specific value requested from user-space, track
 * within each bucket the maximum value for tasks refcounted in it.
 * This "local max aggregation" allows tracking the exact "requested" value
 * for each bucket when all its RUNNABLE tasks require the same clamp.
 */
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
                                    enum uclamp_id clamp_id)
{
        struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
        struct uclamp_se *uc_se = &p->uclamp[clamp_id];
        struct uclamp_bucket *bucket;

        lockdep_assert_rq_held(rq);

        /* Update the task's effective clamp */
        p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);

        bucket = &uc_rq->bucket[uc_se->bucket_id];
        bucket->tasks++;
        uc_se->active = true;

        uclamp_idle_reset(rq, clamp_id, uc_se->value);

        /*
         * Local max aggregation: rq buckets always track the max
         * "requested" clamp value of their RUNNABLE tasks.
         */
        if (bucket->tasks == 1 || uc_se->value > bucket->value)
                bucket->value = uc_se->value;

        if (uc_se->value > uclamp_rq_get(rq, clamp_id))
                uclamp_rq_set(rq, clamp_id, uc_se->value);
}
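
/*
 * Worked example of "local max aggregation" (illustrative; the exact bucket
 * boundaries depend on UCLAMP_BUCKETS): two RUNNABLE tasks with requested
 * UCLAMP_MIN values 300 and 350 that map to the same bucket leave
 * bucket->value == 350, so both are boosted to 350 (the 300 task is
 * over-boosted) until the bucket empties and uclamp_rq_max_value()
 * re-evaluates the rq clamp from the remaining buckets.
 */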
1761
1762 /*
1763 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1764 * is released. If this is the last task reference counting the rq's max
1765 * active clamp value, then the rq's clamp value is updated.
1766 *
1767 * Both refcounted tasks and rq's cached clamp values are expected to be
1768 * always valid. If it's detected they are not, as defensive programming,
1769 * enforce the expected state and warn.
1770 */
uclamp_rq_dec_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id)1771 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1772 enum uclamp_id clamp_id)
1773 {
1774 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1775 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1776 struct uclamp_bucket *bucket;
1777 unsigned int bkt_clamp;
1778 unsigned int rq_clamp;
1779
1780 lockdep_assert_rq_held(rq);
1781
1782 /*
1783 * If sched_uclamp_used was enabled after task @p was enqueued,
1784 * we could end up with an unbalanced call to uclamp_rq_dec_id().
1785 *
1786 * In this case the uc_se->active flag should be false since no uclamp
1787 * accounting was performed at enqueue time and we can just return
1788 * here.
1789 *
1790 * We also need to be careful about the following enqueue/dequeue
1791 * ordering problem:
1792 *
1793 * enqueue(taskA)
1794 * // sched_uclamp_used gets enabled
1795 * enqueue(taskB)
1796 * dequeue(taskA)
1797 * // Must not decrement bucket->tasks here
1798 * dequeue(taskB)
1799 *
1800 * where we could end up with stale data in uc_se and
1801 * bucket[uc_se->bucket_id].
1802 *
1803 * The following check eliminates the possibility of such a race.
1804 */
1805 if (unlikely(!uc_se->active))
1806 return;
1807
1808 bucket = &uc_rq->bucket[uc_se->bucket_id];
1809
1810 WARN_ON_ONCE(!bucket->tasks);
1811 if (likely(bucket->tasks))
1812 bucket->tasks--;
1813
1814 uc_se->active = false;
1815
1816 /*
1817 * Keep "local max aggregation" simple and accept to (possibly)
1818 * overboost some RUNNABLE tasks in the same bucket.
1819 * The rq clamp bucket value is reset to its base value whenever
1820 * there are no more RUNNABLE tasks refcounting it.
1821 */
1822 if (likely(bucket->tasks))
1823 return;
1824
1825 rq_clamp = uclamp_rq_get(rq, clamp_id);
1826 /*
1827 * Defensive programming: this should never happen. If it happens,
1828 * e.g. due to future modification, warn and fix up the expected value.
1829 */
1830 WARN_ON_ONCE(bucket->value > rq_clamp);
1831 if (bucket->value >= rq_clamp) {
1832 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1833 uclamp_rq_set(rq, clamp_id, bkt_clamp);
1834 }
1835 }
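
/*
 * Illustrative sketch (hypothetical, for documentation only): the
 * invariant maintained by the inc/dec pair above -- the rq-wide clamp
 * value never drops below the value of a bucket that still refcounts
 * RUNNABLE tasks. Assumes the UCLAMP_BUCKETS-sized bucket array used by
 * the structures referenced above.
 */
static inline void __maybe_unused
uclamp_rq_check_example(struct rq *rq, enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	unsigned int i;

	for (i = 0; i < UCLAMP_BUCKETS; i++) {
		if (uc_rq->bucket[i].tasks)
			WARN_ON_ONCE(uc_rq->bucket[i].value >
				     uclamp_rq_get(rq, clamp_id));
	}
}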
1836
1837 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags)
1838 {
1839 enum uclamp_id clamp_id;
1840
1841 /*
1842 * Avoid any overhead until uclamp is actually used by userspace.
1843 *
1844 * The condition is constructed such that a NOP is generated when
1845 * sched_uclamp_used is disabled.
1846 */
1847 if (!uclamp_is_used())
1848 return;
1849
1850 if (unlikely(!p->sched_class->uclamp_enabled))
1851 return;
1852
1853 /* Only inc a delayed task when it is being woken up. */
1854 if (p->se.sched_delayed && !(flags & ENQUEUE_DELAYED))
1855 return;
1856
1857 for_each_clamp_id(clamp_id)
1858 uclamp_rq_inc_id(rq, p, clamp_id);
1859
1860 /* Reset clamp idle holding when there is one RUNNABLE task */
1861 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1862 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1863 }
1864
1865 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1866 {
1867 enum uclamp_id clamp_id;
1868
1869 /*
1870 * Avoid any overhead until uclamp is actually used by userspace.
1871 *
1872 * The condition is constructed such that a NOP is generated when
1873 * sched_uclamp_used is disabled.
1874 */
1875 if (!uclamp_is_used())
1876 return;
1877
1878 if (unlikely(!p->sched_class->uclamp_enabled))
1879 return;
1880
1881 if (p->se.sched_delayed)
1882 return;
1883
1884 for_each_clamp_id(clamp_id)
1885 uclamp_rq_dec_id(rq, p, clamp_id);
1886 }
1887
1888 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1889 enum uclamp_id clamp_id)
1890 {
1891 if (!p->uclamp[clamp_id].active)
1892 return;
1893
1894 uclamp_rq_dec_id(rq, p, clamp_id);
1895 uclamp_rq_inc_id(rq, p, clamp_id);
1896
1897 /*
1898 * Make sure to clear the idle flag if we've transiently reached 0
1899 * active tasks on rq.
1900 */
1901 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1902 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1903 }
1904
1905 static inline void
1906 uclamp_update_active(struct task_struct *p)
1907 {
1908 enum uclamp_id clamp_id;
1909 struct rq_flags rf;
1910 struct rq *rq;
1911
1912 /*
1913 * Lock the task and the rq where the task is (or was) queued.
1914 *
1915 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1916 * price to pay to safely serialize util_{min,max} updates with
1917 * enqueues, dequeues and migration operations.
1918 * This is the same locking schema used by __set_cpus_allowed_ptr().
1919 */
1920 rq = task_rq_lock(p, &rf);
1921
1922 /*
1923 * Setting the clamp bucket is serialized by task_rq_lock().
1924 * If the task is not yet RUNNABLE and its task_struct is not
1925 * affecting a valid clamp bucket, the next time it's enqueued,
1926 * it will already see the updated clamp bucket value.
1927 */
1928 for_each_clamp_id(clamp_id)
1929 uclamp_rq_reinc_id(rq, p, clamp_id);
1930
1931 task_rq_unlock(rq, p, &rf);
1932 }
1933
1934 #ifdef CONFIG_UCLAMP_TASK_GROUP
1935 static inline void
1936 uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1937 {
1938 struct css_task_iter it;
1939 struct task_struct *p;
1940
1941 css_task_iter_start(css, 0, &it);
1942 while ((p = css_task_iter_next(&it)))
1943 uclamp_update_active(p);
1944 css_task_iter_end(&it);
1945 }
1946
1947 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1948 #endif
1949
1950 #ifdef CONFIG_SYSCTL
1951 #ifdef CONFIG_UCLAMP_TASK_GROUP
1952 static void uclamp_update_root_tg(void)
1953 {
1954 struct task_group *tg = &root_task_group;
1955
1956 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1957 sysctl_sched_uclamp_util_min, false);
1958 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1959 sysctl_sched_uclamp_util_max, false);
1960
1961 guard(rcu)();
1962 cpu_util_update_eff(&root_task_group.css);
1963 }
1964 #else
1965 static void uclamp_update_root_tg(void) { }
1966 #endif
1967
1968 static void uclamp_sync_util_min_rt_default(void)
1969 {
1970 struct task_struct *g, *p;
1971
1972 /*
1973 * copy_process() sysctl_uclamp
1974 * uclamp_min_rt = X;
1975 * write_lock(&tasklist_lock) read_lock(&tasklist_lock)
1976 * // link thread smp_mb__after_spinlock()
1977 * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock);
1978 * sched_post_fork() for_each_process_thread()
1979 * __uclamp_sync_rt() __uclamp_sync_rt()
1980 *
1981 * Ensures that either sched_post_fork() will observe the new
1982 * uclamp_min_rt or for_each_process_thread() will observe the new
1983 * task.
1984 */
1985 read_lock(&tasklist_lock);
1986 smp_mb__after_spinlock();
1987 read_unlock(&tasklist_lock);
1988
1989 guard(rcu)();
1990 for_each_process_thread(g, p)
1991 uclamp_update_util_min_rt_default(p);
1992 }
1993
1994 static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
1995 void *buffer, size_t *lenp, loff_t *ppos)
1996 {
1997 bool update_root_tg = false;
1998 int old_min, old_max, old_min_rt;
1999 int result;
2000
2001 guard(mutex)(&uclamp_mutex);
2002
2003 old_min = sysctl_sched_uclamp_util_min;
2004 old_max = sysctl_sched_uclamp_util_max;
2005 old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
2006
2007 result = proc_dointvec(table, write, buffer, lenp, ppos);
2008 if (result)
2009 goto undo;
2010 if (!write)
2011 return 0;
2012
2013 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
2014 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
2015 sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
2016
2017 result = -EINVAL;
2018 goto undo;
2019 }
2020
2021 if (old_min != sysctl_sched_uclamp_util_min) {
2022 uclamp_se_set(&uclamp_default[UCLAMP_MIN],
2023 sysctl_sched_uclamp_util_min, false);
2024 update_root_tg = true;
2025 }
2026 if (old_max != sysctl_sched_uclamp_util_max) {
2027 uclamp_se_set(&uclamp_default[UCLAMP_MAX],
2028 sysctl_sched_uclamp_util_max, false);
2029 update_root_tg = true;
2030 }
2031
2032 if (update_root_tg) {
2033 sched_uclamp_enable();
2034 uclamp_update_root_tg();
2035 }
2036
2037 if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
2038 sched_uclamp_enable();
2039 uclamp_sync_util_min_rt_default();
2040 }
2041
2042 /*
2043 * We update all RUNNABLE tasks only when task groups are in use.
2044 * Otherwise, keep it simple and do just a lazy update at each next
2045 * task enqueue time.
2046 */
2047 return 0;
2048
2049 undo:
2050 sysctl_sched_uclamp_util_min = old_min;
2051 sysctl_sched_uclamp_util_max = old_max;
2052 sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
2053 return result;
2054 }
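
/*
 * Illustrative sketch (hypothetical, simplified): how the handler above
 * could be wired to the kernel.sched_util_clamp_min knob; the real
 * ctl_table for these entries lives elsewhere in this file.
 */
static const struct ctl_table sysctl_sched_uclamp_example[] __maybe_unused = {
	{
		.procname	= "sched_util_clamp_min",
		.data		= &sysctl_sched_uclamp_util_min,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sysctl_sched_uclamp_handler,
	},
};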
2055 #endif /* CONFIG_SYSCTL */
2056
2057 static void uclamp_fork(struct task_struct *p)
2058 {
2059 enum uclamp_id clamp_id;
2060
2061 /*
2062 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
2063 * as the task is still at its early fork stages.
2064 */
2065 for_each_clamp_id(clamp_id)
2066 p->uclamp[clamp_id].active = false;
2067
2068 if (likely(!p->sched_reset_on_fork))
2069 return;
2070
2071 for_each_clamp_id(clamp_id) {
2072 uclamp_se_set(&p->uclamp_req[clamp_id],
2073 uclamp_none(clamp_id), false);
2074 }
2075 }
2076
2077 static void uclamp_post_fork(struct task_struct *p)
2078 {
2079 uclamp_update_util_min_rt_default(p);
2080 }
2081
2082 static void __init init_uclamp_rq(struct rq *rq)
2083 {
2084 enum uclamp_id clamp_id;
2085 struct uclamp_rq *uc_rq = rq->uclamp;
2086
2087 for_each_clamp_id(clamp_id) {
2088 uc_rq[clamp_id] = (struct uclamp_rq) {
2089 .value = uclamp_none(clamp_id)
2090 };
2091 }
2092
2093 rq->uclamp_flags = UCLAMP_FLAG_IDLE;
2094 }
2095
2096 static void __init init_uclamp(void)
2097 {
2098 struct uclamp_se uc_max = {};
2099 enum uclamp_id clamp_id;
2100 int cpu;
2101
2102 for_each_possible_cpu(cpu)
2103 init_uclamp_rq(cpu_rq(cpu));
2104
2105 for_each_clamp_id(clamp_id) {
2106 uclamp_se_set(&init_task.uclamp_req[clamp_id],
2107 uclamp_none(clamp_id), false);
2108 }
2109
2110 /* System defaults allow max clamp values for both indexes */
2111 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
2112 for_each_clamp_id(clamp_id) {
2113 uclamp_default[clamp_id] = uc_max;
2114 #ifdef CONFIG_UCLAMP_TASK_GROUP
2115 root_task_group.uclamp_req[clamp_id] = uc_max;
2116 root_task_group.uclamp[clamp_id] = uc_max;
2117 #endif
2118 }
2119 }
2120
2121 #else /* !CONFIG_UCLAMP_TASK: */
2122 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags) { }
2123 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
2124 static inline void uclamp_fork(struct task_struct *p) { }
2125 static inline void uclamp_post_fork(struct task_struct *p) { }
2126 static inline void init_uclamp(void) { }
2127 #endif /* !CONFIG_UCLAMP_TASK */
2128
2129 bool sched_task_on_rq(struct task_struct *p)
2130 {
2131 return task_on_rq_queued(p);
2132 }
2133
2134 unsigned long get_wchan(struct task_struct *p)
2135 {
2136 unsigned long ip = 0;
2137 unsigned int state;
2138
2139 if (!p || p == current)
2140 return 0;
2141
2142 /* Only get wchan if task is blocked and we can keep it that way. */
2143 raw_spin_lock_irq(&p->pi_lock);
2144 state = READ_ONCE(p->__state);
2145 smp_rmb(); /* see try_to_wake_up() */
2146 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2147 ip = __get_wchan(p);
2148 raw_spin_unlock_irq(&p->pi_lock);
2149
2150 return ip;
2151 }
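
/*
 * Descriptive note: the value returned above is what typically backs
 * /proc/<pid>/wchan -- the "wait channel" a blocked task is sleeping
 * in -- and 0 is returned for running tasks and for current.
 */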
2152
2153 void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2154 {
2155 if (!(flags & ENQUEUE_NOCLOCK))
2156 update_rq_clock(rq);
2157
2158 /*
2159 * Can be before ->enqueue_task() because uclamp considers the
2160 * ENQUEUE_DELAYED task before its ->sched_delayed gets cleared
2161 * in ->enqueue_task().
2162 */
2163 uclamp_rq_inc(rq, p, flags);
2164
2165 p->sched_class->enqueue_task(rq, p, flags);
2166
2167 psi_enqueue(p, flags);
2168
2169 if (!(flags & ENQUEUE_RESTORE))
2170 sched_info_enqueue(rq, p);
2171
2172 if (sched_core_enabled(rq))
2173 sched_core_enqueue(rq, p);
2174 }
2175
2176 /*
2177 * Must only return false when DEQUEUE_SLEEP.
2178 */
2179 inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
2180 {
2181 if (sched_core_enabled(rq))
2182 sched_core_dequeue(rq, p, flags);
2183
2184 if (!(flags & DEQUEUE_NOCLOCK))
2185 update_rq_clock(rq);
2186
2187 if (!(flags & DEQUEUE_SAVE))
2188 sched_info_dequeue(rq, p);
2189
2190 psi_dequeue(p, flags);
2191
2192 /*
2193 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
2194 * and mark the task ->sched_delayed.
2195 */
2196 uclamp_rq_dec(rq, p);
2197 return p->sched_class->dequeue_task(rq, p, flags);
2198 }
2199
2200 void activate_task(struct rq *rq, struct task_struct *p, int flags)
2201 {
2202 if (task_on_rq_migrating(p))
2203 flags |= ENQUEUE_MIGRATED;
2204
2205 enqueue_task(rq, p, flags);
2206
2207 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
2208 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2209 }
2210
2211 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
2212 {
2213 WARN_ON_ONCE(flags & DEQUEUE_SLEEP);
2214
2215 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
2216 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2217
2218 /*
2219 * Code explicitly relies on TASK_ON_RQ_MIGRATING being set *before*
2220 * dequeue_task() and cleared *after* enqueue_task().
2221 */
2222
2223 dequeue_task(rq, p, flags);
2224 }
2225
2226 static void block_task(struct rq *rq, struct task_struct *p, int flags)
2227 {
2228 if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
2229 __block_task(rq, p);
2230 }
2231
2232 /**
2233 * task_curr - is this task currently executing on a CPU?
2234 * @p: the task in question.
2235 *
2236 * Return: 1 if the task is currently executing. 0 otherwise.
2237 */
2238 inline int task_curr(const struct task_struct *p)
2239 {
2240 return cpu_curr(task_cpu(p)) == p;
2241 }
2242
2243 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
2244 {
2245 struct task_struct *donor = rq->donor;
2246
2247 if (p->sched_class == rq->next_class) {
2248 rq->next_class->wakeup_preempt(rq, p, flags);
2249
2250 } else if (sched_class_above(p->sched_class, rq->next_class)) {
2251 rq->next_class->wakeup_preempt(rq, p, flags);
2252 resched_curr(rq);
2253 rq->next_class = p->sched_class;
2254 }
2255
2256 /*
2257 * A queue event has occurred, and we're going to schedule. In
2258 * this case, we can save a useless back to back clock update.
2259 */
2260 if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr))
2261 rq_clock_skip_update(rq);
2262 }
2263
2264 static __always_inline
2265 int __task_state_match(struct task_struct *p, unsigned int state)
2266 {
2267 if (READ_ONCE(p->__state) & state)
2268 return 1;
2269
2270 if (READ_ONCE(p->saved_state) & state)
2271 return -1;
2272
2273 return 0;
2274 }
2275
2276 static __always_inline
2277 int task_state_match(struct task_struct *p, unsigned int state)
2278 {
2279 /*
2280 * Serialize against current_save_and_set_rtlock_wait_state(),
2281 * current_restore_rtlock_saved_state(), and __refrigerator().
2282 */
2283 guard(raw_spinlock_irq)(&p->pi_lock);
2284 return __task_state_match(p, state);
2285 }
2286
2287 /*
2288 * wait_task_inactive - wait for a thread to unschedule.
2289 *
2290 * Wait for the thread to block in any of the states set in @match_state.
2291 * If it changes, i.e. @p might have woken up, then return zero. When we
2292 * succeed in waiting for @p to be off its CPU, we return a positive number
2293 * (its total switch count). If a second call a short while later returns the
2294 * same number, the caller can be sure that @p has remained unscheduled the
2295 * whole time.
2296 *
2297 * The caller must ensure that the task *will* unschedule sometime soon,
2298 * else this function might spin for a *long* time. This function can't
2299 * be called with interrupts off, or it may introduce deadlock with
2300 * smp_call_function() if an IPI is sent by the same process we are
2301 * waiting to become inactive.
2302 */
2303 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2304 {
2305 int running, queued, match;
2306 struct rq_flags rf;
2307 unsigned long ncsw;
2308 struct rq *rq;
2309
2310 for (;;) {
2311 /*
2312 * We do the initial early heuristics without holding
2313 * any task-queue locks at all. We'll only try to get
2314 * the runqueue lock when things look like they will
2315 * work out!
2316 */
2317 rq = task_rq(p);
2318
2319 /*
2320 * If the task is actively running on another CPU
2321 * still, just relax and busy-wait without holding
2322 * any locks.
2323 *
2324 * NOTE! Since we don't hold any locks, it's not
2325 * even sure that "rq" stays as the right runqueue!
2326 * But we don't care, since "task_on_cpu()" will
2327 * return false if the runqueue has changed and p
2328 * is actually now running somewhere else!
2329 */
2330 while (task_on_cpu(rq, p)) {
2331 if (!task_state_match(p, match_state))
2332 return 0;
2333 cpu_relax();
2334 }
2335
2336 /*
2337 * Ok, time to look more closely! We need the rq
2338 * lock now, to be *sure*. If we're wrong, we'll
2339 * just go back and repeat.
2340 */
2341 rq = task_rq_lock(p, &rf);
2342 /*
2343 * If the task is sched_delayed, force dequeue it, to avoid always
2344 * hitting the tick timeout in the queued case.
2345 */
2346 if (p->se.sched_delayed)
2347 dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
2348 trace_sched_wait_task(p);
2349 running = task_on_cpu(rq, p);
2350 queued = task_on_rq_queued(p);
2351 ncsw = 0;
2352 if ((match = __task_state_match(p, match_state))) {
2353 /*
2354 * When matching on p->saved_state, consider this task
2355 * still queued so it will wait.
2356 */
2357 if (match < 0)
2358 queued = 1;
2359 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2360 }
2361 task_rq_unlock(rq, p, &rf);
2362
2363 /*
2364 * If it changed from the expected state, bail out now.
2365 */
2366 if (unlikely(!ncsw))
2367 break;
2368
2369 /*
2370 * Was it really running after all now that we
2371 * checked with the proper locks actually held?
2372 *
2373 * Oops. Go back and try again..
2374 */
2375 if (unlikely(running)) {
2376 cpu_relax();
2377 continue;
2378 }
2379
2380 /*
2381 * It's not enough that it's not actively running,
2382 * it must be off the runqueue _entirely_, and not
2383 * preempted!
2384 *
2385 * So if it was still runnable (but just not actively
2386 * running right now), it's preempted, and we should
2387 * yield - it could be a while.
2388 */
2389 if (unlikely(queued)) {
2390 ktime_t to = NSEC_PER_SEC / HZ;
2391
2392 set_current_state(TASK_UNINTERRUPTIBLE);
2393 schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
2394 continue;
2395 }
2396
2397 /*
2398 * Ahh, all good. It wasn't running, and it wasn't
2399 * runnable, which means that it will never become
2400 * running in the future either. We're all done!
2401 */
2402 break;
2403 }
2404
2405 return ncsw;
2406 }
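
/*
 * Illustrative sketch (hypothetical helper, not kernel code): the
 * double-call pattern described in the comment above wait_task_inactive()
 * -- two matching nonzero switch counts a short while apart mean @p
 * stayed unscheduled in between.
 */
static bool __maybe_unused task_stayed_inactive_example(struct task_struct *p)
{
	unsigned long ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);

	if (!ncsw)
		return false;

	/* ... give the task a chance to run again, e.g. a short sleep ... */

	return wait_task_inactive(p, TASK_UNINTERRUPTIBLE) == ncsw;
}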
2407
2408 static void
2409 do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2410
2411 static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2412 {
2413 struct affinity_context ac = {
2414 .new_mask = cpumask_of(rq->cpu),
2415 .flags = SCA_MIGRATE_DISABLE,
2416 };
2417
2418 if (likely(!p->migration_disabled))
2419 return;
2420
2421 if (p->cpus_ptr != &p->cpus_mask)
2422 return;
2423
2424 scoped_guard (task_rq_lock, p)
2425 do_set_cpus_allowed(p, &ac);
2426 }
2427
2428 void ___migrate_enable(void)
2429 {
2430 struct task_struct *p = current;
2431 struct affinity_context ac = {
2432 .new_mask = &p->cpus_mask,
2433 .flags = SCA_MIGRATE_ENABLE,
2434 };
2435
2436 __set_cpus_allowed_ptr(p, &ac);
2437 }
2438 EXPORT_SYMBOL_GPL(___migrate_enable);
2439
2440 void migrate_disable(void)
2441 {
2442 __migrate_disable();
2443 }
2444 EXPORT_SYMBOL_GPL(migrate_disable);
2445
2446 void migrate_enable(void)
2447 {
2448 __migrate_enable();
2449 }
2450 EXPORT_SYMBOL_GPL(migrate_enable);
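
/*
 * Illustrative sketch (hypothetical): a migrate-disable region built on
 * the exported pair above. Preemption stays enabled; only cross-CPU
 * migration is inhibited, so the CPU number is stable even though other
 * tasks may still preempt us.
 */
static void __maybe_unused migrate_disable_example(void)
{
	migrate_disable();
	/* smp_processor_id() is stable here, preemption notwithstanding. */
	migrate_enable();
}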
2451
2452 static inline bool rq_has_pinned_tasks(struct rq *rq)
2453 {
2454 return rq->nr_pinned;
2455 }
2456
2457 /*
2458 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2459 * __set_cpus_allowed_ptr() and select_fallback_rq().
2460 */
2461 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2462 {
2463 /* When not in the task's cpumask, no point in looking further. */
2464 if (!task_allowed_on_cpu(p, cpu))
2465 return false;
2466
2467 /* migrate_disabled() must be allowed to finish. */
2468 if (is_migration_disabled(p))
2469 return cpu_online(cpu);
2470
2471 /* Non-kernel threads are not allowed during either onlining or offlining. */
2472 if (!(p->flags & PF_KTHREAD))
2473 return cpu_active(cpu);
2474
2475 /* KTHREAD_IS_PER_CPU is always allowed. */
2476 if (kthread_is_per_cpu(p))
2477 return cpu_online(cpu);
2478
2479 /* Regular kernel threads don't get to stay during offline. */
2480 if (cpu_dying(cpu))
2481 return false;
2482
2483 /* But are allowed during online. */
2484 return cpu_online(cpu);
2485 }
2486
2487 /*
2488 * This is how migration works:
2489 *
2490 * 1) we invoke migration_cpu_stop() on the target CPU using
2491 * stop_one_cpu().
2492 * 2) stopper starts to run (implicitly forcing the migrated thread
2493 * off the CPU)
2494 * 3) it checks whether the migrated task is still in the wrong runqueue.
2495 * 4) if it's in the wrong runqueue then the migration thread removes
2496 * it and puts it into the right queue.
2497 * 5) stopper completes and stop_one_cpu() returns and the migration
2498 * is done.
2499 */
2500
2501 /*
2502 * move_queued_task - move a queued task to new rq.
2503 *
2504 * Returns (locked) new rq. Old rq's lock is released.
2505 */
2506 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2507 struct task_struct *p, int new_cpu)
2508 __must_hold(__rq_lockp(rq))
2509 {
2510 lockdep_assert_rq_held(rq);
2511
2512 deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2513 set_task_cpu(p, new_cpu);
2514 rq_unlock(rq, rf);
2515
2516 rq = cpu_rq(new_cpu);
2517
2518 rq_lock(rq, rf);
2519 WARN_ON_ONCE(task_cpu(p) != new_cpu);
2520 activate_task(rq, p, 0);
2521 wakeup_preempt(rq, p, 0);
2522
2523 return rq;
2524 }
2525
2526 struct migration_arg {
2527 struct task_struct *task;
2528 int dest_cpu;
2529 struct set_affinity_pending *pending;
2530 };
2531
2532 /*
2533 * @refs: number of wait_for_completion()
2534 * @stop_pending: is @stop_work in use
2535 */
2536 struct set_affinity_pending {
2537 refcount_t refs;
2538 unsigned int stop_pending;
2539 struct completion done;
2540 struct cpu_stop_work stop_work;
2541 struct migration_arg arg;
2542 };
2543
2544 /*
2545 * Move (not current) task off this CPU, onto the destination CPU. We're doing
2546 * this because either it can't run here any more (set_cpus_allowed()
2547 * away from this CPU, or CPU going down), or because we're
2548 * attempting to rebalance this task on exec (sched_exec).
2549 *
2550 * So we race with normal scheduler movements, but that's OK, as long
2551 * as the task is no longer on this CPU.
2552 */
2553 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2554 struct task_struct *p, int dest_cpu)
2555 __must_hold(__rq_lockp(rq))
2556 {
2557 /* Affinity changed (again). */
2558 if (!is_cpu_allowed(p, dest_cpu))
2559 return rq;
2560
2561 rq = move_queued_task(rq, rf, p, dest_cpu);
2562
2563 return rq;
2564 }
2565
2566 /*
2567 * migration_cpu_stop - this will be executed by a high-prio stopper thread
2568 * and performs thread migration by bumping thread off CPU then
2569 * 'pushing' onto another runqueue.
2570 */
2571 static int migration_cpu_stop(void *data)
2572 {
2573 struct migration_arg *arg = data;
2574 struct set_affinity_pending *pending = arg->pending;
2575 struct task_struct *p = arg->task;
2576 struct rq *rq = this_rq();
2577 bool complete = false;
2578 struct rq_flags rf;
2579
2580 /*
2581 * The original target CPU might have gone down and we might
2582 * be on another CPU but it doesn't matter.
2583 */
2584 local_irq_save(rf.flags);
2585 /*
2586 * We need to explicitly wake pending tasks before running
2587 * __migrate_task() such that we will not miss enforcing cpus_ptr
2588 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2589 */
2590 flush_smp_call_function_queue();
2591
2592 /*
2593 * We may change the underlying rq, but the locks held will
2594 * appropriately be "transferred" when switching.
2595 */
2596 context_unsafe_alias(rq);
2597
2598 raw_spin_lock(&p->pi_lock);
2599 rq_lock(rq, &rf);
2600
2601 /*
2602 * If we were passed a pending, then ->stop_pending was set, thus
2603 * p->migration_pending must have remained stable.
2604 */
2605 WARN_ON_ONCE(pending && pending != p->migration_pending);
2606
2607 /*
2608 * If task_rq(p) != rq, it cannot be migrated here, because we're
2609 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2610 * we're holding p->pi_lock.
2611 */
2612 if (task_rq(p) == rq) {
2613 if (is_migration_disabled(p))
2614 goto out;
2615
2616 if (pending) {
2617 p->migration_pending = NULL;
2618 complete = true;
2619
2620 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2621 goto out;
2622 }
2623
2624 if (task_on_rq_queued(p)) {
2625 update_rq_clock(rq);
2626 rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2627 } else {
2628 p->wake_cpu = arg->dest_cpu;
2629 }
2630
2631 /*
2632 * XXX __migrate_task() can fail, at which point we might end
2633 * up running on a dodgy CPU, AFAICT this can only happen
2634 * during CPU hotplug, at which point we'll get pushed out
2635 * anyway, so it's probably not a big deal.
2636 */
2637
2638 } else if (pending) {
2639 /*
2640 * This happens when we get migrated between migrate_enable()'s
2641 * preempt_enable() and scheduling the stopper task. At that
2642 * point we're a regular task again and not current anymore.
2643 *
2644 * A !PREEMPT kernel has a giant hole here, which makes it far
2645 * more likely.
2646 */
2647
2648 /*
2649 * The task moved before the stopper got to run. We're holding
2650 * ->pi_lock, so the allowed mask is stable - if it got
2651 * somewhere allowed, we're done.
2652 */
2653 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2654 p->migration_pending = NULL;
2655 complete = true;
2656 goto out;
2657 }
2658
2659 /*
2660 * When migrate_enable() hits a rq mis-match we can't reliably
2661 * determine is_migration_disabled() and so have to chase after
2662 * it.
2663 */
2664 WARN_ON_ONCE(!pending->stop_pending);
2665 preempt_disable();
2666 rq_unlock(rq, &rf);
2667 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2668 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2669 &pending->arg, &pending->stop_work);
2670 preempt_enable();
2671 return 0;
2672 }
2673 out:
2674 if (pending)
2675 pending->stop_pending = false;
2676 rq_unlock(rq, &rf);
2677 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2678
2679 if (complete)
2680 complete_all(&pending->done);
2681
2682 return 0;
2683 }
2684
2685 int push_cpu_stop(void *arg)
2686 {
2687 struct rq *lowest_rq = NULL, *rq = this_rq();
2688 struct task_struct *p = arg;
2689
2690 raw_spin_lock_irq(&p->pi_lock);
2691 raw_spin_rq_lock(rq);
2692
2693 if (task_rq(p) != rq)
2694 goto out_unlock;
2695
2696 if (is_migration_disabled(p)) {
2697 p->migration_flags |= MDF_PUSH;
2698 goto out_unlock;
2699 }
2700
2701 p->migration_flags &= ~MDF_PUSH;
2702
2703 if (p->sched_class->find_lock_rq)
2704 lowest_rq = p->sched_class->find_lock_rq(p, rq);
2705
2706 if (!lowest_rq)
2707 goto out_unlock;
2708
2709 lockdep_assert_rq_held(lowest_rq);
2710
2711 // XXX validate p is still the highest prio task
2712 if (task_rq(p) == rq) {
2713 move_queued_task_locked(rq, lowest_rq, p);
2714 resched_curr(lowest_rq);
2715 }
2716
2717 double_unlock_balance(rq, lowest_rq);
2718
2719 out_unlock:
2720 rq->push_busy = false;
2721 raw_spin_rq_unlock(rq);
2722 raw_spin_unlock_irq(&p->pi_lock);
2723
2724 put_task_struct(p);
2725 return 0;
2726 }
2727
2728 static inline void mm_update_cpus_allowed(struct mm_struct *mm, const cpumask_t *affmask);
2729
2730 /*
2731 * sched_class::set_cpus_allowed must do the below, but is not required to
2732 * actually call this function.
2733 */
2734 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
2735 {
2736 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2737 p->cpus_ptr = ctx->new_mask;
2738 return;
2739 }
2740
2741 cpumask_copy(&p->cpus_mask, ctx->new_mask);
2742 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2743 mm_update_cpus_allowed(p->mm, ctx->new_mask);
2744
2745 /*
2746 * Swap in a new user_cpus_ptr if SCA_USER flag set
2747 */
2748 if (ctx->flags & SCA_USER)
2749 swap(p->user_cpus_ptr, ctx->user_mask);
2750 }
2751
2752 static void
2753 do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
2754 {
2755 scoped_guard (sched_change, p, DEQUEUE_SAVE)
2756 p->sched_class->set_cpus_allowed(p, ctx);
2757 }
2758
2759 /*
2760 * Used for kthread_bind() and select_fallback_rq(), in both cases the user
2761 * affinity (if any) should be destroyed too.
2762 */
2763 void set_cpus_allowed_force(struct task_struct *p, const struct cpumask *new_mask)
2764 {
2765 struct affinity_context ac = {
2766 .new_mask = new_mask,
2767 .user_mask = NULL,
2768 .flags = SCA_USER, /* clear the user requested mask */
2769 };
2770 union cpumask_rcuhead {
2771 cpumask_t cpumask;
2772 struct rcu_head rcu;
2773 };
2774
2775 scoped_guard (__task_rq_lock, p)
2776 do_set_cpus_allowed(p, &ac);
2777
2778 /*
2779 * Because this is called with p->pi_lock held, it is not possible
2780 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2781 * kfree_rcu().
2782 */
2783 kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2784 }
2785
2786 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2787 int node)
2788 {
2789 cpumask_t *user_mask;
2790 unsigned long flags;
2791
2792 /*
2793 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
2794 * may differ by now due to racing.
2795 */
2796 dst->user_cpus_ptr = NULL;
2797
2798 /*
2799 * This check is racy and losing the race is a valid situation.
2800 * It is not worth the extra overhead of taking the pi_lock on
2801 * every fork/clone.
2802 */
2803 if (data_race(!src->user_cpus_ptr))
2804 return 0;
2805
2806 user_mask = alloc_user_cpus_ptr(node);
2807 if (!user_mask)
2808 return -ENOMEM;
2809
2810 /*
2811 * Use pi_lock to protect content of user_cpus_ptr
2812 *
2813 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2814 * set_cpus_allowed_force().
2815 */
2816 raw_spin_lock_irqsave(&src->pi_lock, flags);
2817 if (src->user_cpus_ptr) {
2818 swap(dst->user_cpus_ptr, user_mask);
2819 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2820 }
2821 raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2822
2823 if (unlikely(user_mask))
2824 kfree(user_mask);
2825
2826 return 0;
2827 }
2828
2829 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2830 {
2831 struct cpumask *user_mask = NULL;
2832
2833 swap(p->user_cpus_ptr, user_mask);
2834
2835 return user_mask;
2836 }
2837
2838 void release_user_cpus_ptr(struct task_struct *p)
2839 {
2840 kfree(clear_user_cpus_ptr(p));
2841 }
2842
2843 /*
2844 * This function is wildly self concurrent; here be dragons.
2845 *
2846 *
2847 * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2848 * designated task is enqueued on an allowed CPU. If that task is currently
2849 * running, we have to kick it out using the CPU stopper.
2850 *
2851 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2852 * Consider:
2853 *
2854 * Initial conditions: P0->cpus_mask = [0, 1]
2855 *
2856 * P0@CPU0 P1
2857 *
2858 * migrate_disable();
2859 * <preempted>
2860 * set_cpus_allowed_ptr(P0, [1]);
2861 *
2862 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2863 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2864 * This means we need the following scheme:
2865 *
2866 * P0@CPU0 P1
2867 *
2868 * migrate_disable();
2869 * <preempted>
2870 * set_cpus_allowed_ptr(P0, [1]);
2871 * <blocks>
2872 * <resumes>
2873 * migrate_enable();
2874 * __set_cpus_allowed_ptr();
2875 * <wakes local stopper>
2876 * `--> <woken on migration completion>
2877 *
2878 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2879 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2880 * task p are serialized by p->pi_lock, which we can leverage: the one that
2881 * should come into effect at the end of the Migrate-Disable region is the last
2882 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2883 * but we still need to properly signal those waiting tasks at the appropriate
2884 * moment.
2885 *
2886 * This is implemented using struct set_affinity_pending. The first
2887 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2888 * setup an instance of that struct and install it on the targeted task_struct.
2889 * Any and all further callers will reuse that instance. Those then wait for
2890 * a completion signaled at the tail of the CPU stopper callback (1), triggered
2891 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2892 *
2893 *
2894 * (1) In the cases covered above. There is one more where the completion is
2895 * signaled within affine_move_task() itself: when a subsequent affinity request
2896 * occurs after the stopper bailed out due to the targeted task still being
2897 * Migrate-Disable. Consider:
2898 *
2899 * Initial conditions: P0->cpus_mask = [0, 1]
2900 *
2901 * CPU0 P1 P2
2902 * <P0>
2903 * migrate_disable();
2904 * <preempted>
2905 * set_cpus_allowed_ptr(P0, [1]);
2906 * <blocks>
2907 * <migration/0>
2908 * migration_cpu_stop()
2909 * is_migration_disabled()
2910 * <bails>
2911 * set_cpus_allowed_ptr(P0, [0, 1]);
2912 * <signal completion>
2913 * <awakes>
2914 *
2915 * Note that the above is safe vs a concurrent migrate_enable(), as any
2916 * pending affinity completion is preceded by an uninstallation of
2917 * p->migration_pending done with p->pi_lock held.
2918 */
2919 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2920 int dest_cpu, unsigned int flags)
2921 __releases(__rq_lockp(rq), &p->pi_lock)
2922 {
2923 struct set_affinity_pending my_pending = { }, *pending = NULL;
2924 bool stop_pending, complete = false;
2925
2926 /*
2927 * Can the task run on the task's current CPU? If so, we're done
2928 *
2929 * We are also done if the task is the current donor, boosting a lock-
2930 * holding proxy (and potentially has been migrated outside its
2931 * current or previous affinity mask)
2932 */
2933 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask) ||
2934 (task_current_donor(rq, p) && !task_current(rq, p))) {
2935 struct task_struct *push_task = NULL;
2936
2937 if ((flags & SCA_MIGRATE_ENABLE) &&
2938 (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2939 rq->push_busy = true;
2940 push_task = get_task_struct(p);
2941 }
2942
2943 /*
2944 * If there are pending waiters, but no pending stop_work,
2945 * then complete now.
2946 */
2947 pending = p->migration_pending;
2948 if (pending && !pending->stop_pending) {
2949 p->migration_pending = NULL;
2950 complete = true;
2951 }
2952
2953 preempt_disable();
2954 task_rq_unlock(rq, p, rf);
2955 if (push_task) {
2956 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2957 p, &rq->push_work);
2958 }
2959 preempt_enable();
2960
2961 if (complete)
2962 complete_all(&pending->done);
2963
2964 return 0;
2965 }
2966
2967 if (!(flags & SCA_MIGRATE_ENABLE)) {
2968 /* serialized by p->pi_lock */
2969 if (!p->migration_pending) {
2970 /* Install the request */
2971 refcount_set(&my_pending.refs, 1);
2972 init_completion(&my_pending.done);
2973 my_pending.arg = (struct migration_arg) {
2974 .task = p,
2975 .dest_cpu = dest_cpu,
2976 .pending = &my_pending,
2977 };
2978
2979 p->migration_pending = &my_pending;
2980 } else {
2981 pending = p->migration_pending;
2982 refcount_inc(&pending->refs);
2983 /*
2984 * Affinity has changed, but we've already installed a
2985 * pending. migration_cpu_stop() *must* see this, else
2986 * we risk a completion of the pending despite having a
2987 * task on a disallowed CPU.
2988 *
2989 * Serialized by p->pi_lock, so this is safe.
2990 */
2991 pending->arg.dest_cpu = dest_cpu;
2992 }
2993 }
2994 pending = p->migration_pending;
2995 /*
2996 * - !MIGRATE_ENABLE:
2997 * we'll have installed a pending if there wasn't one already.
2998 *
2999 * - MIGRATE_ENABLE:
3000 * we're here because the current CPU isn't matching anymore,
3001 * the only way that can happen is because of a concurrent
3002 * set_cpus_allowed_ptr() call, which should then still be
3003 * pending completion.
3004 *
3005 * Either way, we really should have a @pending here.
3006 */
3007 if (WARN_ON_ONCE(!pending)) {
3008 task_rq_unlock(rq, p, rf);
3009 return -EINVAL;
3010 }
3011
3012 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
3013 /*
3014 * MIGRATE_ENABLE gets here because 'p == current', but for
3015 * anything else we cannot reliably check is_migration_disabled(), so
3016 * punt and have the stopper function handle it all race-free.
3017 */
3018 stop_pending = pending->stop_pending;
3019 if (!stop_pending)
3020 pending->stop_pending = true;
3021
3022 if (flags & SCA_MIGRATE_ENABLE)
3023 p->migration_flags &= ~MDF_PUSH;
3024
3025 preempt_disable();
3026 task_rq_unlock(rq, p, rf);
3027 if (!stop_pending) {
3028 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
3029 &pending->arg, &pending->stop_work);
3030 }
3031 preempt_enable();
3032
3033 if (flags & SCA_MIGRATE_ENABLE)
3034 return 0;
3035 } else {
3036
3037 if (!is_migration_disabled(p)) {
3038 if (task_on_rq_queued(p))
3039 rq = move_queued_task(rq, rf, p, dest_cpu);
3040
3041 if (!pending->stop_pending) {
3042 p->migration_pending = NULL;
3043 complete = true;
3044 }
3045 }
3046 task_rq_unlock(rq, p, rf);
3047
3048 if (complete)
3049 complete_all(&pending->done);
3050 }
3051
3052 wait_for_completion(&pending->done);
3053
3054 if (refcount_dec_and_test(&pending->refs))
3055 wake_up_var(&pending->refs); /* No UaF, just an address */
3056
3057 /*
3058 * Block the original owner of &pending until all subsequent callers
3059 * have seen the completion and decremented the refcount
3060 */
3061 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
3062
3063 /* ARGH */
3064 WARN_ON_ONCE(my_pending.stop_pending);
3065
3066 return 0;
3067 }
3068
3069 /*
3070 * Called with both p->pi_lock and rq->lock held; drops both before returning.
3071 */
3072 static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
3073 struct affinity_context *ctx,
3074 struct rq *rq,
3075 struct rq_flags *rf)
3076 __releases(__rq_lockp(rq), &p->pi_lock)
3077 {
3078 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
3079 const struct cpumask *cpu_valid_mask = cpu_active_mask;
3080 bool kthread = p->flags & PF_KTHREAD;
3081 unsigned int dest_cpu;
3082 int ret = 0;
3083
3084 if (kthread || is_migration_disabled(p)) {
3085 /*
3086 * Kernel threads are allowed on online && !active CPUs,
3087 * however, during cpu-hot-unplug, even these might get pushed
3088 * away if not KTHREAD_IS_PER_CPU.
3089 *
3090 * Specifically, migration_disabled() tasks must not fail the
3091 * cpumask_any_and_distribute() pick below, esp. so on
3092 * SCA_MIGRATE_ENABLE, otherwise we'll not call
3093 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
3094 */
3095 cpu_valid_mask = cpu_online_mask;
3096 }
3097
3098 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
3099 ret = -EINVAL;
3100 goto out;
3101 }
3102
3103 /*
3104 * Must re-check here, to close a race against __kthread_bind(),
3105 * sched_setaffinity() is not guaranteed to observe the flag.
3106 */
3107 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
3108 ret = -EINVAL;
3109 goto out;
3110 }
3111
3112 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
3113 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
3114 if (ctx->flags & SCA_USER)
3115 swap(p->user_cpus_ptr, ctx->user_mask);
3116 goto out;
3117 }
3118
3119 if (WARN_ON_ONCE(p == current &&
3120 is_migration_disabled(p) &&
3121 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
3122 ret = -EBUSY;
3123 goto out;
3124 }
3125 }
3126
3127 /*
3128 * Picking a ~random cpu helps in cases where we are changing affinity
3129 * for groups of tasks (ie. cpuset), so that load balancing is not
3130 * immediately required to distribute the tasks within their new mask.
3131 */
3132 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
3133 if (dest_cpu >= nr_cpu_ids) {
3134 ret = -EINVAL;
3135 goto out;
3136 }
3137
3138 do_set_cpus_allowed(p, ctx);
3139
3140 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
3141
3142 out:
3143 task_rq_unlock(rq, p, rf);
3144
3145 return ret;
3146 }
3147
3148 /*
3149 * Change a given task's CPU affinity. Migrate the thread to a
3150 * proper CPU and schedule it away if the CPU it's executing on
3151 * is removed from the allowed bitmask.
3152 *
3153 * NOTE: the caller must have a valid reference to the task, the
3154 * task must not exit() & deallocate itself prematurely. The
3155 * call is not atomic; no spinlocks may be held.
3156 */
3157 int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
3158 {
3159 struct rq_flags rf;
3160 struct rq *rq;
3161
3162 rq = task_rq_lock(p, &rf);
3163 /*
3164 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3165 * flags are set.
3166 */
3167 if (p->user_cpus_ptr &&
3168 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3169 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3170 ctx->new_mask = rq->scratch_mask;
3171
3172 return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
3173 }
3174
3175 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3176 {
3177 struct affinity_context ac = {
3178 .new_mask = new_mask,
3179 .flags = 0,
3180 };
3181
3182 return __set_cpus_allowed_ptr(p, &ac);
3183 }
3184 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
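
/*
 * Illustrative sketch (hypothetical): pinning a kernel thread to a single
 * CPU through the exported wrapper above.
 */
static void __maybe_unused pin_kthread_example(struct task_struct *p, int cpu)
{
	WARN_ON_ONCE(set_cpus_allowed_ptr(p, cpumask_of(cpu)));
}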
3185
3186 /*
3187 * Change a given task's CPU affinity to the intersection of its current
3188 * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3189 * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3190 * affinity or use cpu_online_mask instead.
3191 *
3192 * If the resulting mask is empty, leave the affinity unchanged and return
3193 * -EINVAL.
3194 */
3195 static int restrict_cpus_allowed_ptr(struct task_struct *p,
3196 struct cpumask *new_mask,
3197 const struct cpumask *subset_mask)
3198 {
3199 struct affinity_context ac = {
3200 .new_mask = new_mask,
3201 .flags = 0,
3202 };
3203 struct rq_flags rf;
3204 struct rq *rq;
3205 int err;
3206
3207 rq = task_rq_lock(p, &rf);
3208
3209 /*
3210 * Forcefully restricting the affinity of a deadline task is
3211 * likely to cause problems, so fail and noisily override the
3212 * mask entirely.
3213 */
3214 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3215 err = -EPERM;
3216 goto err_unlock;
3217 }
3218
3219 if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
3220 err = -EINVAL;
3221 goto err_unlock;
3222 }
3223
3224 return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
3225
3226 err_unlock:
3227 task_rq_unlock(rq, p, &rf);
3228 return err;
3229 }
3230
3231 /*
3232 * Restrict the CPU affinity of task @p so that it is a subset of
3233 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3234 * old affinity mask. If the resulting mask is empty, we warn and walk
3235 * up the cpuset hierarchy until we find a suitable mask.
3236 */
3237 void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3238 {
3239 cpumask_var_t new_mask;
3240 const struct cpumask *override_mask = task_cpu_possible_mask(p);
3241
3242 alloc_cpumask_var(&new_mask, GFP_KERNEL);
3243
3244 /*
3245 * __migrate_task() can fail silently in the face of concurrent
3246 * offlining of the chosen destination CPU, so take the hotplug
3247 * lock to ensure that the migration succeeds.
3248 */
3249 cpus_read_lock();
3250 if (!cpumask_available(new_mask))
3251 goto out_set_mask;
3252
3253 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3254 goto out_free_mask;
3255
3256 /*
3257 * We failed to find a valid subset of the affinity mask for the
3258 * task, so override it based on its cpuset hierarchy.
3259 */
3260 cpuset_cpus_allowed(p, new_mask);
3261 override_mask = new_mask;
3262
3263 out_set_mask:
3264 if (printk_ratelimit()) {
3265 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3266 task_pid_nr(p), p->comm,
3267 cpumask_pr_args(override_mask));
3268 }
3269
3270 WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3271 out_free_mask:
3272 cpus_read_unlock();
3273 free_cpumask_var(new_mask);
3274 }
3275
3276 /*
3277 * Restore the affinity of a task @p which was previously restricted by a
3278 * call to force_compatible_cpus_allowed_ptr().
3279 *
3280 * It is the caller's responsibility to serialise this with any calls to
3281 * force_compatible_cpus_allowed_ptr(@p).
3282 */
3283 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3284 {
3285 struct affinity_context ac = {
3286 .new_mask = task_user_cpus(p),
3287 .flags = 0,
3288 };
3289 int ret;
3290
3291 /*
3292 * Try to restore the old affinity mask with __sched_setaffinity().
3293 * Cpuset masking will be done there too.
3294 */
3295 ret = __sched_setaffinity(p, &ac);
3296 WARN_ON_ONCE(ret);
3297 }
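
/*
 * Illustrative sketch (hypothetical): the intended pairing of the two
 * helpers above -- restrict affinity for a compatibility-limited window,
 * then restore the user-requested mask.
 */
static void __maybe_unused compat_affinity_example(struct task_struct *p)
{
	force_compatible_cpus_allowed_ptr(p);
	/* ... run while only task_cpu_possible_mask() CPUs are usable ... */
	relax_compatible_cpus_allowed_ptr(p);
}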
3298
3299 #ifdef CONFIG_SMP
3300
3301 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3302 {
3303 unsigned int state = READ_ONCE(p->__state);
3304
3305 /*
3306 * We should never call set_task_cpu() on a blocked task,
3307 * ttwu() will sort out the placement.
3308 */
3309 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3310
3311 /*
3312 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3313 * because schedstat_wait_{start,end} rebase migrating task's wait_start
3314 * time relying on p->on_rq.
3315 */
3316 WARN_ON_ONCE(state == TASK_RUNNING &&
3317 p->sched_class == &fair_sched_class &&
3318 (p->on_rq && !task_on_rq_migrating(p)));
3319
3320 #ifdef CONFIG_LOCKDEP
3321 /*
3322 * The caller should hold either p->pi_lock or rq->lock, when changing
3323 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3324 *
3325 * sched_move_task() holds both and thus holding either pins the cgroup,
3326 * see task_group().
3327 *
3328 * Furthermore, all task_rq users should acquire both locks, see
3329 * task_rq_lock().
3330 */
3331 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3332 lockdep_is_held(__rq_lockp(task_rq(p)))));
3333 #endif
3334 /*
3335 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3336 */
3337 WARN_ON_ONCE(!cpu_online(new_cpu));
3338
3339 WARN_ON_ONCE(is_migration_disabled(p));
3340
3341 trace_sched_migrate_task(p, new_cpu);
3342
3343 if (task_cpu(p) != new_cpu) {
3344 if (p->sched_class->migrate_task_rq)
3345 p->sched_class->migrate_task_rq(p, new_cpu);
3346 p->se.nr_migrations++;
3347 perf_event_task_migrate(p);
3348 }
3349
3350 __set_task_cpu(p, new_cpu);
3351 }
3352 #endif /* CONFIG_SMP */
3353
3354 #ifdef CONFIG_NUMA_BALANCING
3355 static void __migrate_swap_task(struct task_struct *p, int cpu)
3356 {
3357 if (task_on_rq_queued(p)) {
3358 struct rq *src_rq, *dst_rq;
3359 struct rq_flags srf, drf;
3360
3361 src_rq = task_rq(p);
3362 dst_rq = cpu_rq(cpu);
3363
3364 rq_pin_lock(src_rq, &srf);
3365 rq_pin_lock(dst_rq, &drf);
3366
3367 move_queued_task_locked(src_rq, dst_rq, p);
3368 wakeup_preempt(dst_rq, p, 0);
3369
3370 rq_unpin_lock(dst_rq, &drf);
3371 rq_unpin_lock(src_rq, &srf);
3372
3373 } else {
3374 /*
3375 * Task isn't running anymore; make it appear like we migrated
3376 * it before it went to sleep. This means on wakeup we make the
3377 * previous CPU our target instead of where it really is.
3378 */
3379 p->wake_cpu = cpu;
3380 }
3381 }
3382
3383 struct migration_swap_arg {
3384 struct task_struct *src_task, *dst_task;
3385 int src_cpu, dst_cpu;
3386 };
3387
3388 static int migrate_swap_stop(void *data)
3389 {
3390 struct migration_swap_arg *arg = data;
3391 struct rq *src_rq, *dst_rq;
3392
3393 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3394 return -EAGAIN;
3395
3396 src_rq = cpu_rq(arg->src_cpu);
3397 dst_rq = cpu_rq(arg->dst_cpu);
3398
3399 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3400 guard(double_rq_lock)(src_rq, dst_rq);
3401
3402 if (task_cpu(arg->dst_task) != arg->dst_cpu)
3403 return -EAGAIN;
3404
3405 if (task_cpu(arg->src_task) != arg->src_cpu)
3406 return -EAGAIN;
3407
3408 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3409 return -EAGAIN;
3410
3411 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3412 return -EAGAIN;
3413
3414 __migrate_swap_task(arg->src_task, arg->dst_cpu);
3415 __migrate_swap_task(arg->dst_task, arg->src_cpu);
3416
3417 return 0;
3418 }
3419
3420 /*
3421 * Cross migrate two tasks
3422 */
3423 int migrate_swap(struct task_struct *cur, struct task_struct *p,
3424 int target_cpu, int curr_cpu)
3425 {
3426 struct migration_swap_arg arg;
3427 int ret = -EINVAL;
3428
3429 arg = (struct migration_swap_arg){
3430 .src_task = cur,
3431 .src_cpu = curr_cpu,
3432 .dst_task = p,
3433 .dst_cpu = target_cpu,
3434 };
3435
3436 if (arg.src_cpu == arg.dst_cpu)
3437 goto out;
3438
3439 /*
3440 * These three tests are all lockless; this is OK since all of them
3441 * will be re-checked with proper locks held further down the line.
3442 */
3443 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3444 goto out;
3445
3446 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3447 goto out;
3448
3449 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3450 goto out;
3451
3452 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3453 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3454
3455 out:
3456 return ret;
3457 }
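
/*
 * Illustrative sketch (hypothetical, simplified): how a NUMA balancing
 * decision might request swapping the current task with a remote one.
 */
static int __maybe_unused numa_swap_example(struct task_struct *remote)
{
	return migrate_swap(current, remote, task_cpu(remote),
			    task_cpu(current));
}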
3458 #endif /* CONFIG_NUMA_BALANCING */
3459
3460 /***
3461 * kick_process - kick a running thread to enter/exit the kernel
3462 * @p: the to-be-kicked thread
3463 *
3464 * Cause a process which is running on another CPU to enter
3465 * kernel-mode, without any delay. (to get signals handled.)
3466 *
3467 * NOTE: this function doesn't have to take the runqueue lock,
3468 * because all it wants to ensure is that the remote task enters
3469 * the kernel. If the IPI races and the task has been migrated
3470 * to another CPU then no harm is done and the purpose has been
3471 * achieved as well.
3472 */
3473 void kick_process(struct task_struct *p)
3474 {
3475 guard(preempt)();
3476 int cpu = task_cpu(p);
3477
3478 if ((cpu != smp_processor_id()) && task_curr(p))
3479 smp_send_reschedule(cpu);
3480 }
3481 EXPORT_SYMBOL_GPL(kick_process);
3482
3483 /*
3484 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3485 *
3486 * A few notes on cpu_active vs cpu_online:
3487 *
3488 * - cpu_active must be a subset of cpu_online
3489 *
3490 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3491 * see __set_cpus_allowed_ptr(). At this point the newly online
3492 * CPU isn't yet part of the sched domains, and balancing will not
3493 * see it.
3494 *
3495 * - on CPU-down we clear cpu_active() to mask the sched domains and
3496 * avoid the load balancer to place new tasks on the to be removed
3497 * CPU. Existing tasks will remain running there and will be taken
3498 * off.
3499 *
3500 * This means that fallback selection must not select !active CPUs.
3501 * And can assume that any active CPU must be online. Conversely
3502 * select_task_rq() below may allow selection of !active CPUs in order
3503 * to satisfy the above rules.
3504 */
3505 static int select_fallback_rq(int cpu, struct task_struct *p)
3506 {
3507 int nid = cpu_to_node(cpu);
3508 const struct cpumask *nodemask = NULL;
3509 enum { cpuset, possible, fail } state = cpuset;
3510 int dest_cpu;
3511
3512 /*
3513 * If the node that the CPU is on has been offlined, cpu_to_node()
3514 * will return -1. There is no CPU on the node, and we should
3515 * select the CPU on the other node.
3516 */
3517 if (nid != -1) {
3518 nodemask = cpumask_of_node(nid);
3519
3520 /* Look for allowed, online CPU in same node. */
3521 for_each_cpu(dest_cpu, nodemask) {
3522 if (is_cpu_allowed(p, dest_cpu))
3523 return dest_cpu;
3524 }
3525 }
3526
3527 for (;;) {
3528 /* Any allowed, online CPU? */
3529 for_each_cpu(dest_cpu, p->cpus_ptr) {
3530 if (!is_cpu_allowed(p, dest_cpu))
3531 continue;
3532
3533 goto out;
3534 }
3535
3536 /* No more Mr. Nice Guy. */
3537 switch (state) {
3538 case cpuset:
3539 if (cpuset_cpus_allowed_fallback(p)) {
3540 state = possible;
3541 break;
3542 }
3543 fallthrough;
3544 case possible:
3545 set_cpus_allowed_force(p, task_cpu_fallback_mask(p));
3546 state = fail;
3547 break;
3548 case fail:
3549 BUG();
3550 break;
3551 }
3552 }
3553
3554 out:
3555 if (state != cpuset) {
3556 /*
3557 * Don't tell them about moving exiting tasks or
3558 * kernel threads (both mm NULL), since they never
3559 * leave kernel.
3560 */
3561 if (p->mm && printk_ratelimit()) {
3562 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3563 task_pid_nr(p), p->comm, cpu);
3564 }
3565 }
3566
3567 return dest_cpu;
3568 }
3569
3570 /*
3571 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3572 */
3573 static inline
3574 int select_task_rq(struct task_struct *p, int cpu, int *wake_flags)
3575 {
3576 lockdep_assert_held(&p->pi_lock);
3577
3578 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) {
3579 cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags);
3580 *wake_flags |= WF_RQ_SELECTED;
3581 } else {
3582 cpu = cpumask_any(p->cpus_ptr);
3583 }
3584
3585 /*
3586 * In order not to call set_task_cpu() on a blocking task we need
3587 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3588 * CPU.
3589 *
3590 * Since this is common to all placement strategies, this lives here.
3591 *
3592 * [ this allows ->select_task_rq() to simply return task_cpu(p) and
3593 * not worry about this generic constraint ]
3594 */
3595 if (unlikely(!is_cpu_allowed(p, cpu)))
3596 cpu = select_fallback_rq(task_cpu(p), p);
3597
3598 return cpu;
3599 }
3600
3601 void sched_set_stop_task(int cpu, struct task_struct *stop)
3602 {
3603 static struct lock_class_key stop_pi_lock;
3604 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3605 struct task_struct *old_stop = cpu_rq(cpu)->stop;
3606
3607 if (stop) {
3608 /*
3609 * Make it appear like a SCHED_FIFO task, it's something
3610 * userspace knows about and won't get confused about.
3611 *
3612 * Also, it will make PI more or less work without too
3613 * much confusion -- but then, stop work should not
3614 * rely on PI working anyway.
3615 */
3616 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3617
3618 stop->sched_class = &stop_sched_class;
3619
3620 /*
3621 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3622 * adjust the effective priority of a task. As a result,
3623 * rt_mutex_setprio() can trigger (RT) balancing operations,
3624 * which can then trigger wakeups of the stop thread to push
3625 * around the current task.
3626 *
3627 * The stop task itself will never be part of the PI-chain, it
3628 * never blocks, therefore that ->pi_lock recursion is safe.
3629 * Tell lockdep about this by placing the stop->pi_lock in its
3630 * own class.
3631 */
3632 lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3633 }
3634
3635 cpu_rq(cpu)->stop = stop;
3636
3637 if (old_stop) {
3638 /*
3639 * Reset it back to a normal scheduling class so that
3640 * it can die in pieces.
3641 */
3642 old_stop->sched_class = &rt_sched_class;
3643 }
3644 }
3645
3646 static void
3647 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3648 {
3649 struct rq *rq;
3650
3651 if (!schedstat_enabled())
3652 return;
3653
3654 rq = this_rq();
3655
3656 if (cpu == rq->cpu) {
3657 __schedstat_inc(rq->ttwu_local);
3658 __schedstat_inc(p->stats.nr_wakeups_local);
3659 } else {
3660 struct sched_domain *sd;
3661
3662 __schedstat_inc(p->stats.nr_wakeups_remote);
3663
3664 guard(rcu)();
3665 for_each_domain(rq->cpu, sd) {
3666 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3667 __schedstat_inc(sd->ttwu_wake_remote);
3668 break;
3669 }
3670 }
3671 }
3672
3673 if (wake_flags & WF_MIGRATED)
3674 __schedstat_inc(p->stats.nr_wakeups_migrate);
3675
3676 __schedstat_inc(rq->ttwu_count);
3677 __schedstat_inc(p->stats.nr_wakeups);
3678
3679 if (wake_flags & WF_SYNC)
3680 __schedstat_inc(p->stats.nr_wakeups_sync);
3681 }
3682
3683 /*
3684 * Mark the task runnable.
3685 */
3686 static inline void ttwu_do_wakeup(struct task_struct *p)
3687 {
3688 WRITE_ONCE(p->__state, TASK_RUNNING);
3689 trace_sched_wakeup(p);
3690 }
3691
3692 void update_rq_avg_idle(struct rq *rq)
3693 {
3694 u64 delta = rq_clock(rq) - rq->idle_stamp;
3695 u64 max = 2*rq->max_idle_balance_cost;
3696
3697 update_avg(&rq->avg_idle, delta);
3698
3699 if (rq->avg_idle > max)
3700 rq->avg_idle = max;
3701 rq->idle_stamp = 0;
3702 }
3703
3704 static void
3705 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3706 struct rq_flags *rf)
3707 {
3708 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3709
3710 lockdep_assert_rq_held(rq);
3711
3712 if (p->sched_contributes_to_load)
3713 rq->nr_uninterruptible--;
3714
3715 if (wake_flags & WF_RQ_SELECTED)
3716 en_flags |= ENQUEUE_RQ_SELECTED;
3717 if (wake_flags & WF_MIGRATED)
3718 en_flags |= ENQUEUE_MIGRATED;
3719 else
3720 if (p->in_iowait) {
3721 delayacct_blkio_end(p);
3722 atomic_dec(&task_rq(p)->nr_iowait);
3723 }
3724
3725 activate_task(rq, p, en_flags);
3726 wakeup_preempt(rq, p, wake_flags);
3727
3728 ttwu_do_wakeup(p);
3729
3730 if (p->sched_class->task_woken) {
3731 /*
3732 * Our task @p is fully woken up and running; so it's safe to
3733 * drop the rq->lock, hereafter rq is only used for statistics.
3734 */
3735 rq_unpin_lock(rq, rf);
3736 p->sched_class->task_woken(rq, p);
3737 rq_repin_lock(rq, rf);
3738 }
3739 }
3740
3741 /*
3742 * Consider @p being inside a wait loop:
3743 *
3744 * for (;;) {
3745 * set_current_state(TASK_UNINTERRUPTIBLE);
3746 *
3747 * if (CONDITION)
3748 * break;
3749 *
3750 * schedule();
3751 * }
3752 * __set_current_state(TASK_RUNNING);
3753 *
3754 * between set_current_state() and schedule(). In this case @p is still
3755 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3756 * an atomic manner.
3757 *
3758 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3759 * then schedule() must still happen and p->state can be changed to
3760 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3761 * need to do a full wakeup with enqueue.
3762 *
3763 * Returns: %true when the wakeup is done,
3764 * %false otherwise.
3765 */
3766 static int ttwu_runnable(struct task_struct *p, int wake_flags)
3767 {
3768 struct rq_flags rf;
3769 struct rq *rq;
3770 int ret = 0;
3771
3772 rq = __task_rq_lock(p, &rf);
3773 if (task_on_rq_queued(p)) {
3774 update_rq_clock(rq);
3775 if (p->se.sched_delayed)
3776 enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
3777 if (!task_on_cpu(rq, p)) {
3778 /*
3779 * When on_rq && !on_cpu the task is preempted, see if
3780 * it should preempt the task that is current now.
3781 */
3782 wakeup_preempt(rq, p, wake_flags);
3783 }
3784 ttwu_do_wakeup(p);
3785 ret = 1;
3786 }
3787 __task_rq_unlock(rq, p, &rf);
3788
3789 return ret;
3790 }
3791
3792 void sched_ttwu_pending(void *arg)
3793 {
3794 struct llist_node *llist = arg;
3795 struct rq *rq = this_rq();
3796 struct task_struct *p, *t;
3797 struct rq_flags rf;
3798
3799 if (!llist)
3800 return;
3801
3802 rq_lock_irqsave(rq, &rf);
3803 update_rq_clock(rq);
3804
3805 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3806 if (WARN_ON_ONCE(p->on_cpu))
3807 smp_cond_load_acquire(&p->on_cpu, !VAL);
3808
3809 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3810 set_task_cpu(p, cpu_of(rq));
3811
3812 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3813 }
3814
3815 /*
3816 * Must be after enqueueing at least one task such that
3817 * idle_cpu() does not observe a false-negative -- if it does,
3818 * it is possible for select_idle_siblings() to stack a number
3819 * of tasks on this CPU during that window.
3820 *
3821 * It is OK to clear ttwu_pending when another task is pending.
3822 * We will receive the IPI after local IRQs are enabled and then enqueue it.
3823 * Since now nr_running > 0, idle_cpu() will always get correct result.
3824 */
3825 WRITE_ONCE(rq->ttwu_pending, 0);
3826 rq_unlock_irqrestore(rq, &rf);
3827 }
3828
3829 /*
3830 * Prepare the scene for sending an IPI for a remote smp_call
3831 *
3832 * Returns true if the caller can proceed with sending the IPI.
3833 * Returns false otherwise.
3834 */
3835 bool call_function_single_prep_ipi(int cpu)
3836 {
3837 if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3838 trace_sched_wake_idle_without_ipi(cpu);
3839 return false;
3840 }
3841
3842 return true;
3843 }
3844
3845 /*
3846 * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
3847 * necessary. The wakee CPU, on receipt of the IPI, will queue the task
3848 * via sched_ttwu_pending() for activation so the wakee incurs the cost
3849 * of the wakeup instead of the waker.
3850 */
3851 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3852 {
3853 struct rq *rq = cpu_rq(cpu);
3854
3855 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3856
3857 WRITE_ONCE(rq->ttwu_pending, 1);
3858 #ifdef CONFIG_SMP
3859 __smp_call_single_queue(cpu, &p->wake_entry.llist);
3860 #endif
3861 }
3862
3863 void wake_up_if_idle(int cpu)
3864 {
3865 struct rq *rq = cpu_rq(cpu);
3866
3867 guard(rcu)();
3868 if (is_idle_task(rcu_dereference(rq->curr))) {
3869 guard(rq_lock_irqsave)(rq);
3870 if (is_idle_task(rq->curr))
3871 resched_curr(rq);
3872 }
3873 }
3874
3875 bool cpus_equal_capacity(int this_cpu, int that_cpu)
3876 {
3877 if (!sched_asym_cpucap_active())
3878 return true;
3879
3880 if (this_cpu == that_cpu)
3881 return true;
3882
3883 return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
3884 }
3885
3886 bool cpus_share_cache(int this_cpu, int that_cpu)
3887 {
3888 if (this_cpu == that_cpu)
3889 return true;
3890
3891 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3892 }
3893
3894 /*
3895 * Whether the CPUs share cache resources, which means the LLC on non-cluster
3896 * machines and the LLC tag or L2 on machines with clusters.
3897 */
3898 bool cpus_share_resources(int this_cpu, int that_cpu)
3899 {
3900 if (this_cpu == that_cpu)
3901 return true;
3902
3903 return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
3904 }
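/*
 * Worked illustration on a hypothetical clustered topology where CPUs
 * 0-7 share one LLC but CPUs 0-3 and 4-7 sit in different clusters
 * (per-cluster L2):
 *
 *	cpus_share_cache(0, 4)     == true	(same LLC)
 *	cpus_share_resources(0, 4) == false	(different clusters)
 *	cpus_share_resources(0, 1) == true	(same cluster)
 */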
3905
3906 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3907 {
3908 int this_cpu = smp_processor_id();
3909
3910 /* See SCX_OPS_ALLOW_QUEUED_WAKEUP. */
3911 if (!scx_allow_ttwu_queue(p))
3912 return false;
3913
3914 #ifdef CONFIG_SMP
3915 if (p->sched_class == &stop_sched_class)
3916 return false;
3917 #endif
3918
3919 /*
3920 * Do not complicate things with the async wake_list while the CPU is
3921 * in hotplug state.
3922 */
3923 if (!cpu_active(cpu))
3924 return false;
3925
3926 /* Ensure the task will still be allowed to run on the CPU. */
3927 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3928 return false;
3929
3930 /*
3931 * If the CPU does not share cache, then queue the task on the
3932 * remote rq's wakelist to avoid accessing remote data.
3933 */
3934 if (!cpus_share_cache(this_cpu, cpu))
3935 return true;
3936
3937 if (cpu == this_cpu)
3938 return false;
3939
3940 /*
3941 * If the wakee cpu is idle, or the task is descheduling and the
3942 * only running task on the CPU, then use the wakelist to offload
3943 * the task activation to the idle (or soon-to-be-idle) CPU as
3944 * the current CPU is likely busy. nr_running is checked to
3945 * avoid unnecessary task stacking.
3946 *
3947 * Note that we can only get here with (wakee) p->on_rq=0,
3948 * p->on_cpu can be whatever, we've done the dequeue, so
3949 * the wakee has been accounted out of ->nr_running.
3950 */
3951 if (!cpu_rq(cpu)->nr_running)
3952 return true;
3953
3954 return false;
3955 }
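/*
 * Illustrative summary of the checks above, in evaluation order:
 *
 *	scx disallows queued wakeups	-> false
 *	stop-class task			-> false
 *	!cpu_active(cpu)		-> false
 *	cpu not in p->cpus_ptr		-> false
 *	no cache shared with waker	-> true  (avoid remote access)
 *	cpu == this_cpu			-> false
 *	wakee rq has nr_running == 0	-> true  (offload to idle CPU)
 *	otherwise			-> false
 */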
3956
3957 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3958 {
3959 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
3960 sched_clock_cpu(cpu); /* Sync clocks across CPUs */
3961 __ttwu_queue_wakelist(p, cpu, wake_flags);
3962 return true;
3963 }
3964
3965 return false;
3966 }
3967
3968 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
3969 {
3970 struct rq *rq = cpu_rq(cpu);
3971 struct rq_flags rf;
3972
3973 if (ttwu_queue_wakelist(p, cpu, wake_flags))
3974 return;
3975
3976 rq_lock(rq, &rf);
3977 update_rq_clock(rq);
3978 ttwu_do_activate(rq, p, wake_flags, &rf);
3979 rq_unlock(rq, &rf);
3980 }
3981
3982 /*
3983 * Invoked from try_to_wake_up() to check whether the task can be woken up.
3984 *
3985 * The caller holds p::pi_lock if p != current or has preemption
3986 * disabled when p == current.
3987 *
3988 * The rules of saved_state:
3989 *
3990 * The related locking code always holds p::pi_lock when updating
3991 * p::saved_state, which means the code is fully serialized in both cases.
3992 *
3993 * For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
3994 * No other bits set. This allows to distinguish all wakeup scenarios.
3995 *
3996 * For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
3997 * allows us to prevent early wakeup of tasks before they can be run on
3998 * asymmetric ISA architectures (eg ARMv9).
3999 */
4000 static __always_inline
4001 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
4002 {
4003 int match;
4004
4005 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
4006 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
4007 state != TASK_RTLOCK_WAIT);
4008 }
4009
4010 *success = !!(match = __task_state_match(p, state));
4011
4012 /*
4013 * Saved state preserves the task state across blocking on
4014 * an RT lock or TASK_FREEZABLE tasks. If the state matches,
4015 * set p::saved_state to TASK_RUNNING, but do not wake the task
4016 * because it waits for a lock wakeup or __thaw_task(). Also
4017 * indicate success because from the regular waker's point of
4018 * view this has succeeded.
4019 *
4020 * After acquiring the lock the task will restore p::__state
4021 * from p::saved_state which ensures that the regular
4022 * wakeup is not lost. The restore will also set
4023 * p::saved_state to TASK_RUNNING so any further tests will
4024 * not result in false positives vs. @success
4025 */
4026 if (match < 0)
4027 p->saved_state = TASK_RUNNING;
4028
4029 return match > 0;
4030 }
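/*
 * Illustration of the tri-state contract assumed above for
 * __task_state_match():
 *
 *	match > 0:  p->__state matched @state	-> do the wakeup
 *	match < 0:  only p->saved_state matched	-> report success, defer
 *						   to lock wakeup / __thaw_task()
 *	match == 0: no match			-> abandon the wakeup
 */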
4031
4032 /*
4033 * Notes on Program-Order guarantees on SMP systems.
4034 *
4035 * MIGRATION
4036 *
4037 * The basic program-order guarantee on SMP systems is that when a task [t]
4038 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4039 * execution on its new CPU [c1].
4040 *
4041 * For migration (of runnable tasks) this is provided by the following means:
4042 *
4043 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4044 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4045 * rq(c1)->lock (if not at the same time, then in that order).
4046 * C) LOCK of the rq(c1)->lock scheduling in task
4047 *
4048 * Release/acquire chaining guarantees that B happens after A and C after B.
4049 * Note: the CPU doing B need not be c0 or c1
4050 *
4051 * Example:
4052 *
4053 * CPU0 CPU1 CPU2
4054 *
4055 * LOCK rq(0)->lock
4056 * sched-out X
4057 * sched-in Y
4058 * UNLOCK rq(0)->lock
4059 *
4060 * LOCK rq(0)->lock // orders against CPU0
4061 * dequeue X
4062 * UNLOCK rq(0)->lock
4063 *
4064 * LOCK rq(1)->lock
4065 * enqueue X
4066 * UNLOCK rq(1)->lock
4067 *
4068 * LOCK rq(1)->lock // orders against CPU2
4069 * sched-out Z
4070 * sched-in X
4071 * UNLOCK rq(1)->lock
4072 *
4073 *
4074 * BLOCKING -- aka. SLEEP + WAKEUP
4075 *
4076 * For blocking we (obviously) need to provide the same guarantee as for
4077 * migration. However the means are completely different as there is no lock
4078 * chain to provide order. Instead we do:
4079 *
4080 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4081 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4082 *
4083 * Example:
4084 *
4085 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
4086 *
4087 * LOCK rq(0)->lock LOCK X->pi_lock
4088 * dequeue X
4089 * sched-out X
4090 * smp_store_release(X->on_cpu, 0);
4091 *
4092 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4093 * X->state = WAKING
4094 * set_task_cpu(X,2)
4095 *
4096 * LOCK rq(2)->lock
4097 * enqueue X
4098 * X->state = RUNNING
4099 * UNLOCK rq(2)->lock
4100 *
4101 * LOCK rq(2)->lock // orders against CPU1
4102 * sched-out Z
4103 * sched-in X
4104 * UNLOCK rq(2)->lock
4105 *
4106 * UNLOCK X->pi_lock
4107 * UNLOCK rq(0)->lock
4108 *
4109 *
4110 * However, for wakeups there is a second guarantee we must provide, namely we
4111 * must ensure that CONDITION=1 done by the caller can not be reordered with
4112 * accesses to the task state; see try_to_wake_up() and set_current_state().
4113 */
4114
4115 /**
4116 * try_to_wake_up - wake up a thread
4117 * @p: the thread to be awakened
4118 * @state: the mask of task states that can be woken
4119 * @wake_flags: wake modifier flags (WF_*)
4120 *
4121 * Conceptually does:
4122 *
4123 * If (@state & @p->state) @p->state = TASK_RUNNING.
4124 *
4125 * If the task was not queued/runnable, also place it back on a runqueue.
4126 *
4127 * This function is atomic against schedule() which would dequeue the task.
4128 *
4129 * It issues a full memory barrier before accessing @p->state, see the comment
4130 * with set_current_state().
4131 *
4132 * Uses p->pi_lock to serialize against concurrent wake-ups.
4133 *
4134 * Relies on p->pi_lock stabilizing:
4135 * - p->sched_class
4136 * - p->cpus_ptr
4137 * - p->sched_task_group
4138 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4139 *
4140 * Tries really hard to only take one task_rq(p)->lock for performance.
4141 * Takes rq->lock in:
4142 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4143 * - ttwu_queue() -- new rq, for enqueue of the task;
4144 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4145 *
4146 * As a consequence we race really badly with just about everything. See the
4147 * many memory barriers and their comments for details.
4148 *
4149 * Return: %true if @p->state changes (an actual wakeup was done),
4150 * %false otherwise.
4151 */
4152 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4153 {
4154 guard(preempt)();
4155 int cpu, success = 0;
4156
4157 wake_flags |= WF_TTWU;
4158
4159 if (p == current) {
4160 /*
4161 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4162 * == smp_processor_id()'. Together this means we can special
4163 * case the whole 'p->on_rq && ttwu_runnable()' case below
4164 * without taking any locks.
4165 *
4166 * Specifically, given current runs ttwu() we must be before
4167 * schedule()'s block_task(), as such this must not observe
4168 * sched_delayed.
4169 *
4170 * In particular:
4171 * - we rely on Program-Order guarantees for all the ordering,
4172 * - we're serialized against set_special_state() by virtue of
4173 * it disabling IRQs (this allows not taking ->pi_lock).
4174 */
4175 WARN_ON_ONCE(p->se.sched_delayed);
4176 if (!ttwu_state_match(p, state, &success))
4177 goto out;
4178
4179 trace_sched_waking(p);
4180 ttwu_do_wakeup(p);
4181 goto out;
4182 }
4183
4184 /*
4185 * If we are going to wake up a thread waiting for CONDITION we
4186 * need to ensure that CONDITION=1 done by the caller can not be
4187 * reordered with p->state check below. This pairs with smp_store_mb()
4188 * in set_current_state() that the waiting thread does.
4189 */
4190 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4191 smp_mb__after_spinlock();
4192 if (!ttwu_state_match(p, state, &success))
4193 break;
4194
4195 trace_sched_waking(p);
4196
4197 /*
4198 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4199 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4200 * in smp_cond_load_acquire() below.
4201 *
4202 * sched_ttwu_pending() try_to_wake_up()
4203 * STORE p->on_rq = 1 LOAD p->state
4204 * UNLOCK rq->lock
4205 *
4206 * __schedule() (switch to task 'p')
4207 * LOCK rq->lock smp_rmb();
4208 * smp_mb__after_spinlock();
4209 * UNLOCK rq->lock
4210 *
4211 * [task p]
4212 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
4213 *
4214 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4215 * __schedule(). See the comment for smp_mb__after_spinlock().
4216 *
4217 * A similar smp_rmb() lives in __task_needs_rq_lock().
4218 */
4219 smp_rmb();
4220 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4221 break;
4222
4223 /*
4224 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4225 * possible to, falsely, observe p->on_cpu == 0.
4226 *
4227 * One must be running (->on_cpu == 1) in order to remove oneself
4228 * from the runqueue.
4229 *
4230 * __schedule() (switch to task 'p') try_to_wake_up()
4231 * STORE p->on_cpu = 1 LOAD p->on_rq
4232 * UNLOCK rq->lock
4233 *
4234 * __schedule() (put 'p' to sleep)
4235 * LOCK rq->lock smp_rmb();
4236 * smp_mb__after_spinlock();
4237 * STORE p->on_rq = 0 LOAD p->on_cpu
4238 *
4239 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4240 * __schedule(). See the comment for smp_mb__after_spinlock().
4241 *
4242 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4243 * schedule()'s block_task() has 'happened' and p will no longer
4244 * care about its own p->state. See the comment in __schedule().
4245 */
4246 smp_acquire__after_ctrl_dep();
4247
4248 /*
4249 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4250 * == 0), which means we need to do an enqueue, change p->state to
4251 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4252 * enqueue, such as ttwu_queue_wakelist().
4253 */
4254 WRITE_ONCE(p->__state, TASK_WAKING);
4255
4256 /*
4257 * If the owning (remote) CPU is still in the middle of schedule() with
4258 * this task as prev, consider queueing p on the remote CPU's wake_list,
4259 * which potentially sends an IPI instead of spinning on p->on_cpu to
4260 * let the waker make forward progress. This is safe because IRQs are
4261 * disabled and the IPI will deliver after on_cpu is cleared.
4262 *
4263 * Ensure we load task_cpu(p) after p->on_cpu:
4264 *
4265 * set_task_cpu(p, cpu);
4266 * STORE p->cpu = @cpu
4267 * __schedule() (switch to task 'p')
4268 * LOCK rq->lock
4269 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
4270 * STORE p->on_cpu = 1 LOAD p->cpu
4271 *
4272 * to ensure we observe the correct CPU on which the task is currently
4273 * scheduling.
4274 */
4275 if (smp_load_acquire(&p->on_cpu) &&
4276 ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4277 break;
4278
4279 /*
4280 * If the owning (remote) CPU is still in the middle of schedule() with
4281 * this task as prev, wait until it's done referencing the task.
4282 *
4283 * Pairs with the smp_store_release() in finish_task().
4284 *
4285 * This ensures that tasks getting woken will be fully ordered against
4286 * their previous state and preserve Program Order.
4287 */
4288 smp_cond_load_acquire(&p->on_cpu, !VAL);
4289
4290 cpu = select_task_rq(p, p->wake_cpu, &wake_flags);
4291 if (task_cpu(p) != cpu) {
4292 if (p->in_iowait) {
4293 delayacct_blkio_end(p);
4294 atomic_dec(&task_rq(p)->nr_iowait);
4295 }
4296
4297 wake_flags |= WF_MIGRATED;
4298 psi_ttwu_dequeue(p);
4299 set_task_cpu(p, cpu);
4300 }
4301
4302 ttwu_queue(p, cpu, wake_flags);
4303 }
4304 out:
4305 if (success)
4306 ttwu_stat(p, task_cpu(p), wake_flags);
4307
4308 return success;
4309 }
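/*
 * Illustrative waker-side sketch (not part of this file) pairing with the
 * wait loop shown above ttwu_runnable():
 *
 *	CONDITION = 1;		// the store the wakee will test
 *	wake_up_process(p);	// try_to_wake_up(p, TASK_NORMAL, 0)
 *
 * The full barrier implied by try_to_wake_up() orders the CONDITION store
 * against the p->state load, pairing with set_current_state() on the
 * wakee side.
 */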
4310
4311 static bool __task_needs_rq_lock(struct task_struct *p)
4312 {
4313 unsigned int state = READ_ONCE(p->__state);
4314
4315 /*
4316 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4317 * the task is blocked. Make sure to check @state since ttwu() can drop
4318 * locks at the end, see ttwu_queue_wakelist().
4319 */
4320 if (state == TASK_RUNNING || state == TASK_WAKING)
4321 return true;
4322
4323 /*
4324 * Ensure we load p->on_rq after p->__state, otherwise it would be
4325 * possible to, falsely, observe p->on_rq == 0.
4326 *
4327 * See try_to_wake_up() for a longer comment.
4328 */
4329 smp_rmb();
4330 if (p->on_rq)
4331 return true;
4332
4333 /*
4334 * Ensure the task has finished __schedule() and will not be referenced
4335 * anymore. Again, see try_to_wake_up() for a longer comment.
4336 */
4337 smp_rmb();
4338 smp_cond_load_acquire(&p->on_cpu, !VAL);
4339
4340 return false;
4341 }
4342
4343 /**
4344 * task_call_func - Invoke a function on task in fixed state
4345 * @p: Process for which the function is to be invoked, can be @current.
4346 * @func: Function to invoke.
4347 * @arg: Argument to function.
4348 *
4349 * Fix the task in its current state by avoiding wakeups and/or rq operations
4350 * and call @func(@arg) on it. This function can use task_is_runnable() and
4351 * task_curr() to work out what the state is, if required. Given that @func
4352 * can be invoked with a runqueue lock held, it had better be quite
4353 * lightweight.
4354 *
4355 * Returns:
4356 * Whatever @func returns
4357 */
4358 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4359 {
4360 struct rq_flags rf;
4361 int ret;
4362
4363 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4364
4365 if (__task_needs_rq_lock(p)) {
4366 struct rq *rq = __task_rq_lock(p, &rf);
4367
4368 /*
4369 * At this point the task is pinned; either:
4370 * - blocked and we're holding off wakeups (pi->lock)
4371 * - woken, and we're holding off enqueue (rq->lock)
4372 * - queued, and we're holding off schedule (rq->lock)
4373 * - running, and we're holding off de-schedule (rq->lock)
4374 *
4375 * The called function (@func) can use: task_curr(), p->on_rq and
4376 * p->__state to differentiate between these states.
4377 */
4378 ret = func(p, arg);
4379
4380 __task_rq_unlock(rq, p, &rf);
4381 } else {
4382 ret = func(p, arg);
4383 }
4384
4385 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4386 return ret;
4387 }
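/*
 * Minimal usage sketch; get_task_state() is a hypothetical callback, not
 * a kernel API:
 *
 *	static int get_task_state(struct task_struct *p, void *arg)
 *	{
 *		*(unsigned int *)arg = READ_ONCE(p->__state);
 *		return 0;
 *	}
 *
 *	unsigned int state;
 *	task_call_func(p, get_task_state, &state);
 *
 * The callback runs with @p pinned as described above, possibly under
 * rq->lock, so it must not sleep or take heavy locks.
 */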
4388
4389 /**
4390 * cpu_curr_snapshot - Return a snapshot of the currently running task
4391 * @cpu: The CPU on which to snapshot the task.
4392 *
4393 * Returns the task_struct pointer of the task "currently" running on
4394 * the specified CPU.
4395 *
4396 * If the specified CPU was offline, the return value is whatever it
4397 * is, perhaps a pointer to the task_struct structure of that CPU's idle
4398 * task, but there is no guarantee. Callers wishing a useful return
4399 * value must take some action to ensure that the specified CPU remains
4400 * online throughout.
4401 *
4402 * This function executes full memory barriers before and after fetching
4403 * the pointer, which permits the caller to confine this function's fetch
4404 * with respect to the caller's accesses to other shared variables.
4405 */
4406 struct task_struct *cpu_curr_snapshot(int cpu)
4407 {
4408 struct rq *rq = cpu_rq(cpu);
4409 struct task_struct *t;
4410 struct rq_flags rf;
4411
4412 rq_lock_irqsave(rq, &rf);
4413 smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4414 t = rcu_dereference(cpu_curr(cpu));
4415 rq_unlock_irqrestore(rq, &rf);
4416 smp_mb(); /* Pairing determined by caller's synchronization design. */
4417
4418 return t;
4419 }
4420
4421 /**
4422 * wake_up_process - Wake up a specific process
4423 * @p: The process to be woken up.
4424 *
4425 * Attempt to wake up the nominated process and move it to the set of runnable
4426 * processes.
4427 *
4428 * Return: 1 if the process was woken up, 0 if it was already running.
4429 *
4430 * This function executes a full memory barrier before accessing the task state.
4431 */
4432 int wake_up_process(struct task_struct *p)
4433 {
4434 return try_to_wake_up(p, TASK_NORMAL, 0);
4435 }
4436 EXPORT_SYMBOL(wake_up_process);
4437
4438 int wake_up_state(struct task_struct *p, unsigned int state)
4439 {
4440 return try_to_wake_up(p, state, 0);
4441 }
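/*
 * E.g. wake_up_state(p, TASK_INTERRUPTIBLE) only wakes @p from an
 * interruptible sleep, while wake_up_process() passes TASK_NORMAL
 * (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) and wakes @p from either
 * kind of sleep.
 */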
4442
4443 /*
4444 * Perform scheduler related setup for a newly forked process p.
4445 * p is forked by current.
4446 *
4447 * __sched_fork() is basic setup which is also used by sched_init() to
4448 * initialize the boot CPU's idle task.
4449 */
4450 static void __sched_fork(u64 clone_flags, struct task_struct *p)
4451 {
4452 p->on_rq = 0;
4453
4454 p->se.on_rq = 0;
4455 p->se.exec_start = 0;
4456 p->se.sum_exec_runtime = 0;
4457 p->se.prev_sum_exec_runtime = 0;
4458 p->se.nr_migrations = 0;
4459 p->se.vruntime = 0;
4460 p->se.vlag = 0;
4461 p->se.rel_deadline = 0;
4462 INIT_LIST_HEAD(&p->se.group_node);
4463
4464 /* A delayed task cannot be in clone(). */
4465 WARN_ON_ONCE(p->se.sched_delayed);
4466
4467 #ifdef CONFIG_FAIR_GROUP_SCHED
4468 p->se.cfs_rq = NULL;
4469 #ifdef CONFIG_CFS_BANDWIDTH
4470 init_cfs_throttle_work(p);
4471 #endif
4472 #endif
4473
4474 #ifdef CONFIG_SCHEDSTATS
4475 /* Even if schedstat is disabled, there should not be garbage */
4476 memset(&p->stats, 0, sizeof(p->stats));
4477 #endif
4478
4479 init_dl_entity(&p->dl);
4480
4481 INIT_LIST_HEAD(&p->rt.run_list);
4482 p->rt.timeout = 0;
4483 p->rt.time_slice = sched_rr_timeslice;
4484 p->rt.on_rq = 0;
4485 p->rt.on_list = 0;
4486
4487 #ifdef CONFIG_SCHED_CLASS_EXT
4488 init_scx_entity(&p->scx);
4489 #endif
4490
4491 #ifdef CONFIG_PREEMPT_NOTIFIERS
4492 INIT_HLIST_HEAD(&p->preempt_notifiers);
4493 #endif
4494
4495 #ifdef CONFIG_COMPACTION
4496 p->capture_control = NULL;
4497 #endif
4498 init_numa_balancing(clone_flags, p);
4499 p->wake_entry.u_flags = CSD_TYPE_TTWU;
4500 p->migration_pending = NULL;
4501 }
4502
4503 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4504
4505 #ifdef CONFIG_NUMA_BALANCING
4506
4507 int sysctl_numa_balancing_mode;
4508
4509 static void __set_numabalancing_state(bool enabled)
4510 {
4511 if (enabled)
4512 static_branch_enable(&sched_numa_balancing);
4513 else
4514 static_branch_disable(&sched_numa_balancing);
4515 }
4516
4517 void set_numabalancing_state(bool enabled)
4518 {
4519 if (enabled)
4520 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4521 else
4522 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4523 __set_numabalancing_state(enabled);
4524 }
4525
4526 #ifdef CONFIG_PROC_SYSCTL
4527 static void reset_memory_tiering(void)
4528 {
4529 struct pglist_data *pgdat;
4530
4531 for_each_online_pgdat(pgdat) {
4532 pgdat->nbp_threshold = 0;
4533 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4534 pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4535 }
4536 }
4537
4538 static int sysctl_numa_balancing(const struct ctl_table *table, int write,
4539 void *buffer, size_t *lenp, loff_t *ppos)
4540 {
4541 struct ctl_table t;
4542 int err;
4543 int state = sysctl_numa_balancing_mode;
4544
4545 if (write && !capable(CAP_SYS_ADMIN))
4546 return -EPERM;
4547
4548 t = *table;
4549 t.data = &state;
4550 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4551 if (err < 0)
4552 return err;
4553 if (write) {
4554 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4555 (state & NUMA_BALANCING_MEMORY_TIERING))
4556 reset_memory_tiering();
4557 sysctl_numa_balancing_mode = state;
4558 __set_numabalancing_state(state);
4559 }
4560 return err;
4561 }
4562 #endif /* CONFIG_PROC_SYSCTL */
4563 #endif /* CONFIG_NUMA_BALANCING */
4564
4565 #ifdef CONFIG_SCHEDSTATS
4566
4567 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4568
4569 static void set_schedstats(bool enabled)
4570 {
4571 if (enabled)
4572 static_branch_enable(&sched_schedstats);
4573 else
4574 static_branch_disable(&sched_schedstats);
4575 }
4576
4577 void force_schedstat_enabled(void)
4578 {
4579 if (!schedstat_enabled()) {
4580 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4581 static_branch_enable(&sched_schedstats);
4582 }
4583 }
4584
4585 static int __init setup_schedstats(char *str)
4586 {
4587 int ret = 0;
4588 if (!str)
4589 goto out;
4590
4591 if (!strcmp(str, "enable")) {
4592 set_schedstats(true);
4593 ret = 1;
4594 } else if (!strcmp(str, "disable")) {
4595 set_schedstats(false);
4596 ret = 1;
4597 }
4598 out:
4599 if (!ret)
4600 pr_warn("Unable to parse schedstats=\n");
4601
4602 return ret;
4603 }
4604 __setup("schedstats=", setup_schedstats);
4605
4606 #ifdef CONFIG_PROC_SYSCTL
4607 static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
4608 size_t *lenp, loff_t *ppos)
4609 {
4610 struct ctl_table t;
4611 int err;
4612 int state = static_branch_likely(&sched_schedstats);
4613
4614 if (write && !capable(CAP_SYS_ADMIN))
4615 return -EPERM;
4616
4617 t = *table;
4618 t.data = &state;
4619 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4620 if (err < 0)
4621 return err;
4622 if (write)
4623 set_schedstats(state);
4624 return err;
4625 }
4626 #endif /* CONFIG_PROC_SYSCTL */
4627 #endif /* CONFIG_SCHEDSTATS */
4628
4629 #ifdef CONFIG_SYSCTL
4630 static const struct ctl_table sched_core_sysctls[] = {
4631 #ifdef CONFIG_SCHEDSTATS
4632 {
4633 .procname = "sched_schedstats",
4634 .data = NULL,
4635 .maxlen = sizeof(unsigned int),
4636 .mode = 0644,
4637 .proc_handler = sysctl_schedstats,
4638 .extra1 = SYSCTL_ZERO,
4639 .extra2 = SYSCTL_ONE,
4640 },
4641 #endif /* CONFIG_SCHEDSTATS */
4642 #ifdef CONFIG_UCLAMP_TASK
4643 {
4644 .procname = "sched_util_clamp_min",
4645 .data = &sysctl_sched_uclamp_util_min,
4646 .maxlen = sizeof(unsigned int),
4647 .mode = 0644,
4648 .proc_handler = sysctl_sched_uclamp_handler,
4649 },
4650 {
4651 .procname = "sched_util_clamp_max",
4652 .data = &sysctl_sched_uclamp_util_max,
4653 .maxlen = sizeof(unsigned int),
4654 .mode = 0644,
4655 .proc_handler = sysctl_sched_uclamp_handler,
4656 },
4657 {
4658 .procname = "sched_util_clamp_min_rt_default",
4659 .data = &sysctl_sched_uclamp_util_min_rt_default,
4660 .maxlen = sizeof(unsigned int),
4661 .mode = 0644,
4662 .proc_handler = sysctl_sched_uclamp_handler,
4663 },
4664 #endif /* CONFIG_UCLAMP_TASK */
4665 #ifdef CONFIG_NUMA_BALANCING
4666 {
4667 .procname = "numa_balancing",
4668 .data = NULL, /* filled in by handler */
4669 .maxlen = sizeof(unsigned int),
4670 .mode = 0644,
4671 .proc_handler = sysctl_numa_balancing,
4672 .extra1 = SYSCTL_ZERO,
4673 .extra2 = SYSCTL_FOUR,
4674 },
4675 #endif /* CONFIG_NUMA_BALANCING */
4676 };
4677 static int __init sched_core_sysctl_init(void)
4678 {
4679 register_sysctl_init("kernel", sched_core_sysctls);
4680 return 0;
4681 }
4682 late_initcall(sched_core_sysctl_init);
4683 #endif /* CONFIG_SYSCTL */
4684
4685 /*
4686 * fork()/clone()-time setup:
4687 */
4688 int sched_fork(u64 clone_flags, struct task_struct *p)
4689 {
4690 __sched_fork(clone_flags, p);
4691 /*
4692 * We mark the process as NEW here. This guarantees that
4693 * nobody will actually run it, and a signal or other external
4694 * event cannot wake it up and insert it on the runqueue either.
4695 */
4696 p->__state = TASK_NEW;
4697
4698 /*
4699 * Make sure we do not leak PI boosting priority to the child.
4700 */
4701 p->prio = current->normal_prio;
4702
4703 uclamp_fork(p);
4704
4705 /*
4706 * Revert to default priority/policy on fork if requested.
4707 */
4708 if (unlikely(p->sched_reset_on_fork)) {
4709 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4710 p->policy = SCHED_NORMAL;
4711 p->static_prio = NICE_TO_PRIO(0);
4712 p->rt_priority = 0;
4713 } else if (PRIO_TO_NICE(p->static_prio) < 0)
4714 p->static_prio = NICE_TO_PRIO(0);
4715
4716 p->prio = p->normal_prio = p->static_prio;
4717 set_load_weight(p, false);
4718 p->se.custom_slice = 0;
4719 p->se.slice = sysctl_sched_base_slice;
4720
4721 /*
4722 * We don't need the reset flag anymore after the fork. It has
4723 * fulfilled its duty:
4724 */
4725 p->sched_reset_on_fork = 0;
4726 }
4727
4728 if (dl_prio(p->prio))
4729 return -EAGAIN;
4730
4731 scx_pre_fork(p);
4732
4733 if (rt_prio(p->prio)) {
4734 p->sched_class = &rt_sched_class;
4735 #ifdef CONFIG_SCHED_CLASS_EXT
4736 } else if (task_should_scx(p->policy)) {
4737 p->sched_class = &ext_sched_class;
4738 #endif
4739 } else {
4740 p->sched_class = &fair_sched_class;
4741 }
4742
4743 init_entity_runnable_average(&p->se);
4744
4745
4746 #ifdef CONFIG_SCHED_INFO
4747 if (likely(sched_info_on()))
4748 memset(&p->sched_info, 0, sizeof(p->sched_info));
4749 #endif
4750 p->on_cpu = 0;
4751 init_task_preempt_count(p);
4752 plist_node_init(&p->pushable_tasks, MAX_PRIO);
4753 RB_CLEAR_NODE(&p->pushable_dl_tasks);
4754
4755 return 0;
4756 }
4757
4758 int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4759 {
4760 unsigned long flags;
4761
4762 /*
4763 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4764 * required yet, but lockdep gets upset if rules are violated.
4765 */
4766 raw_spin_lock_irqsave(&p->pi_lock, flags);
4767 #ifdef CONFIG_CGROUP_SCHED
4768 if (1) {
4769 struct task_group *tg;
4770 tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4771 struct task_group, css);
4772 tg = autogroup_task_group(p, tg);
4773 p->sched_task_group = tg;
4774 }
4775 #endif
4776 /*
4777 * We're setting the CPU for the first time, we don't migrate,
4778 * so use __set_task_cpu().
4779 */
4780 __set_task_cpu(p, smp_processor_id());
4781 if (p->sched_class->task_fork)
4782 p->sched_class->task_fork(p);
4783 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4784
4785 return scx_fork(p, kargs);
4786 }
4787
4788 void sched_cancel_fork(struct task_struct *p)
4789 {
4790 scx_cancel_fork(p);
4791 }
4792
4793 static void sched_mm_cid_fork(struct task_struct *t);
4794
4795 void sched_post_fork(struct task_struct *p)
4796 {
4797 sched_mm_cid_fork(p);
4798 uclamp_post_fork(p);
4799 scx_post_fork(p);
4800 }
4801
4802 u64 to_ratio(u64 period, u64 runtime)
4803 {
4804 if (runtime == RUNTIME_INF)
4805 return BW_UNIT;
4806
4807 /*
4808 * Doing this here saves a lot of checks in all
4809 * the calling paths, and returning zero seems
4810 * safe for them anyway.
4811 */
4812 if (period == 0)
4813 return 0;
4814
4815 return div64_u64(runtime << BW_SHIFT, period);
4816 }
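/*
 * Worked example, assuming BW_SHIFT == 20 (so BW_UNIT == 1 << 20):
 *
 *	to_ratio(1000000, 500000) == (500000 << 20) / 1000000
 *				  == 524288 == BW_UNIT / 2
 *
 * i.e. a runtime of half the period maps to 50% bandwidth in fixed point.
 */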
4817
4818 /*
4819 * wake_up_new_task - wake up a newly created task for the first time.
4820 *
4821 * This function will do some initial scheduler statistics housekeeping
4822 * that must be done for every newly created context, then puts the task
4823 * on the runqueue and wakes it.
4824 */
4825 void wake_up_new_task(struct task_struct *p)
4826 {
4827 struct rq_flags rf;
4828 struct rq *rq;
4829 int wake_flags = WF_FORK;
4830
4831 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4832 WRITE_ONCE(p->__state, TASK_RUNNING);
4833 /*
4834 * Fork balancing, do it here and not earlier because:
4835 * - cpus_ptr can change in the fork path
4836 * - any previously selected CPU might disappear through hotplug
4837 *
4838 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4839 * as we're not fully set-up yet.
4840 */
4841 p->recent_used_cpu = task_cpu(p);
4842 __set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags));
4843 rq = __task_rq_lock(p, &rf);
4844 update_rq_clock(rq);
4845 post_init_entity_util_avg(p);
4846
4847 activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
4848 trace_sched_wakeup_new(p);
4849 wakeup_preempt(rq, p, wake_flags);
4850 if (p->sched_class->task_woken) {
4851 /*
4852 * Nothing relies on rq->lock after this, so it's fine to
4853 * drop it.
4854 */
4855 rq_unpin_lock(rq, &rf);
4856 p->sched_class->task_woken(rq, p);
4857 rq_repin_lock(rq, &rf);
4858 }
4859 task_rq_unlock(rq, p, &rf);
4860 }
4861
4862 #ifdef CONFIG_PREEMPT_NOTIFIERS
4863
4864 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4865
4866 void preempt_notifier_inc(void)
4867 {
4868 static_branch_inc(&preempt_notifier_key);
4869 }
4870 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4871
4872 void preempt_notifier_dec(void)
4873 {
4874 static_branch_dec(&preempt_notifier_key);
4875 }
4876 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4877
4878 /**
4879 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4880 * @notifier: notifier struct to register
4881 */
4882 void preempt_notifier_register(struct preempt_notifier *notifier)
4883 {
4884 if (!static_branch_unlikely(&preempt_notifier_key))
4885 WARN(1, "registering preempt_notifier while notifiers disabled\n");
4886
4887 hlist_add_head(&notifier->link, &current->preempt_notifiers);
4888 }
4889 EXPORT_SYMBOL_GPL(preempt_notifier_register);
4890
4891 /**
4892 * preempt_notifier_unregister - no longer interested in preemption notifications
4893 * @notifier: notifier struct to unregister
4894 *
4895 * This is *not* safe to call from within a preemption notifier.
4896 */
4897 void preempt_notifier_unregister(struct preempt_notifier *notifier)
4898 {
4899 hlist_del(&notifier->link);
4900 }
4901 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
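/*
 * Registration sketch (illustrative; my_ops and its handlers are
 * hypothetical names), roughly the pattern a user such as KVM follows
 * to save/restore per-CPU state across preemption:
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&notifier, &my_ops);
 *	preempt_notifier_register(&notifier);	// affects current only
 */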
4902
4903 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4904 {
4905 struct preempt_notifier *notifier;
4906
4907 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4908 notifier->ops->sched_in(notifier, raw_smp_processor_id());
4909 }
4910
4911 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4912 {
4913 if (static_branch_unlikely(&preempt_notifier_key))
4914 __fire_sched_in_preempt_notifiers(curr);
4915 }
4916
4917 static void
4918 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4919 struct task_struct *next)
4920 {
4921 struct preempt_notifier *notifier;
4922
4923 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4924 notifier->ops->sched_out(notifier, next);
4925 }
4926
4927 static __always_inline void
4928 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4929 struct task_struct *next)
4930 {
4931 if (static_branch_unlikely(&preempt_notifier_key))
4932 __fire_sched_out_preempt_notifiers(curr, next);
4933 }
4934
4935 #else /* !CONFIG_PREEMPT_NOTIFIERS: */
4936
4937 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4938 {
4939 }
4940
4941 static inline void
4942 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4943 struct task_struct *next)
4944 {
4945 }
4946
4947 #endif /* !CONFIG_PREEMPT_NOTIFIERS */
4948
4949 static inline void prepare_task(struct task_struct *next)
4950 {
4951 /*
4952 * Claim the task as running, we do this before switching to it
4953 * such that any running task will have this set.
4954 *
4955 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4956 * its ordering comment.
4957 */
4958 WRITE_ONCE(next->on_cpu, 1);
4959 }
4960
4961 static inline void finish_task(struct task_struct *prev)
4962 {
4963 /*
4964 * This must be the very last reference to @prev from this CPU. After
4965 * p->on_cpu is cleared, the task can be moved to a different CPU. We
4966 * must ensure this doesn't happen until the switch is completely
4967 * finished.
4968 *
4969 * In particular, the load of prev->state in finish_task_switch() must
4970 * happen before this.
4971 *
4972 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
4973 */
4974 smp_store_release(&prev->on_cpu, 0);
4975 }
4976
4977 /*
4978 * Only called from __schedule context
4979 *
4980 * There are some cases where we are going to re-do the action
4981 * that added the balance callbacks. We may not be in a state
4982 * where we can run them, so just zap them so they can be
4983 * properly re-added on the next time around. This is similar
4984 * handling to running the callbacks, except we just don't call
4985 * them.
4986 */
4987 static void zap_balance_callbacks(struct rq *rq)
4988 {
4989 struct balance_callback *next, *head;
4990 bool found = false;
4991
4992 lockdep_assert_rq_held(rq);
4993
4994 head = rq->balance_callback;
4995 while (head) {
4996 if (head == &balance_push_callback)
4997 found = true;
4998 next = head->next;
4999 head->next = NULL;
5000 head = next;
5001 }
5002 rq->balance_callback = found ? &balance_push_callback : NULL;
5003 }
5004
5005 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
5006 {
5007 void (*func)(struct rq *rq);
5008 struct balance_callback *next;
5009
5010 lockdep_assert_rq_held(rq);
5011
5012 while (head) {
5013 func = (void (*)(struct rq *))head->func;
5014 next = head->next;
5015 head->next = NULL;
5016 head = next;
5017
5018 func(rq);
5019 }
5020 }
5021
5022 static void balance_push(struct rq *rq);
5023
5024 /*
5025 * balance_push_callback is a right abuse of the callback interface and plays
5026 * by significantly different rules.
5027 *
5028 * Where the normal balance_callback's purpose is to be run in the same context
5029 * that queued it (only later, when it's safe to drop rq->lock again),
5030 * balance_push_callback is specifically targeted at __schedule().
5031 *
5032 * This abuse is tolerated because it places all the unlikely/odd cases behind
5033 * a single test, namely: rq->balance_callback == NULL.
5034 */
5035 struct balance_callback balance_push_callback = {
5036 .next = NULL,
5037 .func = balance_push,
5038 };
5039
5040 static inline struct balance_callback *
5041 __splice_balance_callbacks(struct rq *rq, bool split)
5042 {
5043 struct balance_callback *head = rq->balance_callback;
5044
5045 if (likely(!head))
5046 return NULL;
5047
5048 lockdep_assert_rq_held(rq);
5049 /*
5050 * Must not take balance_push_callback off the list when
5051 * splice_balance_callbacks() and balance_callbacks() are not
5052 * in the same rq->lock section.
5053 *
5054 * In that case it would be possible for __schedule() to interleave
5055 * and observe the list empty.
5056 */
5057 if (split && head == &balance_push_callback)
5058 head = NULL;
5059 else
5060 rq->balance_callback = NULL;
5061
5062 return head;
5063 }
5064
5065 struct balance_callback *splice_balance_callbacks(struct rq *rq)
5066 {
5067 return __splice_balance_callbacks(rq, true);
5068 }
5069
5070 void __balance_callbacks(struct rq *rq, struct rq_flags *rf)
5071 {
5072 if (rf)
5073 rq_unpin_lock(rq, rf);
5074 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
5075 if (rf)
5076 rq_repin_lock(rq, rf);
5077 }
5078
5079 void balance_callbacks(struct rq *rq, struct balance_callback *head)
5080 {
5081 unsigned long flags;
5082
5083 if (unlikely(head)) {
5084 raw_spin_rq_lock_irqsave(rq, flags);
5085 do_balance_callbacks(rq, head);
5086 raw_spin_rq_unlock_irqrestore(rq, flags);
5087 }
5088 }
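/*
 * Typical split-pairing sketch, modelled on __sched_setscheduler():
 * queue callbacks while holding rq->lock, splice them off before
 * dropping it, then run them with the lock re-taken:
 *
 *	rq = task_rq_lock(p, &rf);
 *	...					// may queue callbacks
 *	head = splice_balance_callbacks(rq);
 *	task_rq_unlock(rq, p, &rf);
 *
 *	balance_callbacks(rq, head);
 */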
5089
5090 static inline void
5091 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
5092 __releases(__rq_lockp(rq))
5093 __acquires(__rq_lockp(this_rq()))
5094 {
5095 /*
5096 * The runqueue lock will be released by the next
5097 * task (which is an invalid locking op but in the case
5098 * of the scheduler it's an obvious special-case), so we
5099 * do an early lockdep release here:
5100 */
5101 rq_unpin_lock(rq, rf);
5102 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
5103 #ifdef CONFIG_DEBUG_SPINLOCK
5104 /* this is a valid case when another task releases the spinlock */
5105 rq_lockp(rq)->owner = next;
5106 #endif
5107 /*
5108 * Model the rq reference switcheroo.
5109 */
5110 __release(__rq_lockp(rq));
5111 __acquire(__rq_lockp(this_rq()));
5112 }
5113
5114 static inline void finish_lock_switch(struct rq *rq)
5115 __releases(__rq_lockp(rq))
5116 {
5117 /*
5118 * If we are tracking spinlock dependencies then we have to
5119 * fix up the runqueue lock - which gets 'carried over' from
5120 * prev into current:
5121 */
5122 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
5123 __balance_callbacks(rq, NULL);
5124 hrtick_schedule_exit(rq);
5125 raw_spin_rq_unlock_irq(rq);
5126 }
5127
5128 /*
5129 * NOP if the arch has not defined these:
5130 */
5131
5132 #ifndef prepare_arch_switch
5133 # define prepare_arch_switch(next) do { } while (0)
5134 #endif
5135
5136 #ifndef finish_arch_post_lock_switch
5137 # define finish_arch_post_lock_switch() do { } while (0)
5138 #endif
5139
5140 static inline void kmap_local_sched_out(void)
5141 {
5142 #ifdef CONFIG_KMAP_LOCAL
5143 if (unlikely(current->kmap_ctrl.idx))
5144 __kmap_local_sched_out();
5145 #endif
5146 }
5147
5148 static inline void kmap_local_sched_in(void)
5149 {
5150 #ifdef CONFIG_KMAP_LOCAL
5151 if (unlikely(current->kmap_ctrl.idx))
5152 __kmap_local_sched_in();
5153 #endif
5154 }
5155
5156 /**
5157 * prepare_task_switch - prepare to switch tasks
5158 * @rq: the runqueue preparing to switch
5159 * @prev: the current task that is being switched out
5160 * @next: the task we are going to switch to.
5161 *
5162 * This is called with the rq lock held and interrupts off. It must
5163 * be paired with a subsequent finish_task_switch after the context
5164 * switch.
5165 *
5166 * prepare_task_switch sets up locking and calls architecture specific
5167 * hooks.
5168 */
5169 static inline void
5170 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5171 struct task_struct *next)
5172 __must_hold(__rq_lockp(rq))
5173 {
5174 kcov_prepare_switch(prev);
5175 sched_info_switch(rq, prev, next);
5176 perf_event_task_sched_out(prev, next);
5177 fire_sched_out_preempt_notifiers(prev, next);
5178 kmap_local_sched_out();
5179 prepare_task(next);
5180 prepare_arch_switch(next);
5181 }
5182
5183 /**
5184 * finish_task_switch - clean up after a task-switch
5185 * @prev: the thread we just switched away from.
5186 *
5187 * finish_task_switch must be called after the context switch, paired
5188 * with a prepare_task_switch call before the context switch.
5189 * finish_task_switch will reconcile locking set up by prepare_task_switch,
5190 * and do any other architecture-specific cleanup actions.
5191 *
5192 * Note that we may have delayed dropping an mm in context_switch(). If
5193 * so, we finish that here outside of the runqueue lock. (Doing it
5194 * with the lock held can cause deadlocks; see schedule() for
5195 * details.)
5196 *
5197 * The context switch has flipped the stack from under us and restored the
5198 * local variables which were saved when this task called schedule() in the
5199 * past. 'prev == current' is still correct but we need to recalculate this_rq
5200 * because prev may have moved to another CPU.
5201 */
5202 static struct rq *finish_task_switch(struct task_struct *prev)
5203 __releases(__rq_lockp(this_rq()))
5204 {
5205 struct rq *rq = this_rq();
5206 struct mm_struct *mm = rq->prev_mm;
5207 unsigned int prev_state;
5208
5209 /*
5210 * The previous task will have left us with a preempt_count of 2
5211 * because it left us after:
5212 *
5213 * schedule()
5214 * preempt_disable(); // 1
5215 * __schedule()
5216 * raw_spin_lock_irq(&rq->lock) // 2
5217 *
5218 * Also, see FORK_PREEMPT_COUNT.
5219 */
5220 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5221 "corrupted preempt_count: %s/%d/0x%x\n",
5222 current->comm, current->pid, preempt_count()))
5223 preempt_count_set(FORK_PREEMPT_COUNT);
5224
5225 rq->prev_mm = NULL;
5226
5227 /*
5228 * A task struct has one reference for the use as "current".
5229 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5230 * schedule one last time. The schedule call will never return, and
5231 * the scheduled task must drop that reference.
5232 *
5233 * We must observe prev->state before clearing prev->on_cpu (in
5234 * finish_task), otherwise a concurrent wakeup can get prev
5235 * running on another CPU and we could race with its RUNNING -> DEAD
5236 * transition, resulting in a double drop.
5237 */
5238 prev_state = READ_ONCE(prev->__state);
5239 vtime_task_switch(prev);
5240 perf_event_task_sched_in(prev, current);
5241 finish_task(prev);
5242 tick_nohz_task_switch();
5243 finish_lock_switch(rq);
5244 finish_arch_post_lock_switch();
5245 kcov_finish_switch(current);
5246 /*
5247 * kmap_local_sched_out() is invoked with rq::lock held and
5248 * interrupts disabled. There is no requirement for that, but the
5249 * sched out code does not have an interrupt enabled section.
5250 * Restoring the maps on sched in does not require interrupts being
5251 * disabled either.
5252 */
5253 kmap_local_sched_in();
5254
5255 fire_sched_in_preempt_notifiers(current);
5256 /*
5257 * When switching through a kernel thread, the loop in
5258 * membarrier_{private,global}_expedited() may have observed that
5259 * kernel thread and not issued an IPI. It is therefore possible to
5260 * schedule between user->kernel->user threads without passing through
5261 * switch_mm(). Membarrier requires a barrier after storing to
5262 * rq->curr, before returning to userspace, so provide them here:
5263 *
5264 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5265 * provided by mmdrop_lazy_tlb(),
5266 * - a sync_core for SYNC_CORE.
5267 */
5268 if (mm) {
5269 membarrier_mm_sync_core_before_usermode(mm);
5270 mmdrop_lazy_tlb_sched(mm);
5271 }
5272
5273 if (unlikely(prev_state == TASK_DEAD)) {
5274 if (prev->sched_class->task_dead)
5275 prev->sched_class->task_dead(prev);
5276
5277 /*
5278 * sched_ext_dead() must come before cgroup_task_dead() to
5279 * prevent cgroups from being removed while its member tasks are
5280 * visible to SCX schedulers.
5281 */
5282 sched_ext_dead(prev);
5283 cgroup_task_dead(prev);
5284
5285 /* Task is done with its stack. */
5286 put_task_stack(prev);
5287
5288 put_task_struct_rcu_user(prev);
5289 }
5290
5291 return rq;
5292 }
5293
5294 /**
5295 * schedule_tail - first thing a freshly forked thread must call.
5296 * @prev: the thread we just switched away from.
5297 */
5298 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5299 __releases(__rq_lockp(this_rq()))
5300 {
5301 /*
5302 * New tasks start with FORK_PREEMPT_COUNT, see there and
5303 * finish_task_switch() for details.
5304 *
5305 * finish_task_switch() will drop rq->lock() and lower preempt_count
5306 * and the preempt_enable() will end up enabling preemption (on
5307 * PREEMPT_COUNT kernels).
5308 */
5309
5310 finish_task_switch(prev);
5311 /*
5312 * This is a special case: the newly created task has just
5313 * switched the context for the first time. It is returning from
5314 * schedule for the first time in this path.
5315 */
5316 trace_sched_exit_tp(true);
5317 preempt_enable();
5318
5319 if (current->set_child_tid)
5320 put_user(task_pid_vnr(current), current->set_child_tid);
5321
5322 calculate_sigpending();
5323 }
5324
5325 /*
5326 * context_switch - switch to the new MM and the new thread's register state.
5327 */
5328 static __always_inline struct rq *
5329 context_switch(struct rq *rq, struct task_struct *prev,
5330 struct task_struct *next, struct rq_flags *rf)
5331 __releases(__rq_lockp(rq))
5332 {
5333 prepare_task_switch(rq, prev, next);
5334
5335 /*
5336 * For paravirt, this is coupled with an exit in switch_to to
5337 * combine the page table reload and the switch backend into
5338 * one hypercall.
5339 */
5340 arch_start_context_switch(prev);
5341
5342 /*
5343 * kernel -> kernel lazy + transfer active
5344 * user -> kernel lazy + mmgrab_lazy_tlb() active
5345 *
5346 * kernel -> user switch + mmdrop_lazy_tlb() active
5347 * user -> user switch
5348 */
5349 if (!next->mm) { // to kernel
5350 enter_lazy_tlb(prev->active_mm, next);
5351
5352 next->active_mm = prev->active_mm;
5353 if (prev->mm) // from user
5354 mmgrab_lazy_tlb(prev->active_mm);
5355 else
5356 prev->active_mm = NULL;
5357 } else { // to user
5358 membarrier_switch_mm(rq, prev->active_mm, next->mm);
5359 /*
5360 * sys_membarrier() requires an smp_mb() between setting
5361 * rq->curr / membarrier_switch_mm() and returning to userspace.
5362 *
5363 * The below provides this either through switch_mm(), or in
5364 * case 'prev->active_mm == next->mm' through
5365 * finish_task_switch()'s mmdrop().
5366 */
5367 switch_mm_irqs_off(prev->active_mm, next->mm, next);
5368 lru_gen_use_mm(next->mm);
5369
5370 if (!prev->mm) { // from kernel
5371 /* will mmdrop_lazy_tlb() in finish_task_switch(). */
5372 rq->prev_mm = prev->active_mm;
5373 prev->active_mm = NULL;
5374 }
5375 }
5376
5377 mm_cid_switch_to(prev, next);
5378
5379 /*
5380 * Tell rseq that the task was scheduled in. Must be after
5381 * switch_mm_cid() to get the TIF flag set.
5382 */
5383 rseq_sched_switch_event(next);
5384
5385 prepare_lock_switch(rq, next, rf);
5386
5387 /* Here we just switch the register state and the stack. */
5388 switch_to(prev, next, prev);
5389 barrier();
5390
5391 return finish_task_switch(prev);
5392 }
5393
5394 /*
5395 * nr_running and nr_context_switches:
5396 *
5397 * externally visible scheduler statistics: current number of runnable
5398 * threads, total number of context switches performed since bootup.
5399 */
5400 unsigned int nr_running(void)
5401 {
5402 unsigned int i, sum = 0;
5403
5404 for_each_online_cpu(i)
5405 sum += cpu_rq(i)->nr_running;
5406
5407 return sum;
5408 }
5409
5410 /*
5411 * Check if only the current task is running on the CPU.
5412 *
5413 * Caution: this function does not check that the caller has disabled
5414 * preemption, thus the result might have a time-of-check-to-time-of-use
5415 * race. The caller is responsible for using it correctly, for example:
5416 *
5417 * - from a non-preemptible section (of course)
5418 *
5419 * - from a thread that is bound to a single CPU
5420 *
5421 * - in a loop with very short iterations (e.g. a polling loop)
5422 */
5423 bool single_task_running(void)
5424 {
5425 return raw_rq()->nr_running == 1;
5426 }
5427 EXPORT_SYMBOL(single_task_running);
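/*
 * Illustrative usage sketch (not from this file): a typical consumer is a
 * short polling loop that only keeps spinning while nothing else wants the
 * CPU; condition_met() is a hypothetical predicate:
 *
 *	while (!condition_met() && single_task_running())
 *		cpu_relax();
 *
 * Per the caution above, the answer can go stale at any time, so callers
 * must tolerate the race.
 */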
5428
5429 unsigned long long nr_context_switches_cpu(int cpu)
5430 {
5431 return cpu_rq(cpu)->nr_switches;
5432 }
5433
5434 unsigned long long nr_context_switches(void)
5435 {
5436 int i;
5437 unsigned long long sum = 0;
5438
5439 for_each_possible_cpu(i)
5440 sum += cpu_rq(i)->nr_switches;
5441
5442 return sum;
5443 }
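/*
 * Illustrative: this sum is what /proc/stat reports as its "ctxt" line,
 * e.g. "ctxt 51346138" (value hypothetical). Because the loop walks the
 * possible mask rather than the online mask, switches performed on CPUs
 * that have since gone offline remain counted.
 */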
5444
5445 /*
5446 * Consumers of these two interfaces, like for example the cpuidle menu
5447 * governor, are using nonsensical data: they prefer shallow idle state
5448 * selection for a CPU that has IO-wait pending, even though that CPU might
5449 * not even end up running the task once it does become runnable.
5450 */
5451
5452 unsigned int nr_iowait_cpu(int cpu)
5453 {
5454 return atomic_read(&cpu_rq(cpu)->nr_iowait);
5455 }
5456
5457 /*
5458 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5459 *
5460 * The idea behind IO-wait accounting is to account the idle time that we could
5461 * have spent running if it were not for IO. That is, if we were to improve the
5462 * storage performance, we'd have a proportional reduction in IO-wait time.
5463 *
5464 * This all works nicely on UP, where, when a task blocks on IO, we account
5465 * idle time as IO-wait, because if the storage were faster, it could've been
5466 * running and we'd not be idle.
5467 *
5468 * This has been extended to SMP, by doing the same for each CPU. This however
5469 * is broken.
5470 *
5471 * Imagine for instance the case where two tasks block on one CPU, only the one
5472 * CPU will have IO-wait accounted, while the other has regular idle. Even
5473 * though, if the storage were faster, both could've run at the same time,
5474 * utilising both CPUs.
5475 *
5476 * This means that, when looking globally, the current IO-wait accounting on
5477 * SMP is a lower bound, by reason of under-accounting.
5478 *
5479 * Worse, since the numbers are provided per CPU, they are sometimes
5480 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5481 * associated with any one particular CPU; it can wake up on a different CPU
5482 * than the one it blocked on. This means the per-CPU IO-wait number is meaningless.
5483 *
5484 * Task CPU affinities can make all that even more 'interesting'.
5485 */
5486
5487 unsigned int nr_iowait(void)
5488 {
5489 unsigned int i, sum = 0;
5490
5491 for_each_possible_cpu(i)
5492 sum += nr_iowait_cpu(i);
5493
5494 return sum;
5495 }
5496
5497 /*
5498 * sched_exec - execve() is a valuable balancing opportunity, because at
5499 * this point the task has the smallest effective memory and cache footprint.
5500 */
5501 void sched_exec(void)
5502 {
5503 struct task_struct *p = current;
5504 struct migration_arg arg;
5505 int dest_cpu;
5506
5507 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5508 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5509 if (dest_cpu == smp_processor_id())
5510 return;
5511
5512 if (unlikely(!cpu_active(dest_cpu)))
5513 return;
5514
5515 arg = (struct migration_arg){ p, dest_cpu };
5516 }
5517 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5518 }
5519
5520 DEFINE_PER_CPU(struct kernel_stat, kstat);
5521 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5522
5523 EXPORT_PER_CPU_SYMBOL(kstat);
5524 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5525
5526 /*
5527 * The function fair_sched_class.update_curr accesses the struct curr
5528 * and its field curr->exec_start; when called from task_sched_runtime(),
5529 * we observe a high rate of cache misses in practice.
5530 * Prefetching this data results in improved performance.
5531 */
5532 static inline void prefetch_curr_exec_start(struct task_struct *p)
5533 {
5534 #ifdef CONFIG_FAIR_GROUP_SCHED
5535 struct sched_entity *curr = p->se.cfs_rq->curr;
5536 #else
5537 struct sched_entity *curr = task_rq(p)->cfs.curr;
5538 #endif
5539 prefetch(curr);
5540 prefetch(&curr->exec_start);
5541 }
5542
5543 /*
5544 * Return accounted runtime for the task.
5545 * In case the task is currently running, return the runtime plus current's
5546 * pending runtime that has not been accounted yet.
5547 */
5548 unsigned long long task_sched_runtime(struct task_struct *p)
5549 {
5550 struct rq_flags rf;
5551 struct rq *rq;
5552 u64 ns;
5553
5554 #ifdef CONFIG_64BIT
5555 /*
5556 * 64-bit doesn't need locks to atomically read a 64-bit value.
5557 * So we have an optimization chance when the task's delta_exec is 0.
5558 * Reading ->on_cpu is racy, but this is OK.
5559 *
5560 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5561 * If we race with it entering CPU, unaccounted time is 0. This is
5562 * indistinguishable from the read occurring a few cycles earlier.
5563 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5564 * been accounted, so we're correct here as well.
5565 */
5566 if (!p->on_cpu || !task_on_rq_queued(p))
5567 return p->se.sum_exec_runtime;
5568 #endif
5569
5570 rq = task_rq_lock(p, &rf);
5571 /*
5572 * Must be ->curr _and_ ->on_rq. If dequeued, we would
5573 * project cycles that may never be accounted to this
5574 * thread, breaking clock_gettime().
5575 */
5576 if (task_current_donor(rq, p) && task_on_rq_queued(p)) {
5577 prefetch_curr_exec_start(p);
5578 update_rq_clock(rq);
5579 p->sched_class->update_curr(rq);
5580 }
5581 ns = p->se.sum_exec_runtime;
5582 task_rq_unlock(rq, p, &rf);
5583
5584 return ns;
5585 }
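/*
 * Illustrative consumer (assuming the usual posix-cpu-timers plumbing): a
 * userspace call such as clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) is
 * ultimately serviced by task_sched_runtime(), which is why the
 * ->curr && ->on_rq check above matters for clock_gettime() correctness.
 */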
5586
5587 static u64 cpu_resched_latency(struct rq *rq)
5588 {
5589 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5590 u64 resched_latency, now = rq_clock(rq);
5591 static bool warned_once;
5592
5593 if (sysctl_resched_latency_warn_once && warned_once)
5594 return 0;
5595
5596 if (!need_resched() || !latency_warn_ms)
5597 return 0;
5598
5599 if (system_state == SYSTEM_BOOTING)
5600 return 0;
5601
5602 if (!rq->last_seen_need_resched_ns) {
5603 rq->last_seen_need_resched_ns = now;
5604 rq->ticks_without_resched = 0;
5605 return 0;
5606 }
5607
5608 rq->ticks_without_resched++;
5609 resched_latency = now - rq->last_seen_need_resched_ns;
5610 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5611 return 0;
5612
5613 warned_once = true;
5614
5615 return resched_latency;
5616 }
5617
5618 static int __init setup_resched_latency_warn_ms(char *str)
5619 {
5620 long val;
5621
5622 if ((kstrtol(str, 0, &val))) {
5623 pr_warn("Unable to set resched_latency_warn_ms\n");
5624 return 1;
5625 }
5626
5627 sysctl_resched_latency_warn_ms = val;
5628 return 1;
5629 }
5630 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
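/*
 * Illustrative: booting with "resched_latency_warn_ms=100" lowers the warn
 * threshold, while "resched_latency_warn_ms=0" disables the warning
 * entirely via the !latency_warn_ms early return in cpu_resched_latency().
 */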
5631
5632 /*
5633 * This function gets called by the timer code, with HZ frequency.
5634 * We call it with interrupts disabled.
5635 */
5636 void sched_tick(void)
5637 {
5638 int cpu = smp_processor_id();
5639 struct rq *rq = cpu_rq(cpu);
5640 /* accounting goes to the donor task */
5641 struct task_struct *donor;
5642 struct rq_flags rf;
5643 unsigned long hw_pressure;
5644 u64 resched_latency;
5645
5646 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5647 arch_scale_freq_tick();
5648
5649 sched_clock_tick();
5650
5651 rq_lock(rq, &rf);
5652 donor = rq->donor;
5653
5654 psi_account_irqtime(rq, donor, NULL);
5655
5656 update_rq_clock(rq);
5657 hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
5658 update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
5659
5660 if (dynamic_preempt_lazy() && tif_test_bit(TIF_NEED_RESCHED_LAZY))
5661 resched_curr(rq);
5662
5663 donor->sched_class->task_tick(rq, donor, 0);
5664 if (sched_feat(LATENCY_WARN))
5665 resched_latency = cpu_resched_latency(rq);
5666 calc_global_load_tick(rq);
5667 sched_core_tick(rq);
5668 scx_tick(rq);
5669
5670 rq_unlock(rq, &rf);
5671
5672 if (sched_feat(LATENCY_WARN) && resched_latency)
5673 resched_latency_warn(cpu, resched_latency);
5674
5675 perf_event_task_tick();
5676
5677 if (donor->flags & PF_WQ_WORKER)
5678 wq_worker_tick(donor);
5679
5680 if (!scx_switched_all()) {
5681 rq->idle_balance = idle_cpu(cpu);
5682 sched_balance_trigger(rq);
5683 }
5684 }
5685
5686 #ifdef CONFIG_NO_HZ_FULL
5687
5688 struct tick_work {
5689 int cpu;
5690 atomic_t state;
5691 struct delayed_work work;
5692 };
5693 /* Values for ->state, see diagram below. */
5694 #define TICK_SCHED_REMOTE_OFFLINE 0
5695 #define TICK_SCHED_REMOTE_OFFLINING 1
5696 #define TICK_SCHED_REMOTE_RUNNING 2
5697
5698 /*
5699 * State diagram for ->state:
5700 *
5701 *
5702 * TICK_SCHED_REMOTE_OFFLINE
5703 * | ^
5704 * | |
5705 * | | sched_tick_remote()
5706 * | |
5707 * | |
5708 * +--TICK_SCHED_REMOTE_OFFLINING
5709 * | ^
5710 * | |
5711 * sched_tick_start() | | sched_tick_stop()
5712 * | |
5713 * V |
5714 * TICK_SCHED_REMOTE_RUNNING
5715 *
5716 *
5717 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5718 * and sched_tick_start() are happy to leave the state in RUNNING.
5719 */
5720
5721 static struct tick_work __percpu *tick_work_cpu;
5722
5723 static void sched_tick_remote(struct work_struct *work)
5724 {
5725 struct delayed_work *dwork = to_delayed_work(work);
5726 struct tick_work *twork = container_of(dwork, struct tick_work, work);
5727 int cpu = twork->cpu;
5728 struct rq *rq = cpu_rq(cpu);
5729 int os;
5730
5731 /*
5732 * Handle the tick only if it appears the remote CPU is running in full
5733 * dynticks mode. The check is racy by nature, but missing a tick or
5734 * having one too many is no big deal because the scheduler tick updates
5735 * statistics and checks timeslices in a time-independent way, regardless
5736 * of when exactly it is running.
5737 */
5738 if (tick_nohz_tick_stopped_cpu(cpu)) {
5739 guard(rq_lock_irq)(rq);
5740 struct task_struct *curr = rq->curr;
5741
5742 if (cpu_online(cpu)) {
5743 /*
5744 * Since this is a remote tick for full dynticks mode,
5745 * we are always sure that there is no proxy (only a
5746 * single task is running).
5747 */
5748 WARN_ON_ONCE(rq->curr != rq->donor);
5749 update_rq_clock(rq);
5750
5751 if (!is_idle_task(curr)) {
5752 /*
5753 * Make sure the next tick runs within a
5754 * reasonable amount of time.
5755 */
5756 u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5757 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 30);
5758 }
5759 curr->sched_class->task_tick(rq, curr, 0);
5760
5761 calc_load_nohz_remote(rq);
5762 }
5763 }
5764
5765 /*
5766 * Run the remote tick once per second (1Hz). This arbitrary
5767 * interval is long enough to avoid overload but short enough
5768 * to keep scheduler internal stats reasonably up to date. But
5769 * first update state to reflect hotplug activity if required.
5770 */
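	/*
	 * Illustrative: atomic_fetch_add_unless(&state, -1, RUNNING) leaves
	 * RUNNING (2) untouched, so the work is requeued below, while
	 * OFFLINING (1) is decremented to OFFLINE (0), completing the
	 * sched_tick_stop() handshake without requeueing.
	 */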
5771 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5772 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5773 if (os == TICK_SCHED_REMOTE_RUNNING)
5774 queue_delayed_work(system_dfl_wq, dwork, HZ);
5775 }
5776
5777 static void sched_tick_start(int cpu)
5778 {
5779 int os;
5780 struct tick_work *twork;
5781
5782 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5783 return;
5784
5785 WARN_ON_ONCE(!tick_work_cpu);
5786
5787 twork = per_cpu_ptr(tick_work_cpu, cpu);
5788 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5789 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5790 if (os == TICK_SCHED_REMOTE_OFFLINE) {
5791 twork->cpu = cpu;
5792 INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5793 queue_delayed_work(system_dfl_wq, &twork->work, HZ);
5794 }
5795 }
5796
5797 #ifdef CONFIG_HOTPLUG_CPU
5798 static void sched_tick_stop(int cpu)
5799 {
5800 struct tick_work *twork;
5801 int os;
5802
5803 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5804 return;
5805
5806 WARN_ON_ONCE(!tick_work_cpu);
5807
5808 twork = per_cpu_ptr(tick_work_cpu, cpu);
5809 /* There cannot be competing actions, but don't rely on stop-machine. */
5810 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5811 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5812 /* Don't cancel, as this would mess up the state machine. */
5813 }
5814 #endif /* CONFIG_HOTPLUG_CPU */
5815
5816 int __init sched_tick_offload_init(void)
5817 {
5818 tick_work_cpu = alloc_percpu(struct tick_work);
5819 BUG_ON(!tick_work_cpu);
5820 return 0;
5821 }
5822
5823 #else /* !CONFIG_NO_HZ_FULL: */
5824 static inline void sched_tick_start(int cpu) { }
5825 static inline void sched_tick_stop(int cpu) { }
5826 #endif /* !CONFIG_NO_HZ_FULL */
5827
5828 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5829 defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5830 /*
5831 * If the value passed in is equal to the current preempt count
5832 * then we just disabled preemption. Start timing the latency.
5833 */
5834 static inline void preempt_latency_start(int val)
5835 {
5836 if (preempt_count() == val) {
5837 unsigned long ip = get_lock_parent_ip();
5838 #ifdef CONFIG_DEBUG_PREEMPT
5839 current->preempt_disable_ip = ip;
5840 #endif
5841 trace_preempt_off(CALLER_ADDR0, ip);
5842 }
5843 }
5844
5845 void preempt_count_add(int val)
5846 {
5847 #ifdef CONFIG_DEBUG_PREEMPT
5848 /*
5849 * Underflow?
5850 */
5851 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5852 return;
5853 #endif
5854 __preempt_count_add(val);
5855 #ifdef CONFIG_DEBUG_PREEMPT
5856 /*
5857 * Spinlock count overflowing soon?
5858 */
5859 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5860 PREEMPT_MASK - 10);
5861 #endif
5862 preempt_latency_start(val);
5863 }
5864 EXPORT_SYMBOL(preempt_count_add);
5865 NOKPROBE_SYMBOL(preempt_count_add);
5866
5867 /*
5868 * If the value passed in equals the current preempt count
5869 * then we just enabled preemption. Stop timing the latency.
5870 */
5871 static inline void preempt_latency_stop(int val)
5872 {
5873 if (preempt_count() == val)
5874 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5875 }
5876
5877 void preempt_count_sub(int val)
5878 {
5879 #ifdef CONFIG_DEBUG_PREEMPT
5880 /*
5881 * Underflow?
5882 */
5883 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5884 return;
5885 /*
5886 * Is the spinlock portion underflowing?
5887 */
5888 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5889 !(preempt_count() & PREEMPT_MASK)))
5890 return;
5891 #endif
5892
5893 preempt_latency_stop(val);
5894 __preempt_count_sub(val);
5895 }
5896 EXPORT_SYMBOL(preempt_count_sub);
5897 NOKPROBE_SYMBOL(preempt_count_sub);
5898
5899 #else
5900 static inline void preempt_latency_start(int val) { }
5901 static inline void preempt_latency_stop(int val) { }
5902 #endif
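/*
 * Illustrative (hedged) expansion, assuming CONFIG_DEBUG_PREEMPT or the
 * preempt tracepoints are enabled: preempt_disable() boils down to roughly
 *
 *	preempt_count_add(1);
 *	barrier();
 *
 * with preempt_enable() doing the matching preempt_count_sub() plus a
 * resched check, so the latency start/stop hooks above fire exactly on the
 * outermost disable/enable pair (preempt_count() == val).
 */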
5903
5904 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5905 {
5906 #ifdef CONFIG_DEBUG_PREEMPT
5907 return p->preempt_disable_ip;
5908 #else
5909 return 0;
5910 #endif
5911 }
5912
5913 /*
5914 * Print scheduling while atomic bug:
5915 */
5916 static noinline void __schedule_bug(struct task_struct *prev)
5917 {
5918 /* Save this before calling printk(), since that will clobber it */
5919 unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5920
5921 if (oops_in_progress)
5922 return;
5923
5924 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5925 prev->comm, prev->pid, preempt_count());
5926
5927 debug_show_held_locks(prev);
5928 print_modules();
5929 if (irqs_disabled())
5930 print_irqtrace_events(prev);
5931 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
5932 pr_err("Preemption disabled at:");
5933 print_ip_sym(KERN_ERR, preempt_disable_ip);
5934 }
5935 check_panic_on_warn("scheduling while atomic");
5936
5937 dump_stack();
5938 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5939 }
5940
5941 /*
5942 * Various schedule()-time debugging checks and statistics:
5943 */
5944 static inline void schedule_debug(struct task_struct *prev, bool preempt)
5945 {
5946 #ifdef CONFIG_SCHED_STACK_END_CHECK
5947 if (task_stack_end_corrupted(prev))
5948 panic("corrupted stack end detected inside scheduler\n");
5949
5950 if (task_scs_end_corrupted(prev))
5951 panic("corrupted shadow stack detected inside scheduler\n");
5952 #endif
5953
5954 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5955 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5956 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5957 prev->comm, prev->pid, prev->non_block_count);
5958 dump_stack();
5959 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5960 }
5961 #endif
5962
5963 if (unlikely(in_atomic_preempt_off())) {
5964 __schedule_bug(prev);
5965 preempt_count_set(PREEMPT_DISABLED);
5966 }
5967 rcu_sleep_check();
5968 WARN_ON_ONCE(ct_state() == CT_STATE_USER);
5969
5970 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5971
5972 schedstat_inc(this_rq()->sched_count);
5973 }
5974
5975 static void prev_balance(struct rq *rq, struct task_struct *prev,
5976 struct rq_flags *rf)
5977 {
5978 const struct sched_class *start_class = prev->sched_class;
5979 const struct sched_class *class;
5980
5981 /*
5982 * We must do the balancing pass before put_prev_task(), such
5983 * that when we release the rq->lock the task is in the same
5984 * state as before we took rq->lock.
5985 *
5986 * We can terminate the balance pass as soon as we know there is
5987 * a runnable task of @class priority or higher.
5988 */
5989 for_active_class_range(class, start_class, &idle_sched_class) {
5990 if (class->balance && class->balance(rq, prev, rf))
5991 break;
5992 }
5993 }
5994
5995 /*
5996 * Pick up the highest-prio task:
5997 */
5998 static inline struct task_struct *
5999 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6000 __must_hold(__rq_lockp(rq))
6001 {
6002 const struct sched_class *class;
6003 struct task_struct *p;
6004
6005 rq->dl_server = NULL;
6006
6007 if (scx_enabled())
6008 goto restart;
6009
6010 /*
6011 * Optimization: we know that if all tasks are in the fair class we can
6012 * call that function directly, but only if the @prev task wasn't of a
6013 * higher scheduling class, because otherwise those lose the
6014 * opportunity to pull in more work from other CPUs.
6015 */
6016 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
6017 rq->nr_running == rq->cfs.h_nr_queued)) {
6018
6019 p = pick_next_task_fair(rq, prev, rf);
6020 if (unlikely(p == RETRY_TASK))
6021 goto restart;
6022
6023 /* Assume the next prioritized class is idle_sched_class */
6024 if (!p) {
6025 p = pick_task_idle(rq, rf);
6026 put_prev_set_next_task(rq, prev, p);
6027 }
6028
6029 return p;
6030 }
6031
6032 restart:
6033 prev_balance(rq, prev, rf);
6034
6035 for_each_active_class(class) {
6036 if (class->pick_next_task) {
6037 p = class->pick_next_task(rq, prev, rf);
6038 if (unlikely(p == RETRY_TASK))
6039 goto restart;
6040 if (p)
6041 return p;
6042 } else {
6043 p = class->pick_task(rq, rf);
6044 if (unlikely(p == RETRY_TASK))
6045 goto restart;
6046 if (p) {
6047 put_prev_set_next_task(rq, prev, p);
6048 return p;
6049 }
6050 }
6051 }
6052
6053 BUG(); /* The idle class should always have a runnable task. */
6054 }
6055
6056 #ifdef CONFIG_SCHED_CORE
6057 static inline bool is_task_rq_idle(struct task_struct *t)
6058 {
6059 return (task_rq(t)->idle == t);
6060 }
6061
6062 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
6063 {
6064 return is_task_rq_idle(a) || (a->core_cookie == cookie);
6065 }
6066
6067 static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
6068 {
6069 if (is_task_rq_idle(a) || is_task_rq_idle(b))
6070 return true;
6071
6072 return a->core_cookie == b->core_cookie;
6073 }
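/*
 * Illustrative truth table for two distinct cookies A and B:
 *
 *	A vs A -> match,  A vs B -> no match,  idle vs anything -> match
 *
 * The idle task being compatible with every cookie is what permits
 * force-idling a sibling in the core-wide pick below.
 */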
6074
6075 /*
6076 * Careful; this can return RETRY_TASK, it does not include the retry-loop
6077 * itself due to the whole SMT pick retry thing below.
6078 */
6079 static inline struct task_struct *pick_task(struct rq *rq, struct rq_flags *rf)
6080 {
6081 const struct sched_class *class;
6082 struct task_struct *p;
6083
6084 rq->dl_server = NULL;
6085
6086 for_each_active_class(class) {
6087 p = class->pick_task(rq, rf);
6088 if (p)
6089 return p;
6090 }
6091
6092 BUG(); /* The idle class should always have a runnable task. */
6093 }
6094
6095 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6096
6097 static void queue_core_balance(struct rq *rq);
6098
6099 static struct task_struct *
6100 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6101 __must_hold(__rq_lockp(rq))
6102 {
6103 struct task_struct *next, *p, *max;
6104 const struct cpumask *smt_mask;
6105 bool fi_before = false;
6106 bool core_clock_updated = (rq == rq->core);
6107 unsigned long cookie;
6108 int i, cpu, occ = 0;
6109 struct rq *rq_i;
6110 bool need_sync;
6111
6112 if (!sched_core_enabled(rq))
6113 return __pick_next_task(rq, prev, rf);
6114
6115 cpu = cpu_of(rq);
6116
6117 /* Stopper task is switching into idle, no need for core-wide selection. */
6118 if (cpu_is_offline(cpu)) {
6119 /*
6120 * Reset core_pick so that we don't enter the fastpath when
6121 * coming online. core_pick would already have been migrated to
6122 * another CPU while offline.
6123 */
6124 rq->core_pick = NULL;
6125 rq->core_dl_server = NULL;
6126 return __pick_next_task(rq, prev, rf);
6127 }
6128
6129 /*
6130 * If there were no {en,de}queues since we picked (IOW, the task
6131 * pointers are all still valid), and we haven't scheduled the last
6132 * pick yet, do so now.
6133 *
6134 * rq->core_pick can be NULL if no selection was made for a CPU because
6135 * it was either offline or went offline during a sibling's core-wide
6136 * selection. In this case, do a core-wide selection.
6137 */
6138 if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6139 rq->core->core_pick_seq != rq->core_sched_seq &&
6140 rq->core_pick) {
6141 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6142
6143 next = rq->core_pick;
6144 rq->dl_server = rq->core_dl_server;
6145 rq->core_pick = NULL;
6146 rq->core_dl_server = NULL;
6147 goto out_set_next;
6148 }
6149
6150 prev_balance(rq, prev, rf);
6151
6152 smt_mask = cpu_smt_mask(cpu);
6153 need_sync = !!rq->core->core_cookie;
6154
6155 /* reset state */
6156 rq->core->core_cookie = 0UL;
6157 if (rq->core->core_forceidle_count) {
6158 if (!core_clock_updated) {
6159 update_rq_clock(rq->core);
6160 core_clock_updated = true;
6161 }
6162 sched_core_account_forceidle(rq);
6163 /* reset after accounting force idle */
6164 rq->core->core_forceidle_start = 0;
6165 rq->core->core_forceidle_count = 0;
6166 rq->core->core_forceidle_occupation = 0;
6167 need_sync = true;
6168 fi_before = true;
6169 }
6170
6171 /*
6172 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6173 *
6174 * @task_seq guards the task state ({en,de}queues)
6175 * @pick_seq is the @task_seq we did a selection on
6176 * @sched_seq is the @pick_seq we scheduled
6177 *
6178 * However, preemptions can cause multiple picks on the same task set.
6179 * 'Fix' this by also increasing @task_seq for every pick.
6180 */
6181 rq->core->core_task_seq++;
6182
6183 /*
6184 * Optimize for common case where this CPU has no cookies
6185 * and there are no cookied tasks running on siblings.
6186 */
6187 if (!need_sync) {
6188 restart_single:
6189 next = pick_task(rq, rf);
6190 if (unlikely(next == RETRY_TASK))
6191 goto restart_single;
6192 if (!next->core_cookie) {
6193 rq->core_pick = NULL;
6194 rq->core_dl_server = NULL;
6195 /*
6196 * For robustness, update the min_vruntime_fi for
6197 * unconstrained picks as well.
6198 */
6199 WARN_ON_ONCE(fi_before);
6200 task_vruntime_update(rq, next, false);
6201 goto out_set_next;
6202 }
6203 }
6204
6205 /*
6206 * For each thread: do the regular task pick and find the max prio task
6207 * amongst them.
6208 *
6209 * Tie-break prio towards the current CPU
6210 */
6211 restart_multi:
6212 max = NULL;
6213 for_each_cpu_wrap(i, smt_mask, cpu) {
6214 rq_i = cpu_rq(i);
6215
6216 /*
6217 * Current cpu always has its clock updated on entrance to
6218 * pick_next_task(). If the current cpu is not the core,
6219 * the core may also have been updated above.
6220 */
6221 if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6222 update_rq_clock(rq_i);
6223
6224 p = pick_task(rq_i, rf);
6225 if (unlikely(p == RETRY_TASK))
6226 goto restart_multi;
6227
6228 rq_i->core_pick = p;
6229 rq_i->core_dl_server = rq_i->dl_server;
6230
6231 if (!max || prio_less(max, p, fi_before))
6232 max = p;
6233 }
6234
6235 cookie = rq->core->core_cookie = max->core_cookie;
6236
6237 /*
6238 * For each thread: try and find a runnable task that matches @max or
6239 * force idle.
6240 */
6241 for_each_cpu(i, smt_mask) {
6242 rq_i = cpu_rq(i);
6243 p = rq_i->core_pick;
6244
6245 if (!cookie_equals(p, cookie)) {
6246 p = NULL;
6247 if (cookie)
6248 p = sched_core_find(rq_i, cookie);
6249 if (!p)
6250 p = idle_sched_class.pick_task(rq_i, rf);
6251 }
6252
6253 rq_i->core_pick = p;
6254 rq_i->core_dl_server = NULL;
6255
6256 if (p == rq_i->idle) {
6257 if (rq_i->nr_running) {
6258 rq->core->core_forceidle_count++;
6259 if (!fi_before)
6260 rq->core->core_forceidle_seq++;
6261 }
6262 } else {
6263 occ++;
6264 }
6265 }
6266
6267 if (schedstat_enabled() && rq->core->core_forceidle_count) {
6268 rq->core->core_forceidle_start = rq_clock(rq->core);
6269 rq->core->core_forceidle_occupation = occ;
6270 }
6271
6272 rq->core->core_pick_seq = rq->core->core_task_seq;
6273 next = rq->core_pick;
6274 rq->core_sched_seq = rq->core->core_pick_seq;
6275
6276 /* Something should have been selected for current CPU */
6277 WARN_ON_ONCE(!next);
6278
6279 /*
6280 * Reschedule siblings
6281 *
6282 * NOTE: L1TF -- at this point we're no longer running the old task and
6283 * sending an IPI (below) ensures the sibling will no longer be running
6284 * their task. This ensures there is no inter-sibling overlap between
6285 * non-matching user state.
6286 */
6287 for_each_cpu(i, smt_mask) {
6288 rq_i = cpu_rq(i);
6289
6290 /*
6291 * An online sibling might have gone offline before a task
6292 * could be picked for it, or it might be offline but later
6293 * happen to come online, but it's too late and nothing was
6294 * picked for it. That's OK - it will pick tasks for itself,
6295 * so ignore it.
6296 */
6297 if (!rq_i->core_pick)
6298 continue;
6299
6300 /*
6301 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6302 * fi_before fi update?
6303 * 0 0 1
6304 * 0 1 1
6305 * 1 0 1
6306 * 1 1 0
6307 */
6308 if (!(fi_before && rq->core->core_forceidle_count))
6309 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6310
6311 rq_i->core_pick->core_occupation = occ;
6312
6313 if (i == cpu) {
6314 rq_i->core_pick = NULL;
6315 rq_i->core_dl_server = NULL;
6316 continue;
6317 }
6318
6319 /* Did we break L1TF mitigation requirements? */
6320 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6321
6322 if (rq_i->curr == rq_i->core_pick) {
6323 rq_i->core_pick = NULL;
6324 rq_i->core_dl_server = NULL;
6325 continue;
6326 }
6327
6328 resched_curr(rq_i);
6329 }
6330
6331 out_set_next:
6332 put_prev_set_next_task(rq, prev, next);
6333 if (rq->core->core_forceidle_count && next == rq->idle)
6334 queue_core_balance(rq);
6335
6336 return next;
6337 }
6338
6339 static bool try_steal_cookie(int this, int that)
6340 {
6341 struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6342 struct task_struct *p;
6343 unsigned long cookie;
6344 bool success = false;
6345
6346 guard(irq)();
6347 guard(double_rq_lock)(dst, src);
6348
6349 cookie = dst->core->core_cookie;
6350 if (!cookie)
6351 return false;
6352
6353 if (dst->curr != dst->idle)
6354 return false;
6355
6356 p = sched_core_find(src, cookie);
6357 if (!p)
6358 return false;
6359
6360 do {
6361 if (p == src->core_pick || p == src->curr)
6362 goto next;
6363
6364 if (!is_cpu_allowed(p, this))
6365 goto next;
6366
6367 if (p->core_occupation > dst->idle->core_occupation)
6368 goto next;
6369 /*
6370 * sched_core_find() and sched_core_next() will ensure
6371 * that task @p is not throttled now, we also need to
6372 * check whether the runqueue of the destination CPU is
6373 * being throttled.
6374 */
6375 if (sched_task_is_throttled(p, this))
6376 goto next;
6377
6378 move_queued_task_locked(src, dst, p);
6379 resched_curr(dst);
6380
6381 success = true;
6382 break;
6383
6384 next:
6385 p = sched_core_next(p, cookie);
6386 } while (p);
6387
6388 return success;
6389 }
6390
6391 static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6392 {
6393 int i;
6394
6395 for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
6396 if (i == cpu)
6397 continue;
6398
6399 if (need_resched())
6400 break;
6401
6402 if (try_steal_cookie(cpu, i))
6403 return true;
6404 }
6405
6406 return false;
6407 }
6408
6409 static void sched_core_balance(struct rq *rq)
6410 __must_hold(__rq_lockp(rq))
6411 {
6412 struct sched_domain *sd;
6413 int cpu = cpu_of(rq);
6414
6415 guard(preempt)();
6416 guard(rcu)();
6417
6418 raw_spin_rq_unlock_irq(rq);
6419 for_each_domain(cpu, sd) {
6420 if (need_resched())
6421 break;
6422
6423 if (steal_cookie_task(cpu, sd))
6424 break;
6425 }
6426 raw_spin_rq_lock_irq(rq);
6427 }
6428
6429 static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6430
6431 static void queue_core_balance(struct rq *rq)
6432 {
6433 if (!sched_core_enabled(rq))
6434 return;
6435
6436 if (!rq->core->core_cookie)
6437 return;
6438
6439 if (!rq->nr_running) /* not forced idle */
6440 return;
6441
6442 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6443 }
6444
6445 DEFINE_LOCK_GUARD_1(core_lock, int,
6446 sched_core_lock(*_T->lock, &_T->flags),
6447 sched_core_unlock(*_T->lock, &_T->flags),
6448 unsigned long flags)
6449
6450 static void sched_core_cpu_starting(unsigned int cpu)
6451 {
6452 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6453 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6454 int t;
6455
6456 guard(core_lock)(&cpu);
6457
6458 WARN_ON_ONCE(rq->core != rq);
6459
6460 /* if we're the first, we'll be our own leader */
6461 if (cpumask_weight(smt_mask) == 1)
6462 return;
6463
6464 /* find the leader */
6465 for_each_cpu(t, smt_mask) {
6466 if (t == cpu)
6467 continue;
6468 rq = cpu_rq(t);
6469 if (rq->core == rq) {
6470 core_rq = rq;
6471 break;
6472 }
6473 }
6474
6475 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6476 return;
6477
6478 /* install and validate core_rq */
6479 for_each_cpu(t, smt_mask) {
6480 rq = cpu_rq(t);
6481
6482 if (t == cpu)
6483 rq->core = core_rq;
6484
6485 WARN_ON_ONCE(rq->core != core_rq);
6486 }
6487 }
6488
6489 static void sched_core_cpu_deactivate(unsigned int cpu)
6490 {
6491 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6492 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6493 int t;
6494
6495 guard(core_lock)(&cpu);
6496
6497 /* if we're the last man standing, nothing to do */
6498 if (cpumask_weight(smt_mask) == 1) {
6499 WARN_ON_ONCE(rq->core != rq);
6500 return;
6501 }
6502
6503 /* if we're not the leader, nothing to do */
6504 if (rq->core != rq)
6505 return;
6506
6507 /* find a new leader */
6508 for_each_cpu(t, smt_mask) {
6509 if (t == cpu)
6510 continue;
6511 core_rq = cpu_rq(t);
6512 break;
6513 }
6514
6515 if (WARN_ON_ONCE(!core_rq)) /* impossible */
6516 return;
6517
6518 /* copy the shared state to the new leader */
6519 core_rq->core_task_seq = rq->core_task_seq;
6520 core_rq->core_pick_seq = rq->core_pick_seq;
6521 core_rq->core_cookie = rq->core_cookie;
6522 core_rq->core_forceidle_count = rq->core_forceidle_count;
6523 core_rq->core_forceidle_seq = rq->core_forceidle_seq;
6524 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6525
6526 /*
6527 * Accounting edge for forced idle is handled in pick_next_task().
6528 * Don't need another one here, since the hotplug thread shouldn't
6529 * have a cookie.
6530 */
6531 core_rq->core_forceidle_start = 0;
6532
6533 /* install new leader */
6534 for_each_cpu(t, smt_mask) {
6535 rq = cpu_rq(t);
6536 rq->core = core_rq;
6537 }
6538 }
6539
6540 static inline void sched_core_cpu_dying(unsigned int cpu)
6541 {
6542 struct rq *rq = cpu_rq(cpu);
6543
6544 if (rq->core != rq)
6545 rq->core = rq;
6546 }
6547
6548 #else /* !CONFIG_SCHED_CORE: */
6549
6550 static inline void sched_core_cpu_starting(unsigned int cpu) {}
6551 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6552 static inline void sched_core_cpu_dying(unsigned int cpu) {}
6553
6554 static struct task_struct *
6555 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6556 __must_hold(__rq_lockp(rq))
6557 {
6558 return __pick_next_task(rq, prev, rf);
6559 }
6560
6561 #endif /* !CONFIG_SCHED_CORE */
6562
6563 /*
6564 * Constants for the sched_mode argument of __schedule().
6565 *
6566 * The mode argument allows RT enabled kernels to differentiate a
6567 * preemption from blocking on an 'sleeping' spin/rwlock.
6568 */
6569 #define SM_IDLE (-1)
6570 #define SM_NONE 0
6571 #define SM_PREEMPT 1
6572 #define SM_RTLOCK_WAIT 2
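/*
 * Illustrative mapping of entry points to modes (their definitions live
 * elsewhere in this file or in PREEMPT_RT-only code): schedule() passes
 * SM_NONE, the preempt_schedule*() family passes SM_PREEMPT,
 * schedule_rtlock() on PREEMPT_RT passes SM_RTLOCK_WAIT, and the idle loop
 * enters via schedule_idle() with SM_IDLE.
 */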
6573
6574 /*
6575 * Helper function for __schedule()
6576 *
6577 * Tries to deactivate the task, unless the should_block arg
6578 * is false or if a signal is pending. In the case a signal
6579 * is pending, marks the task's __state as RUNNING (and clears
6580 * blocked_on).
6581 */
6582 static bool try_to_block_task(struct rq *rq, struct task_struct *p,
6583 unsigned long *task_state_p, bool should_block)
6584 {
6585 unsigned long task_state = *task_state_p;
6586 int flags = DEQUEUE_NOCLOCK;
6587
6588 if (signal_pending_state(task_state, p)) {
6589 WRITE_ONCE(p->__state, TASK_RUNNING);
6590 *task_state_p = TASK_RUNNING;
6591 set_task_blocked_on_waking(p, NULL);
6592
6593 return false;
6594 }
6595
6596 /*
6597 * We check should_block after signal_pending because we
6598 * will want to wake the task in that case. But if
6599 * should_block is false, it's likely due to the task being
6600 * blocked on a mutex, and we want to keep it on the runqueue
6601 * to be selectable for proxy-execution.
6602 */
6603 if (!should_block)
6604 return false;
6605
6606 p->sched_contributes_to_load =
6607 (task_state & TASK_UNINTERRUPTIBLE) &&
6608 !(task_state & TASK_NOLOAD) &&
6609 !(task_state & TASK_FROZEN);
6610
6611 if (unlikely(is_special_task_state(task_state)))
6612 flags |= DEQUEUE_SPECIAL;
6613
6614 /*
6615 * __schedule() ttwu()
6616 * prev_state = prev->state; if (p->on_rq && ...)
6617 * if (prev_state) goto out;
6618 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
6619 * p->state = TASK_WAKING
6620 *
6621 * Where __schedule() and ttwu() have matching control dependencies.
6622 *
6623 * After this, schedule() must not care about p->state any more.
6624 */
6625 block_task(rq, p, flags);
6626 return true;
6627 }
6628
6629 #ifdef CONFIG_SCHED_PROXY_EXEC
6630 static inline void proxy_set_task_cpu(struct task_struct *p, int cpu)
6631 {
6632 unsigned int wake_cpu;
6633
6634 /*
6635 * Since we are enqueuing a blocked task on a cpu it may
6636 * not be able to run on, preserve wake_cpu when we
6637 * __set_task_cpu so we can return the task to where it
6638 * was previously runnable.
6639 */
6640 wake_cpu = p->wake_cpu;
6641 __set_task_cpu(p, cpu);
6642 p->wake_cpu = wake_cpu;
6643 }
6644
6645 static inline struct task_struct *proxy_resched_idle(struct rq *rq)
6646 {
6647 put_prev_set_next_task(rq, rq->donor, rq->idle);
6648 rq_set_donor(rq, rq->idle);
6649 set_tsk_need_resched(rq->idle);
6650 return rq->idle;
6651 }
6652
6653 static bool proxy_deactivate(struct rq *rq, struct task_struct *donor)
6654 {
6655 unsigned long state = READ_ONCE(donor->__state);
6656
6657 /* Don't deactivate if the state has been changed to TASK_RUNNING */
6658 if (state == TASK_RUNNING)
6659 return false;
6660 /*
6661 * Because we got donor from pick_next_task(), it is *crucial*
6662 * that we call proxy_resched_idle() before we deactivate it.
6663 * As once we deactivate donor, donor->on_rq is set to zero,
6664 * which allows ttwu() to immediately try to wake the task on
6665 * another rq. So we cannot use *any* references to donor
6666 * after that point. So things like cfs_rq->curr or rq->donor
6667 * need to be changed from next *before* we deactivate.
6668 */
6669 proxy_resched_idle(rq);
6670 return try_to_block_task(rq, donor, &state, true);
6671 }
6672
6673 static inline void proxy_release_rq_lock(struct rq *rq, struct rq_flags *rf)
6674 __releases(__rq_lockp(rq))
6675 {
6676 /*
6677 * The class scheduler may have queued a balance callback
6678 * from pick_next_task() called earlier.
6679 *
6680 * So here we have to zap callbacks before unlocking the rq
6681 * as another CPU may jump in and call sched_balance_rq
6682 * which can trip the warning in rq_pin_lock() if we
6683 * leave callbacks set.
6684 *
6685 * After we later reacquire the rq lock, we will force __schedule()
6686 * to pick_again, so the callbacks will get re-established.
6687 */
6688 zap_balance_callbacks(rq);
6689 rq_unpin_lock(rq, rf);
6690 raw_spin_rq_unlock(rq);
6691 }
6692
6693 static inline void proxy_reacquire_rq_lock(struct rq *rq, struct rq_flags *rf)
6694 __acquires(__rq_lockp(rq))
6695 {
6696 raw_spin_rq_lock(rq);
6697 rq_repin_lock(rq, rf);
6698 update_rq_clock(rq);
6699 }
6700
6701 /*
6702 * If the blocked-on relationship crosses CPUs, migrate @p to the
6703 * owner's CPU.
6704 *
6705 * This is because we must respect the CPU affinity of execution
6706 * contexts (owner) but we can ignore affinity for scheduling
6707 * contexts (@p). So we have to move scheduling contexts towards
6708 * potential execution contexts.
6709 *
6710 * Note: The owner can disappear, but simply migrate to @target_cpu
6711 * and leave that CPU to sort things out.
6712 */
6713 static void proxy_migrate_task(struct rq *rq, struct rq_flags *rf,
6714 struct task_struct *p, int target_cpu)
6715 __must_hold(__rq_lockp(rq))
6716 {
6717 struct rq *target_rq = cpu_rq(target_cpu);
6718
6719 lockdep_assert_rq_held(rq);
6720 WARN_ON(p == rq->curr);
6721 /*
6722 * Since we are migrating a blocked donor, it could be rq->donor,
6723 * and we want to make sure there aren't any references from this
6724 * rq to it before we drop the lock. This avoids another cpu
6725 * jumping in and grabbing the rq lock and referencing rq->donor
6726 * or cfs_rq->curr, etc after we have migrated it to another cpu,
6727 * and before we pick_again in __schedule.
6728 *
6729 * So call proxy_resched_idle() to drop the rq->donor references
6730 * before we release the lock.
6731 */
6732 proxy_resched_idle(rq);
6733
6734 deactivate_task(rq, p, DEQUEUE_NOCLOCK);
6735 proxy_set_task_cpu(p, target_cpu);
6736
6737 proxy_release_rq_lock(rq, rf);
6738
6739 attach_one_task(target_rq, p);
6740
6741 proxy_reacquire_rq_lock(rq, rf);
6742 }
6743
6744 static void proxy_force_return(struct rq *rq, struct rq_flags *rf,
6745 struct task_struct *p)
6746 __must_hold(__rq_lockp(rq))
6747 {
6748 struct rq *task_rq, *target_rq = NULL;
6749 int cpu, wake_flag = WF_TTWU;
6750
6751 lockdep_assert_rq_held(rq);
6752 WARN_ON(p == rq->curr);
6753
6754 if (p == rq->donor)
6755 proxy_resched_idle(rq);
6756
6757 proxy_release_rq_lock(rq, rf);
6758 /*
6759 * We drop the rq lock, and re-grab task_rq_lock to get
6760 * the pi_lock (needed for select_task_rq) as well.
6761 */
6762 scoped_guard (task_rq_lock, p) {
6763 task_rq = scope.rq;
6764
6765 /*
6766 * Since we let go of the rq lock, the task may have been
6767 * woken or migrated to another rq before we got the
6768 * task_rq_lock. So re-check we're on the same RQ. If
6769 * not, the task has already been migrated and that CPU
6770 * will handle any further migrations.
6771 */
6772 if (task_rq != rq)
6773 break;
6774
6775 /*
6776 * Similarly, if we've been dequeued, someone else will
6777 * wake us
6778 */
6779 if (!task_on_rq_queued(p))
6780 break;
6781
6782 /*
6783 * Since we should only be calling here from __schedule()
6784 * -> find_proxy_task(), no one else should have
6785 * assigned current out from under us. But check and warn
6786 * if we see this, then bail.
6787 */
6788 if (task_current(task_rq, p) || task_on_cpu(task_rq, p)) {
6789 WARN_ONCE(1, "%s rq: %i current/on_cpu task %s %d on_cpu: %i\n",
6790 __func__, cpu_of(task_rq),
6791 p->comm, p->pid, p->on_cpu);
6792 break;
6793 }
6794
6795 update_rq_clock(task_rq);
6796 deactivate_task(task_rq, p, DEQUEUE_NOCLOCK);
6797 cpu = select_task_rq(p, p->wake_cpu, &wake_flag);
6798 set_task_cpu(p, cpu);
6799 target_rq = cpu_rq(cpu);
6800 clear_task_blocked_on(p, NULL);
6801 }
6802
6803 if (target_rq)
6804 attach_one_task(target_rq, p);
6805
6806 proxy_reacquire_rq_lock(rq, rf);
6807 }
6808
6809 /*
6810 * Find runnable lock owner to proxy for mutex blocked donor
6811 *
6812 * Follow the blocked-on relation:
6813 * task->blocked_on -> mutex->owner -> task...
6814 *
6815 * Lock order:
6816 *
6817 * p->pi_lock
6818 * rq->lock
6819 * mutex->wait_lock
6820 * p->blocked_lock
6821 *
6822 * Returns the task that is going to be used as execution context (the one
6823 * that is actually going to be run on cpu_of(rq)).
6824 */
6825 static struct task_struct *
6826 find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
6827 __must_hold(__rq_lockp(rq))
6828 {
6829 struct task_struct *owner = NULL;
6830 bool curr_in_chain = false;
6831 int this_cpu = cpu_of(rq);
6832 struct task_struct *p;
6833 struct mutex *mutex;
6834 int owner_cpu;
6835
6836 /* Follow blocked_on chain. */
6837 for (p = donor; (mutex = p->blocked_on); p = owner) {
6838 /* if it's PROXY_WAKING, do return migration or run if current */
6839 if (mutex == PROXY_WAKING) {
6840 if (task_current(rq, p)) {
6841 clear_task_blocked_on(p, PROXY_WAKING);
6842 return p;
6843 }
6844 goto force_return;
6845 }
6846
6847 /*
6848 * By taking mutex->wait_lock we hold off concurrent mutex_unlock()
6849 * and ensure @owner sticks around.
6850 */
6851 guard(raw_spinlock)(&mutex->wait_lock);
6852 guard(raw_spinlock)(&p->blocked_lock);
6853
6854 /* Check again that p is blocked with blocked_lock held */
6855 if (mutex != __get_task_blocked_on(p)) {
6856 /*
6857 * Something changed in the blocked_on chain and
6858 * we don't know if only at this level. So, let's
6859 * just bail out completely and let __schedule()
6860 * figure things out (pick_again loop).
6861 */
6862 return NULL;
6863 }
6864
6865 if (task_current(rq, p))
6866 curr_in_chain = true;
6867
6868 owner = __mutex_owner(mutex);
6869 if (!owner) {
6870 /*
6871 * If there is no owner, either clear blocked_on
6872 * and return p (if it is current and safe to
6873 * just run on this rq), or return-migrate the task.
6874 */
6875 if (task_current(rq, p)) {
6876 __clear_task_blocked_on(p, NULL);
6877 return p;
6878 }
6879 goto force_return;
6880 }
6881
6882 if (!READ_ONCE(owner->on_rq) || owner->se.sched_delayed) {
6883 /* XXX Don't handle blocked owners/delayed dequeue yet */
6884 if (curr_in_chain)
6885 return proxy_resched_idle(rq);
6886 goto deactivate;
6887 }
6888
6889 owner_cpu = task_cpu(owner);
6890 if (owner_cpu != this_cpu) {
6891 /*
6892 * @owner can disappear, simply migrate to @owner_cpu
6893 * and leave that CPU to sort things out.
6894 */
6895 if (curr_in_chain)
6896 return proxy_resched_idle(rq);
6897 goto migrate_task;
6898 }
6899
6900 if (task_on_rq_migrating(owner)) {
6901 /*
6902 * One of the chain of mutex owners is currently migrating to this
6903 * CPU, but has not yet been enqueued because we are holding the
6904 * rq lock. As a simple solution, just schedule rq->idle to give
6905 * the migration a chance to complete. Much like the migrate_task
6906 * case we should end up back in find_proxy_task(), this time
6907 * hopefully with all relevant tasks already enqueued.
6908 */
6909 return proxy_resched_idle(rq);
6910 }
6911
6912 /*
6913 * It's possible to race where, after we check owner->on_rq
6914 * but before we check (owner_cpu != this_cpu), the task on
6915 * another cpu was migrated back to this cpu. In that case
6916 * it could slip by our checks. So double-check that we are
6917 * still on this cpu and not migrating. If we get
6918 * inconsistent results, try again.
6919 */
6920 if (!task_on_rq_queued(owner) || task_cpu(owner) != this_cpu)
6921 return NULL;
6922
6923 if (owner == p) {
6924 /*
6925 * It's possible we interleave with mutex_unlock like:
6926 *
6927 * lock(&rq->lock);
6928 * find_proxy_task()
6929 * mutex_unlock()
6930 * lock(&wait_lock);
6931 * donor(owner) = current->blocked_donor;
6932 * unlock(&wait_lock);
6933 *
6934 * wake_up_q();
6935 * ...
6936 * ttwu_runnable()
6937 * __task_rq_lock()
6938 * lock(&wait_lock);
6939 * owner == p
6940 *
6941 * Which leaves us to finish the ttwu_runnable() and make it go.
6942 *
6943 * So schedule rq->idle so that ttwu_runnable() can get the rq
6944 * lock and mark owner as running.
6945 */
6946 return proxy_resched_idle(rq);
6947 }
6948 /*
6949 * OK, now we're absolutely sure @owner is on this
6950 * rq, therefore holding @rq->lock is sufficient to
6951 * guarantee its existence, as per ttwu_remote().
6952 */
6953 }
6954 WARN_ON_ONCE(owner && !owner->on_rq);
6955 return owner;
6956
6957 deactivate:
6958 if (proxy_deactivate(rq, donor))
6959 return NULL;
6960 /* If deactivate fails, force return */
6961 p = donor;
6962 force_return:
6963 proxy_force_return(rq, rf, p);
6964 return NULL;
6965 migrate_task:
6966 proxy_migrate_task(rq, rf, p, owner_cpu);
6967 return NULL;
6968 }
6969 #else /* SCHED_PROXY_EXEC */
6970 static struct task_struct *
6971 find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
6972 {
6973 WARN_ONCE(1, "This should never be called in the !SCHED_PROXY_EXEC case\n");
6974 return donor;
6975 }
6976 #endif /* SCHED_PROXY_EXEC */
6977
6978 /*
6979 * __schedule() is the main scheduler function.
6980 *
6981 * The main means of driving the scheduler and thus entering this function are:
6982 *
6983 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6984 *
6985 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6986 * paths. For example, see arch/x86/entry_64.S.
6987 *
6988 * To drive preemption between tasks, the scheduler sets the flag in timer
6989 * interrupt handler sched_tick().
6990 *
6991 * 3. Wakeups don't really cause entry into schedule(). They add a
6992 * task to the run-queue and that's it.
6993 *
6994 * Now, if the new task added to the run-queue preempts the current
6995 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6996 * called on the nearest possible occasion:
6997 *
6998 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6999 *
7000 * - in syscall or exception context, at the next outermost
7001 * preempt_enable(). (this might be as soon as the wake_up()'s
7002 * spin_unlock()!)
7003 *
7004 * - in IRQ context, return from interrupt-handler to
7005 * preemptible context
7006 *
7007 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
7008 * then at the next:
7009 *
7010 * - cond_resched() call
7011 * - explicit schedule() call
7012 * - return from syscall or exception to user-space
7013 * - return from interrupt-handler to user-space
7014 *
7015 * WARNING: must be called with preemption disabled!
7016 */
7017 static void __sched notrace __schedule(int sched_mode)
7018 {
7019 struct task_struct *prev, *next;
7020 /*
7021 * On PREEMPT_RT kernels, SM_RTLOCK_WAIT is noted
7022 * as a preemption by schedule_debug() and RCU.
7023 */
7024 bool preempt = sched_mode > SM_NONE;
7025 bool is_switch = false;
7026 unsigned long *switch_count;
7027 unsigned long prev_state;
7028 struct rq_flags rf;
7029 struct rq *rq;
7030 int cpu;
7031
7032 /* Trace preemptions consistently with task switches */
7033 trace_sched_entry_tp(sched_mode == SM_PREEMPT);
7034
7035 cpu = smp_processor_id();
7036 rq = cpu_rq(cpu);
7037 prev = rq->curr;
7038
7039 schedule_debug(prev, preempt);
7040
7041 klp_sched_try_switch(prev);
7042
7043 local_irq_disable();
7044 rcu_note_context_switch(preempt);
7045 migrate_disable_switch(rq, prev);
7046
7047 /*
7048 * Make sure that signal_pending_state()->signal_pending() below
7049 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
7050 * done by the caller to avoid the race with signal_wake_up():
7051 *
7052 * __set_current_state(@state) signal_wake_up()
7053 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
7054 * wake_up_state(p, state)
7055 * LOCK rq->lock LOCK p->pi_state
7056 * smp_mb__after_spinlock() smp_mb__after_spinlock()
7057 * if (signal_pending_state()) if (p->state & @state)
7058 *
7059 * Also, the membarrier system call requires a full memory barrier
7060 * after coming from user-space, before storing to rq->curr; this
7061 * barrier matches a full barrier in the proximity of the membarrier
7062 * system call exit.
7063 */
7064 rq_lock(rq, &rf);
7065 smp_mb__after_spinlock();
7066
7067 hrtick_schedule_enter(rq);
7068
7069 /* Promote REQ to ACT */
7070 rq->clock_update_flags <<= 1;
7071 update_rq_clock(rq);
7072 rq->clock_update_flags = RQCF_UPDATED;
7073
7074 switch_count = &prev->nivcsw;
7075
7076 /* Task state changes only consider SM_PREEMPT as preemption */
7077 preempt = sched_mode == SM_PREEMPT;
7078
7079 /*
7080 * We must load prev->state once (task_struct::state is volatile), such
7081 * that we form a control dependency vs deactivate_task() below.
7082 */
7083 prev_state = READ_ONCE(prev->__state);
7084 if (sched_mode == SM_IDLE) {
7085 /* SCX must consult the BPF scheduler to tell if rq is empty */
7086 if (!rq->nr_running && !scx_enabled()) {
7087 next = prev;
7088 rq->next_class = &idle_sched_class;
7089 goto picked;
7090 }
7091 } else if (!preempt && prev_state) {
7092 /*
7093 * We pass task_is_blocked() as the should_block arg
7094 * in order to keep mutex-blocked tasks on the runqueue
7095 * for selection with proxy-exec (without proxy-exec
7096 * task_is_blocked() will always be false).
7097 */
7098 try_to_block_task(rq, prev, &prev_state,
7099 !task_is_blocked(prev));
7100 switch_count = &prev->nvcsw;
7101 }
7102
7103 pick_again:
7104 assert_balance_callbacks_empty(rq);
7105 next = pick_next_task(rq, rq->donor, &rf);
7106 rq->next_class = next->sched_class;
7107 if (sched_proxy_exec()) {
7108 struct task_struct *prev_donor = rq->donor;
7109
7110 rq_set_donor(rq, next);
7111 if (unlikely(next->blocked_on)) {
7112 next = find_proxy_task(rq, next, &rf);
7113 if (!next) {
7114 zap_balance_callbacks(rq);
7115 goto pick_again;
7116 }
7117 if (next == rq->idle) {
7118 zap_balance_callbacks(rq);
7119 goto keep_resched;
7120 }
7121 }
7122 if (rq->donor == prev_donor && prev != next) {
7123 struct task_struct *donor = rq->donor;
7124 /*
7125 * When transitioning like:
7126 *
7127 * prev next
7128 * donor: B B
7129 * curr: A B or C
7130 *
7131 * then put_prev_set_next_task() will not have done
7132 * anything, since B == B. However, A might have
7133 * missed a RT/DL balance opportunity due to being
7134 * on_cpu.
7135 */
7136 donor->sched_class->put_prev_task(rq, donor, donor);
7137 donor->sched_class->set_next_task(rq, donor, true);
7138 }
7139 } else {
7140 rq_set_donor(rq, next);
7141 }
7142
7143 picked:
7144 clear_tsk_need_resched(prev);
7145 clear_preempt_need_resched();
7146 keep_resched:
7147 rq->last_seen_need_resched_ns = 0;
7148
7149 is_switch = prev != next;
7150 if (likely(is_switch)) {
7151 rq->nr_switches++;
7152 /*
7153 * RCU users of rcu_dereference(rq->curr) may not see
7154 * changes to task_struct made by pick_next_task().
7155 */
7156 RCU_INIT_POINTER(rq->curr, next);
7157
7158 /*
7159 * The membarrier system call requires each architecture
7160 * to have a full memory barrier after updating
7161 * rq->curr, before returning to user-space.
7162 *
7163 * Here are the schemes providing that barrier on the
7164 * various architectures:
7165 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
7166 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm()
7167 * on PowerPC and on RISC-V.
7168 * - finish_lock_switch() for weakly-ordered
7169 * architectures where spin_unlock is a full barrier,
7170 * - switch_to() for arm64 (weakly-ordered, spin_unlock
7171 * is a RELEASE barrier),
7172 *
7173 * The barrier matches a full barrier in the proximity of
7174 * the membarrier system call entry.
7175 *
7176 * On RISC-V, this barrier pairing is also needed for the
7177 * SYNC_CORE command when switching between processes, cf.
7178 * the inline comments in membarrier_arch_switch_mm().
7179 */
7180 ++*switch_count;
7181
7182 psi_account_irqtime(rq, prev, next);
7183 psi_sched_switch(prev, next, !task_on_rq_queued(prev) ||
7184 prev->se.sched_delayed);
7185
7186 trace_sched_switch(preempt, prev, next, prev_state);
7187
7188 /* Also unlocks the rq: */
7189 rq = context_switch(rq, prev, next, &rf);
7190 } else {
7191 rq_unpin_lock(rq, &rf);
7192 __balance_callbacks(rq, NULL);
7193 hrtick_schedule_exit(rq);
7194 raw_spin_rq_unlock_irq(rq);
7195 }
7196 trace_sched_exit_tp(is_switch);
7197 }
7198
7199 void __noreturn do_task_dead(void)
7200 {
7201 /* Causes final put_task_struct in finish_task_switch(): */
7202 set_special_state(TASK_DEAD);
7203
7204 /* Tell freezer to ignore us: */
7205 current->flags |= PF_NOFREEZE;
7206
7207 __schedule(SM_NONE);
7208 BUG();
7209
7210 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
7211 for (;;)
7212 cpu_relax();
7213 }
7214
7215 static inline void sched_submit_work(struct task_struct *tsk)
7216 {
7217 static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
7218 unsigned int task_flags;
7219
7220 /*
7221 * Establish LD_WAIT_CONFIG context to ensure none of the code called
7222 * will use a blocking primitive -- which would lead to recursion.
7223 */
7224 lock_map_acquire_try(&sched_map);
7225
7226 task_flags = tsk->flags;
7227 /*
7228 * If a worker goes to sleep, notify and ask workqueue whether it
7229 * wants to wake up a task to maintain concurrency.
7230 */
7231 if (task_flags & PF_WQ_WORKER)
7232 wq_worker_sleeping(tsk);
7233 else if (task_flags & PF_IO_WORKER)
7234 io_wq_worker_sleeping(tsk);
7235
7236 /*
7237 * spinlock and rwlock must not flush block requests. This will
7238 * deadlock if the callback attempts to acquire a lock which is
7239 * already acquired.
7240 */
7241 WARN_ON_ONCE(current->__state & TASK_RTLOCK_WAIT);
7242
7243 /*
7244 * If we are going to sleep and we have plugged IO queued,
7245 * make sure to submit it to avoid deadlocks.
7246 */
7247 blk_flush_plug(tsk->plug, true);
7248
7249 lock_map_release(&sched_map);
7250 }
7251
7252 static void sched_update_worker(struct task_struct *tsk)
7253 {
7254 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
7255 if (tsk->flags & PF_BLOCK_TS)
7256 blk_plug_invalidate_ts(tsk);
7257 if (tsk->flags & PF_WQ_WORKER)
7258 wq_worker_running(tsk);
7259 else if (tsk->flags & PF_IO_WORKER)
7260 io_wq_worker_running(tsk);
7261 }
7262 }
7263
7264 static __always_inline void __schedule_loop(int sched_mode)
7265 {
7266 do {
7267 preempt_disable();
7268 __schedule(sched_mode);
7269 sched_preempt_enable_no_resched();
7270 } while (need_resched());
7271 }
7272
7273 asmlinkage __visible void __sched schedule(void)
7274 {
7275 struct task_struct *tsk = current;
7276
7277 #ifdef CONFIG_RT_MUTEXES
7278 lockdep_assert(!tsk->sched_rt_mutex);
7279 #endif
7280
7281 if (!task_is_running(tsk))
7282 sched_submit_work(tsk);
7283 __schedule_loop(SM_NONE);
7284 sched_update_worker(tsk);
7285 }
7286 EXPORT_SYMBOL(schedule);
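
/*
 * Illustrative sketch (not part of this file): the canonical way callers
 * pair a state change with schedule() so that a concurrent wake-up cannot
 * be lost; 'cond' stands in for the real wake condition:
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (cond)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */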
7287
7288 /*
7289 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
7290 * state (have scheduled out non-voluntarily) by making sure that all
7291 * tasks have either left the run queue or have gone into user space.
7292 * As idle tasks do not do either, they must not ever be preempted
7293 * (schedule out non-voluntarily).
7294 *
7295  * schedule_idle() is similar to schedule_preempt_disabled() except that it
7296 * never enables preemption because it does not call sched_submit_work().
7297 */
7298 void __sched schedule_idle(void)
7299 {
7300 /*
7301 * As this skips calling sched_submit_work(), which the idle task does
7302 * regardless because that function is a NOP when the task is in a
7303 * TASK_RUNNING state, make sure this isn't used someplace that the
7304 * current task can be in any other state. Note, idle is always in the
7305 * TASK_RUNNING state.
7306 */
7307 WARN_ON_ONCE(current->__state);
7308 do {
7309 __schedule(SM_IDLE);
7310 } while (need_resched());
7311 }
7312
7313 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
7314 asmlinkage __visible void __sched schedule_user(void)
7315 {
7316 /*
7317 * If we come here after a random call to set_need_resched(),
7318 * or we have been woken up remotely but the IPI has not yet arrived,
7319 * we haven't yet exited the RCU idle mode. Do it here manually until
7320 * we find a better solution.
7321 *
7322 * NB: There are buggy callers of this function. Ideally we
7323 * should warn if prev_state != CT_STATE_USER, but that will trigger
7324 * too frequently to make sense yet.
7325 */
7326 enum ctx_state prev_state = exception_enter();
7327 schedule();
7328 exception_exit(prev_state);
7329 }
7330 #endif
7331
7332 /**
7333 * schedule_preempt_disabled - called with preemption disabled
7334 *
7335 * Returns with preemption disabled. Note: preempt_count must be 1
7336 */
7337 void __sched schedule_preempt_disabled(void)
7338 {
7339 sched_preempt_enable_no_resched();
7340 schedule();
7341 preempt_disable();
7342 }
7343
7344 #ifdef CONFIG_PREEMPT_RT
7345 void __sched notrace schedule_rtlock(void)
7346 {
7347 __schedule_loop(SM_RTLOCK_WAIT);
7348 }
7349 NOKPROBE_SYMBOL(schedule_rtlock);
7350 #endif
7351
7352 static void __sched notrace preempt_schedule_common(void)
7353 {
7354 do {
7355 /*
7356 * Because the function tracer can trace preempt_count_sub()
7357 * and it also uses preempt_enable/disable_notrace(), if
7358 * NEED_RESCHED is set, the preempt_enable_notrace() called
7359 * by the function tracer will call this function again and
7360 * cause infinite recursion.
7361 *
7362 * Preemption must be disabled here before the function
7363 * tracer can trace. Break up preempt_disable() into two
7364 * calls. One to disable preemption without fear of being
7365 * traced. The other to still record the preemption latency,
7366 * which can also be traced by the function tracer.
7367 */
7368 preempt_disable_notrace();
7369 preempt_latency_start(1);
7370 __schedule(SM_PREEMPT);
7371 preempt_latency_stop(1);
7372 preempt_enable_no_resched_notrace();
7373
7374 /*
7375 * Check again in case we missed a preemption opportunity
7376 * between schedule and now.
7377 */
7378 } while (need_resched());
7379 }
7380
7381 #ifdef CONFIG_PREEMPTION
7382 /*
7383 * This is the entry point to schedule() from in-kernel preemption
7384 * off of preempt_enable.
7385 */
7386 asmlinkage __visible void __sched notrace preempt_schedule(void)
7387 {
7388 /*
7389 * If there is a non-zero preempt_count or interrupts are disabled,
7390 	 * we do not want to preempt the current task. Just return.
7391 */
7392 if (likely(!preemptible()))
7393 return;
7394 preempt_schedule_common();
7395 }
7396 NOKPROBE_SYMBOL(preempt_schedule);
7397 EXPORT_SYMBOL(preempt_schedule);
7398
7399 #ifdef CONFIG_PREEMPT_DYNAMIC
7400 # ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7401 # ifndef preempt_schedule_dynamic_enabled
7402 # define preempt_schedule_dynamic_enabled preempt_schedule
7403 # define preempt_schedule_dynamic_disabled NULL
7404 # endif
7405 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
7406 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
7407 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7408 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
7409 void __sched notrace dynamic_preempt_schedule(void)
7410 {
7411 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
7412 return;
7413 preempt_schedule();
7414 }
7415 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
7416 EXPORT_SYMBOL(dynamic_preempt_schedule);
7417 # endif
7418 #endif /* CONFIG_PREEMPT_DYNAMIC */
7419
7420 /**
7421 * preempt_schedule_notrace - preempt_schedule called by tracing
7422 *
7423 * The tracing infrastructure uses preempt_enable_notrace to prevent
7424 * recursion and tracing preempt enabling caused by the tracing
7425 * infrastructure itself. But as tracing can happen in areas coming
7426 * from userspace or just about to enter userspace, a preempt enable
7427 * can occur before user_exit() is called. This will cause the scheduler
7428 * to be called when the system is still in usermode.
7429 *
7430 * To prevent this, the preempt_enable_notrace will use this function
7431 * instead of preempt_schedule() to exit user context if needed before
7432 * calling the scheduler.
7433 */
7434 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
7435 {
7436 enum ctx_state prev_ctx;
7437
7438 if (likely(!preemptible()))
7439 return;
7440
7441 do {
7442 /*
7443 * Because the function tracer can trace preempt_count_sub()
7444 * and it also uses preempt_enable/disable_notrace(), if
7445 * NEED_RESCHED is set, the preempt_enable_notrace() called
7446 * by the function tracer will call this function again and
7447 * cause infinite recursion.
7448 *
7449 * Preemption must be disabled here before the function
7450 * tracer can trace. Break up preempt_disable() into two
7451 * calls. One to disable preemption without fear of being
7452 * traced. The other to still record the preemption latency,
7453 * which can also be traced by the function tracer.
7454 */
7455 preempt_disable_notrace();
7456 preempt_latency_start(1);
7457 /*
7458 * Needs preempt disabled in case user_exit() is traced
7459 * and the tracer calls preempt_enable_notrace() causing
7460 * an infinite recursion.
7461 */
7462 prev_ctx = exception_enter();
7463 __schedule(SM_PREEMPT);
7464 exception_exit(prev_ctx);
7465
7466 preempt_latency_stop(1);
7467 preempt_enable_no_resched_notrace();
7468 } while (need_resched());
7469 }
7470 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
7471
7472 #ifdef CONFIG_PREEMPT_DYNAMIC
7473 # if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7474 # ifndef preempt_schedule_notrace_dynamic_enabled
7475 # define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
7476 # define preempt_schedule_notrace_dynamic_disabled NULL
7477 # endif
7478 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
7479 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
7480 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7481 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
7482 void __sched notrace dynamic_preempt_schedule_notrace(void)
7483 {
7484 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
7485 return;
7486 preempt_schedule_notrace();
7487 }
7488 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
7489 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
7490 # endif
7491 #endif
7492
7493 #endif /* CONFIG_PREEMPTION */
7494
7495 /*
7496 * This is the entry point to schedule() from kernel preemption
7497 * off of IRQ context.
7498  * Note that this is called and returns with IRQs disabled. This will
7499 * protect us against recursive calling from IRQ contexts.
7500 */
7501 asmlinkage __visible void __sched preempt_schedule_irq(void)
7502 {
7503 enum ctx_state prev_state;
7504
7505 /* Catch callers which need to be fixed */
7506 BUG_ON(preempt_count() || !irqs_disabled());
7507
7508 prev_state = exception_enter();
7509
7510 do {
7511 preempt_disable();
7512 local_irq_enable();
7513 __schedule(SM_PREEMPT);
7514 local_irq_disable();
7515 sched_preempt_enable_no_resched();
7516 } while (need_resched());
7517
7518 exception_exit(prev_state);
7519 }
7520
7521 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
7522 void *key)
7523 {
7524 WARN_ON_ONCE(wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
7525 return try_to_wake_up(curr->private, mode, wake_flags);
7526 }
7527 EXPORT_SYMBOL(default_wake_function);
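
/*
 * Illustrative note: init_waitqueue_entry() and DECLARE_WAITQUEUE() install
 * default_wake_function() as ->func, so a plain wait-queue entry wakes its
 * ->private task via try_to_wake_up(). A hypothetical open-coded waiter
 * ('wq_head' is assumed):
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	...
 *	remove_wait_queue(&wq_head, &wait);
 */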
7528
7529 const struct sched_class *__setscheduler_class(int policy, int prio)
7530 {
7531 if (dl_prio(prio))
7532 return &dl_sched_class;
7533
7534 if (rt_prio(prio))
7535 return &rt_sched_class;
7536
7537 #ifdef CONFIG_SCHED_CLASS_EXT
7538 if (task_should_scx(policy))
7539 return &ext_sched_class;
7540 #endif
7541
7542 return &fair_sched_class;
7543 }
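
/*
 * Illustrative mapping, assuming sched_ext is not active ('prio' here is
 * the effective kernel priority, not a user-space nice value):
 *
 *	__setscheduler_class(SCHED_DEADLINE, -1)  -> &dl_sched_class
 *	__setscheduler_class(SCHED_FIFO,     50)  -> &rt_sched_class
 *	__setscheduler_class(SCHED_NORMAL,  120)  -> &fair_sched_class
 */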
7544
7545 #ifdef CONFIG_RT_MUTEXES
7546
7547 /*
7548 * Would be more useful with typeof()/auto_type but they don't mix with
7549 * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7550 * name such that if someone were to implement this function we get to compare
7551 * notes.
7552 */
7553 #define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
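
/*
 * Sketch of the semantics with a hypothetical 'x' (note this is plain C,
 * not an atomic RMW -- the caller must provide any serialization needed):
 *
 *	x = 0;
 *	old = fetch_and_set(x, 1);	// old == 0, x == 1 afterwards
 */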
7554
7555 void rt_mutex_pre_schedule(void)
7556 {
7557 lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
7558 sched_submit_work(current);
7559 }
7560
7561 void rt_mutex_schedule(void)
7562 {
7563 lockdep_assert(current->sched_rt_mutex);
7564 __schedule_loop(SM_NONE);
7565 }
7566
7567 void rt_mutex_post_schedule(void)
7568 {
7569 sched_update_worker(current);
7570 lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
7571 }
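
/*
 * Sketch of the expected call sequence around a blocking rt_mutex operation
 * (the lockdep_assert()s above enforce this pairing); 'acquired' stands in
 * for the real try-lock condition:
 *
 *	rt_mutex_pre_schedule();
 *	while (!acquired)
 *		rt_mutex_schedule();
 *	rt_mutex_post_schedule();
 */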
7572
7573 /*
7574 * rt_mutex_setprio - set the current priority of a task
7575 * @p: task to boost
7576 * @pi_task: donor task
7577 *
7578 * This function changes the 'effective' priority of a task. It does
7579 * not touch ->normal_prio like __setscheduler().
7580 *
7581 * Used by the rt_mutex code to implement priority inheritance
7582 * logic. Call site only calls if the priority of the task changed.
7583 */
7584 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
7585 {
7586 int prio, oldprio, queue_flag =
7587 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7588 const struct sched_class *prev_class, *next_class;
7589 struct rq_flags rf;
7590 struct rq *rq;
7591
7592 /* XXX used to be waiter->prio, not waiter->task->prio */
7593 prio = __rt_effective_prio(pi_task, p->normal_prio);
7594
7595 /*
7596 	 * If nothing changed, bail early.
7597 */
7598 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
7599 return;
7600
7601 rq = __task_rq_lock(p, &rf);
7602 update_rq_clock(rq);
7603 /*
7604 * Set under pi_lock && rq->lock, such that the value can be used under
7605 * either lock.
7606 *
7607 	 * Note that it takes loads of trickery to make this pointer cache work
7608 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7609 * ensure a task is de-boosted (pi_task is set to NULL) before the
7610 * task is allowed to run again (and can exit). This ensures the pointer
7611 * points to a blocked task -- which guarantees the task is present.
7612 */
7613 p->pi_top_task = pi_task;
7614
7615 /*
7616 * For FIFO/RR we only need to set prio, if that matches we're done.
7617 */
7618 if (prio == p->prio && !dl_prio(prio))
7619 goto out_unlock;
7620
7621 /*
7622 * Idle task boosting is a no-no in general. There is one
7623 * exception, when PREEMPT_RT and NOHZ is active:
7624 *
7625 * The idle task calls get_next_timer_interrupt() and holds
7626 * the timer wheel base->lock on the CPU and another CPU wants
7627 * to access the timer (probably to cancel it). We can safely
7628 * ignore the boosting request, as the idle CPU runs this code
7629 * with interrupts disabled and will complete the lock
7630 * protected section without being interrupted. So there is no
7631 * real need to boost.
7632 */
7633 if (unlikely(p == rq->idle)) {
7634 WARN_ON(p != rq->curr);
7635 WARN_ON(p->pi_blocked_on);
7636 goto out_unlock;
7637 }
7638
7639 trace_sched_pi_setprio(p, pi_task);
7640 oldprio = p->prio;
7641
7642 if (oldprio == prio && !dl_prio(prio))
7643 queue_flag &= ~DEQUEUE_MOVE;
7644
7645 prev_class = p->sched_class;
7646 next_class = __setscheduler_class(p->policy, prio);
7647
7648 if (prev_class != next_class)
7649 queue_flag |= DEQUEUE_CLASS;
7650
7651 scoped_guard (sched_change, p, queue_flag) {
7652 /*
7653 		 * Boosting conditions are:
7654 * 1. -rt task is running and holds mutex A
7655 * --> -dl task blocks on mutex A
7656 *
7657 * 2. -dl task is running and holds mutex A
7658 * --> -dl task blocks on mutex A and could preempt the
7659 * running task
7660 */
7661 if (dl_prio(prio)) {
7662 if (!dl_prio(p->normal_prio) ||
7663 (pi_task && dl_prio(pi_task->prio) &&
7664 dl_entity_preempt(&pi_task->dl, &p->dl))) {
7665 p->dl.pi_se = pi_task->dl.pi_se;
7666 scope->flags |= ENQUEUE_REPLENISH;
7667 } else {
7668 p->dl.pi_se = &p->dl;
7669 }
7670 } else if (rt_prio(prio)) {
7671 if (dl_prio(oldprio))
7672 p->dl.pi_se = &p->dl;
7673 if (oldprio < prio)
7674 scope->flags |= ENQUEUE_HEAD;
7675 } else {
7676 if (dl_prio(oldprio))
7677 p->dl.pi_se = &p->dl;
7678 if (rt_prio(oldprio))
7679 p->rt.timeout = 0;
7680 }
7681
7682 p->sched_class = next_class;
7683 p->prio = prio;
7684 }
7685 out_unlock:
7686 /* Caller holds task_struct::pi_lock, IRQs are still disabled */
7687
7688 __balance_callbacks(rq, &rf);
7689 __task_rq_unlock(rq, p, &rf);
7690 }
7691 #endif /* CONFIG_RT_MUTEXES */
7692
7693 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
7694 int __sched __cond_resched(void)
7695 {
7696 if (should_resched(0) && !irqs_disabled()) {
7697 preempt_schedule_common();
7698 return 1;
7699 }
7700 /*
7701 * In PREEMPT_RCU kernels, ->rcu_read_lock_nesting tells the tick
7702 * whether the current CPU is in an RCU read-side critical section,
7703 * so the tick can report quiescent states even for CPUs looping
7704 * in kernel context. In contrast, in non-preemptible kernels,
7705 * RCU readers leave no in-memory hints, which means that CPU-bound
7706 * processes executing in kernel context might never report an
7707 * RCU quiescent state. Therefore, the following code causes
7708 * cond_resched() to report a quiescent state, but only when RCU
7709 * is in urgent need of one.
7710 * A third case, preemptible, but non-PREEMPT_RCU provides for
7711 * urgently needed quiescent states via rcu_flavor_sched_clock_irq().
7712 */
7713 #ifndef CONFIG_PREEMPT_RCU
7714 rcu_all_qs();
7715 #endif
7716 return 0;
7717 }
7718 EXPORT_SYMBOL(__cond_resched);
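
/*
 * Illustrative sketch: long kernel-side loops sprinkle cond_resched() (the
 * usual wrapper around __cond_resched()) to bound scheduling latency on
 * non-preemptible configurations; 'nr' and process_item() are hypothetical:
 *
 *	for (i = 0; i < nr; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */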
7719 #endif
7720
7721 #ifdef CONFIG_PREEMPT_DYNAMIC
7722 # ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7723 # define cond_resched_dynamic_enabled __cond_resched
7724 # define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
7725 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
7726 EXPORT_STATIC_CALL_TRAMP(cond_resched);
7727
7728 # define might_resched_dynamic_enabled __cond_resched
7729 # define might_resched_dynamic_disabled ((void *)&__static_call_return0)
7730 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
7731 EXPORT_STATIC_CALL_TRAMP(might_resched);
7732 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7733 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
7734 int __sched dynamic_cond_resched(void)
7735 {
7736 if (!static_branch_unlikely(&sk_dynamic_cond_resched))
7737 return 0;
7738 return __cond_resched();
7739 }
7740 EXPORT_SYMBOL(dynamic_cond_resched);
7741
7742 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
7743 int __sched dynamic_might_resched(void)
7744 {
7745 if (!static_branch_unlikely(&sk_dynamic_might_resched))
7746 return 0;
7747 return __cond_resched();
7748 }
7749 EXPORT_SYMBOL(dynamic_might_resched);
7750 # endif
7751 #endif /* CONFIG_PREEMPT_DYNAMIC */
7752
7753 /*
7754 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7755 * call schedule, and on return reacquire the lock.
7756 *
7757 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7758 * operations here to prevent schedule() from being called twice (once via
7759 * spin_unlock(), once by hand).
7760 */
7761 int __cond_resched_lock(spinlock_t *lock)
7762 {
7763 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7764 int ret = 0;
7765
7766 lockdep_assert_held(lock);
7767
7768 if (spin_needbreak(lock) || resched) {
7769 spin_unlock(lock);
7770 if (!_cond_resched())
7771 cpu_relax();
7772 ret = 1;
7773 spin_lock(lock);
7774 }
7775 return ret;
7776 }
7777 EXPORT_SYMBOL(__cond_resched_lock);
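
/*
 * Illustrative sketch for the lock-dropping variant via the usual
 * cond_resched_lock() wrapper; 'lock' and more_work() are hypothetical:
 *
 *	spin_lock(&lock);
 *	while (more_work()) {
 *		do_one_chunk();
 *		cond_resched_lock(&lock);	// may drop and retake 'lock'
 *	}
 *	spin_unlock(&lock);
 */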
7778
7779 int __cond_resched_rwlock_read(rwlock_t *lock)
7780 {
7781 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7782 int ret = 0;
7783
7784 lockdep_assert_held_read(lock);
7785
7786 if (rwlock_needbreak(lock) || resched) {
7787 read_unlock(lock);
7788 if (!_cond_resched())
7789 cpu_relax();
7790 ret = 1;
7791 read_lock(lock);
7792 }
7793 return ret;
7794 }
7795 EXPORT_SYMBOL(__cond_resched_rwlock_read);
7796
7797 int __cond_resched_rwlock_write(rwlock_t *lock)
7798 {
7799 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7800 int ret = 0;
7801
7802 lockdep_assert_held_write(lock);
7803
7804 if (rwlock_needbreak(lock) || resched) {
7805 write_unlock(lock);
7806 if (!_cond_resched())
7807 cpu_relax();
7808 ret = 1;
7809 write_lock(lock);
7810 }
7811 return ret;
7812 }
7813 EXPORT_SYMBOL(__cond_resched_rwlock_write);
7814
7815 #ifdef CONFIG_PREEMPT_DYNAMIC
7816
7817 # ifdef CONFIG_GENERIC_IRQ_ENTRY
7818 # include <linux/irq-entry-common.h>
7819 # endif
7820
7821 /*
7822 * SC:cond_resched
7823 * SC:might_resched
7824 * SC:preempt_schedule
7825 * SC:preempt_schedule_notrace
7826 * SC:irqentry_exit_cond_resched
7827 *
7828 *
7829 * NONE:
7830 * cond_resched <- __cond_resched
7831 * might_resched <- RET0
7832 * preempt_schedule <- NOP
7833 * preempt_schedule_notrace <- NOP
7834 * irqentry_exit_cond_resched <- NOP
7835 * dynamic_preempt_lazy <- false
7836 *
7837 * VOLUNTARY:
7838 * cond_resched <- __cond_resched
7839 * might_resched <- __cond_resched
7840 * preempt_schedule <- NOP
7841 * preempt_schedule_notrace <- NOP
7842 * irqentry_exit_cond_resched <- NOP
7843 * dynamic_preempt_lazy <- false
7844 *
7845 * FULL:
7846 * cond_resched <- RET0
7847 * might_resched <- RET0
7848 * preempt_schedule <- preempt_schedule
7849 * preempt_schedule_notrace <- preempt_schedule_notrace
7850 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7851 * dynamic_preempt_lazy <- false
7852 *
7853 * LAZY:
7854 * cond_resched <- RET0
7855 * might_resched <- RET0
7856 * preempt_schedule <- preempt_schedule
7857 * preempt_schedule_notrace <- preempt_schedule_notrace
7858 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7859 * dynamic_preempt_lazy <- true
7860 */
7861
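/*
 * Illustrative usage: on a PREEMPT_DYNAMIC kernel the model is selected at
 * boot, or at runtime where the debugfs knob is available:
 *
 *	preempt=lazy					# kernel command line
 *	echo full > /sys/kernel/debug/sched/preempt	# runtime
 */
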
7862 enum {
7863 preempt_dynamic_undefined = -1,
7864 preempt_dynamic_none,
7865 preempt_dynamic_voluntary,
7866 preempt_dynamic_full,
7867 preempt_dynamic_lazy,
7868 };
7869
7870 int preempt_dynamic_mode = preempt_dynamic_undefined;
7871
7872 int sched_dynamic_mode(const char *str)
7873 {
7874 # if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
7875 if (!strcmp(str, "none"))
7876 return preempt_dynamic_none;
7877
7878 if (!strcmp(str, "voluntary"))
7879 return preempt_dynamic_voluntary;
7880 # endif
7881
7882 if (!strcmp(str, "full"))
7883 return preempt_dynamic_full;
7884
7885 # ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
7886 if (!strcmp(str, "lazy"))
7887 return preempt_dynamic_lazy;
7888 # endif
7889
7890 return -EINVAL;
7891 }
7892
7893 # define preempt_dynamic_key_enable(f) static_key_enable(&sk_dynamic_##f.key)
7894 # define preempt_dynamic_key_disable(f) static_key_disable(&sk_dynamic_##f.key)
7895
7896 # if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7897 # define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled)
7898 # define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
7899 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7900 # define preempt_dynamic_enable(f) preempt_dynamic_key_enable(f)
7901 # define preempt_dynamic_disable(f) preempt_dynamic_key_disable(f)
7902 # else
7903 # error "Unsupported PREEMPT_DYNAMIC mechanism"
7904 # endif
7905
7906 static DEFINE_MUTEX(sched_dynamic_mutex);
7907
7908 static void __sched_dynamic_update(int mode)
7909 {
7910 /*
7911 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
7912 * the ZERO state, which is invalid.
7913 */
7914 preempt_dynamic_enable(cond_resched);
7915 preempt_dynamic_enable(might_resched);
7916 preempt_dynamic_enable(preempt_schedule);
7917 preempt_dynamic_enable(preempt_schedule_notrace);
7918 preempt_dynamic_enable(irqentry_exit_cond_resched);
7919 preempt_dynamic_key_disable(preempt_lazy);
7920
7921 switch (mode) {
7922 case preempt_dynamic_none:
7923 preempt_dynamic_enable(cond_resched);
7924 preempt_dynamic_disable(might_resched);
7925 preempt_dynamic_disable(preempt_schedule);
7926 preempt_dynamic_disable(preempt_schedule_notrace);
7927 preempt_dynamic_disable(irqentry_exit_cond_resched);
7928 preempt_dynamic_key_disable(preempt_lazy);
7929 if (mode != preempt_dynamic_mode)
7930 pr_info("Dynamic Preempt: none\n");
7931 break;
7932
7933 case preempt_dynamic_voluntary:
7934 preempt_dynamic_enable(cond_resched);
7935 preempt_dynamic_enable(might_resched);
7936 preempt_dynamic_disable(preempt_schedule);
7937 preempt_dynamic_disable(preempt_schedule_notrace);
7938 preempt_dynamic_disable(irqentry_exit_cond_resched);
7939 preempt_dynamic_key_disable(preempt_lazy);
7940 if (mode != preempt_dynamic_mode)
7941 pr_info("Dynamic Preempt: voluntary\n");
7942 break;
7943
7944 case preempt_dynamic_full:
7945 preempt_dynamic_disable(cond_resched);
7946 preempt_dynamic_disable(might_resched);
7947 preempt_dynamic_enable(preempt_schedule);
7948 preempt_dynamic_enable(preempt_schedule_notrace);
7949 preempt_dynamic_enable(irqentry_exit_cond_resched);
7950 preempt_dynamic_key_disable(preempt_lazy);
7951 if (mode != preempt_dynamic_mode)
7952 pr_info("Dynamic Preempt: full\n");
7953 break;
7954
7955 case preempt_dynamic_lazy:
7956 preempt_dynamic_disable(cond_resched);
7957 preempt_dynamic_disable(might_resched);
7958 preempt_dynamic_enable(preempt_schedule);
7959 preempt_dynamic_enable(preempt_schedule_notrace);
7960 preempt_dynamic_enable(irqentry_exit_cond_resched);
7961 preempt_dynamic_key_enable(preempt_lazy);
7962 if (mode != preempt_dynamic_mode)
7963 pr_info("Dynamic Preempt: lazy\n");
7964 break;
7965 }
7966
7967 preempt_dynamic_mode = mode;
7968 }
7969
7970 void sched_dynamic_update(int mode)
7971 {
7972 mutex_lock(&sched_dynamic_mutex);
7973 __sched_dynamic_update(mode);
7974 mutex_unlock(&sched_dynamic_mutex);
7975 }
7976
7977 static int __init setup_preempt_mode(char *str)
7978 {
7979 int mode = sched_dynamic_mode(str);
7980 if (mode < 0) {
7981 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
7982 return 0;
7983 }
7984
7985 sched_dynamic_update(mode);
7986 return 1;
7987 }
7988 __setup("preempt=", setup_preempt_mode);
7989
7990 static void __init preempt_dynamic_init(void)
7991 {
7992 if (preempt_dynamic_mode == preempt_dynamic_undefined) {
7993 if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
7994 sched_dynamic_update(preempt_dynamic_none);
7995 } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
7996 sched_dynamic_update(preempt_dynamic_voluntary);
7997 } else if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
7998 sched_dynamic_update(preempt_dynamic_lazy);
7999 } else {
8000 /* Default static call setting, nothing to do */
8001 WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
8002 preempt_dynamic_mode = preempt_dynamic_full;
8003 pr_info("Dynamic Preempt: full\n");
8004 }
8005 }
8006 }
8007
8008 # define PREEMPT_MODEL_ACCESSOR(mode) \
8009 bool preempt_model_##mode(void) \
8010 { \
8011 WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
8012 return preempt_dynamic_mode == preempt_dynamic_##mode; \
8013 } \
8014 EXPORT_SYMBOL_GPL(preempt_model_##mode)
8015
8016 PREEMPT_MODEL_ACCESSOR(none);
8017 PREEMPT_MODEL_ACCESSOR(voluntary);
8018 PREEMPT_MODEL_ACCESSOR(full);
8019 PREEMPT_MODEL_ACCESSOR(lazy);
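
/*
 * For reference, PREEMPT_MODEL_ACCESSOR(full) expands (roughly) to:
 *
 *	bool preempt_model_full(void)
 *	{
 *		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined);
 *		return preempt_dynamic_mode == preempt_dynamic_full;
 *	}
 *	EXPORT_SYMBOL_GPL(preempt_model_full);
 */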
8020
8021 #else /* !CONFIG_PREEMPT_DYNAMIC: */
8022
8023 #define preempt_dynamic_mode -1
8024
8025 static inline void preempt_dynamic_init(void) { }
8026
8027 #endif /* CONFIG_PREEMPT_DYNAMIC */
8028
8029 const char *preempt_modes[] = {
8030 "none", "voluntary", "full", "lazy", NULL,
8031 };
8032
8033 const char *preempt_model_str(void)
8034 {
8035 bool brace = IS_ENABLED(CONFIG_PREEMPT_RT) &&
8036 (IS_ENABLED(CONFIG_PREEMPT_DYNAMIC) ||
8037 IS_ENABLED(CONFIG_PREEMPT_LAZY));
8038 static char buf[128];
8039
8040 if (IS_ENABLED(CONFIG_PREEMPT_BUILD)) {
8041 struct seq_buf s;
8042
8043 seq_buf_init(&s, buf, sizeof(buf));
8044 seq_buf_puts(&s, "PREEMPT");
8045
8046 if (IS_ENABLED(CONFIG_PREEMPT_RT))
8047 seq_buf_printf(&s, "%sRT%s",
8048 brace ? "_{" : "_",
8049 brace ? "," : "");
8050
8051 if (IS_ENABLED(CONFIG_PREEMPT_DYNAMIC)) {
8052 seq_buf_printf(&s, "(%s)%s",
8053 preempt_dynamic_mode >= 0 ?
8054 preempt_modes[preempt_dynamic_mode] : "undef",
8055 brace ? "}" : "");
8056 return seq_buf_str(&s);
8057 }
8058
8059 if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
8060 seq_buf_printf(&s, "LAZY%s",
8061 brace ? "}" : "");
8062 return seq_buf_str(&s);
8063 }
8064
8065 return seq_buf_str(&s);
8066 }
8067
8068 if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY_BUILD))
8069 return "VOLUNTARY";
8070
8071 return "NONE";
8072 }
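
/*
 * Example strings produced above (a non-exhaustive sketch):
 *
 *	"PREEMPT(full)"		PREEMPT_DYNAMIC kernel in full mode
 *	"PREEMPT_RT"		plain PREEMPT_RT build
 *	"PREEMPT_{RT,(full)}"	PREEMPT_RT combined with PREEMPT_DYNAMIC
 *	"VOLUNTARY" / "NONE"	non-preemptible builds
 */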
8073
8074 int io_schedule_prepare(void)
8075 {
8076 int old_iowait = current->in_iowait;
8077
8078 current->in_iowait = 1;
8079 blk_flush_plug(current->plug, true);
8080 return old_iowait;
8081 }
8082
8083 void io_schedule_finish(int token)
8084 {
8085 current->in_iowait = token;
8086 }
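
/*
 * Sketch of the intended pairing for callers that cannot simply use
 * io_schedule() or io_schedule_timeout() below:
 *
 *	int token = io_schedule_prepare();
 *
 *	...		// block in whatever way is appropriate
 *	io_schedule_finish(token);
 */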
8087
8088 /*
8089 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
8090 * that process accounting knows that this is a task in IO wait state.
8091 */
8092 long __sched io_schedule_timeout(long timeout)
8093 {
8094 int token;
8095 long ret;
8096
8097 token = io_schedule_prepare();
8098 ret = schedule_timeout(timeout);
8099 io_schedule_finish(token);
8100
8101 return ret;
8102 }
8103 EXPORT_SYMBOL(io_schedule_timeout);
8104
8105 void __sched io_schedule(void)
8106 {
8107 int token;
8108
8109 token = io_schedule_prepare();
8110 schedule();
8111 io_schedule_finish(token);
8112 }
8113 EXPORT_SYMBOL(io_schedule);
8114
8115 void sched_show_task(struct task_struct *p)
8116 {
8117 unsigned long free;
8118 int ppid;
8119
8120 if (!try_get_task_stack(p))
8121 return;
8122
8123 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
8124
8125 if (task_is_running(p))
8126 pr_cont(" running task ");
8127 free = stack_not_used(p);
8128 ppid = 0;
8129 rcu_read_lock();
8130 if (pid_alive(p))
8131 ppid = task_pid_nr(rcu_dereference(p->real_parent));
8132 rcu_read_unlock();
8133 pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%04x flags:0x%08lx\n",
8134 free, task_pid_nr(p), task_tgid_nr(p),
8135 ppid, p->flags, read_task_thread_flags(p));
8136
8137 print_worker_info(KERN_INFO, p);
8138 print_stop_info(KERN_INFO, p);
8139 print_scx_info(KERN_INFO, p);
8140 show_stack(p, NULL, KERN_INFO);
8141 put_task_stack(p);
8142 }
8143 EXPORT_SYMBOL_GPL(sched_show_task);
8144
8145 static inline bool
8146 state_filter_match(unsigned long state_filter, struct task_struct *p)
8147 {
8148 unsigned int state = READ_ONCE(p->__state);
8149
8150 /* no filter, everything matches */
8151 if (!state_filter)
8152 return true;
8153
8154 /* filter, but doesn't match */
8155 if (!(state & state_filter))
8156 return false;
8157
8158 /*
8159 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
8160 * TASK_KILLABLE).
8161 */
8162 if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
8163 return false;
8164
8165 return true;
8166 }
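
/*
 * Reminder: TASK_IDLE is TASK_UNINTERRUPTIBLE | TASK_NOLOAD, hence the
 * TASK_NOLOAD test above keeps idle kthreads out of 'D'-state dumps:
 *
 *	state_filter_match(TASK_UNINTERRUPTIBLE, idle_kthread) == false
 */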
8167
8168
8169 void show_state_filter(unsigned int state_filter)
8170 {
8171 struct task_struct *g, *p;
8172
8173 rcu_read_lock();
8174 for_each_process_thread(g, p) {
8175 /*
8176 		 * Reset the NMI-timeout: listing all tasks on a slow
8177 		 * console might take a lot of time.
8178 * Also, reset softlockup watchdogs on all CPUs, because
8179 * another CPU might be blocked waiting for us to process
8180 * an IPI.
8181 */
8182 touch_nmi_watchdog();
8183 touch_all_softlockup_watchdogs();
8184 if (state_filter_match(state_filter, p))
8185 sched_show_task(p);
8186 }
8187
8188 if (!state_filter)
8189 sysrq_sched_debug_show();
8190
8191 rcu_read_unlock();
8192 /*
8193 * Only show locks if all tasks are dumped:
8194 */
8195 if (!state_filter)
8196 debug_show_all_locks();
8197 }
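
/*
 * Illustrative note: the SysRq handlers end up here, e.g. 't' dumps
 * everything via show_state_filter(0) while 'w' dumps only blocked tasks:
 *
 *	show_state_filter(TASK_UNINTERRUPTIBLE);
 */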
8198
8199 /**
8200 * init_idle - set up an idle thread for a given CPU
8201 * @idle: task in question
8202 * @cpu: CPU the idle task belongs to
8203 *
8204 * NOTE: this function does not set the idle thread's NEED_RESCHED
8205 * flag, to make booting more robust.
8206 */
8207 void __init init_idle(struct task_struct *idle, int cpu)
8208 {
8209 struct affinity_context ac = (struct affinity_context) {
8210 .new_mask = cpumask_of(cpu),
8211 .flags = 0,
8212 };
8213 struct rq *rq = cpu_rq(cpu);
8214 unsigned long flags;
8215
8216 raw_spin_lock_irqsave(&idle->pi_lock, flags);
8217 raw_spin_rq_lock(rq);
8218
8219 idle->__state = TASK_RUNNING;
8220 idle->se.exec_start = sched_clock();
8221 /*
8222 * PF_KTHREAD should already be set at this point; regardless, make it
8223 * look like a proper per-CPU kthread.
8224 */
8225 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
8226 kthread_set_per_cpu(idle, cpu);
8227
8228 /*
8229 * No validation and serialization required at boot time and for
8230 * setting up the idle tasks of not yet online CPUs.
8231 */
8232 set_cpus_allowed_common(idle, &ac);
8233 /*
8234 * We're having a chicken and egg problem, even though we are
8235 * holding rq->lock, the CPU isn't yet set to this CPU so the
8236 * lockdep check in task_group() will fail.
8237 *
8238 * Similar case to sched_fork(). / Alternatively we could
8239 * use task_rq_lock() here and obtain the other rq->lock.
8240 *
8241 * Silence PROVE_RCU
8242 */
8243 rcu_read_lock();
8244 __set_task_cpu(idle, cpu);
8245 rcu_read_unlock();
8246
8247 rq->idle = idle;
8248 rq_set_donor(rq, idle);
8249 rcu_assign_pointer(rq->curr, idle);
8250 idle->on_rq = TASK_ON_RQ_QUEUED;
8251 idle->on_cpu = 1;
8252 raw_spin_rq_unlock(rq);
8253 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
8254
8255 /* Set the preempt count _outside_ the spinlocks! */
8256 init_idle_preempt_count(idle, cpu);
8257
8258 /*
8259 * The idle tasks have their own, simple scheduling class:
8260 */
8261 idle->sched_class = &idle_sched_class;
8262 ftrace_graph_init_idle_task(idle, cpu);
8263 vtime_init_idle(idle, cpu);
8264 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
8265 }
8266
8267 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
8268 const struct cpumask *trial)
8269 {
8270 int ret = 1;
8271
8272 if (cpumask_empty(cur))
8273 return ret;
8274
8275 ret = dl_cpuset_cpumask_can_shrink(cur, trial);
8276
8277 return ret;
8278 }
8279
8280 int task_can_attach(struct task_struct *p)
8281 {
8282 int ret = 0;
8283
8284 /*
8285 * Kthreads which disallow setaffinity shouldn't be moved
8286 * to a new cpuset; we don't want to change their CPU
8287 * affinity and isolating such threads by their set of
8288 * allowed nodes is unnecessary. Thus, cpusets are not
8289 * applicable for such threads. This prevents checking for
8290 * success of set_cpus_allowed_ptr() on all attached tasks
8291 * before cpus_mask may be changed.
8292 */
8293 if (p->flags & PF_NO_SETAFFINITY)
8294 ret = -EINVAL;
8295
8296 return ret;
8297 }
8298
8299 bool sched_smp_initialized __read_mostly;
8300
8301 #ifdef CONFIG_NUMA_BALANCING
8302 /* Migrate current task p to target_cpu */
8303 int migrate_task_to(struct task_struct *p, int target_cpu)
8304 {
8305 struct migration_arg arg = { p, target_cpu };
8306 int curr_cpu = task_cpu(p);
8307
8308 if (curr_cpu == target_cpu)
8309 return 0;
8310
8311 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
8312 return -EINVAL;
8313
8314 /* TODO: This is not properly updating schedstats */
8315
8316 trace_sched_move_numa(p, curr_cpu, target_cpu);
8317 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
8318 }
8319
8320 /*
8321 * Requeue a task on a given node and accurately track the number of NUMA
8322 * tasks on the runqueues
8323 */
8324 void sched_setnuma(struct task_struct *p, int nid)
8325 {
8326 guard(task_rq_lock)(p);
8327 scoped_guard (sched_change, p, DEQUEUE_SAVE)
8328 p->numa_preferred_nid = nid;
8329 }
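
/*
 * Rough sketch of what the guards above do (not a literal expansion):
 *
 *	rq = task_rq_lock(p, &rf);		// guard(task_rq_lock)
 *	dequeue_task(rq, p, DEQUEUE_SAVE);	// scoped_guard (sched_change, ...)
 *	p->numa_preferred_nid = nid;
 *	enqueue_task(rq, p, ENQUEUE_RESTORE);
 *	task_rq_unlock(rq, p, &rf);
 */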
8330 #endif /* CONFIG_NUMA_BALANCING */
8331
8332 #ifdef CONFIG_HOTPLUG_CPU
8333 /*
8334 * Invoked on the outgoing CPU in context of the CPU hotplug thread
8335 * after ensuring that there are no user space tasks left on the CPU.
8336 *
8337 * If there is a lazy mm in use on the hotplug thread, drop it and
8338 * switch to init_mm.
8339 *
8340 * The reference count on init_mm is dropped in finish_cpu().
8341 */
8342 static void sched_force_init_mm(void)
8343 {
8344 struct mm_struct *mm = current->active_mm;
8345
8346 if (mm != &init_mm) {
8347 mmgrab_lazy_tlb(&init_mm);
8348 local_irq_disable();
8349 current->active_mm = &init_mm;
8350 switch_mm_irqs_off(mm, &init_mm, current);
8351 local_irq_enable();
8352 finish_arch_post_lock_switch();
8353 mmdrop_lazy_tlb(mm);
8354 }
8355
8356 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
8357 }
8358
8359 static int __balance_push_cpu_stop(void *arg)
8360 {
8361 struct task_struct *p = arg;
8362 struct rq *rq = this_rq();
8363 struct rq_flags rf;
8364 int cpu;
8365
8366 scoped_guard (raw_spinlock_irq, &p->pi_lock) {
8367 /*
8368 * We may change the underlying rq, but the locks held will
8369 * appropriately be "transferred" when switching.
8370 */
8371 context_unsafe_alias(rq);
8372
8373 cpu = select_fallback_rq(rq->cpu, p);
8374
8375 rq_lock(rq, &rf);
8376 update_rq_clock(rq);
8377 if (task_rq(p) == rq && task_on_rq_queued(p))
8378 rq = __migrate_task(rq, &rf, p, cpu);
8379 rq_unlock(rq, &rf);
8380 }
8381
8382 put_task_struct(p);
8383
8384 return 0;
8385 }
8386
8387 static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
8388
8389 /*
8390 * Ensure we only run per-cpu kthreads once the CPU goes !active.
8391 *
8392 * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
8393 * effective when the hotplug motion is down.
8394 */
8395 static void balance_push(struct rq *rq)
8396 __must_hold(__rq_lockp(rq))
8397 {
8398 struct task_struct *push_task = rq->curr;
8399
8400 lockdep_assert_rq_held(rq);
8401
8402 /*
8403 * Ensure the thing is persistent until balance_push_set(.on = false);
8404 */
8405 rq->balance_callback = &balance_push_callback;
8406
8407 /*
8408 * Only active while going offline and when invoked on the outgoing
8409 * CPU.
8410 */
8411 if (!cpu_dying(rq->cpu) || rq != this_rq())
8412 return;
8413
8414 /*
8415 * Both the cpu-hotplug and stop task are in this case and are
8416 * required to complete the hotplug process.
8417 */
8418 if (kthread_is_per_cpu(push_task) ||
8419 is_migration_disabled(push_task)) {
8420
8421 /*
8422 * If this is the idle task on the outgoing CPU try to wake
8423 * up the hotplug control thread which might wait for the
8424 * last task to vanish. The rcuwait_active() check is
8425 * accurate here because the waiter is pinned on this CPU
8426 		 * and obviously can't be running in parallel.
8427 *
8428 * On RT kernels this also has to check whether there are
8429 * pinned and scheduled out tasks on the runqueue. They
8430 * need to leave the migrate disabled section first.
8431 */
8432 if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
8433 rcuwait_active(&rq->hotplug_wait)) {
8434 raw_spin_rq_unlock(rq);
8435 rcuwait_wake_up(&rq->hotplug_wait);
8436 raw_spin_rq_lock(rq);
8437 }
8438 return;
8439 }
8440
8441 get_task_struct(push_task);
8442 /*
8443 * Temporarily drop rq->lock such that we can wake-up the stop task.
8444 * Both preemption and IRQs are still disabled.
8445 */
8446 preempt_disable();
8447 raw_spin_rq_unlock(rq);
8448 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
8449 this_cpu_ptr(&push_work));
8450 preempt_enable();
8451 /*
8452 * At this point need_resched() is true and we'll take the loop in
8453 * schedule(). The next pick is obviously going to be the stop task
8454 * which kthread_is_per_cpu() and will push this task away.
8455 */
8456 raw_spin_rq_lock(rq);
8457 }
8458
8459 static void balance_push_set(int cpu, bool on)
8460 {
8461 struct rq *rq = cpu_rq(cpu);
8462 struct rq_flags rf;
8463
8464 rq_lock_irqsave(rq, &rf);
8465 if (on) {
8466 WARN_ON_ONCE(rq->balance_callback);
8467 rq->balance_callback = &balance_push_callback;
8468 } else if (rq->balance_callback == &balance_push_callback) {
8469 rq->balance_callback = NULL;
8470 }
8471 rq_unlock_irqrestore(rq, &rf);
8472 }
8473
8474 /*
8475 * Invoked from a CPUs hotplug control thread after the CPU has been marked
8476 * inactive. All tasks which are not per CPU kernel threads are either
8477 * pushed off this CPU now via balance_push() or placed on a different CPU
8478 * during wakeup. Wait until the CPU is quiescent.
8479 */
8480 static void balance_hotplug_wait(void)
8481 {
8482 struct rq *rq = this_rq();
8483
8484 rcuwait_wait_event(&rq->hotplug_wait,
8485 rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
8486 TASK_UNINTERRUPTIBLE);
8487 }
8488
8489 #else /* !CONFIG_HOTPLUG_CPU: */
8490
8491 static inline void balance_push(struct rq *rq)
8492 {
8493 }
8494
8495 static inline void balance_push_set(int cpu, bool on)
8496 {
8497 }
8498
8499 static inline void balance_hotplug_wait(void)
8500 {
8501 }
8502
8503 #endif /* !CONFIG_HOTPLUG_CPU */
8504
8505 void set_rq_online(struct rq *rq)
8506 {
8507 if (!rq->online) {
8508 const struct sched_class *class;
8509
8510 cpumask_set_cpu(rq->cpu, rq->rd->online);
8511 rq->online = 1;
8512
8513 for_each_class(class) {
8514 if (class->rq_online)
8515 class->rq_online(rq);
8516 }
8517 }
8518 }
8519
8520 void set_rq_offline(struct rq *rq)
8521 {
8522 if (rq->online) {
8523 const struct sched_class *class;
8524
8525 update_rq_clock(rq);
8526 for_each_class(class) {
8527 if (class->rq_offline)
8528 class->rq_offline(rq);
8529 }
8530
8531 cpumask_clear_cpu(rq->cpu, rq->rd->online);
8532 rq->online = 0;
8533 }
8534 }
8535
8536 static inline void sched_set_rq_online(struct rq *rq, int cpu)
8537 {
8538 struct rq_flags rf;
8539
8540 rq_lock_irqsave(rq, &rf);
8541 if (rq->rd) {
8542 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8543 set_rq_online(rq);
8544 }
8545 rq_unlock_irqrestore(rq, &rf);
8546 }
8547
8548 static inline void sched_set_rq_offline(struct rq *rq, int cpu)
8549 {
8550 struct rq_flags rf;
8551
8552 rq_lock_irqsave(rq, &rf);
8553 if (rq->rd) {
8554 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8555 set_rq_offline(rq);
8556 }
8557 rq_unlock_irqrestore(rq, &rf);
8558 }
8559
8560 /*
8561 * used to mark begin/end of suspend/resume:
8562 */
8563 static int num_cpus_frozen;
8564
8565 /*
8566 * Update cpusets according to cpu_active mask. If cpusets are
8567 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
8568 * around partition_sched_domains().
8569 *
8570 * If we come here as part of a suspend/resume, don't touch cpusets because we
8571 * want to restore it back to its original state upon resume anyway.
8572 */
8573 static void cpuset_cpu_active(void)
8574 {
8575 if (cpuhp_tasks_frozen) {
8576 /*
8577 * num_cpus_frozen tracks how many CPUs are involved in suspend
8578 * resume sequence. As long as this is not the last online
8579 * operation in the resume sequence, just build a single sched
8580 * domain, ignoring cpusets.
8581 */
8582 cpuset_reset_sched_domains();
8583 if (--num_cpus_frozen)
8584 return;
8585 /*
8586 * This is the last CPU online operation. So fall through and
8587 * restore the original sched domains by considering the
8588 * cpuset configurations.
8589 */
8590 cpuset_force_rebuild();
8591 }
8592 cpuset_update_active_cpus();
8593 }
8594
8595 static void cpuset_cpu_inactive(unsigned int cpu)
8596 {
8597 if (!cpuhp_tasks_frozen) {
8598 cpuset_update_active_cpus();
8599 } else {
8600 num_cpus_frozen++;
8601 cpuset_reset_sched_domains();
8602 }
8603 }
8604
8605 static inline void sched_smt_present_inc(int cpu)
8606 {
8607 #ifdef CONFIG_SCHED_SMT
8608 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8609 static_branch_inc_cpuslocked(&sched_smt_present);
8610 #endif
8611 }
8612
8613 static inline void sched_smt_present_dec(int cpu)
8614 {
8615 #ifdef CONFIG_SCHED_SMT
8616 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8617 static_branch_dec_cpuslocked(&sched_smt_present);
8618 #endif
8619 }
8620
8621 int sched_cpu_activate(unsigned int cpu)
8622 {
8623 struct rq *rq = cpu_rq(cpu);
8624
8625 /*
8626 * Clear the balance_push callback and prepare to schedule
8627 * regular tasks.
8628 */
8629 balance_push_set(cpu, false);
8630
8631 /*
8632 * When going up, increment the number of cores with SMT present.
8633 */
8634 sched_smt_present_inc(cpu);
8635 set_cpu_active(cpu, true);
8636
8637 if (sched_smp_initialized) {
8638 sched_update_numa(cpu, true);
8639 sched_domains_numa_masks_set(cpu);
8640 cpuset_cpu_active();
8641 }
8642
8643 scx_rq_activate(rq);
8644
8645 /*
8646 * Put the rq online, if not already. This happens:
8647 *
8648 * 1) In the early boot process, because we build the real domains
8649 * after all CPUs have been brought up.
8650 *
8651 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
8652 * domains.
8653 */
8654 sched_set_rq_online(rq, cpu);
8655
8656 return 0;
8657 }
8658
8659 int sched_cpu_deactivate(unsigned int cpu)
8660 {
8661 struct rq *rq = cpu_rq(cpu);
8662 int ret;
8663
8664 ret = dl_bw_deactivate(cpu);
8665
8666 if (ret)
8667 return ret;
8668
8669 /*
8670 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
8671 * load balancing when not active
8672 */
8673 nohz_balance_exit_idle(rq);
8674
8675 set_cpu_active(cpu, false);
8676
8677 /*
8678 * From this point forward, this CPU will refuse to run any task that
8679 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
8680 * push those tasks away until this gets cleared, see
8681 * sched_cpu_dying().
8682 */
8683 balance_push_set(cpu, true);
8684
8685 /*
8686 * We've cleared cpu_active_mask / set balance_push, wait for all
8687 * preempt-disabled and RCU users of this state to go away such that
8688 * all new such users will observe it.
8689 *
8690 * Specifically, we rely on ttwu to no longer target this CPU, see
8691 * ttwu_queue_cond() and is_cpu_allowed().
8692 *
8693 * Do sync before park smpboot threads to take care the RCU boost case.
8694 */
8695 synchronize_rcu();
8696
8697 sched_set_rq_offline(rq, cpu);
8698
8699 scx_rq_deactivate(rq);
8700
8701 /*
8702 * When going down, decrement the number of cores with SMT present.
8703 */
8704 sched_smt_present_dec(cpu);
8705
8706 #ifdef CONFIG_SCHED_SMT
8707 sched_core_cpu_deactivate(cpu);
8708 #endif
8709
8710 if (!sched_smp_initialized)
8711 return 0;
8712
8713 sched_update_numa(cpu, false);
8714 cpuset_cpu_inactive(cpu);
8715 sched_domains_numa_masks_clear(cpu);
8716 return 0;
8717 }
8718
8719 static void sched_rq_cpu_starting(unsigned int cpu)
8720 {
8721 struct rq *rq = cpu_rq(cpu);
8722
8723 rq->calc_load_update = calc_load_update;
8724 update_max_interval();
8725 }
8726
8727 int sched_cpu_starting(unsigned int cpu)
8728 {
8729 sched_core_cpu_starting(cpu);
8730 sched_rq_cpu_starting(cpu);
8731 sched_tick_start(cpu);
8732 return 0;
8733 }
8734
8735 #ifdef CONFIG_HOTPLUG_CPU
8736
8737 /*
8738 * Invoked immediately before the stopper thread is invoked to bring the
8739 * CPU down completely. At this point all per CPU kthreads except the
8740 * hotplug thread (current) and the stopper thread (inactive) have been
8741 * either parked or have been unbound from the outgoing CPU. Ensure that
8742 * any of those which might be on the way out are gone.
8743 *
8744 * If after this point a bound task is being woken on this CPU then the
8745  * responsible hotplug callback has failed to do its job.
8746 * sched_cpu_dying() will catch it with the appropriate fireworks.
8747 */
8748 int sched_cpu_wait_empty(unsigned int cpu)
8749 {
8750 balance_hotplug_wait();
8751 sched_force_init_mm();
8752 return 0;
8753 }
8754
8755 /*
8756 * Since this CPU is going 'away' for a while, fold any nr_active delta we
8757 * might have. Called from the CPU stopper task after ensuring that the
8758 * stopper is the last running task on the CPU, so nr_active count is
8759 * stable. We need to take the tear-down thread which is calling this into
8760 * account, so we hand in adjust = 1 to the load calculation.
8761 *
8762 * Also see the comment "Global load-average calculations".
8763 */
8764 static void calc_load_migrate(struct rq *rq)
8765 {
8766 long delta = calc_load_fold_active(rq, 1);
8767
8768 if (delta)
8769 atomic_long_add(delta, &calc_load_tasks);
8770 }
8771
8772 static void dump_rq_tasks(struct rq *rq, const char *loglvl)
8773 {
8774 struct task_struct *g, *p;
8775 int cpu = cpu_of(rq);
8776
8777 lockdep_assert_rq_held(rq);
8778
8779 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
8780 for_each_process_thread(g, p) {
8781 if (task_cpu(p) != cpu)
8782 continue;
8783
8784 if (!task_on_rq_queued(p))
8785 continue;
8786
8787 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
8788 }
8789 }
8790
8791 int sched_cpu_dying(unsigned int cpu)
8792 {
8793 struct rq *rq = cpu_rq(cpu);
8794 struct rq_flags rf;
8795
8796 /* Handle pending wakeups and then migrate everything off */
8797 sched_tick_stop(cpu);
8798
8799 rq_lock_irqsave(rq, &rf);
8800 update_rq_clock(rq);
8801 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
8802 WARN(true, "Dying CPU not properly vacated!");
8803 dump_rq_tasks(rq, KERN_WARNING);
8804 }
8805 dl_server_stop(&rq->fair_server);
8806 #ifdef CONFIG_SCHED_CLASS_EXT
8807 dl_server_stop(&rq->ext_server);
8808 #endif
8809 rq_unlock_irqrestore(rq, &rf);
8810
8811 calc_load_migrate(rq);
8812 update_max_interval();
8813 hrtick_clear(rq);
8814 sched_core_cpu_dying(cpu);
8815 return 0;
8816 }
8817 #endif /* CONFIG_HOTPLUG_CPU */
8818
8819 void __init sched_init_smp(void)
8820 {
8821 sched_init_numa(NUMA_NO_NODE);
8822
8823 prandom_init_once(&sched_rnd_state);
8824
8825 /*
8826 * There's no userspace yet to cause hotplug operations; hence all the
8827 * CPU masks are stable and all blatant races in the below code cannot
8828 * happen.
8829 */
8830 sched_domains_mutex_lock();
8831 sched_init_domains(cpu_active_mask);
8832 sched_domains_mutex_unlock();
8833
8834 /* Move init over to a non-isolated CPU */
8835 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
8836 BUG();
8837 current->flags &= ~PF_NO_SETAFFINITY;
8838 sched_init_granularity();
8839
8840 init_sched_rt_class();
8841 init_sched_dl_class();
8842
8843 sched_init_dl_servers();
8844
8845 sched_smp_initialized = true;
8846 }
8847
8848 static int __init migration_init(void)
8849 {
8850 sched_cpu_starting(smp_processor_id());
8851 return 0;
8852 }
8853 early_initcall(migration_init);
8854
8855 int in_sched_functions(unsigned long addr)
8856 {
8857 return in_lock_functions(addr) ||
8858 (addr >= (unsigned long)__sched_text_start
8859 && addr < (unsigned long)__sched_text_end);
8860 }
8861
8862 #ifdef CONFIG_CGROUP_SCHED
8863 /*
8864 * Default task group.
8865 * Every task in system belongs to this group at bootup.
8866 */
8867 struct task_group root_task_group;
8868 LIST_HEAD(task_groups);
8869
8870 /* Cacheline aligned slab cache for task_group */
8871 static struct kmem_cache *task_group_cache __ro_after_init;
8872 #endif
8873
8874 void __init sched_init(void)
8875 {
8876 unsigned long ptr = 0;
8877 int i;
8878
8879 /* Make sure the linker didn't screw up */
8880 BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
8881 BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
8882 BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
8883 BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
8884 #ifdef CONFIG_SCHED_CLASS_EXT
8885 BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
8886 BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
8887 #endif
8888
8889 wait_bit_init();
8890
8891 #ifdef CONFIG_FAIR_GROUP_SCHED
8892 ptr += 2 * nr_cpu_ids * sizeof(void **);
8893 #endif
8894 #ifdef CONFIG_RT_GROUP_SCHED
8895 ptr += 2 * nr_cpu_ids * sizeof(void **);
8896 #endif
8897 if (ptr) {
8898 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
8899
8900 #ifdef CONFIG_FAIR_GROUP_SCHED
8901 root_task_group.se = (struct sched_entity **)ptr;
8902 ptr += nr_cpu_ids * sizeof(void **);
8903
8904 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8905 ptr += nr_cpu_ids * sizeof(void **);
8906
8907 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
8908 init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
8909 #endif /* CONFIG_FAIR_GROUP_SCHED */
8910 #ifdef CONFIG_EXT_GROUP_SCHED
8911 scx_tg_init(&root_task_group);
8912 #endif /* CONFIG_EXT_GROUP_SCHED */
8913 #ifdef CONFIG_RT_GROUP_SCHED
8914 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8915 ptr += nr_cpu_ids * sizeof(void **);
8916
8917 root_task_group.rt_rq = (struct rt_rq **)ptr;
8918 ptr += nr_cpu_ids * sizeof(void **);
8919
8920 #endif /* CONFIG_RT_GROUP_SCHED */
8921 }
8922
8923 init_defrootdomain();
8924
8925 #ifdef CONFIG_RT_GROUP_SCHED
8926 init_rt_bandwidth(&root_task_group.rt_bandwidth,
8927 global_rt_period(), global_rt_runtime());
8928 #endif /* CONFIG_RT_GROUP_SCHED */
8929
8930 #ifdef CONFIG_CGROUP_SCHED
8931 task_group_cache = KMEM_CACHE(task_group, 0);
8932
8933 list_add(&root_task_group.list, &task_groups);
8934 INIT_LIST_HEAD(&root_task_group.children);
8935 INIT_LIST_HEAD(&root_task_group.siblings);
8936 autogroup_init(&init_task);
8937 #endif /* CONFIG_CGROUP_SCHED */
8938
8939 for_each_possible_cpu(i) {
8940 struct rq *rq;
8941
8942 rq = cpu_rq(i);
8943 raw_spin_lock_init(&rq->__lock);
8944 rq->nr_running = 0;
8945 rq->calc_load_active = 0;
8946 rq->calc_load_update = jiffies + LOAD_FREQ;
8947 init_cfs_rq(&rq->cfs);
8948 init_rt_rq(&rq->rt);
8949 init_dl_rq(&rq->dl);
8950 #ifdef CONFIG_FAIR_GROUP_SCHED
8951 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
8952 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
8953 /*
8954 * How much CPU bandwidth does root_task_group get?
8955 *
8956 * In case of task-groups formed through the cgroup filesystem, it
8957 * gets 100% of the CPU resources in the system. This overall
8958 * system CPU resource is divided among the tasks of
8959 * root_task_group and its child task-groups in a fair manner,
8960 * based on each entity's (task or task-group's) weight
8961 * (se->load.weight).
8962 *
8963 		 * In other words, if root_task_group has 10 tasks (of weight
8964 * 1024) and two child groups A0 and A1 (of weight 1024 each),
8965 * then A0's share of the CPU resource is:
8966 *
8967 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
8968 *
8969 * We achieve this by letting root_task_group's tasks sit
8970 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
8971 */
8972 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
8973 #endif /* CONFIG_FAIR_GROUP_SCHED */
8974
8975 #ifdef CONFIG_RT_GROUP_SCHED
8976 /*
8977 * This is required for init cpu because rt.c:__enable_runtime()
8978 * starts working after scheduler_running, which is not the case
8979 * yet.
8980 */
8981 rq->rt.rt_runtime = global_rt_runtime();
8982 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
8983 #endif
8984 rq->next_class = &idle_sched_class;
8985
8986 rq->sd = NULL;
8987 rq->rd = NULL;
8988 rq->cpu_capacity = SCHED_CAPACITY_SCALE;
8989 rq->balance_callback = &balance_push_callback;
8990 rq->active_balance = 0;
8991 rq->next_balance = jiffies;
8992 rq->push_cpu = 0;
8993 rq->cpu = i;
8994 rq->online = 0;
8995 rq->idle_stamp = 0;
8996 rq->avg_idle = 2*sysctl_sched_migration_cost;
8997 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
8998
8999 INIT_LIST_HEAD(&rq->cfs_tasks);
9000
9001 rq_attach_root(rq, &def_root_domain);
9002 #ifdef CONFIG_NO_HZ_COMMON
9003 rq->last_blocked_load_update_tick = jiffies;
9004 atomic_set(&rq->nohz_flags, 0);
9005
9006 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
9007 #endif
9008 #ifdef CONFIG_HOTPLUG_CPU
9009 rcuwait_init(&rq->hotplug_wait);
9010 #endif
9011 hrtick_rq_init(rq);
9012 atomic_set(&rq->nr_iowait, 0);
9013 fair_server_init(rq);
9014 #ifdef CONFIG_SCHED_CLASS_EXT
9015 ext_server_init(rq);
9016 #endif
9017
9018 #ifdef CONFIG_SCHED_CORE
9019 rq->core = rq;
9020 rq->core_pick = NULL;
9021 rq->core_dl_server = NULL;
9022 rq->core_enabled = 0;
9023 rq->core_tree = RB_ROOT;
9024 rq->core_forceidle_count = 0;
9025 rq->core_forceidle_occupation = 0;
9026 rq->core_forceidle_start = 0;
9027
9028 rq->core_cookie = 0UL;
9029 #endif
9030 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
9031 }
9032
9033 set_load_weight(&init_task, false);
9034 init_task.se.slice = sysctl_sched_base_slice;
9035
9036 /*
9037 * The boot idle thread does lazy MMU switching as well:
9038 */
9039 mmgrab_lazy_tlb(&init_mm);
9040 enter_lazy_tlb(&init_mm, current);
9041
9042 /*
9043 * The idle task doesn't need the kthread struct to function, but it
9044 * is dressed up as a per-CPU kthread and thus needs to play the part
9045 * if we want to avoid special-casing it in code that deals with per-CPU
9046 * kthreads.
9047 */
9048 WARN_ON(!set_kthread_struct(current));
9049
9050 /*
9051 * Make us the idle thread. Technically, schedule() should not be
9052 * called from this thread, however somewhere below it might be,
9053 * but because we are the idle thread, we just pick up running again
9054 * when this runqueue becomes "idle".
9055 */
9056 __sched_fork(0, current);
9057 init_idle(current, smp_processor_id());
9058
9059 calc_load_update = jiffies + LOAD_FREQ;
9060
9061 idle_thread_set_boot_cpu();
9062
9063 balance_push_set(smp_processor_id(), false);
9064 init_sched_fair_class();
9065 init_sched_ext_class();
9066
9067 psi_init();
9068
9069 init_uclamp();
9070
9071 preempt_dynamic_init();
9072
9073 scheduler_running = 1;
9074 }
9075
9076 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
9077
9078 void __might_sleep(const char *file, int line)
9079 {
9080 unsigned int state = get_current_state();
9081 /*
9082 * Blocking primitives will set (and therefore destroy) current->state,
9083 * since we will exit with TASK_RUNNING make sure we enter with it,
9084 * otherwise we will destroy state.
9085 */
9086 WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
9087 "do not call blocking ops when !TASK_RUNNING; "
9088 "state=%x set at [<%p>] %pS\n", state,
9089 (void *)current->task_state_change,
9090 (void *)current->task_state_change);
9091
9092 __might_resched(file, line, 0);
9093 }
9094 EXPORT_SYMBOL(__might_sleep);
9095
9096 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
9097 {
9098 if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
9099 return;
9100
9101 if (preempt_count() == preempt_offset)
9102 return;
9103
9104 pr_err("Preemption disabled at:");
9105 print_ip_sym(KERN_ERR, ip);
9106 }
9107
9108 static inline bool resched_offsets_ok(unsigned int offsets)
9109 {
9110 unsigned int nested = preempt_count();
9111
9112 nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
9113
9114 return nested == offsets;
9115 }
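/*
 * Illustrative sketch: offsets packs the expected preempt count in the low
 * bits and the expected RCU nest depth above MIGHT_RESCHED_RCU_SHIFT (both
 * defined earlier in this file). E.g. a caller allowed to resched inside
 * exactly one RCU read-side critical section with preemption enabled would
 * pass offsets == (1 << MIGHT_RESCHED_RCU_SHIFT).
 */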
9116
9117 void __might_resched(const char *file, int line, unsigned int offsets)
9118 {
9119 /* Ratelimiting timestamp: */
9120 static unsigned long prev_jiffy;
9121
9122 unsigned long preempt_disable_ip;
9123
9124 /* WARN_ON_ONCE() by default, no rate limit required: */
9125 rcu_sleep_check();
9126
9127 if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
9128 !is_idle_task(current) && !current->non_block_count) ||
9129 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
9130 oops_in_progress)
9131 return;
9132
9133 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
9134 return;
9135 prev_jiffy = jiffies;
9136
9137 /* Save this before calling printk(), since that will clobber it: */
9138 preempt_disable_ip = get_preempt_disable_ip(current);
9139
9140 pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
9141 file, line);
9142 pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
9143 in_atomic(), irqs_disabled(), current->non_block_count,
9144 current->pid, current->comm);
9145 pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
9146 offsets & MIGHT_RESCHED_PREEMPT_MASK);
9147
9148 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
9149 pr_err("RCU nest depth: %d, expected: %u\n",
9150 rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
9151 }
9152
9153 if (task_stack_end_corrupted(current))
9154 pr_emerg("Thread overran stack, or stack corrupted\n");
9155
9156 debug_show_held_locks(current);
9157 if (irqs_disabled())
9158 print_irqtrace_events(current);
9159
9160 print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
9161 preempt_disable_ip);
9162
9163 dump_stack();
9164 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
9165 }
9166 EXPORT_SYMBOL(__might_resched);
9167
9168 void __cant_sleep(const char *file, int line, int preempt_offset)
9169 {
9170 static unsigned long prev_jiffy;
9171
9172 if (irqs_disabled())
9173 return;
9174
9175 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
9176 return;
9177
9178 if (preempt_count() > preempt_offset)
9179 return;
9180
9181 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
9182 return;
9183 prev_jiffy = jiffies;
9184
9185 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
9186 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
9187 in_atomic(), irqs_disabled(),
9188 current->pid, current->comm);
9189
9190 debug_show_held_locks(current);
9191 dump_stack();
9192 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
9193 }
9194 EXPORT_SYMBOL_GPL(__cant_sleep);
9195
9196 # ifdef CONFIG_SMP
9197 void __cant_migrate(const char *file, int line)
9198 {
9199 static unsigned long prev_jiffy;
9200
9201 if (irqs_disabled())
9202 return;
9203
9204 if (is_migration_disabled(current))
9205 return;
9206
9207 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
9208 return;
9209
9210 if (preempt_count() > 0)
9211 return;
9212
9213 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
9214 return;
9215 prev_jiffy = jiffies;
9216
9217 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
9218 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
9219 in_atomic(), irqs_disabled(), is_migration_disabled(current),
9220 current->pid, current->comm);
9221
9222 debug_show_held_locks(current);
9223 dump_stack();
9224 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
9225 }
9226 EXPORT_SYMBOL_GPL(__cant_migrate);
9227 # endif /* CONFIG_SMP */
9228 #endif /* CONFIG_DEBUG_ATOMIC_SLEEP */
9229
9230 #ifdef CONFIG_MAGIC_SYSRQ
9231 void normalize_rt_tasks(void)
9232 {
9233 struct task_struct *g, *p;
9234 struct sched_attr attr = {
9235 .sched_policy = SCHED_NORMAL,
9236 };
9237
9238 read_lock(&tasklist_lock);
9239 for_each_process_thread(g, p) {
9240 /*
9241 * Only normalize user tasks:
9242 */
9243 if (p->flags & PF_KTHREAD)
9244 continue;
9245
9246 p->se.exec_start = 0;
9247 schedstat_set(p->stats.wait_start, 0);
9248 schedstat_set(p->stats.sleep_start, 0);
9249 schedstat_set(p->stats.block_start, 0);
9250
9251 if (!rt_or_dl_task(p)) {
9252 /*
9253 * Renice negative nice level userspace
9254 * tasks back to 0:
9255 */
9256 if (task_nice(p) < 0)
9257 set_user_nice(p, 0);
9258 continue;
9259 }
9260
9261 __sched_setscheduler(p, &attr, false, false);
9262 }
9263 read_unlock(&tasklist_lock);
9264 }
9265
9266 #endif /* CONFIG_MAGIC_SYSRQ */
9267
9268 #ifdef CONFIG_KGDB_KDB
9269 /*
9270 * These functions are only useful for KDB.
9271 *
9272 * They can only be called when the whole system has been
9273 * stopped - every CPU needs to be quiescent, and no scheduling
9274 * activity can take place. Using them for anything else would
9275 * be a serious bug, and as a result, they aren't even visible
9276 * under any other configuration.
9277 */
9278
9279 /**
9280 * curr_task - return the current task for a given CPU.
9281 * @cpu: the processor in question.
9282 *
9283 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
9284 *
9285 * Return: The current task for @cpu.
9286 */
9287 struct task_struct *curr_task(int cpu)
9288 {
9289 return cpu_curr(cpu);
9290 }
9291
9292 #endif /* CONFIG_KGDB_KDB */
9293
9294 #ifdef CONFIG_CGROUP_SCHED
9295 /* task_group_lock serializes the addition/removal of task groups */
9296 static DEFINE_SPINLOCK(task_group_lock);
9297
9298 static inline void alloc_uclamp_sched_group(struct task_group *tg,
9299 struct task_group *parent)
9300 {
9301 #ifdef CONFIG_UCLAMP_TASK_GROUP
9302 enum uclamp_id clamp_id;
9303
9304 for_each_clamp_id(clamp_id) {
9305 uclamp_se_set(&tg->uclamp_req[clamp_id],
9306 uclamp_none(clamp_id), false);
9307 tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
9308 }
9309 #endif
9310 }
9311
9312 static void sched_free_group(struct task_group *tg)
9313 {
9314 free_fair_sched_group(tg);
9315 free_rt_sched_group(tg);
9316 autogroup_free(tg);
9317 kmem_cache_free(task_group_cache, tg);
9318 }
9319
9320 static void sched_free_group_rcu(struct rcu_head *rcu)
9321 {
9322 sched_free_group(container_of(rcu, struct task_group, rcu));
9323 }
9324
9325 static void sched_unregister_group(struct task_group *tg)
9326 {
9327 unregister_fair_sched_group(tg);
9328 unregister_rt_sched_group(tg);
9329 /*
9330 * We have to wait for yet another RCU grace period to expire, as
9331 * print_cfs_stats() might run concurrently.
9332 */
9333 call_rcu(&tg->rcu, sched_free_group_rcu);
9334 }
9335
9336 /* allocate runqueue etc for a new task group */
9337 struct task_group *sched_create_group(struct task_group *parent)
9338 {
9339 struct task_group *tg;
9340
9341 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
9342 if (!tg)
9343 return ERR_PTR(-ENOMEM);
9344
9345 if (!alloc_fair_sched_group(tg, parent))
9346 goto err;
9347
9348 if (!alloc_rt_sched_group(tg, parent))
9349 goto err;
9350
9351 scx_tg_init(tg);
9352 alloc_uclamp_sched_group(tg, parent);
9353
9354 return tg;
9355
9356 err:
9357 sched_free_group(tg);
9358 return ERR_PTR(-ENOMEM);
9359 }
9360
9361 void sched_online_group(struct task_group *tg, struct task_group *parent)
9362 {
9363 unsigned long flags;
9364
9365 spin_lock_irqsave(&task_group_lock, flags);
9366 list_add_tail_rcu(&tg->list, &task_groups);
9367
9368 /* Root should already exist: */
9369 WARN_ON(!parent);
9370
9371 tg->parent = parent;
9372 INIT_LIST_HEAD(&tg->children);
9373 list_add_rcu(&tg->siblings, &parent->children);
9374 spin_unlock_irqrestore(&task_group_lock, flags);
9375
9376 online_fair_sched_group(tg);
9377 }
9378
9379 /* RCU callback to free various structures associated with a task group */
9380 static void sched_unregister_group_rcu(struct rcu_head *rhp)
9381 {
9382 /* Now it should be safe to free those cfs_rqs: */
9383 sched_unregister_group(container_of(rhp, struct task_group, rcu));
9384 }
9385
9386 void sched_destroy_group(struct task_group *tg)
9387 {
9388 /* Wait for possible concurrent references to cfs_rqs to complete: */
9389 call_rcu(&tg->rcu, sched_unregister_group_rcu);
9390 }
9391
9392 void sched_release_group(struct task_group *tg)
9393 {
9394 unsigned long flags;
9395
9396 /*
9397 * Unlink first, to avoid walk_tg_tree_from() from finding us (via
9398 * sched_cfs_period_timer()).
9399 *
9400 * For this to be effective, we have to wait for all pending users of
9401 * this task group to leave their RCU critical section to ensure no new
9402 * user will see our dying task group any more. Specifically ensure
9403 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
9404 *
9405 * We therefore defer calling unregister_fair_sched_group() to
9406 * sched_unregister_group() which is guaranteed to get called only after the
9407 * current RCU grace period has expired.
9408 */
9409 spin_lock_irqsave(&task_group_lock, flags);
9410 list_del_rcu(&tg->list);
9411 list_del_rcu(&tg->siblings);
9412 spin_unlock_irqrestore(&task_group_lock, flags);
9413 }
9414
9415 static void sched_change_group(struct task_struct *tsk)
9416 {
9417 struct task_group *tg;
9418
9419 /*
9420 * All callers are synchronized by task_rq_lock(); we do not use RCU
9421 * which is pointless here. Thus, we pass "true" to task_css_check()
9422 * to prevent lockdep warnings.
9423 */
9424 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
9425 struct task_group, css);
9426 tg = autogroup_task_group(tsk, tg);
9427 tsk->sched_task_group = tg;
9428
9429 #ifdef CONFIG_FAIR_GROUP_SCHED
9430 if (tsk->sched_class->task_change_group)
9431 tsk->sched_class->task_change_group(tsk);
9432 else
9433 #endif
9434 set_task_rq(tsk, task_cpu(tsk));
9435 }
9436
9437 /*
9438 * Change task's runqueue when it moves between groups.
9439 *
9440 * The caller of this function should have put the task in its new group by
9441 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
9442 * its new group.
9443 */
9444 void sched_move_task(struct task_struct *tsk, bool for_autogroup)
9445 {
9446 unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
9447 bool resched = false;
9448 bool queued = false;
9449 struct rq *rq;
9450
9451 CLASS(task_rq_lock, rq_guard)(tsk);
9452 rq = rq_guard.rq;
9453
9454 scoped_guard (sched_change, tsk, queue_flags) {
9455 sched_change_group(tsk);
9456 if (!for_autogroup)
9457 scx_cgroup_move_task(tsk);
9458 if (scope->running)
9459 resched = true;
9460 queued = scope->queued;
9461 }
9462
9463 if (resched)
9464 resched_curr(rq);
9465 else if (queued)
9466 wakeup_preempt(rq, tsk, 0);
9467
9468 __balance_callbacks(rq, &rq_guard.rf);
9469 }
9470
9471 static struct cgroup_subsys_state *
9472 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
9473 {
9474 struct task_group *parent = css_tg(parent_css);
9475 struct task_group *tg;
9476
9477 if (!parent) {
9478 /* This is early initialization for the top cgroup */
9479 return &root_task_group.css;
9480 }
9481
9482 tg = sched_create_group(parent);
9483 if (IS_ERR(tg))
9484 return ERR_PTR(-ENOMEM);
9485
9486 return &tg->css;
9487 }
9488
9489 /* Expose task group only after completing cgroup initialization */
9490 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
9491 {
9492 struct task_group *tg = css_tg(css);
9493 struct task_group *parent = css_tg(css->parent);
9494 int ret;
9495
9496 ret = scx_tg_online(tg);
9497 if (ret)
9498 return ret;
9499
9500 if (parent)
9501 sched_online_group(tg, parent);
9502
9503 #ifdef CONFIG_UCLAMP_TASK_GROUP
9504 /* Propagate the effective uclamp value for the new group */
9505 guard(mutex)(&uclamp_mutex);
9506 guard(rcu)();
9507 cpu_util_update_eff(css);
9508 #endif
9509
9510 return 0;
9511 }
9512
9513 static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
9514 {
9515 struct task_group *tg = css_tg(css);
9516
9517 scx_tg_offline(tg);
9518 }
9519
9520 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
9521 {
9522 struct task_group *tg = css_tg(css);
9523
9524 sched_release_group(tg);
9525 }
9526
9527 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
9528 {
9529 struct task_group *tg = css_tg(css);
9530
9531 /*
9532 * Relies on the RCU grace period between css_released() and this.
9533 */
9534 sched_unregister_group(tg);
9535 }
9536
9537 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
9538 {
9539 #ifdef CONFIG_RT_GROUP_SCHED
9540 struct task_struct *task;
9541 struct cgroup_subsys_state *css;
9542
9543 if (!rt_group_sched_enabled())
9544 goto scx_check;
9545
9546 cgroup_taskset_for_each(task, css, tset) {
9547 if (!sched_rt_can_attach(css_tg(css), task))
9548 return -EINVAL;
9549 }
9550 scx_check:
9551 #endif /* CONFIG_RT_GROUP_SCHED */
9552 return scx_cgroup_can_attach(tset);
9553 }
9554
9555 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
9556 {
9557 struct task_struct *task;
9558 struct cgroup_subsys_state *css;
9559
9560 cgroup_taskset_for_each(task, css, tset)
9561 sched_move_task(task, false);
9562 }
9563
9564 static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
9565 {
9566 scx_cgroup_cancel_attach(tset);
9567 }
9568
9569 #ifdef CONFIG_UCLAMP_TASK_GROUP
9570 static void cpu_util_update_eff(struct cgroup_subsys_state *css)
9571 {
9572 struct cgroup_subsys_state *top_css = css;
9573 struct uclamp_se *uc_parent = NULL;
9574 struct uclamp_se *uc_se = NULL;
9575 unsigned int eff[UCLAMP_CNT];
9576 enum uclamp_id clamp_id;
9577 unsigned int clamps;
9578
9579 lockdep_assert_held(&uclamp_mutex);
9580 WARN_ON_ONCE(!rcu_read_lock_held());
9581
9582 css_for_each_descendant_pre(css, top_css) {
9583 uc_parent = css_tg(css)->parent
9584 ? css_tg(css)->parent->uclamp : NULL;
9585
9586 for_each_clamp_id(clamp_id) {
9587 /* Assume effective clamps match requested clamps */
9588 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
9589 /* Cap effective clamps with parent's effective clamps */
9590 if (uc_parent &&
9591 eff[clamp_id] > uc_parent[clamp_id].value) {
9592 eff[clamp_id] = uc_parent[clamp_id].value;
9593 }
9594 }
9595 /* Ensure protection is always capped by limit */
9596 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
9597
9598 /* Propagate most restrictive effective clamps */
9599 clamps = 0x0;
9600 uc_se = css_tg(css)->uclamp;
9601 for_each_clamp_id(clamp_id) {
9602 if (eff[clamp_id] == uc_se[clamp_id].value)
9603 continue;
9604 uc_se[clamp_id].value = eff[clamp_id];
9605 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
9606 clamps |= (0x1 << clamp_id);
9607 }
9608 if (!clamps) {
9609 css = css_rightmost_descendant(css);
9610 continue;
9611 }
9612
9613 /* Immediately update descendants RUNNABLE tasks */
9614 uclamp_update_active_tasks(css);
9615 }
9616 }
9617
9618 /*
9619 * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
9620 * C expression. Since there is no way to convert a macro argument (N) into a
9621 * character constant, use two levels of macros.
9622 */
9623 #define _POW10(exp) ((unsigned int)1e##exp)
9624 #define POW10(exp) _POW10(exp)
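/*
 * Illustrative expansion: POW10(2) -> _POW10(2) -> ((unsigned int)1e2) == 100,
 * so UCLAMP_PERCENT_SCALE below works out to 100 * 100 == 10000, i.e.
 * percentages are stored with two fractional decimal digits.
 */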
9625
9626 struct uclamp_request {
9627 #define UCLAMP_PERCENT_SHIFT 2
9628 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT))
9629 s64 percent;
9630 u64 util;
9631 int ret;
9632 };
9633
9634 static inline struct uclamp_request
9635 capacity_from_percent(char *buf)
9636 {
9637 struct uclamp_request req = {
9638 .percent = UCLAMP_PERCENT_SCALE,
9639 .util = SCHED_CAPACITY_SCALE,
9640 .ret = 0,
9641 };
9642
9643 buf = strim(buf);
9644 if (strcmp(buf, "max")) {
9645 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
9646 &req.percent);
9647 if (req.ret)
9648 return req;
9649 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
9650 req.ret = -ERANGE;
9651 return req;
9652 }
9653
9654 req.util = req.percent << SCHED_CAPACITY_SHIFT;
9655 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
9656 }
9657
9658 return req;
9659 }
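/*
 * Worked example (illustrative, assuming SCHED_CAPACITY_SHIFT == 10):
 * writing "50.00" parses to req.percent == 5000, and the conversion gives
 * req.util == DIV_ROUND_CLOSEST_ULL(5000 << 10, 10000) == 512, i.e. half
 * of SCHED_CAPACITY_SCALE.
 */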
9660
9661 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
9662 size_t nbytes, loff_t off,
9663 enum uclamp_id clamp_id)
9664 {
9665 struct uclamp_request req;
9666 struct task_group *tg;
9667
9668 req = capacity_from_percent(buf);
9669 if (req.ret)
9670 return req.ret;
9671
9672 sched_uclamp_enable();
9673
9674 guard(mutex)(&uclamp_mutex);
9675 guard(rcu)();
9676
9677 tg = css_tg(of_css(of));
9678 if (tg->uclamp_req[clamp_id].value != req.util)
9679 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
9680
9681 /*
9682 * Because the conversion rounding is not recoverable, keep track of the
9683 * exact requested value.
9684 */
9685 tg->uclamp_pct[clamp_id] = req.percent;
9686
9687 /* Update effective clamps to track the most restrictive value */
9688 cpu_util_update_eff(of_css(of));
9689
9690 return nbytes;
9691 }
9692
9693 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
9694 char *buf, size_t nbytes,
9695 loff_t off)
9696 {
9697 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
9698 }
9699
9700 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
9701 char *buf, size_t nbytes,
9702 loff_t off)
9703 {
9704 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
9705 }
9706
9707 static inline void cpu_uclamp_print(struct seq_file *sf,
9708 enum uclamp_id clamp_id)
9709 {
9710 struct task_group *tg;
9711 u64 util_clamp;
9712 u64 percent;
9713 u32 rem;
9714
9715 scoped_guard (rcu) {
9716 tg = css_tg(seq_css(sf));
9717 util_clamp = tg->uclamp_req[clamp_id].value;
9718 }
9719
9720 if (util_clamp == SCHED_CAPACITY_SCALE) {
9721 seq_puts(sf, "max\n");
9722 return;
9723 }
9724
9725 percent = tg->uclamp_pct[clamp_id];
9726 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
9727 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
9728 }
9729
9730 static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
9731 {
9732 cpu_uclamp_print(sf, UCLAMP_MIN);
9733 return 0;
9734 }
9735
9736 static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
9737 {
9738 cpu_uclamp_print(sf, UCLAMP_MAX);
9739 return 0;
9740 }
9741 #endif /* CONFIG_UCLAMP_TASK_GROUP */
9742
9743 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9744 static unsigned long tg_weight(struct task_group *tg)
9745 {
9746 #ifdef CONFIG_FAIR_GROUP_SCHED
9747 return scale_load_down(tg->shares);
9748 #else
9749 return sched_weight_from_cgroup(tg->scx.weight);
9750 #endif
9751 }
9752
9753 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
9754 struct cftype *cftype, u64 shareval)
9755 {
9756 int ret;
9757
9758 if (shareval > scale_load_down(ULONG_MAX))
9759 shareval = MAX_SHARES;
9760 ret = sched_group_set_shares(css_tg(css), scale_load(shareval));
9761 if (!ret)
9762 scx_group_set_weight(css_tg(css),
9763 sched_weight_to_cgroup(shareval));
9764 return ret;
9765 }
9766
9767 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
9768 struct cftype *cft)
9769 {
9770 return tg_weight(css_tg(css));
9771 }
9772 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9773
9774 #ifdef CONFIG_CFS_BANDWIDTH
9775 static DEFINE_MUTEX(cfs_constraints_mutex);
9776
9777 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
9778
9779 static int tg_set_cfs_bandwidth(struct task_group *tg,
9780 u64 period_us, u64 quota_us, u64 burst_us)
9781 {
9782 int i, ret = 0, runtime_enabled, runtime_was_enabled;
9783 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9784 u64 period, quota, burst;
9785
9786 period = (u64)period_us * NSEC_PER_USEC;
9787
9788 if (quota_us == RUNTIME_INF)
9789 quota = RUNTIME_INF;
9790 else
9791 quota = (u64)quota_us * NSEC_PER_USEC;
9792
9793 burst = (u64)burst_us * NSEC_PER_USEC;
9794
9795 /*
9796 * Prevent race between setting of cfs_rq->runtime_enabled and
9797 * unthrottle_offline_cfs_rqs().
9798 */
9799 guard(cpus_read_lock)();
9800 guard(mutex)(&cfs_constraints_mutex);
9801
9802 ret = __cfs_schedulable(tg, period, quota);
9803 if (ret)
9804 return ret;
9805
9806 runtime_enabled = quota != RUNTIME_INF;
9807 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
9808 /*
9809 * If we need to toggle cfs_bandwidth_used, off->on must occur
9810 * before making related changes, and on->off must occur afterwards
9811 */
9812 if (runtime_enabled && !runtime_was_enabled)
9813 cfs_bandwidth_usage_inc();
9814
9815 scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
9816 cfs_b->period = ns_to_ktime(period);
9817 cfs_b->quota = quota;
9818 cfs_b->burst = burst;
9819
9820 __refill_cfs_bandwidth_runtime(cfs_b);
9821
9822 /*
9823 * Restart the period timer (if active) to handle new
9824 * period expiry:
9825 */
9826 if (runtime_enabled)
9827 start_cfs_bandwidth(cfs_b);
9828 }
9829
9830 for_each_online_cpu(i) {
9831 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
9832 struct rq *rq = cfs_rq->rq;
9833
9834 guard(rq_lock_irq)(rq);
9835 cfs_rq->runtime_enabled = runtime_enabled;
9836 cfs_rq->runtime_remaining = 1;
9837
9838 if (cfs_rq->throttled)
9839 unthrottle_cfs_rq(cfs_rq);
9840 }
9841
9842 if (runtime_was_enabled && !runtime_enabled)
9843 cfs_bandwidth_usage_dec();
9844
9845 return 0;
9846 }
9847
9848 static u64 tg_get_cfs_period(struct task_group *tg)
9849 {
9850 u64 cfs_period_us;
9851
9852 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
9853 do_div(cfs_period_us, NSEC_PER_USEC);
9854
9855 return cfs_period_us;
9856 }
9857
9858 static u64 tg_get_cfs_quota(struct task_group *tg)
9859 {
9860 u64 quota_us;
9861
9862 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
9863 return RUNTIME_INF;
9864
9865 quota_us = tg->cfs_bandwidth.quota;
9866 do_div(quota_us, NSEC_PER_USEC);
9867
9868 return quota_us;
9869 }
9870
9871 static u64 tg_get_cfs_burst(struct task_group *tg)
9872 {
9873 u64 burst_us;
9874
9875 burst_us = tg->cfs_bandwidth.burst;
9876 do_div(burst_us, NSEC_PER_USEC);
9877
9878 return burst_us;
9879 }
9880
9881 struct cfs_schedulable_data {
9882 struct task_group *tg;
9883 u64 period, quota;
9884 };
9885
9886 /*
9887 * normalize group quota/period to be quota/max_period
9888 * note: units are usecs
9889 */
9890 static u64 normalize_cfs_quota(struct task_group *tg,
9891 struct cfs_schedulable_data *d)
9892 {
9893 u64 quota, period;
9894
9895 if (tg == d->tg) {
9896 period = d->period;
9897 quota = d->quota;
9898 } else {
9899 period = tg_get_cfs_period(tg);
9900 quota = tg_get_cfs_quota(tg);
9901 }
9902
9903 /* note: these should typically be equivalent */
9904 if (quota == RUNTIME_INF || quota == -1)
9905 return RUNTIME_INF;
9906
9907 return to_ratio(period, quota);
9908 }
9909
9910 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
9911 {
9912 struct cfs_schedulable_data *d = data;
9913 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9914 s64 quota = 0, parent_quota = -1;
9915
9916 if (!tg->parent) {
9917 quota = RUNTIME_INF;
9918 } else {
9919 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
9920
9921 quota = normalize_cfs_quota(tg, d);
9922 parent_quota = parent_b->hierarchical_quota;
9923
9924 /*
9925 * Ensure max(child_quota) <= parent_quota. On cgroup2,
9926 * always take the non-RUNTIME_INF min. On cgroup1, only
9927 * inherit when no limit is set. In both cases this is used
9928 * by the scheduler to determine if a given CFS task has a
9929 * bandwidth constraint at some higher level.
9930 */
9931 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
9932 if (quota == RUNTIME_INF)
9933 quota = parent_quota;
9934 else if (parent_quota != RUNTIME_INF)
9935 quota = min(quota, parent_quota);
9936 } else {
9937 if (quota == RUNTIME_INF)
9938 quota = parent_quota;
9939 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
9940 return -EINVAL;
9941 }
9942 }
9943 cfs_b->hierarchical_quota = quota;
9944
9945 return 0;
9946 }
9947
9948 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
9949 {
9950 struct cfs_schedulable_data data = {
9951 .tg = tg,
9952 .period = period,
9953 .quota = quota,
9954 };
9955
9956 if (quota != RUNTIME_INF) {
9957 do_div(data.period, NSEC_PER_USEC);
9958 do_div(data.quota, NSEC_PER_USEC);
9959 }
9960
9961 guard(rcu)();
9962 return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
9963 }
9964
9965 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
9966 {
9967 struct task_group *tg = css_tg(seq_css(sf));
9968 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9969
9970 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
9971 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
9972 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
9973
9974 if (schedstat_enabled() && tg != &root_task_group) {
9975 struct sched_statistics *stats;
9976 u64 ws = 0;
9977 int i;
9978
9979 for_each_possible_cpu(i) {
9980 stats = __schedstats_from_se(tg->se[i]);
9981 ws += schedstat_val(stats->wait_sum);
9982 }
9983
9984 seq_printf(sf, "wait_sum %llu\n", ws);
9985 }
9986
9987 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
9988 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
9989
9990 return 0;
9991 }
9992
9993 static u64 throttled_time_self(struct task_group *tg)
9994 {
9995 int i;
9996 u64 total = 0;
9997
9998 for_each_possible_cpu(i) {
9999 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
10000 }
10001
10002 return total;
10003 }
10004
10005 static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
10006 {
10007 struct task_group *tg = css_tg(seq_css(sf));
10008
10009 seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
10010
10011 return 0;
10012 }
10013 #endif /* CONFIG_CFS_BANDWIDTH */
10014
10015 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
10016 const u64 max_bw_quota_period_us = 1 * USEC_PER_SEC; /* 1s */
10017 static const u64 min_bw_quota_period_us = 1 * USEC_PER_MSEC; /* 1ms */
10018 /* More than 203 days if BW_SHIFT equals 20. */
10019 static const u64 max_bw_runtime_us = MAX_BW;
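/*
 * Illustrative arithmetic (assuming MAX_BW == (1ULL << (64 - BW_SHIFT)) - 1):
 * with BW_SHIFT == 20 that is 2^44 - 1 usec, i.e. roughly 17.6e6 seconds
 * or about 203.7 days, matching the comment above.
 */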
10020
10021 static void tg_bandwidth(struct task_group *tg,
10022 u64 *period_us_p, u64 *quota_us_p, u64 *burst_us_p)
10023 {
10024 #ifdef CONFIG_CFS_BANDWIDTH
10025 if (period_us_p)
10026 *period_us_p = tg_get_cfs_period(tg);
10027 if (quota_us_p)
10028 *quota_us_p = tg_get_cfs_quota(tg);
10029 if (burst_us_p)
10030 *burst_us_p = tg_get_cfs_burst(tg);
10031 #else /* !CONFIG_CFS_BANDWIDTH */
10032 if (period_us_p)
10033 *period_us_p = tg->scx.bw_period_us;
10034 if (quota_us_p)
10035 *quota_us_p = tg->scx.bw_quota_us;
10036 if (burst_us_p)
10037 *burst_us_p = tg->scx.bw_burst_us;
10038 #endif /* CONFIG_CFS_BANDWIDTH */
10039 }
10040
10041 static u64 cpu_period_read_u64(struct cgroup_subsys_state *css,
10042 struct cftype *cft)
10043 {
10044 u64 period_us;
10045
10046 tg_bandwidth(css_tg(css), &period_us, NULL, NULL);
10047 return period_us;
10048 }
10049
10050 static int tg_set_bandwidth(struct task_group *tg,
10051 u64 period_us, u64 quota_us, u64 burst_us)
10052 {
10053 const u64 max_usec = U64_MAX / NSEC_PER_USEC;
10054 int ret = 0;
10055
10056 if (tg == &root_task_group)
10057 return -EINVAL;
10058
10059 /* Values should survive translation to nsec */
10060 if (period_us > max_usec ||
10061 (quota_us != RUNTIME_INF && quota_us > max_usec) ||
10062 burst_us > max_usec)
10063 return -EINVAL;
10064
10065 /*
10066 * Ensure we have some amount of bandwidth every period. This is to
10067 * prevent reaching a state of large arrears when throttled via
10068 * entity_tick() resulting in prolonged exit starvation.
10069 */
10070 if (quota_us < min_bw_quota_period_us ||
10071 period_us < min_bw_quota_period_us)
10072 return -EINVAL;
10073
10074 /*
10075 * Likewise, bound things on the other side by preventing insane quota
10076 * periods. This also allows us to normalize in computing quota
10077 * feasibility.
10078 */
10079 if (period_us > max_bw_quota_period_us)
10080 return -EINVAL;
10081
10082 /*
10083 * Bound quota to defend quota against overflow during bandwidth shift.
10084 */
10085 if (quota_us != RUNTIME_INF && quota_us > max_bw_runtime_us)
10086 return -EINVAL;
10087
10088 if (quota_us != RUNTIME_INF && (burst_us > quota_us ||
10089 burst_us + quota_us > max_bw_runtime_us))
10090 return -EINVAL;
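/*
 * Illustrative values that pass all of the checks above: period_us ==
 * 100000 and quota_us == 50000 with burst_us == 0 cap the group at 50ms
 * of runtime per 100ms period.
 */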
10091
10092 #ifdef CONFIG_CFS_BANDWIDTH
10093 ret = tg_set_cfs_bandwidth(tg, period_us, quota_us, burst_us);
10094 #endif /* CONFIG_CFS_BANDWIDTH */
10095 if (!ret)
10096 scx_group_set_bandwidth(tg, period_us, quota_us, burst_us);
10097 return ret;
10098 }
10099
10100 static s64 cpu_quota_read_s64(struct cgroup_subsys_state *css,
10101 struct cftype *cft)
10102 {
10103 u64 quota_us;
10104
10105 tg_bandwidth(css_tg(css), NULL, &quota_us, NULL);
10106 return quota_us; /* (s64)RUNTIME_INF becomes -1 */
10107 }
10108
10109 static u64 cpu_burst_read_u64(struct cgroup_subsys_state *css,
10110 struct cftype *cft)
10111 {
10112 u64 burst_us;
10113
10114 tg_bandwidth(css_tg(css), NULL, NULL, &burst_us);
10115 return burst_us;
10116 }
10117
10118 static int cpu_period_write_u64(struct cgroup_subsys_state *css,
10119 struct cftype *cftype, u64 period_us)
10120 {
10121 struct task_group *tg = css_tg(css);
10122 u64 quota_us, burst_us;
10123
10124 tg_bandwidth(tg, NULL, &quota_us, &burst_us);
10125 return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
10126 }
10127
10128 static int cpu_quota_write_s64(struct cgroup_subsys_state *css,
10129 struct cftype *cftype, s64 quota_us)
10130 {
10131 struct task_group *tg = css_tg(css);
10132 u64 period_us, burst_us;
10133
10134 if (quota_us < 0)
10135 quota_us = RUNTIME_INF;
10136
10137 tg_bandwidth(tg, &period_us, NULL, &burst_us);
10138 return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
10139 }
10140
10141 static int cpu_burst_write_u64(struct cgroup_subsys_state *css,
10142 struct cftype *cftype, u64 burst_us)
10143 {
10144 struct task_group *tg = css_tg(css);
10145 u64 period_us, quota_us;
10146
10147 tg_bandwidth(tg, &period_us, &quota_us, NULL);
10148 return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
10149 }
10150 #endif /* CONFIG_GROUP_SCHED_BANDWIDTH */
10151
10152 #ifdef CONFIG_RT_GROUP_SCHED
10153 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
10154 struct cftype *cft, s64 val)
10155 {
10156 return sched_group_set_rt_runtime(css_tg(css), val);
10157 }
10158
10159 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
10160 struct cftype *cft)
10161 {
10162 return sched_group_rt_runtime(css_tg(css));
10163 }
10164
10165 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
10166 struct cftype *cftype, u64 rt_period_us)
10167 {
10168 return sched_group_set_rt_period(css_tg(css), rt_period_us);
10169 }
10170
10171 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
10172 struct cftype *cft)
10173 {
10174 return sched_group_rt_period(css_tg(css));
10175 }
10176 #endif /* CONFIG_RT_GROUP_SCHED */
10177
10178 #ifdef CONFIG_GROUP_SCHED_WEIGHT
10179 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
10180 struct cftype *cft)
10181 {
10182 return css_tg(css)->idle;
10183 }
10184
10185 static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
10186 struct cftype *cft, s64 idle)
10187 {
10188 int ret;
10189
10190 ret = sched_group_set_idle(css_tg(css), idle);
10191 if (!ret)
10192 scx_group_set_idle(css_tg(css), idle);
10193 return ret;
10194 }
10195 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
10196
10197 static struct cftype cpu_legacy_files[] = {
10198 #ifdef CONFIG_GROUP_SCHED_WEIGHT
10199 {
10200 .name = "shares",
10201 .read_u64 = cpu_shares_read_u64,
10202 .write_u64 = cpu_shares_write_u64,
10203 },
10204 {
10205 .name = "idle",
10206 .read_s64 = cpu_idle_read_s64,
10207 .write_s64 = cpu_idle_write_s64,
10208 },
10209 #endif
10210 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
10211 {
10212 .name = "cfs_period_us",
10213 .read_u64 = cpu_period_read_u64,
10214 .write_u64 = cpu_period_write_u64,
10215 },
10216 {
10217 .name = "cfs_quota_us",
10218 .read_s64 = cpu_quota_read_s64,
10219 .write_s64 = cpu_quota_write_s64,
10220 },
10221 {
10222 .name = "cfs_burst_us",
10223 .read_u64 = cpu_burst_read_u64,
10224 .write_u64 = cpu_burst_write_u64,
10225 },
10226 #endif
10227 #ifdef CONFIG_CFS_BANDWIDTH
10228 {
10229 .name = "stat",
10230 .seq_show = cpu_cfs_stat_show,
10231 },
10232 {
10233 .name = "stat.local",
10234 .seq_show = cpu_cfs_local_stat_show,
10235 },
10236 #endif
10237 #ifdef CONFIG_UCLAMP_TASK_GROUP
10238 {
10239 .name = "uclamp.min",
10240 .flags = CFTYPE_NOT_ON_ROOT,
10241 .seq_show = cpu_uclamp_min_show,
10242 .write = cpu_uclamp_min_write,
10243 },
10244 {
10245 .name = "uclamp.max",
10246 .flags = CFTYPE_NOT_ON_ROOT,
10247 .seq_show = cpu_uclamp_max_show,
10248 .write = cpu_uclamp_max_write,
10249 },
10250 #endif
10251 { } /* Terminate */
10252 };
10253
10254 #ifdef CONFIG_RT_GROUP_SCHED
10255 static struct cftype rt_group_files[] = {
10256 {
10257 .name = "rt_runtime_us",
10258 .read_s64 = cpu_rt_runtime_read,
10259 .write_s64 = cpu_rt_runtime_write,
10260 },
10261 {
10262 .name = "rt_period_us",
10263 .read_u64 = cpu_rt_period_read_uint,
10264 .write_u64 = cpu_rt_period_write_uint,
10265 },
10266 { } /* Terminate */
10267 };
10268
10269 # ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED
10270 DEFINE_STATIC_KEY_FALSE(rt_group_sched);
10271 # else
10272 DEFINE_STATIC_KEY_TRUE(rt_group_sched);
10273 # endif
10274
10275 static int __init setup_rt_group_sched(char *str)
10276 {
10277 long val;
10278
10279 if (kstrtol(str, 0, &val) || val < 0 || val > 1) {
10280 pr_warn("Unable to set rt_group_sched\n");
10281 return 1;
10282 }
10283 if (val)
10284 static_branch_enable(&rt_group_sched);
10285 else
10286 static_branch_disable(&rt_group_sched);
10287
10288 return 1;
10289 }
10290 __setup("rt_group_sched=", setup_rt_group_sched);
10291
10292 static int __init cpu_rt_group_init(void)
10293 {
10294 if (!rt_group_sched_enabled())
10295 return 0;
10296
10297 WARN_ON(cgroup_add_legacy_cftypes(&cpu_cgrp_subsys, rt_group_files));
10298 return 0;
10299 }
10300 subsys_initcall(cpu_rt_group_init);
10301 #endif /* CONFIG_RT_GROUP_SCHED */
10302
10303 static int cpu_extra_stat_show(struct seq_file *sf,
10304 struct cgroup_subsys_state *css)
10305 {
10306 #ifdef CONFIG_CFS_BANDWIDTH
10307 {
10308 struct task_group *tg = css_tg(css);
10309 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
10310 u64 throttled_usec, burst_usec;
10311
10312 throttled_usec = cfs_b->throttled_time;
10313 do_div(throttled_usec, NSEC_PER_USEC);
10314 burst_usec = cfs_b->burst_time;
10315 do_div(burst_usec, NSEC_PER_USEC);
10316
10317 seq_printf(sf, "nr_periods %d\n"
10318 "nr_throttled %d\n"
10319 "throttled_usec %llu\n"
10320 "nr_bursts %d\n"
10321 "burst_usec %llu\n",
10322 cfs_b->nr_periods, cfs_b->nr_throttled,
10323 throttled_usec, cfs_b->nr_burst, burst_usec);
10324 }
10325 #endif /* CONFIG_CFS_BANDWIDTH */
10326 return 0;
10327 }
10328
10329 static int cpu_local_stat_show(struct seq_file *sf,
10330 struct cgroup_subsys_state *css)
10331 {
10332 #ifdef CONFIG_CFS_BANDWIDTH
10333 {
10334 struct task_group *tg = css_tg(css);
10335 u64 throttled_self_usec;
10336
10337 throttled_self_usec = throttled_time_self(tg);
10338 do_div(throttled_self_usec, NSEC_PER_USEC);
10339
10340 seq_printf(sf, "throttled_usec %llu\n",
10341 throttled_self_usec);
10342 }
10343 #endif
10344 return 0;
10345 }
10346
10347 #ifdef CONFIG_GROUP_SCHED_WEIGHT
10348
10349 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
10350 struct cftype *cft)
10351 {
10352 return sched_weight_to_cgroup(tg_weight(css_tg(css)));
10353 }
10354
10355 static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
10356 struct cftype *cft, u64 cgrp_weight)
10357 {
10358 unsigned long weight;
10359 int ret;
10360
10361 if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX)
10362 return -ERANGE;
10363
10364 weight = sched_weight_from_cgroup(cgrp_weight);
10365
10366 ret = sched_group_set_shares(css_tg(css), scale_load(weight));
10367 if (!ret)
10368 scx_group_set_weight(css_tg(css), cgrp_weight);
10369 return ret;
10370 }
10371
10372 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
10373 struct cftype *cft)
10374 {
10375 unsigned long weight = tg_weight(css_tg(css));
10376 int last_delta = INT_MAX;
10377 int prio, delta;
10378
10379 /* find the closest nice value to the current weight */
10380 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
10381 delta = abs(sched_prio_to_weight[prio] - weight);
10382 if (delta >= last_delta)
10383 break;
10384 last_delta = delta;
10385 }
10386
10387 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
10388 }
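/*
 * Worked example (illustrative): for a group weight of 820 the loop stops
 * at prio == 22, since |655 - 820| >= |820 - 820|, and the function
 * returns PRIO_TO_NICE(21 + MAX_RT_PRIO), i.e. nice 1, the entry whose
 * weight matches exactly in sched_prio_to_weight[].
 */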
10389
10390 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
10391 struct cftype *cft, s64 nice)
10392 {
10393 unsigned long weight;
10394 int idx, ret;
10395
10396 if (nice < MIN_NICE || nice > MAX_NICE)
10397 return -ERANGE;
10398
10399 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
10400 idx = array_index_nospec(idx, 40);
10401 weight = sched_prio_to_weight[idx];
10402
10403 ret = sched_group_set_shares(css_tg(css), scale_load(weight));
10404 if (!ret)
10405 scx_group_set_weight(css_tg(css),
10406 sched_weight_to_cgroup(weight));
10407 return ret;
10408 }
10409 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
10410
10411 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
10412 long period, long quota)
10413 {
10414 if (quota < 0)
10415 seq_puts(sf, "max");
10416 else
10417 seq_printf(sf, "%ld", quota);
10418
10419 seq_printf(sf, " %ld\n", period);
10420 }
10421
10422 /* caller should put the current value in *@period_us_p before calling */
10423 static int __maybe_unused cpu_period_quota_parse(char *buf, u64 *period_us_p,
10424 u64 *quota_us_p)
10425 {
10426 char tok[21]; /* U64_MAX */
10427
10428 if (sscanf(buf, "%20s %llu", tok, period_us_p) < 1)
10429 return -EINVAL;
10430
10431 if (sscanf(tok, "%llu", quota_us_p) < 1) {
10432 if (!strcmp(tok, "max"))
10433 *quota_us_p = RUNTIME_INF;
10434 else
10435 return -EINVAL;
10436 }
10437
10438 return 0;
10439 }
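/*
 * Illustrative inputs: "max 100000" yields an infinite quota with
 * period_us_p == 100000; "50000 100000" yields quota_us_p == 50000 and
 * period_us_p == 100000; a lone "50000" updates only the quota and keeps
 * the period the caller preloaded into *period_us_p.
 */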
10440
10441 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
10442 static int cpu_max_show(struct seq_file *sf, void *v)
10443 {
10444 struct task_group *tg = css_tg(seq_css(sf));
10445 u64 period_us, quota_us;
10446
10447 tg_bandwidth(tg, &period_us, &quota_us, NULL);
10448 cpu_period_quota_print(sf, period_us, quota_us);
10449 return 0;
10450 }
10451
10452 static ssize_t cpu_max_write(struct kernfs_open_file *of,
10453 char *buf, size_t nbytes, loff_t off)
10454 {
10455 struct task_group *tg = css_tg(of_css(of));
10456 u64 period_us, quota_us, burst_us;
10457 int ret;
10458
10459 tg_bandwidth(tg, &period_us, NULL, &burst_us);
10460 ret = cpu_period_quota_parse(buf, &period_us, &quota_us);
10461 if (!ret)
10462 ret = tg_set_bandwidth(tg, period_us, quota_us, burst_us);
10463 return ret ?: nbytes;
10464 }
10465 #endif /* CONFIG_GROUP_SCHED_BANDWIDTH */
10466
10467 static struct cftype cpu_files[] = {
10468 #ifdef CONFIG_GROUP_SCHED_WEIGHT
10469 {
10470 .name = "weight",
10471 .flags = CFTYPE_NOT_ON_ROOT,
10472 .read_u64 = cpu_weight_read_u64,
10473 .write_u64 = cpu_weight_write_u64,
10474 },
10475 {
10476 .name = "weight.nice",
10477 .flags = CFTYPE_NOT_ON_ROOT,
10478 .read_s64 = cpu_weight_nice_read_s64,
10479 .write_s64 = cpu_weight_nice_write_s64,
10480 },
10481 {
10482 .name = "idle",
10483 .flags = CFTYPE_NOT_ON_ROOT,
10484 .read_s64 = cpu_idle_read_s64,
10485 .write_s64 = cpu_idle_write_s64,
10486 },
10487 #endif
10488 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
10489 {
10490 .name = "max",
10491 .flags = CFTYPE_NOT_ON_ROOT,
10492 .seq_show = cpu_max_show,
10493 .write = cpu_max_write,
10494 },
10495 {
10496 .name = "max.burst",
10497 .flags = CFTYPE_NOT_ON_ROOT,
10498 .read_u64 = cpu_burst_read_u64,
10499 .write_u64 = cpu_burst_write_u64,
10500 },
10501 #endif /* CONFIG_GROUP_SCHED_BANDWIDTH */
10502 #ifdef CONFIG_UCLAMP_TASK_GROUP
10503 {
10504 .name = "uclamp.min",
10505 .flags = CFTYPE_NOT_ON_ROOT,
10506 .seq_show = cpu_uclamp_min_show,
10507 .write = cpu_uclamp_min_write,
10508 },
10509 {
10510 .name = "uclamp.max",
10511 .flags = CFTYPE_NOT_ON_ROOT,
10512 .seq_show = cpu_uclamp_max_show,
10513 .write = cpu_uclamp_max_write,
10514 },
10515 #endif /* CONFIG_UCLAMP_TASK_GROUP */
10516 { } /* terminate */
10517 };
10518
10519 struct cgroup_subsys cpu_cgrp_subsys = {
10520 .css_alloc = cpu_cgroup_css_alloc,
10521 .css_online = cpu_cgroup_css_online,
10522 .css_offline = cpu_cgroup_css_offline,
10523 .css_released = cpu_cgroup_css_released,
10524 .css_free = cpu_cgroup_css_free,
10525 .css_extra_stat_show = cpu_extra_stat_show,
10526 .css_local_stat_show = cpu_local_stat_show,
10527 .can_attach = cpu_cgroup_can_attach,
10528 .attach = cpu_cgroup_attach,
10529 .cancel_attach = cpu_cgroup_cancel_attach,
10530 .legacy_cftypes = cpu_legacy_files,
10531 .dfl_cftypes = cpu_files,
10532 .early_init = true,
10533 .threaded = true,
10534 };
10535
10536 #endif /* CONFIG_CGROUP_SCHED */
10537
10538 void dump_cpu_task(int cpu)
10539 {
10540 if (in_hardirq() && cpu == smp_processor_id()) {
10541 struct pt_regs *regs;
10542
10543 regs = get_irq_regs();
10544 if (regs) {
10545 show_regs(regs);
10546 return;
10547 }
10548 }
10549
10550 if (trigger_single_cpu_backtrace(cpu))
10551 return;
10552
10553 pr_info("Task dump for CPU %d:\n", cpu);
10554 sched_show_task(cpu_curr(cpu));
10555 }
10556
10557 /*
10558 * Nice levels are multiplicative, with a gentle 10% change for every
10559 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
10560 * nice 1, it will get ~10% less CPU time than another CPU-bound task
10561 * that remained on nice 0.
10562 *
10563 * The "10% effect" is relative and cumulative: from _any_ nice level,
10564 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
10565 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
10566 * If a task goes up by ~10% and another task goes down by ~10% then
10567 * the relative distance between them is ~25%.)
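*
* Worked example (illustrative): two CPU-bound tasks at nice 0 and nice 1
* run with weights 1024 and 820 from the table below, receiving about
* 1024/1844 ~= 55.5% and 820/1844 ~= 44.5% of the CPU; their weight ratio
* 1024/820 ~= 1.25 is the per-level multiplier mentioned above.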
10568 */
10569 const int sched_prio_to_weight[40] = {
10570 /* -20 */ 88761, 71755, 56483, 46273, 36291,
10571 /* -15 */ 29154, 23254, 18705, 14949, 11916,
10572 /* -10 */ 9548, 7620, 6100, 4904, 3906,
10573 /* -5 */ 3121, 2501, 1991, 1586, 1277,
10574 /* 0 */ 1024, 820, 655, 526, 423,
10575 /* 5 */ 335, 272, 215, 172, 137,
10576 /* 10 */ 110, 87, 70, 56, 45,
10577 /* 15 */ 36, 29, 23, 18, 15,
10578 };
10579
10580 /*
10581 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
10582 *
10583 * In cases where the weight does not change often, we can use the
10584 * pre-calculated inverse to speed up arithmetic by turning divisions
10585 * into multiplications:
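*
* E.g. the nice-0 entry below is 2^32 / 1024 == 4194304, so a division
* "x / 1024" can be computed as "(x * 4194304) >> 32".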
10586 */
10587 const u32 sched_prio_to_wmult[40] = {
10588 /* -20 */ 48388, 59856, 76040, 92818, 118348,
10589 /* -15 */ 147320, 184698, 229616, 287308, 360437,
10590 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
10591 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
10592 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
10593 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
10594 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
10595 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
10596 };
10597
10598 void call_trace_sched_update_nr_running(struct rq *rq, int count)
10599 {
10600 trace_sched_update_nr_running_tp(rq, count);
10601 }
10602
10603 #ifdef CONFIG_SCHED_MM_CID
10604 /*
10605 * Concurrency IDentifier management
10606 *
10607 * Serialization rules:
10608 *
10609 * mm::mm_cid::mutex: Serializes fork() and exit() and therefore
10610 * protects mm::mm_cid::users and mode switch
10611 * transitions
10612 *
10613 * mm::mm_cid::lock: Serializes mm_update_max_cids() and
10614 * mm_update_cpus_allowed(). Nests in mm_cid::mutex
10615 * and runqueue lock.
10616 *
10617 * The mm_cidmask bitmap is not protected by any of the mm::mm_cid locks
10618 * and can only be modified with atomic operations.
10619 *
10620 * The mm::mm_cid::pcpu per CPU storage is protected by the CPU's runqueue
10621 * lock.
10622 *
10623 * CID ownership:
10624 *
10625 * A CID is either owned by a task (stored in task_struct::mm_cid.cid) or
10626 * by a CPU (stored in mm::mm_cid.pcpu::cid). CIDs owned by CPUs have the
10627 * MM_CID_ONCPU bit set.
10628 *
10629 * During the transition of ownership mode, the MM_CID_TRANSIT bit is set
10630 * on the CIDs. When this bit is set the tasks drop the CID back into the
10631 * pool when scheduling out.
10632 *
10633 * Both bits (ONCPU and TRANSIT) are filtered out by task_cid() when the
10634 * CID is actually handed over to user space in the RSEQ memory.
10635 *
10636 * Mode switching:
10637 *
10638 * The ownership mode is per process and stored in mm::mm_cid::mode with the
10639 * following possible states:
10640 *
10641 * 0: Per task ownership
10642 * 0 | MM_CID_TRANSIT: Transition from per CPU to per task
10643 * MM_CID_ONCPU: Per CPU ownership
10644 * MM_CID_ONCPU | MM_CID_TRANSIT: Transition from per task to per CPU
10645 *
10646 * All transitions of ownership mode happen in two phases:
10647 *
10648 * 1) mm::mm_cid::mode has the MM_CID_TRANSIT bit set. This is OR'ed on the
10649 * CIDs and denotes that the CID is only temporarily owned by a
10650 * task. When the task schedules out it drops the CID back into the
10651 * pool if this bit is set.
10652 *
10653 * 2) The initiating context walks the per CPU space or the tasks to fixup
10654 * or drop the CIDs and after completion it clears MM_CID_TRANSIT in
10655 * mm::mm_cid::mode. After that point the CIDs are strictly task or CPU
10656 * owned again.
10657 *
10658 * This two phase transition is required to prevent CID space exhaustion
10659 * during the transition as a direct transfer of ownership would fail:
10660 *
10661 * - On task to CPU mode switch if a task is scheduled in on one CPU and
10662 * then migrated to another CPU before the fixup freed enough per task
10663 * CIDs.
10664 *
10665 * - On CPU to task mode switch if two tasks are scheduled in on the same
10666 * CPU before the fixup freed per CPU CIDs.
10667 *
10668 * Both scenarios can result in a live lock because sched_in() is invoked
10669 * with runqueue lock held and loops in search of a CID and the fixup
10670 * thread can't make progress freeing them up because it is stuck on the
10671 * same runqueue lock.
10672 *
10673 * While MM_CID_TRANSIT is active during the transition phase the MM_CID
10674 * bitmap can be contended, but that's a temporary contention bound to the
10675 * transition period. After that everything goes back into steady state and
10676 * nothing except fork() and exit() will touch the bitmap. This is an
10677 * acceptable tradeoff as it completely avoids complex serialization,
10678 * memory barriers and atomic operations for the common case.
10679 *
10680 * Aside from that, this mechanism also ensures RT compatibility:
10681 *
10682 * - The task which runs the fixup is fully preemptible except for the
10683 * short runqueue lock held sections.
10684 *
10685 * - The transient impact of the bitmap contention is only problematic
10686 * when there is a thundering herd scenario of tasks scheduling in and
10687 * out concurrently. There is not much which can be done about that
10688 * except for avoiding mode switching by a proper overall system
10689 * configuration.
10690 *
10691 * Switching to per CPU mode happens when the user count becomes greater
10692 * than the maximum number of CIDs, which is calculated by:
10693 *
10694 * opt_cids = min(mm_cid::nr_cpus_allowed, mm_cid::users);
10695 * max_cids = min(1.25 * opt_cids, num_possible_cpus());
10696 *
10697 * The +25% allowance is useful for tight CPU masks in scenarios where only
10698 * a few threads are created and destroyed to avoid frequent mode
10699 * switches. This allowance shrinks as opt_cids approaches
10700 * num_possible_cpus(), which is the (unfortunate) hard ABI limit.
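*
* Worked example (illustrative, derived from the formulas above): with
* num_possible_cpus() == 8 and mm_cid::nr_cpus_allowed == 4, growing to
* users == 6 gives opt_cids == min(4, 6) == 4 and
* max_cids == min(4 + 1, 8) == 5. As 6 > 5, the process switches to per
* CPU mode and records pcpu_thrs == min(4 - 1, 8 / 2) == 3 (see below);
* it switches back to per task mode once users drops below 3.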
10701 *
10702 * At the point of switching to per CPU mode the new user is not yet
10703 * visible in the system, so the task which initiated the fork() runs the
10704 * fixup function. mm_cid_fixup_tasks_to_cpus() walks the thread list and
10705 * either marks each task owned CID with MM_CID_TRANSIT if the task is
10706 * running on a CPU or drops it into the CID pool if a task is not on a
10707 * CPU. Tasks which schedule in before the task walk reaches them do the
10708 * handover in mm_cid_schedin(). When mm_cid_fixup_tasks_to_cpus()
10709 * completes it is guaranteed that no task related to that MM owns a CID
10710 * anymore.
10711 *
10712 * Switching back to task mode happens when the user count goes below the
10713 * threshold which was recorded on the per CPU mode switch:
10714 *
10715 * pcpu_thrs = min(opt_cids - (opt_cids / 4), num_possible_cpus() / 2);
10716 *
10717 * This threshold is updated when a affinity change increases the number of
10718 * allowed CPUs for the MM, which might cause a switch back to per task
10719 * mode.
10720 *
10721 * If the switch back was initiated by a exiting task, then that task runs
10722 * the fixup function. If it was initiated by a affinity change, then it's
10723 * run either in the deferred update function in context of a workqueue or
10724 * by a task which forks a new one or by a task which exits. Whatever
10725 * happens first. mm_cid_fixup_cpus_to_task() walks through the possible
10726 * CPUs and either marks the CPU owned CIDs with MM_CID_TRANSIT if a
10727 * related task is running on the CPU or drops it into the pool. Tasks
10728 * which are scheduled in before the fixup covered them do the handover
10729 * themself. When mm_cid_fixup_cpus_to_tasks() completes it is guaranteed
10730 * that no CID related to that MM is owned by a CPU anymore.
10731 */
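
/*
 * A worked example of the above arithmetic with illustrative numbers (not
 * taken from any particular workload): a process with 6 threads whose
 * affinity mask spans 4 CPUs on a machine with 8 possible CPUs gives:
 *
 *	opt_cids  = min(4, 6)		= 4
 *	max_cids  = min(4 + 4/4, 8)	= 5
 *	pcpu_thrs = min(4 - 4/4, 8/2)	= 3
 *
 * Per CPU mode is entered once users exceeds 5 and left again only when
 * users drops below 3. The gap between the two values provides hysteresis
 * against frequent mode switches when threads are created and destroyed
 * around the limit.
 */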

/*
 * Update the CID range properties when the constraints change. Invoked via
 * fork(), exit() and affinity changes.
 */
static void __mm_update_max_cids(struct mm_mm_cid *mc)
{
	unsigned int opt_cids, max_cids;

	/* Calculate the new optimal constraint */
	opt_cids = min(mc->nr_cpus_allowed, mc->users);

	/* Adjust the maximum CIDs to +25% limited by the number of possible CPUs */
	max_cids = min(opt_cids + (opt_cids / 4), num_possible_cpus());
	WRITE_ONCE(mc->max_cids, max_cids);
}

static inline unsigned int mm_cid_calc_pcpu_thrs(struct mm_mm_cid *mc)
{
	unsigned int opt_cids;

	opt_cids = min(mc->nr_cpus_allowed, mc->users);
	/* Has to be at least 1 because 0 indicates PCPU mode off */
	return max(min(opt_cids - opt_cids / 4, num_possible_cpus() / 2), 1);
}
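
/*
 * The clamp to 1 above matters on very small systems. An illustrative case
 * (assumed, not from a specific machine): with num_possible_cpus() == 1
 * and opt_cids == 1 the inner min() evaluates to min(1, 0) == 0, which
 * would be indistinguishable from "per CPU mode off".
 */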

static bool mm_update_max_cids(struct mm_struct *mm)
{
	struct mm_mm_cid *mc = &mm->mm_cid;
	bool percpu = cid_on_cpu(mc->mode);

	lockdep_assert_held(&mm->mm_cid.lock);

	/* Clear deferred mode switch flag. A change is handled by the caller */
	mc->update_deferred = false;
	__mm_update_max_cids(mc);

	/* Check whether owner mode must be changed */
	if (!percpu) {
		/* Enable per CPU mode when the number of users is above max_cids */
		if (mc->users > mc->max_cids)
			mc->pcpu_thrs = mm_cid_calc_pcpu_thrs(mc);
	} else {
		/* Switch back to per task if user count under threshold */
		if (mc->users < mc->pcpu_thrs)
			mc->pcpu_thrs = 0;
	}

	/* Mode change required? */
	if (percpu == !!mc->pcpu_thrs)
		return false;

	/* Flip the mode and set the transition flag to bridge the transfer */
	WRITE_ONCE(mc->mode, mc->mode ^ (MM_CID_TRANSIT | MM_CID_ONCPU));
	/*
	 * Order the store against the subsequent fixups so that
	 * acquire(rq::lock) cannot be reordered by the CPU before the
	 * store.
	 */
	smp_mb();
	return true;
}
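
/*
 * Sketch of the resulting mm_cid::mode values across a full transition,
 * derived from the XOR above and the mm_cid_complete_transit() calls in
 * the fixup functions below:
 *
 *	task -> CPU:	0				(task owned)
 *			MM_CID_TRANSIT | MM_CID_ONCPU	(fixup running)
 *			MM_CID_ONCPU			(CPU owned)
 *
 *	CPU -> task:	MM_CID_ONCPU			(CPU owned)
 *			MM_CID_TRANSIT			(fixup running)
 *			0				(task owned)
 */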

static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk)
{
	struct cpumask *mm_allowed;
	struct mm_mm_cid *mc;
	unsigned int weight;

	if (!mm || !READ_ONCE(mm->mm_cid.users))
		return;
	/*
	 * mm::mm_cid::mm_cpus_allowed is the superset of each thread's
	 * allowed CPUs mask, which means it can only grow.
	 */
	mc = &mm->mm_cid;
	guard(raw_spinlock)(&mc->lock);
	mm_allowed = mm_cpus_allowed(mm);
	weight = cpumask_weighted_or(mm_allowed, mm_allowed, affmsk);
	if (weight == mc->nr_cpus_allowed)
		return;

	WRITE_ONCE(mc->nr_cpus_allowed, weight);
	__mm_update_max_cids(mc);
	if (!cid_on_cpu(mc->mode))
		return;

	/* Adjust the threshold to the wider set */
	mc->pcpu_thrs = mm_cid_calc_pcpu_thrs(mc);
	/* Switch back to per task mode? */
	if (mc->users >= mc->pcpu_thrs)
		return;

	/* Don't queue twice */
	if (mc->update_deferred)
		return;

	/* Queue the irq work, which schedules the real work */
	mc->update_deferred = true;
	irq_work_queue(&mc->irq_work);
}

static inline void mm_cid_complete_transit(struct mm_struct *mm, unsigned int mode)
{
	/*
	 * Ensure that the store removing the TRANSIT bit cannot be
	 * reordered by the CPU before the fixups have been completed.
	 */
	smp_mb();
	WRITE_ONCE(mm->mm_cid.mode, mode);
}

static inline void mm_cid_transit_to_task(struct task_struct *t, struct mm_cid_pcpu *pcp)
{
	if (cid_on_cpu(t->mm_cid.cid)) {
		unsigned int cid = cpu_cid_to_cid(t->mm_cid.cid);

		t->mm_cid.cid = cid_to_transit_cid(cid);
		pcp->cid = t->mm_cid.cid;
	}
}

static void mm_cid_fixup_cpus_to_tasks(struct mm_struct *mm)
{
	unsigned int cpu;

	/* Walk the CPUs and fixup all stale CIDs */
	for_each_possible_cpu(cpu) {
		struct mm_cid_pcpu *pcp = per_cpu_ptr(mm->mm_cid.pcpu, cpu);
		struct rq *rq = cpu_rq(cpu);

		/* Remote access to mm::mm_cid::pcpu requires rq_lock */
		guard(rq_lock_irq)(rq);
		/* Is the CID still owned by the CPU? */
		if (cid_on_cpu(pcp->cid)) {
			/*
			 * If rq->curr has @mm, transfer it with the
			 * transition bit set. Otherwise drop it.
			 */
			if (rq->curr->mm == mm && rq->curr->mm_cid.active)
				mm_cid_transit_to_task(rq->curr, pcp);
			else
				mm_drop_cid_on_cpu(mm, pcp);

		} else if (rq->curr->mm == mm && rq->curr->mm_cid.active) {
			unsigned int cid = rq->curr->mm_cid.cid;

			/* Ensure it has the transition bit set */
			if (!cid_in_transit(cid)) {
				cid = cid_to_transit_cid(cid);
				rq->curr->mm_cid.cid = cid;
				pcp->cid = cid;
			}
		}
	}
	mm_cid_complete_transit(mm, 0);
}
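
/*
 * Summary of the per CPU outcomes of the walk above:
 *
 *	pcp->cid CPU owned, rq->curr uses @mm:	handed to curr, TRANSIT set
 *	pcp->cid CPU owned, otherwise:		dropped into the pool
 *	pcp->cid not CPU owned, curr uses @mm:	TRANSIT ensured on the task
 *						CID so schedule-out drops it
 */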

static inline void mm_cid_transit_to_cpu(struct task_struct *t, struct mm_cid_pcpu *pcp)
{
	if (cid_on_task(t->mm_cid.cid)) {
		t->mm_cid.cid = cid_to_transit_cid(t->mm_cid.cid);
		pcp->cid = t->mm_cid.cid;
	}
}

static void mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm)
{
	/* Remote access to mm::mm_cid::pcpu requires rq_lock */
	guard(task_rq_lock)(t);
	if (cid_on_task(t->mm_cid.cid)) {
		/* If running on the CPU, put the CID in transit mode, otherwise drop it */
		if (task_rq(t)->curr == t)
			mm_cid_transit_to_cpu(t, per_cpu_ptr(mm->mm_cid.pcpu, task_cpu(t)));
		else
			mm_unset_cid_on_task(t);
	}
}

static void mm_cid_fixup_tasks_to_cpus(void)
{
	struct mm_struct *mm = current->mm;
	struct task_struct *t;

	lockdep_assert_held(&mm->mm_cid.mutex);

	hlist_for_each_entry(t, &mm->mm_cid.user_list, mm_cid.node) {
		/* Current has already transferred its CID before invoking the fixup */
		if (t != current)
			mm_cid_fixup_task_to_cpu(t, mm);
	}

	mm_cid_complete_transit(mm, MM_CID_ONCPU);
}

static bool sched_mm_cid_add_user(struct task_struct *t, struct mm_struct *mm)
{
	lockdep_assert_held(&mm->mm_cid.lock);

	t->mm_cid.active = 1;
	hlist_add_head(&t->mm_cid.node, &mm->mm_cid.user_list);
	mm->mm_cid.users++;
	return mm_update_max_cids(mm);
}

static void sched_mm_cid_fork(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	bool percpu;

	if (!mm)
		return;

	WARN_ON_ONCE(t->mm_cid.cid != MM_CID_UNSET);

	guard(mutex)(&mm->mm_cid.mutex);
	scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
		struct mm_cid_pcpu *pcp = this_cpu_ptr(mm->mm_cid.pcpu);

		/* First user? */
		if (!mm->mm_cid.users) {
			sched_mm_cid_add_user(t, mm);
			t->mm_cid.cid = mm_get_cid(mm);
			/* Required for execve() */
			pcp->cid = t->mm_cid.cid;
			return;
		}

		if (!sched_mm_cid_add_user(t, mm)) {
			if (!cid_on_cpu(mm->mm_cid.mode))
				t->mm_cid.cid = mm_get_cid(mm);
			return;
		}

		/* Handle the mode change and transfer current's CID */
		percpu = cid_on_cpu(mm->mm_cid.mode);
		if (!percpu)
			mm_cid_transit_to_task(current, pcp);
		else
			mm_cid_transit_to_cpu(current, pcp);
	}

	if (percpu) {
		mm_cid_fixup_tasks_to_cpus();
	} else {
		mm_cid_fixup_cpus_to_tasks(mm);
		t->mm_cid.cid = mm_get_cid(mm);
	}
}
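
/*
 * Illustrative fork() scenario for the mode switch path above (numbers
 * are examples only): a process with nr_cpus_allowed == 4 and users == 5
 * forks. sched_mm_cid_add_user() raises users to 6, mm_update_max_cids()
 * finds 6 > max_cids == 5, records pcpu_thrs and flips the mode with
 * MM_CID_TRANSIT set. Current hands its CID over under mm_cid::lock, and
 * mm_cid_fixup_tasks_to_cpus() then completes the transfer for all other
 * threads without the spinlock held.
 */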

static bool sched_mm_cid_remove_user(struct task_struct *t)
{
	lockdep_assert_held(&t->mm->mm_cid.lock);

	t->mm_cid.active = 0;
	/* Clear the transition bit */
	t->mm_cid.cid = cid_from_transit_cid(t->mm_cid.cid);
	mm_unset_cid_on_task(t);
	hlist_del_init(&t->mm_cid.node);
	t->mm->mm_cid.users--;
	return mm_update_max_cids(t->mm);
}

static bool __sched_mm_cid_exit(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;

	if (!sched_mm_cid_remove_user(t))
		return false;
	/*
	 * Contrary to fork() this only deals with a switch back to per
	 * task mode, either because the above decreased users or an
	 * affinity change increased the number of allowed CPUs and the
	 * deferred fixup did not run yet.
	 */
	if (WARN_ON_ONCE(cid_on_cpu(mm->mm_cid.mode)))
		return false;
	/*
	 * A failed fork(2) cleanup never gets here, so @current must have
	 * the same MM as @t. That's true for exit() and the failed
	 * pthread_create() cleanup case.
	 */
	if (WARN_ON_ONCE(current->mm != mm))
		return false;
	return true;
}

/*
 * When a task exits, the MM CID held by the task is no longer required as
 * the task cannot return to user space.
 */
void sched_mm_cid_exit(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;

	if (!mm || !t->mm_cid.active)
		return;
	/*
	 * Ensure that only one instance is doing MM CID operations within
	 * a MM. The common case is uncontended. The rare fixup case adds
	 * some overhead.
	 */
	scoped_guard(mutex, &mm->mm_cid.mutex) {
		/* mm_cid::mutex is sufficient to protect mm_cid::users */
		if (likely(mm->mm_cid.users > 1)) {
			scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
				if (!__sched_mm_cid_exit(t))
					return;
				/*
				 * Mode change. The task has the CID unset
				 * already and dealt with a possibly set
				 * TRANSIT bit. If the CID is owned by the
				 * CPU then drop it.
				 */
				mm_drop_cid_on_cpu(mm, this_cpu_ptr(mm->mm_cid.pcpu));
			}
			mm_cid_fixup_cpus_to_tasks(mm);
			return;
		}
		/* Last user */
		scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
			/* Required across execve() */
			if (t == current)
				mm_cid_transit_to_task(t, this_cpu_ptr(mm->mm_cid.pcpu));
			/* Ignore mode change. There is nothing to do. */
			sched_mm_cid_remove_user(t);
		}
	}

	/*
	 * As this is the last user (execve(), process exit or failed
	 * fork(2)) there is no concurrency anymore.
	 *
	 * Synchronize possibly pending work to ensure that there are no
	 * dangling references left. @t->mm_cid.users is zero so nothing
	 * can queue this work anymore.
	 */
	irq_work_sync(&mm->mm_cid.irq_work);
	cancel_work_sync(&mm->mm_cid.work);
}

/* Deactivate MM CID allocation across execve() */
void sched_mm_cid_before_execve(struct task_struct *t)
{
	sched_mm_cid_exit(t);
}

/* Reactivate MM CID after execve() */
void sched_mm_cid_after_execve(struct task_struct *t)
{
	if (t->mm)
		sched_mm_cid_fork(t);
}

static void mm_cid_work_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, mm_cid.work);

	guard(mutex)(&mm->mm_cid.mutex);
	/* Did the last user task exit already? */
	if (!mm->mm_cid.users)
		return;

	scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
		/* Have fork() or exit() handled it already? */
		if (!mm->mm_cid.update_deferred)
			return;
		/* This clears mm_cid::update_deferred */
		if (!mm_update_max_cids(mm))
			return;
		/* Affinity changes can only switch back to task mode */
		if (WARN_ON_ONCE(cid_on_cpu(mm->mm_cid.mode)))
			return;
	}
	mm_cid_fixup_cpus_to_tasks(mm);
}

static void mm_cid_irq_work(struct irq_work *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, mm_cid.irq_work);

	/*
	 * Needs to be unconditional because mm_cid::lock cannot be held
	 * when scheduling work, as mm_update_cpus_allowed() nests inside
	 * rq::lock and schedule_work() might end up in a wakeup...
	 */
	schedule_work(&mm->mm_cid.work);
}

void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
{
	mm->mm_cid.max_cids = 0;
	mm->mm_cid.mode = 0;
	mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
	mm->mm_cid.users = 0;
	mm->mm_cid.pcpu_thrs = 0;
	mm->mm_cid.update_deferred = 0;
	raw_spin_lock_init(&mm->mm_cid.lock);
	mutex_init(&mm->mm_cid.mutex);
	mm->mm_cid.irq_work = IRQ_WORK_INIT_HARD(mm_cid_irq_work);
	INIT_WORK(&mm->mm_cid.work, mm_cid_work_fn);
	INIT_HLIST_HEAD(&mm->mm_cid.user_list);
	cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
	bitmap_zero(mm_cidmask(mm), num_possible_cpus());
}
#else /* CONFIG_SCHED_MM_CID */
static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk) { }
static inline void sched_mm_cid_fork(struct task_struct *t) { }
#endif /* !CONFIG_SCHED_MM_CID */

static DEFINE_PER_CPU(struct sched_change_ctx, sched_change_ctx);

struct sched_change_ctx *sched_change_begin(struct task_struct *p, unsigned int flags)
{
	struct sched_change_ctx *ctx = this_cpu_ptr(&sched_change_ctx);
	struct rq *rq = task_rq(p);

	/*
	 * Must exclusively use matched flags since this is both dequeue and
	 * enqueue.
	 */
	WARN_ON_ONCE(flags & 0xFFFF0000);

	lockdep_assert_rq_held(rq);

	if (!(flags & DEQUEUE_NOCLOCK)) {
		update_rq_clock(rq);
		flags |= DEQUEUE_NOCLOCK;
	}

	if ((flags & DEQUEUE_CLASS) && p->sched_class->switching_from)
		p->sched_class->switching_from(rq, p);

	*ctx = (struct sched_change_ctx){
		.p = p,
		.class = p->sched_class,
		.flags = flags,
		.queued = task_on_rq_queued(p),
		.running = task_current_donor(rq, p),
	};

	if (!(flags & DEQUEUE_CLASS)) {
		if (p->sched_class->get_prio)
			ctx->prio = p->sched_class->get_prio(rq, p);
		else
			ctx->prio = p->prio;
	}

	if (ctx->queued)
		dequeue_task(rq, p, flags);
	if (ctx->running)
		put_prev_task(rq, p);

	if ((flags & DEQUEUE_CLASS) && p->sched_class->switched_from)
		p->sched_class->switched_from(rq, p);

	return ctx;
}

void sched_change_end(struct sched_change_ctx *ctx)
{
	struct task_struct *p = ctx->p;
	struct rq *rq = task_rq(p);

	lockdep_assert_rq_held(rq);

	/*
	 * Changing class without *QUEUE_CLASS is bad.
	 */
	WARN_ON_ONCE(p->sched_class != ctx->class && !(ctx->flags & ENQUEUE_CLASS));

	if ((ctx->flags & ENQUEUE_CLASS) && p->sched_class->switching_to)
		p->sched_class->switching_to(rq, p);

	if (ctx->queued)
		enqueue_task(rq, p, ctx->flags);
	if (ctx->running)
		set_next_task(rq, p);

	if (ctx->flags & ENQUEUE_CLASS) {
		if (p->sched_class->switched_to)
			p->sched_class->switched_to(rq, p);

		if (ctx->running) {
			/*
			 * If this was a class promotion, let the old class
			 * know it got preempted. Note that none of the
			 * switch*_from() methods know the new class and none
			 * of the switch*_to() methods know the old class.
			 */
			if (sched_class_above(p->sched_class, ctx->class)) {
				rq->next_class->wakeup_preempt(rq, p, 0);
				rq->next_class = p->sched_class;
			}
			/*
			 * If this was a degradation in class, make sure to
			 * reschedule.
			 */
			if (sched_class_above(ctx->class, p->sched_class))
				resched_curr(rq);
		}
	} else {
		p->sched_class->prio_changed(rq, p, ctx->prio);
	}
}
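
/*
 * A minimal usage sketch of the begin/end pair above (illustrative only,
 * not an actual call site; new_prio is a made-up variable): a priority
 * change dequeues the task, mutates it and re-enqueues it via the saved
 * context, all with the runqueue lock held:
 *
 *	struct sched_change_ctx *ctx;
 *
 *	ctx = sched_change_begin(p, DEQUEUE_SAVE);
 *	p->prio = new_prio;
 *	sched_change_end(ctx);
 *
 * Without DEQUEUE_CLASS in the flags, sched_change_end() finishes by
 * calling the class's prio_changed() method.
 */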